backprop-learn-0.1.0.0: Combinators and useful tools for ANNs using the backprop library

Safe Haskell: None
Language: Haskell2010

Backprop.Learn.Model.Types

Contents

Synopsis

Model type

type ModelFunc p s a b = forall z. Reifies z W => PMaybe (BVar z) p -> BVar z a -> PMaybe (BVar z) s -> (BVar z b, PMaybe (BVar z) s) Source #

type ModelFuncStoch p s a b = forall m z. (PrimMonad m, Reifies z W) => Gen (PrimState m) -> PMaybe (BVar z) p -> BVar z a -> PMaybe (BVar z) s -> m (BVar z b, PMaybe (BVar z) s) Source #

data Model :: Maybe Type -> Maybe Type -> Type -> Type -> Type where Source #

General parameterized model with potential state

Constructors

Model 

Fields

Instances
Category (Model p s :: Type -> Type -> Type) Source #

Share parameter and sequence state

Instance details

Defined in Backprop.Learn.Model.Types

Methods

id :: Model p s a a #

(.) :: Model p s b c -> Model p s a b -> Model p s a c #

modelD :: ModelFunc p s a b -> Model p s a b Source #

Construct a deterministic model, with no stochastic component.

Specialized Models

Stateless

type ModelFuncStateless p a b = forall z. Reifies z W => PMaybe (BVar z) p -> BVar z a -> BVar z b Source #

type ModelFuncStochStateless p a b = forall m z. (PrimMonad m, Reifies z W) => Gen (PrimState m) -> PMaybe (BVar z) p -> BVar z a -> m (BVar z b) Source #

type ModelStateless p = Model p Nothing Source #

Parameterized model with no state

runLearnStateless :: ModelStateless p a b -> forall z. Reifies z W => PMaybe (BVar z) p -> BVar z a -> BVar z b Source #

runLearnStochStateless :: ModelStateless p a b -> forall (m :: Type -> Type) z. (PrimMonad m, Reifies z W) => Gen (PrimState m) -> PMaybe (BVar z) p -> BVar z a -> m (BVar z b) Source #

modelStatelessD :: ModelFuncStateless p a b -> ModelStateless p a b Source #

Construct a deterministic stateless model, with no stochastic component.

Stateless and Parameterless

type BFunc a b = forall z. Reifies z W => BVar z a -> BVar z b Source #

type BFuncStoch a b = forall m z. (PrimMonad m, Reifies z W) => Gen (PrimState m) -> BVar z a -> m (BVar z b) Source #

pattern Func :: BFunc a b -> BFuncStoch a b -> Func a b Source #

Unparameterized model with no state

runFunc :: Func a b -> forall z. Reifies z W => BVar z a -> BVar z b Source #

runFuncStoch :: Func a b -> forall (m :: Type -> Type) z. (PrimMonad m, Reifies z W) => Gen (PrimState m) -> BVar z a -> m (BVar z b) Source #

funcD :: BFunc a b -> Func a b Source #

Construct a deterministic unparameterized stateless model, with no stochastic component.

Manipulating models as functions

type ModelFuncM m p s a b = forall z. Reifies z W => PMaybe (BVar z) p -> BVar z a -> PMaybe (BVar z) s -> m (BVar z b, PMaybe (BVar z) s) Source #

withModelFunc0 :: (forall m. Monad m => ModelFuncM m p s a b) -> Model p s a b Source #

withModelFunc :: (forall m. Monad m => ModelFuncM m p s a b -> ModelFuncM m q t c d) -> Model p s a b -> Model q t c d Source #

withModelFunc2 :: (forall m. Monad m => ModelFuncM m p s a b -> ModelFuncM m q t c d -> ModelFuncM m r u e f) -> Model p s a b -> Model q t c d -> Model r u e f Source #

Utility

data PMaybe (a :: k -> Type) (b :: Maybe k) :: forall k. (k -> Type) -> Maybe k -> Type where #

A PMaybe f 'Nothing contains nothing, and a PMaybe f ('Just a) contains an f a.

In practice this can be useful to write polymorphic functions/abstractions that contain an argument that can be "turned off" for different instances.

Constructors

PNothing :: forall k (a :: k -> Type) (b :: Maybe k). PMaybe a (Nothing :: Maybe k) 
PJust :: forall k (a :: k -> Type) (b :: Maybe k) (a1 :: k). a a1 -> PMaybe a (Just a1) 

Bundled Patterns

pattern TJust :: a -> TMaybe (Just a) 
Instances
PureProdC Maybe Backprop as => Backprop (TMaybe as) Source # 
Instance details

Defined in Data.Type.Tuple

Methods

zero :: TMaybe as -> TMaybe as #

add :: TMaybe as -> TMaybe as -> TMaybe as #

one :: TMaybe as -> TMaybe as #

Decidable (TyPred (PMaybe (Sing :: k -> Type)) :: Predicate (Maybe k))

Since: decidable-2.0.0

Instance details

Defined in Data.Type.Predicate

Provable (TyPred (PMaybe (Sing :: k -> Type)) :: Predicate (Maybe k))

Since: decidable-2.0.0

Instance details

Defined in Data.Type.Predicate

Methods

prove :: Prove (TyPred (PMaybe Sing)) #

SingI as => Auto (TyPred (PMaybe (Sing :: k -> Type)) :: Predicate (Maybe k)) (as :: Maybe k)

Since: decidable-2.0.0

Instance details

Defined in Data.Type.Predicate.Auto

Methods

auto :: TyPred (PMaybe Sing) @@ as #

ReifyConstraintProd Maybe Eq f as => Eq (PMaybe f as) 
Instance details

Defined in Data.Type.Functor.Product

Methods

(==) :: PMaybe f as -> PMaybe f as -> Bool #

(/=) :: PMaybe f as -> PMaybe f as -> Bool #

(ReifyConstraintProd Maybe Eq f as, ReifyConstraintProd Maybe Ord f as) => Ord (PMaybe f as) 
Instance details

Defined in Data.Type.Functor.Product

Methods

compare :: PMaybe f as -> PMaybe f as -> Ordering #

(<) :: PMaybe f as -> PMaybe f as -> Bool #

(<=) :: PMaybe f as -> PMaybe f as -> Bool #

(>) :: PMaybe f as -> PMaybe f as -> Bool #

(>=) :: PMaybe f as -> PMaybe f as -> Bool #

max :: PMaybe f as -> PMaybe f as -> PMaybe f as #

min :: PMaybe f as -> PMaybe f as -> PMaybe f as #

ReifyConstraintProd Maybe Show f as => Show (PMaybe f as) 
Instance details

Defined in Data.Type.Functor.Product

Methods

showsPrec :: Int -> PMaybe f as -> ShowS #

show :: PMaybe f as -> String #

showList :: [PMaybe f as] -> ShowS #

(PureProdC Maybe Backprop as, PureProdC Maybe Regularize as) => Regularize (PMaybe TF as) Source # 
Instance details

Defined in Backprop.Learn.Regularize

fromPJust :: PMaybe f (Just a) -> f a Source #

type Learnables as = (RecApplicative as, ReifyConstraint Backprop TF as, RMap as, RApply as) Source #

Combination of common constraints for type-level lists.

class (Initialize a, Regularize a, Binary a, NFData a, LinearInPlace m Double a) => Learnable m a Source #

Helpful utility class for ensuring that a parameter or state type is Learnable

Instances
(KnownNat n, PrimMonad m) => Learnable m (R n) Source # 
Instance details

Defined in Backprop.Learn.Model.Types

(Initialize a, Regularize a, Binary a, NFData a, LinearInPlace m Double a) => Learnable m (TF a) Source # 
Instance details

Defined in Backprop.Learn.Model.Types

(KnownNat n, KnownNat o, PrimMonad m) => Learnable m (L n o) Source # 
Instance details

Defined in Backprop.Learn.Model.Types

(Initialize a, Initialize b, Regularize a, Regularize b, Binary a, Binary b, NFData a, NFData b, LinearInPlace m Double a, LinearInPlace m Double b) => Learnable m (a :# b) Source # 
Instance details

Defined in Backprop.Learn.Model.Types

(KnownNat i, KnownNat o, PrimMonad m) => Learnable m (LRp i o) Source # 
Instance details

Defined in Backprop.Learn.Model.Regression

(KnownNat p, KnownNat q, PrimMonad m) => Learnable m (ARIMAp p q) Source # 
Instance details

Defined in Backprop.Learn.Model.Regression

(PrimMonad m, KnownNat i, KnownNat o) => Learnable m (LSTMp i o) Source # 
Instance details

Defined in Backprop.Learn.Model.Neural.LSTM

(KnownNat i, KnownNat o, PrimMonad m) => Learnable m (GRUp i o) Source # 
Instance details

Defined in Backprop.Learn.Model.Neural.LSTM

(RPureConstrained Initialize as, RPureConstrained Regularize as, RPureConstrained Binary as, RPureConstrained NFData as, ReifyConstraint Backprop TF as, RMap as, RApply as, RFoldMap as, RecordToList as, LinearInPlace m Double (Rec TF as)) => Learnable m (Rec TF as) Source # 
Instance details

Defined in Backprop.Learn.Model.Types

(KnownNat p, KnownNat d, KnownNat q, PrimMonad m) => Learnable m (ARIMAs p d q) Source # 
Instance details

Defined in Backprop.Learn.Model.Regression

class Backprop p => Regularize p where Source #

A class for data types that support regularization during training.

This class is somewhat similar to Metric Double, in that it supports summing the components and summing squared components. However, the main difference is that when summing components, we only consider components that we want to regularize.

Often, this doesn't include bias terms (terms that "add" to inputs), and only includes terms that "scale" inputs, like components in a weight matrix of a feed-forward neural network layer.

However, if all of your components are to be regularized, you can use norm_1, norm_2, lassoLinear, and ridgeLinear as sensible implementations, or use DerivingVia with RegularizeMetric:

data MyType = ...
  deriving Regularize via (RegularizeMetric MyType)

You can also derive an instance where no components are regularized, using NoRegularize:

data MyType = ...
  deriving Regularize via (NoRegularize MyType)

The default implementations are based on Generics, and work for types that are records of items that are all instances of Regularize.

Minimal complete definition

Nothing

Methods

rnorm_1 :: p -> Double Source #

Like norm_1: sums all of the weights in p, but only the ones you want to regularize:

\[ \sum_w \lvert w \rvert \]

Note that typically bias terms (terms that add to inputs) are not regularized. Only "weight" terms that scale inputs are typically regularized.

If p is an instance of Metric, then you can set rnorm_1 = norm_1. However, this would count all terms in p, even potential bias terms.

rnorm_1 :: (ADT p, Constraints p Regularize) => p -> Double Source #

Like norm_1: sums all of the weights in p, but only the ones you want to regularize:

\[ \sum_w \lvert w \rvert \]

Note that typically bias terms (terms that add to inputs) are not regularized. Only "weight" terms that scale inputs are typically regularized.

If p is an instance of Metric, then you can set rnorm_1 = norm_1. However, this would count all terms in p, even potential bias terms.

rnorm_2 :: p -> Double Source #

Like norm_2: sums all of the squares of the weights in p, but only the ones you want to regularize:

\[ \sum_w w^2 \]

Note that typically bias terms (terms that add to inputs) are not regularized. Only "weight" terms that scale inputs are typically regularized.

If p is an instance of Metric, then you can set rnorm_2 = norm_2. However, this would count all terms in p, even potential bias terms.

rnorm_2 :: (ADT p, Constraints p Regularize) => p -> Double Source #

Like norm_2: sums all of the squares of the weights in p, but only the ones you want to regularize:

\[ \sum_w w^2 \]

Note that typically bias terms (terms that add to inputs) are not regularized. Only "weight" terms that scale inputs are typically regularized.

If p is an instance of Metric, then you can set rnorm_2 = norm_2. However, this would count all terms in p, even potential bias terms.

lasso :: Double -> p -> p Source #

lasso r p sets all regularized components (that is, components summed by rnorm_1) in p to be either r if that component was positive, or -r if that component was negative. Behavior is not defined if the component is exactly zero, but either r or -r are sensible possibilities.

It must set all non-regularized components (like bias terms, or whatever items that rnorm_1 ignores) to zero.

If p is an instance of Linear Double and Num, then you can set lasso = lassoLinear. However, this is only valid if rnorm_1 counts all terms in p, including potential bias terms.

lasso :: (ADT p, Constraints p Regularize) => Double -> p -> p Source #

lasso r p sets all regularized components (that is, components summed by rnorm_1) in p to be either r if that component was positive, or -r if that component was negative. Behavior is not defined if the component is exactly zero, but either r or -r are sensible possibilities.

It must set all non-regularized components (like bias terms, or whatever items that rnorm_1 ignores) to zero.

If p is an instance of Linear Double and Num, then you can set lasso = lassoLinear. However, this is only valid if rnorm_1 counts all terms in p, including potential bias terms.

ridge :: Double -> p -> p Source #

ridge r p scales all regularized components (that is, components summed by rnorm_2) in p by r.

It must set all non-regularized components (like bias terms, or whatever items that rnorm_2 ignores) to zero.

If p is an instance of Linear Double and Num, then you can set ridge = ridgeLinear. However, this is only valid if rnorm_2 counts all terms in p, including potential bias terms.

ridge :: (ADT p, Constraints p Regularize) => Double -> p -> p Source #

ridge r p scales all regularized components (that is, components summed by rnorm_2) in p by r.

It must set all non-regularized components (like bias terms, or whatever items that rnorm_2 ignores) to zero.

If p is an instance of Linear Double and Num, then you can set ridge = ridgeLinear. However, this is only valid if rnorm_2 counts all terms in p, including potential bias terms.

Instances
Regularize Double Source # 
Instance details

Defined in Backprop.Learn.Regularize

Regularize Float Source # 
Instance details

Defined in Backprop.Learn.Regularize

Regularize () Source # 
Instance details

Defined in Backprop.Learn.Regularize

Methods

rnorm_1 :: () -> Double Source #

rnorm_2 :: () -> Double Source #

lasso :: Double -> () -> () Source #

ridge :: Double -> () -> () Source #

Integral a => Regularize (Ratio a) Source # 
Instance details

Defined in Backprop.Learn.Regularize

KnownNat n => Regularize (R n) Source # 
Instance details

Defined in Backprop.Learn.Regularize

Methods

rnorm_1 :: R n -> Double Source #

rnorm_2 :: R n -> Double Source #

lasso :: Double -> R n -> R n Source #

ridge :: Double -> R n -> R n Source #

Regularize a => Regularize (TF a) Source # 
Instance details

Defined in Backprop.Learn.Regularize

Methods

rnorm_1 :: TF a -> Double Source #

rnorm_2 :: TF a -> Double Source #

lasso :: Double -> TF a -> TF a Source #

ridge :: Double -> TF a -> TF a Source #

Backprop a => Regularize (NoRegularize a) Source # 
Instance details

Defined in Backprop.Learn.Regularize

(Metric Double p, Num p, Backprop p) => Regularize (RegularizeMetric p) Source # 
Instance details

Defined in Backprop.Learn.Regularize

(Regularize a, Regularize b) => Regularize (a, b) Source # 
Instance details

Defined in Backprop.Learn.Regularize

Methods

rnorm_1 :: (a, b) -> Double Source #

rnorm_2 :: (a, b) -> Double Source #

lasso :: Double -> (a, b) -> (a, b) Source #

ridge :: Double -> (a, b) -> (a, b) Source #

(KnownNat n, KnownNat m) => Regularize (L n m) Source # 
Instance details

Defined in Backprop.Learn.Regularize

Methods

rnorm_1 :: L n m -> Double Source #

rnorm_2 :: L n m -> Double Source #

lasso :: Double -> L n m -> L n m Source #

ridge :: Double -> L n m -> L n m Source #

(Regularize a, Regularize b) => Regularize (a :# b) Source # 
Instance details

Defined in Backprop.Learn.Regularize

Methods

rnorm_1 :: (a :# b) -> Double Source #

rnorm_2 :: (a :# b) -> Double Source #

lasso :: Double -> (a :# b) -> a :# b Source #

ridge :: Double -> (a :# b) -> a :# b Source #

(KnownNat i, KnownNat o) => Regularize (LRp i o) Source # 
Instance details

Defined in Backprop.Learn.Model.Regression

Methods

rnorm_1 :: LRp i o -> Double Source #

rnorm_2 :: LRp i o -> Double Source #

lasso :: Double -> LRp i o -> LRp i o Source #

ridge :: Double -> LRp i o -> LRp i o Source #

(KnownNat p, KnownNat q) => Regularize (ARIMAp p q) Source # 
Instance details

Defined in Backprop.Learn.Model.Regression

Methods

rnorm_1 :: ARIMAp p q -> Double Source #

rnorm_2 :: ARIMAp p q -> Double Source #

lasso :: Double -> ARIMAp p q -> ARIMAp p q Source #

ridge :: Double -> ARIMAp p q -> ARIMAp p q Source #

KnownNat o => Regularize (LSTMp i o) Source # 
Instance details

Defined in Backprop.Learn.Model.Neural.LSTM

Methods

rnorm_1 :: LSTMp i o -> Double Source #

rnorm_2 :: LSTMp i o -> Double Source #

lasso :: Double -> LSTMp i o -> LSTMp i o Source #

ridge :: Double -> LSTMp i o -> LSTMp i o Source #

KnownNat o => Regularize (GRUp i o) Source # 
Instance details

Defined in Backprop.Learn.Model.Neural.LSTM

Methods

rnorm_1 :: GRUp i o -> Double Source #

rnorm_2 :: GRUp i o -> Double Source #

lasso :: Double -> GRUp i o -> GRUp i o Source #

ridge :: Double -> GRUp i o -> GRUp i o Source #

(Regularize a, Regularize b, Regularize c) => Regularize (a, b, c) Source # 
Instance details

Defined in Backprop.Learn.Regularize

Methods

rnorm_1 :: (a, b, c) -> Double Source #

rnorm_2 :: (a, b, c) -> Double Source #

lasso :: Double -> (a, b, c) -> (a, b, c) Source #

ridge :: Double -> (a, b, c) -> (a, b, c) Source #

(RPureConstrained Regularize as, ReifyConstraint Backprop TF as, RMap as, RApply as, RFoldMap as) => Regularize (Rec TF as) Source # 
Instance details

Defined in Backprop.Learn.Regularize

Methods

rnorm_1 :: Rec TF as -> Double Source #

rnorm_2 :: Rec TF as -> Double Source #

lasso :: Double -> Rec TF as -> Rec TF as Source #

ridge :: Double -> Rec TF as -> Rec TF as Source #

(PureProdC Maybe Backprop as, PureProdC Maybe Regularize as) => Regularize (PMaybe TF as) Source # 
Instance details

Defined in Backprop.Learn.Regularize

(Vector v a, Regularize a, Backprop (Vector v n a)) => Regularize (Vector v n a) Source # 
Instance details

Defined in Backprop.Learn.Regularize

Methods

rnorm_1 :: Vector v n a -> Double Source #

rnorm_2 :: Vector v n a -> Double Source #

lasso :: Double -> Vector v n a -> Vector v n a Source #

ridge :: Double -> Vector v n a -> Vector v n a Source #

Regularize (ARIMAs p d q) Source # 
Instance details

Defined in Backprop.Learn.Model.Regression

Methods

rnorm_1 :: ARIMAs p d q -> Double Source #

rnorm_2 :: ARIMAs p d q -> Double Source #

lasso :: Double -> ARIMAs p d q -> ARIMAs p d q Source #

ridge :: Double -> ARIMAs p d q -> ARIMAs p d q Source #

(Regularize a, Regularize b, Regularize c, Regularize d) => Regularize (a, b, c, d) Source # 
Instance details

Defined in Backprop.Learn.Regularize

Methods

rnorm_1 :: (a, b, c, d) -> Double Source #

rnorm_2 :: (a, b, c, d) -> Double Source #

lasso :: Double -> (a, b, c, d) -> (a, b, c, d) Source #

ridge :: Double -> (a, b, c, d) -> (a, b, c, d) Source #

(Regularize a, Regularize b, Regularize c, Regularize d, Regularize e) => Regularize (a, b, c, d, e) Source # 
Instance details

Defined in Backprop.Learn.Regularize

Methods

rnorm_1 :: (a, b, c, d, e) -> Double Source #

rnorm_2 :: (a, b, c, d, e) -> Double Source #

lasso :: Double -> (a, b, c, d, e) -> (a, b, c, d, e) Source #

ridge :: Double -> (a, b, c, d, e) -> (a, b, c, d, e) Source #

class Initialize p where Source #

Class for types that are basically a bunch of Doubles, which can be initialized with a given identical and independent distribution.

Minimal complete definition

Nothing

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m p Source #

initialize :: (ADTRecord p, Constraints p Initialize, ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m p Source #

Instances
Initialize Double Source # 
Instance details

Defined in Backprop.Learn.Initialize

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m Double Source #

Initialize Float Source # 
Instance details

Defined in Backprop.Learn.Initialize

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m Float Source #

Initialize () Source # 
Instance details

Defined in Backprop.Learn.Initialize

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m () Source #

Initialize T0 Source # 
Instance details

Defined in Backprop.Learn.Initialize

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m T0 Source #

Initialize a => Initialize (Complex a) Source #

Initializes real and imaginary components identically

Instance details

Defined in Backprop.Learn.Initialize

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m (Complex a) Source #

KnownNat n => Initialize (R n) Source # 
Instance details

Defined in Backprop.Learn.Initialize

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m (R n) Source #

KnownNat n => Initialize (C n) Source # 
Instance details

Defined in Backprop.Learn.Initialize

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m (C n) Source #

Initialize a => Initialize (TF a) Source # 
Instance details

Defined in Backprop.Learn.Initialize

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m (TF a) Source #

RPureConstrained Initialize as => Initialize (T as) Source # 
Instance details

Defined in Backprop.Learn.Initialize

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m (T as) Source #

(Initialize a, Initialize b) => Initialize (a, b) Source # 
Instance details

Defined in Backprop.Learn.Initialize

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m (a, b) Source #

(KnownNat n, KnownNat m) => Initialize (L n m) Source # 
Instance details

Defined in Backprop.Learn.Initialize

Methods

initialize :: (ContGen d, PrimMonad m0) => d -> Gen (PrimState m0) -> m0 (L n m) Source #

(KnownNat n, KnownNat m) => Initialize (M n m) Source # 
Instance details

Defined in Backprop.Learn.Initialize

Methods

initialize :: (ContGen d, PrimMonad m0) => d -> Gen (PrimState m0) -> m0 (M n m) Source #

(Initialize a, Initialize b) => Initialize (a :# b) Source # 
Instance details

Defined in Backprop.Learn.Initialize

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m (a :# b) Source #

(KnownNat o, KnownNat i) => Initialize (LRp i o) Source # 
Instance details

Defined in Backprop.Learn.Model.Regression

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m (LRp i o) Source #

(KnownNat a, KnownNat b) => Initialize (ARIMAp a b) Source # 
Instance details

Defined in Backprop.Learn.Model.Regression

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m (ARIMAp a b) Source #

(KnownNat i, KnownNat o) => Initialize (LSTMp i o) Source #

Forget biases initialized to 1

Instance details

Defined in Backprop.Learn.Model.Neural.LSTM

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m (LSTMp i o) Source #

KnownNat o => Initialize (GRUp i o) Source # 
Instance details

Defined in Backprop.Learn.Model.Neural.LSTM

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m (GRUp i o) Source #

(Initialize a, Initialize b, Initialize c) => Initialize (a, b, c) Source # 
Instance details

Defined in Backprop.Learn.Initialize

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m (a, b, c) Source #

(Vector v a, KnownNat n, Initialize a) => Initialize (Vector v n a) Source # 
Instance details

Defined in Backprop.Learn.Initialize

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m (Vector v n a) Source #

KnownNat c => Initialize (ARIMAs a b c) Source # 
Instance details

Defined in Backprop.Learn.Model.Regression

Methods

initialize :: (ContGen d, PrimMonad m) => d -> Gen (PrimState m) -> m (ARIMAs a b c) Source #

(Initialize a, Initialize b, Initialize c, Initialize d) => Initialize (a, b, c, d) Source # 
Instance details

Defined in Backprop.Learn.Initialize

Methods

initialize :: (ContGen d0, PrimMonad m) => d0 -> Gen (PrimState m) -> m (a, b, c, d) Source #

(Initialize a, Initialize b, Initialize c, Initialize d, Initialize e) => Initialize (a, b, c, d, e) Source # 
Instance details

Defined in Backprop.Learn.Initialize

Methods

initialize :: (ContGen d0, PrimMonad m) => d0 -> Gen (PrimState m) -> m (a, b, c, d, e) Source #