| Safe Haskell | None |
|---|---|
| Language | Haskell2010 |
Synopsis
- type Loss a = forall s. Reifies s W => a -> BVar s a -> BVar s Double
- crossEntropy :: KnownNat n => Loss (R n)
- crossEntropy1 :: Loss Double
- squaredError :: Loss Double
- absError :: Loss Double
- totalSquaredError :: (Backprop (t Double), Num (t Double), Foldable t, Functor t) => Loss (t Double)
- squaredErrorV :: KnownNat n => Loss (R n)
- scaleLoss :: Double -> Loss a -> Loss a
- sumLoss :: (Traversable t, Applicative t, Backprop a) => Loss a -> Loss (t a)
- sumLossDecay :: forall n a. (KnownNat n, Backprop a) => Double -> Loss a -> Loss (Vector n a)
- lastLoss :: forall n a. (KnownNat (n + 1), Backprop a) => Loss a -> Loss (Vector (n + 1) a)
- zipLoss :: (Traversable t, Applicative t, Backprop a) => t Double -> Loss a -> Loss (t a)
- t2Loss :: (Backprop a, Backprop b) => Loss a -> Loss b -> Loss (a :# b)
- type Regularizer p = forall s. Reifies s W => BVar s p -> BVar s Double
- l2Reg :: Regularize p => Double -> Regularizer p
- l1Reg :: Regularize p => Double -> Regularizer p
- noReg :: Regularizer p
- addReg :: Regularizer p -> Regularizer p -> Regularizer p
- scaleReg :: Double -> Regularizer p -> Regularizer p
Loss functions
crossEntropy :: KnownNat n => Loss (R n) Source #
crossEntropy1 :: Loss Double Source #
squaredError :: Loss Double Source #
absError :: Loss Double Source #
totalSquaredError :: (Backprop (t Double), Num (t Double), Foldable t, Functor t) => Loss (t Double) Source #
squaredErrorV :: KnownNat n => Loss (R n) Source #
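Since these entries carry no prose, a quick usage sketch may help: a `Loss a` is applied to a plain target value, and the resulting `BVar`-to-`BVar` function can be run with the usual backprop runners. A minimal sketch, assuming the `Backprop` instance for `R n` from hmatrix-backprop is in scope and using invented values:

```haskell
{-# LANGUAGE DataKinds #-}

import Numeric.Backprop (evalBP, gradBP)
import Numeric.LinearAlgebra.Static (R, vec3)

-- hypothetical one-hot target and prediction
targ, pred0 :: R 3
targ  = vec3 1 0 0
pred0 = vec3 0.8 0.1 0.1

-- value of the loss at the prediction
lossVal :: Double
lossVal = evalBP (squaredErrorV targ) pred0

-- gradient of the loss with respect to the prediction
lossGrad :: R 3
lossGrad = gradBP (squaredErrorV targ) pred0
```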
Manipulate loss functions
scaleLoss :: Double -> Loss a -> Loss a Source #
sumLoss :: (Traversable t, Applicative t, Backprop a) => Loss a -> Loss (t a) Source #
sumLossDecay :: forall n a. (KnownNat n, Backprop a) => Double -> Loss a -> Loss (Vector n a) Source #
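The exact weighting scheme is not spelled out here; one plausible reading, consistent with the name and with lastLoss below, is that the decay factor downweights earlier items so that the final item counts fully:

\[ L(\mathbf{t}, \mathbf{x}) = \sum_{i=0}^{n-1} \beta^{\,n-1-i} \, \ell(t_i, x_i) \]

where \(\ell\) is the underlying `Loss a` and \(\beta\) is the `Double` argument. Treat this formula as an assumption, not a quote from the source.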
lastLoss :: forall n a. (KnownNat (n + 1), Backprop a) => Loss a -> Loss (Vector (n + 1) a) Source #
zipLoss :: (Traversable t, Applicative t, Backprop a) => t Double -> Loss a -> Loss (t a) Source #
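A sketch of these sequence combinators in use, assuming `Vector` here is the sized vector from Data.Vector.Sized and that all numeric values are invented:

```haskell
{-# LANGUAGE DataKinds        #-}
{-# LANGUAGE TypeApplications #-}

import           Numeric.Backprop (evalBP)
import qualified Data.Vector.Sized as SV

-- hypothetical targets and predictions over a length-3 sequence
targs, preds :: SV.Vector 3 Double
targs = SV.fromTuple (1.0, 2.0, 3.0)
preds = SV.fromTuple (0.9, 2.1, 2.8)

-- total squared error over the whole sequence
total :: Double
total = evalBP (sumLoss squaredError targs) preds

-- explicit per-item weights: later items count more
weighted :: Double
weighted = evalBP (zipLoss (SV.fromTuple (0.25, 0.5, 1.0)) squaredError targs) preds

-- squared error on the final item only (n + 1 = 3, so n is applied as 2)
final :: Double
final = evalBP (lastLoss @2 squaredError targs) preds
```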
t2Loss :: (Backprop a, Backprop b) => Loss a -> Loss b -> Loss (a :# b) Source #

Lift and sum loss functions over the components of an `a :# b`: the first argument is the loss on the first component, and the second argument is the loss on the second component.
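A sketch, under the assumption that `:#` also names an infix data constructor for the pair type and that the pair has the needed Backprop instance; all values are invented:

```haskell
-- squared error on the first component, absolute error on the second
pairLoss :: Loss (Double :# Double)
pairLoss = t2Loss squaredError absError

-- evaluated against invented targets and predictions
pairVal :: Double
pairVal = evalBP (pairLoss (1.0 :# 2.0)) (0.5 :# 2.5)
```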
Regularization
type Regularizer p = forall s. Reifies s W => BVar s p -> BVar s Double Source #
A regularizer on parameters
l2Reg :: Regularize p => Double -> Regularizer p Source #
Backpropagatable L2 regularization; also known as ridge regularization.
\[ \sum_w w^2 \]
Note that bias terms (terms that add to inputs) are typically not regularized; only "weight" terms (terms that scale inputs) usually are.
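A minimal sketch of using the result; the strength 0.01 is invented, and the `Double` argument is presumably the factor applied to the sum:

```haskell
import Numeric.Backprop (evalBP, gradBP)

-- value of the L2 penalty on parameters p
penalty :: Regularize p => p -> Double
penalty = evalBP (l2Reg 0.01)

-- its gradient, e.g. to fold into a training update
penaltyGrad :: (Regularize p, Backprop p) => p -> p
penaltyGrad = gradBP (l2Reg 0.01)
```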
l1Reg :: Regularize p => Double -> Regularizer p Source #
Backpropagatable L1 regularization; also known as lasso regularization.
\[ \sum_w \lvert w \rvert \]
Note that bias terms (terms that add to inputs) are typically not regularized; only "weight" terms (terms that scale inputs) usually are.
noReg :: Regularizer p Source #
No regularization
Manipulate regularizers
addReg :: Regularizer p -> Regularizer p -> Regularizer p Source #
Add together two regularizers
scaleReg :: Double -> Regularizer p -> Regularizer p Source #
Scale a regularizer's influence
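Putting these two combinators together, a sketch of an elastic-net style penalty; all strengths are invented:

```haskell
-- L1 plus L2, with the combined influence halved via scaleReg
elasticNet :: Regularize p => Regularizer p
elasticNet = scaleReg 0.5 (addReg (l1Reg 0.01) (l2Reg 0.001))
```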