| Safe Haskell | None |
|---|---|
| Language | Haskell2010 |
Synopsis
- meanModel :: (Backprop (t a), Foldable t, Functor t, Fractional a, Reifies s W) => BVar s (t a) -> BVar s a
- varModel :: (Backprop (t a), Foldable t, Functor t, Fractional a, Reifies s W) => BVar s (t a) -> BVar s a
- stdevModel :: (Backprop (t a), Foldable t, Functor t, Floating a, Reifies s W) => BVar s (t a) -> BVar s a
- rangeModel :: (Backprop (t a), Foldable t, Functor t, Ord a, Num a, Reifies s W) => BVar s (t a) -> BVar s a
- step :: (Ord a, Num a) => a -> a
- logistic :: Floating a => a -> a
- softsign :: Fractional a => a -> a
- reLU :: (Num a, Ord a) => a -> a
- softPlus :: Floating a => a -> a
- bentIdentity :: Floating a => a -> a
- siLU :: Floating a => a -> a
- softExponential :: (Floating a, Ord a) => a -> a -> a
- sinc :: (Floating a, Eq a) => a -> a
- gaussian :: Floating a => a -> a
- tanh :: Floating a => a -> a
- atan :: Floating a => a -> a
- sin :: Floating a => a -> a
- vmap :: (KnownNat n, Reifies s W) => (BVar s ℝ -> BVar s ℝ) -> BVar s (R n) -> BVar s (R n)
- vmap' :: (Num (vec n), Storable field, Sized field (vec n) Vector, Backprop (vec n), Backprop field, Reifies s W) => (forall s'. Reifies s' W => BVar s' field -> BVar s' field) -> BVar s (vec n) -> BVar s (vec n)
- liftUniform :: (Reifies s W, KnownNat n) => (BVar s (R n) -> r) -> BVar s Double -> r
- isru :: Floating a => a -> a -> a
- preLU :: (Num a, Ord a) => a -> a -> a
- sreLU :: (Num a, Ord a) => a -> a -> a -> a -> a -> a
- sreLUPFP :: (KnownNat n, Reifies s W) => BVar s ((Double :# Double) :# (Double :# Double)) -> BVar s (R n) -> BVar s (R n)
- eLU :: (Floating a, Ord a) => a -> a -> a
- isrLU :: (Floating a, Ord a) => a -> a -> a
- apl :: (KnownNat n, KnownNat m, Reifies s W) => BVar s (L n m) -> BVar s (L n m) -> BVar s (R m) -> BVar s (R m)
- aplPFP :: (KnownNat n, KnownNat m, Reifies s W) => BVar s (L n m :# L n m) -> BVar s (R m) -> BVar s (R m)
- softMax :: (KnownNat i, Reifies s W) => BVar s (R i) -> BVar s (R i)
- maxout :: (KnownNat n, Reifies s W) => BVar s (R n) -> BVar s Double
- kSparse :: forall n s. (Reifies s W, KnownNat n) => Int -> BVar s (R n) -> BVar s (R n)
Statistics
meanModel :: (Backprop (t a), Foldable t, Functor t, Fractional a, Reifies s W) => BVar s (t a) -> BVar s a Source #
varModel :: (Backprop (t a), Foldable t, Functor t, Fractional a, Reifies s W) => BVar s (t a) -> BVar s a Source #
stdevModel :: (Backprop (t a), Foldable t, Functor t, Floating a, Reifies s W) => BVar s (t a) -> BVar s a Source #
rangeModel :: (Backprop (t a), Foldable t, Functor t, Ord a, Num a, Reifies s W) => BVar s (t a) -> BVar s a Source #
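For a quick sanity check, these models can be evaluated with evalBP and differentiated with gradBP from Numeric.Backprop (a sketch; meanGrad is a hypothetical name, and the Backprop instance for lists is assumed):

import Numeric.Backprop (evalBP, gradBP)

-- Differentiate the mean of a plain Haskell list: each of the n
-- elements contributes 1/n to the gradient.
meanGrad :: [Double] -> [Double]
meanGrad = gradBP meanModel

-- evalBP meanModel [1,2,3,4]  == 2.5
-- meanGrad [1,2,3,4]          == [0.25,0.25,0.25,0.25]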
Activation functions
See https://en.wikipedia.org/wiki/Activation_function
Maps
Unparameterized
softsign :: Fractional a => a -> a Source #
Softsign activation function
\[ \frac{x}{1 + \lvert x \rvert} \]
bentIdentity :: Floating a => a -> a Source #
Bent identity
\[ \frac{\sqrt{x^2 + 1} - 1}{2} + x \]
siLU :: Floating a => a -> a Source #
Sigmoid-weighted linear unit. Multiplies the input by its logistic sigmoid.
\[ x \sigma(x) \]
sinc :: (Floating a, Eq a) => a -> a Source #
Sinc
\[ \begin{cases} 1 & \text{for } x = 0 \\ \frac{\sin(x)}{x} & \text{for } x \ne 0 \end{cases} \]
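These unparameterized activations are plain functions polymorphic over the standard numeric classes, so they apply equally to raw Doubles and to BVars. A minimal sketch, re-deriving two of them from the formulas above (primed names avoid clashing with the module's own definitions, which may differ in detail):

softsign' :: Fractional a => a -> a
softsign' x = x / (1 + abs x)

siLU' :: Floating a => a -> a
siLU' x = x * logistic' x
  where
    logistic' z = 1 / (1 + exp (negate z))  -- the standard logistic sigmoid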
vmap :: (KnownNat n, Reifies s W) => (BVar s ℝ -> BVar s ℝ) -> BVar s (R n) -> BVar s (R n) #
Note: if possible, use the potentially much more performant vmap'.
vmap' :: (Num (vec n), Storable field, Sized field (vec n) Vector, Backprop (vec n), Backprop field, Reifies s W) => (forall s'. Reifies s' W => BVar s' field -> BVar s' field) -> BVar s (vec n) -> BVar s (vec n) #
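For example (a sketch; assumes the Backprop and Sized instances for the static R vectors from hmatrix are in scope, and activate is a hypothetical name):

-- Apply the scalar logistic activation to every element of an R 3.
activate :: Reifies s W => BVar s (R 3) -> BVar s (R 3)
activate = vmap' logistic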
Parameterized
liftUniform :: (Reifies s W, KnownNat n) => (BVar s (R n) -> r) -> BVar s Double -> r Source #
Usable with functions like (*), isru, etc., to turn them into a form usable with PFP:

liftUniform (*)  :: BVar s Double -> BVar s (R n) -> BVar s (R n)
liftUniform isru :: BVar s Double -> BVar s (R n) -> BVar s (R n)
Basically turns a parameterized function on individual elements into one that shares the same parameter across all elements of the vector.
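A minimal sketch of the pattern, using the isru instantiation shown above (isruLayer is a hypothetical name):

-- One shared Double parameter scales the ISRU across the whole vector.
isruLayer :: (KnownNat n, Reifies s W) => BVar s Double -> BVar s (R n) -> BVar s (R n)
isruLayer = liftUniform isru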
isru
  :: Floating a
  => a  -- ^ α (scaling parameter)
  -> a  -- ^ x
  -> a
Inverse square root unit
\[ \frac{x}{\sqrt{1 + \alpha x^2}} \]
See liftUniform to make this compatible with PFP.
You can also just use this after partially applying it, to fix the parameter (and not have it trained).
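For example (a sketch; α = 0.5 is an arbitrary fixed choice and isruFixed is a hypothetical name):

-- ISRU with a fixed, untrained α, applied elementwise over a vector.
isruFixed :: (KnownNat n, Reifies s W) => BVar s (R n) -> BVar s (R n)
isruFixed = vmap' (isru 0.5)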
preLU :: (Num a, Ord a) => a -> a -> a Source #
Parametric rectified linear unit
To use with vectors (R n), use vmap'.
If the scaling parameter is fixed (and not learned), this is typically called a leaky rectified linear unit (usually with α = 0.01).
To use as a learned parameter:

vmap . preLU :: BVar s Double -> BVar s (R n) -> BVar s (R n)

This can be given directly to PFP.
To fix the parameter ("leaky"), just partially apply a parameter:

preLU 0.01           :: BVar s (R n) -> BVar s (R n)
preLU (realToFrac α) :: BVar s (R n) -> BVar s (R n)
See also rreLU.
\[ \begin{cases} \alpha x & \text{for } x < 0 \\ x & \text{for } x \ge 0 \end{cases} \]
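Putting the pieces together, a leaky rectifier layer with the conventional fixed α = 0.01 could look like this (a sketch; leaky is a hypothetical name):

-- Fixed "leaky" rectified linear unit over a vector; α is baked in,
-- not learned.
leaky :: (KnownNat n, Reifies s W) => BVar s (R n) -> BVar s (R n)
leaky = vmap' (preLU 0.01)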
sreLU :: (Num a, Ord a) => a -> a -> a -> a -> a -> a Source #
S-shaped rectified linear activation unit
See sreLUPFP for an uncurried and uniformly lifted version usable with PFP.
\[ \begin{cases} t_l + a_l (x - t_l) & \text{for } x \le t_l \\ x & \text{for } t_l < x < t_r \\ t_r + a_r (x - t_r) & \text{for } x \ge t_r \end{cases} \]
sreLUPFP :: (KnownNat n, Reifies s W) => BVar s ((Double :# Double) :# (Double :# Double)) -> BVar s (R n) -> BVar s (R n) Source #
An uncurried and uniformly lifted version of sreLU directly usable with PFP.
eLU :: (Floating a, Ord a) => a -> a -> a Source #
Exponential linear unit
To use with vectors (R n), use vmap'.
To use as a learned parameter:

vmap . eLU :: BVar s Double -> BVar s (R n) -> BVar s (R n)

This can be given directly to PFP.
To fix the parameter, just partially apply a parameter:

vmap' (eLU 0.01) :: BVar s (R n) -> BVar s (R n)
\[ \begin{cases} \alpha (e^x - 1) & \text{for } x < 0 \\ x & \text{for } x \ge 0 \end{cases} \]
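Two numeric sanity checks that follow directly from the formula above (a sketch; eluChecks is a hypothetical name):

-- For x ≥ 0 the output is x itself; for very negative x it
-- approaches -α.
eluChecks :: Bool
eluChecks = eLU 1 2 == (2 :: Double)
         && eLU 1 (-50) < (-0.99 :: Double)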
apl
  :: (KnownNat n, KnownNat m, Reifies s W)
  => BVar s (L n m)  -- ^ a
  -> BVar s (L n m)  -- ^ b
  -> BVar s (R m)    -- ^ x
  -> BVar s (R m)
Adaptive piecewise linear activation unit
See aplPFP for an uncurried version usable with PFP.
\[ \max(0, x_i) + \sum_j^M a_i^j \max(0, -x_i + b_i^j) \]
aplPFP :: (KnownNat n, KnownNat m, Reifies s W) => BVar s (L n m :# L n m) -> BVar s (R m) -> BVar s (R m) Source #
apl uncurried, to be directly usable with PFP.
Mixing
maxout :: (KnownNat n, Reifies s W) => BVar s (R n) -> BVar s Double Source #
Maximum of vector.
Compare to norm_InfV, which gives the maximum absolute value.
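For example (a sketch; uses evalBP from Numeric.Backprop and vector from Numeric.LinearAlgebra.Static):

import Numeric.Backprop (evalBP)
import Numeric.LinearAlgebra.Static (R, vector)

-- maxout picks out the largest element of the vector.
biggest :: Double
biggest = evalBP maxout (vector [1,5,3,2] :: R 4)  -- 5.0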
kSparse :: forall n s. (Reifies s W, KnownNat n) => Int -> BVar s (R n) -> BVar s (R n) Source #
Keep only the top k values, and zero out all of the rest.
Useful for post-composing between layers (with a logistic function before) to encourage the number of "activated" neurons to stay around k. Used in k-sparse autoencoders (see KAutoencoder).
http://www.ericlwilkinson.com/blog/2014/11/19/deep-learning-sparse-autoencoders
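A minimal sketch (sparse2 is a hypothetical name): keep the two largest activations of a 5-vector and zero out the rest:

sparse2 :: Reifies s W => BVar s (R 5) -> BVar s (R 5)
sparse2 = kSparse 2

-- evalBP sparse2 (vector [0.1, 0.9, 0.3, 0.8, 0.2])
--   keeps 0.9 and 0.8, zeroing the other three elements.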