Safe Haskell | Safe-Inferred |
---|---|
Language | Haskell2010 |
Synopsis
- type LearningRate = Tensor
- type Loss = Tensor
- newtype Gradients = Gradients [Tensor]
- newtype OptimizerState option = OptimizerState option
- grad' :: Loss -> [Parameter] -> Gradients
- class Optimizer optimizer where
- step :: LearningRate -> Gradients -> [Tensor] -> optimizer -> ([Tensor], optimizer)
- runStep :: Parameterized model => model -> optimizer -> Loss -> LearningRate -> IO (model, optimizer)
- runStep' :: Parameterized model => model -> optimizer -> Gradients -> LearningRate -> IO (model, optimizer)
- data GD = GD
- gd :: LearningRate -> Gradients -> [Tensor] -> [Tensor]
- gd' :: LearningRate -> Gradients -> [Tensor] -> GD -> ([Tensor], GD)
- sgd :: LearningRate -> [Parameter] -> [Tensor] -> [Tensor]
- data GDM = GDM {}
- gdm :: LearningRate -> Gradients -> [Tensor] -> GDM -> ([Tensor], GDM)
- data Adam = Adam {}
- mkAdam :: Int -> Float -> Float -> [Parameter] -> Adam
- adam :: LearningRate -> Gradients -> [Tensor] -> Adam -> ([Tensor], Adam)
- data Adagrad = Adagrad {}
- adagrad :: LearningRate -> Gradients -> [Tensor] -> Adagrad -> ([Tensor], Adagrad)
- foldLoop :: a -> Int -> (a -> Int -> IO a) -> IO a
Documentation
type LearningRate = Tensor Source #
newtype OptimizerState option Source #
Constructors: OptimizerState option
class Optimizer optimizer where Source #
step :: LearningRate -> Gradients -> [Tensor] -> optimizer -> ([Tensor], optimizer) Source #
runStep :: Parameterized model => model -> optimizer -> Loss -> LearningRate -> IO (model, optimizer) Source #
Run a single iteration of an optimizer from a loss value, returning the updated model and optimizer state
runStep' :: Parameterized model => model -> optimizer -> Gradients -> LearningRate -> IO (model, optimizer) Source #
Like runStep, but takes precomputed Gradients instead of a Loss, returning the updated model and optimizer state
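As a rough illustration of how the two entry points relate (a sketch, not necessarily the library's exact default implementation): feeding runStep a loss should be equivalent to differentiating the loss with grad' over the model's flattened parameters and handing the result to runStep'. flattenParameters is assumed here to be re-exported by the Torch umbrella module.

```haskell
import Torch

-- Sketch only: the two step functions, spelled out side by side.
-- runStep takes a Loss and differentiates it internally; runStep' expects
-- the Gradients to have been computed already (here via grad').
stepFromLoss
  :: (Parameterized model, Optimizer opt)
  => model -> opt -> Loss -> LearningRate -> IO (model, opt)
stepFromLoss model opt loss lr = runStep model opt loss lr

stepFromGrads
  :: (Parameterized model, Optimizer opt)
  => model -> opt -> Loss -> LearningRate -> IO (model, opt)
stepFromGrads model opt loss lr =
  runStep' model opt (grad' loss (flattenParameters model)) lr
```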
gd' :: LearningRate -> Gradients -> [Tensor] -> GD -> ([Tensor], GD) Source #
Gradient descent step with a dummy state variable
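For reference, a minimal sketch of driving the pure interface by hand with gd', using grad' from this module to obtain the gradients; flattenParameters and toDependent are assumed to come from the Torch umbrella module (Torch.NN / Torch.Autograd).

```haskell
import Torch

-- Sketch: one plain gradient-descent update applied by hand with gd'.
-- GD carries no state, so the returned GD value can be ignored.
manualGdStep :: Parameterized model => model -> Loss -> LearningRate -> ([Tensor], GD)
manualGdStep model loss lr =
  let params    = flattenParameters model   -- [Parameter]
      gradients = grad' loss params         -- gradients of the loss
      values    = map toDependent params    -- current parameter values as [Tensor]
  in gd' lr gradients values GD
```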
data Adam Source #

State representation for the Adam optimizer
adam Source #

:: LearningRate | learning rate |
-> Gradients | model parameter gradients |
-> [Tensor] | model parameters |
-> Adam | Adam state: beta1, beta2, moment estimates, iteration counter |
-> ([Tensor], Adam) | returns new parameters and the updated Adam state |
Adam step
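A sketch of typical Adam usage, assuming flattenParameters from Torch.NN is available via the Torch umbrella module: build the initial state with mkAdam and let runStep advance it.

```haskell
import Torch

-- Sketch: initialise Adam from the model's parameters and take one step.
-- mkAdam takes the starting iteration counter, beta1 and beta2 (0.9 and
-- 0.999 are conventional choices, not values mandated by this module),
-- and the parameter list used to size the moment estimates.
adamStepOnce :: Parameterized model => model -> Loss -> LearningRate -> IO (model, Adam)
adamStepOnce model loss lr = do
  let opt0 = mkAdam 0 0.9 0.999 (flattenParameters model)
  runStep model opt0 loss lr
```

In a real training loop the Adam value would be built once up front and the updated state returned by each runStep threaded into the next call; rebuilding it every iteration would reset the moment estimates.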
data Adagrad Source #

State representation for the Adagrad optimizer
Instances
Show Adagrad Source #

Optimizer Adagrad Source #

Defined in Torch.Optim

step :: LearningRate -> Gradients -> [Tensor] -> Adagrad -> ([Tensor], Adagrad) Source #

runStep :: Parameterized model => model -> Adagrad -> Loss -> LearningRate -> IO (model, Adagrad) Source #

runStep' :: Parameterized model => model -> Adagrad -> Gradients -> LearningRate -> IO (model, Adagrad) Source #
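Putting the pieces together, a sketch of a small training loop driven by foldLoop and runStep. getBatch is a hypothetical data source, the forward function is supplied by the caller, and mseLoss and asTensor are assumed to be re-exported by the Torch umbrella module.

```haskell
import Torch

-- Sketch: a small training loop built from foldLoop and runStep.
-- The (model, optimizer) pair is the accumulator that foldLoop threads
-- through the iterations.
train
  :: (Parameterized model, Optimizer opt)
  => (model -> Tensor -> Tensor)  -- forward pass
  -> IO (Tensor, Tensor)          -- getBatch: yields (input, target)
  -> model
  -> opt
  -> Int                          -- number of iterations
  -> IO (model, opt)
train forward getBatch model0 opt0 numIters =
  foldLoop (model0, opt0) numIters $ \(model, opt) _i -> do
    (x, y) <- getBatch
    let loss = mseLoss y (forward model x)
    runStep model opt loss (asTensor (1e-2 :: Float))
```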