hasktorch-0.2.0.0: Functional differentiable programming in Haskell
Safe Haskell: Safe-Inferred
Language: Haskell2010

Torch.Optim

Synopsis

Documentation

newtype Gradients Source #

Constructors

Gradients [Tensor] 

Instances

Instances details
Show Gradients Source # 
Instance details

Defined in Torch.Optim

newtype OptimizerState option Source #

Constructors

OptimizerState option 

class Optimizer optimizer where Source #

Minimal complete definition

step

Methods

step :: LearningRate -> Gradients -> [Tensor] -> optimizer -> ([Tensor], optimizer) Source #

runStep :: Parameterized model => model -> optimizer -> Loss -> LearningRate -> IO (model, optimizer) Source #

Run a single optimizer iteration given a loss (gradients are computed internally), returning new parameters and an updated optimizer state.

runStep' :: Parameterized model => model -> optimizer -> Gradients -> LearningRate -> IO (model, optimizer) Source #

Run a single optimizer iteration given precomputed gradients, returning new parameters and an updated optimizer state.

Instances

Instances details
Optimizer Adagrad Source # 
Instance details

Defined in Torch.Optim

Methods

step :: LearningRate -> Gradients -> [Tensor] -> Adagrad -> ([Tensor], Adagrad) Source #

runStep :: Parameterized model => model -> Adagrad -> Loss -> LearningRate -> IO (model, Adagrad) Source #

runStep' :: Parameterized model => model -> Adagrad -> Gradients -> LearningRate -> IO (model, Adagrad) Source #

Optimizer Adam Source # 
Instance details

Defined in Torch.Optim

Methods

step :: LearningRate -> Gradients -> [Tensor] -> Adam -> ([Tensor], Adam) Source #

runStep :: Parameterized model => model -> Adam -> Loss -> LearningRate -> IO (model, Adam) Source #

runStep' :: Parameterized model => model -> Adam -> Gradients -> LearningRate -> IO (model, Adam) Source #

Optimizer GD Source # 
Instance details

Defined in Torch.Optim

Methods

step :: LearningRate -> Gradients -> [Tensor] -> GD -> ([Tensor], GD) Source #

runStep :: Parameterized model => model -> GD -> Loss -> LearningRate -> IO (model, GD) Source #

runStep' :: Parameterized model => model -> GD -> Gradients -> LearningRate -> IO (model, GD) Source #

Optimizer GDM Source # 
Instance details

Defined in Torch.Optim

Methods

step :: LearningRate -> Gradients -> [Tensor] -> GDM -> ([Tensor], GDM) Source #

runStep :: Parameterized model => model -> GDM -> Loss -> LearningRate -> IO (model, GDM) Source #

runStep' :: Parameterized model => model -> GDM -> Gradients -> LearningRate -> IO (model, GDM) Source #

CppOptimizer option => Optimizer (CppOptimizerState option) Source # 
Instance details

Defined in Torch.Optim.CppOptim

Methods

step :: LearningRate -> Gradients -> [Tensor] -> CppOptimizerState option -> ([Tensor], CppOptimizerState option) Source #

runStep :: Parameterized model => model -> CppOptimizerState option -> Loss -> LearningRate -> IO (model, CppOptimizerState option) Source #

runStep' :: Parameterized model => model -> CppOptimizerState option -> Gradients -> LearningRate -> IO (model, CppOptimizerState option) Source #
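For illustration, here is a minimal sketch of a training loop built from runStep, foldLoop, and the GD optimizer defined below. The forward function, input, and target are hypothetical, and the umbrella Torch module is assumed to re-export this module together with Torch.Tensor and Torch.Functional; any Parameterized model whose loss depends differentiably on its parameters would work the same way.

import Torch  -- assumed umbrella module re-exporting Torch.Optim, Torch.Tensor, Torch.Functional

-- Train any Parameterized model with plain gradient descent for a fixed
-- number of iterations, threading (model, optimizer state) through foldLoop.
trainGD
  :: Parameterized model
  => (model -> Tensor -> Tensor)  -- hypothetical forward function
  -> model                        -- initial model
  -> Tensor                       -- input batch
  -> Tensor                       -- target batch
  -> IO model
trainGD forward model0 input target = do
  let lr = asTensor (1e-3 :: Float)               -- LearningRate is a Tensor
  (trained, _gdState) <- foldLoop (model0, GD) 100 $ \(model, opt) _iter -> do
    let diff = forward model input - target
        loss = mean (diff * diff)                 -- mean squared error as the Loss tensor
    runStep model opt loss lr                     -- new parameters + updated optimizer state
  pure trained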

data GD Source #

Constructors

GD 

Instances

Instances details
Show GD Source # 
Instance details

Defined in Torch.Optim

Optimizer GD Source # 
Instance details

Defined in Torch.Optim

Methods

step :: LearningRate -> Gradients -> [Tensor] -> GD -> ([Tensor], GD) Source #

runStep :: Parameterized model => model -> GD -> Loss -> LearningRate -> IO (model, GD) Source #

runStep' :: Parameterized model => model -> GD -> Gradients -> LearningRate -> IO (model, GD) Source #

gd :: LearningRate -> Gradients -> [Tensor] -> [Tensor] Source #

Stateless gradient descent step

gd' :: LearningRate -> Gradients -> [Tensor] -> GD -> ([Tensor], GD) Source #

Gradient descent step with a dummy state variable
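As a small illustration, gd applies the update p - lr * g to each parameter, and gd' does the same while threading the (stateless) GD value so it matches the shape of the Optimizer interface. The parameter and gradient tensors below are hypothetical, and import Torch is assumed as in the earlier sketch.

-- One explicit descent step over two hypothetical parameter tensors.
gdExample :: ([Tensor], GD)
gdExample =
  let params = [asTensor ([1.0, 2.0] :: [Float]), asTensor ([3.0] :: [Float])]
      grads  = Gradients [asTensor ([0.1, 0.2] :: [Float]), asTensor ([0.5] :: [Float])]
      lr     = asTensor (0.01 :: Float)
  in gd' lr grads params GD   -- gd lr grads params returns just the new parameters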

data GDM Source #

Constructors

GDM 

Fields

Instances

Instances details
Show GDM Source # 
Instance details

Defined in Torch.Optim

Optimizer GDM Source # 
Instance details

Defined in Torch.Optim

Methods

step :: LearningRate -> Gradients -> [Tensor] -> GDM -> ([Tensor], GDM) Source #

runStep :: Parameterized model => model -> GDM -> Loss -> LearningRate -> IO (model, GDM) Source #

runStep' :: Parameterized model => model -> GDM -> Gradients -> LearningRate -> IO (model, GDM) Source #

gdm Source #

Arguments

:: LearningRate

learning rate

-> Gradients

model parameter gradients

-> [Tensor]

model parameters

-> GDM

beta & momentum

-> ([Tensor], GDM)

returns new parameters + updated momentum
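A sketch of a single momentum step follows. The GDM field names (beta, momentum) are taken from the constructor documentation above but are assumptions here, since the Fields listing is collapsed; the momentum buffers start at zero, one per parameter tensor. import Torch is assumed as before.

-- Initialize zeroed momentum buffers and take one GDM step.
gdmExample :: [Tensor] -> Gradients -> ([Tensor], GDM)
gdmExample params grads =
  let initialState = GDM { beta = 0.9, momentum = map zerosLike params }  -- assumed field names
      lr           = asTensor (0.01 :: Float)
  in gdm lr grads params initialState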

data Adam Source #

State representation for Adam Optimizer

Constructors

Adam 

Fields

Instances

Instances details
Show Adam Source # 
Instance details

Defined in Torch.Optim

Optimizer Adam Source # 
Instance details

Defined in Torch.Optim

Methods

step :: LearningRate -> Gradients -> [Tensor] -> Adam -> ([Tensor], Adam) Source #

runStep :: Parameterized model => model -> Adam -> Loss -> LearningRate -> IO (model, Adam) Source #

runStep' :: Parameterized model => model -> Adam -> Gradients -> LearningRate -> IO (model, Adam) Source #

adam Source #

Arguments

:: LearningRate

learning rate

-> Gradients

model parameter gradients

-> [Tensor]

model parameters

-> Adam

adam parameters - beta1, beta2, moments, iteration

-> ([Tensor], Adam)

returns new parameters + updated adam parameters

Adam step
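A sketch of initializing Adam state and taking one step. The field names below (beta1, beta2, m1, m2, iter) are assumptions matching the constructor documentation above, since the Fields listing is collapsed; the moment buffers start at zero, one per parameter. import Torch is assumed as before.

-- Build a fresh Adam state with zeroed moment estimates and take one step.
adamExample :: [Tensor] -> Gradients -> ([Tensor], Adam)
adamExample params grads =
  let initialState = Adam
        { beta1 = 0.9                   -- decay rate for the first moment (assumed field name)
        , beta2 = 0.999                 -- decay rate for the second moment (assumed field name)
        , m1    = map zerosLike params  -- first moment estimates (assumed field name)
        , m2    = map zerosLike params  -- second moment estimates (assumed field name)
        , iter  = 0                     -- step counter (assumed field name)
        }
      lr = asTensor (1e-3 :: Float)
  in adam lr grads params initialState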

data Adagrad Source #

State representation for Adagrad Optimizer

Constructors

Adagrad 

Fields

Instances

Instances details
Show Adagrad Source # 
Instance details

Defined in Torch.Optim

Optimizer Adagrad Source # 
Instance details

Defined in Torch.Optim

Methods

step :: LearningRate -> Gradients -> [Tensor] -> Adagrad -> ([Tensor], Adagrad) Source #

runStep :: Parameterized model => model -> Adagrad -> Loss -> LearningRate -> IO (model, Adagrad) Source #

runStep' :: Parameterized model => model -> Adagrad -> Gradients -> LearningRate -> IO (model, Adagrad) Source #

adagrad Source #

Arguments

:: LearningRate

learning rate

-> Gradients

model parameter gradients

-> [Tensor]

model parameters

-> Adagrad

adagrad parameters - gsum, iteration

-> ([Tensor], Adagrad)

returns new parameters + updated adagrad parameters

Adagrad step
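A sketch of initializing Adagrad state and taking one step. The field names (gsum, iter) are assumptions matching the constructor documentation above; the squared-gradient accumulator starts at zero, one buffer per parameter. import Torch is assumed as before.

-- Build a fresh Adagrad state with a zeroed accumulator and take one step.
adagradExample :: [Tensor] -> Gradients -> ([Tensor], Adagrad)
adagradExample params grads =
  let initialState = Adagrad
        { gsum = map zerosLike params  -- running sum of squared gradients (assumed field name)
        , iter = 0                     -- step counter (assumed field name)
        }
      lr = asTensor (1e-2 :: Float)
  in adagrad lr grads params initialState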

foldLoop :: a -> Int -> (a -> Int -> IO a) -> IO a Source #

Syntactic sugar for looping with foldM
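A small sketch of foldLoop outside a training context; the body receives the accumulator and the iteration index, assumed here to run from 1 to the given count, matching foldM over [1..n].

-- Sum the iteration indices 1..10, printing each one along the way.
sumIndices :: IO Int
sumIndices = foldLoop 0 10 $ \acc i -> do
  putStrLn ("iteration " ++ show i)
  pure (acc + i)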