hasktorch-0.2.0.0: Functional differentiable programming in Haskell
Safe HaskellSafe-Inferred
LanguageHaskell2010

Torch.Typed.Functional

Synopsis

Documentation

>>> :set -XOverloadedLists

bitwiseNot Source #

Arguments

:: forall device shape. Tensor device 'Bool shape

input

-> Tensor device 'Bool shape

output

Computes the bitwise NOT of the given input tensor. The input tensor must be of integral or Boolean types. For bool tensors, it computes the logical NOT.

>>> dtype &&& shape $ bitwiseNot (ones :: CPUTensor 'D.Bool [3,3])
(Bool,[3,3])

logicalNot Source #

Arguments

:: forall device shape. Tensor device 'Bool shape

input

-> Tensor device 'Bool shape

output

Computes the element-wise logical NOT of the given input tensor. In this typed API both the input and the output are Bool tensors. (In the underlying untyped operation, the output defaults to the bool dtype, and for non-bool inputs zeros are treated as False and non-zeros as True.)
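
For illustration, a doctest sketch in the style of bitwiseNot above (the types guarantee dtype and shape are preserved):

>>> dtype &&& shape $ logicalNot (ones :: CPUTensor 'D.Bool '[3,3])
(Bool,[3,3])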

logicalXor Source #

Arguments

:: forall device shape. Tensor device 'Bool shape

self

-> Tensor device 'Bool shape

other

-> Tensor device 'Bool shape 
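
Computes the element-wise logical XOR of self and other; logicalAnd and logicalOr below follow the same pattern. An illustrative doctest (not part of the original docs, following the module's conventions):

>>> dtype &&& shape $ logicalXor (ones :: CPUTensor 'D.Bool '[3,3]) (ones :: CPUTensor 'D.Bool '[3,3])
(Bool,[3,3])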

logicalAnd Source #

Arguments

:: forall device shape. Tensor device 'Bool shape

self

-> Tensor device 'Bool shape

other

-> Tensor device 'Bool shape 

logicalOr Source #

Arguments

:: forall device shape. Tensor device 'Bool shape

self

-> Tensor device 'Bool shape

other

-> Tensor device 'Bool shape 

type family SumDTypeIsValid (device :: (DeviceType, Nat)) (dtype :: DType) :: Constraint where ... Source #

Equations

SumDTypeIsValid '('CPU, 0) dtype = DTypeIsNotHalf '('CPU, 0) dtype 
SumDTypeIsValid '('CUDA, _) dtype = () 
SumDTypeIsValid '(deviceType, _) dtype = UnsupportedDTypeForDevice deviceType dtype 

sumAll Source #

Arguments

:: forall shape dtype' dtype device. (SumDTypeIsValid device dtype, dtype' ~ SumDType dtype) 
=> Tensor device dtype shape

input

-> Tensor device dtype' '[]

output

sumAll

>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: Int) $ sumAll (ones :: CPUTensor 'D.Bool '[2, 3])
(Int64,([],6))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: Int) $ sumAll (ones :: CPUTensor 'D.UInt8 '[2, 3])
(Int64,([],6))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: Int) $ sumAll (ones :: CPUTensor 'D.Int8 '[2, 3])
(Int64,([],6))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: Int) $ sumAll (ones :: CPUTensor 'D.Int16 '[2, 3])
(Int64,([],6))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: Int) $ sumAll (ones :: CPUTensor 'D.Int32 '[2, 3])
(Int64,([],6))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: Int) $ sumAll (ones :: CPUTensor 'D.Int64 '[2, 3])
(Int64,([],6))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: Float) $ sumAll (ones :: CPUTensor 'D.Float '[2, 3])
(Float,([],6.0))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: Double) $ sumAll (ones :: CPUTensor 'D.Double '[2, 3])
(Double,([],6.0))

sumDim Source #

Arguments

:: forall d shape shape' dtype dtype' device. (KnownNat d, shape' ~ DropValue shape d, SumDTypeIsValid device dtype, dtype' ~ SumDType dtype) 
=> Tensor device dtype shape

input

-> Tensor device dtype' shape'

output

sumDim

>>> dtype &&& shape $ sumDim @0 (ones :: CPUTensor 'D.Float '[3,4,5])
(Float,[4,5])
>>> sumDim @1 (ones :: CPUTensor 'D.Float '[2,4])
Tensor Float [2] [ 4.0000   ,  4.0000   ]

abs Source #

Arguments

:: forall shape dtype device. StandardDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

abs

>>> dtype &&& shape $ abs (ones :: CPUTensor 'D.Float '[2,2])
(Float,[2,2])

ceil Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

ceil

>>> dtype &&& shape $ ceil (ones :: CPUTensor 'D.Float '[2,2])
(Float,[2,2])

floor Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

floor

>>> dtype &&& shape $ floor (ones :: CPUTensor 'D.Float '[2,2])
(Float,[2,2])

type family MinMaxDTypeIsValid (device :: (DeviceType, Nat)) (dtype :: DType) :: Constraint where ... Source #

Equations

MinMaxDTypeIsValid '('CPU, 0) dtype = DTypeIsNotHalf '('CPU, 0) dtype 
MinMaxDTypeIsValid '('CUDA, _) dtype = () 
MinMaxDTypeIsValid '(deviceType, _) dtype = UnsupportedDTypeForDevice deviceType dtype 

min Source #

Arguments

:: forall shape dtype device. (MinMaxDTypeIsValid device dtype, AllDimsPositive shape) 
=> Tensor device dtype shape

input

-> Tensor device dtype '[]

output

min

>>> dtype &&& shape $ min (ones :: CPUTensor 'D.Float '[2,2])
(Float,[])

max Source #

Arguments

:: forall shape dtype device. (MinMaxDTypeIsValid device dtype, AllDimsPositive shape) 
=> Tensor device dtype shape

input

-> Tensor device dtype '[]

output

max

>>> dtype &&& shape $ max (ones :: CPUTensor 'D.Float '[2,2])
(Float,[])

type family MeanDTypeValidation (device :: (DeviceType, Nat)) (dtype :: DType) :: Constraint where ... Source #

Equations

MeanDTypeValidation '(deviceType, deviceIndex) dtype = (DTypeIsFloatingPoint '(deviceType, deviceIndex) dtype, DTypeIsNotHalf '(deviceType, deviceIndex) dtype) 

meanAll Source #

Arguments

:: forall shape dtype device. (MeanDTypeValidation device dtype, AllDimsPositive shape) 
=> Tensor device dtype shape

input

-> Tensor device dtype '[]

output

Computes the mean while carrying out a full reduction of all tensor dimensions.

>>> meanAll (ones :: CPUTensor 'D.Float '[])
Tensor Float []  1.0000
>>> meanAll (zeros :: CPUTensor 'D.Float '[2,2])
Tensor Float []  0.0000

unsafeMeanAll Source #

Arguments

:: forall shape dtype device. MeanDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype '[]

output

Computes the mean while carrying out a full reduction of all tensor dimensions. Unlike meanAll, this version does not require all dimensions to be positive, so it can be applied to empty tensors, for which it returns NaN.

>>> unsafeMeanAll (ones :: CPUTensor 'D.Float '[])
Tensor Float []  1.0000
>>> unsafeMeanAll (ones :: CPUTensor 'D.Float '[0])
Tensor Float [] NaN
>>> unsafeMeanAll (zeros :: CPUTensor 'D.Float '[2,2])
Tensor Float []  0.0000

meanDim Source #

Arguments

:: forall dim shape' shape dtype device. (KnownNat dim, shape' ~ DropValue shape dim, MeanDTypeValidation device dtype, AllDimsPositive shape) 
=> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

Computes the mean and reduces the tensor over the specified dimension.

>>> t = ones :: CPUTensor 'D.Float '[3,4,5]
>>> dtype &&& shape $ meanDim @0 t
(Float,[4,5])
>>> dtype &&& shape $ meanDim @1 t
(Float,[3,5])
>>> dtype &&& shape $ meanDim @2 t
(Float,[3,4])

meanNamedDim Source #

Arguments

:: forall dim shape' shape dtype device. (KnownNat (FindDim dim shape), shape' ~ DropNamedValue shape dim, MeanDTypeValidation device dtype) 
=> NamedTensor device dtype shape

input

-> NamedTensor device dtype shape'

output

Computes the mean and reduces the tensor over the specified dimension.

>>> import Torch.Typed.Factories
>>> import Data.Default.Class
>>> t = def :: NamedTensor '( D.CPU, 0) 'D.Float '[Vector 3, Vector 4, Vector 5]
>>> dtype &&& shape $ meanNamedDim @(Vector 4) t
(Float,[3,5])

mean :: forall dim keepOrDropDim shape' shape dtype device. (KnownNat dim, KnownKeepOrDropDim keepOrDropDim, shape' ~ ConditionalDropDimension shape dim keepOrDropDim, MeanDTypeValidation device dtype, AllDimsPositive shape) => Tensor device dtype shape -> Tensor device dtype shape' Source #

Computes the mean and optionally reduces the tensor over the specified dimension.

See https://pytorch.org/docs/stable/torch.html#torch.mean for more information.

>>> t = fromJust [[5, 1], [3, 2], [4, 1], [2, 7]] :: CPUTensor 'D.Float '[4, 2]
>>> mean @0 @KeepDim t
Tensor Float [1,2] [[ 3.5000   ,  2.7500   ]]

medianAll Source #

Arguments

:: forall shape dtype device. (StandardDTypeValidation device dtype, AllDimsPositive shape) 
=> Tensor device dtype shape

input

-> Tensor device dtype '[]

output

Computes the median while carrying out a full reduction of all tensor dimensions.

>>> dtype &&& shape $ medianAll (ones :: CPUTensor 'D.Float '[2,2])
(Float,[])

medianDim Source #

Arguments

:: forall dim shape' shape dtype device. (KnownNat dim, shape' ~ DropValue shape dim, StandardDTypeValidation device dtype, AllDimsPositive shape) 
=> Tensor device dtype shape

input

-> (Tensor device dtype shape', Tensor device 'Int64 shape')

output

Computes the median and reduces the tensor over the specified dimension.

>>> t = ones :: CPUTensor 'D.Float '[3,4,5]
>>> dtype &&& shape $ fst $ medianDim @0 t
(Float,[4,5])
>>> dtype &&& shape $ fst $ medianDim @1 t
(Float,[3,5])
>>> dtype &&& shape $ fst $ medianDim @2 t
(Float,[3,4])

median Source #

Arguments

:: forall dim keepOrDropDim shape' shape dtype device. (KnownNat dim, KnownKeepOrDropDim keepOrDropDim, shape' ~ ConditionalDropDimension shape dim keepOrDropDim, StandardDTypeValidation device dtype, AllDimsPositive shape) 
=> Tensor device dtype shape

input

-> (Tensor device dtype shape', Tensor device 'Int64 shape')

output

Computes the median and optionally reduces the tensor over the specified dimension.

See https://pytorch.org/docs/stable/torch.html#torch.median for more information.

>>> t = fromJust [[5, 1], [3, 2], [4, 1], [2, 7]] :: CPUTensor 'D.Float '[4, 2]
>>> median @0 @KeepDim t
(Tensor Float [1,2] [[ 3.0000   ,  1.0000   ]],Tensor Int64 [1,2] [[ 1,  2]])

Note: the indices shown are for libtorch 1.8.0 and later; libtorch 1.7.0 returned Tensor Int64 [1,2] [[ 1,  0]] instead.

mode Source #

Arguments

:: forall dim keepOrDropDim shape' shape dtype device. (KnownNat dim, KnownKeepOrDropDim keepOrDropDim, shape' ~ ConditionalDropDimension shape dim keepOrDropDim, StandardDTypeValidation device dtype, AllDimsPositive shape) 
=> Tensor device dtype shape

input

-> (Tensor device dtype shape', Tensor device 'Int64 shape')

output

Returns a tuple '(modes, indices)' where modes is the mode value of each row of the input tensor in the given dimension dim, i.e. a value which appears most often in that row, and indices is the index location of each mode value found.

See https://pytorch.org/docs/stable/torch.html#torch.mode for more information.

>>> t = fromJust [[0, 5], [0, 2], [3, 5]] :: CPUTensor 'D.Int64 '[3, 2]
>>> (modes :: CPUTensor 'D.Int64 '[2], indices :: CPUTensor 'D.Int64 '[2]) = mode @0 @DropDim t
>>> (dtype modes, shape modes, D.asValue (toDynamic modes) :: [Int])
(Int64,[2],[0,5])
>>> (dtype indices, shape indices, D.asValue (toDynamic indices) :: [Int])
(Int64,[2],[1,2])
>>> t = fromJust [[0, 0], [0, 1], [3, 3]] :: CPUTensor 'D.Float '[3, 2]
>>> (modes :: CPUTensor 'D.Float '[3,1], indices :: CPUTensor 'D.Int64 '[3,1]) = mode @1 @KeepDim t
>>> (dtype modes, shape modes, D.asValue (toDynamic modes) :: [[Float]])
(Float,[3,1],[[0.0],[0.0],[3.0]])
>>> (dtype indices, shape indices, D.asValue (toDynamic indices) :: [[Int]])
(Int64,[3,1],[[1],[0],[1]])

addScalar Source #

Arguments

:: forall a shape dtype device. Scalar a 
=> a

scalar input

-> Tensor device dtype shape

tensor input

-> Tensor device dtype shape

output

addScalar TODO: what dtypes is this defined for? TODO: what scalar types is this defined for?

>>> dtype &&& shape $ addScalar 1 (ones :: CPUTensor 'D.Float '[2,2])
(Float,[2,2])

subScalar Source #

Arguments

:: forall a shape dtype device. Scalar a 
=> a

scalar input

-> Tensor device dtype shape

tensor input

-> Tensor device dtype shape

output

subScalar TODO: what dtypes is this defined for? TODO: what scalar types is this defined for?

>>> dtype &&& shape $ subScalar 1 (ones :: CPUTensor 'D.Float '[2,2])
(Float,[2,2])

mulScalar Source #

Arguments

:: forall a shape dtype device. Scalar a 
=> a

scalar input

-> Tensor device dtype shape

tensor input

-> Tensor device dtype shape

output

mulScalar TODO: what dtypes is this defined for? TODO: what scalar types is this defined for?

>>> dtype &&& shape $ mulScalar 2 (ones :: CPUTensor 'D.Float '[2,2])
(Float,[2,2])

divScalar Source #

Arguments

:: forall a shape dtype device. Scalar a 
=> a

scalar input

-> Tensor device dtype shape

tensor input

-> Tensor device dtype shape

output

divScalar TODO: what dtypes is this defined for? TODO: what scalar types is this defined for?

>>> dtype &&& shape $ divScalar 2 (ones :: CPUTensor 'D.Float '[2,2])
(Float,[2,2])

powScalar Source #

Arguments

:: forall a shape dtype device. Scalar a 
=> a

power

-> Tensor device dtype shape

input tensor

-> Tensor device dtype shape

output tensor

powScalar TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ powScalar 2 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

erf Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

erf

>>> dtype &&& shape $ erf (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

exp Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

exp

>>> dtype &&& shape $ exp (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

log1p Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

log1p

>>> dtype &&& shape $ log1p (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

log2 Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

log2

>>> dtype &&& shape $ log2 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

log10 Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

log10

>>> dtype &&& shape $ log10 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

pow Source #

Arguments

:: forall shape'' shape shape' dtype device. (BasicArithmeticDTypeIsValid device dtype, shape'' ~ Broadcast shape shape') 
=> Tensor device dtype shape

power

-> Tensor device dtype shape'

input tensor

-> Tensor device dtype shape''

output tensor

pow This operation supports broadcasting. TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ pow (2 :: CPUTensor 'D.Float '[]) (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

relu Source #

Arguments

:: forall shape dtype device t. (StandardFloatingPointDTypeValidation device dtype, IsUnnamed t device dtype shape) 
=> t

input

-> t

output

relu activation function

>>> dtype &&& shape $ relu (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

selu Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

selu

>>> dtype &&& shape $ selu (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

mish :: forall shape dtype device. (StandardFloatingPointDTypeValidation device dtype, BasicArithmeticDTypeIsValid device dtype, shape ~ Broadcast shape shape) => Tensor device dtype shape -> Tensor device dtype shape Source #

mish mish is a smooth activation function, see https://arxiv.org/abs/1908.08681 for details.

>>> dtype &&& shape &&& (\t -> D.asValue (toDynamic t) :: [[Float]]) $ mish (ones :: CPUTensor 'D.Float '[3,2])
(Float,([3,2],[[0.86509836,0.86509836],[0.86509836,0.86509836],[0.86509836,0.86509836]]))

sigmoid Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

sigmoid

>>> dtype &&& shape $ sigmoid (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

sin Source #

Arguments

:: forall shape dtype device t. (StandardFloatingPointDTypeValidation device dtype, IsUnnamed t device dtype shape) 
=> t

input

-> t

output

sin

>>> dtype &&& shape $ sin (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

sinh Source #

Arguments

:: forall shape dtype device t. (StandardFloatingPointDTypeValidation device dtype, IsUnnamed t device dtype shape) 
=> t

input

-> t

output

sinh

>>> dtype &&& shape $ sinh (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

cos Source #

Arguments

:: forall shape dtype device t. (StandardFloatingPointDTypeValidation device dtype, IsUnnamed t device dtype shape) 
=> t

input

-> t

output

cos

>>> dtype &&& shape $ cos (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

sqrt Source #

Arguments

:: forall shape dtype device t. (StandardFloatingPointDTypeValidation device dtype, IsUnnamed t device dtype shape) 
=> t

input

-> t

output

sqrt
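
For illustration, a doctest sketch matching the other unary ops (the types guarantee dtype and shape are preserved):

>>> dtype &&& shape $ sqrt (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])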

tanh Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

tanh
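
For illustration, a doctest sketch matching the other unary ops:

>>> dtype &&& shape $ tanh (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])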

type family ConditionalReduction (shape :: [Nat]) (reduction :: Reduction) :: [Nat] where ... Source #

ConditionalReduction

>>> :kind! ConditionalReduction '[3,2] ReduceNone
ConditionalReduction '[3,2] ReduceNone :: [Natural]
= '[3, 2]
>>> :kind! ConditionalReduction '[3,2] ReduceMean
ConditionalReduction '[3,2] ReduceMean :: [Natural]
= '[]

Equations

ConditionalReduction shape ReduceNone = shape 
ConditionalReduction shape _ = '[] 

class KnownReduction reduction where Source #

Instances

Instances details
KnownReduction 'ReduceMean Source # 
Instance details

Defined in Torch.Typed.Functional

KnownReduction 'ReduceNone Source # 
Instance details

Defined in Torch.Typed.Functional

KnownReduction 'ReduceSum Source # 
Instance details

Defined in Torch.Typed.Functional

binaryCrossEntropy Source #

Arguments

:: forall (reduction :: Reduction) shape shape' dtype device. (KnownReduction reduction, shape' ~ ConditionalReduction shape reduction, StandardFloatingPointDTypeValidation device dtype) 
=> Tensor device dtype shape

weight

-> Tensor device dtype shape

prediction

-> Tensor device dtype shape

target

-> Tensor device dtype shape'

output

binary cross entropy

>>> t = ones :: CPUTensor 'D.Float '[2,2]
>>> dtype &&& shape $ binaryCrossEntropy @ReduceNone t t t
(Float,[2,2])
>>> dtype &&& shape $ binaryCrossEntropy @ReduceMean t t t
(Float,[])
>>> dtype &&& shape $ binaryCrossEntropy @ReduceSum t t t
(Float,[])

mseLoss Source #

Arguments

:: forall (reduction :: Reduction) shape shape' dtype device. (KnownReduction reduction, shape' ~ ConditionalReduction shape reduction, StandardFloatingPointDTypeValidation device dtype) 
=> Tensor device dtype shape

prediction

-> Tensor device dtype shape

target

-> Tensor device dtype shape'

output

mseLoss

>>> t = ones :: CPUTensor 'D.Float '[2,2]
>>> dtype &&& shape $ mseLoss @ReduceNone t t
(Float,[2,2])
>>> dtype &&& shape $ mseLoss @ReduceMean t t
(Float,[])
>>> dtype &&& shape $ mseLoss @ReduceSum t t
(Float,[])

softmax Source #

Arguments

:: forall dim shape dtype device. (KnownNat dim, DimOutOfBoundCheck shape dim, KnownDType dtype, StandardFloatingPointDTypeValidation device dtype) 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

softmax

>>> t = ones :: CPUTensor 'D.Float '[2,2]
>>> dtype &&& shape $ softmax @0 t
(Float,[2,2])
>>> dtype &&& shape $ softmax @1 t
(Float,[2,2])

logSoftmax Source #

Arguments

:: forall dim shape dtype device. (KnownNat dim, DimOutOfBoundCheck shape dim, KnownDType dtype, StandardFloatingPointDTypeValidation device dtype) 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

logSoftmax

>>> t = ones :: CPUTensor 'D.Float '[2,2]
>>> dtype &&& shape $ logSoftmax @0 t
(Float,[2,2])
>>> dtype &&& shape $ logSoftmax @1 t
(Float,[2,2])

type family Square (shape :: [Nat]) :: [Nat] where ... Source #

Equations

Square (n ': (n ': '[])) = '[n, n] 
Square (b ': (n ': (n ': '[]))) = '[b, n, n] 
Square _ = TypeError (Text "This shape must be square matrix or batch + square matrix.") 
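
For illustration, a GHCi sketch (not part of the original docs; the reductions follow the equations above):

>>> :kind! Square '[2,2]
Square '[2,2] :: [Natural]
= '[2, 2]
>>> :kind! Square '[3,2,2]
Square '[3,2,2] :: [Natural]
= '[3, 2, 2]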

type family VectorOfSquare (shape :: [Nat]) :: [Nat] where ... Source #

Equations

VectorOfSquare (n ': (n ': '[])) = '[n] 
VectorOfSquare (b ': (n ': (n ': '[]))) = '[b, n] 
VectorOfSquare _ = TypeError (Text "This shape must be square matrix or batch + square matrix.") 
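
An illustrative GHCi sketch (outputs inferred from the equations above):

>>> :kind! VectorOfSquare '[2,2]
VectorOfSquare '[2,2] :: [Natural]
= '[2]
>>> :kind! VectorOfSquare '[3,2,2]
VectorOfSquare '[3,2,2] :: [Natural]
= '[3, 2]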

type family FstSquareDim (shape :: [Nat]) :: Nat where ... Source #

Equations

FstSquareDim (n ': (m ': '[])) = n 
FstSquareDim (b ': (n ': (m ': '[]))) = n 
FstSquareDim _ = TypeError (Text "Can not get first dimention of matrix or batch + matrix.") 
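
An illustrative GHCi sketch (outputs inferred from the equations above; for a batched matrix the batch dimension is skipped):

>>> :kind! FstSquareDim '[3,2]
FstSquareDim '[3,2] :: Natural
= 3
>>> :kind! FstSquareDim '[5,3,2]
FstSquareDim '[5,3,2] :: Natural
= 3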

type family InverseShapeIsValid (device :: (DeviceType, Nat)) (shape :: [Nat]) :: Constraint where ... Source #

Equations

InverseShapeIsValid '('CPU, 0) _ = () 
InverseShapeIsValid '('CUDA, _) shape = AllDimsPositive shape 

type family InverseDTypeIsValid (device :: (DeviceType, Nat)) (dtype :: DType) :: Constraint where ... Source #

Equations

InverseDTypeIsValid '('CPU, 0) dtype = (DTypeIsFloatingPoint '('CPU, 0) dtype, DTypeIsNotHalf '('CPU, 0) dtype) 
InverseDTypeIsValid '('CUDA, deviceIndex) dtype = (DTypeIsFloatingPoint '('CUDA, deviceIndex) dtype, DTypeIsNotHalf '('CUDA, deviceIndex) dtype) 
InverseDTypeIsValid '(deviceType, _) dtype = UnsupportedDTypeForDevice deviceType dtype 

inverse Source #

Arguments

:: forall shape shape' dtype device. (shape' ~ Square shape, InverseShapeIsValid device shape, InverseDTypeIsValid device dtype) 
=> Tensor device dtype shape

inverse

-> Tensor device dtype shape'

output

inverse TODO: if rank < n for any tensors in the batch, then this will not work. we can't decide this statically, but we should prevent runtime errors. therefore, return Maybe?

>>> t <- randn :: IO (CPUTensor 'D.Float '[3,2,2])
>>> dtype &&& shape $ inverse t
(Float,[3,2,2])
>>> t <- randn :: IO (CPUTensor 'D.Float '[2,2])
>>> dtype &&& shape $ inverse t
(Float,[2,2])

type family SymeigDTypeIsValid (device :: (DeviceType, Nat)) (dtype :: DType) :: Constraint where ... Source #

Equations

SymeigDTypeIsValid '('CPU, 0) dtype = (DTypeIsFloatingPoint '('CPU, 0) dtype, DTypeIsNotHalf '('CPU, 0) dtype) 
SymeigDTypeIsValid '('CUDA, deviceIndex) dtype = (DTypeIsFloatingPoint '('CUDA, deviceIndex) dtype, DTypeIsNotHalf '('CUDA, deviceIndex) dtype) 
SymeigDTypeIsValid '(deviceType, _) dtype = UnsupportedDTypeForDevice deviceType dtype 

symeig Source #

Arguments

:: forall shape shape' shape'' dtype device. (shape' ~ VectorOfSquare shape, shape'' ~ Square shape, SymeigDTypeIsValid device dtype) 
=> Tri

upper or lower triangular

-> Tensor device dtype shape

input

-> (Tensor device dtype shape', Tensor device dtype shape'')

eigenvalues and eigenvectors

symeig Warning: torch.symeig is deprecated in favor of torch.linalg.eigh and will be removed in a future PyTorch release. The default behavior has changed from using the upper triangular portion of the matrix by default to using the lower triangular portion. L, _ = torch.symeig(A, upper=upper) should be replaced with L = torch.linalg.eigvalsh(A, UPLO='U' if upper else 'L') and L, V = torch.symeig(A, eigenvectors=True) should be replaced with L, V = torch.linalg.eigh(A, UPLO='U' if upper else 'L') (function operator())

>>> t <- rand :: IO (CPUTensor 'D.Float '[3,2,2])
>>> (eigenVals,eigenVecs) = symeig Upper t
>>> dtype &&& shape $ eigenVals -- Skip warning
...
>>> dtype &&& shape $ eigenVals
(Float,[3,2])
>>> :t eigenVals
eigenVals :: Tensor '( 'D.CPU, 0) 'D.Float '[3, 2]
>>> dtype &&& shape $ eigenVecs
(Float,[3,2,2])
>>> :t eigenVecs
eigenVecs :: Tensor '( 'D.CPU, 0) 'D.Float '[3, 2, 2]
>>> (eigenVals,eigenVecs) = symeig Lower t
>>> dtype &&& shape $ eigenVals
(Float,[3,2])
>>> dtype &&& shape $ eigenVecs
(Float,[3,2,2])

symeigvalues Source #

Arguments

:: forall shape shape' dtype device. (shape' ~ VectorOfSquare shape, SymeigDTypeIsValid device dtype) 
=> Tri

upper or lower triangular

-> Tensor device dtype shape

input

-> Tensor device dtype shape' 

symeigvalues

>>> t <- rand :: IO (CPUTensor 'D.Float '[3,2,2])
>>> eigenVals = symeigvalues Upper t
>>> dtype &&& shape $ eigenVals
(Float,[3,2])
>>> :t eigenVals
eigenVals :: Tensor '( 'D.CPU, 0) 'D.Float '[3, 2]

type family ConditionalEigenVectors (eigenvectors :: EigenVectors) (n :: Nat) :: [Nat] where ... Source #

type family EigDTypeIsValid (device :: (DeviceType, Nat)) (dtype :: DType) :: Constraint where ... Source #

Equations

EigDTypeIsValid '('CPU, 0) dtype = (DTypeIsFloatingPoint '('CPU, 0) dtype, DTypeIsNotHalf '('CPU, 0) dtype) 
EigDTypeIsValid '('CUDA, deviceIndex) dtype = (DTypeIsFloatingPoint '('CUDA, deviceIndex) dtype, DTypeIsNotHalf '('CUDA, deviceIndex) dtype) 
EigDTypeIsValid '(deviceType, _) dtype = UnsupportedDTypeForDevice deviceType dtype 

eig Source #

Arguments

:: forall eigenvectors n shape dtype device. (KnownNat n, KnownEigenVectors eigenvectors, shape ~ ConditionalEigenVectors eigenvectors n, EigDTypeIsValid device dtype) 
=> Tensor device dtype '[n, n]

input matrix

-> (Tensor device dtype '[n, 2], Tensor device dtype shape)

eigenvalues and eigenvectors

eig Warning: torch.eig is deprecated in favor of torch.linalg.eig and will be removed in a future PyTorch release. torch.linalg.eig returns complex tensors of dtype cfloat or cdouble rather than real tensors mimicking complex tensors. L, _ = torch.eig(A) should be replaced with L_complex = torch.linalg.eigvals(A) and L, V = torch.eig(A, eigenvectors=True) should be replaced with L_complex, V_complex = torch.linalg.eig(A) (function operator())

>>> t <- rand :: IO (CPUTensor 'D.Float '[3,3])
>>> (eigenVals,eigenVecs) = eig @EnableEigenVectors t
>>> dtype &&& shape $ eigenVals -- Skip warning
...
>>> dtype &&& shape $ eigenVals
(Float,[3,2])
>>> :t eigenVals
eigenVals :: Tensor '( 'D.CPU, 0) 'D.Float '[3, 2]
>>> dtype &&& shape $ eigenVecs
(Float,[3,3])
>>> :t eigenVecs
eigenVecs :: Tensor '( 'D.CPU, 0) 'D.Float '[3, 3]
>>> (eigenVals,eigenVecs) = eig @DisableEigenVectors t
>>> dtype &&& shape $ eigenVals
(Float,[3,2])
>>> dtype &&& shape $ eigenVecs
(Float,[0])
>>> :t eigenVecs
eigenVecs :: Tensor '( 'D.CPU, 0) 'D.Float '[0]

type family SVDShapes (shape :: [Nat]) (reduced :: ReducedSVD) :: ([Nat], [Nat], [Nat]) where ... Source #

Equations

SVDShapes '[0, n] 'ThinSVD = '('[0, 0], '[0], '[n, n]) 
SVDShapes '[m, n] 'ThinSVD = '('[m, Min m n], '[Min m n], '[n, Min m n]) 
SVDShapes '[m, n] 'FullSVD = '('[m, m], '[Min m n], '[n, n]) 
SVDShapes '[b, 0, n] 'ThinSVD = '('[b, 0, 0], '[b, 0], '[b, n, n]) 
SVDShapes '[b, m, n] 'ThinSVD = '('[b, m, Min m n], '[b, Min m n], '[b, n, Min m n]) 
SVDShapes '[b, m, n] 'FullSVD = '('[b, m, m], '[b, Min m n], '[b, n, n]) 
SVDShapes _ _ = TypeError (Text "A singular value decomposition can only be computed for 2D matrices for at most one batch dimension.") 
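
For illustration, a GHCi sketch (not part of the original docs); these reductions agree with the svd doctests below:

>>> :kind! SVDShapes '[3, 5] 'ThinSVD
SVDShapes '[3, 5] 'ThinSVD :: ([Natural], [Natural], [Natural])
= '( '[3, 3], '[3], '[5, 3])
>>> :kind! SVDShapes '[5, 3] 'FullSVD
SVDShapes '[5, 3] 'FullSVD :: ([Natural], [Natural], [Natural])
= '( '[5, 5], '[3], '[3, 3])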

data ReducedSVD Source #

Constructors

ThinSVD 
FullSVD 

class KnownReducedSVD (reduced :: ReducedSVD) where Source #

Instances

Instances details
KnownReducedSVD 'FullSVD Source # 
Instance details

Defined in Torch.Typed.Functional

KnownReducedSVD 'ThinSVD Source # 
Instance details

Defined in Torch.Typed.Functional

type family SVDDTypeIsValid (device :: (DeviceType, Nat)) (dtype :: DType) :: Constraint where ... Source #

Equations

SVDDTypeIsValid '('CPU, 0) dtype = (DTypeIsFloatingPoint '('CPU, 0) dtype, DTypeIsNotHalf '('CPU, 0) dtype) 
SVDDTypeIsValid '('CUDA, deviceIndex) dtype = (DTypeIsFloatingPoint '('CUDA, deviceIndex) dtype, DTypeIsNotHalf '('CUDA, deviceIndex) dtype) 
SVDDTypeIsValid '(deviceType, _) dtype = UnsupportedDTypeForDevice deviceType dtype 

svd Source #

Arguments

:: forall reduced shape shapeU shapeS shapeV dtype device. (KnownReducedSVD reduced, '(shapeU, shapeS, shapeV) ~ SVDShapes shape reduced, SVDDTypeIsValid device dtype) 
=> Tensor device dtype shape

(batched) input real matrix

-> (Tensor device dtype shapeU, Tensor device dtype shapeS, Tensor device dtype shapeV)

(batched) output tuple of u, s, and v

Singular Value Decomposition TODO: When compute_uv is False, backward cannot be performed since u and v from the forward pass are required for the backward operation. There is no way to encode this in the types at this point in time; thus, only True is currently supported.

This function returns a tuple `(u, s, v)`, which is the singular value decomposition of an input real matrix or batch of real matrices, such that `input = U×diag(S)×V^T`.

>>> a <- randn :: IO (CPUTensor 'D.Float '[3, 5])
>>> (u, s, v) = svd @'ThinSVD a
>>> dtype &&& shape $ u
(Float,[3,3])
>>> dtype &&& shape $ s
(Float,[3])
>>> dtype &&& shape $ v
(Float,[5,3])
>>> (u, s, v) = svd @'FullSVD a
>>> dtype &&& shape $ u
(Float,[3,3])
>>> dtype &&& shape $ s
(Float,[3])
>>> dtype &&& shape $ v
(Float,[5,5])
>>> a <- randn :: IO (CPUTensor 'D.Float '[5, 3])
>>> (u, s, v) = svd @'ThinSVD a
>>> dtype &&& shape $ u
(Float,[5,3])
>>> dtype &&& shape $ s
(Float,[3])
>>> dtype &&& shape $ v
(Float,[3,3])
>>> (u, s, v) = svd @'FullSVD a
>>> dtype &&& shape $ u
(Float,[5,5])
>>> dtype &&& shape $ s
(Float,[3])
>>> dtype &&& shape $ v
(Float,[3,3])

type family CholeskyDTypeIsValid (device :: (DeviceType, Nat)) (dtype :: DType) :: Constraint where ... Source #

Equations

CholeskyDTypeIsValid '('CPU, 0) dtype = (DTypeIsFloatingPoint '('CPU, 0) dtype, DTypeIsNotHalf '('CPU, 0) dtype) 
CholeskyDTypeIsValid '('CUDA, deviceIndex) dtype = (DTypeIsFloatingPoint '('CUDA, deviceIndex) dtype, DTypeIsNotHalf '('CUDA, deviceIndex) dtype) 
CholeskyDTypeIsValid '(deviceType, _) dtype = UnsupportedDTypeForDevice deviceType dtype 

cholesky Source #

Arguments

:: forall shape shape' dtype device. (shape' ~ Square shape, CholeskyDTypeIsValid device dtype) 
=> Tri

indicate whether to return an upper or lower triangular matrix.

-> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

cholesky Computes the Cholesky decomposition of a symmetric positive-definite matrix. The operation supports batching. TODO: cholesky can throw if the input is not positive-definite.

Warning: torch.cholesky is deprecated in favor of torch.linalg.cholesky and will be removed in a future PyTorch release. L = torch.cholesky(A) should be replaced with L = torch.linalg.cholesky(A) and U = torch.cholesky(A, upper=True) should be replaced with U = torch.linalg.cholesky(A.transpose(-2, -1).conj()).transpose(-2, -1).conj() (function operator())

>>> t <- rand :: IO (CPUTensor 'D.Float '[2,2])
>>> u = cholesky Upper (t `matmul` transpose2D t) -- Skip warning
...
>>> dtype &&& shape $ u
(Float,[2,2])
>>> :t u
u :: Tensor '( 'D.CPU, 0) 'D.Float '[2, 2]

choleskyInverse Source #

Arguments

:: forall n dtype device. (1 <= n, CholeskyDTypeIsValid device dtype) 
=> Tri

decides whether the upper or the lower triangular part of the input tensor is used

-> Tensor device dtype '[n, n]

the input 2-D tensor u, an upper or lower triangular Cholesky factor

-> Tensor device dtype '[n, n]

the output 2-D tensor

choleskyInverse Computes the inverse of a symmetric positive-definite matrix using its Cholesky factor, returned, e.g., by cholesky. Unlike cholesky, this operation does not support batching. The inverse is computed using the LAPACK routine `?potri`.

>>> t <- rand :: IO (CPUTensor 'D.Float '[2,2])
>>> tri = Upper
>>> u = cholesky tri (t `matmul` transpose2D t)
>>> dtype &&& shape $ choleskyInverse tri u
(Float,[2,2])

choleskySolve Source #

Arguments

:: forall m_k m_m dtype device. (Square m_m ~ m_m, FstSquareDim m_m ~ FstSquareDim m_k, 1 <= FstSquareDim m_m, CholeskyDTypeIsValid device dtype) 
=> Tri

decides whether the upper or the lower triangular part of the input tensor u is used

-> Tensor device dtype m_k

the (batched) RHS tensor b

-> Tensor device dtype m_m

the (batched) input 2-D tensor u, an upper or lower triangular Cholesky factor

-> Tensor device dtype m_k

the (batched) output 2-D tensor

choleskySolve Solves the system of linear equations represented by `a c = b` using the Cholesky factor matrix u of a (returned, e.g., by cholesky), where a is a positive semidefinite matrix. The operation supports batching.

>>> t <- rand :: IO (CPUTensor 'D.Float '[3,3])
>>> a = t `matmul` transpose2D t
>>> b <- rand :: IO (CPUTensor 'D.Float '[3,2])
>>> tri = Upper
>>> u = cholesky tri a
>>> dtype &&& shape $ choleskySolve tri b u
(Float,[3,2])

type family SolveDTypeIsValid (device :: (DeviceType, Nat)) (dtype :: DType) :: Constraint where ... Source #

Equations

SolveDTypeIsValid '('CPU, 0) dtype = (DTypeIsFloatingPoint '('CPU, 0) dtype, DTypeIsNotHalf '('CPU, 0) dtype) 
SolveDTypeIsValid '('CUDA, deviceIndex) dtype = (DTypeIsFloatingPoint '('CUDA, deviceIndex) dtype, DTypeIsNotHalf '('CUDA, deviceIndex) dtype) 
SolveDTypeIsValid '(deviceType, _) dtype = UnsupportedDTypeForDevice deviceType dtype 

solve Source #

Arguments

:: forall m_k m_m dtype device. (Square m_m ~ m_m, FstSquareDim m_m ~ FstSquareDim m_k, 1 <= FstSquareDim m_m, SolveDTypeIsValid device dtype) 
=> Tensor device dtype m_k

the (batched) RHS tensor b

-> Tensor device dtype m_m

the (batched) positive semidefinite matrix a

-> (Tensor device dtype m_k, Tensor device dtype m_m)

the (batched) outputs c and lu

solve Solves the system of linear equations represented by `a c = b` and also returns the LU decomposition of a. a has to be a positive semidefinite matrix. The operation supports batching.

Warning: torch.solve is deprecated in favor of torch.linalg.solve and will be removed in a future PyTorch release. torch.linalg.solve has its arguments reversed and does not return the LU factorization. To get the LU factorization see torch.lu, which can be used with torch.lu_solve or torch.lu_unpack. X = torch.solve(B, A).solution should be replaced with X = torch.linalg.solve(A, B) (function operator())

>>> t <- rand :: IO (CPUTensor 'D.Float '[10,10])
>>> a = t `matmul` transpose2D t
>>> b <- rand :: IO (CPUTensor 'D.Float '[10,3])
>>> (c,lu) = solve b a
>>> dtype &&& shape $ c -- Skip warning
...
>>> dtype &&& shape $ c
(Float,[10,3])
>>> dtype &&& shape $ lu
(Float,[10,10])
>>> :t c
c :: Tensor '( 'D.CPU, 0) 'D.Float '[10, 3]
>>> :t lu
lu :: Tensor '( 'D.CPU, 0) 'D.Float '[10, 10]

geqrf Source #

Arguments

:: forall m n dtype device. Tensor device dtype '[m, n]

input matrix

-> (Tensor device dtype '[m, n], Tensor device dtype '[Min m n])

tuple `(a, tau)` of output matrices

geqrf TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? geqrf computes a QR decomposition of the given input matrix, but without constructing Q and R as explicit separate matrices. Rather, this function directly calls the underlying LAPACK function `?geqrf` which produces a tuple `(a, tau)` of intermediate results as defined in the LAPACK documentation for `?geqrf`.

You can use orgqr on `(a, tau)` to compute the real orthogonal matrix Q, but in general you may just want to use qr instead.

See the LAPACK documentation for `?geqrf` for further details, https://software.intel.com/en-us/node/521004.

>>> (a, tau) = geqrf (ones :: CPUTensor 'D.Float '[3,4])
>>> dtype &&& shape $ a
(Float,[3,4])
>>> dtype &&& shape $ tau
(Float,[3])
>>> (a, tau) = geqrf (ones :: CPUTensor 'D.Float '[4,3])
>>> dtype &&& shape $ a
(Float,[4,3])
>>> dtype &&& shape $ tau
(Float,[3])

orgqr :: forall m n dtype device. (KnownNat n, KnownNat m, n <= m) => Tensor device dtype '[m, n] -> Tensor device dtype '[n] -> Tensor device dtype '[m, n] Source #

orgqr TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? Computes the orthogonal matrix Q of a QR factorization from the `(a, tau)` tuple returned by geqrf.

This directly calls the underlying LAPACK function `?orgqr`. See the LAPACK documentation for `?orgqr` for further details, https://software.intel.com/en-us/mkl-developer-reference-c-orgqr.

Since libtorch 1.7, this function's behavior has changed: the first dimension must be at least as large as the second (n <= m).

>>> dtype &&& shape $ orgqr (ones :: CPUTensor 'D.Float '[4,3]) (ones :: CPUTensor 'D.Float '[3])
(Float,[4,3])

sign Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Tensor device dtype shape

output

sign works for all dtypes

>>> dtype &&& shape $ sign (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

type family SetValue (shape :: [Nat]) (i :: Nat) (j :: Nat) :: [Nat] where ... Source #

Equations

SetValue '[] _ _ = '[] 
SetValue (x ': xs) 0 j = j ': xs 
SetValue (x ': xs) i j = x ': SetValue xs (i - 1) j 

type family GetValue (shape :: [Nat]) (i :: Nat) :: Nat where ... Source #

Equations

GetValue '[] _ = TypeError (Text "Can not find a element in the list.") 
GetValue (x ': xs) 0 = x 
GetValue (x ': xs) i = GetValue xs (i - 1) 
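
An illustrative GHCi sketch (outputs follow from the equations above):

>>> :kind! SetValue '[3,2,1] 0 4
SetValue '[3,2,1] 0 4 :: [Natural]
= '[4, 2, 1]
>>> :kind! GetValue '[3,2,1] 1
GetValue '[3,2,1] 1 :: Natural
= 2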

type family Transpose (shape :: [Nat]) (dim0 :: Nat) (dim1 :: Nat) :: [Nat] where ... Source #

Transpose

>>> :kind! Transpose '[3,2] 0 1
Transpose '[3,2] 0 1 :: [Natural]
= '[2, 3]
>>> :kind! Transpose '[3,2,1] 1 2
Transpose '[3,2,1] 1 2 :: [Natural]
= '[3, 1, 2]

Equations

Transpose s d0 d1 = SetValue (SetValue s d0 (GetValue s d1)) d1 (GetValue s d0) 

transpose Source #

Arguments

:: forall n m shape shape' dtype device. (KnownNat n, KnownNat m, shape' ~ Transpose shape n m) 
=> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

transpose See deps/pytorch/aten/src/ATen/native/TensorShape.cpp.

>>> dtype &&& shape $ transpose @0 @1 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[2,3])
>>> dtype &&& shape $ transpose @0 @1 (ones :: CPUTensor 'D.Float '[3,2,1])
(Float,[2,3,1])
>>> dtype &&& shape $ transpose @1 @2 (ones :: CPUTensor 'D.Float '[3,2,1])
(Float,[3,1,2])

transpose2D Source #

Arguments

:: forall (i :: Nat) (j :: Nat) dtype device. Tensor device dtype '[i, j]

input

-> Tensor device dtype '[j, i]

output

transpose2d, special case for a 2D tensor

>>> dtype &&& shape $ transpose2D (ones :: CPUTensor 'D.Float '[3,2])
(Float,[2,3])

class KnownTri (tri :: Tri) where Source #

Methods

triVal :: Tri Source #

Instances

Instances details
KnownTri 'Lower Source # 
Instance details

Defined in Torch.Typed.Functional

Methods

triVal :: Tri Source #

KnownTri 'Upper Source # 
Instance details

Defined in Torch.Typed.Functional

Methods

triVal :: Tri Source #

type family DiagSize (tri :: Tri) (index :: Nat) (m :: Nat) (n :: Nat) :: Nat where ... Source #

Equations

DiagSize 'Upper i m n = If (i <=? n) (Min m (n - i)) (TypeError (((((Text "For a matrix with shape " :<>: ShowType '[m, n]) :<>: Text ", the maximum index for an upper diagonal is ") :<>: ShowType n) :<>: Text ", but asked for index ") :<>: ShowType i)) 
DiagSize 'Lower i m n = If (i <=? m) (Min (m - i) n) (TypeError (((((Text "For a matrix with shape " :<>: ShowType '[m, n]) :<>: Text ", the maximum index for a lower diagonal is ") :<>: ShowType m) :<>: Text ", but asked for index ") :<>: ShowType i)) 

type family DiagShape (tri :: Tri) (index :: Nat) (shape :: [Nat]) :: [Nat] where ... Source #

Equations

DiagShape _ i '[n] = '[n + i, n + i] 
DiagShape tri i '[m, n] = '[DiagSize tri i m n] 
DiagShape _ _ shape = TypeError ((Text "The input must be a matrix or a vector, but it has " :<>: ShowType (ListLength shape)) :<>: Text " dimensions.") 
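
An illustrative GHCi sketch (outputs inferred from the equations above; the first reduction agrees with the diag @'Upper @1 doctest below):

>>> :kind! DiagShape 'Upper 1 '[3, 2]
DiagShape 'Upper 1 '[3, 2] :: [Natural]
= '[1]
>>> :kind! DiagShape 'Lower 0 '[3]
DiagShape 'Lower 0 '[3] :: [Natural]
= '[3, 3]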

diag Source #

Arguments

:: forall tri index shape shape' device dtype. (KnownTri tri, KnownNat index, StandardDTypeValidation device dtype, shape' ~ DiagShape tri index shape) 
=> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

diag

>>> dtype &&& shape $ diag @'Upper @0 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[2])
>>> dtype &&& shape $ diag @'Upper @1 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[1])
>>> dtype &&& shape $ diag @'Lower @1 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[2])

all Source #

Arguments

:: forall shape device. Tensor device 'Bool shape

input

-> Tensor device 'Bool '[]

output

all See https://pytorch.org/docs/stable/tensors.html#torch.BoolTensor.all.

>>> t = all (fromJust [False, False] :: CPUTensor 'D.Bool '[2])
>>> toInt t == 1
False
>>> t = all (fromJust [False, True] :: CPUTensor 'D.Bool '[2])
>>> toInt t == 1
False
>>> t = all (fromJust [True, True] :: CPUTensor 'D.Bool '[2])
>>> toInt t == 1
True

any Source #

Arguments

:: forall shape device. Tensor device 'Bool shape

input

-> Tensor device 'Bool '[]

output

any See https://pytorch.org/docs/stable/tensors.html#torch.BoolTensor.any.

>>> t = any (fromJust [False, False] :: CPUTensor 'D.Bool '[2])
>>> toInt t == 1
False
>>> t = any (fromJust [False, True] :: CPUTensor 'D.Bool '[2])
>>> toInt t == 1
True
>>> t = any (fromJust [True, True] :: CPUTensor 'D.Bool '[2])
>>> toInt t == 1
True

data KeepOrDropDim Source #

Constructors

KeepDim 
DropDim 

Instances

Instances details
KnownKeepOrDropDim 'DropDim Source # 
Instance details

Defined in Torch.Typed.Functional

KnownKeepOrDropDim 'KeepDim Source # 
Instance details

Defined in Torch.Typed.Functional

class KnownKeepOrDropDim keepOrDropDim where Source #

Instances

Instances details
KnownKeepOrDropDim 'DropDim Source # 
Instance details

Defined in Torch.Typed.Functional

KnownKeepOrDropDim 'KeepDim Source # 
Instance details

Defined in Torch.Typed.Functional

type family ConditionalDropDimension (shape :: [Nat]) (dim :: Nat) (keepOrDropDim :: KeepOrDropDim) :: [Nat] where ... Source #

Equations

ConditionalDropDimension '[] _ _ = TypeError (Text "The specified dimension is not available.") 
ConditionalDropDimension (x ': xs) 0 KeepDim = 1 ': xs 
ConditionalDropDimension (x ': xs) 0 DropDim = xs 
ConditionalDropDimension (x ': xs) i keepOrDropDim = x ': ConditionalDropDimension xs (i - 1) keepOrDropDim 
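
An illustrative GHCi sketch (outputs follow from the equations above):

>>> :kind! ConditionalDropDimension '[3, 4, 5] 1 'KeepDim
ConditionalDropDimension '[3, 4, 5] 1 'KeepDim :: [Natural]
= '[3, 1, 5]
>>> :kind! ConditionalDropDimension '[3, 4, 5] 1 'DropDim
ConditionalDropDimension '[3, 4, 5] 1 'DropDim :: [Natural]
= '[3, 5]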

allDim Source #

Arguments

:: forall dim keepOrDropDim shape' shape device. (KnownNat dim, KnownKeepOrDropDim keepOrDropDim, shape' ~ ConditionalDropDimension shape dim keepOrDropDim) 
=> Tensor device 'Bool shape

input

-> Tensor device 'Bool shape'

output

allDim See https://pytorch.org/docs/stable/tensors.html#torch.BoolTensor.all.

>>> t = fromJust [[True, True], [True, False], [True, True], [True, True]] :: CPUTensor 'D.Bool '[4, 2]
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [Bool]) $ allDim @1 @DropDim t
(Bool,([4],[True,False,True,True]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[Bool]]) $ allDim @1 @KeepDim t
(Bool,([4,1],[[True],[False],[True],[True]]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [Bool]) $ allDim @0 @DropDim t
(Bool,([2],[True,False]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[Bool]]) $ allDim @0 @KeepDim t
(Bool,([1,2],[[True,False]]))

anyDim Source #

Arguments

:: forall dim keepOrDropDim shape' shape device. (KnownNat dim, KnownKeepOrDropDim keepOrDropDim, shape' ~ ConditionalDropDimension shape dim keepOrDropDim) 
=> Tensor device 'Bool shape

input

-> Tensor device 'Bool shape'

output

anyDim See https://pytorch.org/docs/stable/tensors.html#torch.BoolTensor.any.

>>> t = fromJust [[True, True], [True, False], [True, True], [True, True]] :: CPUTensor 'D.Bool '[4, 2]
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [Bool]) $ anyDim @1 @DropDim t
(Bool,([4],[True,True,True,True]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[Bool]]) $ anyDim @1 @KeepDim t
(Bool,([4,1],[[True],[True],[True],[True]]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [Bool]) $ anyDim @0 @DropDim t
(Bool,([2],[True,True]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[Bool]]) $ anyDim @0 @KeepDim t
(Bool,([1,2],[[True,True]]))

dropout Source #

Arguments

:: forall shape dtype device. Double

dropout probability

-> Bool

whether or not to activate dropout

-> Tensor device dtype shape

input

-> IO (Tensor device dtype shape)

output

dropout TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: get rid of IO by exposing the RNG state TODO: can we use D.Scalar for the dropout probability?

>>> t = ones :: CPUTensor 'D.Float '[3,2]
>>> t' <- dropout 0.5 False t
>>> dtype &&& shape $ t'
(Float,[3,2])
>>> t'' <- dropout 0.5 False t
>>> t ==. t''
Tensor Bool [3,2] [[ 1,  1],
                   [ 1,  1],
                   [ 1,  1]]
>>> t''' <- dropout 0.0 True t
>>> t ==. t'''
Tensor Bool [3,2] [[ 1,  1],
                   [ 1,  1],
                   [ 1,  1]]
>>> t'''' <- dropout 1.0 True t
>>> t''''
Tensor Float [3,2] [[ 0.0000,  0.0000],
                    [ 0.0000,  0.0000],
                    [ 0.0000,  0.0000]]

featureDropout Source #

Arguments

:: forall shape dtype device. Double

dropout probability

-> Bool

whether or not to activate dropout

-> Tensor device dtype shape

input

-> Tensor device dtype shape

output

featureDropout TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: why not IO? TODO: can we use D.Scalar for the dropout probability?

>>> c = featureDropout 0.1 True (ones :: CPUTensor 'D.Float '[2,2])
>>> dtype &&& shape $ c
(Float,[2,2])

alphaDropout Source #

Arguments

:: forall shape dtype device. Double

dropout probability

-> Bool

whether or not to activate dropout

-> Tensor device dtype shape

input

-> Tensor device dtype shape

output

alphaDropout TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: why not IO? TODO: can we use D.Scalar for the dropout probability?

>>> c = alphaDropout 0.1 True (ones :: CPUTensor 'D.Float '[2,2])
>>> dtype &&& shape $ c
(Float,[2,2])

featureAlphaDropout Source #

Arguments

:: forall shape dtype device. Double

dropout probability

-> Bool

whether or not to activate dropout

-> Tensor device dtype shape

input

-> Tensor device dtype shape

output

featureAlphaDropout TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: why not IO? TODO: can we use D.Scalar for the dropout probability?

>>> c = featureAlphaDropout 0.1 True (ones :: CPUTensor 'D.Float '[2,2])
>>> dtype &&& shape $ c
(Float,[2,2])

acos Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

acos

>>> dtype &&& shape $ acos (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

avgPool1d Source #

Arguments

:: forall kernelSize stride padding channelSize inputSize batchSize outputSize dtype device. (All KnownNat '[kernelSize, stride, padding, channelSize, inputSize, batchSize], ConvSideCheck inputSize kernelSize stride padding outputSize) 
=> Tensor device dtype '[batchSize, channelSize, inputSize]

input

-> Tensor device dtype '[batchSize, channelSize, outputSize]

output

avgPool1d TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = avgPool1d @1 @1 @0 (ones :: CPUTensor 'D.Float '[1,3,4])
>>> shape t
[1,3,4]
>>> :t t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 3, 4]

adaptiveAvgPool1d Source #

Arguments

:: forall outputSize channelSize inputSize batchSize dtype device. All KnownNat '[channelSize, inputSize, batchSize, outputSize] 
=> Tensor device dtype '[batchSize, channelSize, inputSize]

input

-> Tensor device dtype '[batchSize, channelSize, outputSize]

output

adaptiveAvgPool1d TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = adaptiveAvgPool1d @8 (ones :: CPUTensor 'D.Float '[1,3,16])
>>> shape t
[1,3,8]
>>> :t t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 3, 8]

adaptiveMaxPool1d Source #

Arguments

:: forall outputSize channelSize inputSize batchSize dtype device. All KnownNat '[channelSize, inputSize, batchSize, outputSize] 
=> Tensor device dtype '[batchSize, channelSize, inputSize]

input

-> (Tensor device dtype '[batchSize, channelSize, outputSize], Tensor device 'Int64 '[batchSize, channelSize, outputSize])

output

adaptiveMaxPool1d TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> tt = adaptiveMaxPool1d @8 (ones :: CPUTensor 'D.Float '[1,3,16])
>>> shape . fst $ tt
[1,3,8]
>>> :t tt
tt
  :: (Tensor '( 'D.CPU, 0) 'D.Float '[1, 3, 8],
      Tensor '( 'D.CPU, 0) 'D.Int64 '[1, 3, 8])

addmv Source #

Arguments

:: forall shape' shape n m dtype device. (KnownNat n, KnownNat m, shape' ~ Broadcast shape '[n]) 
=> Float

beta

-> Float

alpha

-> Tensor device dtype '[n, m]

matrix

-> Tensor device dtype '[m]

vector

-> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

addmv TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: can we use D.Scalar for beta and alpha?

>>> t = addmv 1 1 (ones :: CPUTensor 'D.Float '[3,2]) (zeros :: CPUTensor 'D.Float '[2]) (ones :: CPUTensor 'D.Float '[])
>>> dtype &&& shape $ t
(Float,[3])
>>> :t t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[3]

allclose Source #

Arguments

:: forall shape dtype device. Double

relative tolerance

-> Double

absolute tolerance

-> Bool

whether or not NaN equals NaN

-> Tensor device dtype shape

input tensor

-> Tensor device dtype shape

other input tensor

-> Bool

output

allclose

>>> allclose 0.1 0.1 True (ones :: CPUTensor 'D.Float '[3,3]) (ones :: CPUTensor 'D.Float '[3,3])
True

argmax Source #

Arguments

:: forall dim keepOrDropDim shape' shape dtype device. (KnownNat dim, KnownKeepOrDropDim keepOrDropDim, shape' ~ ConditionalDropDimension shape dim keepOrDropDim, StandardDTypeValidation device dtype) 
=> Tensor device dtype shape

input

-> Tensor device 'Int64 shape'

output

argmax See https://pytorch.org/docs/stable/torch.html#torch.argmax.

>>> t = fromJust [[0, 1], [-1, 2], [0, 1], [0, -2]] :: CPUTensor 'D.Float '[4, 2]
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [Int]) $ argmax @1 @DropDim t
(Int64,([4],[1,1,1,0]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[Int]]) $ argmax @1 @KeepDim t
(Int64,([4,1],[[1],[1],[1],[0]]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [Int]) $ argmax @0 @DropDim t
(Int64,([2],[0,1]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[Int]]) $ argmax @0 @KeepDim t
(Int64,([1,2],[[0,1]]))

argmin Source #

Arguments

:: forall dim keepOrDropDim shape' shape dtype device. (KnownNat dim, KnownKeepOrDropDim keepOrDropDim, shape' ~ ConditionalDropDimension shape dim keepOrDropDim, StandardDTypeValidation device dtype) 
=> Tensor device dtype shape

input

-> Tensor device 'Int64 shape'

output

argmin See https://pytorch.org/docs/stable/torch.html#torch.argmin.

>>> t = fromJust [[0, 1], [-1, 2], [0, 1], [0, -2]] :: CPUTensor 'D.Float '[4, 2]
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [Int]) $ argmin @1 @DropDim t
(Int64,([4],[0,0,0,1]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[Int]]) $ argmin @1 @KeepDim t
(Int64,([4,1],[[0],[0],[0],[1]]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [Int]) $ argmin @0 @DropDim t
(Int64,([2],[1,3]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[Int]]) $ argmin @0 @KeepDim t
(Int64,([1,2],[[1,3]]))

asin :: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype => Tensor device dtype shape -> Tensor device dtype shape Source #

asin

>>> dtype &&& shape $ asin (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

atan :: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype => Tensor device dtype shape -> Tensor device dtype shape Source #

atan

>>> dtype &&& shape $ atan (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

baddbmm Source #

Arguments

:: forall shape' shape batchSize n m k dtype device. (KnownNat n, KnownNat m, KnownNat k, shape' ~ Broadcast shape '[batchSize, n, m]) 
=> Float

beta

-> Float

alpha

-> Tensor device dtype '[batchSize, n, k]

first batch

-> Tensor device dtype '[batchSize, k, m]

second batch

-> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

baddbmm TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = baddbmm 1 1 (ones :: CPUTensor 'D.Float '[5,3,2]) (zeros :: CPUTensor 'D.Float '[5,2,4]) (ones :: CPUTensor 'D.Float '[])
>>> dtype &&& shape $ t
(Float,[5,3,4])
>>> :t t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[5, 3, 4]

bmm Source #

Arguments

:: forall batchSize n m k dtype device. Tensor device dtype '[batchSize, n, k]

input

-> Tensor device dtype '[batchSize, k, m]

other input

-> Tensor device dtype '[batchSize, n, m]

output

batched matrix multiplication TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ bmm (ones :: CPUTensor 'D.Float '[5,3,2]) (zeros :: CPUTensor 'D.Float '[5,2,4])
(Float,[5,3,4])

type family BroadcastTensorsImpl (tensors :: [a]) (acc :: Maybe ([Nat], DType, (DeviceType, Nat))) :: Maybe ([Nat], DType, (DeviceType, Nat)) where ... Source #

BroadcastTensorsImpl

>>> type Ty = BroadcastTensorsImpl '[] 'Nothing
>>> :kind! Ty
Ty :: Maybe ([Natural], D.DType, (D.DeviceType, Natural))
= 'Nothing
>>> type Ty = BroadcastTensorsImpl '[Tensor '( 'D.CPU, 0) 'D.Float '[1, 3], Tensor '( 'D.CPU, 0) 'D.Float '[2, 1]] 'Nothing
>>> :kind! Ty
Ty :: Maybe ([Natural], D.DType, (D.DeviceType, Natural))
= 'Just '( '[2, 3], 'D.Float, '( 'D.CPU, 0))
>>> type Ty = BroadcastTensorsImpl '[Tensor '( 'D.CPU, 0) 'D.Float '[1, 3], Tensor '( 'D.CPU, 0) 'D.Float '[2, 1], Tensor '( 'D.CPU, 0) 'D.Float '[5, 1, 1]] 'Nothing
>>> :kind! Ty
Ty :: Maybe ([Natural], D.DType, (D.DeviceType, Natural))
= 'Just '( '[5, 2, 3], 'D.Float, '( 'D.CPU, 0))
>>> type Ty = BroadcastTensorsImpl '[Tensor '( 'D.CPU, 0) 'D.Float '[1, 3], Tensor '( 'D.CPU, 0) 'D.Float '[2, 1], Tensor '( 'D.CPU, 0) 'D.Float '[1, 5, 1]] 'Nothing
>>> :kind! Ty
Ty :: Maybe ([Natural], D.DType, (D.DeviceType, Natural))
= 'Nothing

Equations

BroadcastTensorsImpl '[] 'Nothing = 'Nothing 
BroadcastTensorsImpl '[] ('Just '(reverseShape, dtype, device)) = 'Just '(Reverse reverseShape, dtype, device) 
BroadcastTensorsImpl (Tensor device dtype shape ': tensors) 'Nothing = BroadcastTensorsImpl tensors ('Just '(Reverse shape, dtype, device)) 
BroadcastTensorsImpl (Tensor device dtype shape ': tensors) ('Just '(reverseShape', dtype, device)) = BroadcastTensorsImpl tensors (MaybeTriple (ComputeBroadcast (Reverse shape) reverseShape') ('Just dtype) ('Just device)) 
BroadcastTensorsImpl (Tensor device dtype shape ': _) ('Just _) = Nothing 

type family BroadcastTensorsCheck (tensors :: [a]) (result :: Maybe ([Nat], DType, (DeviceType, Nat))) :: [a] where ... Source #

Equations

BroadcastTensorsCheck tensors 'Nothing = TypeError (Text "Cannot broadcast tensors due to incompatible shapes and/or dtypes: " :<>: ShowType tensors) 
BroadcastTensorsCheck tensors ('Just '(shape, dtype, device)) = HReplicateR (ListLength tensors) (Tensor device dtype shape) 

broadcastTensors Source #

Arguments

:: forall tensors tensors'. (tensors' ~ BroadcastTensors tensors, Castable (HList tensors) [ATenTensor], Castable (HList tensors') [ATenTensor]) 
=> HList tensors

input list of tensors

-> HList tensors'

output list of tensors

broadcast tensors TODO: broadcastTensors returns garbage data and is hence broken. See https://pytorch.org/docs/stable/_modules/torch/functional.html#broadcast_tensors.

>>> x = ones :: CPUTensor 'D.Float '[1, 3]
>>> y = ones :: CPUTensor 'D.Float '[2, 1]
>>> z = ones :: CPUTensor 'D.Float '[5, 1, 1]

The following usage is disabled while broadcastTensors is broken; the expected behavior is:

>>> x' :. y' :. z' :. HNil = broadcastTensors (x :. y :. z :. HNil)
>>> :type x'
x' :: Tensor '( 'D.CPU, 0) 'D.Float '[5, 2, 3]
>>> dtype &&& shape &&& (\t -> D.asValue (toDynamic t) :: [[[Float]]]) $ x'
>>> :type y'
y' :: Tensor '( 'D.CPU, 0) 'D.Float '[5, 2, 3]
>>> dtype &&& shape &&& (\t -> D.asValue (toDynamic t) :: [[[Float]]]) $ y'
>>> :type z'
z' :: Tensor '( 'D.CPU, 0) 'D.Float '[5, 2, 3]
>>> dtype &&& shape &&& (\t -> D.asValue (toDynamic t) :: [[[Float]]]) $ z'

type family CatImpl (dim :: Nat) (tensors :: [a]) (acc :: Maybe ([Nat], DType, (DeviceType, Nat))) :: Maybe ([Nat], DType, (DeviceType, Nat)) where ... Source #

Equations

CatImpl _ '[] acc = acc 
CatImpl dim (Tensor device dtype shape ': tensors) acc = CatImpl dim tensors (MaybeTriple (ComputeCatShape dim shape acc) (ComputeCatDType dtype acc) (ComputeCatDevice device acc)) 

type family ComputeCatShape (dim :: Nat) (shape :: [Nat]) (acc :: Maybe ([Nat], DType, (DeviceType, Nat))) :: Maybe [Nat] where ... Source #

Equations

ComputeCatShape 0 (x ': xs) Nothing = Just (x ': xs) 
ComputeCatShape dim (x ': xs) Nothing = AppendToMaybe x (ComputeCatShape (dim - 1) xs Nothing) 
ComputeCatShape 0 (x ': xs) (Just '(y ': xs, _, _)) = Just ((x + y) ': xs) 
ComputeCatShape dim (x ': xs) (Just '(x ': ys, dtype, device)) = AppendToMaybe x (ComputeCatShape (dim - 1) xs (Just '(ys, dtype, device))) 
ComputeCatShape _ _ _ = Nothing 
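
For example, concatenating along dimension 0 sums the leading extents; hand-evaluating the equations above (a sketch, matching the Cat example for '[1, 3] and '[2, 3] further down):

>>> :kind! ComputeCatShape 0 '[2, 3] ('Just '( '[1, 3], 'D.Float, '( 'D.CPU, 0)))
ComputeCatShape 0 '[2, 3] ('Just '( '[1, 3], 'D.Float, '( 'D.CPU, 0))) :: Maybe [Natural]
= 'Just '[3, 3]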

type family ComputeCatDType (dtype :: DType) (acc :: Maybe ([Nat], DType, (DeviceType, Nat))) :: Maybe DType where ... Source #

Equations

ComputeCatDType dtype Nothing = Just dtype 
ComputeCatDType dtype (Just '(_, dtype, _)) = Just dtype 
ComputeCatDType _ _ = Nothing 

type family ComputeCatDevice (device :: (DeviceType, Nat)) (acc :: Maybe ([Nat], DType, (DeviceType, Nat))) :: Maybe (DeviceType, Nat) where ... Source #

Equations

ComputeCatDevice device Nothing = Just device 
ComputeCatDevice device (Just '(_, _, device)) = Just device 
ComputeCatDevice _ _ = Nothing 

type family CatCheck (res :: Maybe ([Nat], DType, (DeviceType, Nat))) :: ([Nat], DType, (DeviceType, Nat)) where ... Source #

Equations

CatCheck 'Nothing = TypeError (Text "Concatenation impossible.") 
CatCheck ('Just '(shape, dtype, device)) = '(shape, dtype, device) 

type Cat dim tensors = CatCheck (CatImpl dim tensors Nothing) Source #

Cat

>>> type Ty = Cat 0 '[Tensor '( 'D.CPU, 0) 'D.Float '[1]]
>>> :kind! Ty
Ty :: ([Natural], D.DType, (D.DeviceType, Natural))
= '( '[1], 'D.Float, '( 'D.CPU, 0))
>>> type Ty = Cat 0 '[Tensor '( 'D.CPU, 0) 'D.Float '[1], Tensor '( 'D.CPU, 0) 'D.Float '[2]]
>>> :kind! Ty
Ty :: ([Natural], D.DType, (D.DeviceType, Natural))
= '( '[3], 'D.Float, '( 'D.CPU, 0))
>>> type Ty = Cat 0 '[Tensor '( 'D.CPU, 0) 'D.Float '[1, 3], Tensor '( 'D.CPU, 0) 'D.Float '[2, 3]]
>>> :kind! Ty
Ty :: ([Natural], D.DType, (D.DeviceType, Natural))
= '( '[3, 3], 'D.Float, '( 'D.CPU, 0))
>>> type Ty = Cat 1 '[Tensor '( 'D.CPU, 0) 'D.Float '[3, 1], Tensor '( 'D.CPU, 0) 'D.Float '[3, 2]]
>>> :kind! Ty
Ty :: ([Natural], D.DType, (D.DeviceType, Natural))
= '( '[3, 3], 'D.Float, '( 'D.CPU, 0))
>>> type Ty = Cat 1 '[Tensor '( 'D.CPU, 0) 'D.Float '[2, 5, 4, 2], Tensor '( 'D.CPU, 0) 'D.Float '[2, 1, 4, 2], Tensor '( 'D.CPU, 0) 'D.Float '[2, 3, 4, 2], Tensor '( 'D.CPU, 0) 'D.Float '[2, 1, 4, 2]]
>>> :kind! Ty
Ty :: ([Natural], D.DType, (D.DeviceType, Natural))
= '( '[2, 10, 4, 2], 'D.Float, '( 'D.CPU, 0))

cat Source #

Arguments

:: forall dim shape dtype device tensors. (KnownNat dim, '(shape, dtype, device) ~ Cat dim tensors, Castable (HList tensors) [ATenTensor]) 
=> HList tensors

input list of tensors

-> Tensor device dtype shape

output tensor

cat

>>> t = ones :: CPUTensor 'D.Float '[2,2]
>>> t' = cat @0 (t :. HNil)
>>> :type t'
t' :: Tensor '( 'D.CPU, 0) 'D.Float '[2, 2]
>>> dtype &&& shape &&& (\t'' -> D.asValue (toDynamic t'') :: [[Float]]) $ t'
(Float,([2,2],[[1.0,1.0],[1.0,1.0]]))
>>> t' = cat @1 (t :. HNil)
>>> :type t'
t' :: Tensor '( 'D.CPU, 0) 'D.Float '[2, 2]
>>> dtype &&& shape &&& (\t'' -> D.asValue (toDynamic t'') :: [[Float]]) $ t'
(Float,([2,2],[[1.0,1.0],[1.0,1.0]]))
>>> t' = cat @0 (t :. t :. t :. HNil)
>>> :type t'
t' :: Tensor '( 'D.CPU, 0) 'D.Float '[6, 2]
>>> dtype &&& shape &&& (\t'' -> D.asValue (toDynamic t'') :: [[Float]]) $ t'
(Float,([6,2],[[1.0,1.0],[1.0,1.0],[1.0,1.0],[1.0,1.0],[1.0,1.0],[1.0,1.0]]))
>>> t' = cat @1 (t :. t :. t :. HNil)
>>> :type t'
t' :: Tensor '( 'D.CPU, 0) 'D.Float '[2, 6]
>>> dtype &&& shape &&& (\t'' -> D.asValue (toDynamic t'') :: [[Float]]) $ t'
(Float,([2,6],[[1.0,1.0,1.0,1.0,1.0,1.0],[1.0,1.0,1.0,1.0,1.0,1.0]]))

type family ChunkImpl (chunkShapes :: Maybe [[Nat]]) (dtype :: DType) (device :: (DeviceType, Nat)) :: Maybe a where ... Source #

Equations

ChunkImpl (Just '[]) _ _ = Just '[] 
ChunkImpl (Just (shape ': shapes)) dtype device = AppendToMaybe (Tensor device dtype shape) (ChunkImpl (Just shapes) dtype device) 
ChunkImpl Nothing _ _ = Nothing 

type family ChunkCheck (shape :: [Nat]) (dim :: Nat) (result :: Maybe a) :: a where ... Source #

Equations

ChunkCheck shape dim Nothing = DimOutOfBound shape dim 
ChunkCheck _ _ (Just result) = result 

type family ComputeChunksChunkGo (n' :: Nat) (r :: Nat) (cmp :: Ordering) (cmp' :: Ordering) :: [Nat] where ... Source #

Equations

ComputeChunksChunkGo n' r GT _ = n' ': ComputeChunksChunkGo n' (r - n') (CmpNat (r - n') n') (CmpNat (r - n') 0) 
ComputeChunksChunkGo n' r EQ _ = n' ': ComputeChunksChunkGo n' (r - n') (CmpNat (r - n') n') (CmpNat (r - n') 0) 
ComputeChunksChunkGo n' r _ GT = '[r] 
ComputeChunksChunkGo n' _ _ _ = '[] 

type family ComputeChunksChunkGo0 (n' :: Nat) (chunks :: Nat) :: [Nat] where ... Source #

Equations

ComputeChunksChunkGo0 _ 0 = '[] 
ComputeChunksChunkGo0 n' chunks = n' ': ComputeChunksChunkGo0 n' (chunks - 1) 

type family ComputeChunks (n :: Maybe Nat) (chunks :: Nat) :: Maybe [Nat] where ... Source #

Equations

ComputeChunks (Just n) chunks = Just (ComputeChunks' n chunks (Mod n chunks)) 
ComputeChunks Nothing _ = Nothing 

type family ComputeChunks' (n :: Nat) (chunks :: Nat) (m :: Nat) :: [Nat] where ... Source #

Equations

ComputeChunks' n chunks 0 = ComputeChunksChunkGo0 (Div n chunks) chunks 
ComputeChunks' n chunks _ = ComputeChunksChunkGo (Div ((n + chunks) - 1) chunks) n (CmpNat n (Div ((n + chunks) - 1) chunks)) (CmpNat n 0) 
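
Hand-evaluating the equations above (a sketch): splitting 19 elements into at most 6 chunks rounds the chunk size up to 4 and puts the remainder of 3 in a final, smaller chunk:

>>> :kind! ComputeChunks ('Just 19) 6
ComputeChunks ('Just 19) 6 :: Maybe [Natural]
= 'Just '[4, 4, 4, 4, 3]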

type family ChunkShapesImpl (chunks :: Maybe [Nat]) (dim :: Nat) (shape :: [Nat]) :: Maybe [[Nat]] where ... Source #

Equations

ChunkShapesImpl (Just (n ': ns)) dim shape = AppendToMaybe' (ReplaceDim dim shape n) (ChunkShapesImpl (Just ns) dim shape) 
ChunkShapesImpl (Just '[]) _ _ = Just '[] 
ChunkShapesImpl Nothing _ _ = Nothing 

type ChunkShapes chunks dim shape = ChunkShapesImpl (ComputeChunks (ExtractDim dim shape) chunks) dim shape Source #
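
Hand-evaluating ChunkShapes for the '[19, 4] input used in the chunk examples below (a sketch), each computed chunk size replaces the chunked dimension:

>>> :kind! ChunkShapes 6 0 '[19, 4]
ChunkShapes 6 0 '[19, 4] :: Maybe [[Natural]]
= 'Just '[ '[4, 4], '[4, 4], '[4, 4], '[4, 4], '[3, 4]]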

type Chunk chunks dim shape dtype device = ChunkCheck shape dim (ChunkImpl (ChunkShapes chunks dim shape) dtype device) Source #

chunk Source #

Arguments

:: forall chunks dim shape dtype device tensorChunks. (KnownNat chunks, KnownNat dim, tensorChunks ~ Chunk chunks dim shape dtype device, Castable (HList tensorChunks) [ATenTensor]) 
=> Tensor device dtype shape

input tensor

-> HList tensorChunks

output list of tensors

chunk

>>> :type chunk @3 @1 (ones :: CPUTensor 'D.Float '[2, 2])
chunk @3 @1 (ones :: CPUTensor 'D.Float '[2, 2])
  :: HList
       '[Tensor '( 'D.CPU, 0) 'D.Float '[2, 1],
         Tensor '( 'D.CPU, 0) 'D.Float '[2, 1]]
>>> t0 :. t1 :. HNil = chunk @3 @1 (ones :: CPUTensor 'D.Float '[2, 2])
>>> dtype &&& shape $ t0
(Float,[2,1])
>>> dtype &&& shape $ t1
(Float,[2,1])
>>> :type chunk @3 @1 (ones :: CPUTensor 'D.Float '[1, 0, 3])
chunk @3 @1 (ones :: CPUTensor 'D.Float '[1, 0, 3])
  :: HList
       '[Tensor '( 'D.CPU, 0) 'D.Float '[1, 0, 3],
         Tensor '( 'D.CPU, 0) 'D.Float '[1, 0, 3],
         Tensor '( 'D.CPU, 0) 'D.Float '[1, 0, 3]]
>>> t0 :. t1 :. t2 :. HNil = chunk @3 @1 (ones :: CPUTensor 'D.Float '[1, 0, 3])
>>> dtype &&& shape $ t0
(Float,[1,0,3])
>>> dtype &&& shape $ t1
(Float,[1,0,3])
>>> dtype &&& shape $ t2
(Float,[1,0,3])
>>> :type chunk @6 @0 (ones :: CPUTensor 'D.Float '[19, 4])
chunk @6 @0 (ones :: CPUTensor 'D.Float '[19, 4])
  :: HList
       '[Tensor '( 'D.CPU, 0) 'D.Float '[4, 4],
         Tensor '( 'D.CPU, 0) 'D.Float '[4, 4],
         Tensor '( 'D.CPU, 0) 'D.Float '[4, 4],
         Tensor '( 'D.CPU, 0) 'D.Float '[4, 4],
         Tensor '( 'D.CPU, 0) 'D.Float '[3, 4]]
>>> t0 :. t1 :. t2 :. t3 :. t4 :. HNil = chunk @6 @0 (ones :: CPUTensor 'D.Float '[19, 4])
>>> dtype &&& shape $ t0
(Float,[4,4])
>>> dtype &&& shape $ t1
(Float,[4,4])
>>> dtype &&& shape $ t2
(Float,[4,4])
>>> dtype &&& shape $ t3
(Float,[4,4])
>>> dtype &&& shape $ t4
(Float,[3,4])

clamp Source #

Arguments

:: forall shape dtype device a. Scalar a 
=> a

minimum value

-> a

maximum value

-> Tensor device dtype shape

input

-> Tensor device dtype shape

output

clamp TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: can we use D.Scalar for the minimum and maximum values?

>>> dtype &&& shape $ clamp 0 1 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

clampMax Source #

Arguments

:: forall shape dtype device a. Scalar a 
=> a

maximum value

-> Tensor device dtype shape

input

-> Tensor device dtype shape

output

clampMax TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: can we use D.Scalar for the maximum value?

>>> dtype &&& shape $ clampMax 1 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

clampMin Source #

Arguments

:: forall shape dtype device a. Scalar a 
=> a

minimum value

-> Tensor device dtype shape

input

-> Tensor device dtype shape

output

clampMin TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: can we use D.Scalar for the minimum value?

>>> dtype &&& shape $ clampMin 0 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

cudnnIsAcceptable Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Bool

output

cudnnIsAcceptable TODO: calling this probably only makes sense when the device is CUDA

constantPadNd1d :: forall (pad :: (Nat, Nat)) n dtype device. All KnownNat '[Fst pad, Snd pad, n] => Float -> Tensor device dtype '[n] -> Tensor device dtype '[(n + Fst pad) + Snd pad] Source #
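
A usage sketch (presumably Fst pad elements are added on the left and Snd pad on the right; only dtype and shape are checked here):

>>> dtype &&& shape $ constantPadNd1d @'(1, 2) 0.0 (ones :: CPUTensor 'D.Float '[3])
(Float,[6])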

type ConvSideCheck (inputSize :: Nat) (kernelSize :: Nat) (stride :: Nat) (padding :: Nat) (outputSize :: Nat) = (1 <= kernelSize, 1 <= stride, (kernelSize - 1) <= (inputSize + (2 * padding)), 1 <= outputSize, outputSize ~ ConvOutputSize inputSize kernelSize stride padding) Source #

type family ConvOutputSize (inputSize :: Nat) (kernelSize :: Nat) (stride :: Nat) (padding :: Nat) :: Nat where ... Source #

ConvOutputSize

>>> :kind! ConvOutputSize 4 1 1 0
ConvOutputSize 4 1 1 0 :: Natural
= 4

Equations

ConvOutputSize inputSize kernelSize stride padding = Div ((inputSize + (2 * padding)) - kernelSize) stride + 1 
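
With a nontrivial stride and padding, the equation gives Div ((5 + 2 * 1) - 3) 2 + 1 = 3:

>>> :kind! ConvOutputSize 5 3 2 1
ConvOutputSize 5 3 2 1 :: Natural
= 3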

conv1d Source #

Arguments

:: forall (stride :: Nat) (padding :: Nat) inputChannelSize outputChannelSize kernelSize inputSize batchSize outputSize dtype device. (All KnownNat '[stride, padding, inputChannelSize, outputChannelSize, kernelSize, inputSize, batchSize, outputSize], ConvSideCheck inputSize kernelSize stride padding outputSize) 
=> Tensor device dtype '[outputChannelSize, inputChannelSize, kernelSize]

weight

-> Tensor device dtype '[outputChannelSize]

bias

-> Tensor device dtype '[batchSize, inputChannelSize, inputSize]

input

-> Tensor device dtype '[batchSize, outputChannelSize, outputSize]

output

conv1d TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = conv1d @1 @0 (ones :: CPUTensor 'D.Float '[10, 3, 1]) (ones :: CPUTensor 'D.Float '[10]) (ones :: CPUTensor 'D.Float '[1, 3, 4])
>>> :type t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 10, 4]
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[[Float]]]) $ t
(Float,([1,10,4],[[[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0]]]))

conv2d Source #

Arguments

:: forall (stride :: (Nat, Nat)) (padding :: (Nat, Nat)) inputChannelSize outputChannelSize kernelSize0 kernelSize1 inputSize0 inputSize1 batchSize outputSize0 outputSize1 dtype device. (All KnownNat '[Fst stride, Snd stride, Fst padding, Snd padding, inputChannelSize, outputChannelSize, kernelSize0, kernelSize1, inputSize0, inputSize1, batchSize, outputSize0, outputSize1], ConvSideCheck inputSize0 kernelSize0 (Fst stride) (Fst padding) outputSize0, ConvSideCheck inputSize1 kernelSize1 (Snd stride) (Snd padding) outputSize1) 
=> Tensor device dtype '[outputChannelSize, inputChannelSize, kernelSize0, kernelSize1]

weight

-> Tensor device dtype '[outputChannelSize]

bias

-> Tensor device dtype '[batchSize, inputChannelSize, inputSize0, inputSize1]

input

-> Tensor device dtype '[batchSize, outputChannelSize, outputSize0, outputSize1]

output

conv2d TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = conv2d @'(1, 1) @'(0, 0) (ones :: CPUTensor 'D.Float '[10, 3, 1, 1]) (ones :: CPUTensor 'D.Float '[10]) (ones :: CPUTensor 'D.Float '[1, 3, 4, 5])
>>> :type t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 10, 4, 5]
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[[[Float]]]]) $ t
(Float,([1,10,4,5],[[[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]]]]))

conv3d Source #

Arguments

:: forall (stride :: (Nat, Nat, Nat)) (padding :: (Nat, Nat, Nat)) inputChannelSize outputChannelSize kernelSize0 kernelSize1 kernelSize2 inputSize0 inputSize1 inputSize2 batchSize outputSize0 outputSize1 outputSize2 dtype device. (All KnownNat '[Fst3 stride, Snd3 stride, Trd3 stride, Fst3 padding, Snd3 padding, Trd3 padding, inputChannelSize, outputChannelSize, kernelSize0, kernelSize1, kernelSize2, inputSize0, inputSize1, inputSize2, batchSize], ConvSideCheck inputSize0 kernelSize0 (Fst3 stride) (Fst3 padding) outputSize0, ConvSideCheck inputSize1 kernelSize1 (Snd3 stride) (Snd3 padding) outputSize1, ConvSideCheck inputSize2 kernelSize2 (Trd3 stride) (Trd3 padding) outputSize2) 
=> Tensor device dtype '[outputChannelSize, inputChannelSize, kernelSize0, kernelSize1, kernelSize2]

weight

-> Tensor device dtype '[outputChannelSize]

bias

-> Tensor device dtype '[batchSize, inputChannelSize, inputSize0, inputSize1, inputSize2]

input

-> Tensor device dtype '[batchSize, outputChannelSize, outputSize0, outputSize1, outputSize2]

output

conv3d TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = conv3d @'(1, 1, 1) @'(0, 0, 0) (ones :: CPUTensor 'D.Float '[10, 3, 1, 1, 1]) (ones :: CPUTensor 'D.Float '[10]) (ones :: CPUTensor 'D.Float '[1, 3, 4, 5, 6])
>>> :type t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 10, 4, 5, 6]
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[[[[Float]]]]]) $ t
(Float,([1,10,4,5,6],[[[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0
,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]]]]))

convTBC :: forall padding timeSize batchSize kernelSize inputChannels outputChannels dtype device. KnownNat padding => Tensor device dtype '[kernelSize, inputChannels, outputChannels] -> Tensor device dtype '[outputChannels] -> Tensor device dtype '[timeSize, batchSize, inputChannels] -> Tensor device dtype '[((timeSize + (padding * 2)) + 1) - kernelSize, batchSize, outputChannels] Source #

convTBC: 1D convolution over an input of shape `[timeSize, batchSize, inputChannels]`. TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

convTranspose1d Source #

Arguments

:: forall (stride :: Nat) (padding :: Nat) inputChannelSize outputChannelSize kernelSize inputSize batchSize outputSize dtype device. (All KnownNat '[stride, padding, inputChannelSize, outputChannelSize, kernelSize, inputSize, batchSize, outputSize], ConvSideCheck inputSize kernelSize stride padding outputSize) 
=> Tensor device dtype '[inputChannelSize, outputChannelSize, kernelSize]

weight

-> Tensor device dtype '[outputChannelSize]

bias

-> Tensor device dtype '[batchSize, inputChannelSize, inputSize]

input

-> Tensor device dtype '[batchSize, outputChannelSize, outputSize]

output

convTranspose1d TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = convTranspose1d @1 @0 (ones :: CPUTensor 'D.Float '[3, 10, 1]) (ones :: CPUTensor 'D.Float '[10]) (ones :: CPUTensor 'D.Float '[1, 3, 4])
>>> :type t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 10, 4]
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[[Float]]]) $ t
(Float,([1,10,4],[[[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0]]]))

convTranspose2d Source #

Arguments

:: forall (stride :: (Nat, Nat)) (padding :: (Nat, Nat)) inputChannelSize outputChannelSize kernelSize0 kernelSize1 inputSize0 inputSize1 batchSize outputSize0 outputSize1 dtype device. (All KnownNat '[Fst stride, Snd stride, Fst padding, Snd padding, inputChannelSize, outputChannelSize, kernelSize0, kernelSize1, inputSize0, inputSize1, batchSize, outputSize0, outputSize1], ConvSideCheck inputSize0 kernelSize0 (Fst stride) (Fst padding) outputSize0, ConvSideCheck inputSize1 kernelSize1 (Snd stride) (Snd padding) outputSize1) 
=> Tensor device dtype '[inputChannelSize, outputChannelSize, kernelSize0, kernelSize1]

weight

-> Tensor device dtype '[outputChannelSize]

bias

-> Tensor device dtype '[batchSize, inputChannelSize, inputSize0, inputSize1]

input

-> Tensor device dtype '[batchSize, outputChannelSize, outputSize0, outputSize1]

output

convTranspose2d TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = convTranspose2d @'(1, 1) @'(0, 0) (ones :: CPUTensor 'D.Float '[3, 10, 1, 1]) (ones :: CPUTensor 'D.Float '[10]) (ones :: CPUTensor 'D.Float '[1, 3, 4, 5])
>>> :type t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 10, 4, 5]
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[[[Float]]]]) $ t
(Float,([1,10,4,5],[[[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0]]]]))

convTranspose3d Source #

Arguments

:: forall (stride :: (Nat, Nat, Nat)) (padding :: (Nat, Nat, Nat)) inputChannelSize outputChannelSize kernelSize0 kernelSize1 kernelSize2 inputSize0 inputSize1 inputSize2 batchSize outputSize0 outputSize1 outputSize2 dtype device. (All KnownNat '[Fst3 stride, Snd3 stride, Trd3 stride, Fst3 padding, Snd3 padding, Trd3 padding, inputChannelSize, outputChannelSize, kernelSize0, kernelSize1, kernelSize2, inputSize0, inputSize1, inputSize2, batchSize], ConvSideCheck inputSize0 kernelSize0 (Fst3 stride) (Fst3 padding) outputSize0, ConvSideCheck inputSize1 kernelSize1 (Snd3 stride) (Snd3 padding) outputSize1, ConvSideCheck inputSize2 kernelSize2 (Trd3 stride) (Trd3 padding) outputSize2) 
=> Tensor device dtype '[inputChannelSize, outputChannelSize, kernelSize0, kernelSize1, kernelSize2]

weight

-> Tensor device dtype '[outputChannelSize]

bias

-> Tensor device dtype '[batchSize, inputChannelSize, inputSize0, inputSize1, inputSize2]

input

-> Tensor device dtype '[batchSize, outputChannelSize, outputSize0, outputSize1, outputSize2]

output

convTranspose3d TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = convTranspose3d @'(1, 1, 1) @'(0, 0, 0) (ones :: CPUTensor 'D.Float '[3, 10, 1, 1, 1]) (ones :: CPUTensor 'D.Float '[10]) (ones :: CPUTensor 'D.Float '[1, 3, 4, 5, 6])
>>> :type t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 10, 4, 5, 6]
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[[[[Float]]]]]) $ t
(Float,([1,10,4,5,6],[[[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0
,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]],[[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]],[[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0],[4.0,4.0,4.0,4.0,4.0,4.0]]]]]))

cosh Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

cosh

>>> dtype &&& shape $ cosh (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

type family Det (shape :: [Nat]) :: [Nat] where ... Source #

Det

>>> :kind! Det '[2,2]
Det '[2,2] :: [Natural]
= '[]
>>> :kind! Det '[3,2,2]
Det '[3,2,2] :: [Natural]
= '[3]

Equations

Det (n ': (n ': '[])) = '[] 
Det (b ': (n ': (n ': '[]))) = '[b] 
Det _ = TypeError (Text "This shape must be a square matrix or a batched square matrix.") 

det Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Tensor device dtype (Det shape)

output

det TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ det (ones :: CPUTensor 'D.Float '[2,2])
(Float,[])
>>> dtype &&& shape $ det (ones :: CPUTensor 'D.Float '[3,2,2])
(Float,[3])

type family DimsDistinctAscendingCheck (dim1 :: Nat) (dim2 :: Nat) (cmp :: Ordering) :: Constraint where ... Source #

Equations

DimsDistinctAscendingCheck _ _ 'LT = () 
DimsDistinctAscendingCheck dim1 dim2 _ = TypeError (((Text "Dimensions must be distinct and in ascending order, but got " :<>: ShowType dim1) :<>: Text ", ") :<>: ShowType dim2) 

type family DimsDistinctAscending (dim1 :: Nat) (dim2 :: Nat) :: Constraint where ... Source #

Equations

DimsDistinctAscending dim1 dim2 = DimsDistinctAscendingCheck dim1 dim2 (CmpNat dim1 dim2) 

type family DiagEmbedShapeImpl (dim1 :: Nat) (dim2 :: Nat) (shape :: [Nat]) (n :: Nat) :: [Nat] where ... Source #

Equations

DiagEmbedShapeImpl dim1 dim2 shape n = Insert dim1 n (Insert (dim2 - 1) n (Init shape)) 

type family DiagEmbedShape (index :: Nat) (dim1 :: Nat) (dim2 :: Nat) (shape :: [Nat]) :: [Nat] where ... Source #

Equations

DiagEmbedShape index dim1 dim2 shape = DiagEmbedShapeImpl dim1 dim2 shape (Last shape + index) 
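
Hand-evaluating the equations for the first diagEmbed example below (a sketch):

>>> :kind! DiagEmbedShape 0 1 2 '[2, 3]
DiagEmbedShape 0 1 2 '[2, 3] :: [Natural]
= '[2, 3, 3]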

diagEmbed Source #

Arguments

:: forall index dim1 dim2 shape shape' device dtype. (KnownNat index, KnownNat dim1, KnownNat dim2, shape' ~ DiagEmbedShape index dim1 dim2 shape, DimsDistinctAscending dim1 dim2, StandardDTypeValidation device dtype) 
=> Tri 
-> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

diagEmbed

>>> dtype &&& shape $ diagEmbed @0 @1 @2 Upper (ones :: CPUTensor 'D.Float '[2,3])
(Float,[2,3,3])
>>> dtype &&& shape $ diagEmbed @1 @0 @2 Upper (ones :: CPUTensor 'D.Float '[2,3])
(Float,[4,2,4])

type family DiagflatShapeImpl (d :: Nat) :: [Nat] where ... Source #

Equations

DiagflatShapeImpl d = '[d, d] 

type family DiagflatShape (index :: Nat) (shape :: [Nat]) :: [Nat] where ... Source #

Equations

DiagflatShape index shape = DiagflatShapeImpl (Numel shape + index) 
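
Hand-evaluating the equations (a sketch): 3 elements placed on the first superdiagonal need a 4 by 4 matrix:

>>> :kind! DiagflatShape 1 '[3]
DiagflatShape 1 '[3] :: [Natural]
= '[4, 4]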

diagflat Source #

Arguments

:: forall index shape shape' device dtype. (KnownNat index, shape' ~ DiagflatShape index shape, StandardDTypeValidation device dtype) 
=> Tri 
-> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

diagflat

>>> dtype &&& shape $ diagflat @0 Upper (ones :: CPUTensor 'D.Float '[3])
(Float,[3,3])
>>> dtype &&& shape $ diagflat @1 Upper (ones :: CPUTensor 'D.Float '[3])
(Float,[4,4])
>>> dtype &&& shape $ diagflat @0 Upper (ones :: CPUTensor 'D.Float '[2,2])
(Float,[4,4])

type family NDimAtLeastCheck (ndim :: Nat) (shape :: [Nat]) (cmp :: Ordering) :: Constraint where ... Source #

Equations

NDimAtLeastCheck ndim shape 'GT = TypeError (((Text "Input must have at least " :<>: ShowType ndim) :<>: Text " dimensions, but got ") :<>: ShowType (ListLength shape)) 
NDimAtLeastCheck _ _ _ = () 

type family NDimAtLeast (ndim :: Nat) (shape :: [Nat]) :: Constraint where ... Source #

Equations

NDimAtLeast ndim shape = NDimAtLeastCheck ndim shape (CmpNat ndim (ListLength shape)) 

type family DiagonalShape (tri :: Tri) (index :: Nat) (dim1 :: Nat) (dim2 :: Nat) (shape :: [Nat]) :: [Nat] where ... Source #

Equations

DiagonalShape tri index dim1 dim2 shape = Remove (Remove shape dim2) dim1 ++ '[DiagSize tri index (Index shape dim1) (Index shape dim2)] 
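
Hand-evaluating for the second diagonal example below (a sketch that assumes DiagSize 'Upper 1 3 3 reduces to 2): the first superdiagonal of a 3 by 3 matrix has length 2:

>>> :kind! DiagonalShape 'Upper 1 0 1 '[3, 3]
DiagonalShape 'Upper 1 0 1 '[3, 3] :: [Natural]
= '[2]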

diagonal Source #

Arguments

:: forall tri index dim1 dim2 shape shape' device dtype. (KnownTri tri, KnownNat index, KnownNat dim1, KnownNat dim2, NDimAtLeast 2 shape, DimsDistinctAscending dim1 dim2, shape' ~ DiagonalShape tri index dim1 dim2 shape, StandardDTypeValidation device dtype) 
=> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

diagonal

>>> dtype &&& shape $ diagonal @'Upper @0 @0 @1 (ones :: CPUTensor 'D.Float '[3,3])
(Float,[3])
>>> dtype &&& shape $ diagonal @'Upper @1 @0 @1 (ones :: CPUTensor 'D.Float '[3,3])
(Float,[2])
>>> dtype &&& shape $ diagonal @'Lower @1 @1 @2 (ones :: CPUTensor 'D.Float '[2,5,4,2])
(Float,[2,2,4])

type family DotDTypeIsValid (device :: (DeviceType, Nat)) (dtype :: DType) :: Constraint where ... Source #

Equations

DotDTypeIsValid '('CPU, 0) dtype = (DTypeIsNotBool '('CPU, 0) dtype, DTypeIsNotHalf '('CPU, 0) dtype) 
DotDTypeIsValid '('CUDA, deviceIndex) dtype = DTypeIsFloatingPoint '('CUDA, deviceIndex) dtype 
DotDTypeIsValid '(deviceType, _) dtype = UnsupportedDTypeForDevice deviceType dtype 

dot Source #

Arguments

:: forall size dtype device. DotDTypeIsValid device dtype 
=> Tensor device dtype '[size]

input

-> Tensor device dtype '[size]

other input

-> Tensor device dtype '[]

dot product

dot product. Note that this function does not broadcast.
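
A usage sketch (the result is a scalar tensor; only dtype and shape are checked here):

>>> dtype &&& shape $ dot (ones :: CPUTensor 'D.Float '[3]) (ones :: CPUTensor 'D.Float '[3])
(Float,[])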

class KnownMaybeNat (n :: Maybe Nat) where Source #

Instances

Instances details
KnownMaybeNat ('Nothing :: Maybe Nat) Source # 
Instance details

Defined in Torch.Typed.Functional

KnownNat n => KnownMaybeNat ('Just n) Source # 
Instance details

Defined in Torch.Typed.Functional

type family PaddingIdxCheck (idx :: Maybe Nat) (numEmbeds :: Nat) :: Constraint where ... Source #

Equations

PaddingIdxCheck (Just n) numEmbeds = (n + 1) <= numEmbeds 
PaddingIdxCheck Nothing _ = () 

embedding Source #

Arguments

:: forall (paddingIdx :: Maybe Nat) numEmbeds embedDim shape dtype device. (KnownMaybeNat paddingIdx, PaddingIdxCheck paddingIdx numEmbeds) 
=> Bool

whether or not to scale the gradient by the frequencies

-> Bool

whether or not the embedding is sparse

-> Tensor device dtype '[numEmbeds, embedDim]

weights

-> Tensor device 'Int64 shape

indices

-> Tensor device dtype (Reverse (embedDim ': Reverse shape))

output

embedding TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: what about sparsity here? TODO: what output dtypes are supported?

>>> weights = fromJust [[1, 1], [2, 2], [3, 3], [4, 4]] :: CPUTensor 'D.Float '[4, 2]
>>> indices = fromJust [[0], [2], [0], [1]] :: CPUTensor 'D.Int64 '[4, 1]
>>> t = embedding @('Just 1) False False weights indices
>>> :type t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[4, 1, 2]
The value returned for the padding index depends on the libtorch version. libtorch 1.7:

>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[[Float]]]) $ t
(Float,([4,1,2],[[[1.0,1.0]],[[3.0,3.0]],[[1.0,1.0]],[[2.0,2.0]]]))

libtorch 1.8 changed this behavior (see https://github.com/pytorch/pytorch/issues/53368):

(Float,([4,1,2],[[[1.0,1.0]],[[3.0,3.0]],[[1.0,1.0]],[[0.0,0.0]]]))

libtorch 1.8.1 reverted to the behavior of 1.7:

>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[[Float]]]) $ t
(Float,([4,1,2],[[[1.0,1.0]],[[3.0,3.0]],[[1.0,1.0]],[[2.0,2.0]]]))
>>> t = embedding @'Nothing False False weights indices
>>> :type t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[4, 1, 2]
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[[Float]]]) $ t
(Float,([4,1,2],[[[1.0,1.0]],[[3.0,3.0]],[[1.0,1.0]],[[2.0,2.0]]]))

emptyLike Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> IO (Tensor device dtype shape)

output

emptyLike TODO: this seems quite unsafe, the values of this tensor will be random

>>> t <- emptyLike (ones :: CPUTensor 'D.Float '[3,4,5])
>>> dtype &&& shape $ t
(Float,[3,4,5])

erfc Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

erfc

>>> dtype &&& shape $ erfc (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

expm1 Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

expm1 TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ expm1 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

expand Source #

Arguments

:: forall shape' shape dtype device. (KnownShape shape', shape' ~ Broadcast shape shape') 
=> Bool

some boolean value with unknown function

-> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

expand TODO: figure out what the implicit boolean value does

>>> t = ones :: CPUTensor 'D.Float '[2]
>>> t' = expand @'[3, 1, 2] False t
>>> dtype &&& shape $ t'
(Float,[3,1,2])
>>> t'' = expand @'[3, 1, 2] True t
>>> dtype &&& shape $ t''
(Float,[3,1,2])
>>> toInt (all (t' ==. t'')) == 1
True

flattenAll Source #

Arguments

:: forall shape dtype device. KnownShape shape 
=> Tensor device dtype shape

input

-> Tensor device dtype '[Product shape]

output

flattenAll

>>> t = flattenAll (ones :: CPUTensor 'D.Float '[3,2])
>>> dtype &&& shape $ t
(Float,[6])
>>> :t t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[6]

frac Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

frac

>>> dtype &&& shape $ frac (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

fullLike Source #

Arguments

:: forall shape dtype device. Float

fill value

-> Tensor device dtype shape

input

-> Tensor device dtype shape

output

full like

>>> dtype &&& shape $ fullLike 3.0 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

isclose Source #

Arguments

:: forall shape dtype device. Double

relative tolerance

-> Double

absolute tolerance

-> Bool

whether or not NaN equals NaN

-> Tensor device dtype shape

input tensor

-> Tensor device dtype shape

other input tensor

-> Tensor device 'Bool shape

output

isclose TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ isclose 0.1 0.1 False (ones :: CPUTensor 'D.Float '[3,2]) (ones :: CPUTensor 'D.Float '[3,2])
(Bool,[3,2])

isNaN Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Tensor device 'Bool shape

output

is NaN TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ isNaN (ones :: CPUTensor 'D.Float '[3,2])
(Bool,[3,2])

isDistributed Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Bool

output

is distributed

isFloatingPoint Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Bool

output

is floating point TODO: this can be decided statically
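
A usage sketch ('D.Float is a floating-point dtype):

>>> isFloatingPoint (ones :: CPUTensor 'D.Float '[3])
True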

isComplex Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Bool

output

is complex

isNonZero Source #

Arguments

:: forall shape dtype device. Numel shape ~ 1 
=> Tensor device dtype shape

input

-> Bool

output

is non-zero. This operation is only defined for tensors of shape '[] or '[1].
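
A usage sketch:

>>> isNonZero (zeros :: CPUTensor 'D.Float '[1])
False
>>> isNonZero (ones :: CPUTensor 'D.Float '[1])
True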

isSameSize Source #

Arguments

:: forall shape shape' dtype device. Tensor device dtype shape

input tensor

-> Tensor device dtype shape'

other input tensor

-> Bool

output

is same size TODO: this can be decided statically

isSigned Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Bool

output
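
is signed

A usage sketch ('D.Float is a signed dtype):

>>> isSigned (ones :: CPUTensor 'D.Float '[3])
True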

layerNorm Source #

Arguments

:: forall normalizedShape shape dtype device. (KnownShape normalizedShape, IsSuffixOf normalizedShape shape) 
=> Tensor device dtype normalizedShape

weight

-> Tensor device dtype normalizedShape

bias

-> Double

eps

-> Tensor device dtype shape

input tensor

-> Tensor device dtype shape

output tensor

layerNorm TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: figure out if and when CUDNN works here, tie it also to the device

>>> t = layerNorm @'[1, 2] @'[2, 1, 2] @'D.Float @'( 'D.CPU, 0) ones ones 0.01 ones
>>> :type t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[2, 1, 2]
>>> dtype &&& shape $ t
(Float,[2,1,2])

linear :: forall batchSize inputFeatures outputFeatures dtype device. Tensor device dtype '[outputFeatures, inputFeatures] -> Tensor device dtype '[outputFeatures] -> Tensor device dtype '[batchSize, inputFeatures] -> Tensor device dtype '[batchSize, outputFeatures] Source #

linear TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? https://pytorch.org/docs/stable/_modules/torch/nn/functional.html#linear

>>> w = fromJust [[-0.5, -2,  0.5], [1.5, -0.5, 0.5]] :: CPUTensor 'D.Float '[2, 3]
>>> b = fromJust [0, 0.5] :: CPUTensor 'D.Float '[2]
>>> t = fromJust [[-2, 0.5, 1], [0.5, 0, 0], [0, 1, 0], [0, 0, 0], [1, -1, 0]] :: CPUTensor 'D.Float '[5, 3]
>>> t' = linear w b t
>>> :type t'
t' :: Tensor '( 'D.CPU, 0) 'D.Float '[5, 2]
>>> dtype &&& shape &&& (\t'' -> D.asValue (toDynamic t'') :: [[Float]]) $ t'
(Float,([5,2],[[0.5,-2.25],[-0.25,1.25],[-2.0,0.0],[0.0,0.5],[1.5,2.5]]))

linear' Source #

Arguments

:: forall (inputFeatures :: Nat) (outputFeatures :: Nat) (shape :: [Nat]) (shape' :: [Nat]) dtype device (shape'' :: [Nat]). (shape'' ~ MatMul shape '[inputFeatures, outputFeatures], shape' ~ Broadcast shape'' shape'') 
=> Tensor device dtype '[outputFeatures, inputFeatures]

weight

-> Tensor device dtype '[outputFeatures]

bias

-> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

linear' weight bias input = Torch.Static.add (matmul input $ transpose 0 1 weight) bias

linear' TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: can we use the ATen linear function or not here? https://pytorch.org/docs/stable/_modules/torch/nn/functional.html#linear

>>> w = fromJust [[-0.5, -2,  0.5], [1.5, -0.5, 0.5]] :: CPUTensor 'D.Float '[2, 3]
>>> b = fromJust [0, 0.5] :: CPUTensor 'D.Float '[2]
>>> t = fromJust [[-2, 0.5, 1], [0.5, 0, 0], [0, 1, 0], [0, 0, 0], [1, -1, 0]] :: CPUTensor 'D.Float '[5, 3]
>>> t' = linear' w b t
>>> :type t'
t' :: Tensor '( 'D.CPU, 0) 'D.Float '[5, 2]
>>> dtype &&& shape &&& (\t'' -> D.asValue (toDynamic t'') :: [[Float]]) $ t'
(Float,([5,2],[[0.5,-2.25],[-0.25,1.25],[-2.0,0.0],[0.0,0.5],[1.5,2.5]]))
>>> t = fromJust [[[[-2, 0.5, 1], [0.5, 0, 0], [0, 1, 0], [0, 0, 0], [1, -1, 0]], [[-2, 0.5, 1], [0.5, 0, 0], [0, 1, 0], [0, 0, 0], [1, -1, 0]]]] :: CPUTensor 'D.Float '[1, 2, 5, 3]
>>> t' = linear' w b t
>>> :type t'
t' :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 2, 5, 2]
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[[[Float]]]]) $ t'
(Float,([1,2,5,2],[[[[0.5,-2.25],[-0.25,1.25],[-2.0,0.0],[0.0,0.5],[1.5,2.5]],[[0.5,-2.25],[-0.25,1.25],[-2.0,0.0],[0.0,0.5],[1.5,2.5]]]]))

mkldnnLinear Source #

Arguments

:: forall batchSize inputFeatures outputFeatures dtype device. Tensor device dtype '[outputFeatures, inputFeatures]

weight

-> Tensor device dtype '[outputFeatures]

bias

-> Tensor device dtype '[batchSize, inputFeatures]

input

-> Tensor device dtype '[batchSize, outputFeatures]

output

mkldnnLinear TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: mkldnnLinear does not return a usable tensor value and is hence broken TODO: figure out device for this

>>> w = fromJust [[-0.5, -2,  0.5], [1.5, -0.5, 0.5]] :: CPUTensor 'D.Float '[2, 3]
>>> b = fromJust [0, 0.5] :: CPUTensor 'D.Float '[2]
>>> t = fromJust [[-2, 0.5, 1], [0.5, 0, 0], [0, 1, 0], [0, 0, 0], [1, -1, 0]] :: CPUTensor 'D.Float '[5, 3]
The examples below are disabled while mkldnnLinear is broken:

-- >>> t' = mkldnnLinear (toMKLDNN w) (toMKLDNN b) (toMKLDNN t)
-- >>> :type t'
-- t' :: Tensor '( 'D.CPU, 0) 'D.Float '[5, 2]
-- >>> dtype &&& shape &&& (\t'' -> D.asValue (toDynamic t'') :: [[Float]]) $ t'
-- (Float,([5,2],[[0.5,-2.25],[-0.25,1.25],[-2.0,0.0],[0.0,0.5],[1.5,2.5]]))

log Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Tensor device dtype shape

output

log TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: will log throw for negative numbers or just generate NaNs? should we return a Maybe?

>>> dtype &&& shape $ log (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

logDet Source #

Arguments

:: forall shape' shape dtype device. shape' ~ Det shape 
=> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

logDet TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: will logDet throw? and if so, should we return a Maybe?

>>> dtype &&& shape $ logDet (ones :: CPUTensor 'D.Float '[2,2])
(Float,[])
>>> dtype &&& shape $ logDet (ones :: CPUTensor 'D.Float '[3,2,2])
(Float,[3])

logSumExp Source #

Arguments

:: forall dim keepOrDropDim shape' shape dtype device. (KnownNat dim, KnownKeepOrDropDim keepOrDropDim, Reifies dtype DType, DTypeIsFloatingPoint device dtype, shape' ~ ConditionalDropDimension shape dim keepOrDropDim) 
=> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

logarithm of the sum of the exponentials TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? See https://pytorch.org/docs/stable/torch.html#torch.logsumexp.

>>> t = fromJust [[5, 1], [3, 2], [4, 1], [2, 7]] :: CPUTensor 'D.Float '[4, 2]
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [Float]) $ logSumExp @1 @DropDim t
(Float,([4],[5.01815,3.3132617,4.0485873,7.0067153]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[Float]]) $ logSumExp @1 @KeepDim t
(Float,([4,1],[[5.01815],[3.3132617],[4.0485873],[7.0067153]]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [Float]) $ logSumExp @0 @DropDim t
(Float,([2],[5.44019,7.0116277]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[Float]]) $ logSumExp @0 @KeepDim t
(Float,([1,2],[[5.44019,7.0116277]]))

matrixPower Source #

Arguments

:: forall shape' shape dtype device. shape' ~ Square shape 
=> Int

power

-> Tensor device dtype shape

input matrix

-> Tensor device dtype shape'

output

matrixPower TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: figure out input shape restrictions, should be matrix or a batched matrix TODO: figure out restrictions on the power, can it be zero or negative?

>>> dtype &&& shape $ matrixPower 2 (ones :: CPUTensor 'D.Float '[3,4,4])
(Float,[3,4,4])

maxValues Source #

Arguments

:: forall dim keepOrDropDim shape' shape dtype device. (KnownNat dim, KnownKeepOrDropDim keepOrDropDim, shape' ~ ConditionalDropDimension shape dim keepOrDropDim) 
=> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

maxValues TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = ones :: CPUTensor 'D.Float '[3,4,5]
>>> dtype &&& shape $ maxValues @0 @KeepDim t
(Float,[1,4,5])
>>> dtype &&& shape $ maxValues @0 @DropDim t
(Float,[4,5])
>>> dtype &&& shape $ maxValues @1 @KeepDim t
(Float,[3,1,5])
>>> dtype &&& shape $ maxValues @1 @DropDim t
(Float,[3,5])

minValues Source #

Arguments

:: forall dim keepOrDropDim shape' shape dtype device. (KnownNat dim, KnownKeepOrDropDim keepOrDropDim, shape' ~ ConditionalDropDimension shape dim keepOrDropDim) 
=> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

minValues TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = ones :: CPUTensor 'D.Float '[3,4,5]
>>> dtype &&& shape $ minValues @0 @KeepDim t
(Float,[1,4,5])
>>> dtype &&& shape $ minValues @0 @DropDim t
(Float,[4,5])
>>> dtype &&& shape $ minValues @1 @KeepDim t
(Float,[3,1,5])
>>> dtype &&& shape $ minValues @1 @DropDim t
(Float,[3,5])

maxPool1d Source #

Arguments

:: forall kernelSize stride padding channelSize inputSize batchSize outputSize dtype device. (All KnownNat '[kernelSize, stride, padding, channelSize, inputSize, batchSize], ConvSideCheck inputSize kernelSize stride padding outputSize) 
=> Tensor device dtype '[batchSize, channelSize, inputSize]

input

-> Tensor device dtype '[batchSize, channelSize, outputSize]

output

maxPool1d TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = maxPool1d @1 @1 @0 (ones :: CPUTensor 'D.Float '[1,3,4])
>>> shape t
[1,3,4]
>>> :t t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 3, 4]

maxPool2d Source #

Arguments

:: forall kernelSize stride padding channelSize inputSize0 inputSize1 batchSize outputSize0 outputSize1 dtype device. (All KnownNat '[Fst kernelSize, Snd kernelSize, Fst stride, Snd stride, Fst padding, Snd padding, channelSize, inputSize0, inputSize1, batchSize], ConvSideCheck inputSize0 (Fst kernelSize) (Fst stride) (Fst padding) outputSize0, ConvSideCheck inputSize1 (Snd kernelSize) (Snd stride) (Snd padding) outputSize1) 
=> Tensor device dtype '[batchSize, channelSize, inputSize0, inputSize1]

input

-> Tensor device dtype '[batchSize, channelSize, outputSize0, outputSize1]

output

maxPool2d TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = maxPool2d @'(1,1) @'(1,1) @'(0,0) (ones :: CPUTensor 'D.Float '[1,3,4,5]) -- Skip warning
...
>>> shape t
[1,3,4,5]
>>> :t t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 3, 4, 5]

mkldnnMaxPool2d Source #

Arguments

:: forall kernelSize stride padding channelSize inputSize0 inputSize1 batchSize outputSize0 outputSize1 dtype device. (All KnownNat '[Fst kernelSize, Snd kernelSize, Fst stride, Snd stride, Fst padding, Snd padding, channelSize, inputSize0, inputSize1, batchSize], ConvSideCheck inputSize0 (Fst kernelSize) (Fst stride) (Fst padding) outputSize0, ConvSideCheck inputSize1 (Snd kernelSize) (Snd stride) (Snd padding) outputSize1) 
=> Tensor device dtype '[batchSize, channelSize, inputSize0, inputSize1]

input

-> Tensor device dtype '[batchSize, channelSize, outputSize0, outputSize1]

output

mkldnnMaxPool2d TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: does this function work, that is, does it return values without throwing? when does it work? TODO: this should probably only be callable if the device is MKLDNN?

The example below is disabled:

-- >>> t = mkldnnMaxPool2d @'(1,1) @'(1,1) @'(0,0) (toMKLDNN (ones :: CPUTensor 'D.Float '[1,3,4,5]))
-- >>> shape t
-- [1,3,4,5]
-- >>> :t t
-- t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 3, 4, 5]

quantizedMaxPool2d Source #

Arguments

:: forall kernelSize stride padding channelSize inputSize0 inputSize1 batchSize outputSize0 outputSize1 dtype device. (All KnownNat '[Fst kernelSize, Snd kernelSize, Fst stride, Snd stride, Fst padding, Snd padding, channelSize, inputSize0, inputSize1, batchSize], ConvSideCheck inputSize0 (Fst kernelSize) (Fst stride) (Fst padding) outputSize0, ConvSideCheck inputSize1 (Snd kernelSize) (Snd stride) (Snd padding) outputSize1) 
=> Tensor device dtype '[batchSize, channelSize, inputSize0, inputSize1]

input

-> Tensor device dtype '[batchSize, channelSize, outputSize0, outputSize1]

output

quantizedMaxPool2d TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: what are quantized functions and when are they available?

The example below is disabled:

-- >>> t = quantizedMaxPool2d @'(1,1) @'(1,1) @'(0,0) (ones :: CPUTensor 'D.Float '[1,3,4,5])
-- >>> shape t
-- [1,3,4,5]
-- >>> :t t
-- t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 3, 4, 5]

maxPool3d Source #

Arguments

:: forall kernelSize stride padding channelSize inputSize0 inputSize1 inputSize2 batchSize outputSize0 outputSize1 outputSize2 dtype device. (All KnownNat '[Fst3 kernelSize, Snd3 kernelSize, Trd3 kernelSize, Fst3 stride, Snd3 stride, Trd3 stride, Fst3 padding, Snd3 padding, Trd3 padding, channelSize, inputSize0, inputSize1, inputSize2, batchSize], ConvSideCheck inputSize0 (Fst3 kernelSize) (Fst3 stride) (Fst3 padding) outputSize0, ConvSideCheck inputSize1 (Snd3 kernelSize) (Snd3 stride) (Snd3 padding) outputSize1, ConvSideCheck inputSize2 (Trd3 kernelSize) (Trd3 stride) (Trd3 padding) outputSize2) 
=> Tensor device dtype '[batchSize, channelSize, inputSize0, inputSize1, inputSize2]

input

-> Tensor device dtype '[batchSize, channelSize, outputSize0, outputSize1, outputSize2]

output

maxPool3d TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = maxPool3d @'(1,1,1) @'(1,1,1) @'(0,0,0) (ones :: CPUTensor 'D.Float '[1,3,4,5,6])
>>> shape t
[1,3,4,5,6]
>>> :t t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 3, 4, 5, 6]

maskedFill Source #

Arguments

:: forall a shape shape' shape'' dtype device. (Scalar a, shape'' ~ Broadcast shape shape') 
=> Tensor device 'Bool shape'

mask

-> a

fill value

-> Tensor device dtype shape

input

-> Tensor device dtype shape''

output

maskedFill TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = ones :: CPUTensor 'D.Float '[2, 1, 3]
>>> m = fromJust [[False], [True], [False]] :: CPUTensor 'D.Bool '[3, 1]
>>> t' = maskedFill @Float m 0.5 t
>>> :type t'
t' :: Tensor '( 'D.CPU, 0) 'D.Float '[2, 3, 3]
>>> dtype &&& shape &&& (\u -> D.asValue (toDynamic u) :: [[[Float]]]) $ t'
(Float,([2,3,3],[[[1.0,1.0,1.0],[0.5,0.5,0.5],[1.0,1.0,1.0]],[[1.0,1.0,1.0],[0.5,0.5,0.5],[1.0,1.0,1.0]]]))

mm Source #

Arguments

:: forall n k m dtype device. Tensor device dtype '[n, k]

first input matrix

-> Tensor device dtype '[k, m]

second input matrix

-> Tensor device dtype '[n, m]

output matrix

matrix-matrix multiplication TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ mm (ones :: CPUTensor 'D.Float '[3,2]) (zeros :: CPUTensor 'D.Float '[2,4])
(Float,[3,4])

mv Source #

Arguments

:: forall n m dtype device. Tensor device dtype '[n, m]

input matrix

-> Tensor device dtype '[m]

input vector

-> Tensor device dtype '[n]

output vector

matrix-vector multiplication TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ mv (ones :: CPUTensor 'D.Float '[3,2]) (zeros :: CPUTensor 'D.Float '[2])
(Float,[3])

type family NarrowCheck (mbCurrent :: Maybe Nat) (mbUpdated :: Maybe [Nat]) (shape :: [Nat]) (dim :: Nat) (start :: Nat) (length :: Nat) :: [Nat] where ... Source #

Equations

NarrowCheck Nothing _ sh d _ _ = DimOutOfBound sh d 
NarrowCheck (Just c) Nothing sh d s l = DimOutOfBound sh d 
NarrowCheck _ (Just r) _ _ _ _ = r 

type family Narrow' (dim :: Nat) (shape :: [Nat]) (current :: Maybe Nat) (start :: Nat) (length :: Nat) :: Maybe [Nat] where ... Source #

Equations

Narrow' d sh (Just c) s l = If ((s + l) <=? c) (ReplaceDim d sh l) (TypeError (((((Text "The end of the requested narrow segment " :<>: ShowType (s + l)) :<>: Text " would be larger than current size ") :<>: ShowType c) :<>: Text " at dimension ") :<>: ShowType d)) 
Narrow' d sh Nothing s l = TypeError (((Text "Requested narrow dimension " :<>: ShowType d) :<>: Text " doesnt exist in ") :<>: ShowType sh) 

type family Narrow (shape :: [Nat]) (dim :: Nat) (start :: Nat) (length :: Nat) :: [Nat] where ... Source #

Equations

Narrow shape dim start length = NarrowCheck (ExtractDim dim shape) (Narrow' dim shape (ExtractDim dim shape) start length) shape dim start length 
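
For example (a sketch that follows directly from the equations via NarrowCheck and Narrow', matching the narrow doctests below):

>>> :kind! Narrow '[3,3,3] 0 0 2
Narrow '[3,3,3] 0 0 2 :: [Natural]
= '[2, 3, 3]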

narrow :: forall dim start length shape mbSize mbNewShape dtype device. (All KnownNat '[dim, start, length], All KnownNat shape) => Tensor device dtype shape -> Tensor device dtype (Narrow shape dim start length) Source #

Narrow a tensor by returning a slice that starts at start and has length length along dimension dim

>>> dtype &&& shape $ narrow @0 @0 @2 (ones :: CPUTensor 'D.Float '[3,3,3])
(Float,[2,3,3])
>>> dtype &&& shape $ narrow @1 @1 @2 (ones :: CPUTensor 'D.Half '[3,3,3])
(Half,[3,2,3])
>>> dtype &&& shape $ narrow @1 @1 @2 (ones :: CPUTensor 'D.Bool '[3,3,3])
(Bool,[3,2,3])

onesLike Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Tensor device dtype shape

output

onesLike

>>> dtype &&& shape $ onesLike (ones :: CPUTensor 'D.Float '[3,4,5])
(Float,[3,4,5])

randLike Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> IO (Tensor device dtype shape)

output

randLike TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t <- randLike (ones :: CPUTensor 'D.Float '[3,4,5])
>>> dtype &&& shape $ t
(Float,[3,4,5])

randnLike Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> IO (Tensor device dtype shape)

output

randnLike TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t <- randnLike (ones :: CPUTensor 'D.Float '[3,4,5])
>>> dtype &&& shape $ t
(Float,[3,4,5])

reciprocal Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Tensor device dtype shape

output

reciprocal

>>> dtype &&& shape $ reciprocal (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

neg Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Tensor device dtype shape

output

negate TODO: probably not defined for Bool tensors

>>> dtype &&& shape $ neg (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

round Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Tensor device dtype shape

output

round TODO: probably only defined for floating point tensors

>>> dtype &&& shape $ round (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

prelu Source #

Arguments

:: forall shape dtype device. Tensor device dtype '[]

weight

-> Tensor device dtype shape

input

-> Tensor device dtype shape

output

prelu activation function TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ prelu (ones :: CPUTensor 'D.Float '[]) (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

type family GeluDTypeIsValid (device :: (DeviceType, Nat)) (dtype :: DType) :: Constraint where ... Source #

Equations

GeluDTypeIsValid '('CPU, 0) dtype = (DTypeIsFloatingPoint '('CPU, 0) dtype, DTypeIsNotHalf '('CPU, 0) dtype) 
GeluDTypeIsValid '('CUDA, deviceIndex) dtype = (DTypeIsFloatingPoint '('CUDA, deviceIndex) dtype, DTypeIsNotHalf '('CUDA, deviceIndex) dtype) 
GeluDTypeIsValid '(deviceType, _) dtype = UnsupportedDTypeForDevice deviceType dtype 

gelu Source #

Arguments

:: forall shape dtype device. GeluDTypeIsValid device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

gelu activation function

>>> dtype &&& shape $ gelu (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

rsqrt Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

rsqrt

>>> dtype &&& shape $ rsqrt (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

celu Source #

Arguments

:: forall shape dtype device. Float

alpha

-> Tensor device dtype shape

input

-> Tensor device dtype shape

output

celu activation function TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ celu 3.0 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

type family StackImpl (dim :: Nat) (tensors :: [a]) (count :: Nat) :: Maybe ([Nat], DType, (DeviceType, Nat)) where ... Source #

Equations

StackImpl dim '[] count = Nothing 
StackImpl dim (Tensor device dtype shape ': '[]) count = MaybeTriple (ComputeStackShape shape dim count) (Just dtype) (Just device) 
StackImpl dim (Tensor device dtype shape ': (Tensor device dtype shape ': tensors)) count = StackImpl dim (Tensor device dtype shape ': tensors) (count + 1) 
StackImpl _ _ _ = Nothing 

type family MaybePair (a' :: Maybe a) (b' :: Maybe b) :: Maybe (a, b) where ... Source #

Equations

MaybePair Nothing _ = Nothing 
MaybePair _ Nothing = Nothing 
MaybePair (Just a') (Just b') = Just '(a', b') 

type family MaybeTriple (a' :: Maybe a) (b' :: Maybe b) (c' :: Maybe c) :: Maybe (a, b, c) where ... Source #

Equations

MaybeTriple Nothing _ _ = Nothing 
MaybeTriple _ Nothing _ = Nothing 
MaybeTriple _ _ Nothing = Nothing 
MaybeTriple (Just a') (Just b') (Just c') = Just '(a', b', c') 

type family ComputeStackShape (shape :: [Nat]) (dim :: Nat) (count :: Nat) :: Maybe [Nat] where ... Source #

Equations

ComputeStackShape _ _ 0 = Nothing 
ComputeStackShape xs 0 count = Just (count ': xs) 
ComputeStackShape (x ': xs) dim count = AppendToMaybe x (ComputeStackShape xs (dim - 1) count) 
ComputeStackShape '[] _ _ = Nothing 
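
For example, stacking 3 tensors of shape '[2,2] along dimension 1 (a sketch consistent with the Stack examples below):

>>> :kind! ComputeStackShape '[2,2] 1 3
ComputeStackShape '[2,2] 1 3 :: Maybe [Natural]
= 'Just '[2, 3, 2]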

type family StackCheck (res :: Maybe ([Nat], DType, (DeviceType, Nat))) :: ([Nat], DType, (DeviceType, Nat)) where ... Source #

Equations

StackCheck 'Nothing = TypeError (Text "Stacking impossible.") 
StackCheck ('Just '(shape, dtype, device)) = '(shape, dtype, device) 

type Stack dim tensors = StackCheck (StackImpl dim tensors 1) Source #

Stack

>>> type Ty = Stack 0 '[Tensor '( 'D.CPU, 0) 'D.Float '[]]
>>> :kind! Ty
Ty :: ([Natural], D.DType, (D.DeviceType, Natural))
= '( '[1], 'D.Float, '( 'D.CPU, 0))
>>> type Ty = Stack 0 '[Tensor '( 'D.CPU, 0) 'D.Float '[2,2]]
>>> :kind! Ty
Ty :: ([Natural], D.DType, (D.DeviceType, Natural))
= '( '[1, 2, 2], 'D.Float, '( 'D.CPU, 0))
>>> type Ty = Stack 1 '[Tensor '( 'D.CPU, 0) 'D.Float '[2,2]]
>>> :kind! Ty
Ty :: ([Natural], D.DType, (D.DeviceType, Natural))
= '( '[2, 1, 2], 'D.Float, '( 'D.CPU, 0))
>>> type Ty = Stack 2 '[Tensor '( 'D.CPU, 0) 'D.Float '[2,2]]
>>> :kind! Ty
Ty :: ([Natural], D.DType, (D.DeviceType, Natural))
= '( '[2, 2, 1], 'D.Float, '( 'D.CPU, 0))
>>> type Ty = Stack 2 '[Tensor '( 'D.CPU, 0) 'D.Float '[2,2], Tensor '( 'D.CPU, 0) 'D.Float '[2,2], Tensor '( 'D.CPU, 0) 'D.Float '[2,2]]
>>> :kind! Ty
Ty :: ([Natural], D.DType, (D.DeviceType, Natural))
= '( '[2, 2, 3], 'D.Float, '( 'D.CPU, 0))

stack Source #

Arguments

:: forall dim shape dtype device tensors. (KnownNat dim, '(shape, dtype, device) ~ Stack dim tensors, Castable (HList tensors) [ATenTensor]) 
=> HList tensors

input list of tensors

-> Tensor device dtype shape

output

stack

>>> t = ones :: CPUTensor 'D.Float '[]
>>> t' = stack @0 (t :. HNil)
>>> :type t'
t' :: Tensor '( 'D.CPU, 0) 'D.Float '[1]
>>> dtype &&& shape &&& (\t'' -> D.asValue (toDynamic t'') :: [Float]) $ t'
(Float,([1],[1.0]))
>>> t = ones :: CPUTensor 'D.Float '[2,2]
>>> t' = stack @0 (t :. HNil)
>>> :type t'
t' :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 2, 2]
>>> dtype &&& shape &&& (\t'' -> D.asValue (toDynamic t'') :: [[[Float]]]) $ t'
(Float,([1,2,2],[[[1.0,1.0],[1.0,1.0]]]))
>>> t' = stack @1 (t :. HNil)
>>> :type t'
t' :: Tensor '( 'D.CPU, 0) 'D.Float '[2, 1, 2]
>>> dtype &&& shape &&& (\t'' -> D.asValue (toDynamic t'') :: [[[Float]]]) $ t'
(Float,([2,1,2],[[[1.0,1.0]],[[1.0,1.0]]]))
>>> t' = stack @2 (t :. HNil)
>>> :type t'
t' :: Tensor '( 'D.CPU, 0) 'D.Float '[2, 2, 1]
>>> dtype &&& shape &&& (\t'' -> D.asValue (toDynamic t'') :: [[[Float]]]) $ t'
(Float,([2,2,1],[[[1.0],[1.0]],[[1.0],[1.0]]]))
>>> t' = stack @2 (t :. t :. t :. HNil)
>>> :type t'
t' :: Tensor '( 'D.CPU, 0) 'D.Float '[2, 2, 3]
>>> dtype &&& shape &&& (\t'' -> D.asValue (toDynamic t'') :: [[[Float]]]) $ t'
(Float,([2,2,3],[[[1.0,1.0,1.0],[1.0,1.0,1.0]],[[1.0,1.0,1.0],[1.0,1.0,1.0]]]))

vecStack Source #

Arguments

:: forall dim n shape dtype device. (KnownNat dim, KnownNat n) 
=> Vector n (Tensor device dtype shape)

input vector of tensors

-> Tensor device dtype (Insert dim n shape)

output tensor
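
A minimal sketch, assuming Vector here is the sized vector from the vector-sized package (Data.Vector.Sized): stacking 3 copies of a '[2,2] tensor along dimension 0 should insert a new dimension of size 3 at position 0.

>>> import qualified Data.Vector.Sized as V
>>> t = ones :: CPUTensor 'D.Float '[2,2]
>>> dtype &&& shape $ vecStack @0 (V.replicate @3 t)
(Float,[3,2,2])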

t Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Tensor device dtype shape

output

t

dtype &&& shape $ t (ones :: CPUTensor 'D.Float '[3,2])

tan Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

tan

>>> dtype &&& shape $ tan (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

trunc Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

trunc TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ trunc (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

type family UnsqueezeImpl (shape :: [a]) (dim :: Nat) :: Maybe [a] where ... Source #

UnsqueezeImpl

>>> :kind! UnsqueezeImpl '[4] 0
UnsqueezeImpl '[4] 0 :: Maybe [Natural]
= 'Just '[1, 4]
>>> :kind! UnsqueezeImpl '[4] 1
UnsqueezeImpl '[4] 1 :: Maybe [Natural]
= 'Just '[4, 1]
>>> :kind! UnsqueezeImpl '[4] 2
UnsqueezeImpl '[4] 2 :: Maybe [Natural]
= 'Nothing

Equations

UnsqueezeImpl xs 0 = Just (1 ': xs) 
UnsqueezeImpl (x ': xs) dim = AppendToMaybe x (UnsqueezeImpl xs (dim - 1)) 
UnsqueezeImpl '[] _ = Nothing 

type family UnsqueezeCheck (shape :: [a]) (dim :: Nat) (result :: Maybe [a]) :: [a] where ... Source #

Equations

UnsqueezeCheck shape dim Nothing = TypeError ((((Text "Cannot unsqueeze the tensor since the specified dimension " :<>: ShowType dim) :<>: Text " is too large (the tensor is only ") :<>: ShowType (ListLength shape)) :<>: Text "D)") 
UnsqueezeCheck _ _ (Just shape') = shape' 

type Unsqueeze shape dim = UnsqueezeCheck shape dim (UnsqueezeImpl shape dim) Source #

unsqueeze Source #

Arguments

:: forall dim shape shape' dtype device. (KnownNat dim, shape' ~ Unsqueeze shape dim) 
=> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

unsqueeze

>>> t = fromJust [1, 2, 3, 4] :: CPUTensor 'D.Int64 '[4]
>>> t' = unsqueeze @0 t
>>> :type t'
t' :: Tensor '( 'D.CPU, 0) 'D.Int64 '[1, 4]
>>> dtype &&& shape &&& (\u -> D.asValue (toDynamic u) :: [[Int]]) $ t'
(Int64,([1,4],[[1,2,3,4]]))
>>> t'' = unsqueeze @1 t
>>> :type t''
t'' :: Tensor '( 'D.CPU, 0) 'D.Int64 '[4, 1]
>>> dtype &&& shape &&& (\u -> D.asValue (toDynamic u) :: [[Int]]) $ t''
(Int64,([4,1],[[1],[2],[3],[4]]))

type family SqueezeAll (shape :: [Nat]) :: [Nat] where ... Source #

Equations

SqueezeAll '[] = '[] 
SqueezeAll (1 ': xs) = SqueezeAll xs 
SqueezeAll (x ': xs) = x ': SqueezeAll xs 
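
For example, mirroring the squeezeAll doctest below:

>>> :kind! SqueezeAll '[2,1,2,1,2]
SqueezeAll '[2,1,2,1,2] :: [Natural]
= '[2, 2, 2]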

squeezeAll Source #

Arguments

:: forall shape shape' dtype device. shape' ~ SqueezeAll shape 
=> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

squeeze all dimensions

>>> dtype &&& shape $ squeezeAll (ones :: CPUTensor 'D.Float '[2,1,2,1,2])
(Float,[2,2,2])
>>> squeezeAll (ones :: CPUTensor 'D.Float '[2,1,2,1,2])
Tensor Float [2,2,2] [[[ 1.0000   ,  1.0000   ],
                       [ 1.0000   ,  1.0000   ]],
                      [[ 1.0000   ,  1.0000   ],
                       [ 1.0000   ,  1.0000   ]]]

type family SqueezeDimImpl (shape :: [a]) (dim :: Nat) :: Maybe [a] where ... Source #

Equations

SqueezeDimImpl (1 ': xs) 0 = Just xs 
SqueezeDimImpl _ 0 = Nothing 
SqueezeDimImpl (x ': xs) dim = AppendToMaybe x (SqueezeDimImpl xs (dim - 1)) 
SqueezeDimImpl _ _ = Nothing 

type family SqueezeDimCheck (shape :: [a]) (dim :: Nat) (result :: Maybe [a]) :: [a] where ... Source #

Equations

SqueezeDimCheck shape dim Nothing = TypeError (Text "The tensor cannot be squeezed at the specified dimension " :<>: ShowType dim) 
SqueezeDimCheck _ _ ('Just shape') = shape' 

type SqueezeDim shape dim = SqueezeDimCheck shape dim (SqueezeDimImpl shape dim) Source #

Calculate the output shape of a squeeze along a given dimension

>>> :kind! SqueezeDim '[2,1,2] 1
SqueezeDim '[2,1,2] 1 :: [Natural]
= '[2, 2]

squeezeDim Source #

Arguments

:: forall dim shape shape' dtype device. (KnownNat dim, shape' ~ SqueezeDim shape dim) 
=> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

squeeze a particular dimension

>>> dtype &&& shape $ squeezeDim @1 (ones :: CPUTensor 'D.Float '[2,1,2,1,2])
(Float,[2,2,1,2])
>>> dtype &&& shape $ squeezeDim @3 (ones :: CPUTensor 'D.Float '[2,1,2,1,2])
(Float,[2,1,2,2])

zerosLike Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Tensor device dtype shape

output

zerosLike

>>> dtype &&& shape $ zerosLike (ones :: CPUTensor 'D.Float '[3,4,5])
(Float,[3,4,5])

clone :: forall shape dtype device. Tensor device dtype shape -> IO (Tensor device dtype shape) Source #

clone

>>> t <- clone (ones :: CPUTensor 'D.Float '[3,2])
>>> dtype &&& shape $ t
(Float,[3,2])

addmm Source #

Arguments

:: forall shape' shape n k m dtype device. (All KnownNat '[n, k, m], shape' ~ Broadcast shape '[n, m]) 
=> Float

beta

-> Float

alpha

-> Tensor device dtype '[n, k]

first input matrix

-> Tensor device dtype '[k, m]

second input matrix

-> Tensor device dtype shape

input tensor

-> Tensor device dtype shape'

output tensor

addmm TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: can we use D.Scalar here for beta and alpha?

>>> t = addmm 1 1 (ones :: CPUTensor 'D.Float '[3,2]) (zeros :: CPUTensor 'D.Float '[2,4]) (ones :: CPUTensor 'D.Float '[])
>>> dtype &&& shape $ t
(Float,[3,4])
>>> :t t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[3, 4]

numel Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Int

output

numel TODO: since this is decidable at compile time, this should probably be calculated from the tensor type instead
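
Since the shape is known statically, the element count is the product of the shape's entries; a sketch:

>>> numel (ones :: CPUTensor 'D.Float '[3,4,5])
60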

qScale Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Double

output

qScale TODO: are there any restrictions on the dtype?

qZeroPoint Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Int

output

qZeroPoint TODO: are there any restrictions on the dtype?

data RNNDirectionality Source #

The directional specification of a recurrent function

Constructors

Bidirectional

Forward and backward along the sequential axis using independent parameters for each.

Unidirectional

Forward along the sequential axis.

Instances

Instances details
Generic RNNDirectionality Source # 
Instance details

Defined in Torch.Typed.Functional

Associated Types

type Rep RNNDirectionality :: Type -> Type Source #

Show RNNDirectionality Source # 
Instance details

Defined in Torch.Typed.Functional

type Rep RNNDirectionality Source # 
Instance details

Defined in Torch.Typed.Functional

type Rep RNNDirectionality = D1 ('MetaData "RNNDirectionality" "Torch.Typed.Functional" "hasktorch-0.2.0.0-F6yFRaDiRF49lpq95SVuR8" 'False) (C1 ('MetaCons "Bidirectional" 'PrefixI 'False) (U1 :: Type -> Type) :+: C1 ('MetaCons "Unidirectional" 'PrefixI 'False) (U1 :: Type -> Type))

type family NumberOfDirections (directionality :: RNNDirectionality) :: Nat where ... Source #
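
Bidirectional recurrence runs along two directions and unidirectional along one, so the equations are presumably:

Equations

NumberOfDirections 'Bidirectional = 2 
NumberOfDirections 'Unidirectional = 1 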

data RNNShapeOrder Source #

Specification for the sequential axis of a recurrent function.

Constructors

BatchFirst

Input is of shape (Batch, Sequence, Features)

SequenceFirst

Input is of shape (Sequence, Batch, Features)

Instances

Instances details
Generic RNNShapeOrder Source # 
Instance details

Defined in Torch.Typed.Functional

Associated Types

type Rep RNNShapeOrder :: Type -> Type Source #

Show RNNShapeOrder Source # 
Instance details

Defined in Torch.Typed.Functional

type Rep RNNShapeOrder Source # 
Instance details

Defined in Torch.Typed.Functional

type Rep RNNShapeOrder = D1 ('MetaData "RNNShapeOrder" "Torch.Typed.Functional" "hasktorch-0.2.0.0-F6yFRaDiRF49lpq95SVuR8" 'False) (C1 ('MetaCons "BatchFirst" 'PrefixI 'False) (U1 :: Type -> Type) :+: C1 ('MetaCons "SequenceFirst" 'PrefixI 'False) (U1 :: Type -> Type))

class KnownRNNShapeOrder (shapeOrder :: RNNShapeOrder) where Source #

Instances

Instances details
KnownRNNShapeOrder 'BatchFirst Source # 
Instance details

Defined in Torch.Typed.Functional

KnownRNNShapeOrder 'SequenceFirst Source # 
Instance details

Defined in Torch.Typed.Functional

type family RNNShape (shapeOrder :: RNNShapeOrder) (seqLen :: Nat) (batchSize :: Nat) (featureSize :: Nat) :: [Nat] where ... Source #

Equations

RNNShape BatchFirst seqLen batchSize featureSize = '[batchSize, seqLen, featureSize] 
RNNShape SequenceFirst seqLen batchSize featureSize = '[seqLen, batchSize, featureSize] 
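
For example (a sketch that follows directly from the equations, with seqLen 5, batchSize 3, featureSize 10):

>>> :kind! RNNShape 'BatchFirst 5 3 10
RNNShape 'BatchFirst 5 3 10 :: [Natural]
= '[3, 5, 10]
>>> :kind! RNNShape 'SequenceFirst 5 3 10
RNNShape 'SequenceFirst 5 3 10 :: [Natural]
= '[5, 3, 10]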

type LSTMWIShape hiddenSize inputSize = '[4 * hiddenSize, inputSize] Source #

type LSTMWHShape hiddenSize inputSize = '[4 * hiddenSize, hiddenSize] Source #

type LSTMBIShape hiddenSize inputSize = '[4 * hiddenSize] Source #

type LSTMBHShape hiddenSize inputSize = '[4 * hiddenSize] Source #

type family LSTMRImpl (inputSize :: Nat) (hiddenSize :: Nat) (numLayers :: Nat) (directionality :: RNNDirectionality) :: [[Nat]] where ... Source #

Equations

LSTMRImpl inputSize hiddenSize 1 'Unidirectional = '[LSTMWIShape hiddenSize inputSize, LSTMWHShape hiddenSize inputSize, LSTMBIShape hiddenSize inputSize, LSTMBHShape hiddenSize inputSize] 
LSTMRImpl inputSize hiddenSize numLayers 'Unidirectional = LSTMRImpl inputSize hiddenSize (numLayers - 1) 'Unidirectional ++ '[LSTMWIShape hiddenSize (hiddenSize * NumberOfDirections 'Unidirectional), LSTMWHShape hiddenSize (hiddenSize * NumberOfDirections 'Unidirectional), LSTMBIShape hiddenSize (hiddenSize * NumberOfDirections 'Unidirectional), LSTMBHShape hiddenSize (hiddenSize * NumberOfDirections 'Unidirectional)] 
LSTMRImpl inputSize hiddenSize 1 'Bidirectional = '[LSTMWIShape hiddenSize inputSize, LSTMWHShape hiddenSize inputSize, LSTMBIShape hiddenSize inputSize, LSTMBHShape hiddenSize inputSize, LSTMWIShape hiddenSize inputSize, LSTMWHShape hiddenSize inputSize, LSTMBIShape hiddenSize inputSize, LSTMBHShape hiddenSize inputSize] 
LSTMRImpl inputSize hiddenSize numLayers 'Bidirectional = LSTMRImpl inputSize hiddenSize (numLayers - 1) 'Bidirectional ++ '[LSTMWIShape hiddenSize (hiddenSize * NumberOfDirections 'Bidirectional), LSTMWHShape hiddenSize (hiddenSize * NumberOfDirections 'Bidirectional), LSTMBIShape hiddenSize (hiddenSize * NumberOfDirections 'Bidirectional), LSTMBHShape hiddenSize (hiddenSize * NumberOfDirections 'Bidirectional), LSTMWIShape hiddenSize (hiddenSize * NumberOfDirections 'Bidirectional), LSTMWHShape hiddenSize (hiddenSize * NumberOfDirections 'Bidirectional), LSTMBIShape hiddenSize (hiddenSize * NumberOfDirections 'Bidirectional), LSTMBHShape hiddenSize (hiddenSize * NumberOfDirections 'Bidirectional)] 

type family LSTMR' (shapes :: [[Nat]]) (dtype :: DType) (device :: (DeviceType, Nat)) :: [a] where ... Source #

Equations

LSTMR' '[] dtype device = '[] 
LSTMR' (shape ': shapes) dtype device = Tensor device dtype shape ': LSTMR' shapes dtype device 

type LSTMR inputSize hiddenSize numLayers directionality dtype device = LSTMR' (LSTMRImpl inputSize hiddenSize numLayers directionality) dtype device Source #

lstm :: forall shapeOrder directionality numLayers seqLen batchSize inputSize outputSize hiddenSize inputShape outputShape hxShape tensorParameters dtype device. (KnownNat numLayers, KnownRNNShapeOrder shapeOrder, KnownRNNDirectionality directionality, outputSize ~ (hiddenSize * NumberOfDirections directionality), inputShape ~ RNNShape shapeOrder seqLen batchSize inputSize, outputShape ~ RNNShape shapeOrder seqLen batchSize outputSize, hxShape ~ '[numLayers * NumberOfDirections directionality, batchSize, hiddenSize], tensorParameters ~ LSTMR inputSize hiddenSize numLayers directionality dtype device, Castable (HList tensorParameters) [ATenTensor]) => HList tensorParameters -> Double -> Bool -> (Tensor device dtype hxShape, Tensor device dtype hxShape) -> Tensor device dtype inputShape -> (Tensor device dtype outputShape, Tensor device dtype hxShape, Tensor device dtype hxShape) Source #

lstm. The parameters of this ATen function are non-trivial to provide; see the LSTM module for doctests.

lstmCell :: forall inputSize hiddenSize batchSize dtype device. Tensor device dtype '[4 * hiddenSize, inputSize] -> Tensor device dtype '[4 * hiddenSize, hiddenSize] -> Tensor device dtype '[4 * hiddenSize] -> Tensor device dtype '[4 * hiddenSize] -> (Tensor device dtype '[batchSize, hiddenSize], Tensor device dtype '[batchSize, hiddenSize]) -> Tensor device dtype '[batchSize, inputSize] -> (Tensor device dtype '[batchSize, hiddenSize], Tensor device dtype '[batchSize, hiddenSize]) Source #

lstmCell

>>> dtype &&& shape $ fst $ lstmCell (ones :: CPUTensor 'D.Float '[12,2]) (ones :: CPUTensor 'D.Float '[12,3]) (ones :: CPUTensor 'D.Float '[12]) (ones :: CPUTensor 'D.Float '[12]) ((ones :: CPUTensor 'D.Float '[2,3]), (ones :: CPUTensor 'D.Float '[2,3])) (ones :: CPUTensor 'D.Float '[2,2])
(Float,[2,3])

type GRUWIShape hiddenSize inputSize = '[3 * hiddenSize, inputSize] Source #

type GRUWHShape hiddenSize inputSize = '[3 * hiddenSize, hiddenSize] Source #

type GRUBIShape hiddenSize inputSize = '[3 * hiddenSize] Source #

type GRUBHShape hiddenSize inputSize = '[3 * hiddenSize] Source #

type family GRURImpl (inputSize :: Nat) (hiddenSize :: Nat) (numLayers :: Nat) (directionality :: RNNDirectionality) :: [[Nat]] where ... Source #

Equations

GRURImpl inputSize hiddenSize 1 'Unidirectional = '[GRUWIShape hiddenSize inputSize, GRUWHShape hiddenSize inputSize, GRUBIShape hiddenSize inputSize, GRUBHShape hiddenSize inputSize] 
GRURImpl inputSize hiddenSize numLayers 'Unidirectional = GRURImpl inputSize hiddenSize (numLayers - 1) 'Unidirectional ++ '[GRUWIShape hiddenSize (hiddenSize * NumberOfDirections 'Unidirectional), GRUWHShape hiddenSize (hiddenSize * NumberOfDirections 'Unidirectional), GRUBIShape hiddenSize (hiddenSize * NumberOfDirections 'Unidirectional), GRUBHShape hiddenSize (hiddenSize * NumberOfDirections 'Unidirectional)] 
GRURImpl inputSize hiddenSize 1 'Bidirectional = '[GRUWIShape hiddenSize inputSize, GRUWHShape hiddenSize inputSize, GRUBIShape hiddenSize inputSize, GRUBHShape hiddenSize inputSize, GRUWIShape hiddenSize inputSize, GRUWHShape hiddenSize inputSize, GRUBIShape hiddenSize inputSize, GRUBHShape hiddenSize inputSize] 
GRURImpl inputSize hiddenSize numLayers 'Bidirectional = GRURImpl inputSize hiddenSize (numLayers - 1) 'Bidirectional ++ '[GRUWIShape hiddenSize (hiddenSize * NumberOfDirections 'Bidirectional), GRUWHShape hiddenSize (hiddenSize * NumberOfDirections 'Bidirectional), GRUBIShape hiddenSize (hiddenSize * NumberOfDirections 'Bidirectional), GRUBHShape hiddenSize (hiddenSize * NumberOfDirections 'Bidirectional), GRUWIShape hiddenSize (hiddenSize * NumberOfDirections 'Bidirectional), GRUWHShape hiddenSize (hiddenSize * NumberOfDirections 'Bidirectional), GRUBIShape hiddenSize (hiddenSize * NumberOfDirections 'Bidirectional), GRUBHShape hiddenSize (hiddenSize * NumberOfDirections 'Bidirectional)] 

type family GRUR' (shapes :: [[Nat]]) (dtype :: DType) (device :: (DeviceType, Nat)) :: [a] where ... Source #

Equations

GRUR' '[] dtype device = '[] 
GRUR' (shape ': shapes) dtype device = Tensor device dtype shape ': GRUR' shapes dtype device 

type GRUR inputSize hiddenSize numLayers directionality dtype device = GRUR' (GRURImpl inputSize hiddenSize numLayers directionality) dtype device Source #

gru :: forall shapeOrder directionality numLayers seqLen batchSize inputSize outputSize hiddenSize inputShape outputShape hcShape tensorParameters dtype device. (KnownNat numLayers, KnownRNNShapeOrder shapeOrder, KnownRNNDirectionality directionality, outputSize ~ (hiddenSize * NumberOfDirections directionality), inputShape ~ RNNShape shapeOrder seqLen batchSize inputSize, outputShape ~ RNNShape shapeOrder seqLen batchSize outputSize, hcShape ~ '[numLayers * NumberOfDirections directionality, batchSize, hiddenSize], tensorParameters ~ GRUR inputSize hiddenSize numLayers directionality dtype device, Castable (HList tensorParameters) [ATenTensor]) => HList tensorParameters -> Double -> Bool -> Tensor device dtype hcShape -> Tensor device dtype inputShape -> (Tensor device dtype outputShape, Tensor device dtype hcShape) Source #

gru. The parameters of this ATen function are non-trivial to provide; see the GRU module for doctests.

gruCell :: forall inputSize hiddenSize batchSize dtype device. Tensor device dtype '[3 * hiddenSize, inputSize] -> Tensor device dtype '[3 * hiddenSize, hiddenSize] -> Tensor device dtype '[3 * hiddenSize] -> Tensor device dtype '[3 * hiddenSize] -> Tensor device dtype '[batchSize, hiddenSize] -> Tensor device dtype '[batchSize, inputSize] -> Tensor device dtype '[batchSize, hiddenSize] Source #

gruCell

>>> dtype &&& shape $ gruCell (ones :: CPUTensor 'D.Float '[9,2]) (ones :: CPUTensor 'D.Float '[9,3]) (ones :: CPUTensor 'D.Float '[9]) (ones :: CPUTensor 'D.Float '[9]) (ones :: CPUTensor 'D.Float '[2,3]) (ones :: CPUTensor 'D.Float '[2,2])
(Float,[2,3])

type family MatrixOrMatrixBatch (shape :: [Nat]) :: [Nat] where ... Source #

Equations

MatrixOrMatrixBatch (n ': (m ': '[])) = '[n, m] 
MatrixOrMatrixBatch (b ': (n ': (m ': '[]))) = '[b, n, m] 
MatrixOrMatrixBatch _ = TypeError (Text "The input must be matrix or a batch of matrices.") 
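
For example, a matrix and a batch of matrices both pass through unchanged:

>>> :kind! MatrixOrMatrixBatch '[3, 2]
MatrixOrMatrixBatch '[3, 2] :: [Natural]
= '[3, 2]
>>> :kind! MatrixOrMatrixBatch '[5, 3, 2]
MatrixOrMatrixBatch '[5, 3, 2] :: [Natural]
= '[5, 3, 2]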

triu Source #

Arguments

:: forall shape dtype device. shape ~ MatrixOrMatrixBatch shape 
=> Int

diagonal

-> Tensor device dtype shape

input

-> Tensor device dtype shape

output

triu TODO: triu is not implemented for D.Bool, or maybe numeric type is lifted?

>>> t = ones :: CPUTensor 'D.Float '[3, 4]
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[Float]]) $ triu 0 t
(Float,([3,4],[[1.0,1.0,1.0,1.0],[0.0,1.0,1.0,1.0],[0.0,0.0,1.0,1.0]]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[Float]]) $ triu 1 t
(Float,([3,4],[[0.0,1.0,1.0,1.0],[0.0,0.0,1.0,1.0],[0.0,0.0,0.0,1.0]]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[Float]]) $ triu (-1) t
(Float,([3,4],[[1.0,1.0,1.0,1.0],[1.0,1.0,1.0,1.0],[0.0,1.0,1.0,1.0]]))

tril Source #

Arguments

:: forall shape dtype device. shape ~ MatrixOrMatrixBatch shape 
=> Int

diagonal

-> Tensor device dtype shape

input

-> Tensor device dtype shape

output

tril TODO: tril is not implemented for D.Bool, or maybe numeric type is lifted?

>>> t = ones :: CPUTensor 'D.Float '[3, 4]
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[Float]]) $ tril 0 t
(Float,([3,4],[[1.0,0.0,0.0,0.0],[1.0,1.0,0.0,0.0],[1.0,1.0,1.0,0.0]]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[Float]]) $ tril 1 t
(Float,([3,4],[[1.0,1.0,0.0,0.0],[1.0,1.0,1.0,0.0],[1.0,1.0,1.0,1.0]]))
>>> dtype &&& shape &&& (\t' -> D.asValue (toDynamic t') :: [[Float]]) $ tril (-1) t
(Float,([3,4],[[0.0,0.0,0.0,0.0],[1.0,0.0,0.0,0.0],[1.0,1.0,0.0,0.0]]))

trace Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Tensor device dtype shape

output

trace

>>> dtype &&& shape $ trace (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

maskedSelect :: forall shape shape' shape'' dtype device. shape'' ~ Broadcast shape shape' => Tensor device 'Bool shape -> Tensor device dtype shape' -> UnknownShapeTensor device dtype Source #

nonzero Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Tensor device dtype shape

output

nonzero

>>> dtype &&& shape $ nonzero (zeros :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

type family GatherDimImpl (shape :: [Nat]) (shape' :: [Nat]) (dim :: Nat) :: Maybe [Nat] where ... Source #

GatherDimImpl

>>> :kind! GatherDimImpl '[2, 1, 1] '[2, 4, 1] 1
GatherDimImpl '[2, 1, 1] '[2, 4, 1] 1 :: Maybe [Natural]
= 'Just '[2, 4, 1]
>>> :kind! GatherDimImpl '[2, 1, 1] '[2, 4, 2] 1
GatherDimImpl '[2, 1, 1] '[2, 4, 2] 1 :: Maybe [Natural]
= 'Nothing
>>> :kind! GatherDimImpl '[2, 1, 1] '[2, 0, 1] 1
GatherDimImpl '[2, 1, 1] '[2, 0, 1] 1 :: Maybe [Natural]
= 'Nothing
>>> :kind! GatherDimImpl '[2, 1, 1] '[2, 1] 1
GatherDimImpl '[2, 1, 1] '[2, 1] 1 :: Maybe [Natural]
= 'Nothing
>>> :kind! GatherDimImpl '[2, 1, 1] '[2, 1, 3] 2
GatherDimImpl '[2, 1, 1] '[2, 1, 3] 2 :: Maybe [Natural]
= 'Just '[2, 1, 3]

Equations

GatherDimImpl (x ': xs) (y ': xs) 0 = If (1 <=? y) (Just (y ': xs)) Nothing 
GatherDimImpl (x ': xs) (x ': ys) dim = AppendToMaybe x (GatherDimImpl xs ys (dim - 1)) 
GatherDimImpl _ _ _ = Nothing 

type family GatherDimCheck (shape :: [a]) (shape' :: [a]) (dim :: Nat) (result :: Maybe [a]) :: [a] where ... Source #

Equations

GatherDimCheck shape shape' dim Nothing = TypeError (((Text "Cannot gather the tensor at dimension " :<>: ShowType dim) :<>: Text " using index of shape ") :<>: ShowType shape') 
GatherDimCheck _ _ _ (Just shape'') = shape'' 

type GatherDim shape shape' dim = GatherDimCheck shape shape' dim (GatherDimImpl shape shape' dim) Source #

Calculate the output shape of a gather operation for a given index shape along a given axis

>>> :kind! GatherDim '[2, 1, 1] '[2, 1, 3] 2
GatherDim '[2, 1, 1] '[2, 1, 3] 2 :: [Natural]
= '[2, 1, 3]

gatherDim Source #

Arguments

:: forall dim shape shape' dtype device. (KnownNat dim, shape' ~ GatherDim shape shape' dim) 
=> Tensor device 'Int64 shape'

the indices of elements to gather

-> Tensor device dtype shape

input

-> Tensor device dtype shape'

output

gather values along the specified dimension.
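
A sketch (the index values are illustrative; the output takes the shape of the index tensor):

>>> t = fromJust [[1, 2], [3, 4]] :: CPUTensor 'D.Float '[2, 2]
>>> i = fromJust [[0, 0], [1, 0]] :: CPUTensor 'D.Int64 '[2, 2]
>>> dtype &&& shape $ gatherDim @1 i t
(Float,[2,2])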

lgamma Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

lgamma function

>>> dtype &&& shape $ lgamma (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

digamma Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

digamma function

>>> dtype &&& shape $ digamma (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

polygamma Source #

Arguments

:: forall shape dtype device. Int

order

-> Tensor device dtype shape

input

-> Tensor device dtype shape

output

polygamma function TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?
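
A sketch mirroring the digamma example above; the output shape matches the input:

>>> dtype &&& shape $ polygamma 1 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])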

erfinv Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

inverse of the error function

>>> dtype &&& shape $ erfinv (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

minAll Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Tensor device dtype '[]

output

minAll

>>> dtype &&& shape $ minAll (ones :: CPUTensor 'D.Float '[2,2])
(Float,[])

type family DropValue (shape :: [Nat]) (i :: Nat) :: [Nat] where ... Source #

Equations

DropValue '[] _ = TypeError (Text "Can not find a element in the list.") 
DropValue (x ': xs) 0 = xs 
DropValue (x ': xs) i = x ': DropValue xs (i - 1) 
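
For example (a sketch that follows directly from the equations):

>>> :kind! DropValue '[3,4,5] 1
DropValue '[3,4,5] 1 :: [Natural]
= '[3, 5]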

type family DropNamedValue (shape :: Shape) (i :: Size) :: Shape where ... Source #

Equations

DropNamedValue '[] _ = TypeError (Text "Can not find a element in the list.") 
DropNamedValue (x ': xs) x = xs 
DropNamedValue (x ': xs) y = x ': DropNamedValue xs y 

minDim Source #

Arguments

:: forall d shape dtype device. KnownNat d 
=> Tensor device dtype shape

input

-> (Tensor device dtype (DropValue shape d), Tensor device 'Int64 (DropValue shape d))

output

minDim

>>> t = ones :: CPUTensor 'D.Float '[3,4,5]
>>> dtype &&& shape $ fst $ minDim @0 t
(Float,[4,5])
>>> dtype &&& shape $ fst $ minDim @1 t
(Float,[3,5])
>>> dtype &&& shape $ fst $ minDim @2 t
(Float,[3,4])

maxAll Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Tensor device dtype '[]

output

maxAll

>>> dtype &&& shape $ maxAll (ones :: CPUTensor 'D.Float '[2,2])
(Float,[])

maxDim Source #

Arguments

:: forall d shape dtype device. KnownNat d 
=> Tensor device dtype shape

input

-> (Tensor device dtype (DropValue shape d), Tensor device 'Int64 (DropValue shape d))

output

maxDim

>>> t = ones :: CPUTensor 'D.Float '[3,4,5]
>>> dtype &&& shape $ fst $ maxDim @0 t
(Float,[4,5])
>>> dtype &&& shape $ fst $ maxDim @1 t
(Float,[3,5])
>>> dtype &&& shape $ fst $ maxDim @2 t
(Float,[3,4])

type family HasDim (dim :: Nat) (shape :: [Nat]) :: Constraint where ... Source #

Equations

HasDim _ '[] = TypeError (Text "The dimension of the argument is incorrect.") 
HasDim 0 (_ ': _) = () 
HasDim n (_ ': xs) = HasDim (n - 1) xs 

sortDim :: forall dim shape dtype device. (KnownNat dim, HasDim dim shape) => Bool -> Tensor device dtype shape -> (Tensor device dtype shape, Tensor device Int64 shape) Source #

sortDim

>>> t = ones :: CPUTensor 'D.Float '[3,4,5]
>>> dtype &&& shape $ fst $ sortDim @0 True t
(Float,[3,4,5])
>>> dtype &&& shape $ snd $ sortDim @0 True t
(Int64,[3,4,5])

sortNamedDim :: forall dim shape dtype device. KnownNat (FindDim dim shape) => Bool -> NamedTensor device dtype shape -> (NamedTensor device dtype shape, NamedTensor device Int64 shape) Source #

sortNamedDim

>>> import Torch.Typed.Factories
>>> import Data.Default.Class
>>> t = def :: NamedTensor '( D.CPU, 0) 'D.Float '[Vector 3, Vector 4, Vector 5]
>>> dtype &&& shape $ fst $ sortNamedDim @(Vector 3) True t
(Float,[3,4,5])
>>> dtype &&& shape $ snd $ sortNamedDim @(Vector 3) True t
(Int64,[3,4,5])

argSortDim :: forall dim shape dtype device. (KnownNat dim, HasDim dim shape) => Bool -> Tensor device dtype shape -> Tensor device Int64 shape Source #

argSortDim

>>> t = ones :: CPUTensor 'D.Float '[3,4,5]
>>> dtype &&& shape $ argSortDim @0 True t
(Int64,[3,4,5])

argSortNamedDim :: forall dim shape dtype device. KnownNat (FindDim dim shape) => Bool -> NamedTensor device dtype shape -> NamedTensor device Int64 shape Source #
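
argSortNamedDim. A sketch by analogy with the sortNamedDim example above:

>>> import Torch.Typed.Factories
>>> import Data.Default.Class
>>> t = def :: NamedTensor '( D.CPU, 0) 'D.Float '[Vector 3, Vector 4, Vector 5]
>>> dtype &&& shape $ argSortNamedDim @(Vector 3) True t
(Int64,[3,4,5])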

type family TopKCheck (k :: Nat) (shape :: [Nat]) (dim :: Nat) (satd :: Maybe Nat) (result :: Maybe a) :: a where ... Source #

Equations

TopKCheck _ shape dim _ Nothing = DimOutOfBound shape dim 
TopKCheck _ shape dim Nothing _ = DimOutOfBound shape dim 
TopKCheck k shape dim (Just v) (Just result) = If (k <=? v) result (TypeError (Text "k must be less than or equal to the number of elements in the requested dimension.")) 

type TopK k shape dim = TopKCheck k shape dim (ExtractDim dim shape) (ReplaceDim dim shape k) Source #

type family TopKDeviceAndDTypeCheck dtype (device :: (DeviceType, Nat)) :: Constraint where ... Source #

Equations

TopKDeviceAndDTypeCheck Bool _ = TypeError (Text "topk is not defined for Bool tensors.") 
TopKDeviceAndDTypeCheck Half '(CPU, _) = TypeError (Text "topk is not defined for Half types on CPU.") 
TopKDeviceAndDTypeCheck _ _ = () 

topk Source #

Arguments

:: forall k dim shape' shape dtype device. (KnownNat k, KnownNat dim, All KnownNat shape, TopKDeviceAndDTypeCheck dtype device, shape' ~ TopK k shape dim) 
=> Bool

if we're returning the top k largest (or, if False, the top k smallest)

-> Bool

if the resulting k elements are themselves sorted

-> Tensor device dtype shape

input

-> (Tensor device dtype shape', Tensor device 'Int64 shape')

output

Returns the k largest (if largest is True) elements of the given input tensor along a given dimension.

>>> topk @3 @1 True True (ones :: CPUTensor 'D.Float '[2,3])
(Tensor Float [2,3] [[ 1.0000   ,  1.0000   ,  1.0000   ],
                    [ 1.0000   ,  1.0000   ,  1.0000   ]],Tensor Int64 [2,3] [[ 0,  1,  2],
                    [ 0,  1,  2]])
>>> topk @0 @1 True True (ones :: CPUTensor 'D.Float '[2,3])
(Tensor Float [2,0] [[],
                    []],Tensor Int64 [2,0] [[],
                    []])

alias Source #

Arguments

:: forall shape dtype device. Tensor device dtype shape

input

-> Tensor device dtype shape

output

alias

>>> dtype &&& shape $ alias (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

l1Loss Source #

Arguments

:: forall reduction shape dtype device. KnownReduction reduction 
=> Tensor device dtype shape

prediction

-> Tensor device dtype shape

target

-> Tensor device dtype (ConditionalReduction shape reduction)

loss

L1 loss TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ l1Loss @ReduceNone (ones :: CPUTensor 'D.Float '[2,2]) (ones :: CPUTensor 'D.Float '[2,2])
(Float,[2,2])
>>> dtype &&& shape $ l1Loss @ReduceSum (ones :: CPUTensor 'D.Float '[2,2]) (ones :: CPUTensor 'D.Float '[2,2])
(Float,[])

nllLoss Source #

Arguments

:: forall reduction n c ds dtype device. (KnownReduction reduction, KnownNat n, KnownNat c, KnownShape ds) 
=> Tensor device dtype '[c]

weight

-> Int

ignore which index

-> Tensor device dtype (n ': (c ': ds))

prediction

-> Tensor device 'Int64 (n ': ds)

target

-> Tensor device dtype (ConditionalReduction (n ': ds) reduction)

loss

negative log likelihood loss TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? See https://pytorch.org/docs/stable/nn.functional.html?highlight=nll_loss#torch.nn.functional.nll_loss.

>>> input <- randn @'[3, 5] @'D.Float @'( 'D.CPU, 0)
>>> target = fromJust [1, 0, 4] :: CPUTensor 'D.Int64 '[3]
>>> weight = ones @'[5] @'D.Float @'( 'D.CPU, 0)
>>> dtype &&& shape $ nllLoss @ReduceNone @3 @5 @'[] weight (-100) (logSoftmax @1 input) target
(Float,[3])
>>> dtype &&& shape $ nllLoss @ReduceMean @3 @5 @'[] weight (-100) (logSoftmax @1 input) target
(Float,[])
>>> input <- randn @'[3, 5, 2] @'D.Float @'( 'D.CPU, 0)
>>> target = fromJust [[1, 1], [0, 1], [4, 0]] :: CPUTensor 'D.Int64 '[3, 2]
>>> weight = ones @'[5] @'D.Float @'( 'D.CPU, 0)
>>> dtype &&& shape $ nllLoss @ReduceNone @3 @5 @'[2] weight (-100) (logSoftmax @1 input) target
(Float,[3,2])
>>> dtype &&& shape $ nllLoss @ReduceMean @3 @5 @'[2] weight (-100) (logSoftmax @1 input) target
(Float,[])
>>> input <- randn @'[3, 5, 1, 2] @'D.Float @'( 'D.CPU, 0)
>>> target = fromJust [[[1, 1]], [[0, 1]], [[4, 0]]] :: CPUTensor 'D.Int64 '[3, 1, 2]
>>> weight = ones @'[5] @'D.Float @'( 'D.CPU, 0)
>>> dtype &&& shape $ nllLoss @ReduceNone @3 @5 @'[1, 2] weight (-100) (logSoftmax @1 input) target
(Float,[3,1,2])
>>> dtype &&& shape $ nllLoss @ReduceMean @3 @5 @'[1, 2] weight (-100) (logSoftmax @1 input) target
(Float,[])
>>> input <- randn @'[3, 5, 2, 1, 2] @'D.Float @'( 'D.CPU, 0)
>>> target = fromJust [[[[1, 1]], [[0, 2]]], [[[0, 1]], [[1, 0]]], [[[4, 0]], [[1, 2]]]] :: CPUTensor 'D.Int64 '[3, 2, 1, 2]
>>> weight = ones @'[5] @'D.Float @'( 'D.CPU, 0)
>>> dtype &&& shape $ nllLoss @ReduceNone @3 @5 @'[2, 1, 2] weight (-100) (logSoftmax @1 input) target
(Float,[3,2,1,2])
>>> dtype &&& shape $ nllLoss @ReduceMean @3 @5 @'[2, 1, 2] weight (-100) (logSoftmax @1 input) target
(Float,[])

smoothL1Loss Source #

Arguments

:: forall reduction shape dtype device. KnownReduction reduction 
=> Tensor device dtype shape

prediction

-> Tensor device dtype shape

target

-> Tensor device dtype (ConditionalReduction shape reduction)

loss

smooth L1 loss TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ smoothL1Loss @ReduceNone (ones :: CPUTensor 'D.Float '[2,2]) (ones :: CPUTensor 'D.Float '[2,2])
(Float,[2,2])
>>> dtype &&& shape $ smoothL1Loss @ReduceSum (ones :: CPUTensor 'D.Float '[2,2]) (ones :: CPUTensor 'D.Float '[2,2])
(Float,[])

softMarginLoss Source #

Arguments

:: forall reduction shape dtype device. KnownReduction reduction 
=> Tensor device dtype shape

prediction

-> Tensor device dtype shape

target

-> Tensor device dtype (ConditionalReduction shape reduction)

loss

soft margin loss TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ softMarginLoss @ReduceNone (ones :: CPUTensor 'D.Float '[2,2]) (ones :: CPUTensor 'D.Float '[2,2])
(Float,[2,2])
>>> dtype &&& shape $ softMarginLoss @ReduceSum (ones :: CPUTensor 'D.Float '[2,2]) (ones :: CPUTensor 'D.Float '[2,2])
(Float,[])

elu Source #

Arguments

:: forall shape dtype a device. (Scalar a, StandardFloatingPointDTypeValidation device dtype) 
=> a

alpha

-> a

scale

-> a

input scale

-> Tensor device dtype shape

input

-> Tensor device dtype shape

output

elu TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ elu 0.1 0.1 0.3 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

hardTanh Source #

Arguments

:: forall shape dtype device. Float

minimum value

-> Float

maximum value

-> Tensor device dtype shape

input

-> Tensor device dtype shape

output

glu

-- >>> dtype &&& shape $ glu (ones :: CPUTensor 'D.Float '[3,2]) 1
-- (Float,[3,1])
-- >>> dtype &&& shape $ glu (ones :: CPUTensor 'D.Float '[3,2]) 3
-- (Float,[3,2])
-- glu :: Tensor device dtype shape -> Int -> Tensor device dtype shape
-- glu _input _dim = unsafePerformIO $ (ATen.cast2 ATen.Managed.glu_tl) _input _dim

hard tanh TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ hardTanh 0 1 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

leakyRelu Source #

Arguments

:: forall a shape dtype device. (Scalar a, StandardFloatingPointDTypeValidation device dtype) 
=> a

negative slope

-> Tensor device dtype shape

input

-> Tensor device dtype shape

output

leaky relu

>>> dtype &&& shape $ leakyRelu 0.01 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

logSigmoid Source #

Arguments

:: forall shape dtype device. StandardFloatingPointDTypeValidation device dtype 
=> Tensor device dtype shape

input

-> Tensor device dtype shape

output

logarithm of the sigmoid

>>> dtype &&& shape $ logSigmoid (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

softplus :: forall a shape dtype device. Scalar a => a -> a -> Tensor device dtype shape -> Tensor device dtype shape Source #

softplus TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? See https://pytorch.org/docs/stable/nn.functional.html?highlight=softplus#torch.nn.functional.softplus.

>>> dtype &&& shape &&& (\t -> D.asValue (toDynamic t) :: [[Float]]) $ softplus 1 20 (ones :: CPUTensor 'D.Float '[3,2])
(Float,([3,2],[[1.3132616,1.3132616],[1.3132616,1.3132616],[1.3132616,1.3132616]]))

softShrink Source #

Arguments

:: forall shape dtype device. Float

lambda

-> Tensor device dtype shape

input

-> Tensor device dtype shape

output

soft shrink TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> dtype &&& shape $ softShrink 0.2 (ones :: CPUTensor 'D.Float '[3,2])
(Float,[3,2])

adaptiveAvgPool2d Source #

Arguments

:: forall outputSize channelSize inputSize0 inputSize1 batchSize dtype device. All KnownNat '[channelSize, inputSize0, inputSize1, batchSize, Fst outputSize, Snd outputSize] 
=> Tensor device dtype '[batchSize, channelSize, inputSize0, inputSize1]

input

-> Tensor device dtype '[batchSize, channelSize, Fst outputSize, Snd outputSize]

output

adaptive averaged 2-D pooling TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = adaptiveAvgPool2d @'(8,16) (ones :: CPUTensor 'D.Float '[1,3,16,32])
>>> shape t
[1,3,8,16]
>>> :t t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 3, 8, 16]

mkldnnAdaptiveAvgPool2d Source #

Arguments

:: forall outputSize channelSize inputSize0 inputSize1 batchSize dtype device. All KnownNat '[channelSize, inputSize0, inputSize1, batchSize, Fst outputSize, Snd outputSize] 
=> Tensor device dtype '[batchSize, channelSize, inputSize0, inputSize1]

input

-> Tensor device dtype '[batchSize, channelSize, Fst outputSize, Snd outputSize]

output

MKLDNN adaptive averaged 2-D pooling TODO: probably only defined for floating point tensors, or maybe numeric type is lifted? TODO: broken? TODO: only defined for MKLDNN device? TODO: test for availability of MKLDNN device? TODO: merge with adaptiveAvgPool2d and dispatch based on (availability of MKLDNN) device in the function body?

-- >>> t = mkldnnAdaptiveAvgPool2d @'(8,16) (toMKLDNN (ones :: CPUTensor 'D.Float '[1,3,16,32]))
-- >>> shape t
-- [1,3,8,16]
-- >>> :t t
-- t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 3, 8, 16]

adaptiveAvgPool3d Source #

Arguments

:: forall outputSize channelSize inputSize0 inputSize1 inputSize2 batchSize dtype device. All KnownNat '[channelSize, inputSize0, inputSize1, inputSize2, batchSize, Fst3 outputSize, Snd3 outputSize, Trd3 outputSize] 
=> Tensor device dtype '[batchSize, channelSize, inputSize0, inputSize1, inputSize2]

input

-> Tensor device dtype '[batchSize, channelSize, Fst3 outputSize, Snd3 outputSize, Trd3 outputSize]

output

adaptive averaged 3-D pooling TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = adaptiveAvgPool3d @'(8,16,2) (ones :: CPUTensor 'D.Float '[1,3,16,32,4])
>>> shape t
[1,3,8,16,2]
>>> :t t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 3, 8, 16, 2]

adaptiveMaxPool2d Source #

Arguments

:: forall outputSize channelSize inputSize0 inputSize1 batchSize dtype device. All KnownNat '[channelSize, inputSize0, inputSize1, batchSize, Fst outputSize, Snd outputSize] 
=> Tensor device dtype '[batchSize, channelSize, inputSize0, inputSize1]

input

-> (Tensor device dtype '[batchSize, channelSize, Fst outputSize, Snd outputSize], Tensor device 'Int64 '[batchSize, channelSize, Fst outputSize, Snd outputSize])

output

adaptive 2-D max-pool TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> (t, t') = adaptiveMaxPool2d @'(8,16) (ones :: CPUTensor 'D.Float '[1,3,16,32])
>>> shape t
[1,3,8,16]
>>> :t t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 3, 8, 16]

adaptiveMaxPool3d Source #

Arguments

:: forall outputSize channelSize inputSize0 inputSize1 inputSize2 batchSize dtype device. All KnownNat '[channelSize, inputSize0, inputSize1, inputSize2, batchSize, Fst3 outputSize, Snd3 outputSize, Trd3 outputSize] 
=> Tensor device dtype '[batchSize, channelSize, inputSize0, inputSize1, inputSize2]

input

-> (Tensor device dtype '[batchSize, channelSize, Fst3 outputSize, Snd3 outputSize, Trd3 outputSize], Tensor device 'Int64 '[batchSize, channelSize, Fst3 outputSize, Snd3 outputSize, Trd3 outputSize])

output

adaptive 3-D max-pool TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> (t, t') = adaptiveMaxPool3d @'(8,16,2) (ones :: CPUTensor 'D.Float '[1,3,16,32,4])
>>> shape t
[1,3,8,16,2]
>>> :t t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 3, 8, 16, 2]

avgPool2d Source #

Arguments

:: forall kernelSize stride padding channelSize inputSize0 inputSize1 batchSize outputSize0 outputSize1 dtype device. (All KnownNat '[Fst kernelSize, Snd kernelSize, Fst stride, Snd stride, Fst padding, Snd padding, channelSize, inputSize0, inputSize1, batchSize], ConvSideCheck inputSize0 (Fst kernelSize) (Fst stride) (Fst padding) outputSize0, ConvSideCheck inputSize1 (Snd kernelSize) (Snd stride) (Snd padding) outputSize1) 
=> Tensor device dtype '[batchSize, channelSize, inputSize0, inputSize1]

input

-> Tensor device dtype '[batchSize, channelSize, outputSize0, outputSize1]

output

averaged 2-D pooling TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = avgPool2d @'(1,1) @'(1,1) @'(0,0) (ones :: CPUTensor 'D.Float '[1,3,4,5])
>>> shape t
[1,3,4,5]
>>> :t t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 3, 4, 5]

avgPool3d Source #

Arguments

:: forall kernelSize stride padding channelSize inputSize0 inputSize1 inputSize2 batchSize outputSize0 outputSize1 outputSize2 dtype device. (All KnownNat '[Fst3 kernelSize, Snd3 kernelSize, Trd3 kernelSize, Fst3 stride, Snd3 stride, Trd3 stride, Fst3 padding, Snd3 padding, Trd3 padding, channelSize, inputSize0, inputSize1, inputSize2, batchSize], ConvSideCheck inputSize0 (Fst3 kernelSize) (Fst3 stride) (Fst3 padding) outputSize0, ConvSideCheck inputSize1 (Snd3 kernelSize) (Snd3 stride) (Snd3 padding) outputSize1, ConvSideCheck inputSize2 (Trd3 kernelSize) (Trd3 stride) (Trd3 padding) outputSize2) 
=> Tensor device dtype '[batchSize, channelSize, inputSize0, inputSize1, inputSize2]

input

-> Tensor device dtype '[batchSize, channelSize, outputSize0, outputSize1, outputSize2]

output

averaged 3-D pooling TODO: probably only defined for floating point tensors, or maybe numeric type is lifted?

>>> t = avgPool3d @'(1,1,1) @'(1,1,1) @'(0,0,0) (ones :: CPUTensor 'D.Float '[1,3,4,5,6])
>>> shape t
[1,3,4,5,6]
>>> :t t
t :: Tensor '( 'D.CPU, 0) 'D.Float '[1, 3, 4, 5, 6]

type family Upsample2dCheck shape h w where ... Source #

Equations

Upsample2dCheck (b ': (c ': (w ': (h ': '[])))) h' w' = If (h <=? h') (If (w <=? w') (b ': (c ': (w' ': (h' ': '[])))) (TypeError (Text "Target width must be greater than current width!"))) (TypeError (Text "Target height must be greater than current height!")) 
Upsample2dCheck _ _ _ = TypeError (Text "Shape must be 4 dimensional!") 

type Upsample2d shape h w = Upsample2dCheck shape h w Source #

upsample_bilinear2d Source #

Arguments

:: forall w h shape dtype device. (KnownNat h, KnownNat w, All KnownNat shape) 
=> Bool

if True, the corner pixels of the input and output tensors are aligned, thus preserving the values at those pixels.

-> Tensor device dtype shape 
-> Tensor device dtype (Upsample2d shape h w) 

Applies a 2D bilinear upsampling to an input signal composed of several input channels.

>>> (dtype &&& shape) $ upsample_bilinear2d @3 @5 False (ones :: CPUTensor 'D.Float '[2,3,2,2])
(Float,[2,3,3,5])

upsample_bicubic2d :: forall w h shape dtype device. (KnownNat h, KnownNat w, All KnownNat shape) => Bool -> Tensor device dtype shape -> Tensor device dtype (Upsample2d shape h w) Source #

Applies a 2D bicubic upsampling to an input signal composed of several input channels.

>>> (dtype &&& shape) $ upsample_bicubic2d @3 @5 False (ones :: CPUTensor 'D.Float '[2,3,2,2])
(Float,[2,3,3,5])

upsample_nearest2d :: forall w h shape dtype device. (KnownNat h, KnownNat w, All KnownNat shape) => Tensor device dtype shape -> Tensor device dtype (Upsample2d shape h w) Source #

Applies a 2D nearest-neighbor upsampling to an input signal composed of several input channels.

>>> (dtype &&& shape) $ upsample_nearest2d @3 @5 (ones :: CPUTensor 'D.Float '[2,3,2,2])
(Float,[2,3,3,5])