hasktorch-0.2.0.0: Functional differentiable programming in Haskell
Safe Haskell: Safe-Inferred
Language: Haskell2010

Torch.Functional

Documentation

newtype Diag Source #

Constructors

Diag Int 

data CeilMode Source #

Constructors

Ceil 
Floor 

Instances

Show CeilMode Source # 
Instance details

Defined in Torch.Functional

Eq CeilMode Source # 
Instance details

Defined in Torch.Functional

Castable CeilMode CBool Source # 
Instance details

Defined in Torch.Functional

Methods

cast :: CeilMode -> (CBool -> IO r) -> IO r Source #

uncast :: CBool -> (CeilMode -> IO r) -> IO r Source #

data KeepDim Source #

Constructors

KeepDim 
RemoveDim 

Instances

Show KeepDim Source # 
Instance details

Defined in Torch.Functional

Eq KeepDim Source # 
Instance details

Defined in Torch.Functional

newtype Dim Source #

Constructors

Dim Int 

data Reduction Source #

Instances

Show Reduction Source # 
Instance details

Defined in Torch.Functional

Eq Reduction Source # 
Instance details

Defined in Torch.Functional

KnownReduction 'ReduceMean Source # 
Instance details

Defined in Torch.Typed.Functional

KnownReduction 'ReduceNone Source # 
Instance details

Defined in Torch.Typed.Functional

KnownReduction 'ReduceSum Source # 
Instance details

Defined in Torch.Typed.Functional

Castable Reduction Int64 Source # 
Instance details

Defined in Torch.Functional

Methods

cast :: Reduction -> (Int64 -> IO r) -> IO r Source #

uncast :: Int64 -> (Reduction -> IO r) -> IO r Source #

data Tri Source #

Constructors

Upper 
Lower 

Instances

Show Tri Source # 
Instance details

Defined in Torch.Functional

Eq Tri Source # 
Instance details

Defined in Torch.Functional

Methods

(==) :: Tri -> Tri -> Bool Source #

(/=) :: Tri -> Tri -> Bool Source #

mean Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns the mean value of all elements in the input tensor.

std Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns the standard deviation of all elements in the input tensor.

var Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns the variance of all elements in the input tensor.

sumAll Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns the sum of all elements in the input tensor.

abs Source #

Arguments

:: Tensor

input

-> Tensor

output

Computes the element-wise absolute value of the given input tensor.

frac Source #

Arguments

:: Tensor

input

-> Tensor

output

Computes the fractional portion of each element in input: out_i = input_i - (floor . abs) input_i * (sign input_i). For example, the fractional part of 2.5 is 0.5 and that of -1.7 is -0.7.

argmax Source #

Arguments

:: Dim

the dimension to reduce

-> KeepDim

whether the output tensor has dim retained or not

-> Tensor

input

-> Tensor

output

Returns the indices of the maximum values of the input tensor across the given dimension.
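
A minimal usage sketch (a hypothetical example; asTensor is assumed from Torch.Tensor): Dim selects the axis to reduce, and KeepDim controls whether that axis survives with size 1.

import Torch.Functional (Dim (..), KeepDim (..), argmax)
import Torch.Tensor (asTensor)

main :: IO ()
main = do
  let t = asTensor [[1.0, 5.0, 2.0], [7.0, 0.0, 3.0 :: Float]]
  -- per-row index of the maximum; expected [1, 0]
  print (argmax (Dim 1) RemoveDim t)
  -- same reduction with the reduced axis kept at size 1; expected [[1], [0]]
  print (argmax (Dim 1) KeepDim t)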

add Source #

Arguments

:: Tensor

input

-> Tensor

other

-> Tensor

output

Adds each element of the tensor other to the corresponding element of the tensor input and returns a new resulting tensor.
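
A hedged sketch of the element-wise addition, assuming add broadcasts like the underlying ATen operation and that ones' and shape are available from Torch.TensorFactories and Torch.Tensor:

import Torch.Functional (add)
import Torch.Tensor (shape)
import Torch.TensorFactories (ones')

main :: IO ()
main = do
  -- a [4,1] tensor plus a [3] tensor broadcasts to [4,3]
  print (shape (add (ones' [4, 1]) (ones' [3])))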

mul Source #

Arguments

:: Tensor

input

-> Tensor

other

-> Tensor

output

Multiplies each element of the input tensor by the corresponding element of the tensor other and returns a new resulting tensor.

sub Source #

Arguments

:: Tensor

input

-> Tensor

other

-> Tensor

output

Element-wise subtraction of the tensor other from the input tensor; returns a new resulting tensor.

div Source #

Arguments

:: Tensor

input

-> Tensor

other

-> Tensor 

Element-wise division of the input tensor by the tensor other; returns a new resulting tensor.

ceil Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns a new tensor with the ceiling of the elements of input, the smallest integers greater than or equal to each element.

floor Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns a new tensor with the floor of the elements of input, the largest integers less than or equal to each element.

min Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns the minimum value of all elements in the input tensor.

max Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns the maximum value of all elements in the input tensor.

median Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns the median value of all elements in the input tensor.

addScalar Source #

Arguments

:: Scalar a 
=> a

summand

-> Tensor

input

-> Tensor

output

Adds the scalar to each element of the input tensor and returns a new resulting tensor.

subScalar Source #

Arguments

:: Scalar a 
=> a

subtrahend

-> Tensor

input

-> Tensor

output

Subtracts the scalar from each element of the input tensor and returns a new resulting tensor.

mulScalar Source #

Arguments

:: Scalar a 
=> a

multiplier

-> Tensor

input

-> Tensor

output

Multiplies each element of the input tensor by the scalar and returns a new resulting tensor.

divScalar Source #

Arguments

:: Scalar a 
=> a

divisor

-> Tensor

input

-> Tensor

output

Divides each element of the input tensor by the scalar and returns a new resulting tensor.

matmul Source #

Arguments

:: Tensor

first tensor for matrix multiplication

-> Tensor

second tensor for matrix multiplication

-> Tensor

output

Matrix product of two tensors.

The behavior depends on the dimensionality of the tensors as follows:

If both tensors are 1-dimensional, the dot product (scalar) is returned. If both arguments are 2-dimensional, the matrix-matrix product is returned. If the first argument is 1-dimensional and the second argument is 2-dimensional, a 1 is prepended to its dimension for the purpose of the matrix multiply; after the matrix multiply, the prepended dimension is removed. If the first argument is 2-dimensional and the second argument is 1-dimensional, the matrix-vector product is returned. If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiply and removed after. The non-matrix (i.e. batch) dimensions are broadcast (and thus must be broadcastable). For example, if input is a (j×1×n×m) tensor and other is a (k×m×p) tensor, out will be a (j×k×n×p) tensor.
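
A small sketch of the dimensionality rules above (a hypothetical example; ones' and shape are assumed from Torch.TensorFactories and Torch.Tensor):

import Torch.Functional (matmul)
import Torch.Tensor (shape)
import Torch.TensorFactories (ones')

main :: IO ()
main = do
  -- 2-D x 2-D: an ordinary matrix product, [3,4] x [4,5] -> [3,5]
  print (shape (matmul (ones' [3, 4]) (ones' [4, 5])))
  -- the batched case from the text: (j×1×n×m) x (k×m×p) -> (j×k×n×p)
  print (shape (matmul (ones' [10, 1, 3, 4]) (ones' [5, 4, 2])))
  -- expected: [10, 5, 3, 2]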

embedding Source #

Arguments

:: Bool

whether or not to scale the gradient by the frequencies

-> Bool

whether or not the embedding is sparse

-> Tensor

weights

-> Int

padding

-> Tensor

indices

-> Tensor

output

A simple lookup table that looks up embeddings in a fixed dictionary of a fixed size. This function is often used to retrieve word embeddings using indices. The inputs are the embedding matrix and a list of indices; the output is the corresponding word embeddings.

embedding' Source #

Arguments

:: Tensor

weights

-> Tensor

indices

-> Tensor

output
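
A minimal sketch of the defaulted variant (a hypothetical example; asTensor, shape, and ones' are assumed from Torch.Tensor and Torch.TensorFactories, with asTensor producing an Int64 index tensor from a list of Int):

import Torch.Functional (embedding')
import Torch.Tensor (asTensor, shape)
import Torch.TensorFactories (ones')

main :: IO ()
main = do
  let weights = ones' [10, 4]  -- vocabulary of 10, embedding size 4
      indices = asTensor ([1, 3, 5] :: [Int])
  -- three indices looked up in a [10,4] table yield a [3,4] result
  print (shape (embedding' weights indices))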

oneHot Source #

Arguments

:: Int

number of classes

-> Tensor

input

-> Tensor 

A one-hot encoding of the given input. The encoding is based on the given number of classes.
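
A small sketch (a hypothetical example; asTensor is assumed from Torch.Tensor, producing an Int64 index tensor):

import Torch.Functional (oneHot)
import Torch.Tensor (asTensor)

main :: IO ()
main = do
  -- 3-class encoding of the index vector [0, 2];
  -- expected rows: [1,0,0] and [0,0,1]
  print (oneHot 3 (asTensor ([0, 2] :: [Int])))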

erf Source #

Arguments

:: Tensor

input

-> Tensor

output

Computes the error function of each element

erfc Source #

Arguments

:: Tensor

input

-> Tensor

output

Computes the complementary error function of each element of input

erfinv Source #

Arguments

:: Tensor

input

-> Tensor

output

Computes the inverse error function of each element of input. The inverse error function is defined on the range (−1, 1) and satisfies erfinv(erf(x)) = x.

lgamma Source #

Arguments

:: Tensor

input

-> Tensor

output

Computes the logarithm of the gamma function on input.

digamma Source #

Arguments

:: Tensor

input

-> Tensor

output

Computes the logarithmic derivative of the gamma function on input.

polygamma Source #

Arguments

:: Int

n

-> Tensor

input

-> Tensor

output

Computes the nth derivative of the digamma function on input. n ≥ 0 is called the order of the polygamma function.

mvlgamma Source #

Arguments

:: Int

p

-> Tensor

input

-> Tensor

output

Computes the multivariate log-gamma function with dimension p element-wise. All elements must be greater than (p-1)/2, otherwise an error is thrown.

exp Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns a new tensor with the exponential of the elements of the input tensor input.

log1p :: Tensor -> Tensor Source #

Returns a new tensor with the natural logarithm of (1 + input).

log2 Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns a new tensor with the logarithm to the base 2 of the elements of input.

log Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns a new tensor with the natural logarithm of the elements of input.

log10 Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns a new tensor with the logarithm to the base 10 of the elements of input.

pow Source #

Arguments

:: Scalar a 
=> a

exponent

-> Tensor

input

-> Tensor

output

Takes the power of each element in input with exponent and returns a tensor with the result.

powt Source #

Arguments

:: Tensor

input

-> Tensor

exponent

-> Tensor

output

Takes the power of each element in input with exponent and returns a tensor with the result. Exponent is a tensor with the same number of elements as input.

relu Source #

Arguments

:: Tensor

input

-> Tensor

output

Applies the rectified linear unit function element-wise.

elu Source #

Arguments

:: Scalar s 
=> s

alpha value for ELU formulation

-> Tensor

input

-> Tensor

output

Applies Exponential linear unit function element-wise, with alpha input, \(\text{ELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x) - 1))\)

elu' Source #

Arguments

:: Tensor

input

-> Tensor

output

Applies the exponential linear unit function element-wise with a default alpha value of 1.

selu Source #

Arguments

:: Tensor

input

-> Tensor

output

Applies element-wise, \(\text{SELU}(x) = scale * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1))\) , with α=1.6732632423543772848170429916717 and scale=1.0507009873554804934193349852946.

celu Source #

Arguments

:: Float

alpha

-> Tensor

input

-> Tensor

output

Applies element-wise, \(\text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))\).

sigmoid Source #

Arguments

:: Tensor

input

-> Tensor

output

Applies the sigmoid function element-wise.

softmax Source #

Arguments

:: Dim

dimension

-> Tensor

input

-> Tensor

output

Applies a softmax function. It is applied to all slices along dim, and will re-scale them so that the elements lie in the range [0, 1] and sum to 1.
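
A minimal sketch (a hypothetical example; asTensor is assumed from Torch.Tensor, and sumAll is the reduction documented above):

import Torch.Functional (Dim (..), softmax, sumAll)
import Torch.Tensor (asTensor)

main :: IO ()
main = do
  let probs = softmax (Dim 0) (asTensor [1.0, 2.0, 3.0 :: Float])
  print probs          -- values in [0, 1], increasing with the logits
  print (sumAll probs) -- expected to be 1 up to floating-point error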

logSoftmax Source #

Arguments

:: Dim

dimension

-> Tensor

input

-> Tensor

output

Applies a softmax followed by a logarithm. While mathematically equivalent to log(softmax(x)), doing these two operations separately is slower and numerically unstable. This function uses an alternative formulation to compute the output and gradient correctly.

threshold Source #

Arguments

:: Float

threshold

-> Float

value

-> Tensor

input

-> Tensor

output

Thresholds each element of the input Tensor.

sin Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns a new tensor with the sine of the elements of input.

cos Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns a new tensor with the cosine of the elements of input.

tan Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns a new tensor with the tangent of the elements of input.

sinh Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns a new tensor with the hyperbolic sine of the elements of input.

cosh Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns a new tensor with the hyperbolic cosine of the elements of input.

tanh Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns a new tensor with the hyperbolic tangent of the elements of input.

sqrt Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns a new tensor with the square root of the elements of input.

gt Source #

Arguments

:: Tensor

input

-> Tensor

other

-> Tensor

output

Computes input > other element-wise. The second argument can be a number or a tensor whose shape is broadcastable with the first argument.

lt Source #

Arguments

:: Tensor

input

-> Tensor

other

-> Tensor

output

Computes input < other element-wise. The second argument can be a number or a tensor whose shape is broadcastable with the first argument.

ge Source #

Arguments

:: Tensor

input

-> Tensor

other

-> Tensor

output

Computes input >= other element-wise. The second argument can be a number or a tensor whose shape is broadcastable with the first argument.

le Source #

Arguments

:: Tensor

input

-> Tensor

other

-> Tensor

output

Computes input <= other element-wise. The second argument can be a number or a tensor whose shape is broadcastable with the first argument.

eq Source #

Arguments

:: Tensor

input

-> Tensor

other

-> Tensor

output

Computes input == other element-wise. The second argument can be a number or a tensor whose shape is broadcastable with the first argument.

take Source #

Arguments

:: Tensor

index

-> Tensor

input

-> Tensor

output

Returns a new tensor with the elements of input at the given indices. The input tensor is treated as if it were viewed as a 1-D tensor. The result takes the same shape as the indices.

maskedSelect Source #

Arguments

:: Tensor

mask

-> Tensor

input

-> Tensor

output

Returns a new 1-D tensor which indexes the input tensor according to the boolean mask mask which is a BoolTensor. The shapes of the mask tensor and the input tensor don’t need to match, but they must be broadcastable.

nonzero Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns a tensor containing the indices of all non-zero elements of input. Each row of the result holds the index of one non-zero element of input.

isclose Source #

Arguments

:: Double

rtol

-> Double

atol

-> Bool

equal_nan

-> Tensor

self

-> Tensor

other

-> Tensor 

isnan Source #

Arguments

:: Tensor

self

-> Tensor 

isNonzero Source #

Arguments

:: Tensor

self

-> Bool 

isSameSize Source #

Arguments

:: Tensor

self

-> Tensor

other

-> Bool 

isSigned Source #

Arguments

:: Tensor

input

-> Bool

True if the data type of input is a signed type

ne Source #

Arguments

:: Tensor

input

-> Tensor

other

-> Tensor

output

Computes input /= other element-wise. The second argument can be a number or a tensor whose shape is broadcastable with the first argument.

toDType Source #

Arguments

:: DType

data type to cast to

-> Tensor

input

-> Tensor

output

Casts the input tensor to the given DType, where DType represents the data type of a tensor in hasktorch.
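
A one-line usage sketch (a hypothetical example; asTensor is assumed from Torch.Tensor and the DType constructors from Torch.DType):

import Torch.DType (DType (..))
import Torch.Functional (toDType)
import Torch.Tensor (asTensor)

main :: IO ()
main = do
  -- an Int64 tensor cast to Float
  print (toDType Float (asTensor ([1, 2, 3] :: [Int])))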

squeezeAll Source #

Arguments

:: Tensor

input

-> Tensor

output

Removes all dimensions of size 1 from the input tensor.

squeezeDim Source #

Arguments

:: Int

dim

-> Tensor

input

-> Tensor

output

Removes the given dimension from the input tensor if it has size 1.

cummax Source #

Arguments

:: Int

dim

-> Tensor

input

-> (Tensor, Tensor)

output (values, indices)

Returns a tuple (values, indices) where values is the cumulative maximum of elements of input in the dimension dim, and indices is the index location of each maximum value found in the dimension dim.

cummin Source #

Arguments

:: Int

dim

-> Tensor

input

-> (Tensor, Tensor)

output (values, indices)

Returns a tuple (values, indices) where values is the cumulative minimum of elements of input in the dimension dim, and indices is the index location of each minimum value found in the dimension dim.

cumprod Source #

Arguments

:: Int

dim

-> DType

dtype

-> Tensor

input

-> Tensor

output

Returns the cumulative product of elements of input in the dimension dim. For example, if input is a vector of size N, the result will also be a vector of size N, with elements \(y_i = x_1 \times x_2 \times \cdots \times x_i\).

cumsum Source #

Arguments

:: Int

dim

-> DType

dtype

-> Tensor

input

-> Tensor

output

Returns the cumulative sum of elements of input in the dimension dim. For example, if input is a vector of size N, the result will also be a vector of size N, with elements \(y_i = x_1 + x_2 + \cdots + x_i\).

binaryCrossEntropyLoss Source #

Arguments

:: Reduction

Specifies the reduction to apply to the output

-> Tensor

target

-> Tensor

weight

-> Tensor

input

-> Tensor

output

Function that measures the Binary Cross Entropy between the target and the output.

binaryCrossEntropyLoss' Source #

Arguments

:: Tensor

target

-> Tensor

input

-> Tensor

output

Binary Cross Entropy with weights defaulted to 1.0 and reduction defaulted to ReduceMean.

binaryCrossEntropyWithLogits Source #

Arguments

:: Reduction

Specifies the reduction to apply to the output

-> Tensor

target

-> Tensor

weight

-> Tensor

pos_weight

-> Tensor

input

-> Tensor

output

This loss combines a Sigmoid layer and the BCELoss in one single class. This version is more numerically stable than using a plain Sigmoid followed by a BCELoss as, by combining the operations into one layer, we take advantage of the log-sum-exp trick for numerical stability.

mseLoss Source #

Arguments

:: Tensor

target tensor

-> Tensor

input

-> Tensor

output

Creates a criterion that measures the mean squared error (squared L2 norm) between each element in the input and target.
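
A worked sketch (a hypothetical example; asTensor is assumed from Torch.Tensor). Note that the target comes first, per the argument order above:

import Torch.Functional (mseLoss)
import Torch.Tensor (asTensor)

main :: IO ()
main = do
  let target = asTensor [1.0, 2.0 :: Float]
      input  = asTensor [1.5, 2.5 :: Float]
  -- mean of squared differences: (0.5^2 + 0.5^2) / 2 = 0.25
  print (mseLoss target input)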

nllLoss' Source #

Arguments

:: Tensor

target tensor

-> Tensor

input

-> Tensor

output

The negative log likelihood loss.

cosineSimilarity Source #

Arguments

:: Dim

dimension of vectors (default=1)

-> Double

small value to avoid division by 0 (default=1e-8)

-> Tensor

x1

-> Tensor

x2

-> Tensor

output

Returns cosine similarity between x1 and x2, computed along dim.

cosineSimilarity' Source #

Arguments

:: Tensor

x1

-> Tensor

x2

-> Tensor

output

Returns cosine similarity with defaulted options.

ctcLoss Source #

Arguments

:: Bool

zero_infinity - Whether to zero infinite losses and the associated gradients (False by default). Infinite losses mainly occur when the inputs are too short to be aligned to the targets.

-> Int

blank label

-> Reduction

reduction

-> [Int]

input_lengths

-> [Int]

target_lengths

-> Tensor

log_probs

-> Tensor

targets

-> Tensor

output

The Connectionist Temporal Classification loss. Calculates loss between a continuous (unsegmented) time series and a target sequence. CTCLoss sums over the probability of possible alignments of input to target, producing a loss value which is differentiable with respect to each input node. The alignment of input to target is assumed to be “many-to-one”, which limits the length of the target sequence: it must be ≤ the input length.

ctcLoss' Source #

Arguments

:: Reduction

reduction

-> [Int]

input lengths

-> [Int]

target lengths

-> Tensor

log probs

-> Tensor

targets

-> Tensor

output

Returns CTC loss with defaulted options.

dist Source #

Arguments

:: Float

p

-> Tensor

other

-> Tensor

input

-> Tensor

output

Returns the p-norm of (input - other). The shapes of input and other must be broadcastable.

hingeEmbeddingLoss Source #

Arguments

:: Double

margin

-> Reduction

reduction

-> Tensor

target

-> Tensor

self

-> Tensor

output

Measures the loss given an input tensor x and a labels tensor y (containing 1 or -1). This is usually used for measuring whether two inputs are similar or dissimilar, e.g. using the L1 pairwise distance as x, and is typically used for learning nonlinear embeddings or semi-supervised learning.

marginRankingLoss Source #

Arguments

:: Tensor

input1

-> Tensor

input2

-> Tensor

target

-> Double

margin

-> Reduction

reduction

-> Tensor

output

nllLoss2D :: Reduction -> Int -> Tensor -> Tensor -> Tensor -> Tensor Source #

The 2D negative log likelihood loss

multiMarginLoss Source #

Arguments

:: Reduction

reduction

-> Float

p

-> Float

margin

-> Tensor

input

-> Tensor

target

-> Tensor

weight

-> Tensor

output

Creates a criterion that optimizes a multi-class classification hinge loss (margin-based loss) between input \(x\) (a 2D mini-batch Tensor) and output \(y\) (which is a 1D tensor of target class indices)

multiLabelMarginLoss :: Reduction -> Tensor -> Tensor -> Tensor Source #

Creates a criterion that optimizes a multi-label one-versus-all loss based on max-entropy, between input \(x\) and target \(y\) of size \((N,C)\) .

klDiv Source #

Arguments

:: Reduction

reduction

-> Tensor

self

-> Tensor

target

-> Tensor

output

The Kullback-Leibler divergence loss. KL divergence is a useful distance measure for continuous distributions and is often useful when performing direct regression over the space of (discretely sampled) continuous output distributions. As with NLLLoss, the input given is expected to contain log-probabilities and is not restricted to a 2D Tensor. The targets are interpreted as probabilities by default, but could be considered as log-probabilities with log_target set to True. This criterion expects a target Tensor of the same size as the input Tensor.

smoothL1Loss Source #

Arguments

:: Reduction

reduction

-> Tensor

self

-> Tensor

target

-> Tensor

output

Creates a criterion that uses a squared term if the absolute element-wise error falls below 1 and an L1 term otherwise. It is less sensitive to outliers than the MSELoss and in some cases prevents exploding gradients (e.g. see Fast R-CNN paper by Ross Girshick). Also known as the Huber loss.

softMarginLoss Source #

Arguments

:: Reduction

reduction

-> Tensor

input

-> Tensor

target

-> Tensor

output

Creates a criterion that optimizes a two-class classification logistic loss between input tensor \(x\) and target tensor \(y\) (containing 1 or -1).

adaptiveMaxPool1d Source #

Arguments

:: Int

output size

-> Tensor

input

-> (Tensor, Tensor)

output

Applies a 1D adaptive max pooling over an input signal composed of several input planes.

adaptiveMaxPool2d Source #

Arguments

:: (Int, Int)

output size

-> Tensor

input

-> (Tensor, Tensor)

output

Applies a 2D adaptive max pooling over an input signal composed of several input planes.

adaptiveMaxPool3d Source #

Arguments

:: (Int, Int)

output size

-> Tensor

input

-> (Tensor, Tensor) 

Applies a 3D adaptive max pooling over an input signal composed of several input planes

maxPool1dWithIndices Source #

Arguments

:: Int

kernel size

-> Int

stride

-> Int

padding

-> Int

dilation

-> CeilMode

ceil mode

-> Tensor

input

-> (Tensor, Tensor)

output, indices

Applies a 1D max pooling over an input signal composed of several input planes, returning both the pooled values and the indices of the maxima.

maxPool1d Source #

Arguments

:: Int

kernel size

-> Int

stride

-> Int

padding

-> Int

dilation

-> CeilMode

ceil mode

-> Tensor

input

-> Tensor

output

Applies a 1D max pooling over an input signal composed of several input planes.

maxPool2d Source #

Arguments

:: (Int, Int)

kernel size

-> (Int, Int)

stride

-> (Int, Int)

padding

-> (Int, Int)

dilation

-> CeilMode

ceil mode

-> Tensor

input

-> Tensor

output

Applies a 2D max pooling over an input signal composed of several input planes.

maxPool3d Source #

Arguments

:: (Int, Int, Int)

kernel size

-> (Int, Int, Int)

stride

-> (Int, Int, Int)

padding

-> (Int, Int, Int)

dilation

-> CeilMode

ceil mode

-> Tensor

input

-> Tensor

output

Applies a 3D max pooling over an input signal composed of several input planes.

maxPool2dDim Source #

Arguments

:: (Int, Int)

kernel size

-> (Int, Int)

stride

-> (Int, Int)

padding

-> (Int, Int)

dilation

-> CeilMode

Ceiling or Floor

-> (Int, Int)

image dimensions

-> (Int, Int)

height, width after maxPool

Calculates the resulting dimensions of a 2D max pooling operation; see https://pytorch.org/docs/master/generated/torch.nn.MaxPool2d.html#torch.nn.MaxPool2d
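
A worked sketch of the dimension arithmetic, following the PyTorch formula out = floor((in + 2*padding - dilation*(kernel - 1) - 1) / stride) + 1:

import Torch.Functional (CeilMode (..), maxPool2dDim)

main :: IO ()
main = do
  -- a 28x28 input, 3x3 kernel, stride 2, padding 1, dilation 1:
  -- floor ((28 + 2*1 - 1*(3 - 1) - 1) / 2) + 1 = 14 per dimension
  print (maxPool2dDim (3, 3) (2, 2) (1, 1) (1, 1) Floor (28, 28))
  -- expected: (14, 14)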

avgPool1d Source #

Arguments

:: Int

kernel size

-> Int

stride

-> Int

padding

-> CeilMode

ceil mode

-> Bool

count include pad

-> Tensor

input

-> Tensor

output

Applies a 1D average pooling over an input signal composed of several input planes.

avgPool1d' Source #

Arguments

:: Int

kernel size

-> Int

stride

-> Int

padding

-> Tensor

input

-> Tensor

output

adaptiveAvgPool1d Source #

Arguments

:: Int

output size

-> Tensor

input

-> Tensor

output

Applies a 1D adaptive average pooling over an input signal composed of several input planes.

adaptiveAvgPool2d Source #

Arguments

:: (Int, Int)

output size (Height * Width)

-> Tensor

input

-> Tensor

output

Applies a 2D adaptive average pooling over an input signal composed of several input planes.

adaptiveAvgPool3d Source #

Arguments

:: (Int, Int, Int)

output size (Depth * Height * Width)

-> Tensor

input

-> Tensor

output

Applies a 3D adaptive average pooling over an input signal composed of several input planes.

inverse Source #

Arguments

:: Tensor

input

-> Tensor

output

Takes the inverse of the square matrix input. input can be batches of 2D square tensors, in which case this function returns a tensor composed of individual inverses.

triangularSolve Source #

Arguments

:: Tensor

A

-> Bool

upper

-> Bool

transpose

-> Bool

unitriangular

-> Tensor

input

-> (Tensor, Tensor)

output

Solves a system of equations with a triangular coefficient matrix A and multiple right-hand sides b.

symeig Source #

Arguments

:: Bool

bool which controls whether eigenvectors have to be computed

-> Tri

controls whether to consider upper-triangular or lower-triangular region

-> Tensor

input tensor

-> (Tensor, Tensor)

output tensors

This function returns eigenvalues and eigenvectors of a real symmetric matrix input or a batch of real symmetric matrices, represented by a namedtuple (eigenvalues, eigenvectors).

eig Source #

Arguments

:: Bool

bool to compute both eigenvalues and eigenvectors; otherwise, only eigenvalues will be computed

-> Tensor

input (square matrix) for which the eigen values and eigen vectors are to be computed

-> (Tensor, Tensor)

output tensors

Computes the eigenvalues and eigenvectors of a real square matrix

svd Source #

Arguments

:: Bool

controls the shape of returned U and V

-> Bool

option whether to compute U and V or not

-> Tensor

input

-> (Tensor, Tensor, Tensor)

output tuple of tensors

This function returns a namedtuple (U, S, V) which is the singular value decomposition of an input real matrix or batches of real matrices input such that input = U * diag(S) * V^T.

cholesky Source #

Arguments

:: Tri

flag that indicates whether to return a upper or lower triangular matrix.

-> Tensor

input

-> Tensor

output

Computes the Cholesky decomposition of a symmetric positive-definite matrix A or for batches of symmetric positive-definite matrices.

choleskySolve Source #

Arguments

:: Tri

bool whether to consider the Cholesky factor as a lower or upper triangular matrix

-> Tensor

input matrix b

-> Tensor

input matrix u

-> Tensor

output

Solves a linear system of equations with a positive semidefinite matrix to be inverted given its Cholesky factor matrix u.

solve Source #

Arguments

:: Tensor

input matrix

-> Tensor

input square matrix

-> (Tensor, Tensor)

output tuple with solution and LU

This function returns the solution to the system of linear equations represented by AX = B and the LU factorization of A, in order, as a namedtuple (solution, LU). LU contains the L and U factors of the LU factorization of A.

choleskyInverse Source #

Arguments

:: Tri

upper or lower triangle

-> Tensor

input

-> Tensor

solution

Computes the inverse of a symmetric positive-definite matrix given its Cholesky factor matrix u.

geqrf Source #

Arguments

:: Tensor

input

-> (Tensor, Tensor)

a, tau output matrices (see https://software.intel.com/en-us/node/521004)

This is a low-level function for calling LAPACK directly. This function returns a namedtuple (a, tau) as defined in LAPACK documentation for geqrf.

orgqr Source #

Arguments

:: Tensor

the a from geqrf function

-> Tensor

the tau from geqrf function

-> Tensor

output

Computes the orthogonal matrix Q of a QR factorization, from the (input, input2) tuple returned by geqrf function. This directly calls the underlying LAPACK function ?orgqr. See LAPACK documentation for orgqr for further details.

ormqr Source #

Arguments

:: Tensor

input2

-> Tensor

input3

-> Bool

left

-> Bool

transpose

-> Tensor

input

-> Tensor

output

Multiplies mat (given by input3) by the orthogonal Q matrix of the QR factorization formed by torch.geqrf() that is represented by (a, tau) (given by (input, input2)). This directly calls the underlying LAPACK function ?ormqr. See LAPACK documentation for ormqr for further details.

luSolve Source #

Arguments

:: Tensor

LU_data

-> Tensor

LU_pivots

-> Tensor

input

-> Tensor

output

Returns the LU solve of the linear system Ax = b using the partially pivoted LU factorization of A from torch.lu().

dropout Source #

Arguments

:: Double

dropout probability

-> Bool

whether or not to activate dropout

-> Tensor

input

-> IO Tensor

output

During training, randomly zeroes some of the elements of the input tensor with probability p using samples from a Bernoulli distribution.
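
A minimal sketch (a hypothetical example; ones' is assumed from Torch.TensorFactories). The result is random, hence the IO return type:

import Torch.Functional (dropout)
import Torch.TensorFactories (ones')

main :: IO ()
main = do
  -- training mode: roughly half the elements are zeroed and the
  -- survivors are rescaled by 1/(1-p)
  dropped <- dropout 0.5 True (ones' [8])
  print dropped
  -- with the activation flag False, the input passes through unchanged
  passed <- dropout 0.5 False (ones' [8])
  print passed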

featureDropout Source #

Arguments

:: Double

dropout probability

-> Bool

whether or not to activate dropout

-> Tensor

input

-> IO Tensor

output

alphaDropout Source #

Arguments

:: Double

dropout probability

-> Bool

whether or not to activate dropout

-> Tensor

input

-> IO Tensor

output

Applies alpha dropout to the input.

featureAlphaDropout Source #

Arguments

:: Double

dropout probability

-> Bool

whether or not to activate dropout

-> Tensor

input

-> IO Tensor

output

bitwiseNot Source #

Arguments

:: Tensor

input

-> Tensor

output

Computes the bitwise NOT of the given input tensor. The input tensor must be of integral or Boolean types. For bool tensors, it computes the logical NOT.

logicalNot Source #

Arguments

:: Tensor

input

-> Tensor

output

Computes the element-wise logical NOT of the given input tensor. The output tensor has the bool dtype. If the input tensor is not a bool tensor, zeros are treated as False and non-zeros are treated as True.

logicalXor Source #

Arguments

:: Tensor

self

-> Tensor

other

-> Tensor 

logicalAnd Source #

Arguments

:: Tensor

self

-> Tensor

other

-> Tensor 

logicalOr Source #

Arguments

:: Tensor

self

-> Tensor

other

-> Tensor 

cat Source #

Arguments

:: Dim

dimension

-> [Tensor]

list of tensors to concatenate

-> Tensor

output tensor

Concatenates the given sequence of tensors in the given dimension. All tensors must either have the same shape (except in the concatenating dimension) or be empty.
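
A small sketch (a hypothetical example; ones' and shape are assumed from Torch.TensorFactories and Torch.Tensor):

import Torch.Functional (Dim (..), cat)
import Torch.Tensor (shape)
import Torch.TensorFactories (ones')

main :: IO ()
main = do
  -- shapes may differ only in the concatenating dimension:
  -- [2,3] and [4,3] concatenated along Dim 0 give [6,3]
  print (shape (cat (Dim 0) [ones' [2, 3], ones' [4, 3]]))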

index Source #

Arguments

:: [Tensor]

indices

-> Tensor

input

-> Tensor

output

indexCopy Source #

Arguments

:: Int

dim

-> Tensor

index

-> Tensor

source

-> Tensor

input

-> Tensor

output

indexCopyWithDimname Source #

Arguments

:: Dimname

dim

-> Tensor

index

-> Tensor

source

-> Tensor

input

-> Tensor

output

indexPut Source #

Arguments

:: Bool

accumulate

-> [Tensor]

indices

-> Tensor

values

-> Tensor

input

-> Tensor

output

Puts values from the tensor values into the input tensor (out-of-place) using the indices specified in indices (a list of index tensors). The expression tensor.index_put_(indices, values) is equivalent to tensor[indices] = values. If accumulate is True, the elements in values are added to the input; if accumulate is False, the behavior is undefined if indices contain duplicate elements.

chunk Source #

Arguments

:: Int

chunks

-> Dim

dim

-> Tensor

input tensor

-> [Tensor]

output list of tensors

Splits a tensor into a specific number of chunks. The last chunk will be smaller if the tensor size along the given dimension dim is not divisible by chunks.
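
A small sketch (a hypothetical example; ones' and shape are assumed from Torch.TensorFactories and Torch.Tensor):

import Torch.Functional (Dim (..), chunk)
import Torch.Tensor (shape)
import Torch.TensorFactories (ones')

main :: IO ()
main = do
  -- 7 rows split into 3 chunks: the last chunk is smaller
  print (map shape (chunk 3 (Dim 0) (ones' [7, 2])))
  -- expected: [[3,2],[3,2],[1,2]]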

clamp Source #

Arguments

:: Float

minimum value

-> Float

maximum value

-> Tensor

input

-> Tensor

output

Clamps all elements in input into the range [min, max] and returns a resulting tensor.
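
A one-line usage sketch (a hypothetical example; asTensor is assumed from Torch.Tensor):

import Torch.Functional (clamp)
import Torch.Tensor (asTensor)

main :: IO ()
main = do
  -- values clipped into [0, 1]; expected [0.0, 0.3, 1.0]
  print (clamp 0.0 1.0 (asTensor [-0.5, 0.3, 1.7 :: Float]))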

clampMax Source #

Arguments

:: Float

maximum value

-> Tensor

input

-> Tensor

output

Clamps all elements in input to be smaller than or equal to max.

clampMin Source #

Arguments

:: Float

minimum value

-> Tensor

input

-> Tensor

output

Clamps all elements in input to be larger than or equal to min.

cudnnIsAcceptable Source #

Arguments

:: Tensor

input

-> Bool

output

constantPadNd1d Source #

Arguments

:: [Int]

list of padding per dimension

-> Float

value

-> Tensor

input

-> Tensor

output

Pads the input tensor boundaries with a constant value.

conv1d Source #

Arguments

:: Tensor

weight

-> Tensor

bias

-> Int

stride

-> Int

padding

-> Int

dilation

-> Int

groups

-> Tensor

input

-> Tensor

output

Applies a 1D convolution over an input signal composed of several input planes.

conv1d' Source #

Arguments

:: Tensor

weight

-> Tensor

bias

-> Int

strides

-> Int

padding

-> Tensor

input

-> Tensor

output

conv2d Source #

Arguments

:: Tensor

weight

-> Tensor

bias

-> (Int, Int)

strides

-> (Int, Int)

padding

-> (Int, Int)

dilation

-> Int

groups

-> Tensor

input

-> Tensor

output

Applies a 2D convolution over an input signal composed of several input planes.

conv2d' Source #

Arguments

:: Tensor

weight

-> Tensor

bias

-> (Int, Int)

strides

-> (Int, Int)

padding

-> Tensor

input

-> Tensor

output

conv3d Source #

Arguments

:: Tensor

weight

-> Tensor

bias

-> (Int, Int, Int)

strides

-> (Int, Int, Int)

padding

-> (Int, Int, Int)

dilation

-> Int

groups

-> Tensor

input

-> Tensor

output

Applies a 3D convolution over an input signal composed of several input planes.

conv3d' Source #

Arguments

:: Tensor

weight

-> Tensor

bias

-> (Int, Int, Int)

strides

-> (Int, Int, Int)

padding

-> Tensor

input

-> Tensor

output

convTranspose1d Source #

Arguments

:: Tensor

weight

-> Tensor

bias

-> Int

strides

-> Int

padding

-> Int

output padding

-> Int

groups

-> Tensor

input

-> Tensor

output

Applies a 1D transposed convolution over an input signal composed of several input planes

convTranspose1d' Source #

Arguments

:: Tensor

weight

-> Tensor

bias

-> Int

strides

-> Int

padding

-> Tensor

input

-> Tensor

output

convTranspose2d Source #

Arguments

:: Tensor

weight

-> Tensor

bias

-> (Int, Int)

strides

-> (Int, Int)

padding

-> (Int, Int)

output padding

-> Int

groups

-> Tensor

input

-> Tensor

output

Applies a 2D transposed convolution over an input signal composed of several input planes

convTranspose2d' Source #

Arguments

:: Tensor

weight

-> Tensor

bias

-> (Int, Int)

strides

-> (Int, Int)

padding

-> Tensor

input

-> Tensor

output

convTranspose3d Source #

Arguments

:: Tensor

weight

-> Tensor

bias

-> (Int, Int, Int)

strides

-> (Int, Int, Int)

padding

-> (Int, Int, Int)

output padding

-> Int

groups

-> Tensor

input

-> Tensor

output

Applies a 3D transposed convolution over an input signal composed of several input planes

convTranspose3d' Source #

Arguments

:: Tensor

weight

-> Tensor

bias

-> (Int, Int, Int)

strides

-> (Int, Int, Int)

padding

-> Tensor

input

-> Tensor

output

sign Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns a new tensor with the signs of the elements of input.

transpose Source #

Arguments

:: Dim

dim1

-> Dim

dim2

-> Tensor

input

-> Tensor

output

Returns a tensor that is a transposed version of input. The given dimensions dim1 and dim2 are swapped.
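
A small sketch (a hypothetical example; ones' and shape are assumed from Torch.TensorFactories and Torch.Tensor):

import Torch.Functional (Dim (..), transpose)
import Torch.Tensor (shape)
import Torch.TensorFactories (ones')

main :: IO ()
main = do
  -- swapping the first two dimensions of a [2,5] tensor gives [5,2]
  print (shape (transpose (Dim 0) (Dim 1) (ones' [2, 5])))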

transpose2D Source #

Arguments

:: Tensor

input

-> Tensor

output

Special case of transpose for a 2D tensor: swaps dimensions 0 and 1.

diag Source #

Arguments

:: Diag

diagonal

-> Tensor

input

-> Tensor

output

Returns a tensor with the elements of input as the diagonal. The Diag argument controls which diagonal to consider: if 0, the main diagonal; if greater than 0, above the main diagonal; if less than 0, below the main diagonal.

diagEmbed Source #

Arguments

:: Diag

offset

-> Dim

dim1

-> Dim

dim2

-> Tensor

self

-> Tensor 

diagflat Source #

Arguments

:: Diag

offset

-> Tensor

self

-> Tensor

output

If input is a vector (1-D tensor), then returns a 2-D square tensor with the elements of input as the diagonal. If input is a tensor with more than one dimension, then returns a 2-D tensor with diagonal elements equal to a flattened input. The argument offset controls which diagonal to consider: If offset = 0, it is the main diagonal. If offset > 0, it is above the main diagonal. If offset < 0, it is below the main diagonal.

diagonal Source #

Arguments

:: Diag

offset

-> Dim

dim1

-> Dim

dim2

-> Tensor

input

-> Tensor

output

Returns a partial view of input with its diagonal elements with respect to dim1 and dim2 appended as a dimension at the end of the shape. Applying diagEmbed to the output of this function with the same arguments yields a diagonal matrix with the diagonal entries of the input. However, diagEmbed has different default dimensions, so those need to be explicitly specified.

all Source #

Arguments

:: Tensor

input

-> Bool

output

Returns True if all elements in the tensor are True, False otherwise.

any Source #

Arguments

:: Tensor

input

-> Bool

output

Returns True if any elements in the tensor are True, False otherwise.

allDim Source #

Arguments

:: Dim

dimension

-> Bool

boolean corresponding to keepdim

-> Tensor

input

-> Tensor

output

Returns True if all elements in each row of the tensor in the given dimension dim are True, False otherwise. If keepdim is True, the output tensor is of the same size as input except in the dimension dim where it is of size 1. Otherwise, dim is squeezed, resulting in the output tensor having 1 fewer dimension than input.

anyDim Source #

Arguments

:: Dim

dimension

-> Bool

boolean corresponding to keepdim

-> Tensor

input

-> Tensor 

Returns True if any elements in each row of the tensor in the given dimension dim are True, False otherwise. If keepdim is True, the output tensor is of the same size as input except in the dimension dim where it is of size 1. Otherwise, dim is squeezed, resulting in the output tensor having 1 fewer dimension than input.

permute Source #

Arguments

:: [Int]

the desired ordering of the dimensions

-> Tensor

input

-> Tensor 

Permute the dimensions of this tensor.

expand Source #

Arguments

:: Tensor

input

-> Bool

the implicit flag forwarded to the underlying ATen expand call

-> [Int]

the desired expanded size

-> Tensor

output

Expands the input tensor to the desired size; singleton dimensions are expanded without copying data.

flatten Source #

Arguments

:: Dim

startDim

-> Dim

endDim

-> Tensor

self

-> Tensor

output

Flattens the dimensions from startDim through endDim into a single dimension.

flattenAll Source #

Arguments

:: Tensor

input

-> Tensor

output

Flattens all dimensions of the input tensor into a single dimension.

lstm Source #

Arguments

:: Tensor

input

-> [Tensor]

hx

-> [Tensor]

params

-> Bool

has_biases

-> Int

num_layers

-> Double

dropout

-> Bool

train

-> Bool

bidirectional

-> Bool

batch_first

-> (Tensor, Tensor, Tensor) 

lstm' Source #

Arguments

:: Tensor

batch_sizes

-> [Tensor]

hx

-> [Tensor]

params

-> Bool

has_biases

-> Int

num_layers

-> Double

dropout

-> Bool

train

-> Bool

bidirectional

-> Tensor

data

-> (Tensor, Tensor, Tensor) 

gru Source #

Arguments

:: Tensor

hx

-> [Tensor]

params

-> Bool

has_biases

-> Int

num_layers

-> Double

dropout

-> Bool

train

-> Bool

bidirectional

-> Bool

batch_first

-> Tensor

input

-> (Tensor, Tensor) 

gru' Source #

Arguments

:: Tensor

batch_sizes

-> Tensor

hx

-> [Tensor]

params

-> Bool

has_biases

-> Int

num_layers

-> Double

dropout

-> Bool

train

-> Bool

bidirectional

-> Tensor

data

-> (Tensor, Tensor) 

rnnTanh Source #

Arguments

:: Tensor

hx

-> [Tensor]

params

-> Bool

has_biases

-> Int

num_layers

-> Double

dropout

-> Bool

train

-> Bool

bidirectional

-> Bool

batch_first

-> Tensor

input

-> (Tensor, Tensor) 

rnnTanh' Source #

Arguments

:: Tensor

batch_sizes

-> Tensor

hx

-> [Tensor]

params

-> Bool

has_biases

-> Int

num_layers

-> Double

dropout

-> Bool

train

-> Bool

bidirectional

-> Tensor

data

-> (Tensor, Tensor) 

rnnRelu Source #

Arguments

:: Tensor

hx

-> [Tensor]

params

-> Bool

has_biases

-> Int

num_layers

-> Double

dropout

-> Bool

train

-> Bool

bidirectional

-> Bool

batch_first

-> Tensor

input

-> (Tensor, Tensor) 

rnnRelu' Source #

Arguments

:: Tensor

data

-> Tensor

batch_sizes

-> Tensor

hx

-> [Tensor]

params

-> Bool

has_biases

-> Int

num_layers

-> Double

dropout

-> Bool

train

-> Bool

bidirectional

-> (Tensor, Tensor) 

lstmCell Source #

Arguments

:: Tensor

input-hidden weights (4*hidden_size, input_size)

-> Tensor

hidden-hidden weights (4*hidden_size, hidden_size)

-> Tensor

input-hidden bias (4*hidden_size)

-> Tensor

hidden-hidden bias, of shape (4*hidden_size)

-> (Tensor, Tensor)

hidden state

-> Tensor

input

-> (Tensor, Tensor) 

A long short-term memory (LSTM) cell.

gruCell Source #

Arguments

:: Tensor

input-hidden weights

-> Tensor

hidden-hidden weights

-> Tensor

input-hidden bias

-> Tensor

hidden-hidden bias

-> Tensor

hidden state

-> Tensor

input

-> Tensor

output

A gated recurrent unit (GRU) cell

rnnTanhCell Source #

Arguments

:: Tensor

input-hidden weights

-> Tensor

hidden-hidden weights

-> Tensor

input-hidden bias

-> Tensor

hidden-hidden bias

-> Tensor

hidden state

-> Tensor

input

-> Tensor

output

An Elman RNN cell with tanh non-linearity

rnnReluCell Source #

Arguments

:: Tensor

input-hidden weights

-> Tensor

hidden-hidden weights

-> Tensor

input-hidden bias

-> Tensor

hidden-hidden bias

-> Tensor

hidden state

-> Tensor

input

-> Tensor

output

An Elman RNN cell with ReLU non-linearity

quantizedLstmCell Source #

Arguments

:: Tensor

input-hidden weights

-> Tensor

hidden-hidden weights

-> Tensor

input-hidden bias

-> Tensor

hidden-hidden bias

-> Tensor

input-hidden packed

-> Tensor

hidden-hidden packed

-> Tensor

input-hidden column offsets

-> Tensor

hidden-hidden column offsets

-> Float

input-hidden scale

-> Float

hidden-hidden scale

-> Float

input-hidden zero point

-> Float

hidden-hidden zero point

-> (Tensor, Tensor)

hidden state

-> Tensor

input

-> (Tensor, Tensor)

output

A quantized long short-term memory (LSTM) cell.

quantizedGruCell Source #

Arguments

:: Tensor

input-hidden weights

-> Tensor

hidden-hidden weights

-> Tensor

input-hidden bias

-> Tensor

hidden-hidden bias

-> Tensor

input-hidden packed

-> Tensor

hidden-hidden packed

-> Tensor

input-hidden column offsets

-> Tensor

hidden-hidden column offsets

-> Float

input-hidden scale

-> Float

hidden-hidden scale

-> Float

input-hidden zero point

-> Float

hidden-hidden zero point

-> Tensor

hidden state

-> Tensor

input

-> Tensor

output

A quantized gated recurrent unit (GRU) cell.

quantizedRnnReluCell Source #

Arguments

:: Tensor

input-hidden weights

-> Tensor

hidden-hidden weights

-> Tensor

input-hidden bias

-> Tensor

hidden-hidden bias

-> Tensor

input-hidden packed

-> Tensor

hidden-hidden packed

-> Tensor

input-hidden column offsets

-> Tensor

hidden-hidden column offsets

-> Float

input-hidden scale

-> Float

hidden-hidden scale

-> Float

input-hidden zero point

-> Float

hidden-hidden zero point

-> Tensor

hidden state

-> Tensor

input

-> Tensor

output

A quantized Elman RNN cell with ReLU non-linearity

quantizedRnnTanhCell Source #

Arguments

:: Tensor

input-hidden weights

-> Tensor

hidden-hidden weights

-> Tensor

input-hidden bias

-> Tensor

hidden-hidden bias

-> Tensor

input-hidden packed

-> Tensor

hidden-hidden packed

-> Tensor

input-hidden column offsets

-> Tensor

hidden-hidden column offsets

-> Float

input-hidden scale

-> Float

hidden-hidden scale

-> Float

input-hidden zero point

-> Float

hidden-hidden zero point

-> Tensor

hidden state

-> Tensor

input

-> Tensor

output

A quantized Elman RNN cell with tanh non-linearity

softShrink Source #

Arguments

:: Float

lambda

-> Tensor

input

-> Tensor

output

Applies the soft shrinkage function element-wise

stack Source #

Arguments

:: Dim

dim

-> [Tensor]

input

-> Tensor

output

Concatenates sequence of tensors along a new dimension. All tensors need to be of the same size.
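
A small sketch contrasting stack with cat (a hypothetical example; ones' and shape are assumed from Torch.TensorFactories and Torch.Tensor): stack creates a new dimension, while cat reuses an existing one.

import Torch.Functional (Dim (..), stack)
import Torch.Tensor (shape)
import Torch.TensorFactories (ones')

main :: IO ()
main = do
  -- two [2,3] tensors stacked along a new leading dimension give [2,2,3]
  print (shape (stack (Dim 0) [ones' [2, 3], ones' [2, 3]]))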

sumDim Source #

Arguments

:: Dim

dim to sum along

-> KeepDim

whether the output tensor has dim retained or not

-> DType

datatype

-> Tensor

input

-> Tensor

output

Returns the sum of each row of the input tensor in the given dimension dim. If keepdim is True, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed, resulting in the output tensor having 1 (or len(dim)) fewer dimension(s).

topK Source #

Arguments

:: Int

k

-> Dim

dim to find topK along

-> Bool

largest

-> Bool

sorted

-> Tensor

input

-> (Tensor, Tensor)

output

Returns the k largest elements of the given input tensor along a given dimension. If largest is False then the k smallest elements are returned. If the boolean option sorted is True, the returned k elements are themselves sorted. A tuple of (values, indices) is returned, where the indices are the indices of the elements in the original input tensor.
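
A small sketch (a hypothetical example; asTensor is assumed from Torch.Tensor):

import Torch.Functional (Dim (..), topK)
import Torch.Tensor (asTensor)

main :: IO ()
main = do
  let (values, indices) =
        topK 2 (Dim 0) True True (asTensor [1.0, 4.0, 3.0 :: Float])
  print values   -- expected [4.0, 3.0]
  print indices  -- expected [1, 2]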

logsumexp Source #

Arguments

:: Bool

keepdim

-> Int

dim

-> Tensor

input

-> Tensor

output

Returns the log of summed exponentials of each row of the input tensor in the given dimension dim. The computation is numerically stabilized.

triu Source #

Arguments

:: Diag

diagonal

-> Tensor

input

-> Tensor

output

Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices input; the other elements of the result tensor out are set to 0. The upper triangular part of the matrix is defined as the elements on and above the diagonal. The argument diagonal controls which diagonal to consider. If diagonal = 0, all elements on and above the main diagonal are retained. A positive value excludes just as many diagonals above the main diagonal, and similarly a negative value includes just as many diagonals below the main diagonal. The main diagonal is the set of indices \((i,i)\) for \(i \in [0,\min(d_1,d_2)-1]\) where \(d_1\) and \(d_2\) are the dimensions of the matrix.
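
A small sketch of the diagonal argument (a hypothetical example; ones' is assumed from Torch.TensorFactories):

import Torch.Functional (Diag (..), triu)
import Torch.TensorFactories (ones')

main :: IO ()
main = do
  -- Diag 0 keeps the main diagonal and everything above it
  print (triu (Diag 0) (ones' [3, 3]))
  -- Diag 1 excludes the main diagonal as well
  print (triu (Diag 1) (ones' [3, 3]))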

tril Source #

Arguments

:: Diag

diagonal

-> Tensor

input

-> Tensor

output

Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices input; the other elements of the result tensor out are set to 0. The lower triangular part of the matrix is defined as the elements on and below the diagonal. The argument diagonal controls which diagonal to consider. If diagonal = 0, all elements on and below the main diagonal are retained. A positive value includes just as many diagonals above the main diagonal, and similarly a negative value excludes just as many diagonals below the main diagonal. The main diagonal is the set of indices \((i,i)\) for \(i \in [0,\min(d_1,d_2)-1]\) where \(d_1\) and \(d_2\) are the dimensions of the matrix.

trunc Source #

Arguments

:: Tensor

input

-> Tensor

output

Returns a new tensor with the truncated integer values of the elements of input.

uniqueDim Source #

Arguments

:: Int

dim

-> Bool

sorted

-> Bool

return_inverse

-> Bool

return_counts

-> Tensor

input

-> (Tensor, Tensor, Tensor)

output

Returns the unique elements of the input tensor along a dimension.

uniqueConsecutive Source #

Arguments

:: Bool

return_inverse

-> Bool

return_counts

-> Int

dim

-> Tensor

input

-> (Tensor, Tensor, Tensor)

output

Eliminates all but the first element from every consecutive group of equivalent elements. This function is different from uniqueDim in the sense that this function only eliminates consecutive duplicate values.

uniqueDimConsecutive Source #

Arguments

:: Int

dim

-> Bool

return_inverse

-> Bool

return_counts

-> Tensor

input

-> (Tensor, Tensor, Tensor)

output

Eliminates all but the first element from every consecutive group of equivalent elements along a dimension. This function is different from uniqueDim in the sense that this function only eliminates consecutive duplicate values.

unsqueeze Source #

Arguments

:: Dim

dim

-> Tensor

input

-> Tensor

output

Returns a new tensor with a dimension of size one inserted at the specified position. The returned tensor shares the same underlying data with this tensor. A dim value within the range [-(dim input) - 1, (dim input) + 1) can be used. A negative dim corresponds to unsqueeze applied at dim = dim + (dim input) + 1.
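
A small sketch of positive and negative dim values (a hypothetical example; ones' and shape are assumed from Torch.TensorFactories and Torch.Tensor):

import Torch.Functional (Dim (..), unsqueeze)
import Torch.Tensor (shape)
import Torch.TensorFactories (ones')

main :: IO ()
main = do
  -- a size-1 dimension inserted at position 0: [3,4] -> [1,3,4]
  print (shape (unsqueeze (Dim 0) (ones' [3, 4])))
  -- a negative position counts from the end: [3,4] -> [3,4,1]
  print (shape (unsqueeze (Dim (-1)) (ones' [3, 4])))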

upsampleBilinear2d Source #

Arguments

:: (Int, Int)

output-size

-> Bool

align corners

-> Tensor

self

-> Tensor 

Upsamples the input, using bilinear upsampling. Expected inputs are spatial (4 dimensional).

upsampleNearest2d Source #

Arguments

:: (Int, Int)

output_size

-> Double

scales_h

-> Double

scales_w

-> Tensor

self

-> Tensor 

Applies a 2D nearest neighbor upsampling to an input signal composed of several input channels.

split Source #

Arguments

:: Int

split-size

-> Dim

dim

-> Tensor

self

-> [Tensor] 

Splits the tensor into chunks of given size if possible.

l1Loss Source #

Arguments

:: Reduction

reduction

-> Tensor

input

-> Tensor

target

-> Tensor

output

Creates a criterion that measures the mean absolute error (MAE) between each element in the input \(x\) and target \(y\) .

leakyRelu Source #

Arguments

:: Float

negative slope

-> Tensor

input

-> Tensor

output

Applies the element-wise function: \(\text{LeakyReLU}(x) = \max(0,x) + \text{negative\_slope} * \min(0,x)\)

logSigmoid Source #

Arguments

:: Tensor

input

-> Tensor

output

Applies the element-wise function: \(\text{LogSigmoid}(x) = \log(\frac{ 1 }{ 1 + \exp(-x)})\)

maxDim Source #

Arguments

:: Dim

dimension

-> KeepDim

keepdim

-> Tensor

input

-> (Tensor, Tensor)

output

Returns a namedtuple (values, indices) where values is the maximum value of each row of the input tensor in the given dimension dim, and indices is the index location of each maximum value found (argmax). If keepdim is True, the output tensors are of the same size as input except in the dimension dim where they are of size 1. Otherwise, dim is squeezed, resulting in the output tensors having 1 fewer dimension than input.

minDim Source #

Arguments

:: Dim

dimension

-> KeepDim

keepdim

-> Tensor

input

-> (Tensor, Tensor) 

Returns a namedtuple (values, indices) where values is the minimum value of each row of the input tensor in the given dimension dim, and indices is the index location of each minimum value found (argmin). If keepdim is True, the output tensors are of the same size as input except in the dimension dim where they are of size 1. Otherwise, dim is squeezed, resulting in the output tensors having 1 fewer dimension than input.

meanDim Source #

Arguments

:: Dim

dimension

-> KeepDim

keepdim

-> DType

dtype

-> Tensor

input

-> Tensor

output

Returns the mean value of each row of the input tensor in the given dimension dim. If dim is a list of dimensions, reduce over all of them. If keepdim is True, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensor having 1 (or len(dim)) fewer dimension(s).

medianDim Source #

Arguments

:: Dim

dimension

-> KeepDim

keepdim

-> Tensor

input

-> (Tensor, Tensor)

output

Returns a namedtuple (values, indices) where values is the median value of each row of the input tensor in the given dimension dim, and indices is the index location of each median value found. By default, dim is the last dimension of the input tensor. If keepdim is True, the output tensors are of the same size as input except in the dimension dim where they are of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensors having 1 fewer dimension than input.

chainMatmul Source #

Arguments

:: [Tensor]

list of tensors

-> Tensor

output

Returns the matrix product of the N 2-D tensors. This product is efficiently computed using the matrix chain order algorithm, which selects the order that incurs the lowest cost in terms of arithmetic operations. Note that since this is a function to compute the product, N needs to be greater than or equal to 2; if equal to 2, a trivial matrix-matrix product is returned. If N is 1, this is a no-op: the original matrix is returned as is.

gelu Source #

Arguments

:: Tensor

input

-> Tensor

output

Applies element-wise the function \(\text{GELU}(x) = x * \Phi(x)\) where \(\Phi(x)\) is the Cumulative Distribution Function for Gaussian Distribution.

glu Source #

Arguments

:: Dim

dimension

-> Tensor

input

-> Tensor

output

The gated linear unit. Computes: \(\text{GLU}(a, b) = a \otimes \sigma(b)\) where input is split in half along dim to form a and b, \(\sigma\) is the sigmoid function and \(\otimes\) is the element-wise product between matrices.

stdMean Source #

Arguments

:: Bool

unbiased

-> Tensor

input

-> (Tensor, Tensor)

output

Returns the standard-deviation and mean of all elements in the input tensor. If unbiased is False, then the standard-deviation will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used.

stdMeanDim Source #

Arguments

:: Dim

dimension

-> Bool

unbiased

-> KeepDim

whether the output tensor has dim retained or not

-> Tensor

input

-> (Tensor, Tensor)

output

Returns the standard-deviation and mean of each row of the input tensor in the dimension dim. If dim is a list of dimensions, reduce over all of them. If keepdim is True, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed, resulting in the output tensor having 1 (or len(dim)) fewer dimension(s). If unbiased is False, then the standard-deviation will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used.

clone Source #

Arguments

:: Tensor

input

-> IO Tensor

output

Returns a copy of input. The output tensor keeps the computational graph and requires_grad value of the input tensor. https://discuss.pytorch.org/t/clone-and-detach-in-v0-4-0/16861/41

detach Source #

Arguments

:: Tensor

input

-> IO Tensor

output

Returns a copy of input. The output tensor does not keep the computational graph or the requires_grad value of the input tensor.

view Source #

Arguments

:: [Int]

the desired size

-> Tensor

input

-> Tensor

output

Returns a new tensor with the same data as the input tensor but of a different shape.

repeat Source #

Arguments

:: [Int]

The number of times to repeat this tensor along each dimension

-> Tensor

input

-> Tensor

output

Repeats this tensor along the specified dimensions.

batchNormIO Source #

Arguments

:: Tensor

weight

-> Tensor

bias

-> MutableTensor

running_mean

-> MutableTensor

running_var

-> Bool

training

-> Double

momentum

-> Double

eps

-> Tensor

input

-> IO Tensor

output

instanceNormIO Source #

Arguments

:: Tensor

weight

-> Tensor

bias

-> MutableTensor

running_mean

-> MutableTensor

running_var

-> Bool

training

-> Double

momentum

-> Double

eps

-> Tensor

input

-> IO Tensor

output

repeatInterleave Source #

Arguments

:: Tensor

self

-> Tensor

repeats

-> Int

dim

-> Tensor 

repeatInterleaveScalar Source #

Arguments

:: Tensor

self

-> Int

repeats

-> Int

dim

-> Tensor 

acos Source #

Arguments

:: Tensor

self

-> Tensor 

addmv Source #

Arguments

:: Tensor

self

-> Tensor

mat

-> Tensor

vec

-> Float

beta

-> Float

alpha

-> Tensor 

addr Source #

Arguments

:: Tensor

self

-> Tensor

vec1

-> Tensor

vec2

-> Float

beta

-> Float

alpha

-> Tensor 

allclose Source #

Arguments

:: Tensor

self

-> Tensor

other

-> Double

rtol

-> Double

atol

-> Bool

equal_nan

-> Bool 

argmin Source #

Arguments

:: Tensor

self

-> Int

dim

-> Bool

keepdim

-> Tensor 

asin Source #

Arguments

:: Tensor

self

-> Tensor 

atan Source #

Arguments

:: Tensor

self

-> Tensor 

baddbmm Source #

Arguments

:: Tensor

self

-> Tensor

batch1

-> Tensor

batch2

-> Float

beta

-> Float

alpha

-> Tensor 

bmm Source #

Arguments

:: Tensor

self

-> Tensor

mat2

-> Tensor 

conj Source #

Arguments

:: Tensor

self

-> Tensor 

det Source #

Arguments

:: Tensor

self

-> Tensor 

dot Source #

Arguments

:: Tensor

self

-> Tensor

tensor

-> Tensor 

einsum Source #

Arguments

:: String

equation

-> [Tensor]

tensors

-> Tensor 

expm1 Source #

Arguments

:: Tensor

self

-> Tensor 

ger Source #

Arguments

:: Tensor

self

-> Tensor

vec2

-> Tensor 

logdet Source #

Arguments

:: Tensor

self

-> Tensor 

lstsq Source #

Arguments

:: Tensor

self

-> Tensor

A

-> (Tensor, Tensor) 

mv Source #

Arguments

:: Tensor

self

-> Tensor

vec

-> Tensor 

sumWithDimnames Source #

Arguments

:: Tensor

self

-> [Dimname]

dim

-> Bool

keepdim

-> DType

dtype

-> Tensor 

Orphan instances