hasktorch-gradually-typed-0.2.0.0: experimental project for hasktorch
Safe Haskell: Safe-Inferred
Language: Haskell2010

Torch.GraduallyTyped.NN.Transformer.BERT.Common

Documentation

type BERTDType = 'Float Source #

BERT dType.

bertDType :: SDType BERTDType Source #

BERT dType singleton.

type BERTDataType = 'DataType BERTDType Source #

BERT data type.

bertDataType :: SDataType BERTDataType Source #

BERT data type singleton.
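
Each type-level constant above is paired with a value-level singleton that witnesses it. A minimal sketch of what this pairing plausibly looks like, assuming the SFloat and SDataType constructors from this package's singleton machinery (the exact constructor names are assumptions, not checked against the source):

  bertDType :: SDType 'Float                   -- i.e. SDType BERTDType
  bertDType = SFloat                           -- assumed constructor name

  bertDataType :: SDataType ('DataType 'Float) -- i.e. SDataType BERTDataType
  bertDataType = SDataType bertDType           -- wraps the dType singleton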

bertDropoutP :: Double Source #

BERT dropout rate. 'dropout_rate = 0.1'

type BERTPosEncDim = 'Dim ('Name "*") ('Size 512) Source #

BERT positional encoding dimension.

bertPosEncDim :: SDim BERTPosEncDim Source #

BERT positional encoding dimension singleton.

bertEps :: Double Source #

BERT layer-norm epsilon. 'layer_norm_epsilon = 1e-12'

bertMaxPositionEmbeddings :: Int Source #

BERT maximum number of position embeddings. 'max_position_embeddings = 512'

bertPadTokenId :: Int Source #

BERT padding token id. 'pad_token_id = 0'

bertAttentionMaskBias :: Double Source #

BERT attention mask bias.

type family BERTModelF (transformerHead :: TransformerHead) (numLayers :: Nat) (gradient :: Gradient RequiresGradient) (device :: Device (DeviceType Nat)) (headDim :: Dim (Name Symbol) (Size Nat)) (headEmbedDim :: Dim (Name Symbol) (Size Nat)) (embedDim :: Dim (Name Symbol) (Size Nat)) (inputEmbedDim :: Dim (Name Symbol) (Size Nat)) (ffnDim :: Dim (Name Symbol) (Size Nat)) (vocabDim :: Dim (Name Symbol) (Size Nat)) (typeVocabDim :: Dim (Name Symbol) (Size Nat)) (hasDropout :: HasDropout) :: Type where ... Source #

Specifies the BERT model.

Equations

BERTModelF transformerHead numLayers gradient device headDim headEmbedDim embedDim inputEmbedDim ffnDim vocabDim typeVocabDim hasDropout = GSimplifiedEncoderOnlyTransformer (GEncoderOnlyTransformerF 'BERT transformerHead numLayers gradient device BERTDataType headDim headEmbedDim embedDim inputEmbedDim ffnDim BERTPosEncDim vocabDim typeVocabDim hasDropout) MkAbsPos MkTransformerPaddingMask (MkTransformerAttentionMask BERTDataType) 
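
For orientation, a hypothetical instantiation of this type family with the familiar BERT-base sizes (12 layers, 12 attention heads of size 64, hidden size 768, feed-forward size 3072, vocabulary size 30522, token-type vocabulary size 2). The 'WithLMHead constructor is assumed to be the relevant TransformerHead; treat it and the alias below as a sketch, not an export of this module:

  type BERTBase gradient device hasDropout =
    BERTModelF
      'WithLMHead                      -- transformerHead (assumed constructor)
      12                               -- numLayers
      gradient
      device
      ('Dim ('Name "*") ('Size 12))    -- headDim
      ('Dim ('Name "*") ('Size 64))    -- headEmbedDim
      ('Dim ('Name "*") ('Size 768))   -- embedDim
      ('Dim ('Name "*") ('Size 768))   -- inputEmbedDim
      ('Dim ('Name "*") ('Size 3072))  -- ffnDim
      ('Dim ('Name "*") ('Size 30522)) -- vocabDim
      ('Dim ('Name "*") ('Size 2))     -- typeVocabDim
      hasDropout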

bertModelSpec :: forall transformerHead numLayers gradient device headDim headEmbedDim embedDim inputEmbedDim ffnDim vocabDim typeVocabDim hasDropout. (SingI headDim, SingI headEmbedDim, SingI embedDim, SingI inputEmbedDim, SingI ffnDim, SingI vocabDim, SingI typeVocabDim) => STransformerHead transformerHead -> SNat numLayers -> SGradient gradient -> SDevice device -> SHasDropout hasDropout -> ModelSpec (BERTModelF transformerHead numLayers gradient device headDim headEmbedDim embedDim inputEmbedDim ffnDim vocabDim typeVocabDim hasDropout) Source #

Specifies the parameters of a BERT model. A usage sketch follows the parameter list.

  • transformerHead: the head of the BERT model.
  • numLayers: the number of layers in the BERT model.
  • gradient: whether gradients are computed for the BERT model's parameters.
  • device: the computational device on which the BERT model parameters are to be allocated.
  • hasDropout: whether the BERT model includes dropout layers.
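
A minimal usage sketch, reusing the hypothetical BERTBase alias sketched above. The singleton constructors (SWithLMHead, SGradient, SWithGradient, SDevice, SCPU, SWithDropout, SNat) are assumed to follow this package's naming conventions; annotating the result type discharges the SingI constraints on the dimensions:

  bertBaseSpec :: ModelSpec (BERTBase ('Gradient 'WithGradient) ('Device 'CPU) 'WithDropout)
  bertBaseSpec =
    bertModelSpec
      SWithLMHead               -- transformerHead (assumed constructor name)
      (SNat @12)                -- numLayers
      (SGradient SWithGradient) -- track gradients, e.g. for training
      (SDevice SCPU)            -- allocate parameters on the CPU
      SWithDropout              -- include dropout layers (assumed constructor)

The resulting spec would then be handed to the package's usual initialisation or checkpoint-loading machinery to obtain an actual model value.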

mkBERTInput :: forall batchDim seqDim device m output. (MonadThrow m, SGetDim batchDim, SGetDim seqDim, Catch ('Shape '['Dim ('Name "*") 'UncheckedSize, 'Dim ('Name "*") 'UncheckedSize] <+> 'Shape '[batchDim, seqDim]), output ~ Tensor ('Gradient 'WithoutGradient) ('Layout 'Dense) device ('DataType 'Int64) ('Shape '[batchDim, seqDim])) => SDim batchDim -> SDim seqDim -> SDevice device -> [[Int]] -> m output Source #
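
Judging from the signature, mkBERTInput packs batched token ids into an Int64 tensor of shape [batchDim, seqDim] on the given device, failing in MonadThrow if the nested lists cannot be reconciled with that shape. A sketch, assuming the SName/SSize dimension syntax from this package and illustrative bert-base-uncased token ids; shorter rows are presumably padded with bertPadTokenId (= 0) up to seqDim:

  example :: IO ()
  example = do
    let batchDim = SName @"*" :&: SSize @2  -- two sequences
        seqDim   = SName @"*" :&: SSize @8  -- up to eight tokens each
    input <-
      mkBERTInput batchDim seqDim (SDevice SCPU)
        [ [101, 7592, 2088, 102]        -- [CLS] hello world [SEP]
        , [101, 2129, 2024, 2017, 102]  -- [CLS] how are you [SEP]
        ]
    print input  -- assuming the usual Show instance for tensors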