Definition at line 56 of file LSTMLayer.h.
Public Types | |
using | HelperDescriptor_t = typename Architecture_t::DropoutDescriptor_t |
using | LayerDescriptor_t = typename Architecture_t::RecurrentDescriptor_t |
using | Matrix_t = typename Architecture_t::Matrix_t |
using | RNNDescriptors_t = typename Architecture_t::RNNDescriptors_t |
using | RNNWorkspace_t = typename Architecture_t::RNNWorkspace_t |
using | Scalar_t = typename Architecture_t::Scalar_t |
using | Tensor_t = typename Architecture_t::Tensor_t |
using | TensorDescriptor_t = typename Architecture_t::TensorDescriptor_t |
using | WeightsDescriptor_t = typename Architecture_t::FilterDescriptor_t |
Public Member Functions | |
TBasicLSTMLayer (const TBasicLSTMLayer &) | |
Copy Constructor. | |
TBasicLSTMLayer (size_t batchSize, size_t stateSize, size_t inputSize, size_t timeSteps, bool rememberState=false, bool returnSequence=false, DNN::EActivationFunction f1=DNN::EActivationFunction::kSigmoid, DNN::EActivationFunction f2=DNN::EActivationFunction::kTanh, bool training=true, DNN::EInitialization fA=DNN::EInitialization::kZero) | |
Constructor. | |
void | AddWeightsXMLTo (void *parent) |
Writes the information and the weights about the layer in an XML node. | |
void | Backward (Tensor_t &gradients_backward, const Tensor_t &activations_backward) |
Backpropagates the error. | |
void | CandidateValue (const Matrix_t &input, Matrix_t &dc) |
Decides the new candidate values (NN with Tanh) | |
Matrix_t & | CellBackward (Matrix_t &state_gradients_backward, Matrix_t &cell_gradients_backward, const Matrix_t &precStateActivations, const Matrix_t &precCellActivations, const Matrix_t &input_gate, const Matrix_t &forget_gate, const Matrix_t &candidate_gate, const Matrix_t &output_gate, const Matrix_t &input, Matrix_t &input_gradient, Matrix_t &di, Matrix_t &df, Matrix_t &dc, Matrix_t &dout, size_t t) |
Backward pass for a single time unit, matching the corresponding call to Forward(...). | |
void | CellForward (Matrix_t &inputGateValues, const Matrix_t &forgetGateValues, const Matrix_t &candidateValues, const Matrix_t &outputGateValues) |
Forward for a single cell (time unit) | |
bool | DoesRememberState () const |
bool | DoesReturnSequence () const |
void | ForgetGate (const Matrix_t &input, Matrix_t &df) |
Forgets the past values (NN with Sigmoid) | |
void | Forward (Tensor_t &input, bool isTraining=true) |
Computes the next hidden state and next cell state with given input matrix. | |
DNN::EActivationFunction | GetActivationFunctionF1 () const |
DNN::EActivationFunction | GetActivationFunctionF2 () const |
Matrix_t & | GetCandidateBias () |
const Matrix_t & | GetCandidateBias () const |
Matrix_t & | GetCandidateBiasGradients () |
const Matrix_t & | GetCandidateBiasGradients () const |
Matrix_t & | GetCandidateDerivativesAt (size_t i) |
const Matrix_t & | GetCandidateDerivativesAt (size_t i) const |
std::vector< Matrix_t > & | GetCandidateGateTensor () |
const std::vector< Matrix_t > & | GetCandidateGateTensor () const |
Matrix_t & | GetCandidateGateTensorAt (size_t i) |
const Matrix_t & | GetCandidateGateTensorAt (size_t i) const |
Matrix_t & | GetCandidateValue () |
const Matrix_t & | GetCandidateValue () const |
Matrix_t & | GetCell () |
const Matrix_t & | GetCell () const |
size_t | GetCellSize () const |
std::vector< Matrix_t > & | GetCellTensor () |
const std::vector< Matrix_t > & | GetCellTensor () const |
Matrix_t & | GetCellTensorAt (size_t i) |
const Matrix_t & | GetCellTensorAt (size_t i) const |
std::vector< Matrix_t > & | GetDerivativesCandidate () |
const std::vector< Matrix_t > & | GetDerivativesCandidate () const |
std::vector< Matrix_t > & | GetDerivativesForget () |
const std::vector< Matrix_t > & | GetDerivativesForget () const |
std::vector< Matrix_t > & | GetDerivativesInput () |
const std::vector< Matrix_t > & | GetDerivativesInput () const |
std::vector< Matrix_t > & | GetDerivativesOutput () |
const std::vector< Matrix_t > & | GetDerivativesOutput () const |
Tensor_t & | GetDX () |
Tensor_t & | GetDY () |
Matrix_t & | GetForgetBiasGradients () |
const Matrix_t & | GetForgetBiasGradients () const |
Matrix_t & | GetForgetDerivativesAt (size_t i) |
const Matrix_t & | GetForgetDerivativesAt (size_t i) const |
Matrix_t & | GetForgetGateBias () |
const Matrix_t & | GetForgetGateBias () const |
std::vector< Matrix_t > & | GetForgetGateTensor () |
const std::vector< Matrix_t > & | GetForgetGateTensor () const |
Matrix_t & | GetForgetGateTensorAt (size_t i) |
const Matrix_t & | GetForgetGateTensorAt (size_t i) const |
Matrix_t & | GetForgetGateValue () |
const Matrix_t & | GetForgetGateValue () const |
Matrix_t & | GetInputBiasGradients () |
const Matrix_t & | GetInputBiasGradients () const |
Matrix_t & | GetInputDerivativesAt (size_t i) |
const Matrix_t & | GetInputDerivativesAt (size_t i) const |
Matrix_t & | GetInputGateBias () |
const Matrix_t & | GetInputGateBias () const |
std::vector< Matrix_t > & | GetInputGateTensor () |
const std::vector< Matrix_t > & | GetInputGateTensor () const |
Matrix_t & | GetInputGateTensorAt (size_t i) |
const Matrix_t & | GetInputGateTensorAt (size_t i) const |
Matrix_t & | GetInputGateValue () |
const Matrix_t & | GetInputGateValue () const |
size_t | GetInputSize () const |
Getters. | |
Matrix_t & | GetOutputBiasGradients () |
const Matrix_t & | GetOutputBiasGradients () const |
Matrix_t & | GetOutputDerivativesAt (size_t i) |
const Matrix_t & | GetOutputDerivativesAt (size_t i) const |
Matrix_t & | GetOutputGateBias () |
const Matrix_t & | GetOutputGateBias () const |
std::vector< Matrix_t > & | GetOutputGateTensor () |
const std::vector< Matrix_t > & | GetOutputGateTensor () const |
Matrix_t & | GetOutputGateTensorAt (size_t i) |
const Matrix_t & | GetOutputGateTensorAt (size_t i) const |
Matrix_t & | GetOutputGateValue () |
const Matrix_t & | GetOutputGateValue () const |
Matrix_t & | GetState () |
const Matrix_t & | GetState () const |
size_t | GetStateSize () const |
size_t | GetTimeSteps () const |
Tensor_t & | GetWeightGradientsTensor () |
const Tensor_t & | GetWeightGradientsTensor () const |
Matrix_t & | GetWeightsCandidate () |
const Matrix_t & | GetWeightsCandidate () const |
Matrix_t & | GetWeightsCandidateGradients () |
const Matrix_t & | GetWeightsCandidateGradients () const |
Matrix_t & | GetWeightsCandidateState () |
const Matrix_t & | GetWeightsCandidateState () const |
Matrix_t & | GetWeightsCandidateStateGradients () |
const Matrix_t & | GetWeightsCandidateStateGradients () const |
Matrix_t & | GetWeightsForgetGate () |
const Matrix_t & | GetWeightsForgetGate () const |
Matrix_t & | GetWeightsForgetGateState () |
const Matrix_t & | GetWeightsForgetGateState () const |
Matrix_t & | GetWeightsForgetGradients () |
const Matrix_t & | GetWeightsForgetGradients () const |
Matrix_t & | GetWeightsForgetStateGradients () |
Matrix_t & | GetWeightsInputGate () |
const Matrix_t & | GetWeightsInputGate () const |
Matrix_t & | GetWeightsInputGateState () |
const Matrix_t & | GetWeightsInputGateState () const |
Matrix_t & | GetWeightsInputGradients () |
const Matrix_t & | GetWeightsInputGradients () const |
Matrix_t & | GetWeightsInputStateGradients () |
const Matrix_t & | GetWeightsInputStateGradients () const |
Matrix_t & | GetWeightsOutputGate () |
const Matrix_t & | GetWeightsOutputGate () const |
Matrix_t & | GetWeightsOutputGateState () |
const Matrix_t & | GetWeightsOutputGateState () const |
Matrix_t & | GetWeightsOutputGradients () |
const Matrix_t & | GetWeightsOutputGradients () const |
Matrix_t & | GetWeightsOutputStateGradients () |
const Matrix_t & | GetWeightsOutputStateGradients () const |
Tensor_t & | GetWeightsTensor () |
const Tensor_t & | GetWeightsTensor () const |
const Matrix_t & | GetWeigthsForgetStateGradients () const |
Tensor_t & | GetX () |
Tensor_t & | GetY () |
virtual void | Initialize () |
Initialize the weights according to the given initialization method. | |
void | InitState (DNN::EInitialization m=DNN::EInitialization::kZero) |
Initialize the hidden state and the cell state. | |
void | InputGate (const Matrix_t &input, Matrix_t &di) |
Decides the values we'll update (NN with Sigmoid) | |
void | OutputGate (const Matrix_t &input, Matrix_t &dout) |
Computes output values (NN with Sigmoid) | |
void | Print () const |
Prints the info about the layer. | |
void | ReadWeightsFromXML (void *parent) |
Read the information and the weights about the layer from XML node. | |
void | Update (const Scalar_t learningRate) |
Public Member Functions inherited from TMVA::DNN::VGeneralLayer< Architecture_t > | |
VGeneralLayer (const VGeneralLayer &) | |
Copy Constructor. | |
VGeneralLayer (size_t BatchSize, size_t InputDepth, size_t InputHeight, size_t InputWidth, size_t Depth, size_t Height, size_t Width, size_t WeightsNSlices, size_t WeightsNRows, size_t WeightsNCols, size_t BiasesNSlices, size_t BiasesNRows, size_t BiasesNCols, size_t OutputNSlices, size_t OutputNRows, size_t OutputNCols, EInitialization Init) | |
Constructor. | |
VGeneralLayer (size_t BatchSize, size_t InputDepth, size_t InputHeight, size_t InputWidth, size_t Depth, size_t Height, size_t Width, size_t WeightsNSlices, std::vector< size_t > WeightsNRows, std::vector< size_t > WeightsNCols, size_t BiasesNSlices, std::vector< size_t > BiasesNRows, std::vector< size_t > BiasesNCols, size_t OutputNSlices, size_t OutputNRows, size_t OutputNCols, EInitialization Init) | |
General constructor with different weight dimensions. | |
VGeneralLayer (VGeneralLayer< Architecture_t > *layer) | |
Copy the layer provided as a pointer. | |
virtual | ~VGeneralLayer () |
Virtual Destructor. | |
void | CopyBiases (const std::vector< Matrix_t > &otherBiases) |
Copies the biases provided as an input. | |
template<typename Arch > | |
void | CopyParameters (const VGeneralLayer< Arch > &layer) |
Copy all trainable weights and biases from another equivalent layer, possibly with a different architecture. The function can also copy extra parameters, in addition to weights and biases, if they are returned by GetExtraLayerParameters. | |
void | CopyWeights (const std::vector< Matrix_t > &otherWeights) |
Copies the weights provided as an input. | |
Tensor_t & | GetActivationGradients () |
const Tensor_t & | GetActivationGradients () const |
Matrix_t | GetActivationGradientsAt (size_t i) |
const Matrix_t & | GetActivationGradientsAt (size_t i) const |
size_t | GetBatchSize () const |
Getters. | |
std::vector< Matrix_t > & | GetBiases () |
const std::vector< Matrix_t > & | GetBiases () const |
Matrix_t & | GetBiasesAt (size_t i) |
const Matrix_t & | GetBiasesAt (size_t i) const |
std::vector< Matrix_t > & | GetBiasGradients () |
const std::vector< Matrix_t > & | GetBiasGradients () const |
Matrix_t & | GetBiasGradientsAt (size_t i) |
const Matrix_t & | GetBiasGradientsAt (size_t i) const |
size_t | GetDepth () const |
virtual std::vector< Matrix_t > | GetExtraLayerParameters () const |
size_t | GetHeight () const |
EInitialization | GetInitialization () const |
size_t | GetInputDepth () const |
size_t | GetInputHeight () const |
size_t | GetInputWidth () const |
Tensor_t & | GetOutput () |
const Tensor_t & | GetOutput () const |
Matrix_t | GetOutputAt (size_t i) |
const Matrix_t & | GetOutputAt (size_t i) const |
std::vector< Matrix_t > & | GetWeightGradients () |
const std::vector< Matrix_t > & | GetWeightGradients () const |
Matrix_t & | GetWeightGradientsAt (size_t i) |
const Matrix_t & | GetWeightGradientsAt (size_t i) const |
std::vector< Matrix_t > & | GetWeights () |
const std::vector< Matrix_t > & | GetWeights () const |
Matrix_t & | GetWeightsAt (size_t i) |
const Matrix_t & | GetWeightsAt (size_t i) const |
size_t | GetWidth () const |
bool | IsTraining () const |
void | ReadMatrixXML (void *node, const char *name, Matrix_t &matrix) |
virtual void | ResetTraining () |
Reset some training flags after a loop over all batches; some layers (e.g. batch normalization) need to perform operations after a full pass over the data. | |
void | SetBatchSize (size_t batchSize) |
Setters. | |
void | SetDepth (size_t depth) |
virtual void | SetDropoutProbability (Scalar_t) |
Set Dropout probability. | |
virtual void | SetExtraLayerParameters (const std::vector< Matrix_t > &) |
void | SetHeight (size_t height) |
void | SetInputDepth (size_t inputDepth) |
void | SetInputHeight (size_t inputHeight) |
void | SetInputWidth (size_t inputWidth) |
void | SetIsTraining (bool isTraining) |
void | SetWidth (size_t width) |
void | Update (const Scalar_t learningRate) |
Updates the weights and biases, given the learning rate. | |
void | UpdateBiases (const std::vector< Matrix_t > &biasGradients, const Scalar_t learningRate) |
Updates the biases, given the gradients and the learning rate. | |
void | UpdateBiasGradients (const std::vector< Matrix_t > &biasGradients, const Scalar_t learningRate) |
Updates the bias gradients, given some other bias gradients and the learning rate. | |
void | UpdateWeightGradients (const std::vector< Matrix_t > &weightGradients, const Scalar_t learningRate) |
Updates the weight gradients, given some other weight gradients and learning rate. | |
void | UpdateWeights (const std::vector< Matrix_t > &weightGradients, const Scalar_t learningRate) |
Updates the weights, given the gradients and the learning rate. | |
void | WriteMatrixToXML (void *node, const char *name, const Matrix_t &matrix) |
void | WriteTensorToXML (void *node, const char *name, const std::vector< Matrix_t > &tensor) |
helper functions for XML | |
Private Attributes | |
std::vector< Matrix_t > | candidate_gate_value |
candidate gate value for every time step | |
std::vector< Matrix_t > | cell_value |
cell value for every time step | |
Matrix_t & | fCandidateBias |
Candidate Gate bias. | |
Matrix_t & | fCandidateBiasGradients |
Gradients w.r.t the candidate gate - bias weights. | |
Matrix_t | fCandidateValue |
Computed candidate values. | |
Matrix_t | fCell |
Cell state of LSTM. | |
size_t | fCellSize |
Cell state size of LSTM. | |
std::vector< Matrix_t > | fDerivativesCandidate |
First fDerivatives of the activations candidate gate. | |
std::vector< Matrix_t > | fDerivativesForget |
First fDerivatives of the activations forget gate. | |
std::vector< Matrix_t > | fDerivativesInput |
First fDerivatives of the activations input gate. | |
std::vector< Matrix_t > | fDerivativesOutput |
First fDerivatives of the activations output gate. | |
TDescriptors * | fDescriptors = nullptr |
Keeps all the RNN descriptors. | |
Tensor_t | fDx |
cached gradient on the input (output of backward) as T x B x I | |
Tensor_t | fDy |
cached activation gradient (input of backward) as T x B x S | |
DNN::EActivationFunction | fF1 |
Activation function: sigmoid. | |
DNN::EActivationFunction | fF2 |
Activation function: tanh. | |
Matrix_t & | fForgetBiasGradients |
Gradients w.r.t the forget gate - bias weights. | |
Matrix_t & | fForgetGateBias |
Forget Gate bias. | |
Matrix_t | fForgetValue |
Computed forget gate values. | |
Matrix_t & | fInputBiasGradients |
Gradients w.r.t the input gate - bias weights. | |
Matrix_t & | fInputGateBias |
Input Gate bias. | |
Matrix_t | fInputValue |
Computed input gate values. | |
std::vector< Matrix_t > | forget_gate_value |
forget gate value for every time step | |
Matrix_t & | fOutputBiasGradients |
Gradients w.r.t the output gate - bias weights. | |
Matrix_t & | fOutputGateBias |
Output Gate bias. | |
Matrix_t | fOutputValue |
Computed output gate values. | |
bool | fRememberState |
Remember state in next pass. | |
bool | fReturnSequence = false |
Return in output full sequence or just last element. | |
Matrix_t | fState |
Hidden state of LSTM. | |
size_t | fStateSize |
Hidden state size for LSTM. | |
size_t | fTimeSteps |
Timesteps for LSTM. | |
Tensor_t | fWeightGradientsTensor |
Tensor for all weight gradients. | |
Matrix_t & | fWeightsCandidate |
Candidate Gate weights for input, fWeights[4]. | |
Matrix_t & | fWeightsCandidateGradients |
Gradients w.r.t the candidate gate - input weights. | |
Matrix_t & | fWeightsCandidateState |
Candidate Gate weights for prev state, fWeights[5]. | |
Matrix_t & | fWeightsCandidateStateGradients |
Gradients w.r.t the candidate gate - hidden state weights. | |
Matrix_t & | fWeightsForgetGate |
Forget Gate weights for input, fWeights[2]. | |
Matrix_t & | fWeightsForgetGateState |
Forget Gate weights for prev state, fWeights[3]. | |
Matrix_t & | fWeightsForgetGradients |
Gradients w.r.t the forget gate - input weights. | |
Matrix_t & | fWeightsForgetStateGradients |
Gradients w.r.t the forget gate - hidden state weights. | |
Matrix_t & | fWeightsInputGate |
Input Gate weights for input, fWeights[0]. | |
Matrix_t & | fWeightsInputGateState |
Input Gate weights for prev state, fWeights[1]. | |
Matrix_t & | fWeightsInputGradients |
Gradients w.r.t the input gate - input weights. | |
Matrix_t & | fWeightsInputStateGradients |
Gradients w.r.t the input gate - hidden state weights. | |
Matrix_t & | fWeightsOutputGate |
Output Gate weights for input, fWeights[6]. | |
Matrix_t & | fWeightsOutputGateState |
Output Gate weights for prev state, fWeights[7]. | |
Matrix_t & | fWeightsOutputGradients |
Gradients w.r.t the output gate - input weights. | |
Matrix_t & | fWeightsOutputStateGradients |
Gradients w.r.t the output gate - hidden state weights. | |
Tensor_t | fWeightsTensor |
Tensor for all weights. | |
TWorkspace * | fWorkspace = nullptr |
Tensor_t | fX |
cached input tensor as T x B x I | |
Tensor_t | fY |
cached output tensor as T x B x S | |
std::vector< Matrix_t > | input_gate_value |
input gate value for every time step | |
std::vector< Matrix_t > | output_gate_value |
output gate value for every time step | |
Additional Inherited Members | |
Protected Attributes inherited from TMVA::DNN::VGeneralLayer< Architecture_t > | |
Tensor_t | fActivationGradients |
Gradients w.r.t. the activations of this layer. | |
size_t | fBatchSize |
Batch size used for training and evaluation. | |
std::vector< Matrix_t > | fBiases |
The biases associated to the layer. | |
std::vector< Matrix_t > | fBiasGradients |
Gradients w.r.t. the bias values of the layer. | |
size_t | fDepth |
The depth of the layer. | |
size_t | fHeight |
The height of the layer. | |
EInitialization | fInit |
The initialization method. | |
size_t | fInputDepth |
The depth of the previous layer or input. | |
size_t | fInputHeight |
The height of the previous layer or input. | |
size_t | fInputWidth |
The width of the previous layer or input. | |
bool | fIsTraining |
Flag indicating the mode. | |
Tensor_t | fOutput |
Activations of this layer. | |
std::vector< Matrix_t > | fWeightGradients |
Gradients w.r.t. the weights of the layer. | |
std::vector< Matrix_t > | fWeights |
The weights associated to the layer. | |
size_t | fWidth |
The width of this layer. | |
#include <TMVA/DNN/RNN/LSTMLayer.h>
using TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::HelperDescriptor_t = typename Architecture_t::DropoutDescriptor_t |
Definition at line 68 of file LSTMLayer.h.
using TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::LayerDescriptor_t = typename Architecture_t::RecurrentDescriptor_t |
Definition at line 65 of file LSTMLayer.h.
using TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::Matrix_t = typename Architecture_t::Matrix_t |
Definition at line 61 of file LSTMLayer.h.
using TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::RNNDescriptors_t = typename Architecture_t::RNNDescriptors_t |
Definition at line 71 of file LSTMLayer.h.
using TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::RNNWorkspace_t = typename Architecture_t::RNNWorkspace_t |
Definition at line 70 of file LSTMLayer.h.
using TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::Scalar_t = typename Architecture_t::Scalar_t |
Definition at line 62 of file LSTMLayer.h.
using TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::Tensor_t = typename Architecture_t::Tensor_t |
Definition at line 63 of file LSTMLayer.h.
using TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::TensorDescriptor_t = typename Architecture_t::TensorDescriptor_t |
Definition at line 67 of file LSTMLayer.h.
using TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::WeightsDescriptor_t = typename Architecture_t::FilterDescriptor_t |
Definition at line 66 of file LSTMLayer.h.
TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::TBasicLSTMLayer ( size_t batchSize, size_t stateSize, size_t inputSize, size_t timeSteps, bool rememberState = false, bool returnSequence = false, DNN::EActivationFunction f1 = DNN::EActivationFunction::kSigmoid, DNN::EActivationFunction f2 = DNN::EActivationFunction::kTanh, bool training = true, DNN::EInitialization fA = DNN::EInitialization::kZero )
Constructor.
Definition at line 341 of file LSTMLayer.h.
TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::TBasicLSTMLayer ( const TBasicLSTMLayer< Architecture_t > & layer )
Copy Constructor.
Definition at line 385 of file LSTMLayer.h.
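As a usage illustration (a minimal sketch, not part of the class documentation): assuming the reference CPU architecture TMVA::DNN::TCpu<Double_t> and its header TMVA/DNN/Architectures/Cpu.h (both not documented on this page), a layer for sequences of 10 time steps with 5 input features and 8 hidden units could be instantiated as follows.

#include "TMVA/DNN/Architectures/Cpu.h"  // assumed architecture header
#include "TMVA/DNN/RNN/LSTMLayer.h"

using Architecture_t = TMVA::DNN::TCpu<Double_t>;
using LSTM_t         = TMVA::DNN::RNN::TBasicLSTMLayer<Architecture_t>;

// batchSize = 4, stateSize = 8, inputSize = 5, timeSteps = 10;
// the remaining arguments keep their defaults (no remembered state, only the
// last element returned, sigmoid/tanh gate activations, zero initialization).
LSTM_t lstm(4, 8, 5, 10);
// Apply the chosen EInitialization to the weights.
lstm.Initialize();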
void TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::AddWeightsXMLTo ( void * parent ) [inline], [virtual]
Writes the information and the weights about the layer in an XML node.
Implements TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 923 of file LSTMLayer.h.
void TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::Backward ( Tensor_t & gradients_backward, const Tensor_t & activations_backward ) [inline], [virtual]
Backpropagates the error.
Must only be called directly after the corresponding call to Forward(...).
Implements TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 681 of file LSTMLayer.h.
void TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::CandidateValue ( const Matrix_t & input, Matrix_t & dc ) [inline]
Decides the new candidate values (NN with Tanh)
Definition at line 515 of file LSTMLayer.h.
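In the standard LSTM formulation this step evaluates, with the default tanh activation (fF2) and writing W_c, U_c, b_c for fWeightsCandidate, fWeightsCandidateState and fCandidateBias (a sketch; matrix-layout and transposition conventions follow the implementation):

\tilde{C}_t = \tanh\left( W_c\, x_t + U_c\, h_{t-1} + b_c \right)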
Matrix_t & TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::CellBackward ( Matrix_t & state_gradients_backward, Matrix_t & cell_gradients_backward, const Matrix_t & precStateActivations, const Matrix_t & precCellActivations, const Matrix_t & input_gate, const Matrix_t & forget_gate, const Matrix_t & candidate_gate, const Matrix_t & output_gate, const Matrix_t & input, Matrix_t & input_gradient, Matrix_t & di, Matrix_t & df, Matrix_t & dc, Matrix_t & dout, size_t t ) [inline]
Backward pass for a single time unit, matching the corresponding call to Forward(...).
Definition at line 863 of file LSTMLayer.h.
void TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::CellForward ( Matrix_t & inputGateValues, const Matrix_t & forgetGateValues, const Matrix_t & candidateValues, const Matrix_t & outputGateValues ) [inline]
Forward for a single cell (time unit)
Definition at line 655 of file LSTMLayer.h.
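In the standard formulation, the cell combines the four gate outputs into the next cell state C_t and hidden state h_t, where i_t, f_t, \tilde{C}_t and o_t are the values produced by InputGate, ForgetGate, CandidateValue and OutputGate (a sketch; \odot denotes the elementwise product):

C_t = f_t \odot C_{t-1} + i_t \odot \tilde{C}_t , \qquad h_t = o_t \odot \tanh(C_t)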
bool TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::DoesRememberState ( ) const [inline]
Definition at line 216 of file LSTMLayer.h.
bool TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::DoesReturnSequence ( ) const [inline]
Definition at line 217 of file LSTMLayer.h.
void TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::ForgetGate ( const Matrix_t & input, Matrix_t & df ) [inline]
Forgets the past values (NN with Sigmoid)
Definition at line 497 of file LSTMLayer.h.
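With the default sigmoid activation (fF1) and writing W_f, U_f, b_f for fWeightsForgetGate, fWeightsForgetGateState and fForgetGateBias, this corresponds to the standard forget-gate expression (a sketch):

f_t = \sigma\left( W_f\, x_t + U_f\, h_{t-1} + b_f \right)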
void TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::Forward ( Tensor_t & input, bool isTraining = true ) [inline], [virtual]
Computes the next hidden state and next cell state with given input matrix.
Implements TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 553 of file LSTMLayer.h.
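Continuing the construction sketch above (the lstm object defined there; the tensor input is assumed to be an Architecture_t::Tensor_t already filled with the sequence, organised as documented for fX below, i.e. timeSteps x batchSize x inputSize):

// One forward pass over the whole sequence; isTraining = false for inference.
lstm.Forward(input, /*isTraining=*/false);

const auto &output = lstm.GetOutput(); // full sequence or last step, depending on returnSequence
const auto &h      = lstm.GetState();  // hidden state after the last time step
const auto &c      = lstm.GetCell();   // cell state after the last time step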
The remaining public member functions are the inline accessors listed in the member table above (GetActivationFunctionF1/F2, GetX/GetY/GetDX/GetDY, and the various Get... getters); they are defined between lines 211 and 332 of LSTMLayer.h and carry no documentation beyond their declarations.
void TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::Initialize ( ) [virtual]
Initialize the weights according to the given initialization method.
Reimplemented from TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 469 of file LSTMLayer.h.
auto TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::InitState ( DNN::EInitialization m = DNN::EInitialization::kZero )
Initialize the hidden state and the cell state.
Definition at line 902 of file LSTMLayer.h.
void TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::InputGate ( const Matrix_t & input, Matrix_t & di ) [inline]
Decides the values we'll update (NN with Sigmoid)
Definition at line 479 of file LSTMLayer.h.
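With the default sigmoid activation (fF1) and writing W_i, U_i, b_i for fWeightsInputGate, fWeightsInputGateState and fInputGateBias, this corresponds to the standard input-gate expression (a sketch):

i_t = \sigma\left( W_i\, x_t + U_i\, h_{t-1} + b_i \right)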
void TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::OutputGate ( const Matrix_t & input, Matrix_t & dout ) [inline]
Computes output values (NN with Sigmoid)
Definition at line 533 of file LSTMLayer.h.
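With the default sigmoid activation (fF1) and writing W_o, U_o, b_o for fWeightsOutputGate, fWeightsOutputGateState and fOutputGateBias, this corresponds to the standard output-gate expression (a sketch):

o_t = \sigma\left( W_o\, x_t + U_o\, h_{t-1} + b_o \right)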
void TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::Print ( ) const [virtual]
Prints the info about the layer.
Implements TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 911 of file LSTMLayer.h.
void TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::ReadWeightsFromXML ( void * parent ) [inline], [virtual]
Read the information and the weights about the layer from XML node.
Implements TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 953 of file LSTMLayer.h.
void TMVA::DNN::RNN::TBasicLSTMLayer< Architecture_t >::Update ( const Scalar_t learningRate )
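Schematically, the update applies a plain gradient-descent step with learning rate \eta to every weight and bias matrix (a sketch of what the inherited UpdateWeights/UpdateBiases helpers express; the actual optimizer logic lives in the training code):

W \leftarrow W - \eta\, \frac{\partial L}{\partial W} , \qquad b \leftarrow b - \eta\, \frac{\partial L}{\partial b}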
All data members of TBasicLSTMLayer are private; their brief descriptions are the ones given in the attribute list above. Their declarations in LSTMLayer.h are found at the following lines: fStateSize (75), fCellSize (76), fTimeSteps (77), fRememberState (79), fReturnSequence (80), fF1 (82), fF2 (83), fInputValue (85), fCandidateValue (86), fForgetValue (87), fOutputValue (88), fState (89), fCell (90), fWeightsInputGate (92), fWeightsInputGateState (93), fInputGateBias (94), fWeightsForgetGate (96), fWeightsForgetGateState (97), fForgetGateBias (98), fWeightsCandidate (100), fWeightsCandidateState (101), fCandidateBias (102), fWeightsOutputGate (104), fWeightsOutputGateState (105), fOutputGateBias (106), input_gate_value (108), forget_gate_value (109), candidate_gate_value (110), output_gate_value (111), cell_value (112), fDerivativesInput (113), fDerivativesForget (114), fDerivativesCandidate (115), fDerivativesOutput (116), fWeightsInputGradients (118), fWeightsInputStateGradients (119), fInputBiasGradients (120), fWeightsForgetGradients (121), fWeightsForgetStateGradients (122), fForgetBiasGradients (123), fWeightsCandidateGradients (124), fWeightsCandidateStateGradients (125), fCandidateBiasGradients (126), fWeightsOutputGradients (127), fWeightsOutputStateGradients (128), fOutputBiasGradients (129), fWeightsTensor (132), fWeightGradientsTensor (133), fX (136), fY (137), fDx (138), fDy (139), fDescriptors (141), fWorkspace (142).