Definition at line 56 of file RNNLayer.h.
Public Types | |
| using | HelperDescriptor_t = typename Architecture_t::DropoutDescriptor_t |
| using | LayerDescriptor_t = typename Architecture_t::RecurrentDescriptor_t |
| using | Matrix_t = typename Architecture_t::Matrix_t |
| using | RNNDescriptors_t = typename Architecture_t::RNNDescriptors_t |
| using | RNNWorkspace_t = typename Architecture_t::RNNWorkspace_t |
| using | Scalar_t = typename Architecture_t::Scalar_t |
| using | Tensor_t = typename Architecture_t::Tensor_t |
| using | TensorDescriptor_t = typename Architecture_t::TensorDescriptor_t |
| using | WeightsDescriptor_t = typename Architecture_t::FilterDescriptor_t |
Public Member Functions | |
| TBasicRNNLayer (const TBasicRNNLayer &) | |
| Copy Constructor. | |
| TBasicRNNLayer (size_t batchSize, size_t stateSize, size_t inputSize, size_t timeSteps, bool rememberState=false, bool returnSequence=false, DNN::EActivationFunction f=DNN::EActivationFunction::kTanh, bool training=true, DNN::EInitialization fA=DNN::EInitialization::kZero) | |
| Constructor. | |
| virtual | ~TBasicRNNLayer () |
| Destructor. | |
| void | AddWeightsXMLTo (void *parent) override |
| Writes the information and the weights about the layer in an XML node. | |
| void | Backward (Tensor_t &gradients_backward, const Tensor_t &activations_backward) override |
| Backpropagates the error. | |
| Matrix_t & | CellBackward (Matrix_t &state_gradients_backward, const Matrix_t &precStateActivations, const Matrix_t &input, Matrix_t &input_gradient, Matrix_t &dF) |
| Backward for a single time unit at the corresponding call to Forward(...). | |
| void | CellForward (const Matrix_t &input, Matrix_t &dF) |
| Forward for a single cell (time unit). | |
| void | CopyBiases (const std::vector< Matrix_t > &otherBiases) |
| Copies the biases provided as an input. | |
| template<typename Arch> | |
| void | CopyParameters (const VGeneralLayer< Arch > &layer) |
| Copy all trainable weights and biases from another equivalent layer but with a different architecture. The function can also copy extra parameters in addition to weights and biases if they are returned by the function GetExtraLayerParameters. | |
| void | CopyWeights (const std::vector< Matrix_t > &otherWeights) |
| Copies the weights provided as an input. | |
| bool | DoesRememberState () const |
| bool | DoesReturnSequence () const |
| void | Forward (Tensor_t &input, bool isTraining=true) override |
| Compute and return the next state with given input matrix. | |
| DNN::EActivationFunction | GetActivationFunction () const |
| Tensor_t & | GetActivationGradients () |
| const Tensor_t & | GetActivationGradients () const |
| Matrix_t | GetActivationGradientsAt (size_t i) |
| const Matrix_t & | GetActivationGradientsAt (size_t i) const |
| size_t | GetBatchSize () const |
| Getters. | |
| std::vector< Matrix_t > & | GetBiases () |
| const std::vector< Matrix_t > & | GetBiases () const |
| Matrix_t & | GetBiasesAt (size_t i) |
| const Matrix_t & | GetBiasesAt (size_t i) const |
| Matrix_t & | GetBiasesState () |
| const Matrix_t & | GetBiasesState () const |
| std::vector< Matrix_t > & | GetBiasGradients () |
| const std::vector< Matrix_t > & | GetBiasGradients () const |
| Matrix_t & | GetBiasGradientsAt (size_t i) |
| const Matrix_t & | GetBiasGradientsAt (size_t i) const |
| Matrix_t & | GetBiasStateGradients () |
| const Matrix_t & | GetBiasStateGradients () const |
| Matrix_t & | GetCell () |
| const Matrix_t & | GetCell () const |
| size_t | GetDepth () const |
| Tensor_t & | GetDerivatives () |
| const Tensor_t & | GetDerivatives () const |
| Tensor_t & | GetDX () |
| Tensor_t & | GetDY () |
| virtual std::vector< Matrix_t > | GetExtraLayerParameters () const |
| size_t | GetHeight () const |
| EInitialization | GetInitialization () const |
| size_t | GetInputDepth () const |
| size_t | GetInputHeight () const |
| size_t | GetInputSize () const |
| size_t | GetInputWidth () const |
| Tensor_t & | GetOutput () |
| const Tensor_t & | GetOutput () const |
| Matrix_t | GetOutputAt (size_t i) |
| const Matrix_t & | GetOutputAt (size_t i) const |
| Matrix_t & | GetState () |
| const Matrix_t & | GetState () const |
| size_t | GetStateSize () const |
| size_t | GetTimeSteps () const |
| Getters. | |
| std::vector< Matrix_t > & | GetWeightGradients () |
| const std::vector< Matrix_t > & | GetWeightGradients () const |
| Matrix_t & | GetWeightGradientsAt (size_t i) |
| const Matrix_t & | GetWeightGradientsAt (size_t i) const |
| Tensor_t & | GetWeightGradientsTensor () |
| const Tensor_t & | GetWeightGradientsTensor () const |
| Matrix_t & | GetWeightInputGradients () |
| const Matrix_t & | GetWeightInputGradients () const |
| std::vector< Matrix_t > & | GetWeights () |
| const std::vector< Matrix_t > & | GetWeights () const |
| Matrix_t & | GetWeightsAt (size_t i) |
| const Matrix_t & | GetWeightsAt (size_t i) const |
| Matrix_t & | GetWeightsInput () |
| const Matrix_t & | GetWeightsInput () const |
| Matrix_t & | GetWeightsState () |
| const Matrix_t & | GetWeightsState () const |
| Matrix_t & | GetWeightStateGradients () |
| const Matrix_t & | GetWeightStateGradients () const |
| Tensor_t & | GetWeightsTensor () |
| const Tensor_t & | GetWeightsTensor () const |
| size_t | GetWidth () const |
| Tensor_t & | GetX () |
| Tensor_t & | GetY () |
| void | Initialize () override |
| Initialize the weights according to the given initialization method. | |
| void | InitState (DNN::EInitialization m=DNN::EInitialization::kZero) |
| Initialize the state method. | |
| void | InitTensors () |
| bool | IsTraining () const |
| void | Print () const override |
| Prints the info about the layer. | |
| void | ReadMatrixXML (void *node, const char *name, Matrix_t &matrix) |
| void | ReadWeightsFromXML (void *parent) override |
| Read the information and the weights about the layer from XML node. | |
| virtual void | ResetTraining () |
| Reset some training flags after a loop on all batches. Some layers (e.g. | |
| void | SetBatchSize (size_t batchSize) |
| Setters. | |
| void | SetDepth (size_t depth) |
| virtual void | SetDropoutProbability (Scalar_t) |
| Set Dropout probability. | |
| virtual void | SetExtraLayerParameters (const std::vector< Matrix_t > &) |
| void | SetHeight (size_t height) |
| void | SetInputDepth (size_t inputDepth) |
| void | SetInputHeight (size_t inputHeight) |
| void | SetInputWidth (size_t inputWidth) |
| void | SetIsTraining (bool isTraining) |
| void | SetWidth (size_t width) |
| void | Update (const Scalar_t learningRate) |
| void | UpdateBiases (const std::vector< Matrix_t > &biasGradients, const Scalar_t learningRate) |
| Updates the biases, given the gradients and the learning rate. | |
| void | UpdateBiasGradients (const std::vector< Matrix_t > &biasGradients, const Scalar_t learningRate) |
| Updates the bias gradients, given some other bias gradients and the learning rate. | |
| void | UpdateWeightGradients (const std::vector< Matrix_t > &weightGradients, const Scalar_t learningRate) |
| Updates the weight gradients, given some other weight gradients and learning rate. | |
| void | UpdateWeights (const std::vector< Matrix_t > &weightGradients, const Scalar_t learningRate) |
| Updates the weights, given the gradients and the learning rate. | |
| void | WriteMatrixToXML (void *node, const char *name, const Matrix_t &matrix) |
| void | WriteTensorToXML (void *node, const char *name, const std::vector< Matrix_t > &tensor) |
| helper functions for XML | |
Protected Attributes | |
| Tensor_t | fActivationGradients |
| Gradients w.r.t. the activations of this layer. | |
| size_t | fBatchSize |
| Batch size used for training and evaluation. | |
| size_t | fDepth |
| The depth of the layer. | |
| size_t | fHeight |
| The height of the layer. | |
| EInitialization | fInit |
| The initialization method. | |
| size_t | fInputDepth |
| The depth of the previous layer or input. | |
| size_t | fInputHeight |
| The height of the previous layer or input. | |
| size_t | fInputWidth |
| The width of the previous layer or input. | |
| bool | fIsTraining |
| Flag indicating the mode. | |
| Tensor_t | fOutput |
| Activations of this layer. | |
| std::vector< Matrix_t > | fWeightGradients |
| Gradients w.r.t. the weights of the layer. | |
| std::vector< Matrix_t > | fWeights |
| The weights associated to the layer. | |
| size_t | fWidth |
| The width of this layer. | |
Private Attributes | |
| Architecture_t::ActivationDescriptor_t | fActivationDesc |
| Matrix_t & | fBiases |
| Biases. | |
| Matrix_t & | fBiasGradients |
| Gradients w.r.t. the bias values. | |
| Matrix_t | fCell |
| Empty matrix for RNN. | |
| Tensor_t | fDerivatives |
| First derivatives of the activations. | |
| TDescriptors * | fDescriptors = nullptr |
| Keeps all the RNN descriptors. | |
| Tensor_t | fDx |
| cached gradient on the input (output of backward) as T x B x I | |
| Tensor_t | fDy |
| cached activation gradient (input of backward) as T x B x S | |
| DNN::EActivationFunction | fF |
| Activation function of the hidden state. | |
| bool | fRememberState |
| Remember state in next pass. | |
| bool | fReturnSequence = false |
| Return in output full sequence or just last element in time. | |
| Matrix_t | fState |
| Hidden State. | |
| size_t | fStateSize |
| Hidden state size of RNN. | |
| size_t | fTimeSteps |
| Timesteps for RNN. | |
| Tensor_t | fWeightGradientsTensor |
| Matrix_t & | fWeightInputGradients |
| Gradients w.r.t. the input weights. | |
| Matrix_t & | fWeightsInput |
| Input weights, fWeights[0]. | |
| Matrix_t & | fWeightsState |
| Prev state weights, fWeights[1]. | |
| Matrix_t & | fWeightStateGradients |
| Gradients w.r.t. the recurrent weights. | |
| Tensor_t | fWeightsTensor |
| TWorkspace * | fWorkspace = nullptr |
| Tensor_t | fX |
| cached input tensor as T x B x I | |
| Tensor_t | fY |
| cached output tensor as T x B x S | |
#include <TMVA/DNN/RNN/RNNLayer.h>
| using TMVA::DNN::RNN::TBasicRNNLayer< Architecture_t >::HelperDescriptor_t = typename Architecture_t::DropoutDescriptor_t |
Definition at line 68 of file RNNLayer.h.
| using TMVA::DNN::RNN::TBasicRNNLayer< Architecture_t >::LayerDescriptor_t = typename Architecture_t::RecurrentDescriptor_t |
Definition at line 65 of file RNNLayer.h.
| using TMVA::DNN::RNN::TBasicRNNLayer< Architecture_t >::Matrix_t = typename Architecture_t::Matrix_t |
Definition at line 62 of file RNNLayer.h.
| using TMVA::DNN::RNN::TBasicRNNLayer< Architecture_t >::RNNDescriptors_t = typename Architecture_t::RNNDescriptors_t |
Definition at line 71 of file RNNLayer.h.
| using TMVA::DNN::RNN::TBasicRNNLayer< Architecture_t >::RNNWorkspace_t = typename Architecture_t::RNNWorkspace_t |
Definition at line 70 of file RNNLayer.h.
| using TMVA::DNN::RNN::TBasicRNNLayer< Architecture_t >::Scalar_t = typename Architecture_t::Scalar_t |
Definition at line 63 of file RNNLayer.h.
| using TMVA::DNN::RNN::TBasicRNNLayer< Architecture_t >::Tensor_t = typename Architecture_t::Tensor_t |
Definition at line 61 of file RNNLayer.h.
| using TMVA::DNN::RNN::TBasicRNNLayer< Architecture_t >::TensorDescriptor_t = typename Architecture_t::TensorDescriptor_t |
Definition at line 67 of file RNNLayer.h.
| using TMVA::DNN::RNN::TBasicRNNLayer< Architecture_t >::WeightsDescriptor_t = typename Architecture_t::FilterDescriptor_t |
Definition at line 66 of file RNNLayer.h.
| TMVA::DNN::RNN::TBasicRNNLayer< Architecture_t >::TBasicRNNLayer | ( | size_t | batchSize, |
| size_t | stateSize, | ||
| size_t | inputSize, | ||
| size_t | timeSteps, | ||
| bool | rememberState = false, | ||
| bool | returnSequence = false, | ||
| DNN::EActivationFunction | f = DNN::EActivationFunction::kTanh, | ||
| bool | training = true, | ||
| DNN::EInitialization | fA = DNN::EInitialization::kZero ) |
Constructor.
Definition at line 213 of file RNNLayer.h.
| TMVA::DNN::RNN::TBasicRNNLayer< Architecture_t >::TBasicRNNLayer | ( | const TBasicRNNLayer< Architecture_t > & | layer | ) |
Copy Constructor.
Definition at line 231 of file RNNLayer.h.
|
virtual |
Destructor.
Definition at line 249 of file RNNLayer.h.
|
overridevirtual |
Writes the information and the weights about the layer in an XML node.
Implements TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 599 of file RNNLayer.h.
|
inlineoverridevirtual |
Backpropagates the error.
Must only be called directly at the corresponding call to Forward(...).
Implements TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 431 of file RNNLayer.h.
|
inline |
Backward for a single time unit at the corresponding call to Forward(...).
Definition at line 587 of file RNNLayer.h.
|
inline |
Forward for a single cell (time unit).
Definition at line 408 of file RNNLayer.h.
|
inherited |
Copies the biases provided as an input.
Definition at line 468 of file GeneralLayer.h.
|
inherited |
Copy all trainable weights and biases from another equivalent layer but with a different architecture. The function can also copy extra parameters in addition to weights and biases if they are returned by the function GetExtraLayerParameters.
Definition at line 478 of file GeneralLayer.h.
|
inherited |
Copies the weights provided as an input.
Definition at line 458 of file GeneralLayer.h.
|
inline |
Definition at line 171 of file RNNLayer.h.
|
inline |
Definition at line 172 of file RNNLayer.h.
|
overridevirtual |
Compute and return the next state with given input matrix.
Implements TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 322 of file RNNLayer.h.
|
inline |
Definition at line 173 of file RNNLayer.h.
|
inlineinherited |
Definition at line 200 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 199 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 205 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 206 of file GeneralLayer.h.
|
inlineinherited |
Getters.
Definition at line 163 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 179 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 178 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 182 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 181 of file GeneralLayer.h.
|
inline |
Definition at line 188 of file RNNLayer.h.
|
inline |
Definition at line 189 of file RNNLayer.h.
|
inlineinherited |
Definition at line 191 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 190 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 194 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 193 of file GeneralLayer.h.
|
inline |
Definition at line 190 of file RNNLayer.h.
|
inline |
Definition at line 191 of file RNNLayer.h.
|
inline |
Definition at line 176 of file RNNLayer.h.
|
inline |
Definition at line 177 of file RNNLayer.h.
|
inlineinherited |
Definition at line 167 of file GeneralLayer.h.
|
inline |
Definition at line 183 of file RNNLayer.h.
|
inline |
Definition at line 184 of file RNNLayer.h.
|
inline |
Definition at line 204 of file RNNLayer.h.
|
inline |
Definition at line 205 of file RNNLayer.h.
|
inlinevirtualinherited |
Reimplemented in TMVA::DNN::TBatchNormLayer< Architecture_t >, TMVA::DNN::TBatchNormLayer< TCpu< AReal > >, and TMVA::DNN::TBatchNormLayer< TCuda< AReal > >.
Definition at line 210 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 168 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 214 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 164 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 165 of file GeneralLayer.h.
|
inline |
Definition at line 170 of file RNNLayer.h.
|
inlineinherited |
Definition at line 166 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 197 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 196 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 202 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 203 of file GeneralLayer.h.
|
inline |
Definition at line 174 of file RNNLayer.h.
|
inline |
Definition at line 175 of file RNNLayer.h.
|
inline |
Definition at line 169 of file RNNLayer.h.
|
inline |
Getters.
Definition at line 168 of file RNNLayer.h.
|
inlineinherited |
Definition at line 185 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 184 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 188 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 187 of file GeneralLayer.h.
|
inline |
Definition at line 199 of file RNNLayer.h.
|
inline |
Definition at line 200 of file RNNLayer.h.
|
inline |
Definition at line 192 of file RNNLayer.h.
|
inline |
Definition at line 193 of file RNNLayer.h.
|
inlineinherited |
Definition at line 173 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 172 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 176 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 175 of file GeneralLayer.h.
|
inline |
Definition at line 179 of file RNNLayer.h.
|
inline |
Definition at line 180 of file RNNLayer.h.
|
inline |
Definition at line 181 of file RNNLayer.h.
|
inline |
Definition at line 182 of file RNNLayer.h.
|
inline |
Definition at line 194 of file RNNLayer.h.
|
inline |
Definition at line 195 of file RNNLayer.h.
|
inline |
Definition at line 197 of file RNNLayer.h.
|
inline |
Definition at line 198 of file RNNLayer.h.
|
inlineinherited |
Definition at line 169 of file GeneralLayer.h.
|
inline |
Definition at line 202 of file RNNLayer.h.
|
inline |
Definition at line 203 of file RNNLayer.h.
|
overridevirtual |
Initialize the weights according to the given initialization method.
Reimplemented from TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 264 of file RNNLayer.h.
| auto TMVA::DNN::RNN::TBasicRNNLayer< Architecture_t >::InitState | ( | DNN::EInitialization | m = DNN::EInitialization::kZero | ) |
Initialize the state method.
Definition at line 286 of file RNNLayer.h.
| void TMVA::DNN::RNN::TBasicRNNLayer< Architecture_t >::InitTensors | ( | ) |
Definition at line 279 of file RNNLayer.h.
|
inlineinherited |
Definition at line 170 of file GeneralLayer.h.
|
overridevirtual |
Prints the info about the layer.
Implements TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 295 of file RNNLayer.h.
|
inherited |
Definition at line 544 of file GeneralLayer.h.
|
overridevirtual |
Read the information and the weights about the layer from XML node.
Implements TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 620 of file RNNLayer.h.
|
inlinevirtualinherited |
Reset some training flags after a loop on all batches. Some layers (e.g.
batch normalization) might need to implement the function in case some operations are needed after looping on all batches
Reimplemented in TMVA::DNN::TBatchNormLayer< Architecture_t >, TMVA::DNN::TBatchNormLayer< TCpu< AReal > >, and TMVA::DNN::TBatchNormLayer< TCuda< AReal > >.
Definition at line 121 of file GeneralLayer.h.
|
inlineinherited |
Setters.
Definition at line 217 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 221 of file GeneralLayer.h.
|
inlinevirtualinherited |
Set Dropout probability.
Reimplemented for layers supporting dropout
Reimplemented in TMVA::DNN::TDenseLayer< Architecture_t >.
Definition at line 160 of file GeneralLayer.h.
|
inlinevirtualinherited |
Reimplemented in TMVA::DNN::TBatchNormLayer< Architecture_t >, TMVA::DNN::TBatchNormLayer< TCpu< AReal > >, and TMVA::DNN::TBatchNormLayer< TCuda< AReal > >.
Definition at line 212 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 222 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 218 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 219 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 220 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 224 of file GeneralLayer.h.
|
inlineinherited |
Definition at line 223 of file GeneralLayer.h.
| void TMVA::DNN::RNN::TBasicRNNLayer< Architecture_t >::Update | ( | const Scalar_t | learningRate | ) |
|
inherited |
Updates the biases, given the gradients and the learning rate.
Definition at line 428 of file GeneralLayer.h.
|
inherited |
Updates the bias gradients, given some other bias gradients and the learning rate.
Definition at line 448 of file GeneralLayer.h.
|
inherited |
Updates the weight gradients, given some other weight gradients and learning rate.
Definition at line 438 of file GeneralLayer.h.
|
inherited |
Updates the weights, given the gradients and the learning rate.
Definition at line 418 of file GeneralLayer.h.
|
inherited |
Definition at line 521 of file GeneralLayer.h.
|
inherited |
helper functions for XML
Definition at line 496 of file GeneralLayer.h.
|
private |
Definition at line 95 of file RNNLayer.h.
|
protectedinherited |
Gradients w.r.t. the activations of this layer.
Definition at line 78 of file GeneralLayer.h.
|
protectedinherited |
Batch size used for training and evaluation.
Definition at line 59 of file GeneralLayer.h.
|
private |
Biases.
Definition at line 85 of file RNNLayer.h.
|
private |
Gradients w.r.t. the bias values.
Definition at line 90 of file RNNLayer.h.
|
private |
Empty matrix for RNN.
Definition at line 100 of file RNNLayer.h.
|
protectedinherited |
The depth of the layer.
Definition at line 65 of file GeneralLayer.h.
|
private |
First derivatives of the activations.
Definition at line 87 of file RNNLayer.h.
|
private |
Keeps all the RNN descriptors.
Definition at line 97 of file RNNLayer.h.
|
private |
cached gradient on the input (output of backward) as T x B x I
Definition at line 105 of file RNNLayer.h.
|
private |
cached activation gradient (input of backward) as T x B x S
Definition at line 106 of file RNNLayer.h.
|
private |
Activation function of the hidden state.
Definition at line 80 of file RNNLayer.h.
|
protectedinherited |
The height of the layer.
Definition at line 66 of file GeneralLayer.h.
|
protectedinherited |
The initialization method.
Definition at line 80 of file GeneralLayer.h.
|
protectedinherited |
The depth of the previous layer or input.
Definition at line 61 of file GeneralLayer.h.
|
protectedinherited |
The height of the previous layer or input.
Definition at line 62 of file GeneralLayer.h.
|
protectedinherited |
The width of the previous layer or input.
Definition at line 63 of file GeneralLayer.h.
|
protectedinherited |
Flag indicating the mode.
Definition at line 69 of file GeneralLayer.h.
|
protectedinherited |
Activations of this layer.
Definition at line 77 of file GeneralLayer.h.
|
private |
Remember state in next pass.
Definition at line 77 of file RNNLayer.h.
|
private |
Return in output full sequence or just last element in time.
Definition at line 78 of file RNNLayer.h.
|
private |
Hidden State.
Definition at line 82 of file RNNLayer.h.
|
private |
Hidden state size of RNN.
Definition at line 76 of file RNNLayer.h.
|
private |
Timesteps for RNN.
Definition at line 75 of file RNNLayer.h.
|
protectedinherited |
Gradients w.r.t. the weights of the layer.
Definition at line 74 of file GeneralLayer.h.
|
private |
Definition at line 93 of file RNNLayer.h.
|
private |
Gradients w.r.t. the input weights.
Definition at line 88 of file RNNLayer.h.
|
protectedinherited |
The weights associated to the layer.
Definition at line 71 of file GeneralLayer.h.
|
private |
Input weights, fWeights[0].
Definition at line 83 of file RNNLayer.h.
|
private |
Prev state weights, fWeights[1].
Definition at line 84 of file RNNLayer.h.
|
private |
Gradients w.r.t. the recurrent weights.
Definition at line 89 of file RNNLayer.h.
|
private |
Definition at line 92 of file RNNLayer.h.
|
protectedinherited |
The width of this layer.
Definition at line 67 of file GeneralLayer.h.
|
private |
Definition at line 98 of file RNNLayer.h.
|
private |
cached input tensor as T x B x I
Definition at line 103 of file RNNLayer.h.
|
private |
cached output tensor as T x B x S
Definition at line 104 of file RNNLayer.h.