Definition at line 56 of file GRULayer.h.
Public Types | |
using | HelperDescriptor_t = typename Architecture_t::DropoutDescriptor_t |
using | LayerDescriptor_t = typename Architecture_t::RecurrentDescriptor_t |
using | Matrix_t = typename Architecture_t::Matrix_t |
using | RNNDescriptors_t = typename Architecture_t::RNNDescriptors_t |
using | RNNWorkspace_t = typename Architecture_t::RNNWorkspace_t |
using | Scalar_t = typename Architecture_t::Scalar_t |
using | Tensor_t = typename Architecture_t::Tensor_t |
using | TensorDescriptor_t = typename Architecture_t::TensorDescriptor_t |
using | WeightsDescriptor_t = typename Architecture_t::FilterDescriptor_t |
Public Member Functions | |
TBasicGRULayer (const TBasicGRULayer &) | |
Copy Constructor. More... | |
TBasicGRULayer (size_t batchSize, size_t stateSize, size_t inputSize, size_t timeSteps, bool rememberState=false, bool returnSequence=false, bool resetGateAfter=false, DNN::EActivationFunction f1=DNN::EActivationFunction::kSigmoid, DNN::EActivationFunction f2=DNN::EActivationFunction::kTanh, bool training=true, DNN::EInitialization fA=DNN::EInitialization::kZero) | |
Constructor. More... | |
void | AddWeightsXMLTo (void *parent) |
Writes the information and the weights about the layer in an XML node. More... | |
void | Backward (Tensor_t &gradients_backward, const Tensor_t &activations_backward) |
Backpropagates the error. More... | |
void | CandidateValue (const Matrix_t &input, Matrix_t &dc) |
Decides the new candidate values (NN with Tanh) More... | |
Matrix_t & | CellBackward (Matrix_t &state_gradients_backward, const Matrix_t &precStateActivations, const Matrix_t &reset_gate, const Matrix_t &update_gate, const Matrix_t &candidate_gate, const Matrix_t &input, Matrix_t &input_gradient, Matrix_t &dr, Matrix_t &du, Matrix_t &dc) |
Backward pass for a single time unit, matching the corresponding call to Forward(...). More... | |
void | CellForward (Matrix_t &updateGateValues, Matrix_t &candidateValues) |
Forward for a single cell (time unit) More... | |
bool | DoesRememberState () const |
bool | DoesReturnSequence () const |
void | Forward (Tensor_t &input, bool isTraining=true) |
Computes the next hidden state and next cell state with the given input matrix. More... | |
DNN::EActivationFunction | GetActivationFunctionF1 () const |
DNN::EActivationFunction | GetActivationFunctionF2 () const |
Matrix_t & | GetCandidateBias () |
const Matrix_t & | GetCandidateBias () const |
Matrix_t & | GetCandidateBiasGradients () |
const Matrix_t & | GetCandidateBiasGradients () const |
Matrix_t & | GetCandidateDerivativesAt (size_t i) |
const Matrix_t & | GetCandidateDerivativesAt (size_t i) const |
std::vector< Matrix_t > & | GetCandidateGateTensor () |
const std::vector< Matrix_t > & | GetCandidateGateTensor () const |
Matrix_t & | GetCandidateGateTensorAt (size_t i) |
const Matrix_t & | GetCandidateGateTensorAt (size_t i) const |
Matrix_t & | GetCandidateValue () |
const Matrix_t & | GetCandidateValue () const |
Matrix_t & | GetCell () |
const Matrix_t & | GetCell () const |
std::vector< Matrix_t > & | GetDerivativesCandidate () |
const std::vector< Matrix_t > & | GetDerivativesCandidate () const |
std::vector< Matrix_t > & | GetDerivativesReset () |
const std::vector< Matrix_t > & | GetDerivativesReset () const |
std::vector< Matrix_t > & | GetDerivativesUpdate () |
const std::vector< Matrix_t > & | GetDerivativesUpdate () const |
Tensor_t & | GetDX () |
Tensor_t & | GetDY () |
size_t | GetInputSize () const |
Getters. More... | |
Matrix_t & | GetResetBiasGradients () |
const Matrix_t & | GetResetBiasGradients () const |
Matrix_t & | GetResetDerivativesAt (size_t i) |
const Matrix_t & | GetResetDerivativesAt (size_t i) const |
Matrix_t & | GetResetGateBias () |
const Matrix_t & | GetResetGateBias () const |
std::vector< Matrix_t > & | GetResetGateTensor () |
const std::vector< Matrix_t > & | GetResetGateTensor () const |
Matrix_t & | GetResetGateTensorAt (size_t i) |
const Matrix_t & | GetResetGateTensorAt (size_t i) const |
Matrix_t & | GetResetGateValue () |
const Matrix_t & | GetResetGateValue () const |
Matrix_t & | GetState () |
const Matrix_t & | GetState () const |
size_t | GetStateSize () const |
size_t | GetTimeSteps () const |
Matrix_t & | GetUpdateBiasGradients () |
const Matrix_t & | GetUpdateBiasGradients () const |
Matrix_t & | GetUpdateDerivativesAt (size_t i) |
const Matrix_t & | GetUpdateDerivativesAt (size_t i) const |
Matrix_t & | GetUpdateGateBias () |
const Matrix_t & | GetUpdateGateBias () const |
std::vector< Matrix_t > & | GetUpdateGateTensor () |
const std::vector< Matrix_t > & | GetUpdateGateTensor () const |
Matrix_t & | GetUpdateGateTensorAt (size_t i) |
const Matrix_t & | GetUpdateGateTensorAt (size_t i) const |
Matrix_t & | GetUpdateGateValue () |
const Matrix_t & | GetUpdateGateValue () const |
Tensor_t & | GetWeightGradientsTensor () |
const Tensor_t & | GetWeightGradientsTensor () const |
Matrix_t & | GetWeightsCandidate () |
const Matrix_t & | GetWeightsCandidate () const |
Matrix_t & | GetWeightsCandidateGradients () |
const Matrix_t & | GetWeightsCandidateGradients () const |
Matrix_t & | GetWeightsCandidateState () |
const Matrix_t & | GetWeightsCandidateState () const |
Matrix_t & | GetWeightsCandidateStateGradients () |
const Matrix_t & | GetWeightsCandidateStateGradients () const |
Matrix_t & | GetWeightsResetGate () |
const Matrix_t & | GetWeightsResetGate () const |
Matrix_t & | GetWeightsResetGateState () |
const Matrix_t & | GetWeightsResetGateState () const |
Matrix_t & | GetWeightsResetGradients () |
const Matrix_t & | GetWeightsResetGradients () const |
Matrix_t & | GetWeightsResetStateGradients () |
const Matrix_t & | GetWeightsResetStateGradients () const |
Tensor_t & | GetWeightsTensor () |
const Tensor_t & | GetWeightsTensor () const |
Matrix_t & | GetWeightsUpdateGate () |
const Matrix_t & | GetWeightsUpdateGate () const |
Matrix_t & | GetWeightsUpdateGateState () |
const Matrix_t & | GetWeightsUpdateGateState () const |
Matrix_t & | GetWeightsUpdateGradients () |
const Matrix_t & | GetWeightsUpdateGradients () const |
Matrix_t & | GetWeightsUpdateStateGradients () |
const Matrix_t & | GetWeigthsUpdateStateGradients () const |
Tensor_t & | GetX () |
Tensor_t & | GetY () |
virtual void | Initialize () |
Initialize the weights according to the given initialization method. More... | |
void | InitState (DNN::EInitialization m=DNN::EInitialization::kZero) |
Initialize the hidden state and the cell state. More... | |
void | Print () const |
Prints the info about the layer. More... | |
void | ReadWeightsFromXML (void *parent) |
Read the information and the weights about the layer from XML node. More... | |
void | ResetGate (const Matrix_t &input, Matrix_t &di) |
Computes the reset gate values (NN with Sigmoid). More... | |
void | Update (const Scalar_t learningRate) |
void | UpdateGate (const Matrix_t &input, Matrix_t &df) |
Computes the update gate values (NN with Sigmoid). More... | |
Public Member Functions inherited from TMVA::DNN::VGeneralLayer< Architecture_t > | |
VGeneralLayer (const VGeneralLayer &) | |
Copy Constructor. More... | |
VGeneralLayer (size_t BatchSize, size_t InputDepth, size_t InputHeight, size_t InputWidth, size_t Depth, size_t Height, size_t Width, size_t WeightsNSlices, size_t WeightsNRows, size_t WeightsNCols, size_t BiasesNSlices, size_t BiasesNRows, size_t BiasesNCols, size_t OutputNSlices, size_t OutputNRows, size_t OutputNCols, EInitialization Init) | |
Constructor. More... | |
VGeneralLayer (size_t BatchSize, size_t InputDepth, size_t InputHeight, size_t InputWidth, size_t Depth, size_t Height, size_t Width, size_t WeightsNSlices, std::vector< size_t > WeightsNRows, std::vector< size_t > WeightsNCols, size_t BiasesNSlices, std::vector< size_t > BiasesNRows, std::vector< size_t > BiasesNCols, size_t OutputNSlices, size_t OutputNRows, size_t OutputNCols, EInitialization Init) | |
General Constructor with different weights dimension. More... | |
VGeneralLayer (VGeneralLayer< Architecture_t > *layer) | |
Copy the layer provided as a pointer. More... | |
virtual | ~VGeneralLayer () |
Virtual Destructor. More... | |
virtual void | AddWeightsXMLTo (void *parent)=0 |
Writes the information and the weights about the layer in an XML node. More... | |
virtual void | Backward (Tensor_t &gradients_backward, const Tensor_t &activations_backward)=0 |
Backpropagates the error. More... | |
void | CopyBiases (const std::vector< Matrix_t > &otherBiases) |
Copies the biases provided as an input. More... | |
template<typename Arch > | |
void | CopyParameters (const VGeneralLayer< Arch > &layer) |
Copy all trainable weights and biases from another equivalent layer, possibly with a different architecture. The function can also copy extra parameters, in addition to weights and biases, if they are returned by GetExtraLayerParameters. More... | |
void | CopyWeights (const std::vector< Matrix_t > &otherWeights) |
Copies the weights provided as an input. More... | |
virtual void | Forward (Tensor_t &input, bool applyDropout=false)=0 |
Computes activation of the layer for the given input. More... | |
Tensor_t & | GetActivationGradients () |
const Tensor_t & | GetActivationGradients () const |
Matrix_t | GetActivationGradientsAt (size_t i) |
const Matrix_t & | GetActivationGradientsAt (size_t i) const |
size_t | GetBatchSize () const |
Getters. More... | |
std::vector< Matrix_t > & | GetBiases () |
const std::vector< Matrix_t > & | GetBiases () const |
Matrix_t & | GetBiasesAt (size_t i) |
const Matrix_t & | GetBiasesAt (size_t i) const |
std::vector< Matrix_t > & | GetBiasGradients () |
const std::vector< Matrix_t > & | GetBiasGradients () const |
Matrix_t & | GetBiasGradientsAt (size_t i) |
const Matrix_t & | GetBiasGradientsAt (size_t i) const |
size_t | GetDepth () const |
virtual std::vector< Matrix_t > | GetExtraLayerParameters () const |
size_t | GetHeight () const |
EInitialization | GetInitialization () const |
size_t | GetInputDepth () const |
size_t | GetInputHeight () const |
size_t | GetInputWidth () const |
Tensor_t & | GetOutput () |
const Tensor_t & | GetOutput () const |
Matrix_t | GetOutputAt (size_t i) |
const Matrix_t & | GetOutputAt (size_t i) const |
std::vector< Matrix_t > & | GetWeightGradients () |
const std::vector< Matrix_t > & | GetWeightGradients () const |
Matrix_t & | GetWeightGradientsAt (size_t i) |
const Matrix_t & | GetWeightGradientsAt (size_t i) const |
std::vector< Matrix_t > & | GetWeights () |
const std::vector< Matrix_t > & | GetWeights () const |
Matrix_t & | GetWeightsAt (size_t i) |
const Matrix_t & | GetWeightsAt (size_t i) const |
size_t | GetWidth () const |
virtual void | Initialize () |
Initialize the weights and biases according to the given initialization method. More... | |
bool | IsTraining () const |
virtual void | Print () const =0 |
Prints the info about the layer. More... | |
void | ReadMatrixXML (void *node, const char *name, Matrix_t &matrix) |
virtual void | ReadWeightsFromXML (void *parent)=0 |
Read the information and the weights about the layer from XML node. More... | |
virtual void | ResetTraining () |
Reset some training flags after a loop over all batches; some layers (e.g. batch normalization) may need it. More... | |
void | SetBatchSize (size_t batchSize) |
Setters. More... | |
void | SetDepth (size_t depth) |
virtual void | SetDropoutProbability (Scalar_t) |
Set Dropout probability. More... | |
virtual void | SetExtraLayerParameters (const std::vector< Matrix_t > &) |
void | SetHeight (size_t height) |
void | SetInputDepth (size_t inputDepth) |
void | SetInputHeight (size_t inputHeight) |
void | SetInputWidth (size_t inputWidth) |
void | SetIsTraining (bool isTraining) |
void | SetWidth (size_t width) |
void | Update (const Scalar_t learningRate) |
Updates the weights and biases, given the learning rate. More... | |
void | UpdateBiases (const std::vector< Matrix_t > &biasGradients, const Scalar_t learningRate) |
Updates the biases, given the gradients and the learning rate. More... | |
void | UpdateBiasGradients (const std::vector< Matrix_t > &biasGradients, const Scalar_t learningRate) |
Updates the bias gradients, given some other bias gradients and the learning rate. More... | |
void | UpdateWeightGradients (const std::vector< Matrix_t > &weightGradients, const Scalar_t learningRate) |
Updates the weight gradients, given some other weight gradients and the learning rate. More... | |
void | UpdateWeights (const std::vector< Matrix_t > &weightGradients, const Scalar_t learningRate) |
Updates the weights, given the gradients and the learning rate. More... | |
void | WriteMatrixToXML (void *node, const char *name, const Matrix_t &matrix) |
void | WriteTensorToXML (void *node, const char *name, const std::vector< Matrix_t > &tensor) |
helper functions for XML More... | |
Private Attributes | |
std::vector< Matrix_t > | candidate_gate_value |
Candidate gate value for every time step. More... | |
Matrix_t & | fCandidateBias |
Candidate Gate bias. More... | |
Matrix_t & | fCandidateBiasGradients |
Gradients w.r.t the candidate gate - bias weights. More... | |
Matrix_t | fCandidateValue |
Computed candidate values. More... | |
Matrix_t | fCell |
Empty matrix for GRU. More... | |
std::vector< Matrix_t > | fDerivativesCandidate |
First fDerivatives of the activations candidate gate. More... | |
std::vector< Matrix_t > | fDerivativesReset |
First fDerivatives of the activations reset gate. More... | |
std::vector< Matrix_t > | fDerivativesUpdate |
First fDerivatives of the activations update gate. More... | |
TDescriptors * | fDescriptors = nullptr |
Keeps all the RNN descriptors. More... | |
Tensor_t | fDx |
cached gradient on the input (output of backward) as T x B x I More... | |
Tensor_t | fDy |
cached activation gradient (input of backward) as T x B x S More... | |
DNN::EActivationFunction | fF1 |
Activation function: sigmoid. More... | |
DNN::EActivationFunction | fF2 |
Activation function: tanh. More... | |
bool | fRememberState |
Remember state in next pass. More... | |
Matrix_t & | fResetBiasGradients |
Gradients w.r.t the reset gate - bias weights. More... | |
bool | fResetGateAfter = false |
GRU variant that applies the reset gate multiplication afterwards (as used by cuDNN). More... | |
Matrix_t & | fResetGateBias |
Reset Gate bias. More... | |
Matrix_t | fResetValue |
Computed reset gate values. More... | |
bool | fReturnSequence = false |
Return the full sequence in output, or just the last element. More... | |
Matrix_t | fState |
Hidden state of GRU. More... | |
size_t | fStateSize |
Hidden state size for GRU. More... | |
size_t | fTimeSteps |
Timesteps for GRU. More... | |
Matrix_t & | fUpdateBiasGradients |
Gradients w.r.t the update gate - bias weights. More... | |
Matrix_t & | fUpdateGateBias |
Update Gate bias. More... | |
Matrix_t | fUpdateValue |
Computed update gate values. More... | |
Tensor_t | fWeightGradientsTensor |
Tensor for all weight gradients. More... | |
Matrix_t & | fWeightsCandidate |
Candidate Gate weights for input, fWeights[4]. More... | |
Matrix_t & | fWeightsCandidateGradients |
Gradients w.r.t the candidate gate - input weights. More... | |
Matrix_t & | fWeightsCandidateState |
Candidate Gate weights for prev state, fWeights[5]. More... | |
Matrix_t & | fWeightsCandidateStateGradients |
Gradients w.r.t the candidate gate - hidden state weights. More... | |
Matrix_t & | fWeightsResetGate |
Reset Gate weights for input, fWeights[0]. More... | |
Matrix_t & | fWeightsResetGateState |
Reset Gate weights for prev state, fWeights[1]. More... | |
Matrix_t & | fWeightsResetGradients |
Gradients w.r.t the reset gate - input weights. More... | |
Matrix_t & | fWeightsResetStateGradients |
Gradients w.r.t the reset gate - hidden state weights. More... | |
Tensor_t | fWeightsTensor |
Tensor for all weights. More... | |
Matrix_t & | fWeightsUpdateGate |
Update Gate weights for input, fWeights[2]. More... | |
Matrix_t & | fWeightsUpdateGateState |
Update Gate weights for prev state, fWeights[3]. More... | |
Matrix_t & | fWeightsUpdateGradients |
Gradients w.r.t the update gate - input weights. More... | |
Matrix_t & | fWeightsUpdateStateGradients |
Gradients w.r.t the update gate - hidden state weights. More... | |
TWorkspace * | fWorkspace = nullptr |
Tensor_t | fX |
cached input tensor as T x B x I More... | |
Tensor_t | fY |
cached output tensor as T x B x S More... | |
std::vector< Matrix_t > | reset_gate_value |
Reset gate value for every time step. More... | |
std::vector< Matrix_t > | update_gate_value |
Update gate value for every time step. More... | |
Additional Inherited Members | |
Protected Attributes inherited from TMVA::DNN::VGeneralLayer< Architecture_t > | |
Tensor_t | fActivationGradients |
Gradients w.r.t. the activations of this layer. More... | |
size_t | fBatchSize |
Batch size used for training and evaluation. More... | |
std::vector< Matrix_t > | fBiases |
The biases associated to the layer. More... | |
std::vector< Matrix_t > | fBiasGradients |
Gradients w.r.t. the bias values of the layer. More... | |
size_t | fDepth |
The depth of the layer. More... | |
size_t | fHeight |
The height of the layer. More... | |
EInitialization | fInit |
The initialization method. More... | |
size_t | fInputDepth |
The depth of the previous layer or input. More... | |
size_t | fInputHeight |
The height of the previous layer or input. More... | |
size_t | fInputWidth |
The width of the previous layer or input. More... | |
bool | fIsTraining |
Flag indicating the mode. More... | |
Tensor_t | fOutput |
Activations of this layer. More... | |
std::vector< Matrix_t > | fWeightGradients |
Gradients w.r.t. the weights of the layer. More... | |
std::vector< Matrix_t > | fWeights |
The weights associated to the layer. More... | |
size_t | fWidth |
The width of this layer. More... | |
#include <TMVA/DNN/RNN/GRULayer.h>
using TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::HelperDescriptor_t = typename Architecture_t::DropoutDescriptor_t |
Definition at line 68 of file GRULayer.h.
using TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::LayerDescriptor_t = typename Architecture_t::RecurrentDescriptor_t |
Definition at line 65 of file GRULayer.h.
using TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::Matrix_t = typename Architecture_t::Matrix_t |
Definition at line 61 of file GRULayer.h.
using TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::RNNDescriptors_t = typename Architecture_t::RNNDescriptors_t |
Definition at line 71 of file GRULayer.h.
using TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::RNNWorkspace_t = typename Architecture_t::RNNWorkspace_t |
Definition at line 70 of file GRULayer.h.
using TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::Scalar_t = typename Architecture_t::Scalar_t |
Definition at line 62 of file GRULayer.h.
using TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::Tensor_t = typename Architecture_t::Tensor_t |
Definition at line 63 of file GRULayer.h.
using TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::TensorDescriptor_t = typename Architecture_t::TensorDescriptor_t |
Definition at line 67 of file GRULayer.h.
using TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::WeightsDescriptor_t = typename Architecture_t::FilterDescriptor_t |
Definition at line 66 of file GRULayer.h.
TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::TBasicGRULayer(size_t batchSize,
                                                                 size_t stateSize,
                                                                 size_t inputSize,
                                                                 size_t timeSteps,
                                                                 bool rememberState = false,
                                                                 bool returnSequence = false,
                                                                 bool resetGateAfter = false,
                                                                 DNN::EActivationFunction f1 = DNN::EActivationFunction::kSigmoid,
                                                                 DNN::EActivationFunction f2 = DNN::EActivationFunction::kTanh,
                                                                 bool training = true,
                                                                 DNN::EInitialization fA = DNN::EInitialization::kZero)
Constructor.
Definition at line 308 of file GRULayer.h.
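A minimal construction sketch, not taken from the class documentation: it assumes the CPU architecture backend TMVA::DNN::TCpu<Double_t> (any Architecture_t providing the typedefs listed above would work) and uses only members documented on this page; the sizes are arbitrary.

#include "TMVA/DNN/Architectures/Cpu.h"
#include "TMVA/DNN/RNN/GRULayer.h"

using Architecture_t = TMVA::DNN::TCpu<Double_t>;
using GRULayer_t     = TMVA::DNN::RNN::TBasicGRULayer<Architecture_t>;

int main()
{
   // batchSize = 32, stateSize = 64, inputSize = 16, timeSteps = 10;
   // keep the hidden state between batches, output only the last time step.
   GRULayer_t gru(32, 64, 16, 10,
                  /*rememberState=*/true,
                  /*returnSequence=*/false);

   gru.Initialize();   // initialize the weights with the configured EInitialization method
   gru.Print();        // print basic information about the layer
   return 0;
}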
TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::TBasicGRULayer(const TBasicGRULayer< Architecture_t > &layer)
Copy Constructor.
Definition at line 344 of file GRULayer.h.
void TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::AddWeightsXMLTo(void *parent) [inline, virtual]
Writes the information and the weights about the layer in an XML node.
Implements TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 820 of file GRULayer.h.
void TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::Backward(Tensor_t &gradients_backward, const Tensor_t &activations_backward) [inline, virtual]
Backpropagates the error.
Must only be called directly after the corresponding call to Forward(...).
Implements TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 610 of file GRULayer.h.
void TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::CandidateValue(const Matrix_t &input, Matrix_t &dc) [inline]
Decides the new candidate values (NN with Tanh)
Definition at line 459 of file GRULayer.h.
Matrix_t & TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::CellBackward(Matrix_t &state_gradients_backward, const Matrix_t &precStateActivations, const Matrix_t &reset_gate, const Matrix_t &update_gate, const Matrix_t &candidate_gate, const Matrix_t &input, Matrix_t &input_gradient, Matrix_t &dr, Matrix_t &du, Matrix_t &dc) [inline]
Backward pass for a single time unit, matching the corresponding call to Forward(...).
Definition at line 775 of file GRULayer.h.
void TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::CellForward(Matrix_t &updateGateValues, Matrix_t &candidateValues) [inline]
Forward for a single cell (time unit)
Definition at line 590 of file GRULayer.h.
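Taken together, ResetGate(), UpdateGate(), CandidateValue() and CellForward() implement one step of the usual GRU recurrence. For orientation only (the notation below is not from this page), with f1 = sigmoid and f2 = tanh as in the constructor defaults, W acting on the input x_t and U on the previous hidden state h_{t-1}:

\begin{aligned}
r_t &= \sigma\left(W_r x_t + U_r h_{t-1} + b_r\right) && \text{reset gate}\\
u_t &= \sigma\left(W_u x_t + U_u h_{t-1} + b_u\right) && \text{update gate}\\
\tilde h_t &= \tanh\left(W_c x_t + U_c\,(r_t \odot h_{t-1}) + b_c\right) && \text{candidate value}\\
h_t &= u_t \odot h_{t-1} + (1 - u_t) \odot \tilde h_t && \text{new hidden state}
\end{aligned}

Conventions differ on whether u_t weights the previous state or the candidate; the last line follows the cuDNN convention. See fResetGateAfter below for the variant of the candidate computation.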
The remaining public accessors (DoesRememberState(), DoesReturnSequence(), GetActivationFunctionF1(), GetActivationFunctionF2() and the various Get...() getters listed above) are defined inline between lines 200 and 298 of GRULayer.h; their detailed entries add nothing beyond the declarations shown in the member list.
void TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::Forward(Tensor_t &input, bool isTraining = true) [inline, virtual]
Computes the next hidden state and next cell state with the given input matrix.
Implements TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 494 of file GRULayer.h.
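A rough sketch of the forward/backward call sequence, continuing the construction example given above (these lines would go inside its main()). The three-argument Tensor_t constructor and the timeSteps x batchSize x inputSize layout used here are assumptions, not guarantees of this interface; in practice the layer is normally driven by TMVA's deep-network classes, which take care of allocating and arranging these tensors.

   using Tensor_t = Architecture_t::Tensor_t;

   // input and input-gradient tensors; assumed shape timeSteps x batchSize x inputSize
   Tensor_t input(10, 32, 16);
   Tensor_t inputGradients(10, 32, 16);

   gru.Forward(input, /*isTraining=*/true);   // fills the layer output, see GetOutput()

   // gradients_backward receives the gradient w.r.t. this layer's input;
   // activations_backward is the tensor that was passed to Forward.
   gru.Backward(inputGradients, input);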
void TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::Initialize() [virtual]
Initialize the weights according to the given initialization method.
Reimplemented from TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 409 of file GRULayer.h.
auto TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::InitState(DNN::EInitialization m = DNN::EInitialization::kZero)
Initialize the hidden state and the cell state.
Definition at line 800 of file GRULayer.h.
void TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::Print() const [virtual]
Prints the info about the layer.
Implements TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 808 of file GRULayer.h.
void TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::ReadWeightsFromXML(void *parent) [inline, virtual]
Read the information and the weights about the layer from XML node.
Implements TMVA::DNN::VGeneralLayer< Architecture_t >.
Definition at line 847 of file GRULayer.h.
void TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::ResetGate(const Matrix_t &input, Matrix_t &di) [inline]
Computes the reset gate values (NN with Sigmoid).
Definition at line 423 of file GRULayer.h.
void TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::Update(const Scalar_t learningRate)
void TMVA::DNN::RNN::TBasicGRULayer< Architecture_t >::UpdateGate(const Matrix_t &input, Matrix_t &df) [inline]
Computes the update gate values (NN with Sigmoid).
Definition at line 441 of file GRULayer.h.
|
private |
Candidate gate value for every time step.
Definition at line 106 of file GRULayer.h.
|
private |
Candidate Gate bias.
Definition at line 101 of file GRULayer.h.
|
private |
Gradients w.r.t the candidate gate - bias weights.
Definition at line 120 of file GRULayer.h.
|
private |
Computed candidate values.
Definition at line 87 of file GRULayer.h.
|
private |
Empty matrix for GRU.
Definition at line 122 of file GRULayer.h.
|
private |
First fDerivatives of the activations candidate gate.
Definition at line 110 of file GRULayer.h.
|
private |
First fDerivatives of the activations reset gate.
Definition at line 108 of file GRULayer.h.
|
private |
First fDerivatives of the activations update gate.
Definition at line 109 of file GRULayer.h.
|
private |
Keeps all the RNN descriptors.
Definition at line 134 of file GRULayer.h.
|
private |
cached gradient on the input (output of backward) as T x B x I
Definition at line 131 of file GRULayer.h.
|
private |
cached activation gradient (input of backward) as T x B x S
Definition at line 132 of file GRULayer.h.
|
private |
Activation function: sigmoid.
Definition at line 82 of file GRULayer.h.
|
private |
Activation function: tanh.
Definition at line 83 of file GRULayer.h.
|
private |
Remember state in next pass.
Definition at line 78 of file GRULayer.h.
|
private |
Gradients w.r.t the reset gate - bias weights.
Definition at line 114 of file GRULayer.h.
|
private |
GRU variant that applies the reset gate multiplication afterwards (as used by cuDNN).
Definition at line 80 of file GRULayer.h.
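With resetGateAfter enabled, the reset gate is applied after the multiplication by the recurrent weights rather than to the previous hidden state itself. In the notation of the equations given earlier on this page (a sketch of the convention, not a quote from the implementation), the candidate value becomes

\tilde h_t = \tanh\left(W_c x_t + r_t \odot \left(U_c h_{t-1}\right) + b_c\right)

instead of \tilde h_t = \tanh\left(W_c x_t + U_c\,(r_t \odot h_{t-1}) + b_c\right) in the default case.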
|
private |
Reset Gate bias.
Definition at line 93 of file GRULayer.h.
|
private |
Computed reset gate values.
Definition at line 85 of file GRULayer.h.
|
private |
Return the full sequence in output, or just the last element.
Definition at line 79 of file GRULayer.h.
|
private |
Hidden state of GRU.
Definition at line 88 of file GRULayer.h.
|
private |
Hidden state size for GRU.
Definition at line 75 of file GRULayer.h.
|
private |
Timesteps for GRU.
Definition at line 76 of file GRULayer.h.
|
private |
Gradients w.r.t the update gate - bias weights.
Definition at line 117 of file GRULayer.h.
|
private |
Update Gate bias.
Definition at line 97 of file GRULayer.h.
|
private |
Computed update gate values.
Definition at line 86 of file GRULayer.h.
|
private |
Tensor for all weight gradients.
Definition at line 126 of file GRULayer.h.
|
private |
Candidate Gate weights for input, fWeights[4].
Definition at line 99 of file GRULayer.h.
|
private |
Gradients w.r.t the candidate gate - input weights.
Definition at line 118 of file GRULayer.h.
|
private |
Candidate Gate weights for prev state, fWeights[5].
Definition at line 100 of file GRULayer.h.
|
private |
Gradients w.r.t the candidate gate - hidden state weights.
Definition at line 119 of file GRULayer.h.
|
private |
Reset Gate weights for input, fWeights[0].
Definition at line 91 of file GRULayer.h.
|
private |
Reset Gate weights for prev state, fWeights[1].
Definition at line 92 of file GRULayer.h.
|
private |
Gradients w.r.t the reset gate - input weights.
Definition at line 112 of file GRULayer.h.
|
private |
Gradients w.r.t the reset gate - hidden state weights.
Definition at line 113 of file GRULayer.h.
|
private |
Tensor for all weights.
Definition at line 125 of file GRULayer.h.
|
private |
Update Gate weights for input, fWeights[2].
Definition at line 95 of file GRULayer.h.
|
private |
Update Gate weights for prev state, fWeights[3].
Definition at line 96 of file GRULayer.h.
|
private |
Gradients w.r.t the update gate - input weights.
Definition at line 115 of file GRULayer.h.
|
private |
Gradients w.r.t the update gate - hidden state weights.
Definition at line 116 of file GRULayer.h.
|
private |
Definition at line 135 of file GRULayer.h.
|
private |
cached input tensor as T x B x I
Definition at line 129 of file GRULayer.h.
|
private |
cached output tensor as T x B x S
Definition at line 130 of file GRULayer.h.
|
private |
Reset gate value for every time step.
Definition at line 104 of file GRULayer.h.
|
private |
Update gate value for every time step.
Definition at line 105 of file GRULayer.h.