ROOT Reference Guide

TMVA::DNN::TReference< AReal > Class Template Reference

template<typename AReal>
class TMVA::DNN::TReference< AReal >

The reference architecture class.

Class template that contains the reference implementation of the low-level interface for the DNN implementation. The reference implementation uses the TMatrixT class template to represent matrices.

Template Parameters
AReal: The floating point type used to represent scalars.

Definition at line 52 of file Reference.h.
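
A minimal usage sketch (illustrative, not part of the class documentation): the low-level interface consists of static functions operating directly on TMatrixT objects, so it can be exercised without building a network. The matrix dimensions and values below are assumptions chosen for illustration.

   // Minimal sketch: exercise the reference architecture directly on TMatrixT objects.
   #include "TMVA/DNN/Architectures/Reference.h"

   using Arch_t   = TMVA::DNN::TReference<double>;
   using Matrix_t = Arch_t::Matrix_t;          // TMatrixT<double>

   void Example()
   {
      Matrix_t A(4, 3), B(4, 3);               // illustrative 4x3 matrices
      A = 1.0;                                 // TMatrixT scalar assignment sets all elements
      B = 2.0;
      Arch_t::ConstAdd(A, 0.5);                // A_ij += 0.5
      Arch_t::Hadamard(A, B);                  // A_ij *= B_ij (element-wise product)
      Arch_t::SquareElementWise(A);            // A_ij = A_ij^2
   }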

Public Types

using Matrix_t = TMatrixT< AReal >
 
using Scalar_t = AReal
 
using Tensor_t = TMatrixT< AReal >
 

Static Public Member Functions

static void AdamUpdate (TMatrixT< AReal > &A, const TMatrixT< AReal > &M, const TMatrixT< AReal > &V, AReal alpha, AReal eps)
 Update functions for ADAM optimizer.
 
static void AdamUpdateFirstMom (TMatrixT< AReal > &A, const TMatrixT< AReal > &B, AReal beta)
 
static void AdamUpdateSecondMom (TMatrixT< AReal > &A, const TMatrixT< AReal > &B, AReal beta)
 
static void AddBiases (TMatrixT< AReal > &A, const TMatrixT< AReal > &biases)
 
static void ConstAdd (TMatrixT< AReal > &A, AReal beta)
 Add the constant beta to all the elements of matrix A and write the result into A.
 
static void ConstMult (TMatrixT< AReal > &A, AReal beta)
 Multiply all elements of matrix A by the constant beta and write the result into A.
 
static void ConvLayerForward (std::vector< TMatrixT< AReal > > &, std::vector< TMatrixT< AReal > > &, const std::vector< TMatrixT< AReal > > &, const TMatrixT< AReal > &, const TMatrixT< AReal > &, const DNN::CNN::TConvParams &, EActivationFunction, std::vector< TMatrixT< AReal > > &)
 Forward propagation in the Convolutional layer.
 
static void CorruptInput (TMatrixT< AReal > &input, TMatrixT< AReal > &corruptedInput, AReal corruptionLevel)
 
static void EncodeInput (TMatrixT< AReal > &input, TMatrixT< AReal > &compressedInput, TMatrixT< AReal > &Weights)
 
static void ForwardLogReg (TMatrixT< AReal > &input, TMatrixT< AReal > &p, TMatrixT< AReal > &fWeights)
 
static void Hadamard (TMatrixT< AReal > &A, const TMatrixT< AReal > &B)
 In-place Hadamard (element-wise) product of matrices A and B with the result being written into A.
 
static void PrepareInternals (std::vector< TMatrixT< AReal > > &)
 Dummy placeholder - preparation is currently only required for the CUDA architecture.
 
static void ReciprocalElementWise (TMatrixT< AReal > &A)
 Take the reciprocal of each element of the matrix A and write the result into A.
 
static void ReconstructInput (TMatrixT< AReal > &compressedInput, TMatrixT< AReal > &reconstructedInput, TMatrixT< AReal > &fWeights)
 
static void SoftmaxAE (TMatrixT< AReal > &A)
 
static void SqrtElementWise (TMatrixT< AReal > &A)
 Take the square root of each element of the matrix A and write the result into A.
 
static void SquareElementWise (TMatrixT< AReal > &A)
 Square each element of the matrix A and write the result into A.
 
static void SumColumns (TMatrixT< AReal > &B, const TMatrixT< AReal > &A)
 Sum columns of the (m x n) matrix A and write the results into the first m elements of B.
 
static void UpdateParams (TMatrixT< AReal > &x, TMatrixT< AReal > &tildeX, TMatrixT< AReal > &y, TMatrixT< AReal > &z, TMatrixT< AReal > &fVBiases, TMatrixT< AReal > &fHBiases, TMatrixT< AReal > &fWeights, TMatrixT< AReal > &VBiasError, TMatrixT< AReal > &HBiasError, AReal learningRate, size_t fBatchSize)
 
static void UpdateParamsLogReg (TMatrixT< AReal > &input, TMatrixT< AReal > &output, TMatrixT< AReal > &difference, TMatrixT< AReal > &p, TMatrixT< AReal > &fWeights, TMatrixT< AReal > &fBiases, AReal learningRate, size_t fBatchSize)
 
Forward Propagation

Low-level functions required for the forward propagation of activations through the network.

static void MultiplyTranspose (TMatrixT< Scalar_t > &output, const TMatrixT< Scalar_t > &input, const TMatrixT< Scalar_t > &weights)
 Matrix-multiply input with the transpose of weights and write the results into output.
 
static void AddRowWise (TMatrixT< Scalar_t > &output, const TMatrixT< Scalar_t > &biases)
 Add the bias vector biases row-wise to the matrix output.
 
Backward Propagation

Low-level functions required for the backward propagation of gradients through the network.

static void Backward (TMatrixT< Scalar_t > &activationGradientsBackward, TMatrixT< Scalar_t > &weightGradients, TMatrixT< Scalar_t > &biasGradients, TMatrixT< Scalar_t > &df, const TMatrixT< Scalar_t > &activationGradients, const TMatrixT< Scalar_t > &weights, const TMatrixT< Scalar_t > &activationBackward)
 Perform the complete backward propagation step.
 
static Matrix_t RecurrentLayerBackward (TMatrixT< Scalar_t > &state_gradients_backward, TMatrixT< Scalar_t > &input_weight_gradients, TMatrixT< Scalar_t > &state_weight_gradients, TMatrixT< Scalar_t > &bias_gradients, TMatrixT< Scalar_t > &df, const TMatrixT< Scalar_t > &state, const TMatrixT< Scalar_t > &weights_input, const TMatrixT< Scalar_t > &weights_state, const TMatrixT< Scalar_t > &input, TMatrixT< Scalar_t > &input_gradient)
 Backpropagation step for a Recurrent Neural Network.
 
static Matrix_t LSTMLayerBackward (TMatrixT< Scalar_t > &state_gradients_backward, TMatrixT< Scalar_t > &cell_gradients_backward, TMatrixT< Scalar_t > &input_weight_gradients, TMatrixT< Scalar_t > &forget_weight_gradients, TMatrixT< Scalar_t > &candidate_weight_gradients, TMatrixT< Scalar_t > &output_weight_gradients, TMatrixT< Scalar_t > &input_state_weight_gradients, TMatrixT< Scalar_t > &forget_state_weight_gradients, TMatrixT< Scalar_t > &candidate_state_weight_gradients, TMatrixT< Scalar_t > &output_state_weight_gradients, TMatrixT< Scalar_t > &input_bias_gradients, TMatrixT< Scalar_t > &forget_bias_gradients, TMatrixT< Scalar_t > &candidate_bias_gradients, TMatrixT< Scalar_t > &output_bias_gradients, TMatrixT< Scalar_t > &di, TMatrixT< Scalar_t > &df, TMatrixT< Scalar_t > &dc, TMatrixT< Scalar_t > &dout, const TMatrixT< Scalar_t > &precStateActivations, const TMatrixT< Scalar_t > &precCellActivations, const TMatrixT< Scalar_t > &fInput, const TMatrixT< Scalar_t > &fForget, const TMatrixT< Scalar_t > &fCandidate, const TMatrixT< Scalar_t > &fOutput, const TMatrixT< Scalar_t > &weights_input, const TMatrixT< Scalar_t > &weights_forget, const TMatrixT< Scalar_t > &weights_candidate, const TMatrixT< Scalar_t > &weights_output, const TMatrixT< Scalar_t > &weights_input_state, const TMatrixT< Scalar_t > &weights_forget_state, const TMatrixT< Scalar_t > &weights_candidate_state, const TMatrixT< Scalar_t > &weights_output_state, const TMatrixT< Scalar_t > &input, TMatrixT< Scalar_t > &input_gradient, TMatrixT< Scalar_t > &cell_gradient, TMatrixT< Scalar_t > &cell_tanh)
 Backward pass for LSTM Network.
 
static Matrix_t GRULayerBackward (TMatrixT< Scalar_t > &state_gradients_backward, TMatrixT< Scalar_t > &reset_weight_gradients, TMatrixT< Scalar_t > &update_weight_gradients, TMatrixT< Scalar_t > &candidate_weight_gradients, TMatrixT< Scalar_t > &reset_state_weight_gradients, TMatrixT< Scalar_t > &update_state_weight_gradients, TMatrixT< Scalar_t > &candidate_state_weight_gradients, TMatrixT< Scalar_t > &reset_bias_gradients, TMatrixT< Scalar_t > &update_bias_gradients, TMatrixT< Scalar_t > &candidate_bias_gradients, TMatrixT< Scalar_t > &dr, TMatrixT< Scalar_t > &du, TMatrixT< Scalar_t > &dc, const TMatrixT< Scalar_t > &precStateActivations, const TMatrixT< Scalar_t > &fReset, const TMatrixT< Scalar_t > &fUpdate, const TMatrixT< Scalar_t > &fCandidate, const TMatrixT< Scalar_t > &weights_reset, const TMatrixT< Scalar_t > &weights_update, const TMatrixT< Scalar_t > &weights_candidate, const TMatrixT< Scalar_t > &weights_reset_state, const TMatrixT< Scalar_t > &weights_update_state, const TMatrixT< Scalar_t > &weights_candidate_state, const TMatrixT< Scalar_t > &input, TMatrixT< Scalar_t > &input_gradient)
 Backward pass for GRU Network.
 
static void ScaleAdd (TMatrixT< Scalar_t > &A, const TMatrixT< Scalar_t > &B, Scalar_t beta=1.0)
 Adds the elements of matrix B, scaled by beta, to the elements of matrix A.
 
static void Copy (TMatrixT< Scalar_t > &A, const TMatrixT< Scalar_t > &B)
 
template<typename AMatrix_t >
static void CopyDiffArch (TMatrixT< Scalar_t > &A, const AMatrix_t &B)
 
static void ScaleAdd (std::vector< TMatrixT< Scalar_t > > &A, const std::vector< TMatrixT< Scalar_t > > &B, Scalar_t beta=1.0)
 Above functions extended to vectors.
 
static void Copy (std::vector< TMatrixT< Scalar_t > > &A, const std::vector< TMatrixT< Scalar_t > > &B)
 
template<typename AMatrix_t >
static void CopyDiffArch (std::vector< TMatrixT< Scalar_t > > &A, const std::vector< AMatrix_t > &B)
 
Activation Functions

For each activation function, the low-level interface contains two routines.

One that applies the activation function to a matrix and one that evaluates the derivatives of the activation function at the elements of a given matrix and writes the results into the result matrix.

static void Identity (TMatrixT< AReal > &B)
 
static void IdentityDerivative (TMatrixT< AReal > &B, const TMatrixT< AReal > &A)
 
static void Relu (TMatrixT< AReal > &B)
 
static void ReluDerivative (TMatrixT< AReal > &B, const TMatrixT< AReal > &A)
 
static void Sigmoid (TMatrixT< AReal > &B)
 
static void SigmoidDerivative (TMatrixT< AReal > &B, const TMatrixT< AReal > &A)
 
static void Tanh (TMatrixT< AReal > &B)
 
static void TanhDerivative (TMatrixT< AReal > &B, const TMatrixT< AReal > &A)
 
static void FastTanh (Tensor_t &B)
 
static void FastTanhDerivative (Tensor_t &B, const Tensor_t &A)
 
static void SymmetricRelu (TMatrixT< AReal > &B)
 
static void SymmetricReluDerivative (TMatrixT< AReal > &B, const TMatrixT< AReal > &A)
 
static void SoftSign (TMatrixT< AReal > &B)
 
static void SoftSignDerivative (TMatrixT< AReal > &B, const TMatrixT< AReal > &A)
 
static void Gauss (TMatrixT< AReal > &B)
 
static void GaussDerivative (TMatrixT< AReal > &B, const TMatrixT< AReal > &A)
 
Loss Functions

Loss functions compute a scalar value that quantifies the quality of the prediction, given the output of the network for a given training input and the expected network prediction Y.

For each loss function, a routine that computes the gradients (suffixed by Gradients) must also be provided to start the backpropagation algorithm.

static AReal MeanSquaredError (const TMatrixT< AReal > &Y, const TMatrixT< AReal > &output, const TMatrixT< AReal > &weights)
 
static void MeanSquaredErrorGradients (TMatrixT< AReal > &dY, const TMatrixT< AReal > &Y, const TMatrixT< AReal > &output, const TMatrixT< AReal > &weights)
 
static AReal CrossEntropy (const TMatrixT< AReal > &Y, const TMatrixT< AReal > &output, const TMatrixT< AReal > &weights)
 Sigmoid transformation is implicitly applied, thus output should hold the linear activations of the last layer in the net.
 
static void CrossEntropyGradients (TMatrixT< AReal > &dY, const TMatrixT< AReal > &Y, const TMatrixT< AReal > &output, const TMatrixT< AReal > &weights)
 
static AReal SoftmaxCrossEntropy (const TMatrixT< AReal > &Y, const TMatrixT< AReal > &output, const TMatrixT< AReal > &weights)
 Softmax transformation is implicitly applied, thus output should hold the linear activations of the last layer in the net.
 
static void SoftmaxCrossEntropyGradients (TMatrixT< AReal > &dY, const TMatrixT< AReal > &Y, const TMatrixT< AReal > &output, const TMatrixT< AReal > &weights)
 
Output Functions

Output functions transform the activations of the output layer of the network into a valid prediction YHat for the desired usage of the network, e.g. the identity function for regression or the sigmoid transformation for two-class classification.

static void Sigmoid (TMatrixT< AReal > &YHat, const TMatrixT< AReal > &)
 
static void Softmax (TMatrixT< AReal > &YHat, const TMatrixT< AReal > &)
 
Regularization

For each regularization type, two functions are required: <Type>Regularization, which evaluates the corresponding regularization functional for a given weight matrix, and Add<Type>RegularizationGradients, which adds the regularization component of the gradients to the provided matrix.

static AReal L1Regularization (const TMatrixT< AReal > &W)
 
static void AddL1RegularizationGradients (TMatrixT< AReal > &A, const TMatrixT< AReal > &W, AReal weightDecay)
 
static AReal L2Regularization (const TMatrixT< AReal > &W)
 
static void AddL2RegularizationGradients (TMatrixT< AReal > &A, const TMatrixT< AReal > &W, AReal weightDecay)
 
Initialization

For each initialization method, one function in the low-level interface is provided.

The naming scheme is

Initialize<Type>

for a given initialization method Type.

static void InitializeGauss (TMatrixT< AReal > &A)
 
static void InitializeUniform (TMatrixT< AReal > &A)
 
static void InitializeIdentity (TMatrixT< AReal > &A)
 
static void InitializeZero (TMatrixT< AReal > &A)
 
static void InitializeGlorotUniform (TMatrixT< AReal > &A)
 Sample from a uniform distribution in the range [-lim, +lim] where lim = sqrt(6/(N_in+N_out)).
 
static void InitializeGlorotNormal (TMatrixT< AReal > &A)
 Truncated normal initialization (Glorot, also called Xavier normal). The values are sampled from a normal distribution with stddev = sqrt(2/(N_input+N_output)) and values larger than 2 * stddev are discarded. See Glorot & Bengio, AISTATS 2010 - http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf.
 
static TRandom & GetRandomGenerator ()
 
static void SetRandomSeed (size_t seed)
 
Dropout
static void DropoutForward (Tensor_t &A, TDescriptors *descriptors, TWorkspace *workspace, Scalar_t p)
 Apply dropout with activation probability p to the given matrix A and scale the result by the reciprocal of p.
 
static void DropoutForward (Matrix_t &A, Scalar_t p)
 
Forward Propagation in Convolutional Layer
static void Im2col (TMatrixT< AReal > &A, const TMatrixT< AReal > &B, size_t imgHeight, size_t imgWidth, size_t fltHeight, size_t fltWidth, size_t strideRows, size_t strideCols, size_t zeroPaddingHeight, size_t zeroPaddingWidth)
 Transform the matrix B into local view format, suitable for convolution, and store it in matrix A.
 
static void Im2colIndices (std::vector< int > &, const TMatrixT< AReal > &, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t)
 
static void Im2colFast (TMatrixT< AReal > &, const TMatrixT< AReal > &, const std::vector< int > &)
 
static void RotateWeights (TMatrixT< AReal > &A, const TMatrixT< AReal > &B, size_t filterDepth, size_t filterHeight, size_t filterWidth, size_t numFilters)
 Rotates the matrix B, which represents the weights, and stores the result in the matrix A.
 
static void AddConvBiases (TMatrixT< AReal > &output, const TMatrixT< AReal > &biases)
 Add the biases in the Convolutional Layer.
 
Backward Propagation in Convolutional Layer
static void ConvLayerBackward (std::vector< TMatrixT< AReal > > &, TMatrixT< AReal > &, TMatrixT< AReal > &, std::vector< TMatrixT< AReal > > &, const std::vector< TMatrixT< AReal > > &, const TMatrixT< AReal > &, const std::vector< TMatrixT< AReal > > &, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t)
 Perform the complete backward propagation step in a Convolutional Layer.
 
Forward Propagation in Max Pooling Layer
static void Downsample (TMatrixT< AReal > &A, TMatrixT< AReal > &B, const TMatrixT< AReal > &C, size_t imgHeight, size_t imgWidth, size_t fltHeight, size_t fltWidth, size_t strideRows, size_t strideCols)
 Downsample the matrix C to the matrix A, using the max operation, such that the winning indices are stored in matrix B.
 
Backward Propagation in Max Pooling Layer
static void MaxPoolLayerBackward (TMatrixT< AReal > &activationGradientsBackward, const TMatrixT< AReal > &activationGradients, const TMatrixT< AReal > &indexMatrix, size_t imgHeight, size_t imgWidth, size_t fltHeight, size_t fltWidth, size_t strideRows, size_t strideCol, size_t nLocalViews)
 Perform the complete backward propagation step in a Max Pooling Layer.
 
Forward and Backward Propagation in Reshape Layer
static void Reshape (TMatrixT< AReal > &A, const TMatrixT< AReal > &B)
 Transform the matrix B to the matrix A, which has different dimensions.
 
static void Flatten (TMatrixT< AReal > &A, const std::vector< TMatrixT< AReal > > &B, size_t size, size_t nRows, size_t nCols)
 Flattens the tensor B, such that each matrix is stretched into one row, resulting in the matrix A.
 
static void Deflatten (std::vector< TMatrixT< AReal > > &A, const TMatrixT< Scalar_t > &B, size_t index, size_t nRows, size_t nCols)
 Transforms each row of B to a matrix and stores it in the tensor A.
 
static void Rearrange (std::vector< TMatrixT< AReal > > &out, const std::vector< TMatrixT< AReal > > &in)
 Rearrange data according to time: fill the B x T x D tensor out with the T x B x D matrix in.
 

Static Private Attributes

static TRandom * fgRandomGen = nullptr
 

#include <TMVA/DNN/Architectures/Reference.h>

Member Typedef Documentation

◆ Matrix_t

template<typename AReal >
using TMVA::DNN::TReference< AReal >::Matrix_t = TMatrixT<AReal>

Definition at line 58 of file Reference.h.

◆ Scalar_t

template<typename AReal >
using TMVA::DNN::TReference< AReal >::Scalar_t = AReal

Definition at line 57 of file Reference.h.

◆ Tensor_t

template<typename AReal >
using TMVA::DNN::TReference< AReal >::Tensor_t = TMatrixT<AReal>

Definition at line 59 of file Reference.h.

Member Function Documentation

◆ AdamUpdate()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::AdamUpdate ( TMatrixT< AReal > &  A,
const TMatrixT< AReal > &  M,
const TMatrixT< AReal > &  V,
AReal  alpha,
AReal  eps 
)
static

Update functions for ADAM optimizer.

Adam updates.

Definition at line 103 of file Arithmetic.hxx.
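
A hedged sketch of how these three calls might be combined into a single ADAM step for a weight matrix W with gradient G; the variable names, hyper-parameter values and the wrapper function are illustrative assumptions, not part of the class.

   // Illustrative ADAM step (W, G, M, V and the hyper-parameter values are assumptions).
   #include "TMVA/DNN/Architectures/Reference.h"

   using Arch_t = TMVA::DNN::TReference<double>;

   void AdamStep(TMatrixT<double> &W, const TMatrixT<double> &G,
                 TMatrixT<double> &M, TMatrixT<double> &V,
                 double alpha = 1e-3, double beta1 = 0.9,
                 double beta2 = 0.999, double eps = 1e-7)
   {
      Arch_t::AdamUpdateFirstMom(M, G, beta1);   // update first-moment estimate with gradient G
      Arch_t::AdamUpdateSecondMom(V, G, beta2);  // update second-moment estimate
      Arch_t::AdamUpdate(W, M, V, alpha, eps);   // apply the update to the weights W
   }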

◆ AdamUpdateFirstMom()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::AdamUpdateFirstMom ( TMatrixT< AReal > &  A,
const TMatrixT< AReal > &  B,
AReal  beta 
)
static

Definition at line 117 of file Arithmetic.hxx.

◆ AdamUpdateSecondMom()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::AdamUpdateSecondMom ( TMatrixT< AReal > &  A,
const TMatrixT< AReal > &  B,
AReal  beta 
)
static

Definition at line 129 of file Arithmetic.hxx.

◆ AddBiases()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::AddBiases ( TMatrixT< AReal > &  A,
const TMatrixT< AReal > &  biases 
)
static

Definition at line 30 of file DenoisePropagation.hxx.

◆ AddConvBiases()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::AddConvBiases ( TMatrixT< AReal > &  output,
const TMatrixT< AReal > &  biases 
)
static

Add the biases in the Convolutional Layer.


Definition at line 159 of file Propagation.hxx.

◆ AddL1RegularizationGradients()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::AddL1RegularizationGradients ( TMatrixT< AReal > &  A,
const TMatrixT< AReal > &  W,
AReal  weightDecay 
)
static

Definition at line 44 of file Regularization.hxx.

◆ AddL2RegularizationGradients()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::AddL2RegularizationGradients ( TMatrixT< AReal > &  A,
const TMatrixT< AReal > &  W,
AReal  weightDecay 
)
static

Definition at line 82 of file Regularization.hxx.

◆ AddRowWise()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::AddRowWise ( TMatrixT< Scalar_t > &  output,
const TMatrixT< Scalar_t > &  biases 
)
static

Add the bias vector biases row-wise to the matrix output.

Definition at line 30 of file Propagation.hxx.

◆ Backward()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::Backward ( TMatrixT< Scalar_t > &  activationGradientsBackward,
TMatrixT< Scalar_t > &  weightGradients,
TMatrixT< Scalar_t > &  biasGradients,
TMatrixT< Scalar_t > &  df,
const TMatrixT< Scalar_t > &  activationGradients,
const TMatrixT< Scalar_t > &  weights,
const TMatrixT< Scalar_t > &  activationBackward 
)
static

Perform the complete backward propagation step.

If the provided activationGradientsBackward matrix is not empty, compute the gradients of the objective function with respect to the activations of the previous layer (backward direction). Also compute the weight and the bias gradients. Modifies the values in df and thus produces a valid result only the first time it is applied after the corresponding forward propagation has been performed.

Definition at line 40 of file Propagation.hxx.
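
A hedged sketch of a single dense-layer backward step for a batch of n events with nIn inputs and nOut outputs; the matrix shapes indicated in the comments are assumptions based on the forward-propagation conventions of this class.

   // Illustrative backward step for one fully connected layer (shapes are assumptions).
   #include "TMVA/DNN/Architectures/Reference.h"

   using Arch_t = TMVA::DNN::TReference<double>;

   void DenseBackward(TMatrixT<double> &gradPrev,          // n x nIn,  gradients w.r.t. previous activations
                      TMatrixT<double> &weightGradients,   // nOut x nIn
                      TMatrixT<double> &biasGradients,     // 1 x nOut
                      TMatrixT<double> &df,                // n x nOut, activation derivatives from the forward pass
                      const TMatrixT<double> &gradThis,    // n x nOut, incoming activation gradients
                      const TMatrixT<double> &weights,     // nOut x nIn
                      const TMatrixT<double> &actPrev)     // n x nIn,  activations of the previous layer
   {
      // df is modified in place, so this is only valid right after the forward pass.
      Arch_t::Backward(gradPrev, weightGradients, biasGradients, df,
                       gradThis, weights, actPrev);
   }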

◆ ConstAdd()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::ConstAdd ( TMatrixT< AReal > &  A,
AReal  beta 
)
static

Add the constant beta to all the elements of matrix A and write the result into A.

Definition at line 48 of file Arithmetic.hxx.

◆ ConstMult()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::ConstMult ( TMatrixT< AReal > &  A,
AReal  beta 
)
static

Multiply all elements of matrix A by the constant beta and write the result into A.

Definition at line 59 of file Arithmetic.hxx.

◆ ConvLayerBackward()

template<typename AReal >
static void TMVA::DNN::TReference< AReal >::ConvLayerBackward ( std::vector< TMatrixT< AReal > > &  ,
TMatrixT< AReal > &  ,
TMatrixT< AReal > &  ,
std::vector< TMatrixT< AReal > > &  ,
const std::vector< TMatrixT< AReal > > &  ,
const TMatrixT< AReal > &  ,
const std::vector< TMatrixT< AReal > > &  ,
size_t  ,
size_t  ,
size_t  ,
size_t  ,
size_t  ,
size_t  ,
size_t  ,
size_t  ,
size_t  ,
size_t   
)
inlinestatic

Perform the complete backward propagation step in a Convolutional Layer.

If the provided activationGradientsBackward matrix is not empty, compute the gradients of the objective function with respect to the activations of the previous layer (backward direction). Also compute the weight and the bias gradients. Modifies the values in df and thus produces a valid result only the first time it is applied after the corresponding forward propagation has been performed.

Definition at line 459 of file Reference.h.

◆ ConvLayerForward()

template<typename AReal >
static void TMVA::DNN::TReference< AReal >::ConvLayerForward ( std::vector< TMatrixT< AReal > > &  ,
std::vector< TMatrixT< AReal > > &  ,
const std::vector< TMatrixT< AReal > > &  ,
const TMatrixT< AReal > &  ,
const TMatrixT< AReal > &  ,
const DNN::CNN::TConvParams &  ,
EActivationFunction  ,
std::vector< TMatrixT< AReal > > &   
)
inlinestatic

Forward propagation in the Convolutional layer.

Definition at line 437 of file Reference.h.

◆ Copy() [1/2]

template<typename AReal >
void TMVA::DNN::TReference< AReal >::Copy ( std::vector< TMatrixT< Scalar_t > > &  A,
const std::vector< TMatrixT< Scalar_t > > &  B 
)
static

Definition at line 100 of file Propagation.hxx.

◆ Copy() [2/2]

template<typename AReal >
void TMVA::DNN::TReference< AReal >::Copy ( TMatrixT< Scalar_t > &  A,
const TMatrixT< Scalar_t > &  B 
)
static

Definition at line 86 of file Propagation.hxx.

◆ CopyDiffArch() [1/2]

template<typename AReal >
template<typename AMatrix_t >
void TMVA::DNN::TReference< AReal >::CopyDiffArch ( std::vector< TMatrixT< Scalar_t > > &  A,
const std::vector< AMatrix_t > &  B 
)
static

Definition at line 668 of file Reference.h.

◆ CopyDiffArch() [2/2]

template<typename AReal >
template<typename AMatrix_t >
void TMVA::DNN::TReference< AReal >::CopyDiffArch ( TMatrixT< Scalar_t > &  A,
const AMatrix_t &  B 
)
static

Definition at line 660 of file Reference.h.

◆ CorruptInput()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::CorruptInput ( TMatrixT< AReal > &  input,
TMatrixT< AReal > &  corruptedInput,
AReal  corruptionLevel 
)
static

Definition at line 108 of file DenoisePropagation.hxx.

◆ CrossEntropy()

template<typename AReal >
AReal TMVA::DNN::TReference< AReal >::CrossEntropy ( const TMatrixT< AReal > &  Y,
const TMatrixT< AReal > &  output,
const TMatrixT< AReal > &  weights 
)
static

Sigmoid transformation is implicitly applied, thus output should hold the linear activations of the last layer in the net.

Definition at line 64 of file LossFunctions.hxx.

◆ CrossEntropyGradients()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::CrossEntropyGradients ( TMatrixT< AReal > &  dY,
const TMatrixT< AReal > &  Y,
const TMatrixT< AReal > &  output,
const TMatrixT< AReal > &  weights 
)
static

Definition at line 85 of file LossFunctions.hxx.
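
A hedged sketch of evaluating the loss and its gradients for a batch: Y holds the targets, output holds the linear activations of the last layer (the sigmoid is applied internally), and weights holds the per-event weights. The wrapper function and the shape comments are illustrative assumptions.

   // Illustrative cross-entropy evaluation (matrix shapes are assumptions).
   #include "TMVA/DNN/Architectures/Reference.h"

   using Arch_t = TMVA::DNN::TReference<double>;

   double LossAndGradients(const TMatrixT<double> &Y,       // targets,            n x 1
                           const TMatrixT<double> &output,  // linear activations, n x 1
                           const TMatrixT<double> &weights, // per-event weights,  n x 1
                           TMatrixT<double> &dY)            // gradients,          n x 1
   {
      double loss = Arch_t::CrossEntropy(Y, output, weights);
      Arch_t::CrossEntropyGradients(dY, Y, output, weights);  // starting point for backpropagation
      return loss;
   }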

◆ Deflatten()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::Deflatten ( std::vector< TMatrixT< AReal > > &  A,
const TMatrixT< Scalar_t > &  B,
size_t  index,
size_t  nRows,
size_t  nCols 
)
static

Transforms each row of B to a matrix and stores it in the tensor A.

Definition at line 422 of file Propagation.hxx.

◆ Downsample()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::Downsample ( TMatrixT< AReal > &  A,
TMatrixT< AReal > &  B,
const TMatrixT< AReal > &  C,
size_t  imgHeight,
size_t  imgWidth,
size_t  fltHeight,
size_t  fltWidth,
size_t  strideRows,
size_t  strideCols 
)
static

Downsample the matrix C to the matrix A, using the max operation, such that the winning indices are stored in matrix B.

Definition at line 334 of file Propagation.hxx.
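
A hedged sketch of max pooling a single 4x4 channel with a 2x2 window and stride 2; the flattened row-wise layout of C and the sizes of A and B are assumptions made for the illustration only.

   // Illustrative max-pooling call (the flattened image layout is an assumption).
   #include "TMVA/DNN/Architectures/Reference.h"

   using Arch_t = TMVA::DNN::TReference<double>;

   void PoolOneChannel()
   {
      const size_t imgH = 4, imgW = 4, fltH = 2, fltW = 2, strideR = 2, strideC = 2;
      const size_t nLocalViews = 4;                 // (4/2) * (4/2) pooling windows

      TMatrixT<double> C(1, imgH * imgW);           // one channel, flattened row-wise
      TMatrixT<double> A(1, nLocalViews);           // pooled output
      TMatrixT<double> B(1, nLocalViews);           // winning indices, reused in the backward pass

      Arch_t::Downsample(A, B, C, imgH, imgW, fltH, fltW, strideR, strideC);
   }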

◆ DropoutForward() [1/2]

template<typename AReal >
static void TMVA::DNN::TReference< AReal >::DropoutForward ( Matrix_t &  A,
Scalar_t  p 
)
inlinestatic

Definition at line 385 of file Reference.h.

◆ DropoutForward() [2/2]

template<typename AReal >
static void TMVA::DNN::TReference< AReal >::DropoutForward ( Tensor_t &  A,
TDescriptors *  descriptors,
TWorkspace *  workspace,
Scalar_t  p 
)
static

Apply dropout with activation probability p to the given matrix A and scale the result by the reciprocal of p.
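
A hedged sketch of the simpler Matrix_t overload; the matrix and the keep probability are illustrative assumptions.

   // Illustrative dropout call using the simple overload (keep probability is an assumption).
   #include "TMVA/DNN/Architectures/Reference.h"

   using Arch_t = TMVA::DNN::TReference<double>;

   void ApplyDropout(TMatrixT<double> &A)
   {
      // Elements survive with probability p = 0.5 and are scaled by 1/p.
      Arch_t::DropoutForward(A, 0.5);
   }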

◆ EncodeInput()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::EncodeInput ( TMatrixT< AReal > &  input,
TMatrixT< AReal > &  compressedInput,
TMatrixT< AReal > &  Weights 
)
static

Definition at line 134 of file DenoisePropagation.hxx.

◆ FastTanh()

template<typename AReal >
static void TMVA::DNN::TReference< AReal >::FastTanh ( Tensor_t &  B)
inlinestatic

Definition at line 242 of file Reference.h.

◆ FastTanhDerivative()

template<typename AReal >
static void TMVA::DNN::TReference< AReal >::FastTanhDerivative ( Tensor_t &  B,
const Tensor_t &  A 
)
inlinestatic

Definition at line 243 of file Reference.h.

◆ Flatten()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::Flatten ( TMatrixT< AReal > &  A,
const std::vector< TMatrixT< AReal > > &  B,
size_t  size,
size_t  nRows,
size_t  nCols 
)
static

Flattens the tensor B, such that each matrix is stretched into one row, resulting in the matrix A.

Definition at line 408 of file Propagation.hxx.

◆ ForwardLogReg()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::ForwardLogReg ( TMatrixT< AReal > &  input,
TMatrixT< AReal > &  p,
TMatrixT< AReal > &  fWeights 
)
static

Definition at line 171 of file DenoisePropagation.hxx.

◆ Gauss()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::Gauss ( TMatrixT< AReal > &  B)
inlinestatic

Definition at line 206 of file ActivationFunctions.hxx.

◆ GaussDerivative()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::GaussDerivative ( TMatrixT< AReal > &  B,
const TMatrixT< AReal > &  A 
)
inlinestatic

Definition at line 222 of file ActivationFunctions.hxx.

◆ GetRandomGenerator()

template<typename Real_t >
TRandom & TMVA::DNN::TReference< Real_t >::GetRandomGenerator
static

Definition at line 35 of file Initialization.hxx.

◆ GRULayerBackward()

template<typename Scalar_t >
auto TMVA::DNN::TReference< Scalar_t >::GRULayerBackward ( TMatrixT< Scalar_t > &  state_gradients_backward,
TMatrixT< Scalar_t > &  reset_weight_gradients,
TMatrixT< Scalar_t > &  update_weight_gradients,
TMatrixT< Scalar_t > &  candidate_weight_gradients,
TMatrixT< Scalar_t > &  reset_state_weight_gradients,
TMatrixT< Scalar_t > &  update_state_weight_gradients,
TMatrixT< Scalar_t > &  candidate_state_weight_gradients,
TMatrixT< Scalar_t > &  reset_bias_gradients,
TMatrixT< Scalar_t > &  update_bias_gradients,
TMatrixT< Scalar_t > &  candidate_bias_gradients,
TMatrixT< Scalar_t > &  dr,
TMatrixT< Scalar_t > &  du,
TMatrixT< Scalar_t > &  dc,
const TMatrixT< Scalar_t > &  precStateActivations,
const TMatrixT< Scalar_t > &  fReset,
const TMatrixT< Scalar_t > &  fUpdate,
const TMatrixT< Scalar_t > &  fCandidate,
const TMatrixT< Scalar_t > &  weights_reset,
const TMatrixT< Scalar_t > &  weights_update,
const TMatrixT< Scalar_t > &  weights_candidate,
const TMatrixT< Scalar_t > &  weights_reset_state,
const TMatrixT< Scalar_t > &  weights_update_state,
const TMatrixT< Scalar_t > &  weights_candidate_state,
const TMatrixT< Scalar_t > &  input,
TMatrixT< Scalar_t > &  input_gradient 
)
static

Backward pass for GRU Network.

Definition at line 224 of file RecurrentPropagation.hxx.

◆ Hadamard()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::Hadamard ( TMatrixT< AReal > &  A,
const TMatrixT< AReal > &  B 
)
static

In-place Hadamard (element-wise) product of matrices A and B with the result being written into A.

Definition at line 37 of file Arithmetic.hxx.

◆ Identity()

template<typename AReal >
static void TMVA::DNN::TReference< AReal >::Identity ( TMatrixT< AReal > &  B)
static

◆ IdentityDerivative()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::IdentityDerivative ( TMatrixT< AReal > &  B,
const TMatrixT< AReal > &  A 
)
static

Definition at line 27 of file ActivationFunctions.hxx.

◆ Im2col()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::Im2col ( TMatrixT< AReal > &  A,
const TMatrixT< AReal > &  B,
size_t  imgHeight,
size_t  imgWidth,
size_t  fltHeight,
size_t  fltWidth,
size_t  strideRows,
size_t  strideCols,
size_t  zeroPaddingHeight,
size_t  zeroPaddingWidth 
)
static

Transform the matrix B into local view format, suitable for convolution, and store it in matrix A.

Definition at line 109 of file Propagation.hxx.
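
A hedged sketch of unrolling a single-channel 4x4 image into local views for a 2x2 filter with stride 1 and no padding; the row/column layout of A and B is an assumption used only to size the matrices for the illustration.

   // Illustrative Im2col call (the sizes of A and B are assumptions).
   #include "TMVA/DNN/Architectures/Reference.h"

   using Arch_t = TMVA::DNN::TReference<double>;

   void UnrollImage()
   {
      const size_t imgH = 4, imgW = 4, fltH = 2, fltW = 2;
      const size_t nLocalViews      = 3 * 3;        // output positions for stride 1, no padding
      const size_t nLocalViewPixels = fltH * fltW;  // pixels per local view, one input channel

      TMatrixT<double> B(1, imgH * imgW);           // one input channel, flattened
      TMatrixT<double> A(nLocalViews, nLocalViewPixels);

      Arch_t::Im2col(A, B, imgH, imgW, fltH, fltW,
                     /*strideRows=*/1, /*strideCols=*/1,
                     /*zeroPaddingHeight=*/0, /*zeroPaddingWidth=*/0);
      // Each row of A now holds one local view; the convolution itself can then
      // be expressed as a matrix multiplication with the (rotated) filter weights.
   }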

◆ Im2colFast()

template<typename AReal >
static void TMVA::DNN::TReference< AReal >::Im2colFast ( TMatrixT< AReal > &  ,
const TMatrixT< AReal > &  ,
const std::vector< int > &   
)
inlinestatic

Definition at line 420 of file Reference.h.

◆ Im2colIndices()

template<typename AReal >
static void TMVA::DNN::TReference< AReal >::Im2colIndices ( std::vector< int > &  ,
const TMatrixT< AReal > &  ,
size_t  ,
size_t  ,
size_t  ,
size_t  ,
size_t  ,
size_t  ,
size_t  ,
size_t  ,
size_t   
)
inlinestatic

Definition at line 416 of file Reference.h.

◆ InitializeGauss()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::InitializeGauss ( TMatrixT< AReal > &  A)
static

Definition at line 43 of file Initialization.hxx.

◆ InitializeGlorotNormal()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::InitializeGlorotNormal ( TMatrixT< AReal > &  A)
static

Truncated normal initialization (Glorot, also called Xavier normal). The values are sampled from a normal distribution with stddev = sqrt(2/(N_input+N_output)) and values larger than 2 * stddev are discarded. See Glorot & Bengio, AISTATS 2010 - http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf.

Definition at line 85 of file Initialization.hxx.

◆ InitializeGlorotUniform()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::InitializeGlorotUniform ( TMatrixT< AReal > &  A)
static

Sample from a uniform distribution in the range [-lim, +lim] where lim = sqrt(6/(N_in+N_out)).

This initialization is also called Xavier uniform; see Glorot & Bengio, AISTATS 2010 - http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf

Definition at line 110 of file Initialization.hxx.
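
A hedged sketch of seeding the generator and initializing a weight matrix; the seed and the matrix shape are illustrative assumptions.

   // Illustrative weight initialization (seed and matrix shape are assumptions).
   #include "TMVA/DNN/Architectures/Reference.h"

   using Arch_t = TMVA::DNN::TReference<double>;

   void InitWeights()
   {
      Arch_t::SetRandomSeed(1234);              // make the initialization reproducible
      TMatrixT<double> W(64, 128);              // nOut x nIn, illustrative shape
      Arch_t::InitializeGlorotUniform(W);       // lim = sqrt(6 / (N_in + N_out))
   }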

◆ InitializeIdentity()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::InitializeIdentity ( TMatrixT< AReal > &  A)
static

Definition at line 129 of file Initialization.hxx.

◆ InitializeUniform()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::InitializeUniform ( TMatrixT< AReal > &  A)
static

Definition at line 62 of file Initialization.hxx.

◆ InitializeZero()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::InitializeZero ( TMatrixT< AReal > &  A)
static

Definition at line 148 of file Initialization.hxx.

◆ L1Regularization()

template<typename AReal >
Real_t TMVA::DNN::TReference< Real_t >::L1Regularization ( const TMatrixT< AReal > &  W)
static

Definition at line 26 of file Regularization.hxx.

◆ L2Regularization()

template<typename AReal >
Real_t TMVA::DNN::TReference< Real_t >::L2Regularization ( const TMatrixT< AReal > &  W)
static

Definition at line 64 of file Regularization.hxx.
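
A hedged sketch of adding an L2 penalty to the loss and its contribution to the weight gradients; the wrapper function and the weight-decay value lambda are illustrative assumptions.

   // Illustrative L2 regularization (lambda and the wrapper are assumptions).
   #include "TMVA/DNN/Architectures/Reference.h"

   using Arch_t = TMVA::DNN::TReference<double>;

   double RegularizedLoss(double loss, const TMatrixT<double> &W,
                          TMatrixT<double> &weightGradients, double lambda = 1e-4)
   {
      loss += lambda * Arch_t::L2Regularization(W);                     // penalty term
      Arch_t::AddL2RegularizationGradients(weightGradients, W, lambda); // gradient term
      return loss;
   }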

◆ LSTMLayerBackward()

template<typename Scalar_t >
auto TMVA::DNN::TReference< Scalar_t >::LSTMLayerBackward ( TMatrixT< Scalar_t > &  state_gradients_backward,
TMatrixT< Scalar_t > &  cell_gradients_backward,
TMatrixT< Scalar_t > &  input_weight_gradients,
TMatrixT< Scalar_t > &  forget_weight_gradients,
TMatrixT< Scalar_t > &  candidate_weight_gradients,
TMatrixT< Scalar_t > &  output_weight_gradients,
TMatrixT< Scalar_t > &  input_state_weight_gradients,
TMatrixT< Scalar_t > &  forget_state_weight_gradients,
TMatrixT< Scalar_t > &  candidate_state_weight_gradients,
TMatrixT< Scalar_t > &  output_state_weight_gradients,
TMatrixT< Scalar_t > &  input_bias_gradients,
TMatrixT< Scalar_t > &  forget_bias_gradients,
TMatrixT< Scalar_t > &  candidate_bias_gradients,
TMatrixT< Scalar_t > &  output_bias_gradients,
TMatrixT< Scalar_t > &  di,
TMatrixT< Scalar_t > &  df,
TMatrixT< Scalar_t > &  dc,
TMatrixT< Scalar_t > &  dout,
const TMatrixT< Scalar_t > &  precStateActivations,
const TMatrixT< Scalar_t > &  precCellActivations,
const TMatrixT< Scalar_t > &  fInput,
const TMatrixT< Scalar_t > &  fForget,
const TMatrixT< Scalar_t > &  fCandidate,
const TMatrixT< Scalar_t > &  fOutput,
const TMatrixT< Scalar_t > &  weights_input,
const TMatrixT< Scalar_t > &  weights_forget,
const TMatrixT< Scalar_t > &  weights_candidate,
const TMatrixT< Scalar_t > &  weights_output,
const TMatrixT< Scalar_t > &  weights_input_state,
const TMatrixT< Scalar_t > &  weights_forget_state,
const TMatrixT< Scalar_t > &  weights_candidate_state,
const TMatrixT< Scalar_t > &  weights_output_state,
const TMatrixT< Scalar_t > &  input,
TMatrixT< Scalar_t > &  input_gradient,
TMatrixT< Scalar_t > &  cell_gradient,
TMatrixT< Scalar_t > &  cell_tanh 
)
static

Backward pass for LSTM Network.

Definition at line 86 of file RecurrentPropagation.hxx.

◆ MaxPoolLayerBackward()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::MaxPoolLayerBackward ( TMatrixT< AReal > &  activationGradientsBackward,
const TMatrixT< AReal > &  activationGradients,
const TMatrixT< AReal > &  indexMatrix,
size_t  imgHeight,
size_t  imgWidth,
size_t  fltHeight,
size_t  fltWidth,
size_t  strideRows,
size_t  strideCol,
size_t  nLocalViews 
)
static

Perform the complete backward propagation step in a Max Pooling Layer.

Based on the winning indices stored in the index matrix, it just forwards the activation gradients to the previous layer.

Definition at line 367 of file Propagation.hxx.

◆ MeanSquaredError()

template<typename AReal >
AReal TMVA::DNN::TReference< AReal >::MeanSquaredError ( const TMatrixT< AReal > &  Y,
const TMatrixT< AReal > &  output,
const TMatrixT< AReal > &  weights 
)
static

Definition at line 25 of file LossFunctions.hxx.

◆ MeanSquaredErrorGradients()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::MeanSquaredErrorGradients ( TMatrixT< AReal > &  dY,
const TMatrixT< AReal > &  Y,
const TMatrixT< AReal > &  output,
const TMatrixT< AReal > &  weights 
)
static

Definition at line 45 of file LossFunctions.hxx.

◆ MultiplyTranspose()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::MultiplyTranspose ( TMatrixT< Scalar_t > &  output,
const TMatrixT< Scalar_t > &  input,
const TMatrixT< Scalar_t > &  weights 
)
static

Matrix-multiply input with the transpose of weights and write the results into output.

Definition at line 23 of file Propagation.hxx.
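
A hedged sketch of a dense-layer forward step for a batch of n events with nIn inputs and nOut outputs; the shape conventions (input n x nIn, weights nOut x nIn, biases 1 x nOut) are assumptions made for the illustration.

   // Illustrative dense-layer forward pass (shapes are assumptions).
   #include "TMVA/DNN/Architectures/Reference.h"

   using Arch_t = TMVA::DNN::TReference<double>;

   void DenseForward(TMatrixT<double> &output,            // n x nOut
                     const TMatrixT<double> &input,       // n x nIn
                     const TMatrixT<double> &weights,     // nOut x nIn
                     const TMatrixT<double> &biases)      // 1 x nOut
   {
      Arch_t::MultiplyTranspose(output, input, weights);  // output = input * weights^T
      Arch_t::AddRowWise(output, biases);                 // add the biases to every row
   }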

◆ PrepareInternals()

template<typename AReal >
static void TMVA::DNN::TReference< AReal >::PrepareInternals ( std::vector< TMatrixT< AReal > > &  )
inlinestatic

Dummy placeholder - preparation is currently only required for the CUDA architecture.

Definition at line 434 of file Reference.h.

◆ Rearrange()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::Rearrange ( std::vector< TMatrixT< AReal > > &  out,
const std::vector< TMatrixT< AReal > > &  in 
)
static

Rearrange data according to time: fill the B x T x D tensor out with the T x B x D matrix in.

Definition at line 436 of file Propagation.hxx.

◆ ReciprocalElementWise()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::ReciprocalElementWise ( TMatrixT< AReal > &  A)
static

Take the reciprocal of each element of the matrix A and write the result into A.

Definition at line 70 of file Arithmetic.hxx.

◆ ReconstructInput()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::ReconstructInput ( TMatrixT< AReal > &  compressedInput,
TMatrixT< AReal > &  reconstructedInput,
TMatrixT< AReal > &  fWeights 
)
static

Definition at line 152 of file DenoisePropagation.hxx.

◆ RecurrentLayerBackward()

template<typename Scalar_t >
auto TMVA::DNN::TReference< Scalar_t >::RecurrentLayerBackward ( TMatrixT< Scalar_t > &  state_gradients_backward,
TMatrixT< Scalar_t > &  input_weight_gradients,
TMatrixT< Scalar_t > &  state_weight_gradients,
TMatrixT< Scalar_t > &  bias_gradients,
TMatrixT< Scalar_t > &  df,
const TMatrixT< Scalar_t > &  state,
const TMatrixT< Scalar_t > &  weights_input,
const TMatrixT< Scalar_t > &  weights_state,
const TMatrixT< Scalar_t > &  input,
TMatrixT< Scalar_t > &  input_gradient 
)
static

Backpropagation step for a Recurrent Neural Network.

Definition at line 26 of file RecurrentPropagation.hxx.

◆ Relu()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::Relu ( TMatrixT< AReal > &  B)
static

Definition at line 43 of file ActivationFunctions.hxx.

◆ ReluDerivative()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::ReluDerivative ( TMatrixT< AReal > &  B,
const TMatrixT< AReal > &  A 
)
inlinestatic

Definition at line 58 of file ActivationFunctions.hxx.

◆ Reshape()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::Reshape ( TMatrixT< AReal > &  A,
const TMatrixT< AReal > &  B 
)
static

Transform the matrix B to the matrix A, which has different dimensions.

Definition at line 393 of file Propagation.hxx.

◆ RotateWeights()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::RotateWeights ( TMatrixT< AReal > &  A,
const TMatrixT< AReal > &  B,
size_t  filterDepth,
size_t  filterHeight,
size_t  filterWidth,
size_t  numFilters 
)
static

Rotates the matrix B, which represents the weights, and stores the result in the matrix A.

Definition at line 144 of file Propagation.hxx.

◆ ScaleAdd() [1/2]

template<typename AReal >
void TMVA::DNN::TReference< AReal >::ScaleAdd ( std::vector< TMatrixT< Scalar_t > > &  A,
const std::vector< TMatrixT< Scalar_t > > &  B,
Scalar_t  beta = 1.0 
)
static

Above functions extended to vectors.

Definition at line 92 of file Propagation.hxx.

◆ ScaleAdd() [2/2]

template<typename AReal >
void TMVA::DNN::TReference< AReal >::ScaleAdd ( TMatrixT< Scalar_t > &  A,
const TMatrixT< Scalar_t > &  B,
Scalar_t  beta = 1.0 
)
static

Adds the elements of matrix B, scaled by beta, to the elements of matrix A.

This is required for the weight update in the gradient descent step.

Definition at line 76 of file Propagation.hxx.
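
A hedged sketch of the plain gradient-descent weight update this function is used for; the wrapper function and the learning rate are illustrative assumptions.

   // Illustrative gradient-descent step (learning rate is an assumption).
   #include "TMVA/DNN/Architectures/Reference.h"

   using Arch_t = TMVA::DNN::TReference<double>;

   void GradientDescentStep(TMatrixT<double> &weights,
                            const TMatrixT<double> &weightGradients,
                            double learningRate = 0.01)
   {
      // weights <- weights - learningRate * weightGradients
      Arch_t::ScaleAdd(weights, weightGradients, -learningRate);
   }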

◆ SetRandomSeed()

template<typename Real_t >
void TMVA::DNN::TReference< Real_t >::SetRandomSeed ( size_t  seed)
static

Definition at line 29 of file Initialization.hxx.

◆ Sigmoid() [1/2]

template<typename AReal >
static void TMVA::DNN::TReference< AReal >::Sigmoid ( TMatrixT< AReal > &  B)
static

◆ Sigmoid() [2/2]

template<typename AReal >
void TMVA::DNN::TReference< AReal >::Sigmoid ( TMatrixT< AReal > &  YHat,
const TMatrixT< AReal > &  A 
)
static

Definition at line 23 of file OutputFunctions.hxx.

◆ SigmoidDerivative()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::SigmoidDerivative ( TMatrixT< AReal > &  B,
const TMatrixT< AReal > &  A 
)
inlinestatic

Definition at line 92 of file ActivationFunctions.hxx.

◆ Softmax()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::Softmax ( TMatrixT< AReal > &  YHat,
const TMatrixT< AReal > &  A 
)
static

Definition at line 39 of file OutputFunctions.hxx.
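
A hedged sketch of turning the linear activations of the last layer into predictions for a batch; the shapes and the choice between the two output functions are illustrative assumptions.

   // Illustrative output transformations (shapes are assumptions).
   #include "TMVA/DNN/Architectures/Reference.h"

   using Arch_t = TMVA::DNN::TReference<double>;

   void Predict(TMatrixT<double> &YHat,            // n x nClasses, predictions
                const TMatrixT<double> &A)         // n x nClasses, linear activations
   {
      // Multi-class: rows of YHat sum to one after the softmax.
      Arch_t::Softmax(YHat, A);
      // Two-class alternative: element-wise sigmoid of the activations.
      // Arch_t::Sigmoid(YHat, A);
   }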

◆ SoftmaxAE()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::SoftmaxAE ( TMatrixT< AReal > &  A)
static

Definition at line 85 of file DenoisePropagation.hxx.

◆ SoftmaxCrossEntropy()

template<typename AReal >
AReal TMVA::DNN::TReference< AReal >::SoftmaxCrossEntropy ( const TMatrixT< AReal > &  Y,
const TMatrixT< AReal > &  output,
const TMatrixT< AReal > &  weights 
)
static

Softmax transformation is implicitly applied, thus output should hold the linear activations of the last layer in the net.

Definition at line 107 of file LossFunctions.hxx.

◆ SoftmaxCrossEntropyGradients()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::SoftmaxCrossEntropyGradients ( TMatrixT< AReal > &  dY,
const TMatrixT< AReal > &  Y,
const TMatrixT< AReal > &  output,
const TMatrixT< AReal > &  weights 
)
static

Definition at line 131 of file LossFunctions.hxx.

◆ SoftSign()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::SoftSign ( TMatrixT< AReal > &  B)
inlinestatic

Definition at line 173 of file ActivationFunctions.hxx.

◆ SoftSignDerivative()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::SoftSignDerivative ( TMatrixT< AReal > &  B,
const TMatrixT< AReal > &  A 
)
inlinestatic

Definition at line 189 of file ActivationFunctions.hxx.

◆ SqrtElementWise()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::SqrtElementWise ( TMatrixT< AReal > &  A)
static

Take the square root of each element of the matrix A and write the result into A.

Definition at line 92 of file Arithmetic.hxx.

◆ SquareElementWise()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::SquareElementWise ( TMatrixT< AReal > &  A)
static

Square each element of the matrix A and write the result into A.

Definition at line 81 of file Arithmetic.hxx.

◆ SumColumns()

template<typename AReal >
void TMVA::DNN::TReference< AReal >::SumColumns ( TMatrixT< AReal > &  B,
const TMatrixT< AReal > &  A 
)
static

Sum columns of the (m x n) matrix A and write the results into the first m elements of B.

Definition at line 25 of file Arithmetic.hxx.

◆ SymmetricRelu()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::SymmetricRelu ( TMatrixT< AReal > &  B)
inlinestatic

Definition at line 142 of file ActivationFunctions.hxx.

◆ SymmetricReluDerivative()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::SymmetricReluDerivative ( TMatrixT< AReal > &  B,
const TMatrixT< AReal > &  A 
)
inlinestatic

Definition at line 157 of file ActivationFunctions.hxx.

◆ Tanh()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::Tanh ( TMatrixT< AReal > &  B)
inlinestatic

Definition at line 109 of file ActivationFunctions.hxx.

◆ TanhDerivative()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::TanhDerivative ( TMatrixT< AReal > &  B,
const TMatrixT< AReal > &  A 
)
inlinestatic

Definition at line 125 of file ActivationFunctions.hxx.

◆ UpdateParams()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::UpdateParams ( TMatrixT< AReal > &  x,
TMatrixT< AReal > &  tildeX,
TMatrixT< AReal > &  y,
TMatrixT< AReal > &  z,
TMatrixT< AReal > &  fVBiases,
TMatrixT< AReal > &  fHBiases,
TMatrixT< AReal > &  fWeights,
TMatrixT< AReal > &  VBiasError,
TMatrixT< AReal > &  HBiasError,
AReal  learningRate,
size_t  fBatchSize 
)
static

Definition at line 48 of file DenoisePropagation.hxx.

◆ UpdateParamsLogReg()

template<typename AReal >
void TMVA::DNN::TReference< Real_t >::UpdateParamsLogReg ( TMatrixT< AReal > &  input,
TMatrixT< AReal > &  output,
TMatrixT< AReal > &  difference,
TMatrixT< AReal > &  p,
TMatrixT< AReal > &  fWeights,
TMatrixT< AReal > &  fBiases,
AReal  learningRate,
size_t  fBatchSize 
)
static

Definition at line 191 of file DenoisePropagation.hxx.

Member Data Documentation

◆ fgRandomGen

template<typename Real_t >
TRandom * TMVA::DNN::TReference< Real_t >::fgRandomGen = nullptr
staticprivate

Definition at line 55 of file Reference.h.

The documentation for this class was generated from the following files:
  • tmva/tmva/inc/TMVA/DNN/Architectures/Reference.h
  • tmva/tmva/src/DNN/Architectures/Reference/ActivationFunctions.hxx
  • tmva/tmva/src/DNN/Architectures/Reference/Arithmetic.hxx
  • tmva/tmva/src/DNN/Architectures/Reference/DenoisePropagation.hxx
  • tmva/tmva/src/DNN/Architectures/Reference/Initialization.hxx
  • tmva/tmva/src/DNN/Architectures/Reference/LossFunctions.hxx
  • tmva/tmva/src/DNN/Architectures/Reference/OutputFunctions.hxx
  • tmva/tmva/src/DNN/Architectures/Reference/Propagation.hxx
  • tmva/tmva/src/DNN/Architectures/Reference/RecurrentPropagation.hxx
  • tmva/tmva/src/DNN/Architectures/Reference/Regularization.hxx