ROOT Reference Guide
 
TMVA::DNN::TBatchNormLayer< Architecture_t > Member List

This is the complete list of members for TMVA::DNN::TBatchNormLayer< Architecture_t >, including all inherited members.

Member | Defined in | Attributes
AddWeightsXMLTo(void *parent) | TMVA::DNN::TBatchNormLayer< Architecture_t > | virtual
Backward(Tensor_t &gradients_backward, const Tensor_t &activations_backward) | TMVA::DNN::TBatchNormLayer< Architecture_t > | virtual
BNormDescriptors_t typedef | TMVA::DNN::TBatchNormLayer< Architecture_t >
CalculateNormDim(int axis, size_t c, size_t h, size_t w) | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline protected static
CopyBiases(const std::vector< Matrix_t > &otherBiases) | TMVA::DNN::VGeneralLayer< Architecture_t >
CopyParameters(const VGeneralLayer< Arch > &layer) | TMVA::DNN::VGeneralLayer< Architecture_t >
CopyWeights(const std::vector< Matrix_t > &otherWeights) | TMVA::DNN::VGeneralLayer< Architecture_t >
fActivationGradients | TMVA::DNN::VGeneralLayer< Architecture_t > | protected
fBatchSize | TMVA::DNN::VGeneralLayer< Architecture_t > | protected
fBiases | TMVA::DNN::VGeneralLayer< Architecture_t > | protected
fBiasGradients | TMVA::DNN::VGeneralLayer< Architecture_t > | protected
fDepth | TMVA::DNN::VGeneralLayer< Architecture_t > | protected
fDerivatives | TMVA::DNN::TBatchNormLayer< Architecture_t > | private
fDescriptors | TMVA::DNN::TBatchNormLayer< Architecture_t > | private
fEpsilon | TMVA::DNN::TBatchNormLayer< Architecture_t > | private
fHeight | TMVA::DNN::VGeneralLayer< Architecture_t > | protected
fInit | TMVA::DNN::VGeneralLayer< Architecture_t > | protected
fInputDepth | TMVA::DNN::VGeneralLayer< Architecture_t > | protected
fInputHeight | TMVA::DNN::VGeneralLayer< Architecture_t > | protected
fInputWidth | TMVA::DNN::VGeneralLayer< Architecture_t > | protected
fIsTraining | TMVA::DNN::VGeneralLayer< Architecture_t > | protected
fIVar | TMVA::DNN::TBatchNormLayer< Architecture_t > | private
fMomentum | TMVA::DNN::TBatchNormLayer< Architecture_t > | private
fMu | TMVA::DNN::TBatchNormLayer< Architecture_t > | private
fMu_Training | TMVA::DNN::TBatchNormLayer< Architecture_t > | private
fNormAxis | TMVA::DNN::TBatchNormLayer< Architecture_t > | private
Forward(Tensor_t &input, bool inTraining=true) | TMVA::DNN::TBatchNormLayer< Architecture_t > | virtual
fOutput | TMVA::DNN::VGeneralLayer< Architecture_t > | protected
fReshapedData | TMVA::DNN::TBatchNormLayer< Architecture_t > | private
fTrainedBatches | TMVA::DNN::TBatchNormLayer< Architecture_t > | private
fVar | TMVA::DNN::TBatchNormLayer< Architecture_t > | private
fVar_Training | TMVA::DNN::TBatchNormLayer< Architecture_t > | private
fWeightGradients | TMVA::DNN::VGeneralLayer< Architecture_t > | protected
fWeights | TMVA::DNN::VGeneralLayer< Architecture_t > | protected
fWidth | TMVA::DNN::VGeneralLayer< Architecture_t > | protected
GetActivationGradients() const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetActivationGradients() | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetActivationGradientsAt(size_t i) | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetActivationGradientsAt(size_t i) const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetBatchMean() const | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline
GetBatchMean() | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline
GetBatchSize() const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetBiases() const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetBiases() | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetBiasesAt(size_t i) const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetBiasesAt(size_t i) | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetBiasGradients() const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetBiasGradients() | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetBiasGradientsAt(size_t i) const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetBiasGradientsAt(size_t i) | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetDepth() const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetEpsilon() const | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline
GetExtraLayerParameters() const | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline virtual
GetHeight() const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetInitialization() const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetInputDepth() const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetInputHeight() const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetInputWidth() const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetIVariance() const | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline
GetIVariance() | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline
GetMomentum() const | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline
GetMuVector() const | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline
GetMuVector() | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline
GetNormAxis() const | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline
GetNTrainedBatches() const | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline
GetNTrainedBatches() | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline
GetOutput() const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetOutput() | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetOutputAt(size_t i) | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetOutputAt(size_t i) const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetReshapedData() const | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline
GetReshapedData() | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline
GetVariance() const | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline
GetVariance() | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline
GetVarVector() const | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline
GetVarVector() | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline
GetWeightGradients() const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetWeightGradients() | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetWeightGradientsAt(size_t i) const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetWeightGradientsAt(size_t i) | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetWeights() const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetWeights() | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetWeightsAt(size_t i) const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetWeightsAt(size_t i) | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
GetWidth() const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
HelperDescriptor_t typedef | TMVA::DNN::TBatchNormLayer< Architecture_t >
Initialize() | TMVA::DNN::TBatchNormLayer< Architecture_t > | virtual
IsTraining() const | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
Matrix_t typedef | TMVA::DNN::TBatchNormLayer< Architecture_t >
Print() const | TMVA::DNN::TBatchNormLayer< Architecture_t > | virtual
ReadMatrixXML(void *node, const char *name, Matrix_t &matrix) | TMVA::DNN::VGeneralLayer< Architecture_t >
ReadWeightsFromXML(void *parent) | TMVA::DNN::TBatchNormLayer< Architecture_t > | virtual
ResetTraining() | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline virtual
Scalar_t typedef | TMVA::DNN::TBatchNormLayer< Architecture_t >
SetBatchSize(size_t batchSize) | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
SetDepth(size_t depth) | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
SetDropoutProbability(Scalar_t) | TMVA::DNN::VGeneralLayer< Architecture_t > | inline virtual
SetExtraLayerParameters(const std::vector< Matrix_t > &params) | TMVA::DNN::TBatchNormLayer< Architecture_t > | inline virtual
SetHeight(size_t height) | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
SetInputDepth(size_t inputDepth) | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
SetInputHeight(size_t inputHeight) | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
SetInputWidth(size_t inputWidth) | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
SetIsTraining(bool isTraining) | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
SetWidth(size_t width) | TMVA::DNN::VGeneralLayer< Architecture_t > | inline
TBatchNormLayer(size_t batchSize, size_t inputDepth, size_t inputHeight, size_t inputWidth, const std::vector< size_t > &shape, int axis=-1, Scalar_t momentum=-1., Scalar_t epsilon=0.0001) | TMVA::DNN::TBatchNormLayer< Architecture_t >
TBatchNormLayer(TBatchNormLayer< Architecture_t > *layer) | TMVA::DNN::TBatchNormLayer< Architecture_t >
TBatchNormLayer(const TBatchNormLayer &) | TMVA::DNN::TBatchNormLayer< Architecture_t >
Tensor_t typedef | TMVA::DNN::TBatchNormLayer< Architecture_t >
Update(const Scalar_t learningRate) | TMVA::DNN::VGeneralLayer< Architecture_t >
UpdateBiases(const std::vector< Matrix_t > &biasGradients, const Scalar_t learningRate) | TMVA::DNN::VGeneralLayer< Architecture_t >
UpdateBiasGradients(const std::vector< Matrix_t > &biasGradients, const Scalar_t learningRate) | TMVA::DNN::VGeneralLayer< Architecture_t >
UpdateWeightGradients(const std::vector< Matrix_t > &weightGradients, const Scalar_t learningRate) | TMVA::DNN::VGeneralLayer< Architecture_t >
UpdateWeights(const std::vector< Matrix_t > &weightGradients, const Scalar_t learningRate) | TMVA::DNN::VGeneralLayer< Architecture_t >
VGeneralLayer(size_t BatchSize, size_t InputDepth, size_t InputHeight, size_t InputWidth, size_t Depth, size_t Height, size_t Width, size_t WeightsNSlices, size_t WeightsNRows, size_t WeightsNCols, size_t BiasesNSlices, size_t BiasesNRows, size_t BiasesNCols, size_t OutputNSlices, size_t OutputNRows, size_t OutputNCols, EInitialization Init) | TMVA::DNN::VGeneralLayer< Architecture_t >
VGeneralLayer(size_t BatchSize, size_t InputDepth, size_t InputHeight, size_t InputWidth, size_t Depth, size_t Height, size_t Width, size_t WeightsNSlices, std::vector< size_t > WeightsNRows, std::vector< size_t > WeightsNCols, size_t BiasesNSlices, std::vector< size_t > BiasesNRows, std::vector< size_t > BiasesNCols, size_t OutputNSlices, size_t OutputNRows, size_t OutputNCols, EInitialization Init) | TMVA::DNN::VGeneralLayer< Architecture_t >
VGeneralLayer(VGeneralLayer< Architecture_t > *layer) | TMVA::DNN::VGeneralLayer< Architecture_t >
VGeneralLayer(const VGeneralLayer &) | TMVA::DNN::VGeneralLayer< Architecture_t >
WriteMatrixToXML(void *node, const char *name, const Matrix_t &matrix) | TMVA::DNN::VGeneralLayer< Architecture_t >
WriteTensorToXML(void *node, const char *name, const std::vector< Matrix_t > &tensor) | TMVA::DNN::VGeneralLayer< Architecture_t >
~TBatchNormLayer() | TMVA::DNN::TBatchNormLayer< Architecture_t >
~VGeneralLayer() | TMVA::DNN::VGeneralLayer< Architecture_t > | virtual
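
For orientation, the following is a minimal sketch (not taken from the ROOT sources) of how the members listed above fit together in one training step on a flat input. It uses only members from this table (the main constructor, Initialize, Forward, Backward, Print and the Tensor_t typedef); the helper function name RunBatchNormStep, the batch size, the feature count and the shape argument {batchSize, nFeatures} are illustrative assumptions, the include path follows the layout of recent ROOT versions, and Architecture_t stands for any TMVA DNN architecture (for example TMVA::DNN::TCpu<Double_t>) whose tensors the caller has already built.

```cpp
// Minimal sketch, assuming ROOT/TMVA is available and that the header path
// below matches the installed version. Everything marked "assumed" is
// illustrative and not part of the documented interface above.
#include <cstddef>
#include <vector>
#include "TMVA/DNN/BatchNormLayer.h"

template <typename Architecture_t>
void RunBatchNormStep(
   typename TMVA::DNN::TBatchNormLayer<Architecture_t>::Tensor_t &input,        // assumed: caller-built batch of activations
   typename TMVA::DNN::TBatchNormLayer<Architecture_t>::Tensor_t &gradBackward) // assumed: gradients from the next layer
{
   const std::size_t batchSize = 32; // assumed mini-batch size
   const std::size_t nFeatures = 16; // assumed number of input features

   // Flat (dense) input: inputDepth = inputHeight = 1, inputWidth = nFeatures.
   // axis, momentum and epsilon keep the defaults shown in the constructor above.
   TMVA::DNN::TBatchNormLayer<Architecture_t> bnorm(
      batchSize, /*inputDepth*/ 1, /*inputHeight*/ 1, /*inputWidth*/ nFeatures,
      /*shape (assumed)*/ {batchSize, nFeatures});

   bnorm.Initialize();                        // initialise the trainable scale/shift parameters
   bnorm.Forward(input, /*inTraining*/ true); // normalise using the current batch statistics
   bnorm.Backward(gradBackward, input);       // propagate gradients to the previous layer
   bnorm.Print();                             // dump the layer configuration
}
```

For inference, a Forward call with inTraining=false would be expected to rely on the running statistics exposed through GetMuVector() and GetVarVector(), while ResetTraining() clears the accumulated batch statistics before a new training phase.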