ROOT 6.10/09 Reference Guide
TMVA::DNN::TNet< Architecture_t, Layer_t > Member List

This is the complete list of members for TMVA::DNN::TNet< Architecture_t, Layer_t >, including all inherited members.

AddLayer(size_t width, EActivationFunction f, Scalar_t dropoutProbability=1.0)   TMVA::DNN::TNet< Architecture_t, Layer_t >
AddLayer(SharedLayer &layer)   TMVA::DNN::TNet< Architecture_t, Layer_t >
AddLayer(SharedLayer_t &layer)   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
Backward(const Matrix_t &X, const Matrix_t &Y)   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
Clear()   TMVA::DNN::TNet< Architecture_t, Layer_t >
CreateClone(size_t batchSize)   TMVA::DNN::TNet< Architecture_t, Layer_t >
fBatchSize   TMVA::DNN::TNet< Architecture_t, Layer_t >  [private]
fDummy   TMVA::DNN::TNet< Architecture_t, Layer_t >  [private]
fInputWidth   TMVA::DNN::TNet< Architecture_t, Layer_t >  [private]
fJ   TMVA::DNN::TNet< Architecture_t, Layer_t >  [private]
fLayers   TMVA::DNN::TNet< Architecture_t, Layer_t >  [private]
Forward(Matrix_t &X, bool applyDropout=false)   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
fR   TMVA::DNN::TNet< Architecture_t, Layer_t >  [private]
fWeightDecay   TMVA::DNN::TNet< Architecture_t, Layer_t >  [private]
GetBatchSize() const   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
GetDepth() const   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
GetInputWidth() const   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
GetLayer(size_t i)   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
GetLayer(size_t i) const   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
GetLossFunction() const   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
GetNFlops()   TMVA::DNN::TNet< Architecture_t, Layer_t >
GetOutput()   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
GetOutputWidth() const   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
GetRegularization() const   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
GetWeightDecay() const   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
Initialize(EInitialization m)   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
InitializeGradients()   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
LayerIterator_t typedef   TMVA::DNN::TNet< Architecture_t, Layer_t >
LayersBegin()   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
LayersEnd()   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
Loss(const Matrix_t &Y, bool includeRegularization=true) const   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
Loss(Matrix_t &X, const Matrix_t &Y, bool applyDropout=false)   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
Matrix_t typedef   TMVA::DNN::TNet< Architecture_t, Layer_t >
Prediction(Matrix_t &Y_hat, Matrix_t &X, EOutputFunction f)   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
Prediction(Matrix_t &Y_hat, EOutputFunction f) const   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
Print()   TMVA::DNN::TNet< Architecture_t, Layer_t >
Scalar_t typedef   TMVA::DNN::TNet< Architecture_t, Layer_t >
SetBatchSize(size_t batchSize)   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
SetDropoutProbabilities(const std::vector< Double_t > &probabilities)   TMVA::DNN::TNet< Architecture_t, Layer_t >
SetInputWidth(size_t inputWidth)   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
SetLossFunction(ELossFunction J)   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
SetRegularization(ERegularization R)   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
SetWeightDecay(Scalar_t weightDecay)   TMVA::DNN::TNet< Architecture_t, Layer_t >  [inline]
TNet()   TMVA::DNN::TNet< Architecture_t, Layer_t >
TNet(const TNet &other)   TMVA::DNN::TNet< Architecture_t, Layer_t >
TNet(size_t batchSize, const TNet< OtherArchitecture_t > &)   TMVA::DNN::TNet< Architecture_t, Layer_t >
TNet(size_t batchSize, size_t inputWidth, ELossFunction fJ, ERegularization fR=ERegularization::kNone, Scalar_t fWeightDecay=0.0)   TMVA::DNN::TNet< Architecture_t, Layer_t >
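
As a quick orientation, the sketch below strings the listed members together into one forward/backward step. It is a minimal illustration, not part of this reference page: the header paths, the TMVA::DNN::TReference<double> backend, the defaulted Layer_t template parameter, and the specific enum values (kMeanSquaredError, kL2, kTanh, kIdentity, kGauss) are assumptions; only the TNet member calls and their signatures come from the list above.

   // Minimal usage sketch (assumptions noted above); only the TNet member
   // calls are taken from this member list.
   #include "TMVA/DNN/Net.h"
   #include "TMVA/DNN/Architectures/Reference.h"

   int main()
   {
      using Architecture_t = TMVA::DNN::TReference<double>;   // assumed backend
      using Net_t    = TMVA::DNN::TNet<Architecture_t>;       // Layer_t assumed to take its default
      using Matrix_t = Net_t::Matrix_t;                       // Matrix_t typedef from the list

      size_t batchSize  = 32;
      size_t inputWidth = 10;

      // TNet(batchSize, inputWidth, ELossFunction, ERegularization = kNone, weightDecay = 0.0)
      Net_t net(batchSize, inputWidth,
                TMVA::DNN::ELossFunction::kMeanSquaredError,
                TMVA::DNN::ERegularization::kL2, 1e-4);

      // AddLayer(width, activation, dropoutProbability = 1.0)
      net.AddLayer(64, TMVA::DNN::EActivationFunction::kTanh);
      net.AddLayer(1,  TMVA::DNN::EActivationFunction::kIdentity);

      net.Initialize(TMVA::DNN::EInitialization::kGauss);

      Matrix_t X(batchSize, inputWidth);   // one batch of inputs
      Matrix_t Y(batchSize, 1);            // matching targets

      net.Forward(X);                 // Forward(Matrix_t &X, bool applyDropout = false)
      auto loss = net.Loss(X, Y);     // Loss(Matrix_t &X, const Matrix_t &Y, bool applyDropout = false)
      net.Backward(X, Y);             // Backward(const Matrix_t &X, const Matrix_t &Y)

      (void) loss;
      return 0;
   }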