template<
typename Architecture_t,
typename Layer_t = VGeneralLayer<Architecture_t>>
class TMVA::DNN::TDeepNet< Architecture_t, Layer_t >
Generic Deep Neural Network class.
This class encapsulates the information for all types of Deep Neural Networks.
Template Parameters

| Architecture | The Architecture type that holds the architecture-specific data types. |

Definition at line 73 of file DeepNet.h.
| | TDeepNet () |
| | Default Constructor. |

| | TDeepNet (const TDeepNet &) |
| | Copy constructor. |

| | TDeepNet (size_t BatchSize, size_t InputDepth, size_t InputHeight, size_t InputWidth, size_t BatchDepth, size_t BatchHeight, size_t BatchWidth, ELossFunction fJ, EInitialization fI=EInitialization::kZero, ERegularization fR=ERegularization::kNone, Scalar_t fWeightDecay=0.0, bool isTraining=false) |
| | Constructor. |

| | ~TDeepNet () |
| | Destructor. |
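As a usage sketch (not part of the generated reference itself), the full constructor can be called along the following lines, assuming the `TCpu` architecture backend and the standard TMVA DNN headers; all numeric values are placeholders, and the batch depth/height/width must match the layout expected by the first layer added to the net:

```cpp
#include "TMVA/DNN/Architectures/Cpu.h"
#include "TMVA/DNN/DeepNet.h"

using namespace TMVA::DNN;

using Architecture_t = TCpu<Double_t>;
using DeepNet_t      = TDeepNet<Architecture_t>;

// Batch of 32 events, each described by 20 flat input variables.
// The batch shape values are illustrative only.
DeepNet_t net(/*BatchSize*/ 32, /*InputDepth*/ 1, /*InputHeight*/ 1, /*InputWidth*/ 20,
              /*BatchDepth*/ 1, /*BatchHeight*/ 32, /*BatchWidth*/ 20,
              ELossFunction::kCrossEntropy,
              EInitialization::kGauss,
              ERegularization::kL2,
              /*fWeightDecay*/ 1e-4,
              /*isTraining*/ true);
```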
| TBasicGRULayer< Architecture_t > * | AddBasicGRULayer (size_t stateSize, size_t inputSize, size_t timeSteps, bool rememberState=false, bool returnSequence=false, bool resetGateAfter=false) |
| | Function for adding a GRU layer to the deep neural network, with the given parameters. |

| void | AddBasicGRULayer (TBasicGRULayer< Architecture_t > *basicGRULayer) |
| | Function for adding a GRU layer to the deep neural network, when the layer is already created. |

| TBasicLSTMLayer< Architecture_t > * | AddBasicLSTMLayer (size_t stateSize, size_t inputSize, size_t timeSteps, bool rememberState=false, bool returnSequence=false) |
| | Function for adding an LSTM layer to the deep neural network, with the given parameters. |

| void | AddBasicLSTMLayer (TBasicLSTMLayer< Architecture_t > *basicLSTMLayer) |
| | Function for adding an LSTM layer to the deep neural network, when the layer is already created. |

| TBasicRNNLayer< Architecture_t > * | AddBasicRNNLayer (size_t stateSize, size_t inputSize, size_t timeSteps, bool rememberState=false, bool returnSequence=false, EActivationFunction f=EActivationFunction::kTanh) |
| | Function for adding a recurrent layer to the deep neural network, with the given parameters. |

| void | AddBasicRNNLayer (TBasicRNNLayer< Architecture_t > *basicRNNLayer) |
| | Function for adding a vanilla RNN layer, when the layer is already created. |
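For illustration only, a sketch of the parameter-based overloads, reusing the hypothetical `net` from the constructor example; the three calls are shown side by side rather than as a sensible stack, and all sizes are placeholders:

```cpp
// Each call appends one recurrent layer built from its parameters; the
// overloads taking a layer pointer instead append an already created layer.
auto *rnn  = net.AddBasicRNNLayer (/*stateSize*/ 10, /*inputSize*/ 20, /*timeSteps*/ 5);
auto *lstm = net.AddBasicLSTMLayer(/*stateSize*/ 10, /*inputSize*/ 20, /*timeSteps*/ 5,
                                   /*rememberState*/ false, /*returnSequence*/ true);
auto *gru  = net.AddBasicGRULayer (/*stateSize*/ 10, /*inputSize*/ 20, /*timeSteps*/ 5,
                                   /*rememberState*/ false, /*returnSequence*/ false,
                                   /*resetGateAfter*/ true);
```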
| TBatchNormLayer< Architecture_t > * | AddBatchNormLayer (Scalar_t momentum=-1, Scalar_t epsilon=0.0001) |
| | Function for adding a Batch Normalization layer with given parameters. |
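For example (momentum and epsilon values are illustrative):

```cpp
// Normalize the activations of the previous layer over each batch.
auto *bnorm = net.AddBatchNormLayer(/*momentum*/ 0.99, /*epsilon*/ 1e-4);
```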
| TConvLayer< Architecture_t > * | AddConvLayer (size_t depth, size_t filterHeight, size_t filterWidth, size_t strideRows, size_t strideCols, size_t paddingHeight, size_t paddingWidth, EActivationFunction f, Scalar_t dropoutProbability=1.0) |
| | Function for adding a convolution layer to the deep neural network, with a given depth, filter height and width, stride in rows and columns, zero padding, as well as the activation function and the dropout probability. |

| void | AddConvLayer (TConvLayer< Architecture_t > *convLayer) |
| | Function for adding a convolution layer to the deep neural network, when the layer is already created. |
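A minimal sketch with placeholder values:

```cpp
// 12 feature maps from 3x3 filters, unit stride, one unit of zero padding,
// ReLU activation; the default dropoutProbability of 1.0 keeps all units.
auto *conv = net.AddConvLayer(/*depth*/ 12, /*filterHeight*/ 3, /*filterWidth*/ 3,
                              /*strideRows*/ 1, /*strideCols*/ 1,
                              /*paddingHeight*/ 1, /*paddingWidth*/ 1,
                              EActivationFunction::kRelu);
```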
| TDenseLayer< Architecture_t > * | AddDenseLayer (size_t width, EActivationFunction f, Scalar_t dropoutProbability=1.0) |
| | Function for adding a densely connected layer to the deep neural network, with a given width, activation function and dropout probability. |

| void | AddDenseLayer (TDenseLayer< Architecture_t > *denseLayer) |
| | Function for adding a dense layer to the deep neural network, when the layer is already created. |
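For example (widths are placeholders):

```cpp
// A ReLU hidden layer followed by a single linear output unit.
auto *hidden = net.AddDenseLayer(/*width*/ 64, EActivationFunction::kRelu);
auto *output = net.AddDenseLayer(/*width*/ 1,  EActivationFunction::kIdentity);
```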
| void | AddMaxPoolLayer (CNN::TMaxPoolLayer< Architecture_t > *maxPoolLayer) |
| | Function for adding a max pooling layer to the deep neural network, when the layer is already created. |

| TMaxPoolLayer< Architecture_t > * | AddMaxPoolLayer (size_t frameHeight, size_t frameWidth, size_t strideRows, size_t strideCols, Scalar_t dropoutProbability=1.0) |
| | Function for adding a pooling layer to the deep neural network, with a given frame height and width, stride in rows and columns, as well as the dropout probability. |
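For example:

```cpp
// Non-overlapping 2x2 pooling windows (values are placeholders).
auto *pool = net.AddMaxPoolLayer(/*frameHeight*/ 2, /*frameWidth*/ 2,
                                 /*strideRows*/ 2, /*strideCols*/ 2);
```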
| TReshapeLayer< Architecture_t > * | AddReshapeLayer (size_t depth, size_t height, size_t width, bool flattening) |
| | Function for adding a reshape layer to the deep neural network, with a given depth, height and width, and an option to flatten the output. |

| void | AddReshapeLayer (TReshapeLayer< Architecture_t > *reshapeLayer) |
| | Function for adding a reshape layer to the deep neural network, when the layer is already created. |
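A sketch of flattening the convolutional part before the dense layers; the shape arguments here are placeholders:

```cpp
// Flatten the (depth, height, width) output of the previous layer into one
// row per event, so that dense layers can follow.
auto *flatten = net.AddReshapeLayer(/*depth*/ 1, /*height*/ 1,
                                    /*width*/ 12 * 8 * 8, /*flattening*/ true);
```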
| void | Backward (const Tensor_t &input, const Matrix_t &groundTruth, const Matrix_t &weights) |
| | Function that executes the entire backward pass in the network. |

| void | Clear () |
| | Remove all layers from the network. |

| void | Forward (Tensor_t &input, bool applyDropout=false) |
| | Function that executes the entire forward pass in the network. |
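A hedged sketch of one forward/backward pass, reusing the hypothetical `net` and the `Architecture_t` alias from the constructor example; the tensor and matrix shapes, and how they are filled, are assumptions:

```cpp
using Tensor_t = Architecture_t::Tensor_t;   // TCpuTensor<Double_t> for the CPU backend
using Matrix_t = Architecture_t::Matrix_t;   // TCpuMatrix<Double_t>

Tensor_t input(net.GetBatchDepth(), net.GetBatchHeight(), net.GetBatchWidth());
Matrix_t groundTruth(net.GetBatchSize(), net.GetOutputWidth());
Matrix_t eventWeights(net.GetBatchSize(), 1);

// ... fill input, groundTruth and eventWeights for one batch ...

net.Forward(input, /*applyDropout*/ true);        // forward pass through all layers
net.Backward(input, groundTruth, eventWeights);   // back-propagate the gradients
```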
| size_t | GetBatchDepth () const |
| size_t | GetBatchHeight () const |
| size_t | GetBatchSize () const |
| | Getters. |

| size_t | GetBatchWidth () const |
| size_t | GetDepth () const |
| EInitialization | GetInitialization () const |
| size_t | GetInputDepth () const |
| size_t | GetInputHeight () const |
| size_t | GetInputWidth () const |

| Layer_t * | GetLayerAt (size_t i) |
| | Get the layer in the vector of layers at position i. |

| const Layer_t * | GetLayerAt (size_t i) const |
| std::vector< Layer_t * > & | GetLayers () |
| const std::vector< Layer_t * > & | GetLayers () const |
| ELossFunction | GetLossFunction () const |
| size_t | GetOutputWidth () const |
| ERegularization | GetRegularization () const |
| Scalar_t | GetWeightDecay () const |

| void | Initialize () |
| | Initialize the weights and biases of all layers, according to the chosen initialization scheme. |

| bool | IsTraining () const |
| Scalar_t | Loss (const Matrix_t &groundTruth, const Matrix_t &weights, bool includeRegularization=true) const |
| | Function for evaluating the loss, based on the activations stored in the last layer. |

| Scalar_t | Loss (Tensor_t &input, const Matrix_t &groundTruth, const Matrix_t &weights, bool inTraining=false, bool includeRegularization=true) |
| | Function for evaluating the loss, based on the propagation of the given input. |
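The two overloads differ only in whether a forward pass is run first; continuing the sketch above:

```cpp
// Loss of the activations already stored in the last layer (after Forward)...
auto lossStored = net.Loss(groundTruth, eventWeights);

// ...and loss including the propagation of a fresh input batch.
auto lossFresh = net.Loss(input, groundTruth, eventWeights,
                          /*inTraining*/ true, /*includeRegularization*/ true);
```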
| void | Prediction (Matrix_t &predictions, EOutputFunction f) const |
| | Prediction based on the activations stored in the last layer. |

| void | Prediction (Matrix_t &predictions, Tensor_t &input, EOutputFunction f) |
| | Prediction for the given inputs, based on what the network has learned. |
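A brief sketch, again using the placeholder objects from the earlier examples:

```cpp
// Fill 'predictions' from the stored activations, or run a forward pass on
// 'input' first; a sigmoid output function is assumed here.
Matrix_t predictions(net.GetBatchSize(), net.GetOutputWidth());
net.Prediction(predictions, EOutputFunction::kSigmoid);
net.Prediction(predictions, input, EOutputFunction::kSigmoid);
```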
| void | Print () const |
| | Print the deep net info. |

| Scalar_t | RegularizationTerm () const |
| | Function for computing the regularization term to be added to the loss function. |

| void | ResetTraining () |
| | Function that resets some training flags after looping over all the events, but not the weights. |
| void | SetBatchDepth (size_t batchDepth) |
| void | SetBatchHeight (size_t batchHeight) |
| void | SetBatchSize (size_t batchSize) |
| | Setters. |

| void | SetBatchWidth (size_t batchWidth) |

| void | SetDropoutProbabilities (const std::vector< Double_t > &probabilities) |
| | See the sketch after this list. |

| void | SetInitialization (EInitialization I) |
| void | SetInputDepth (size_t inputDepth) |
| void | SetInputHeight (size_t inputHeight) |
| void | SetInputWidth (size_t inputWidth) |
| void | SetLossFunction (ELossFunction J) |
| void | SetRegularization (ERegularization R) |
| void | SetWeightDecay (Scalar_t weightDecay) |
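An assumption-labelled sketch of SetDropoutProbabilities, taking one keep probability per layer in the order the layers were added:

```cpp
// Placeholder values: keep all units in the first and last layer,
// drop roughly 20% of the units in the two layers in between.
net.SetDropoutProbabilities({1.0, 0.8, 0.8, 1.0});
```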
| void | Update (Scalar_t learningRate) |
| | Function that will update the weights and biases in the layers that contain weights and biases. |
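Putting the pieces together, one plain gradient-descent training step could look roughly like the following; the batch loop and data filling are omitted and the learning rate is a placeholder:

```cpp
net.Initialize();                      // initialize weights and biases once

Double_t learningRate = 0.01;
// for each batch: fill input, groundTruth and eventWeights, then
net.Forward(input, /*applyDropout*/ true);
Double_t batchLoss = net.Loss(groundTruth, eventWeights);   // monitor the loss
net.Backward(input, groundTruth, eventWeights);             // compute gradients
net.Update(learningRate);                                   // gradient-descent step
```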