enum class TMVA::DNN::EActivationFunction { kIdentity = 0, kRelu = 1, kSigmoid = 2, kTanh = 3, kSymmRelu = 4, kSoftSign = 5, kGauss = 6 }
    Enum that represents layer activation functions.
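A minimal sketch of how the enumerators can be used; the helper below is ours, not part of TMVA, and assumes the declarations above come from the TMVA/DNN/Functions.h header.

    // Hypothetical helper (not part of TMVA): human-readable name for an
    // activation choice, e.g. for logging a network configuration.
    #include "TMVA/DNN/Functions.h"
    #include <string>

    std::string ActivationName(TMVA::DNN::EActivationFunction f)
    {
       using TMVA::DNN::EActivationFunction;
       switch (f) {
          case EActivationFunction::kIdentity: return "identity";
          case EActivationFunction::kRelu:     return "relu";
          case EActivationFunction::kSigmoid:  return "sigmoid";
          case EActivationFunction::kTanh:     return "tanh";
          case EActivationFunction::kSymmRelu: return "symmetric relu";
          case EActivationFunction::kSoftSign: return "softsign";
          case EActivationFunction::kGauss:    return "gauss";
       }
       return "unknown";
    }
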
enum class TMVA::DNN::EInitialization { kGauss = 'G', kUniform = 'U', kIdentity = 'I', kZero = 'Z', kGlorotNormal = 'X', kGlorotUniform = 'F' }
    Enum that represents the methods used to initialize the weight and bias matrices.
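The enumerators are single-character codes; below is a hedged sketch of decoding such a code, e.g. when parsing a configuration string. The helper name and the fallback choice are our own, not part of TMVA.

    // Hypothetical helper (not part of TMVA): map the single-character codes
    // listed above back to an EInitialization value. Unknown codes fall back
    // to kZero here by choice.
    #include "TMVA/DNN/Functions.h"

    TMVA::DNN::EInitialization InitializationFromChar(char c)
    {
       using TMVA::DNN::EInitialization;
       switch (c) {
          case 'G': return EInitialization::kGauss;
          case 'U': return EInitialization::kUniform;
          case 'I': return EInitialization::kIdentity;
          case 'X': return EInitialization::kGlorotNormal;
          case 'F': return EInitialization::kGlorotUniform;
          case 'Z':
          default:  return EInitialization::kZero;
       }
    }
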
enum class TMVA::DNN::ELossFunction { kCrossEntropy = 'C', kMeanSquaredError = 'R', kSoftmaxCrossEntropy = 'S' }
    Enum that represents objective functions for the net, i.e. the functions that compare the network output with the truth Y and whose value is minimized during training.
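A hedged sketch of the usual pairing between task type and loss function; the helper is hypothetical and not part of TMVA.

    // Hypothetical helper (not part of TMVA): mean squared error for
    // regression, cross entropy for binary classification, softmax cross
    // entropy for multi-class classification.
    #include "TMVA/DNN/Functions.h"

    TMVA::DNN::ELossFunction ChooseLoss(bool regression, bool multiclass)
    {
       using TMVA::DNN::ELossFunction;
       if (regression) return ELossFunction::kMeanSquaredError;
       if (multiclass) return ELossFunction::kSoftmaxCrossEntropy;
       return ELossFunction::kCrossEntropy;
    }
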
enum class TMVA::DNN::EOptimizer { kSGD = 0, kAdam = 1, kAdagrad = 2, kRMSProp = 3, kAdadelta = 4 }
    Enum representing the optimizer used for training.
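None of the free functions below takes an EOptimizer; the hedged sketch here only shows how user code might carry the choice around, using a hypothetical configuration struct that is not part of TMVA.

    // Hypothetical configuration struct (not part of TMVA): the chosen
    // optimizer together with hyper-parameters a training loop would need.
    #include "TMVA/DNN/Functions.h"
    #include <cstddef>

    struct TrainingConfig {
       TMVA::DNN::EOptimizer fOptimizer = TMVA::DNN::EOptimizer::kAdam;
       double fLearningRate = 1.e-3;
       std::size_t fBatchSize = 32;
    };
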
enum class TMVA::DNN::EOutputFunction { kIdentity = 'I', kSigmoid = 'S', kSoftmax = 'M' }
    Enum that represents output functions.
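A hedged sketch of matching the output function to the loss function, which keeps the combined output/loss gradient simple; the helper is hypothetical and not part of TMVA.

    // Hypothetical helper (not part of TMVA): identity with mean squared
    // error, sigmoid with cross entropy, softmax with softmax cross entropy.
    #include "TMVA/DNN/Functions.h"

    TMVA::DNN::EOutputFunction OutputForLoss(TMVA::DNN::ELossFunction f)
    {
       using namespace TMVA::DNN;
       switch (f) {
          case ELossFunction::kMeanSquaredError:    return EOutputFunction::kIdentity;
          case ELossFunction::kCrossEntropy:        return EOutputFunction::kSigmoid;
          case ELossFunction::kSoftmaxCrossEntropy: return EOutputFunction::kSoftmax;
       }
       return EOutputFunction::kIdentity;
    }
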
enum class TMVA::DNN::ERegularization { kNone = '0', kL1 = '1', kL2 = '2' }
    Enum representing the regularization type applied for a given layer.
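A small hypothetical helper (not part of TMVA) showing one way the enum might be chosen from a weight-decay setting.

    // Hypothetical helper (not part of TMVA): treat a zero weight-decay
    // coefficient as "no regularization", otherwise default to L2.
    #include "TMVA/DNN/Functions.h"

    TMVA::DNN::ERegularization RegularizationFor(double weightDecay)
    {
       using TMVA::DNN::ERegularization;
       return weightDecay == 0.0 ? ERegularization::kNone : ERegularization::kL2;
    }
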
template <typename Architecture_t>
void TMVA::DNN::addRegularizationGradients (typename Architecture_t::Matrix_t &A, const typename Architecture_t::Matrix_t &W, typename Architecture_t::Scalar_t weightDecay, ERegularization R)
    Add the regularization gradient corresponding to the weight matrix W to the matrix A.
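A minimal sketch of the call during the backward pass, assuming Architecture_t is one of the TMVA::DNN backends (e.g. TMVA::DNN::TCpu<Double_t>); the wrapper name is ours.

    // Hypothetical wrapper (not part of TMVA): add the weight-decay
    // contribution of one layer to the gradient of its weight matrix.
    #include "TMVA/DNN/Functions.h"

    template <typename Architecture_t>
    void AddWeightDecay(typename Architecture_t::Matrix_t &weightGradients,
                        const typename Architecture_t::Matrix_t &weights,
                        typename Architecture_t::Scalar_t weightDecay,
                        TMVA::DNN::ERegularization r = TMVA::DNN::ERegularization::kL2)
    {
       // weightGradients += gradient of the regularization term for `weights`
       TMVA::DNN::addRegularizationGradients<Architecture_t>(weightGradients, weights,
                                                             weightDecay, r);
    }
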
template <typename Architecture_t>
auto TMVA::DNN::evaluate (ELossFunction f, const typename Architecture_t::Matrix_t &Y, const typename Architecture_t::Matrix_t &output, const typename Architecture_t::Matrix_t &weights) -> decltype(Architecture_t::CrossEntropy(Y, output, weights))
    Compute the value of the objective function f for the given activations output of the output layer and the truth Y.
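A minimal sketch of evaluating the batch loss; the wrapper is ours, and the matrix shapes are assumed to follow the TMVA::DNN convention of one row per event in the batch.

    // Hypothetical wrapper (not part of TMVA): data term of the loss for one
    // batch, given the truth Y, the network output and the per-event weights.
    #include "TMVA/DNN/Functions.h"

    template <typename Architecture_t>
    typename Architecture_t::Scalar_t
    BatchLoss(TMVA::DNN::ELossFunction f,
              const typename Architecture_t::Matrix_t &Y,
              const typename Architecture_t::Matrix_t &output,
              const typename Architecture_t::Matrix_t &eventWeights)
    {
       // The explicit template argument is required: Architecture_t cannot be
       // deduced from its nested Matrix_t type.
       return TMVA::DNN::evaluate<Architecture_t>(f, Y, output, eventWeights);
    }
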
template <typename Architecture_t>
void TMVA::DNN::evaluate (typename Architecture_t::Matrix_t &A, EOutputFunction f, const typename Architecture_t::Matrix_t &X)
    Apply the given output function f to the values in the matrix X and write the result into the matrix A.
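A minimal sketch of turning last-layer activations into predictions; the wrapper and the default choice of kSigmoid are ours, not part of TMVA.

    // Hypothetical wrapper (not part of TMVA): predictions = f(lastLayerOutput),
    // applied element-wise (normalized across the outputs of each event for
    // kSoftmax). The input matrix is left untouched.
    #include "TMVA/DNN/Functions.h"

    template <typename Architecture_t>
    void ComputePredictions(typename Architecture_t::Matrix_t &predictions,
                            const typename Architecture_t::Matrix_t &lastLayerOutput,
                            TMVA::DNN::EOutputFunction f = TMVA::DNN::EOutputFunction::kSigmoid)
    {
       TMVA::DNN::evaluate<Architecture_t>(predictions, f, lastLayerOutput);
    }
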
template <typename Architecture_t>
void TMVA::DNN::evaluate (typename Architecture_t::Tensor_t &A, EActivationFunction f)
    Apply the given activation function in place to each value in the tensor A.
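A minimal sketch of the forward step through an activation, applied in place on the tensor of pre-activations; the wrapper is ours.

    // Hypothetical wrapper (not part of TMVA): overwrite the tensor with
    // f(values), element-wise.
    #include "TMVA/DNN/Functions.h"

    template <typename Architecture_t>
    void ApplyActivation(typename Architecture_t::Tensor_t &preActivations,
                         TMVA::DNN::EActivationFunction f)
    {
       TMVA::DNN::evaluate<Architecture_t>(preActivations, f);
    }
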
template <typename Architecture_t>
void TMVA::DNN::evaluateDerivative (typename Architecture_t::Tensor_t &B, EActivationFunction f, const typename Architecture_t::Tensor_t &A)
    Compute the first partial derivative of the activation function for the values given in tensor A and write the results into B.
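A minimal sketch of the matching backward step; the wrapper is ours, and B is assumed to have the same shape as A.

    // Hypothetical wrapper (not part of TMVA): writes the derivative of f,
    // evaluated element-wise at the values in A, into B. Multiplying B
    // element-wise with the incoming gradient then propagates the gradient
    // through the activation.
    #include "TMVA/DNN/Functions.h"

    template <typename Architecture_t>
    void ActivationDerivative(typename Architecture_t::Tensor_t &B,
                              const typename Architecture_t::Tensor_t &A,
                              TMVA::DNN::EActivationFunction f)
    {
       TMVA::DNN::evaluateDerivative<Architecture_t>(B, f, A);
    }
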
template <typename Architecture_t>
void TMVA::DNN::evaluateGradients (typename Architecture_t::Matrix_t &dY, ELossFunction f, const typename Architecture_t::Matrix_t &Y, const typename Architecture_t::Matrix_t &output, const typename Architecture_t::Matrix_t &weights)
    Compute the gradient of the given loss function f for the activations output of the output layer and the truth Y, and write the results into dY.
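A minimal sketch of the first step of backpropagation; the wrapper is ours.

    // Hypothetical wrapper (not part of TMVA): dY receives the gradient of
    // the loss with respect to the network output, taking the per-event
    // weights into account; the layer gradients are then derived from it.
    #include "TMVA/DNN/Functions.h"

    template <typename Architecture_t>
    void LossGradient(typename Architecture_t::Matrix_t &dY,
                      TMVA::DNN::ELossFunction f,
                      const typename Architecture_t::Matrix_t &Y,
                      const typename Architecture_t::Matrix_t &output,
                      const typename Architecture_t::Matrix_t &eventWeights)
    {
       TMVA::DNN::evaluateGradients<Architecture_t>(dY, f, Y, output, eventWeights);
    }
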
template <typename Architecture_t>
void TMVA::DNN::initialize (typename Architecture_t::Matrix_t &A, EInitialization m)
    Initialize the matrix A according to the initialization method m.
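A minimal sketch of initializing a layer's parameters; the wrapper and the particular choice of schemes are ours, not necessarily TMVA's defaults.

    // Hypothetical wrapper (not part of TMVA): Glorot (Xavier) uniform
    // initialization for the weights, zeros for the biases.
    #include "TMVA/DNN/Functions.h"

    template <typename Architecture_t>
    void InitializeLayer(typename Architecture_t::Matrix_t &weights,
                         typename Architecture_t::Matrix_t &biases)
    {
       TMVA::DNN::initialize<Architecture_t>(weights, TMVA::DNN::EInitialization::kGlorotUniform);
       TMVA::DNN::initialize<Architecture_t>(biases,  TMVA::DNN::EInitialization::kZero);
    }
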
template <typename Architecture_t>
auto TMVA::DNN::regularization (const typename Architecture_t::Matrix_t &A, ERegularization R) -> decltype(Architecture_t::L1Regularization(A))
    Evaluate the regularization functional for a given weight matrix.
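A minimal sketch that ties the section together: the regularized batch loss is the data term from evaluate() plus the weight-decay penalty from regularization(), summed over the weight matrices of all layers. The wrapper is ours; Architecture_t is assumed to be a TMVA::DNN backend such as TMVA::DNN::TCpu<Double_t>.

    // Hypothetical wrapper (not part of TMVA): loss + weightDecay * penalty,
    // with the penalty accumulated over all layer weight matrices.
    #include "TMVA/DNN/Functions.h"
    #include <vector>

    template <typename Architecture_t>
    typename Architecture_t::Scalar_t
    RegularizedLoss(TMVA::DNN::ELossFunction f,
                    const typename Architecture_t::Matrix_t &Y,
                    const typename Architecture_t::Matrix_t &output,
                    const typename Architecture_t::Matrix_t &eventWeights,
                    const std::vector<typename Architecture_t::Matrix_t> &layerWeights,
                    typename Architecture_t::Scalar_t weightDecay,
                    TMVA::DNN::ERegularization r)
    {
       auto loss = TMVA::DNN::evaluate<Architecture_t>(f, Y, output, eventWeights);
       for (const auto &W : layerWeights)
          loss += weightDecay * TMVA::DNN::regularization<Architecture_t>(W, r);
       return loss;
    }
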