#ifndef TMVA_DNN_FUNCTIONS
#define TMVA_DNN_FUNCTIONS
...
template<typename Architecture_t>
inline void evaluate(typename Architecture_t::Matrix_t &A, EActivationFunction f)
...
template<typename Architecture_t>
inline void evaluateDerivative(typename Architecture_t::Matrix_t &B, EActivationFunction f,
                               const typename Architecture_t::Matrix_t &A)
...
template<typename Architecture_t>
inline void evaluate(typename Architecture_t::Matrix_t &A, EOutputFunction f,
                     const typename Architecture_t::Matrix_t &X)
...
template <typename Architecture_t>
inline auto evaluate(ELossFunction f, const typename Architecture_t::Matrix_t &Y,
                     const typename Architecture_t::Matrix_t &output,
                     const typename Architecture_t::Matrix_t &weights)
   -> decltype(Architecture_t::CrossEntropy(Y, output, weights))
...
template <typename Architecture_t>
inline void evaluateGradients(typename Architecture_t::Matrix_t &dY, ELossFunction f,
                              const typename Architecture_t::Matrix_t &Y,
                              const typename Architecture_t::Matrix_t &output,
                              const typename Architecture_t::Matrix_t &weights)
{
   ...
   Architecture_t::SoftmaxCrossEntropyGradients(dY, Y, output, weights);
   ...
}

template<typename Architecture_t>
inline auto regularization(const typename Architecture_t::Matrix_t &A, ERegularization R)
   -> decltype(Architecture_t::L1Regularization(A))
{
   ...
   return Architecture_t::L1Regularization(A);
   ...
   return Architecture_t::L2Regularization(A);
   ...
}

template<typename Architecture_t>
inline void addRegularizationGradients(typename Architecture_t::Matrix_t &A,
                                       const typename Architecture_t::Matrix_t &W,
                                       typename Architecture_t::Scalar_t weightDecay,
                                       ERegularization R)
{
   ...
   Architecture_t::AddL1RegularizationGradients(A, W, weightDecay);
   ...
   Architecture_t::AddL2RegularizationGradients(A, W, weightDecay);
   ...
}

template<typename Architecture_t>
inline void initialize(typename Architecture_t::Matrix_t &A, EInitialization m)
...
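The pattern in the listing above is uniform: each free function only switches on its enum argument (EActivationFunction, ELossFunction, ERegularization, ...) and forwards the actual numerical work to a static method of the Architecture_t backend, which is what lets the same calling code drive different low-level implementations. The following is a minimal, self-contained sketch of that dispatch pattern, not the actual TMVA code: the names EToyLoss, ToyMatrix, ToyArch and evaluateLoss are invented for illustration, and the event-weight matrix of the real signatures is omitted.

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical stand-ins for the TMVA types; not the real library code.
enum class EToyLoss { kMeanSquaredError, kCrossEntropy };

struct ToyMatrix {
   std::size_t fRows, fCols;
   std::vector<double> fData;
   ToyMatrix(std::size_t r, std::size_t c) : fRows(r), fCols(c), fData(r * c, 0.0) {}
   double &operator()(std::size_t i, std::size_t j) { return fData[i * fCols + j]; }
};

// "Architecture" backend: all numerical work lives in static methods.
struct ToyArch {
   using Matrix_t = ToyMatrix;
   using Scalar_t = double;

   static Scalar_t MeanSquaredError(const Matrix_t &Y, const Matrix_t &output)
   {
      Scalar_t sum = 0.0;
      for (std::size_t i = 0; i < Y.fData.size(); ++i) {
         Scalar_t d = Y.fData[i] - output.fData[i];
         sum += d * d;
      }
      return sum / Y.fData.size();
   }

   static Scalar_t CrossEntropy(const Matrix_t &Y, const Matrix_t &output)
   {
      Scalar_t sum = 0.0;
      for (std::size_t i = 0; i < Y.fData.size(); ++i) {
         Scalar_t y = Y.fData[i], p = output.fData[i];
         sum -= y * std::log(p) + (1.0 - y) * std::log(1.0 - p);
      }
      return sum / Y.fData.size();
   }
};

// Free function in the style of evaluate(ELossFunction, ...): the enum is
// resolved at run time, the backend is resolved at compile time.
template <typename Architecture_t>
auto evaluateLoss(EToyLoss f, const typename Architecture_t::Matrix_t &Y,
                  const typename Architecture_t::Matrix_t &output)
   -> decltype(Architecture_t::CrossEntropy(Y, output))
{
   switch (f) {
   case EToyLoss::kMeanSquaredError: return Architecture_t::MeanSquaredError(Y, output);
   case EToyLoss::kCrossEntropy:     return Architecture_t::CrossEntropy(Y, output);
   }
   return 0.0;
}

int main()
{
   ToyMatrix Y(1, 2), output(1, 2);
   Y(0, 0) = 1.0;      Y(0, 1) = 0.0;
   output(0, 0) = 0.8; output(0, 1) = 0.1;
   std::cout << "MSE = " << evaluateLoss<ToyArch>(EToyLoss::kMeanSquaredError, Y, output) << "\n"
             << "CE  = " << evaluateLoss<ToyArch>(EToyLoss::kCrossEntropy, Y, output) << "\n";
}

Swapping ToyArch for another type with the same static interface changes the numerics without touching the dispatch code, which is the role Architecture_t plays in the header above.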
The symbols referenced in the listing above, with their brief descriptions:

void evaluateDerivative(typename Architecture_t::Matrix_t &B, EActivationFunction f, const typename Architecture_t::Matrix_t &A)
Compute the first partial derivative of the activation function for the values given in matrix A and write the results into B.
void evaluate(typename Architecture_t::Matrix_t &A, EActivationFunction f)
Apply the given activation function to each value in the given matrix A.
std::shared_ptr< std::function< double(double)> > SoftSign
double weightDecay(double error, ItWeight itWeight, ItWeight itWeightEnd, double factorWeightDecay, EnumRegularization eRegularization)
Compute the weight decay for regularization (L1 or L2).
void evaluateGradients(typename Architecture_t::Matrix_t &dY, ELossFunction f, const typename Architecture_t::Matrix_t &Y, const typename Architecture_t::Matrix_t &output, const typename Architecture_t::Matrix_t &weights)
Compute the gradient of the given output function f for the given activations output of the output layer and the truth Y, and write the results into dY.
auto regularization(const typename Architecture_t::Matrix_t &A, ERegularization R) -> decltype(Architecture_t::L1Regularization(A))
Evaluate the regularization functional for a given weight matrix.
std::shared_ptr< std::function< double(double)> > Tanh
void Copy(void *source, void *dest)
void addRegularizationGradients(typename Architecture_t::Matrix_t &A, const typename Architecture_t::Matrix_t &W, typename Architecture_t::Scalar_t weightDecay, ERegularization R)
Add the regularization gradient corresponding to weight matrix W to the matrix A.
EOutputFunction
Enum that represents output functions.
ELossFunction
Enum that represents objective (loss) functions for the net.
TMVA
Abstract ClassifierFactory template that handles arbitrary types.
std::shared_ptr< std::function< double(double)> > Sigmoid
ERegularization
Enum representing the regularization type applied for a given layer.
EActivationFunction
Enum that represents layer activation functions.
std::shared_ptr< std::function< double(double)> > Gauss
void initialize(typename Architecture_t::Matrix_t &A, EInitialization m)
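The regularization helpers in the list come in pairs for a reason: whatever penalty regularization() contributes to the reported loss must have its derivative added to the weight gradients by addRegularizationGradients() with the same weight-decay factor, otherwise the objective being minimized and the objective being reported drift apart. The sketch below shows that bookkeeping on plain std::vector weights; the helper names and the exact decay conventions (decay * sum |w| for L1, decay * sum w^2 for L2) are assumptions of this sketch, not taken from the TMVA implementation.

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical helpers, not TMVA code: penalty terms and their gradients.

// L1 penalty decay * sum_i |w_i|; its gradient contribution is decay * sign(w_i).
double l1Penalty(const std::vector<double> &w, double decay)
{
   double s = 0.0;
   for (double x : w) s += std::fabs(x);
   return decay * s;
}

void addL1Gradients(std::vector<double> &g, const std::vector<double> &w, double decay)
{
   for (std::size_t i = 0; i < w.size(); ++i)
      g[i] += decay * (w[i] > 0 ? 1.0 : (w[i] < 0 ? -1.0 : 0.0));
}

// L2 penalty decay * sum_i w_i^2; its gradient contribution is 2 * decay * w_i.
double l2Penalty(const std::vector<double> &w, double decay)
{
   double s = 0.0;
   for (double x : w) s += x * x;
   return decay * s;
}

void addL2Gradients(std::vector<double> &g, const std::vector<double> &w, double decay)
{
   for (std::size_t i = 0; i < w.size(); ++i)
      g[i] += 2.0 * decay * w[i];
}

int main()
{
   const double decay = 0.01, rate = 0.1;
   std::vector<double> w = {0.5, -1.0, 2.0};   // weights of one layer
   std::vector<double> g = {0.1, 0.2, -0.3};   // gradients of the data loss w.r.t. w

   double dataLoss = 0.42;                     // placeholder value
   double loss = dataLoss + l2Penalty(w, decay);   // penalty enters the reported loss ...
   addL2Gradients(g, w, decay);                    // ... and its derivative enters the gradients

   for (std::size_t i = 0; i < w.size(); ++i)      // plain gradient-descent step
      w[i] -= rate * g[i];

   std::cout << "regularized loss: " << loss << ", first weight after step: " << w[0] << "\n";
}

In the header above this pairing is expressed through the ERegularization enum: regularization() and addRegularizationGradients() switch on the same value, so the two sides stay consistent as long as the same enum value and weightDecay factor are passed to both.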