// Use VDT's fast, vectorisable tanh when it is available; otherwise fall back
// to the standard-library implementation. Note that the macro hard-codes the
// argument name `x`, so it can only be expanded where a variable `x` is in scope.
#ifdef R__HAS_VDT
#define TANH_IMPL_X vdt::fast_tanhf(x)
#else
#define TANH_IMPL_X tanh(x)
#endif
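// Hedged aside (not part of this file): a standalone comparison of the two
// back-ends, assuming VDT is installed and "vdt/vdtMath.h" is its umbrella
// header. vdt::fast_tanhf trades a few ULPs of accuracy for speed and
// vectorisability.
//
//   #include <cmath>
//   #include <cstdio>
//   #include "vdt/vdtMath.h"
//
//   int main() {
//      const float xs[] = {-2.0f, -0.5f, 0.0f, 0.5f, 2.0f};
//      for (float x : xs)
//         std::printf("x=% .1f  std::tanh=% .8f  vdt::fast_tanhf=% .8f\n",
//                     x, std::tanh(x), vdt::fast_tanhf(x));
//   }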
template<typename AFloat>
void TCpu<AFloat>::ActivationFunctionForward(Tensor_t &X, EActivationFunction activFunct,
                                             const ActivationDescriptor_t /*activationDescr*/,
                                             const double /*coef*/, const AFloat /*alpha*/,
                                             const AFloat /*beta*/)
{
   // The descriptor and the coef/alpha/beta scaling parameters are accepted
   // for interface compatibility but not used by the CPU backend: the
   // activation is applied in place on X through the generic evaluator.
   TMVA::DNN::evaluate<TCpu<AFloat>>(X, activFunct);
}
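// Usage sketch (hypothetical: tensor construction and the descriptor type
// vary across ROOT versions, so treat the setup as an assumption):
//
//   TCpu<float>::Tensor_t X = ...;            // pre-activations, filled elsewhere
//   TCpu<float>::ActivationFunctionForward(
//         X, EActivationFunction::kRelu,      // enumerator name per TMVA convention
//         ActivationDescriptor_t{});          // coef/alpha/beta keep their defaults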
template<typename AFloat>
void TCpu<AFloat>::ActivationFunctionBackward(Tensor_t &dX, const Tensor_t & /*Y*/,
                                              const Tensor_t &dY, const Tensor_t &X,
                                              EActivationFunction activFunct,
                                              const ActivationDescriptor_t /*activationDescr*/,
                                              const AFloat /*alpha*/, const AFloat /*beta*/)
{
   // Chain rule: dX = f'(X) * dY, elementwise. First fill dX with the
   // derivative evaluated on the forward input X, then multiply by the
   // incoming gradient dY (Hadamard product). The forward output Y is not
   // used by the CPU backend.
   TMVA::DNN::evaluateDerivative<TCpu<AFloat>>(dX, activFunct, X);
   Hadamard(dX, dY);
}
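// Worked single-element example for kSigmoid at x = 0 with upstream gradient
// dY = 2.0: evaluateDerivative fills dX with sig(0) * (1 - sig(0)) = 0.25,
// and the Hadamard step then leaves dX = 0.25 * 2.0 = 0.5.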
template<typename AFloat>
void TCpu<AFloat>::IdentityDerivative(Tensor_t &B, const Tensor_t & /*A*/)
{
   // d/dx x = 1: fill B with ones, independent of the input.
   auto f = [](AFloat) { return 1.0; };
   B.Map(f);
}
template<typename AFloat>
void TCpu<AFloat>::Relu(Tensor_t &B)
{
   // max(0, x), applied in place.
   auto f = [](AFloat x) { return (x < 0.0) ? 0.0 : x; };
   B.Map(f);
}
template<typename AFloat>
void TCpu<AFloat>::ReluDerivative(Tensor_t &B, const Tensor_t &A)
{
   // Step function: 0 for x < 0, 1 otherwise (the kink at x = 0 is assigned 1).
   auto f = [](AFloat x) { return (x < 0.0) ? 0.0 : 1.0; };
   B.MapFrom(f, A);
}
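// Illustrative values for Relu / ReluDerivative on a few inputs:
//
//      x     Relu(x)   ReluDerivative(x)
//    -2.0      0.0            0.0
//     0.0      0.0            1.0   (convention chosen by the comparison above)
//     3.5      3.5            1.0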
template<typename AFloat>
void TCpu<AFloat>::Sigmoid(Tensor_t &B)
{
   // Logistic function 1 / (1 + e^-x), applied in place.
   auto f = [](AFloat x) { return 1.0 / (1.0 + exp(-x)); };
   B.Map(f);
}
template<typename AFloat>
void TCpu<AFloat>::SigmoidDerivative(Tensor_t &B, const Tensor_t &A)
{
   auto f = [](AFloat x) {
      AFloat sig = 1.0 / (1.0 + exp(-x));
      return sig * (1.0 - sig);
   };
   B.MapFrom(f, A);
}
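// Why sig * (1 - sig): differentiating sig(x) = (1 + e^-x)^-1 gives
//
//   sig'(x) = e^-x / (1 + e^-x)^2
//           = [1 / (1 + e^-x)] * [e^-x / (1 + e^-x)]
//           = sig(x) * (1 - sig(x)),
//
// which lets the derivative reuse a single evaluation of the sigmoid.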
template<typename AFloat>
void TCpu<AFloat>::Tanh(Tensor_t &B)
{
   // Uses TANH_IMPL_X from the top of the file (vdt::fast_tanhf or tanh).
   auto f = [](AFloat x) { return TANH_IMPL_X; };
   B.Map(f);
}
template<typename AFloat>
void TCpu<AFloat>::TanhDerivative(Tensor_t &B, const Tensor_t &A)
{
   // tanh'(x) = 1 - tanh(x)^2
   auto f = [](AFloat x) {
      AFloat t = TANH_IMPL_X;
      return 1 - t * t;
   };
   B.MapFrom(f, A);
}
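// Hedged verification sketch: any derivative lambda in this file can be
// checked against a central finite difference with plain std:: math,
// independently of the TMVA tensor types. For tanh:
//
//   #include <cmath>
//   #include <cassert>
//
//   int main() {
//      double x = 0.3, h = 1e-6;
//      double t = std::tanh(x);
//      double analytic = 1.0 - t * t;                                // as above
//      double numeric  = (std::tanh(x + h) - std::tanh(x - h)) / (2.0 * h);
//      assert(std::fabs(analytic - numeric) < 1e-9);
//   }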
template<typename AFloat>
void TCpu<AFloat>::SymmetricRelu(Tensor_t &B)
{
   // Symmetric ReLU is the absolute value |x|, applied in place.
   auto f = [](AFloat x) { return fabs(x); };
   B.Map(f);
}
template<typename AFloat>
void TCpu<AFloat>::SymmetricReluDerivative(Tensor_t &B, const Tensor_t &A)
{
   // sign(x): -1 for x < 0, +1 otherwise (x = 0 is assigned +1).
   auto f = [](AFloat x) {
      return (x < 0.0) ? -1.0 : 1.0;
   };
   B.MapFrom(f, A);
}
template<typename AFloat>
void TCpu<AFloat>::SoftSign(Tensor_t &B)
{
   // Softsign x / (1 + |x|): tanh-like, saturating towards +-1, applied in place.
   auto f = [](AFloat x) { return x / (1 + fabs(x)); };
   B.Map(f);
}
template<typename AFloat>
void TCpu<AFloat>::SoftSignDerivative(Tensor_t &B, const Tensor_t &A)
{
   // d/dx [x / (1 + |x|)] = 1 / (1 + |x|)^2
   auto f = [](AFloat x) {
      x = 1.0 + fabs(x);
      return 1.0 / (x * x);
   };
   B.MapFrom(f, A);
}
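// Why 1 / (1 + |x|)^2: for x > 0 the quotient rule on x / (1 + x) gives
//
//   [(1 + x) - x] / (1 + x)^2 = 1 / (1 + x)^2,
//
// and the x < 0 branch (x / (1 - x)) works out to the same expression in |x|,
// so the derivative is continuous, and equal to 1, at x = 0.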
template<typename AFloat>
void TCpu<AFloat>::Gauss(Tensor_t &B)
{
   // Gaussian bump e^(-x^2), applied in place.
   auto f = [](AFloat x) { return exp(-x * x); };
   B.Map(f);
}
template<typename AFloat>
void TCpu<AFloat>::GaussDerivative(Tensor_t &B, const Tensor_t &A)
{
   // Chain rule on e^(-x^2): the derivative is -2x * e^(-x^2).
   auto f = [](AFloat x) { return -2.0 * x * exp(-x * x); };
   B.MapFrom(f, A);
}
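// Usage sketch (hypothetical setup: tensor construction and copy semantics
// differ across ROOT versions, so treat the details as assumptions). Note the
// order: the derivative reads A, so compute it before the in-place forward pass.
//
//   TCpuTensor<float> A = ...;            // pre-activations, filled elsewhere
//   TCpuTensor<float> D = ...;            // tensor with the same shape as A
//   TCpu<float>::GaussDerivative(D, A);   // D <- -2*A*exp(-A*A), read from A
//   TCpu<float>::Gauss(A);                // A <- exp(-A*A), in place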
Related declarations from the TCpu class (forward activations transform the tensor in place; each *Derivative fills B with f'(A) evaluated elementwise on A):

static void ActivationFunctionForward(Tensor_t &X, EActivationFunction activFunct, const ActivationDescriptor_t activationDescr, const double coef=0.0, const Scalar_t alpha=1, const Scalar_t beta=0)
static void ActivationFunctionBackward(Tensor_t &dX, const Tensor_t &Y, const Tensor_t &dY, const Tensor_t &X, EActivationFunction activFunct, const ActivationDescriptor_t activationDescr, const Scalar_t alpha=1, const Scalar_t beta=0)
Computes the gradient of the activation function.
static void IdentityDerivative(Tensor_t &B, const Tensor_t &A)
static void Relu(Tensor_t &B)
static void ReluDerivative(Tensor_t &B, const Tensor_t &A)
static void Sigmoid(Tensor_t &B)
static void SigmoidDerivative(Tensor_t &B, const Tensor_t &A)
static void Tanh(Tensor_t &B)
static void TanhDerivative(Tensor_t &B, const Tensor_t &A)
static void SymmetricRelu(Tensor_t &B)
static void SymmetricReluDerivative(Tensor_t &B, const Tensor_t &A)
static void SoftSign(Tensor_t &B)
static void SoftSignDerivative(Tensor_t &B, const Tensor_t &A)
static void Gauss(Tensor_t &B)
static void GaussDerivative(Tensor_t &B, const Tensor_t &A)
EActivationFunction
Enum that represents layer activation functions.
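A minimal dispatch sketch, assuming TMVA's usual enumerator names (kIdentity, kRelu, kSigmoid, kTanh, kSymmRelu, kSoftSign, kGauss) and a tensor X prepared elsewhere; the enum value selects the activation at run time through the generic evaluator:

   using Arch = TMVA::DNN::TCpu<float>;
   Arch::Tensor_t X = /* ... filled elsewhere ... */;
   TMVA::DNN::evaluate<Arch>(X, TMVA::DNN::EActivationFunction::kRelu);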