// TCpu implementation of the DNN activation functions and their derivatives.

template<typename AFloat>
void TCpu<AFloat>::ActivationFunctionForward(Tensor_t &X, EActivationFunction activFunct,
                                             const ActivationDescriptor_t /*activationDescr*/,
                                             const double /*coef*/, const AFloat /*alpha*/, const AFloat /*beta*/)
{
   // The descriptor and the scaling parameters are not used by the CPU backend;
   // the activation is applied in place on X.
   TMVA::DNN::evaluate<TCpu<AFloat>>(X, activFunct);
}
 
template<typename AFloat>
void TCpu<AFloat>::ActivationFunctionBackward(Tensor_t &dX, const Tensor_t & /*Y*/, const Tensor_t &dY,
                                              const Tensor_t &X, EActivationFunction activFunct,
                                              const ActivationDescriptor_t /*activationDescr*/,
                                              const AFloat /*alpha*/, const AFloat /*beta*/)
{
   // Chain rule: first dX = f'(X), then the element-wise product dX = f'(X) * dY.
   TMVA::DNN::evaluateDerivative<TCpu<AFloat>>(dX, activFunct, X);
   Hadamard(dX, dY);
}
 
 
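As an illustration of what the forward/backward pair computes, here is a minimal, self-contained sketch of the same element-wise chain rule for the ReLU case, written with plain std::vector buffers; reluBackwardSketch is a hypothetical helper, not part of TCpu.

// Illustration only: dX[i] = f'(X[i]) * dY[i], i.e. what
// evaluateDerivative followed by Hadamard produce on the CPU backend.
#include <vector>
#include <cstddef>

void reluBackwardSketch(std::vector<double> &dX,        // gradient w.r.t. the layer input (output)
                        const std::vector<double> &dY,  // gradient w.r.t. the activation output
                        const std::vector<double> &X)   // input saved from the forward pass
{
   for (std::size_t i = 0; i < X.size(); ++i) {
      const double fprime = (X[i] < 0.0) ? 0.0 : 1.0; // ReLU derivative
      dX[i] = fprime * dY[i];                         // element-wise (Hadamard) product
   }
}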
template<typename AFloat>
void TCpu<AFloat>::IdentityDerivative(Tensor_t &B, const Tensor_t & /*A*/)
{
   // The derivative of the identity is 1 everywhere.
   auto f = [](AFloat) { return 1.0; };
   B.Map(f);
}
 
template<typename AFloat>
void TCpu<AFloat>::Relu(Tensor_t &B)
{
   auto f = [](AFloat x) { return (x < 0.0) ? 0.0 : x; };
   B.Map(f);
}
 
 
template<typename AFloat>
void TCpu<AFloat>::ReluDerivative(Tensor_t &B, const Tensor_t &A)
{
   auto f = [](AFloat x) { return (x < 0.0) ? 0.0 : 1.0; };
   B.MapFrom(f, A);
}
 
 
template<typename AFloat>
void TCpu<AFloat>::Sigmoid(Tensor_t &B)
{
   auto f = [](AFloat x) { return 1.0 / (1.0 + exp(-x)); };
   B.Map(f);
}
 
template<typename AFloat>
void TCpu<AFloat>::SigmoidDerivative(Tensor_t &B, const Tensor_t &A)
{
   auto f = [](AFloat x) {
      AFloat sig = 1.0 / (1.0 + exp(-x));
      return sig * (1.0 - sig);
   };
   B.MapFrom(f, A);
}
 
 
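The closed form above uses the identity sig'(x) = sig(x) * (1 - sig(x)), so the sigmoid only has to be evaluated once per element. A minimal, self-contained check (illustration only, not part of the ROOT sources) compares it against a central finite difference:

#include <cmath>
#include <cstdio>

int main()
{
   const double x = 0.3, h = 1e-6;
   auto sig = [](double v) { return 1.0 / (1.0 + std::exp(-v)); };
   const double analytic = sig(x) * (1.0 - sig(x));               // closed-form derivative
   const double numeric  = (sig(x + h) - sig(x - h)) / (2.0 * h); // central difference
   std::printf("analytic = %.8f  numeric = %.8f\n", analytic, numeric);
   return 0;
}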
template<typename AFloat>
void TCpu<AFloat>::Tanh(Tensor_t &B)
{
   auto f = [](AFloat x) { return tanh(x); };
   B.Map(f);
}
 
 
template<typename AFloat>
void TCpu<AFloat>::TanhDerivative(Tensor_t &B, const Tensor_t &A)
{
   auto f = [](AFloat x) {
      AFloat t = tanh(x);
      return 1 - t * t;
   };
   B.MapFrom(f, A);
}
 
#ifdef R__HAS_VDT
// When VDT is available, use its fast tanh approximations.
template <>
void TCpu<float>::FastTanh(Tensor_t &B)
{
   auto f = [](float x) { return vdt::fast_tanhf(x); };
   B.Map(f);
}

template <>
void TCpu<double>::FastTanh(Tensor_t &B)
{
   auto f = [](double x) { return vdt::fast_tanh(x); };
   B.Map(f);
}
 
template <>
void TCpu<float>::FastTanhDerivative(Tensor_t &B, const Tensor_t &A)
{
   auto f = [](float x) {
      double t = vdt::fast_tanhf(x);
      return 1 - t * t;
   };
   B.MapFrom(f, A);
}

template <>
void TCpu<double>::FastTanhDerivative(Tensor_t &B, const Tensor_t &A)
{
   auto f = [](double x) {
      double t = vdt::fast_tanh(x);
      return 1 - t * t;
   };
   B.MapFrom(f, A);
}
 
#else
// Without VDT, the fast variants fall back to the standard tanh implementation.
template <typename AFloat>
void TCpu<AFloat>::FastTanh(Tensor_t &B) { TCpu<AFloat>::Tanh(B); }

template <typename AFloat>
void TCpu<AFloat>::FastTanhDerivative(Tensor_t &B, const Tensor_t &A) { TCpu<AFloat>::TanhDerivative(B, A); }
#endif // R__HAS_VDT
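Both the exact and the fast variants use the same identity for the derivative: d/dx tanh(x) = 1 - tanh^2(x), so a single (fast) tanh evaluation per element is sufficient.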
template<typename AFloat>
void TCpu<AFloat>::SymmetricRelu(Tensor_t &B)
{
   auto f = [](AFloat x) { return fabs(x); };
   B.Map(f);
}
 
 
template<typename AFloat>
void TCpu<AFloat>::SymmetricReluDerivative(Tensor_t &B, const Tensor_t &A)
{
   auto f = [](AFloat x) { return (x < 0.0) ? -1.0 : 1.0; };
   B.MapFrom(f, A);
}
 
 
template<typename AFloat>
void TCpu<AFloat>::SoftSign(Tensor_t &B)
{
   auto f = [](AFloat x) { return x / (1 + fabs(x)); };
   B.Map(f);
}
 
 
template<typename AFloat>
void TCpu<AFloat>::SoftSignDerivative(Tensor_t &B, const Tensor_t &A)
{
   auto f = [](AFloat x) {
      x = 1.0 + fabs(x);
      return 1.0 / (x * x);
   };
   B.MapFrom(f, A);
}
 
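The lambda in SoftSignDerivative is the quotient-rule result for the softsign above: d/dx [x / (1 + |x|)] = 1 / (1 + |x|)^2, with the two one-sided derivatives agreeing at x = 0.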
template<typename AFloat>
void TCpu<AFloat>::Gauss(Tensor_t &B)
{
   auto f = [](AFloat x) { return exp(-x * x); };
   B.Map(f);
}
 
 
template<typename AFloat>
void TCpu<AFloat>::GaussDerivative(Tensor_t &B, const Tensor_t &A)
{
   auto f = [](AFloat x) { return -2.0 * x * exp(-x * x); };
   B.MapFrom(f, A);
}
 
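Here the chain rule gives d/dx exp(-x^2) = -2x * exp(-x^2), which is exactly the expression mapped over the elements.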
 
Referenced declarations (TCpu interface):

void Map(Function_t &f)
   Map the given function over the matrix elements.
static void ActivationFunctionForward(Tensor_t &X, EActivationFunction activFunct, const ActivationDescriptor_t activationDescr, const double coef=0.0, const Scalar_t alpha=1, const Scalar_t beta=0)
static void ActivationFunctionBackward(Tensor_t &dX, const Tensor_t &Y, const Tensor_t &dY, const Tensor_t &X, EActivationFunction activFunct, const ActivationDescriptor_t activationDescr, const Scalar_t alpha=1, const Scalar_t beta=0)
   Computes the gradient of the activation function.
static void IdentityDerivative(Tensor_t &B, const Tensor_t &A)
static void Relu(Tensor_t &B)
static void ReluDerivative(Tensor_t &B, const Tensor_t &A)
static void Sigmoid(Tensor_t &B)
static void SigmoidDerivative(Tensor_t &B, const Tensor_t &A)
static void Tanh(Tensor_t &B)
static void TanhDerivative(Tensor_t &B, const Tensor_t &A)
static void FastTanh(Tensor_t &B)
static void FastTanhDerivative(Tensor_t &B, const Tensor_t &A)
static void SymmetricRelu(Tensor_t &B)
static void SymmetricReluDerivative(Tensor_t &B, const Tensor_t &A)
static void SoftSign(Tensor_t &B)
static void SoftSignDerivative(Tensor_t &B, const Tensor_t &A)
static void Gauss(Tensor_t &B)
static void GaussDerivative(Tensor_t &B, const Tensor_t &A)
EActivationFunction
   Enum that represents layer activation functions.