31template<
typename AFloat>
34 const double ,
const AFloat ,
const AFloat )
40template<
typename AFloat>
45 const AFloat ,
const AFloat )
55template<
typename AFloat>
59 auto f = [](AFloat) {
return 1.0;};
64template<
typename AFloat>
67 auto f = [](AFloat
x) {
return (
x < 0.0) ? 0.0 :
x;};
72template<
typename AFloat>
76 auto f = [](AFloat
x) {
return (
x < 0.0) ? 0.0 : 1.0;};
81template<
typename AFloat>
84 auto f = [](AFloat
x) {
return 1.0 / (1.0 + exp(-
x));};
89template<
typename AFloat>
93 auto f = [](AFloat
x) {
94 AFloat sig = 1.0 / (1.0 + exp(-
x));
95 return sig * (1.0 - sig);
101template<
typename AFloat>
104 auto f = [](AFloat
x) {
return tanh(
x);};
109template<
typename AFloat>
113 auto f = [](AFloat
x) {
125 auto f = [](
float x) {
return vdt::fast_tanhf(
x); };
131 auto f = [](
double x) {
return vdt::fast_tanh(
x); };
139 auto f = [](
float x) {
140 double t = vdt::fast_tanhf(
x);
148 auto f = [](
double x) {
149 double t = vdt::fast_tanh(
x);
157template <
typename AFloat>
164template <
typename AFloat>
172template<
typename AFloat>
175 auto f = [](AFloat
x) {
return fabs(
x);};
180template<
typename AFloat>
184 auto f = [](AFloat
x) {
185 return (
x < 0.0) ? -1.0 : 1.0;
191template<
typename AFloat>
194 auto f = [](AFloat
x) {
return x / (1 + fabs(
x));};
199template<
typename AFloat>
203 auto f = [](AFloat
x) {
212template<
typename AFloat>
215 auto f = [](AFloat
x) {
return exp(-
x *
x);};
220template<
typename AFloat>
224 auto f = [](AFloat
x) {
return - 2.0 *
x * exp(-
x *
x);};
void Map(Function_t &f)
Map the given function over the matrix elements.
void MapFrom(Function_t &f, const TCpuTensor< AFloat > &A)
Same as Map, but takes the input values from the tensor A and writes the results into this tensor.
static void FastTanh(Tensor_t &B)
static void Gauss(Tensor_t &B)
static void Sigmoid(Tensor_t &B)
static void SoftSign(Tensor_t &B)
TCpuTensor< AReal > Tensor_t
static void SymmetricReluDerivative(Tensor_t &B, const Tensor_t &A)
static void Hadamard(Tensor_t &A, const Tensor_t &B)
In-place Hadamard (element-wise) product of matrices A and B with the result being written into A.
static void SymmetricRelu(Tensor_t &B)
static void TanhDerivative(Tensor_t &B, const Tensor_t &A)
static void Tanh(Tensor_t &B)
DummyDescriptor ActivationDescriptor_t
static void ActivationFunctionForward(Tensor_t &X, EActivationFunction activFunct, const ActivationDescriptor_t activationDescr, const double coef=0.0, const Scalar_t alpha=1, const Scalar_t beta=0)
static void SoftSignDerivative(Tensor_t &B, const Tensor_t &A)
static void IdentityDerivative(Tensor_t &B, const Tensor_t &A)
static void Relu(Tensor_t &B)
static void ActivationFunctionBackward(Tensor_t &dX, const Tensor_t &Y, const Tensor_t &dY, const Tensor_t &X, EActivationFunction activFunct, const ActivationDescriptor_t activationDescr, const Scalar_t alpha=1, const Scalar_t beta=0)
Computes the gradient of the activation function.
static void GaussDerivative(Tensor_t &B, const Tensor_t &A)
static void SigmoidDerivative(Tensor_t &B, const Tensor_t &A)
static void FastTanhDerivative(Tensor_t &B, const Tensor_t &A)
static void ReluDerivative(Tensor_t &B, const Tensor_t &A)
void evaluate(typename Architecture_t::Tensor_t &A, EActivationFunction f)
Apply the given activation function to each value in the given tensor A.
EActivationFunction
Enum that represents layer activation functions.
void evaluateDerivative(typename Architecture_t::Tensor_t &B, EActivationFunction f, const typename Architecture_t::Tensor_t &A)
Compute the first partial derivative of the activation function for the values given in tensor A and write the results into tensor B.
create variable transformations