27template<
typename AFloat>
36template<
typename AFloat>
46 TMVA::DNN::evaluateDerivative<TCuda<AFloat>>(
dX,
activFunct,
X);
52template<
typename AFloat>
58 cudaStream_t s = A.GetComputeStream();
59 ::TMVA::DNN::Cuda::IdentityDerivative<<<gridDims, blockDims, 0, s>>>(
63 B.SetComputeStream(s);
67template<
typename AFloat>
72 cudaStream_t s = A.GetComputeStream();
73 ::TMVA::DNN::Cuda::Relu<<<gridDims, blockDims, 0, s>>>(
80template<
typename AFloat>
84 assert(B.GetNrows() == A.GetNrows() && B.GetNcols() == A.GetNcols());
87 cudaStream_t s = A.GetComputeStream();
88 ::TMVA::DNN::Cuda::ReluDerivative<<<gridDims, blockDims, 0, s>>>(
93 B.SetComputeStream(s);
97template<
typename AFloat>
102 cudaStream_t s = A.GetComputeStream();
103 ::TMVA::DNN::Cuda::Sigmoid<<<gridDims, blockDims, 0, s>>>(
110template<
typename AFloat>
114 assert(B.GetNrows() == A.GetNrows() && B.GetNcols() == A.GetNcols());
117 cudaStream_t s = A.GetComputeStream();
118 ::TMVA::DNN::Cuda::SigmoidDerivative<<<gridDims, blockDims, 0, s>>>(
123 B.SetComputeStream(s);
127template<
typename AFloat>
132 cudaStream_t s = A.GetComputeStream();
133 ::TMVA::DNN::Cuda::Tanh<<<gridDims, blockDims, 0, s>>>(
140template<
typename AFloat>
144 assert(B.GetNrows() == A.GetNrows() && B.GetNcols() == A.GetNcols());
147 cudaStream_t s = A.GetComputeStream();
148 ::TMVA::DNN::Cuda::TanhDerivative<<<gridDims, blockDims, 0, s>>>(
153 B.SetComputeStream(s);
157template<
typename AFloat>
162 cudaStream_t s = A.GetComputeStream();
163 ::TMVA::DNN::Cuda::SymmetricRelu<<<gridDims, blockDims, 0, s>>>(
170template<
typename AFloat>
174 assert(B.GetNrows() == A.GetNrows() && B.GetNcols() == A.GetNcols());
177 cudaStream_t s = A.GetComputeStream();
178 ::TMVA::DNN::Cuda::SymmetricReluDerivative<<<gridDims, blockDims, 0, s>>>(
183 B.SetComputeStream(s);
187template<
typename AFloat>
192 cudaStream_t s = A.GetComputeStream();
193 ::TMVA::DNN::Cuda::SoftSign<<<gridDims, blockDims, 0, s>>>(
200template<
typename AFloat>
204 assert(B.GetNrows() == A.GetNrows() && B.GetNcols() == A.GetNcols());
207 cudaStream_t s = A.GetComputeStream();
208 ::TMVA::DNN::Cuda::SoftSignDerivative<<<gridDims, blockDims, 0, s>>>(
213 B.SetComputeStream(s);
217template<
typename AFloat>
222 cudaStream_t s = A.GetComputeStream();
223 ::TMVA::DNN::Cuda::Gauss<<<gridDims, blockDims, 0, s>>>(
230template<
typename AFloat>
234 assert(B.GetNrows() == A.GetNrows() && B.GetNcols() == A.GetNcols());
237 cudaStream_t s = A.GetComputeStream();
238 ::TMVA::DNN::Cuda::GaussDerivative<<<gridDims, blockDims, 0, s>>>(
243 B.SetComputeStream(s);
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows typed iteration through a TCollection.
static void SoftSignDerivative(Tensor_t &B, const Tensor_t &A)
static void SymmetricReluDerivative(Tensor_t &B, const Tensor_t &A)
static void IdentityDerivative(Tensor_t &B, const Tensor_t &A)
static void ActivationFunctionForward(Tensor_t &X, EActivationFunction activFunct, const ActivationDescriptor_t activationDescr, const double coef=0.0, const AFloat alpha=1, const AFloat beta=0)
static void SoftSign(Tensor_t &B)
static void Gauss(Tensor_t &B)
static void Sigmoid(Tensor_t &B)
static void Tanh(Tensor_t &B)
static void ActivationFunctionBackward(Tensor_t &dX, const Tensor_t &Y, const Tensor_t &dY, const Tensor_t &X, EActivationFunction activFunct, const ActivationDescriptor_t activationDescr, const AFloat alpha=1, const AFloat beta=0)
Computes the gradient of the activation function.
static void ReluDerivative(Tensor_t &B, const Tensor_t &A)
static void GaussDerivative(Tensor_t &B, const Tensor_t &A)
static void Relu(Tensor_t &B)
static void SymmetricRelu(Tensor_t &B)
static void SigmoidDerivative(Tensor_t &B, const Tensor_t &A)
static void TanhDerivative(Tensor_t &B, const Tensor_t &A)
static dim3 BlockDims2D()
static dim3 GridDims2D(int nrows, int ncols)
EActivationFunction
Enum that represents layer activation functions.
Create variable transformations.