Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
RecurrentPropagation.cu
Go to the documentation of this file.
1// @(#)root/tmva/tmva/dnn:$Id$
2// Author: Saurav Shekhar 23/06/17
3
4/*************************************************************************
5 * Copyright (C) 2017, Saurav Shekhar *
6 * All rights reserved. *
7 * *
8 * For the licensing terms see $ROOTSYS/LICENSE. *
9 * For the list of contributors see $ROOTSYS/README/CREDITS. *
10 *************************************************************************/
11
12 //////////////////////////////////////////////////////////////////
13 // Implementation of the functions required for the forward and //
14 // backward propagation of activations through a neural network //
15 // for CUDA architectures. //
16 //////////////////////////////////////////////////////////////////
17
20#include "Kernels.cuh"
21
22namespace TMVA
23{
24namespace DNN
25{
26
//____________________________________________________________________________
/// Backward pass for recurrent layers — NOT implemented for the native CUDA
/// architecture. This function unconditionally aborts via Fatal() (which does
/// not return); recurrent layers must be run with a backend that supports
/// them. The intended implementation is kept below under `#if 0` for
/// reference.
///
/// \param state_gradients_backward gradient wrt the previous state (B x H)
/// \param input_weight_gradients   gradient wrt the input weights
/// \param state_weight_gradients   gradient wrt the state weights
/// \param bias_gradients           gradient wrt the biases
/// \param df                       activation-derivative buffer (annotated
///                                 D x H in the original; used element-wise
///                                 against B x H below — TODO confirm shape)
/// \param state                    state matrix (B x H)
/// \param weights_input            input weights (H x D)
/// \param weights_state            state weights (H x H)
/// \param input                    input matrix (B x D)
/// \param input_gradient           output: gradient wrt the input; returned
/// \return reference to \p input_gradient (never actually reached at runtime,
///         since Fatal() aborts first)
template<typename AFloat>
TCudaMatrix<AFloat> & TCuda<AFloat>::RecurrentLayerBackward(
   TCudaMatrix<AFloat> & state_gradients_backward,       // B x H
   TCudaMatrix<AFloat> & input_weight_gradients,
   TCudaMatrix<AFloat> & state_weight_gradients,
   TCudaMatrix<AFloat> & bias_gradients,
   TCudaMatrix<AFloat> & df,                             // D x H (see note above)
   const TCudaMatrix<AFloat> & state,                    // B x H
   const TCudaMatrix<AFloat> & weights_input,            // H x D
   const TCudaMatrix<AFloat> & weights_state,            // H x H
   const TCudaMatrix<AFloat> & input,                    // B x D
   TCudaMatrix<AFloat> & input_gradient)
{
   // Recurrent layers are unsupported on this backend: abort the program.
   Fatal("TCuda::RecurrentLayerBackward", "Recurrent layers are not supported in the native Cuda architecture!!!");

#if 0
   /// LM: This needs to be fixed !
   // Disabled reference implementation of the recurrent backward pass:

   // Element-wise product of the activation derivatives with the incoming
   // state gradient:  df <- df (.) state_gradients_backward   (B x H)
   TCuda<AFloat>::Hadamard(df, state_gradients_backward);

   // Input gradients:  df . W_input   (B x H . H x D -> B x D)
   if (input_gradient.GetNoElements() > 0) {
      TCuda<AFloat>::Multiply(input_gradient, df, weights_input);
   }

   // State gradients, propagated to the previous time step:
   // df . W_state   (B x H . H x H -> B x H)
   if (state_gradients_backward.GetNoElements() > 0) {
      TCuda<AFloat>::Multiply(state_gradients_backward, df, weights_state);
   }

   // Weight gradients. Gradients accumulate across time steps, hence the
   // copy-then-ScaleAdd dance to add the previous contents back in after
   // TransposeMultiply overwrites the output.
   if (input_weight_gradients.GetNoElements() > 0) {
      TCudaMatrix<AFloat> tmp(input_weight_gradients);
      TCuda<AFloat>::TransposeMultiply(input_weight_gradients, df, input);   // H x B . B x D
      TCuda<AFloat>::ScaleAdd(input_weight_gradients, tmp, 1);
   }
   if (state_weight_gradients.GetNoElements() > 0) {
      TCudaMatrix<AFloat> tmp(state_weight_gradients);
      TCuda<AFloat>::TransposeMultiply(state_weight_gradients, df, state);   // H x B . B x H
      TCuda<AFloat>::ScaleAdd(state_weight_gradients, tmp, 1);
   }

   // Bias gradients: column sums of df.
   if (bias_gradients.GetNoElements() > 0) {
      TCuda<AFloat>::SumColumns(bias_gradients, df);
   }
#endif

   return input_gradient;
}
81
82} // namespace DNN
83} // namespace TMVA
void Fatal(const char *location, const char *msgfmt,...)
Use this function in case of a fatal error. It will abort the program.
Definition TError.cxx:244
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void input
TCudaMatrix Class.
Definition CudaMatrix.h:109
size_t GetNoElements() const
Definition CudaMatrix.h:167
static Matrix_t & RecurrentLayerBackward(Matrix_t &state_gradients_backward, Matrix_t &input_weight_gradients, Matrix_t &state_weight_gradients, Matrix_t &bias_gradients, Matrix_t &df, const Matrix_t &state, const Matrix_t &weights_input, const Matrix_t &weights_state, const Matrix_t &input, Matrix_t &input_gradient)
Backward pass for Recurrent Networks.
static void Multiply(Matrix_t &C, const Matrix_t &A, const Matrix_t &B)
Standard multiplication of two matrices A and B with the result being written into C.
static void SumColumns(Matrix_t &B, const Matrix_t &A, Scalar_t alpha=1.0, Scalar_t beta=0.)
Sum columns of the (m x n) matrix A and write the results into the first m elements of B.
static void Hadamard(Tensor_t &A, const Tensor_t &B)
In-place Hadamard (element-wise) product of matrices A and B with the result being written into A.
static void TransposeMultiply(Matrix_t &output, const Matrix_t &input, const Matrix_t &Weights, Scalar_t alpha=1.0, Scalar_t beta=0.)
Matrix multiplication of two matrices A and B^T (transposed) with the result being written into C.
static void ScaleAdd(Matrix_t &A, const Matrix_t &B, Scalar_t beta=1.0)
Adds the elements of matrix B, scaled by beta, to the elements of matrix A.
create variable transformations