template <typename AFloat>
auto TCpu<AFloat>::RecurrentLayerBackward(
   TCpuMatrix<AFloat> &state_gradients_backward, TCpuMatrix<AFloat> &input_weight_gradients,
   TCpuMatrix<AFloat> &state_weight_gradients, TCpuMatrix<AFloat> &bias_gradients,
   TCpuMatrix<AFloat> &df, const TCpuMatrix<AFloat> &state,
   const TCpuMatrix<AFloat> &weights_input, const TCpuMatrix<AFloat> &weights_state,
   const TCpuMatrix<AFloat> &input, TCpuMatrix<AFloat> &input_gradient) -> Matrix_t &
{
   // Apply the activation derivative: df <- df o dL/ds_t (element-wise product).
   Hadamard(df, state_gradients_backward);
   // Gradient w.r.t. the layer input: dL/dx_t = df . W_input.
   if (input_gradient.GetNoElements() > 0) Multiply(input_gradient, df, weights_input);
   // Gradient for the previous time step, computed in place: dL/ds_{t-1} = df . W_state.
   if (state_gradients_backward.GetNoElements() > 0) Multiply(state_gradients_backward, df, weights_state);
   // Accumulate weight gradients: dL/dW_input += df^T . input, dL/dW_state += df^T . state.
   if (input_weight_gradients.GetNoElements() > 0) TransposeMultiply(input_weight_gradients, df, input, 1., 1.);
   if (state_weight_gradients.GetNoElements() > 0) TransposeMultiply(state_weight_gradients, df, state, 1., 1.);
   // Accumulate bias gradients: sum df over the batch dimension.
   if (bias_gradients.GetNoElements() > 0) SumColumns(bias_gradients, df, 1., 1.);
   return input_gradient;
}
Backward pass for recurrent networks. For a single time step, the routine first applies the activation derivative to the incoming state gradient (the Hadamard product stored back into df), then computes the gradient with respect to the layer input, overwrites state_gradients_backward in place with the gradient for the previous time step, and accumulates the input-weight, state-weight, and bias gradients. Each term is guarded by GetNoElements() > 0, so callers can skip gradients they do not need by passing empty matrices. The returned reference is input_gradient.
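In equation form, one step of backpropagation through time reads as follows. The forward update is taken to be s_t = f(x_t W_in^T + s_{t-1} W_st^T + b); this form is inferred from the matrix shapes used by the routine rather than stated in this file. On entry df holds f'(net_t); after the Hadamard step it holds the product in the first line.

\begin{align*}
  df &\leftarrow f'(\mathrm{net}_t) \odot \frac{\partial L}{\partial s_t}      && \text{Hadamard} \\
  \frac{\partial L}{\partial x_t} &= df \, W_{in}                              && \text{Multiply} \\
  \frac{\partial L}{\partial s_{t-1}} &= df \, W_{st}                          && \text{Multiply} \\
  \frac{\partial L}{\partial W_{in}} &\mathrel{+}= df^{\top} x_t               && \text{TransposeMultiply} \\
  \frac{\partial L}{\partial W_{st}} &\mathrel{+}= df^{\top} s_{t-1}           && \text{TransposeMultiply} \\
  \frac{\partial L}{\partial b} &\mathrel{+}= \textstyle\sum_{\text{batch}} df && \text{SumColumns}
\end{align*}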
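Below is a minimal sketch of driving one such step, assuming a ROOT build with the TMVA DNN headers available; the sizes B, H, D, the fill values, and the printed check are invented for illustration and are not part of the original source.

// Hypothetical one-step driver for TCpu<float>::RecurrentLayerBackward.
#include "TMVA/DNN/Architectures/Cpu.h"
#include <cstddef>
#include <iostream>

int main()
{
   using Arch_t   = TMVA::DNN::TCpu<float>;
   using Matrix_t = TMVA::DNN::TCpuMatrix<float>;

   const size_t B = 4, D = 3, H = 2;   // batch size, input width, state width

   Matrix_t state_grad(B, H), df(B, H), state(B, H);   // dL/ds_t, f'(net), s_{t-1}
   Matrix_t input(B, D), input_grad(B, D);             // x_t and dL/dx_t
   Matrix_t W_input(H, D), W_state(H, H);              // layer weights
   Matrix_t dW_input(H, D), dW_state(H, H), db(H, 1);  // gradient accumulators

   // Placeholder values; a real caller takes these from the forward pass.
   // The accumulators are zeroed by hand because the call accumulates (beta = 1).
   for (size_t i = 0; i < B; i++)
      for (size_t j = 0; j < H; j++) { state_grad(i, j) = 1.0f; df(i, j) = 0.5f; state(i, j) = 0.1f; }
   for (size_t i = 0; i < B; i++)
      for (size_t j = 0; j < D; j++) { input(i, j) = 0.2f; input_grad(i, j) = 0.0f; }
   for (size_t i = 0; i < H; i++) {
      for (size_t j = 0; j < D; j++) { W_input(i, j) = 0.3f; dW_input(i, j) = 0.0f; }
      for (size_t j = 0; j < H; j++) { W_state(i, j) = 0.4f; dW_state(i, j) = 0.0f; }
      db(i, 0) = 0.0f;
   }

   // One step back in time: state_grad now holds dL/ds_{t-1}.
   Arch_t::RecurrentLayerBackward(state_grad, dW_input, dW_state, db, df,
                                  state, W_input, W_state, input, input_grad);

   std::cout << "dL/dx(0,0) = " << input_grad(0, 0) << "\n";   // 0.5 * 0.3 * H = 0.3
   return 0;
}

In a full backward pass the caller would run this step in a loop from t = T-1 down to 0, feeding the updated state_grad and the cached forward-pass tensors of each earlier step into the next iteration.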