Cpu.h
// @(#)root/tmva/tmva/dnn:$Id$
// Author: Simon Pfreundschuh 05/07/16

/*************************************************************************
 * Copyright (C) 2016, Simon Pfreundschuh                                *
 * All rights reserved.                                                  *
 *                                                                       *
 * For the licensing terms see $ROOTSYS/LICENSE.                         *
 * For the list of contributors see $ROOTSYS/README/CREDITS.             *
 *************************************************************************/

//////////////////////////////////////////////////////////////////
// Definition of the TCpu architecture, which provides a        //
// multi-threaded CPU implementation of the low-level interface //
// for networks on CPUs, using BLAS and ROOT's TThreadExecutor. //
//////////////////////////////////////////////////////////////////

#ifndef TMVA_DNN_ARCHITECTURES_CPU
#define TMVA_DNN_ARCHITECTURES_CPU

#include "TMVA/DNN/Functions.h"
#include "TMVA/DNN/CNN/ConvLayer.h"

#include "Cpu/CpuBuffer.h"
#include "Cpu/CpuMatrix.h"
#include <vector>

class TRandom;

namespace TMVA
{
namespace DNN
{
   //class EActivationFunction;

/** The TCpu architecture class.
 *
 * Low-level interface class for multi-threaded CPU architectures. Contains as
 * public types the declaration of the scalar, matrix and data loader types
 * for this architecture, as well as the remaining functions in the low-level
 * interface in the form of static members.
 */
template<typename AReal = Real_t>
class TCpu
{
private:
   static TRandom * fgRandomGen;
public:

   using Scalar_t       = AReal;
   using Matrix_t       = TCpuMatrix<AReal>;
   using HostBuffer_t   = TCpuBuffer<AReal>;
   using DeviceBuffer_t = TCpuBuffer<AReal>;

   //____________________________________________________________________________
   //
   // Propagation
   //____________________________________________________________________________

   /** @name Forward Propagation
    * Low-level functions required for the forward propagation of activations
    * through the network.
    */
   ///@{
   /** Matrix-multiply \p input with the transpose of \p weights and
    *  write the results into \p output. */
   static void MultiplyTranspose(TCpuMatrix<Scalar_t> &output,
                                 const TCpuMatrix<Scalar_t> &input,
                                 const TCpuMatrix<Scalar_t> &weights);
   /** Add the vectors \p biases row-wise to the matrix \p output. */
   static void AddRowWise(TCpuMatrix<Scalar_t> &output,
                          const TCpuMatrix<Scalar_t> &biases);
   ///@}
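
   /* A minimal forward-pass sketch for a fully connected layer built from the
      two calls above. The variable names and dimensions (batchSize, nInputs,
      nOutputs) are hypothetical, assuming biases are stored as an
      (nOutputs x 1) matrix:

         TCpuMatrix<Real_t> input(batchSize, nInputs);    // B x D
         TCpuMatrix<Real_t> weights(nOutputs, nInputs);   // H x D
         TCpuMatrix<Real_t> biases(nOutputs, 1);
         TCpuMatrix<Real_t> output(batchSize, nOutputs);  // B x H
         TCpu<Real_t>::MultiplyTranspose(output, input, weights); // output = input * weights^T
         TCpu<Real_t>::AddRowWise(output, biases);                // add the biases to each row
   */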

   /** @name Backward Propagation
    * Low-level functions required for the backward propagation of activations
    * through the network.
    */
   ///@{
   /** Perform the complete backward propagation step. If the provided
    *  \p activationGradientsBackward matrix is not empty, compute the
    *  gradients of the objective function with respect to the activations
    *  of the previous layer (backward direction).
    *  Also compute the weight and the bias gradients. Modifies the values
    *  in \p df and thus produces a valid result only the first time it is
    *  applied after the corresponding forward propagation has been
    *  performed. */
   static void Backward(TCpuMatrix<Scalar_t> & activationGradientsBackward,
                        TCpuMatrix<Scalar_t> & weightGradients,
                        TCpuMatrix<Scalar_t> & biasGradients,
                        TCpuMatrix<Scalar_t> & df,
                        const TCpuMatrix<Scalar_t> & activationGradients,
                        const TCpuMatrix<Scalar_t> & weights,
                        const TCpuMatrix<Scalar_t> & activationBackward);
   /** Backward pass for Recurrent Networks. */
   static Matrix_t & RecurrentLayerBackward(TCpuMatrix<Scalar_t> & state_gradients_backward, // BxH
                                            TCpuMatrix<Scalar_t> & input_weight_gradients,
                                            TCpuMatrix<Scalar_t> & state_weight_gradients,
                                            TCpuMatrix<Scalar_t> & bias_gradients,
                                            TCpuMatrix<Scalar_t> & df,                       // DxH
                                            const TCpuMatrix<Scalar_t> & state,              // BxH
                                            const TCpuMatrix<Scalar_t> & weights_input,      // HxD
                                            const TCpuMatrix<Scalar_t> & weights_state,      // HxH
                                            const TCpuMatrix<Scalar_t> & input,              // BxD
                                            TCpuMatrix<Scalar_t> & input_gradient);
   /** Adds the elements in matrix \p B scaled by \p beta to the elements in
    *  the matrix \p A. This is required for the weight update in the gradient
    *  descent step. */
   static void ScaleAdd(TCpuMatrix<Scalar_t> & A,
                        const TCpuMatrix<Scalar_t> & B,
                        Scalar_t beta = 1.0);
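
   /* A plain stochastic-gradient-descent step expressed with ScaleAdd; the
      weight and gradient matrices and the learning rate are hypothetical
      caller-side variables:

         Real_t learningRate = 0.01;
         TCpu<Real_t>::ScaleAdd(weights, weightGradients, -learningRate);
         // weights += (-learningRate) * weightGradients
   */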

   static void Copy(TCpuMatrix<Scalar_t> & B,
                    const TCpuMatrix<Scalar_t> & A);

   // copy from another type of matrix
   template<typename AMatrix_t>
   static void CopyDiffArch(TCpuMatrix<Scalar_t> & B, const AMatrix_t & A);

   /** Above functions extended to vectors. */
   static void ScaleAdd(std::vector<TCpuMatrix<Scalar_t>> & A,
                        const std::vector<TCpuMatrix<Scalar_t>> & B,
                        Scalar_t beta = 1.0);

   static void Copy(std::vector<TCpuMatrix<Scalar_t>> & A,
                    const std::vector<TCpuMatrix<Scalar_t>> & B);

   // copy from another architecture
   template<typename AMatrix_t>
   static void CopyDiffArch(std::vector<TCpuMatrix<Scalar_t>> & A,
                            const std::vector<AMatrix_t> & B);

   ///@}

   //____________________________________________________________________________
   //
   // Activation Functions
   //____________________________________________________________________________

   /** @name Activation Functions
    * For each activation function, the low-level interface contains two routines.
    * One applies the activation function to a matrix and one evaluates the
    * derivatives of the activation function at the elements of a given matrix
    * and writes the results into the result matrix.
    */
   ///@{
   static void IdentityDerivative(TCpuMatrix<Scalar_t> & B,
                                  const TCpuMatrix<Scalar_t> & A);

   static void Relu(TCpuMatrix<Scalar_t> & B);
   static void ReluDerivative(TCpuMatrix<Scalar_t> & B,
                              const TCpuMatrix<Scalar_t> & A);

   static void Sigmoid(TCpuMatrix<Scalar_t> & B);
   static void SigmoidDerivative(TCpuMatrix<Scalar_t> & B,
                                 const TCpuMatrix<Scalar_t> & A);

   static void Tanh(TCpuMatrix<Scalar_t> & B);
   static void TanhDerivative(TCpuMatrix<Scalar_t> & B,
                              const TCpuMatrix<Scalar_t> & A);

   static void SymmetricRelu(TCpuMatrix<Scalar_t> & B);
   static void SymmetricReluDerivative(TCpuMatrix<Scalar_t> & B,
                                       const TCpuMatrix<Scalar_t> & A);

   static void SoftSign(TCpuMatrix<Scalar_t> & B);
   static void SoftSignDerivative(TCpuMatrix<Scalar_t> & B,
                                  const TCpuMatrix<Scalar_t> & A);

   static void Gauss(TCpuMatrix<Scalar_t> & B);
   static void GaussDerivative(TCpuMatrix<Scalar_t> & B,
                               const TCpuMatrix<Scalar_t> & A);
   ///@}
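
   /* Usage sketch for the activation routines: the derivative is evaluated
      from the (still unmodified) linear activations first, since the
      activation itself is applied in place (names and shapes hypothetical):

         TCpuMatrix<Real_t> net(batchSize, nUnits);  // linear activations
         TCpuMatrix<Real_t> dAct(batchSize, nUnits);
         TCpu<Real_t>::TanhDerivative(dAct, net);    // dAct = 1 - tanh(net)^2, element-wise
         TCpu<Real_t>::Tanh(net);                    // net  = tanh(net), element-wise
   */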

   //____________________________________________________________________________
   //
   // Loss Functions
   //____________________________________________________________________________

   /** @name Loss Functions
    * Loss functions compute a scalar value given the \p output of the network
    * for a given training input and the expected network prediction \p Y that
    * quantifies the quality of the prediction. For each function a routine
    * that computes the gradients (suffixed by Gradients) must also be provided
    * to start the backpropagation algorithm.
    */
   ///@{

   static Scalar_t MeanSquaredError(const TCpuMatrix<Scalar_t> &Y, const TCpuMatrix<Scalar_t> &output,
                                    const TCpuMatrix<Scalar_t> &weights);
   static void MeanSquaredErrorGradients(TCpuMatrix<Scalar_t> &dY, const TCpuMatrix<Scalar_t> &Y,
                                         const TCpuMatrix<Scalar_t> &output, const TCpuMatrix<Scalar_t> &weights);

   /** Sigmoid transformation is implicitly applied, thus \p output should
    *  hold the linear activations of the last layer in the net. */
   static Scalar_t CrossEntropy(const TCpuMatrix<Scalar_t> &Y, const TCpuMatrix<Scalar_t> &output,
                                const TCpuMatrix<Scalar_t> &weights);

   static void CrossEntropyGradients(TCpuMatrix<Scalar_t> &dY, const TCpuMatrix<Scalar_t> &Y,
                                     const TCpuMatrix<Scalar_t> &output, const TCpuMatrix<Scalar_t> &weights);

   /** Softmax transformation is implicitly applied, thus \p output should
    *  hold the linear activations of the last layer in the net. */
   static Scalar_t SoftmaxCrossEntropy(const TCpuMatrix<Scalar_t> &Y, const TCpuMatrix<Scalar_t> &output,
                                       const TCpuMatrix<Scalar_t> &weights);
   static void SoftmaxCrossEntropyGradients(TCpuMatrix<Scalar_t> &dY, const TCpuMatrix<Scalar_t> &Y,
                                            const TCpuMatrix<Scalar_t> &output, const TCpuMatrix<Scalar_t> &weights);
   ///@}
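
   /* Sketch of evaluating a loss and seeding backpropagation with its
      gradients; Y holds the targets, output the network output and weights
      the event weights (all hypothetical caller-side matrices):

         Real_t loss = TCpu<Real_t>::MeanSquaredError(Y, output, weights);
         TCpuMatrix<Real_t> dY(output.GetNrows(), output.GetNcols());
         TCpu<Real_t>::MeanSquaredErrorGradients(dY, Y, output, weights); // dY = dL/doutput
   */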

   //____________________________________________________________________________
   //
   // Output Functions
   //____________________________________________________________________________

   /** @name Output Functions
    * Output functions transform the activations \p output of the
    * output layer in the network to a valid prediction \p YHat for
    * the desired usage of the network, e.g. the identity function
    * for regression or the sigmoid transformation for two-class
    * classification.
    */
   ///@{
   static void Sigmoid(TCpuMatrix<Scalar_t> &YHat,
                       const TCpuMatrix<Scalar_t> &);
   static void Softmax(TCpuMatrix<Scalar_t> &YHat,
                       const TCpuMatrix<Scalar_t> &);
   ///@}
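
   /* Sketch for two-class classification: the raw output activations are
      turned into probabilities with the sigmoid output function
      (hypothetical names):

         TCpuMatrix<Real_t> YHat(output.GetNrows(), output.GetNcols());
         TCpu<Real_t>::Sigmoid(YHat, output); // YHat = 1 / (1 + exp(-output)), element-wise
   */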

   //____________________________________________________________________________
   //
   // Regularization
   //____________________________________________________________________________

   /** @name Regularization
    * For each regularization type two functions are required, one named
    * <tt><Type>Regularization</tt> that evaluates the corresponding
    * regularization functional for a given weight matrix and one named
    * <tt>Add<Type>RegularizationGradients</tt> that adds the regularization
    * component of the gradients to the provided matrix.
    */
   ///@{

   static Scalar_t L1Regularization(const TCpuMatrix<Scalar_t> & W);
   static void AddL1RegularizationGradients(TCpuMatrix<Scalar_t> & A,
                                            const TCpuMatrix<Scalar_t> & W,
                                            Scalar_t weightDecay);

   static Scalar_t L2Regularization(const TCpuMatrix<Scalar_t> & W);
   static void AddL2RegularizationGradients(TCpuMatrix<Scalar_t> & A,
                                            const TCpuMatrix<Scalar_t> & W,
                                            Scalar_t weightDecay);
   ///@}
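
   /* Sketch of adding an L2 penalty to the loss and the corresponding
      gradient term to the weight gradients; weightDecay is a hypothetical
      regularization strength:

         loss += weightDecay * TCpu<Real_t>::L2Regularization(W);
         TCpu<Real_t>::AddL2RegularizationGradients(dW, W, weightDecay);
   */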

   //____________________________________________________________________________
   //
   // Initialization
   //____________________________________________________________________________

   /** @name Initialization
    * For each initialization method, one function in the low-level interface
    * is provided. The naming scheme is <tt>Initialize<Type></tt> for a given
    * initialization method Type.
    */
   ///@{

   static void InitializeGauss(TCpuMatrix<Scalar_t> & A);
   static void InitializeUniform(TCpuMatrix<Scalar_t> & A);
   static void InitializeIdentity(TCpuMatrix<Scalar_t> & A);
   static void InitializeZero(TCpuMatrix<Scalar_t> & A);
   static void InitializeGlorotNormal(TCpuMatrix<Scalar_t> & A);
   static void InitializeGlorotUniform(TCpuMatrix<Scalar_t> & A);

   // return static instance of random generator used for initialization;
   // if the generator does not exist it is created the first time with a random seed (i.e. seed = 0)
   static TRandom & GetRandomGenerator();
   // set random seed for the static generator;
   // if the static generator does not exist it is created
   static void SetRandomSeed(size_t seed);
   ///@}
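
   /* Sketch of a reproducible weight initialization; the seed value is
      arbitrary and the matrix shape hypothetical:

         TCpu<Real_t>::SetRandomSeed(42);          // creates the static generator if needed
         TCpuMatrix<Real_t> W(nOutputs, nInputs);
         TCpu<Real_t>::InitializeGlorotUniform(W); // samples from [-lim, +lim], lim = sqrt(6/(N_in+N_out))
   */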

   //____________________________________________________________________________
   //
   // Dropout
   //____________________________________________________________________________

   /** @name Dropout
    */
   ///@{

   /** Apply dropout with activation probability \p p to the given
    *  matrix \p A and scale the result by the reciprocal of \p p. */
   static void Dropout(TCpuMatrix<Scalar_t> & A, Scalar_t p);

   ///@}
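
   /* Worked example: with p = 0.5 each entry of A survives with probability
      0.5 and the survivors are scaled by 1/p = 2, so the expected value of
      every entry is unchanged (inverted dropout):

         TCpu<Real_t>::Dropout(A, 0.5);
   */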

   //____________________________________________________________________________
   //
   // Convolutional Layer Propagation
   //____________________________________________________________________________

   /** @name Forward Propagation in Convolutional Layer
    */
   ///@{

   /** Calculate how many neurons "fit" in the output layer, given the input as
    *  well as the layer's hyperparameters. */
   static size_t calculateDimension(size_t imgDim, size_t fltDim, size_t padding, size_t stride);
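
   /* The returned size follows the usual convolution arithmetic,
      (imgDim - fltDim + 2 * padding) / stride + 1; e.g. a 32-pixel image
      dimension with a 5-pixel filter, no padding and stride 1 gives
      (32 - 5 + 0) / 1 + 1 = 28 output neurons along that dimension. */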

   /** Transform the matrix \p B into local-view format, suitable for
    *  convolution, and store it in matrix \p A. */
   static void Im2col(TCpuMatrix<AReal> &A,
                      const TCpuMatrix<AReal> &B,
                      size_t imgHeight,
                      size_t imgWidth,
                      size_t fltHeight,
                      size_t fltWidth,
                      size_t strideRows,
                      size_t strideCols,
                      size_t zeroPaddingHeight,
                      size_t zeroPaddingWidth);

   static void Im2colIndices(std::vector<int> &V, const TCpuMatrix<AReal> &B, size_t nLocalViews,
                             size_t imgHeight, size_t imgWidth, size_t fltHeight,
                             size_t fltWidth, size_t strideRows, size_t strideCols,
                             size_t zeroPaddingHeight, size_t zeroPaddingWidth);
   static void Im2colFast(TCpuMatrix<AReal> &A, const TCpuMatrix<AReal> &B, const std::vector<int> & V);
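
   /* Sketch of the fast im2col path: the index map depends only on the image
      geometry, so it can be computed once and reused for every event of a
      batch (names, shapes and the padding values are hypothetical):

         std::vector<int> idx;
         TCpu<Real_t>::Im2colIndices(idx, input[0], nLocalViews, imgHeight, imgWidth,
                                     fltHeight, fltWidth, strideRows, strideCols, 0, 0);
         for (size_t i = 0; i < batchSize; ++i) {
            TCpu<Real_t>::Im2colFast(inputPrime, input[i], idx);
            // ... multiply inputPrime with the rotated weights ...
         }
   */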

   /** Rotates the matrix \p B, which represents the weights,
    *  and stores the result in the matrix \p A. */
   static void RotateWeights(TCpuMatrix<AReal> &A, const TCpuMatrix<AReal> &B, size_t filterDepth, size_t filterHeight,
                             size_t filterWidth, size_t numFilters);

   /** Add the biases in the Convolutional Layer. */
   static void AddConvBiases(TCpuMatrix<Scalar_t> &output, const TCpuMatrix<Scalar_t> &biases);
   ///@}

   /** Dummy placeholder - preparation is currently only required for the CUDA architecture. */
   static void PrepareInternals(std::vector<TCpuMatrix<Scalar_t>> &) {}

   /** Forward propagation in the Convolutional layer. */
   static void ConvLayerForward(std::vector<TCpuMatrix<Scalar_t>> & output,
                                std::vector<TCpuMatrix<Scalar_t>> & derivatives,
                                const std::vector<TCpuMatrix<Scalar_t>> & input,
                                const TCpuMatrix<Scalar_t> & weights, const TCpuMatrix<Scalar_t> & biases,
                                const DNN::CNN::TConvParams & params, EActivationFunction activFunc,
                                std::vector<TCpuMatrix<Scalar_t>> & /* inputPrime */);

   /** @name Backward Propagation in Convolutional Layer
    */
   ///@{

   /** Perform the complete backward propagation step in a Convolutional Layer.
    *  If the provided \p activationGradientsBackward matrix is not empty, compute the
    *  gradients of the objective function with respect to the activations
    *  of the previous layer (backward direction).
    *  Also compute the weight and the bias gradients. Modifies the values
    *  in \p df and thus produces a valid result only the first time it is
    *  applied after the corresponding forward propagation has been
    *  performed. */
   static void ConvLayerBackward(std::vector<TCpuMatrix<Scalar_t>> &activationGradientsBackward,
                                 TCpuMatrix<Scalar_t> &weightGradients, TCpuMatrix<Scalar_t> &biasGradients,
                                 std::vector<TCpuMatrix<Scalar_t>> &df,
                                 const std::vector<TCpuMatrix<Scalar_t>> &activationGradients,
                                 const TCpuMatrix<Scalar_t> &weights,
                                 const std::vector<TCpuMatrix<Scalar_t>> &activationBackward, size_t batchSize,
                                 size_t inputHeight, size_t inputWidth, size_t depth, size_t height, size_t width,
                                 size_t filterDepth, size_t filterHeight, size_t filterWidth, size_t nLocalViews);

   /** Utility function for calculating the activation gradients of the layer
    *  before the convolutional layer. */
   static void CalculateConvActivationGradients(std::vector<TCpuMatrix<Scalar_t>> &activationGradientsBackward,
                                                const std::vector<TCpuMatrix<Scalar_t>> &df,
                                                const TCpuMatrix<Scalar_t> &weights, size_t batchSize,
                                                size_t inputHeight, size_t inputWidth, size_t depth, size_t height,
                                                size_t width, size_t filterDepth, size_t filterHeight,
                                                size_t filterWidth);

   /** Utility function for calculating the weight gradients of the convolutional
    *  layer. */
   static void CalculateConvWeightGradients(TCpuMatrix<Scalar_t> &weightGradients,
                                            const std::vector<TCpuMatrix<Scalar_t>> &df,
                                            const std::vector<TCpuMatrix<Scalar_t>> &activations_backward,
                                            size_t batchSize, size_t inputHeight, size_t inputWidth, size_t depth,
                                            size_t height, size_t width, size_t filterDepth, size_t filterHeight,
                                            size_t filterWidth, size_t nLocalViews);

   /** Utility function for calculating the bias gradients of the convolutional
    *  layer. */
   static void CalculateConvBiasGradients(TCpuMatrix<Scalar_t> &biasGradients, const std::vector<TCpuMatrix<Scalar_t>> &df,
                                          size_t batchSize, size_t depth, size_t nLocalViews);
   ///@}

   //____________________________________________________________________________
   //
   // Max Pooling Layer Propagation
   //____________________________________________________________________________
   /** @name Forward Propagation in Max Pooling Layer
    */
   ///@{

   /** Downsample the matrix \p C to the matrix \p A, using the max
    *  operation, such that the winning indices are stored in matrix
    *  \p B. */
   static void Downsample(TCpuMatrix<AReal> &A, TCpuMatrix<AReal> &B, const TCpuMatrix<AReal> &C, size_t imgHeight,
                          size_t imgWidth, size_t fltHeight, size_t fltWidth, size_t strideRows, size_t strideCols);

   ///@}
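
   /* Worked example: 2 x 2 pooling with stride 2 on a 28 x 28 input yields
      (28 - 2) / 2 + 1 = 14 pooled positions per dimension; A receives the
      winning values and B the winning indices that MaxPoolLayerBackward
      needs later (names are hypothetical):

         TCpu<Real_t>::Downsample(A, B, C, 28, 28, 2, 2, 2, 2);
   */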

   /** @name Backward Propagation in Max Pooling Layer
    */
   ///@{
   /** Perform the complete backward propagation step in a Pooling Layer. Based on the
    *  winning indices stored in the index matrix, it just forwards the activation
    *  gradients to the previous layer. */
   static void MaxPoolLayerBackward(TCpuMatrix<AReal> &activationGradientsBackward,
                                    const TCpuMatrix<AReal> &activationGradients,
                                    const TCpuMatrix<AReal> &indexMatrix,
                                    size_t imgHeight,
                                    size_t imgWidth,
                                    size_t fltHeight,
                                    size_t fltWidth,
                                    size_t strideRows,
                                    size_t strideCols,
                                    size_t nLocalViews);

   ///@}

   //____________________________________________________________________________
   //
   // Reshape Layer Propagation
   //____________________________________________________________________________
   /** @name Forward and Backward Propagation in Reshape Layer
    */
   ///@{

   /** Transform the matrix \p B to a matrix \p A with different dimensions. */
   static void Reshape(TCpuMatrix<AReal> &A, const TCpuMatrix<AReal> &B);

   /** Flattens the tensor \p B, such that each matrix is stretched into
    *  one row, resulting in the matrix \p A. */
   static void Flatten(TCpuMatrix<AReal> &A, const std::vector<TCpuMatrix<AReal>> &B, size_t size, size_t nRows,
                       size_t nCols);

   /** Transforms each row of \p B to a matrix and stores it in the
    *  tensor \p A. */
   static void Deflatten(std::vector<TCpuMatrix<AReal>> &A, const TCpuMatrix<AReal> &B, size_t index, size_t nRows,
                         size_t nCols);
   /** Rearrange data according to time: fill the B x T x D output \p out with the
    *  T x B x D input \p in. */
   static void Rearrange(std::vector<TCpuMatrix<AReal>> &out, const std::vector<TCpuMatrix<AReal>> &in);

   ///@}
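
   /* Sketch of a round trip through the flattening helpers: a tensor of
      `size` matrices of dimension nRows x nCols is flattened into a single
      matrix and restored again (hypothetical names):

         TCpu<Real_t>::Flatten(flat, tensor, size, nRows, nCols);   // tensor -> flat
         TCpu<Real_t>::Deflatten(tensor, flat, size, nRows, nCols); // flat -> tensor
   */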

   //____________________________________________________________________________
   //
   // Additional Arithmetic Functions
   //____________________________________________________________________________

   /** @name Additional Arithmetic Functions
    *
    * Additional arithmetic on CPU matrices used to implement the low-level
    * interface.
    */
   ///@{

   /** Standard multiplication of two matrices \p A and \p B with the result being
    *  written into \p C.
    */
   static void Multiply(TCpuMatrix<Scalar_t> &C,
                        const TCpuMatrix<Scalar_t> &A,
                        const TCpuMatrix<Scalar_t> &B);
   /** Matrix multiplication of \p input with \p Weights^T (transposed) with the
    *  result being written into \p output.
    */
   static void TransposeMultiply(TCpuMatrix<Scalar_t> &output,
                                 const TCpuMatrix<Scalar_t> &input,
                                 const TCpuMatrix<Scalar_t> &Weights,
                                 Scalar_t alpha = 1.0, Scalar_t beta = 0.);
   /** In-place Hadamard (element-wise) product of matrices \p A and \p B
    *  with the result being written into \p A.
    */
   static void Hadamard(TCpuMatrix<Scalar_t> &A,
                        const TCpuMatrix<Scalar_t> &B);

   /** Sum the columns of the (m x n) matrix \p A and write the results into \p B.
    */
   static void SumColumns(TCpuMatrix<Scalar_t> &B,
                          const TCpuMatrix<Scalar_t> &A,
                          Scalar_t alpha = 1.0, Scalar_t beta = 0.);

   /** Compute the sum of all elements in \p A. */
   static Scalar_t Sum(const TCpuMatrix<Scalar_t> &A);

   /** Check two matrices for equality, taking floating point arithmetic errors into account. */
   static bool AlmostEquals(const TCpuMatrix<Scalar_t> &A, const TCpuMatrix<Scalar_t> &B, double epsilon = 0.1);

   /** Add the constant \p beta to all the elements of matrix \p A and write the
    *  result into \p A.
    */
   static void ConstAdd(TCpuMatrix<Scalar_t> &A, Scalar_t beta);

   /** Multiply all the elements of matrix \p A by the constant \p beta and write the
    *  result into \p A.
    */
   static void ConstMult(TCpuMatrix<Scalar_t> &A, Scalar_t beta);

   /** Take the reciprocal of each element of the matrix \p A and write the result into
    *  \p A.
    */
   static void ReciprocalElementWise(TCpuMatrix<Scalar_t> &A);

   /** Square each element of the matrix \p A and write the result into
    *  \p A.
    */
   static void SquareElementWise(TCpuMatrix<Scalar_t> &A);

   /** Take the square root of each element of the matrix \p A and write the result into
    *  \p A.
    */
   static void SqrtElementWise(TCpuMatrix<Scalar_t> &A);

   // optimizer functions
   static void AdamUpdate(TCpuMatrix<Scalar_t> & A, const TCpuMatrix<Scalar_t> & M, const TCpuMatrix<Scalar_t> & V,
                          Scalar_t alpha, Scalar_t eps);
   static void AdamUpdateFirstMom(TCpuMatrix<Scalar_t> & A, const TCpuMatrix<Scalar_t> & B, Scalar_t beta);
   static void AdamUpdateSecondMom(TCpuMatrix<Scalar_t> & A, const TCpuMatrix<Scalar_t> & B, Scalar_t beta);
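
   /* Sketch of one Adam step assembled from the three helpers above,
      mirroring the standard Adam update rule; W, dW, M, V and the
      hyperparameters are hypothetical caller-side variables:

         TCpu<Real_t>::AdamUpdateFirstMom(M, dW, beta1);  // M = beta1 * M + (1 - beta1) * dW
         TCpu<Real_t>::AdamUpdateSecondMom(V, dW, beta2); // V = beta2 * V + (1 - beta2) * dW * dW
         TCpu<Real_t>::AdamUpdate(W, M, V, alpha, eps);   // W -= alpha * M / (sqrt(V) + eps)
   */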

};

//____________________________________________________________________________
template <typename Real_t>
template <typename AMatrix_t>
void TCpu<Real_t>::CopyDiffArch(TCpuMatrix<Real_t> &B,
                                const AMatrix_t &A)
{
   // copy from another architecture using the reference (TMatrixT) type;
   // this is not very efficient since it creates temporary objects
   TMatrixT<Real_t> tmp = A;
   Copy(B, TCpuMatrix<Real_t>(tmp));
}

//____________________________________________________________________________
template <typename Real_t>
template <typename AMatrix_t>
void TCpu<Real_t>::CopyDiffArch(std::vector<TCpuMatrix<Real_t>> &B,
                                const std::vector<AMatrix_t> &A)
{
   for (size_t i = 0; i < B.size(); ++i) {
      CopyDiffArch(B[i], A[i]);
   }
}

} // namespace DNN
} // namespace TMVA

#endif