ConvLayer.h
// @(#)root/tmva/tmva/dnn:$Id$
// Author: Vladimir Ilievski

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis *
 * Package: TMVA *
 * Class  : TConvLayer *
 * *
 * *
 * Description: *
 *      Convolutional Deep Neural Network Layer *
 * *
 * Authors (alphabetical): *
 *      Vladimir Ilievski <ilievski.vladimir@live.com> - CERN, Switzerland *
 * *
 * Copyright (c) 2005-2015: *
 *      CERN, Switzerland *
 *      U. of Victoria, Canada *
 *      MPI-K Heidelberg, Germany *
 *      U. of Bonn, Germany *
 * *
 * Redistribution and use in source and binary forms, with or without *
 * modification, are permitted according to the terms listed in LICENSE *
 * (see tmva/doc/LICENSE) *
 **********************************************************************************/

#ifndef TMVA_CNN_CONVLAYER
#define TMVA_CNN_CONVLAYER
#include "TMatrix.h"

#include "TMVA/DNN/GeneralLayer.h"
#include "TMVA/DNN/Functions.h"
#include "TMVA/DNN/CNN/ContextHandles.h"

#include <vector>
#include <iostream>
#include <string>

namespace TMVA {
namespace DNN {
namespace CNN {

typedef struct TConvParams {

public:
   size_t batchSize; ///< Batch size used for training and evaluation

   size_t inputDepth;  ///< The depth of the previous layer or input.
   size_t inputHeight; ///< The height of the previous layer or input.
   size_t inputWidth;  ///< The width of the previous layer or input.

   size_t numberFilters; ///< The number of filters, which is equal to the output's depth.
   size_t filterHeight;  ///< The height of the filter.
   size_t filterWidth;   ///< The width of the filter.

   size_t strideRows;    ///< The number of row pixels by which the filter is slid each step.
   size_t strideCols;    ///< The number of column pixels by which the filter is slid each step.
   size_t paddingHeight; ///< The number of zero layers added at the top and bottom of the input.
   size_t paddingWidth;  ///< The number of zero layers added at the left and right of the input.

   TConvParams(size_t _batchSize, size_t _inputDepth, size_t _inputHeight, size_t _inputWidth, size_t _numberFilters,
               size_t _filterHeight, size_t _filterWidth, size_t _strideRows, size_t _strideCols,
               size_t _paddingHeight, size_t _paddingWidth)
           : batchSize(_batchSize), inputDepth(_inputDepth), inputHeight(_inputHeight), inputWidth(_inputWidth),
             numberFilters(_numberFilters), filterHeight(_filterHeight), filterWidth(_filterWidth),
             strideRows(_strideRows), strideCols(_strideCols), paddingHeight(_paddingHeight),
             paddingWidth(_paddingWidth)
   {}
} TConvParams;

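// Illustrative example (values are hypothetical): parameters describing a batch of
// 100 32x32 RGB images convolved with 16 filters of size 5x5, stride 1 and zero
// padding 2 in both directions:
//
//    TConvParams params(/*batchSize*/ 100, /*inputDepth*/ 3, /*inputHeight*/ 32, /*inputWidth*/ 32,
//                       /*numberFilters*/ 16, /*filterHeight*/ 5, /*filterWidth*/ 5,
//                       /*strideRows*/ 1, /*strideCols*/ 1, /*paddingHeight*/ 2, /*paddingWidth*/ 2);
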
template <typename Architecture_t>
class TConvLayer : public VGeneralLayer<Architecture_t> {
public:
   using Tensor_t = typename Architecture_t::Tensor_t;
   using Matrix_t = typename Architecture_t::Matrix_t;
   using Scalar_t = typename Architecture_t::Scalar_t;

   using LayerDescriptor_t = typename Architecture_t::ConvolutionDescriptor_t;
   using WeightsDescriptor_t = typename Architecture_t::FilterDescriptor_t;
   using HelperDescriptor_t = typename Architecture_t::ActivationDescriptor_t;

   using AlgorithmForward_t = typename Architecture_t::AlgorithmForward_t;   // Forward layer operation
   using AlgorithmBackward_t = typename Architecture_t::AlgorithmBackward_t; // Backward layer operation
   using AlgorithmHelper_t = typename Architecture_t::AlgorithmHelper_t;     // Used for weight grad backward pass
   using ReduceTensorDescriptor_t = typename Architecture_t::ReduceTensorDescriptor_t; // used for reduction of tensor (bias grad)

   // FIXME: Add other cudnn types (algorithm preference etc.)
   using AlgorithmDataType_t = typename Architecture_t::AlgorithmDataType_t;

   /* Calculate the output dimension of the convolutional layer */
   static size_t calculateDimension(size_t imgDim, size_t fltDim, size_t padding, size_t stride);

   /* Calculate the number of pixels in a single receptive field */
   static size_t inline calculateNLocalViewPixels(size_t depth, size_t height, size_t width) { return depth * height * width; }

   /* Calculate the number of receptive fields in an image given the filter and image sizes */
   static size_t calculateNLocalViews(size_t inputHeight, size_t filterHeight, size_t paddingHeight, size_t strideRows,
                                      size_t inputWidth, size_t filterWidth, size_t paddingWidth, size_t strideCols);

protected:
   size_t fFilterDepth;  ///< The depth of the filter.
   size_t fFilterHeight; ///< The height of the filter.
   size_t fFilterWidth;  ///< The width of the filter.

   size_t fStrideRows; ///< The number of row pixels by which the filter is slid each step.
   size_t fStrideCols; ///< The number of column pixels by which the filter is slid each step.

   size_t fNLocalViewPixels; ///< The number of pixels in one local image view.
   size_t fNLocalViews;      ///< The number of local views in one image.

   Scalar_t fDropoutProbability; ///< Probability that an input is active.

   TDescriptors *fDescriptors = nullptr; ///< Keeps the convolution, activations and filter descriptors

   TWorkspace *fWorkspace = nullptr; ///< Keeps the scratch memory used by the convolution implementation

private:
   size_t fPaddingHeight; ///< The number of zero layers added at the top and bottom of the input.
   size_t fPaddingWidth;  ///< The number of zero layers added at the left and right of the input.

   Tensor_t fInputActivation; ///< First output of this layer after conv, before activation.

   std::vector<int> fBackwardIndices; ///< Vector of indices used for a fast Im2Col in the backward pass

   EActivationFunction fF; ///< Activation function of the layer.
   ERegularization fReg;   ///< The regularization method.
   Scalar_t fWeightDecay;  ///< The weight decay.

   Tensor_t fForwardTensor; ///< Cache tensor used for speeding up the forward pass.

   void InitializeDescriptors();
   void ReleaseDescriptors();
   void InitializeWorkspace();
   void FreeWorkspace();

public:
   /*! Constructor. */
   TConvLayer(size_t BatchSize, size_t InputDepth, size_t InputHeight, size_t InputWidth, size_t Depth, EInitialization Init,
              size_t FilterHeight, size_t FilterWidth, size_t StrideRows, size_t StrideCols, size_t PaddingHeight,
              size_t PaddingWidth, Scalar_t DropoutProbability, EActivationFunction f, ERegularization Reg,
              Scalar_t WeightDecay);

   /*! Copy the conv layer provided as a pointer */
   TConvLayer(TConvLayer<Architecture_t> *layer);

   /*! Copy constructor. */
   TConvLayer(const TConvLayer &);

   /*! Destructor. */
   virtual ~TConvLayer();

   // virtual void Initialize();

   /*! Computes activation of the layer for the given input. The input
    *  must be in 3D tensor form with the different matrices corresponding to
    *  different events in the batch. Computes activations as well as
    *  the first partial derivative of the activation function at those
    *  activations. */
   void Forward(Tensor_t &input, bool applyDropout = false);

   /*! Compute weight, bias and activation gradients. Uses the precomputed
    *  first partial derivatives of the activation function computed during
    *  forward propagation and modifies them. Must only be called directly
    *  after the corresponding call to Forward(...). */
   void Backward(Tensor_t &gradients_backward, const Tensor_t &activations_backward);
   ////           Tensor_t &inp1, Tensor_t &inp2);

   /*! Prints the info about the layer. */
   void Print() const;

   /*! Writes the information and the weights about the layer in an XML node. */
   virtual void AddWeightsXMLTo(void *parent);

   /*! Read the information and the weights about the layer from XML node. */
   virtual void ReadWeightsFromXML(void *parent);

   /*! Getters */
   size_t GetFilterDepth() const { return fFilterDepth; }
   size_t GetFilterHeight() const { return fFilterHeight; }
   size_t GetFilterWidth() const { return fFilterWidth; }

   size_t GetStrideRows() const { return fStrideRows; }
   size_t GetStrideCols() const { return fStrideCols; }

   size_t GetPaddingHeight() const { return fPaddingHeight; }
   size_t GetPaddingWidth() const { return fPaddingWidth; }

   size_t GetNLocalViewPixels() const { return fNLocalViewPixels; }
   size_t GetNLocalViews() const { return fNLocalViews; }

   Scalar_t GetDropoutProbability() const { return fDropoutProbability; }

   const Tensor_t &GetInputActivation() const { return fInputActivation; }
   Tensor_t &GetInputActivation() { return fInputActivation; }

   Matrix_t &GetInputActivationAt(size_t i) { return fInputActivation[i]; }
   const Matrix_t &GetInputActivationAt(size_t i) const { return fInputActivation[i]; }

   const Tensor_t &GetForwardMatrices() const { return fForwardTensor; }
   Tensor_t &GetForwardMatrices() { return fForwardTensor; }

   EActivationFunction GetActivationFunction() const { return fF; }
   ERegularization GetRegularization() const { return fReg; }
   Scalar_t GetWeightDecay() const { return fWeightDecay; }

   // The following getters are used for testing
   TDescriptors *GetDescriptors() { return fDescriptors; }
   const TDescriptors *GetDescriptors() const { return fDescriptors; }

   TWorkspace *GetWorkspace() { return fWorkspace; }
   const TWorkspace *GetWorkspace() const { return fWorkspace; }
};
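
// Minimal usage sketch (not part of this header). It assumes the reference CPU backend
// TCpu<Double_t> from "TMVA/DNN/Architectures/Cpu.h"; the enum values come from
// "TMVA/DNN/Functions.h". In practice the layer is normally created and driven by
// TMVA::DNN::TDeepNet rather than used standalone.
//
//    using Arch_t      = TMVA::DNN::TCpu<Double_t>;
//    using ConvLayer_t = TMVA::DNN::CNN::TConvLayer<Arch_t>;
//
//    // 32x32 RGB input, 16 filters of 5x5, stride 1, padding 2 -> output is 16 x 32 x 32
//    ConvLayer_t conv(/*BatchSize*/ 4, /*InputDepth*/ 3, /*InputHeight*/ 32, /*InputWidth*/ 32,
//                     /*Depth*/ 16, TMVA::DNN::EInitialization::kGauss,
//                     /*FilterHeight*/ 5, /*FilterWidth*/ 5, /*StrideRows*/ 1, /*StrideCols*/ 1,
//                     /*PaddingHeight*/ 2, /*PaddingWidth*/ 2, /*DropoutProbability*/ 1.0,
//                     TMVA::DNN::EActivationFunction::kRelu, TMVA::DNN::ERegularization::kNone,
//                     /*WeightDecay*/ 0.0);
//
//    conv.Initialize();                            // initialize weights (inherited from VGeneralLayer)
//    ConvLayer_t::Tensor_t input(4, 3, 32 * 32);   // one (depth x height*width) matrix per event
//    conv.Forward(input, /*applyDropout=*/ false); // result available via conv.GetOutput()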

//
//
// Conv Layer Class - Implementation
//______________________________________________________________________________
template <typename Architecture_t>
TConvLayer<Architecture_t>::TConvLayer(size_t batchSize, size_t inputDepth, size_t inputHeight, size_t inputWidth,
                                       size_t depth, EInitialization init, size_t filterHeight, size_t filterWidth,
                                       size_t strideRows, size_t strideCols, size_t paddingHeight, size_t paddingWidth,
                                       Scalar_t dropoutProbability, EActivationFunction f, ERegularization reg,
                                       Scalar_t weightDecay)
   : VGeneralLayer<Architecture_t>(batchSize, inputDepth, inputHeight, inputWidth, depth,
                                   calculateDimension(inputHeight, filterHeight, paddingHeight, strideRows),
                                   calculateDimension(inputWidth, filterWidth, paddingWidth, strideCols),
                                   1, depth, calculateNLocalViewPixels(inputDepth, filterHeight, filterWidth),
                                   1, depth, 1, batchSize, depth,
                                   calculateNLocalViews(inputHeight, filterHeight, paddingHeight, strideRows,
                                                        inputWidth, filterWidth, paddingWidth, strideCols),
                                   init),
     fFilterDepth(inputDepth), fFilterHeight(filterHeight), fFilterWidth(filterWidth), fStrideRows(strideRows),
     fStrideCols(strideCols), fNLocalViewPixels(calculateNLocalViewPixels(inputDepth, filterHeight, filterWidth)),
     fNLocalViews(calculateNLocalViews(inputHeight, filterHeight, paddingHeight, strideRows,
                                       inputWidth, filterWidth, paddingWidth, strideCols)),
     fDropoutProbability(dropoutProbability), fPaddingHeight(paddingHeight), fPaddingWidth(paddingWidth),
     fInputActivation(), fF(f), fReg(reg), fWeightDecay(weightDecay)
{
   /** Each element in the vector is a `T_Matrix` representing an event, therefore `vec.size() == batchSize`.
    *  Cells in these matrices are distributed in the following manner:
    *  Each row represents a single feature map, therefore we have `nRows == depth`.
    *  Each column represents a single pixel in that feature map, therefore we have `nCols == nLocalViews`.
    **/
   fInputActivation = Tensor_t(batchSize, depth, fNLocalViews);           // create tensor (shape is B x C x LV)
   fForwardTensor = Tensor_t(batchSize, fNLocalViews, fNLocalViewPixels); // im2col cache (shape is B x LV x LVpixels)

   InitializeDescriptors();
   InitializeWorkspace();
}

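// Note on the shapes set up above (read off the VGeneralLayer base-class arguments, assuming
// the usual (weights, biases, output) ordering of that constructor): the layer owns a single
// weight matrix of shape (depth x nLocalViewPixels), i.e. one flattened
// (inputDepth * filterHeight * filterWidth) receptive field per output feature map, a
// (depth x 1) bias matrix, and an output tensor of shape (batchSize x depth x nLocalViews).
// The convolution can therefore be evaluated as a matrix product between the weights and the
// per-event im2col matrix cached in fForwardTensor.
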
//______________________________________________________________________________
template <typename Architecture_t>
TConvLayer<Architecture_t>::TConvLayer(TConvLayer<Architecture_t> *layer)
   : VGeneralLayer<Architecture_t>(layer), fFilterDepth(layer->GetFilterDepth()),
     fFilterHeight(layer->GetFilterHeight()), fFilterWidth(layer->GetFilterWidth()),
     fStrideRows(layer->GetStrideRows()), fStrideCols(layer->GetStrideCols()),
     fNLocalViewPixels(layer->GetNLocalViewPixels()), fNLocalViews(layer->GetNLocalViews()),
     fDropoutProbability(layer->GetDropoutProbability()), fPaddingHeight(layer->GetPaddingHeight()),
     fPaddingWidth(layer->GetPaddingWidth()),
     fInputActivation(layer->GetInputActivation().GetShape()),
     fF(layer->GetActivationFunction()),
     fReg(layer->GetRegularization()), fWeightDecay(layer->GetWeightDecay()),
     fForwardTensor(layer->GetForwardMatrices().GetShape())
{
   InitializeDescriptors();
   InitializeWorkspace();
}

//______________________________________________________________________________
template <typename Architecture_t>
TConvLayer<Architecture_t>::TConvLayer(const TConvLayer &convLayer)
   : VGeneralLayer<Architecture_t>(convLayer), fFilterDepth(convLayer.fFilterDepth),
     fFilterHeight(convLayer.fFilterHeight), fFilterWidth(convLayer.fFilterWidth), fStrideRows(convLayer.fStrideRows),
     fStrideCols(convLayer.fStrideCols), fNLocalViewPixels(convLayer.fNLocalViewPixels),
     fNLocalViews(convLayer.fNLocalViews), fDropoutProbability(convLayer.fDropoutProbability),
     fPaddingHeight(convLayer.fPaddingHeight), fPaddingWidth(convLayer.fPaddingWidth),
     fInputActivation(convLayer.GetInputActivation().GetShape()),
     fF(convLayer.fF),
     fReg(convLayer.fReg), fWeightDecay(convLayer.fWeightDecay),
     fForwardTensor(convLayer.GetForwardMatrices().GetShape())
{
   InitializeDescriptors();
   InitializeWorkspace();
}

//______________________________________________________________________________
// FIXME: Add function for cudaFree
template <typename Architecture_t>
TConvLayer<Architecture_t>::~TConvLayer()
{
   // std::cout << "!!!!Delete conv layer " << this->GetOutput().GetShape()[1] << " " << this->GetOutput().GetShape()[2] << " " << this->GetOutput().GetShape()[3] << std::endl;
   if (fDescriptors) {
      ReleaseDescriptors();
      delete fDescriptors;
   }

   if (fWorkspace) {
      FreeWorkspace();
      delete fWorkspace;
   }
}


//______________________________________________________________________________
template <typename Architecture_t>
auto TConvLayer<Architecture_t>::Forward(Tensor_t &input, bool /*applyDropout*/) -> void
{
   TConvParams params(this->GetBatchSize(), this->GetInputDepth(), this->GetInputHeight(), this->GetInputWidth(),
                      this->GetDepth(), this->GetFilterHeight(), this->GetFilterWidth(),
                      this->GetStrideRows(), this->GetStrideCols(), this->GetPaddingHeight(), this->GetPaddingWidth());

   // R__ASSERT( input.size() > 0);
   Architecture_t::ConvLayerForward(this->GetOutput(), this->GetInputActivation(), input, this->GetWeightsAt(0),
                                    this->GetBiasesAt(0), params, this->GetActivationFunction(),
                                    this->GetForwardMatrices(), (TCNNDescriptors<TConvLayer<Architecture_t>> &)(*fDescriptors),
                                    (TCNNWorkspace<TConvLayer<Architecture_t>> &)(*fWorkspace));
}

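// Architecture_t::ConvLayerForward is expected to fill this->GetOutput() (shape
// batchSize x depth x nLocalViews) and this->GetInputActivation() with the values before
// the activation function is applied; GetForwardMatrices() passes the fForwardTensor
// cache so backends that implement the convolution as im2col + GEMM can reuse the
// unrolled input in the backward pass. (Backend behaviour inferred from the data
// members above, not stated in this header.)
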
//______________________________________________________________________________
template <typename Architecture_t>
auto TConvLayer<Architecture_t>::Backward(Tensor_t &gradients_backward,
                                          const Tensor_t &activations_backward) -> void
//                                        Tensor_t & /*inp1*/, Tensor_t & /*inp2*/) -> void
{
   Architecture_t::ConvLayerBackward(
      gradients_backward, this->GetWeightGradientsAt(0), this->GetBiasGradientsAt(0), this->GetInputActivation(),
      this->GetActivationGradients(), this->GetWeightsAt(0), activations_backward, this->GetOutput(),
      this->GetActivationFunction(),
      (TCNNDescriptors<TConvLayer<Architecture_t>> &)(*fDescriptors),
      (TCNNWorkspace<TConvLayer<Architecture_t>> &)(*fWorkspace),
      this->GetBatchSize(), this->GetInputHeight(), this->GetInputWidth(), this->GetDepth(),
      this->GetHeight(), this->GetWidth(), this->GetFilterDepth(), this->GetFilterHeight(),
      this->GetFilterWidth(), this->GetNLocalViews());

   addRegularizationGradients<Architecture_t>(this->GetWeightGradientsAt(0), this->GetWeightsAt(0),
                                              this->GetWeightDecay(), this->GetRegularization());
}

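// The call to addRegularizationGradients adds the derivative of the weight-decay penalty
// (L1 or L2, selected by fReg) to the weight gradients, so the optimizer effectively
// minimizes Loss + weightDecay * R(W).
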
//______________________________________________________________________________
template <typename Architecture_t>
void TConvLayer<Architecture_t>::Print() const
{
   std::cout << " CONV LAYER: \t";
   std::cout << "( W = " << this->GetWidth() << " , ";
   std::cout << " H = " << this->GetHeight() << " , ";
   std::cout << " D = " << this->GetDepth() << " ) ";

   std::cout << "\t Filter ( W = " << this->GetFilterWidth() << " , ";
   std::cout << " H = " << this->GetFilterHeight() << " ) ";
   // std::cout << "\t Local Views = " << this->GetNLocalViews() << " " ;
   if (this->GetOutput().GetSize() > 0) {
      std::cout << "\tOutput = ( " << this->GetOutput().GetFirstSize() << " , "
                << this->GetOutput().GetCSize() << " , " << this->GetOutput().GetHSize() << " , " << this->GetOutput().GetWSize()
                << " ) ";
   }
   std::vector<std::string> activationNames = { "Identity", "Relu", "Sigmoid", "Tanh", "SymmRelu", "SoftSign", "Gauss" };
   std::cout << "\t Activation Function = ";
   std::cout << activationNames[ static_cast<int>(fF) ] << std::endl;
}

//______________________________________________________________________________
template <typename Architecture_t>
void TConvLayer<Architecture_t>::AddWeightsXMLTo(void *parent)
{
   auto layerxml = gTools().xmlengine().NewChild(parent, nullptr, "ConvLayer");

   gTools().xmlengine().NewAttr(layerxml, nullptr, "Depth", gTools().StringFromInt(this->GetDepth()));
   gTools().xmlengine().NewAttr(layerxml, nullptr, "FilterHeight", gTools().StringFromInt(this->GetFilterHeight()));
   gTools().xmlengine().NewAttr(layerxml, nullptr, "FilterWidth", gTools().StringFromInt(this->GetFilterWidth()));
   gTools().xmlengine().NewAttr(layerxml, nullptr, "StrideRows", gTools().StringFromInt(this->GetStrideRows()));
   gTools().xmlengine().NewAttr(layerxml, nullptr, "StrideCols", gTools().StringFromInt(this->GetStrideCols()));
   gTools().xmlengine().NewAttr(layerxml, nullptr, "PaddingHeight", gTools().StringFromInt(this->GetPaddingHeight()));
   gTools().xmlengine().NewAttr(layerxml, nullptr, "PaddingWidth", gTools().StringFromInt(this->GetPaddingWidth()));

   int activationFunction = static_cast<int>(this->GetActivationFunction());
   gTools().xmlengine().NewAttr(layerxml, nullptr, "ActivationFunction",
                                TString::Itoa(activationFunction, 10));

   // write weights and bias matrix
   this->WriteMatrixToXML(layerxml, "Weights", this->GetWeightsAt(0));
   this->WriteMatrixToXML(layerxml, "Biases", this->GetBiasesAt(0));
}

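// The resulting XML fragment has roughly the following shape (attribute values are
// illustrative; the matrix encoding is delegated to the inherited WriteMatrixToXML helper):
//
//    <ConvLayer Depth="16" FilterHeight="5" FilterWidth="5" StrideRows="1" StrideCols="1"
//               PaddingHeight="2" PaddingWidth="2" ActivationFunction="1">
//       <Weights ... /> <Biases ... />
//    </ConvLayer>
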
//______________________________________________________________________________
template <typename Architecture_t>
void TConvLayer<Architecture_t>::ReadWeightsFromXML(void *parent)
{
   // read weights and biases
   // the meta information is read before because it is needed before creating the Conv layer
   this->ReadMatrixXML(parent, "Weights", this->GetWeightsAt(0));
   this->ReadMatrixXML(parent, "Biases", this->GetBiasesAt(0));
}

template <typename Architecture_t>
size_t TConvLayer<Architecture_t>::calculateDimension(size_t imgDim, size_t fltDim, size_t padding, size_t stride)
{
   size_t temp = imgDim - fltDim + 2 * padding;
   if (temp % stride || temp + stride <= 0) {
      Fatal("calculateDimension", "Not compatible hyper parameters for layer - (imageDim, filterDim, padding, stride) "
            "%zu, %zu, %zu, %zu", imgDim, fltDim, padding, stride);
   }
   return temp / stride + 1;
}

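// Worked example: a 32-pixel wide input with a 5-pixel wide filter, padding 2 and stride 1
// gives (32 - 5 + 2*2) / 1 + 1 = 32, i.e. a "same"-size output. With stride 2 the same
// configuration is rejected, because (32 - 5 + 4) = 31 is not divisible by 2.
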
template <typename Architecture_t>
size_t TConvLayer<Architecture_t>::calculateNLocalViews(size_t inputHeight, size_t filterHeight, size_t paddingHeight,
                                                        size_t strideRows, size_t inputWidth, size_t filterWidth,
                                                        size_t paddingWidth, size_t strideCols)
{
   int height = calculateDimension(inputHeight, filterHeight, paddingHeight, strideRows);
   int width = calculateDimension(inputWidth, filterWidth, paddingWidth, strideCols);

   return height * width;
}

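// For the example above (32x32 input, 5x5 filter, padding 2, stride 1) this yields
// 32 * 32 = 1024 local views, which is exactly the number of output pixels per feature map.
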
//______________________________________________________________________________
template <typename Architecture_t>
void TConvLayer<Architecture_t>::InitializeDescriptors() {
   Architecture_t::InitializeConvDescriptors(fDescriptors, this);
}

template <typename Architecture_t>
void TConvLayer<Architecture_t>::ReleaseDescriptors() {
   Architecture_t::ReleaseConvDescriptors(fDescriptors);
}

//______________________________________________________________________________
template <typename Architecture_t>
void TConvLayer<Architecture_t>::InitializeWorkspace() {
   TConvParams params(this->GetBatchSize(), this->GetInputDepth(), this->GetInputHeight(), this->GetInputWidth(),
                      this->GetDepth(), this->GetFilterHeight(), this->GetFilterWidth(),
                      this->GetStrideRows(), this->GetStrideCols(), this->GetPaddingHeight(), this->GetPaddingWidth());

   Architecture_t::InitializeConvWorkspace(fWorkspace, fDescriptors, params, this);
}

template <typename Architecture_t>
void TConvLayer<Architecture_t>::FreeWorkspace() {
   Architecture_t::FreeConvWorkspace(fWorkspace);
}

//______________________________________________________________________________

} // namespace CNN
} // namespace DNN
} // namespace TMVA

#endif