ROOT 6.18/05 Reference Guide
MaxPoolLayer.h
// @(#)root/tmva/tmva/dnn:$Id$
// Author: Vladimir Ilievski

/************************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis        *
 * Package: TMVA                                                                    *
 * Class  : TMaxPoolLayer                                                           *
 * Web    : http://tmva.sourceforge.net                                             *
 *                                                                                  *
 * Description:                                                                     *
 *      Max Pool Deep Neural Network Layer                                          *
 *                                                                                  *
 * Authors (alphabetical):                                                          *
 *      Vladimir Ilievski      <ilievski.vladimir@live.com>  - CERN, Switzerland    *
 *                                                                                  *
 * Copyright (c) 2005-2015:                                                         *
 *      CERN, Switzerland                                                           *
 *      U. of Victoria, Canada                                                      *
 *      MPI-K Heidelberg, Germany                                                   *
 *      U. of Bonn, Germany                                                         *
 *                                                                                  *
 * Redistribution and use in source and binary forms, with or without              *
 * modification, are permitted according to the terms listed in LICENSE            *
 * (http://tmva.sourceforge.net/LICENSE)                                            *
 ***********************************************************************************/

#ifndef MAXPOOLLAYER_H_
#define MAXPOOLLAYER_H_

#include "TMatrix.h"

#include "TMVA/DNN/CNN/ConvLayer.h"
#include "TMVA/DNN/Functions.h"

#include <iostream>

namespace TMVA {
namespace DNN {
namespace CNN {
/** \class TMaxPoolLayer

    Generic Max Pooling Layer class.

    This generic Max Pooling Layer class represents a pooling layer of
    a CNN. It inherits all of the properties of the convolutional layer
    TConvLayer, but it overrides the propagation methods. In a sense, max pooling
    can be seen as non-linear convolution: a filter slides over the input and produces
    one element as a function of the elements within the receptive field.
    In addition to that, it contains a matrix of winning units.

    The height and width of the weights and biases are set to 0, since this
    layer does not contain any weights.

 */
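/* Illustrative sketch (added here for exposition, not part of the original
   documentation): a 2x2 pool with stride 2 applied to a single 4x4 channel.

       input              output        winning-unit index (row-major)
       1  3 | 2  0
       4  2 | 1  1   ->    4  2          4   2
       -----+-----
       0  1 | 5  2         7  5         12  10
       7  6 | 3  4

   Only the maximum of each receptive field is kept; the index of that maximum
   is the "winning unit" recorded in indexMatrix so that Backward can route the
   gradient to it. */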
template <typename Architecture_t>
class TMaxPoolLayer : public TConvLayer<Architecture_t> {

public:
   using Matrix_t = typename Architecture_t::Matrix_t;
   using Scalar_t = typename Architecture_t::Scalar_t;

private:
   std::vector<Matrix_t> indexMatrix; ///< Matrix of indices for the backward pass.

public:
   /*! Constructor. */
   TMaxPoolLayer(size_t BatchSize, size_t InputDepth, size_t InputHeight, size_t InputWidth, size_t FilterHeight,
                 size_t FilterWidth, size_t StrideRows, size_t StrideCols, Scalar_t DropoutProbability);

   /*! Copy the max pooling layer provided as a pointer */
   TMaxPoolLayer(TMaxPoolLayer<Architecture_t> *layer);

   /*! Copy constructor. */
   TMaxPoolLayer(const TMaxPoolLayer &);

   /*! Destructor. */
   ~TMaxPoolLayer();

   /*! Computes activation of the layer for the given input. The input
    * must be in 3D tensor form with the different matrices corresponding to
    * different events in the batch. It spatially downsamples the input
    * matrices. */
   void Forward(std::vector<Matrix_t> &input, bool applyDropout = false);

   /*! Depending on the winning units determined during the Forward pass,
    * it only forwards the derivatives to the right units in the previous
    * layer. Must only be called directly after the corresponding call
    * to Forward(...). */
   void Backward(std::vector<Matrix_t> &gradients_backward, const std::vector<Matrix_t> &activations_backward,
                 std::vector<Matrix_t> &inp1, std::vector<Matrix_t> &inp2);

   /*! Writes the information and the weights about the layer in an XML node. */
   virtual void AddWeightsXMLTo(void *parent);

   /*! Read the information and the weights about the layer from XML node. */
   virtual void ReadWeightsFromXML(void *parent);

   /*! Prints the info about the layer. */
   void Print() const;

   /*! Getters */
   const std::vector<Matrix_t> &GetIndexMatrix() const { return indexMatrix; }
   std::vector<Matrix_t> &GetIndexMatrix() { return indexMatrix; }

};
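// A minimal usage sketch (hypothetical, for illustration only; it assumes the
// reference architecture TMVA::DNN::TReference<double> is available and is not
// part of this header):
//
//    using Arch_t = TMVA::DNN::TReference<double>;
//    TMVA::DNN::CNN::TMaxPoolLayer<Arch_t> pool(/*BatchSize*/ 1, /*InputDepth*/ 1,
//                                               /*InputHeight*/ 4, /*InputWidth*/ 4,
//                                               /*FilterHeight*/ 2, /*FilterWidth*/ 2,
//                                               /*StrideRows*/ 2, /*StrideCols*/ 2,
//                                               /*DropoutProbability*/ 1.0);
//    std::vector<Arch_t::Matrix_t> input;
//    input.emplace_back(1, 16);   // one event: one 4x4 channel, flattened row-major
//    pool.Forward(input);         // pool.GetOutputAt(0) is now a 1 x 4 matrix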

//______________________________________________________________________________
template <typename Architecture_t>
TMaxPoolLayer<Architecture_t>::TMaxPoolLayer(size_t batchSize, size_t inputDepth, size_t inputHeight, size_t inputWidth,
                                             size_t filterHeight, size_t filterWidth, size_t strideRows,
                                             size_t strideCols, Scalar_t dropoutProbability)

   : TConvLayer<Architecture_t>(batchSize, inputDepth, inputHeight, inputWidth, inputDepth, EInitialization::kZero,
                                filterHeight, filterWidth, strideRows, strideCols, 0, 0, dropoutProbability,
                                EActivationFunction::kIdentity, ERegularization::kNone, 0),
     indexMatrix()
{
   for (size_t i = 0; i < this->GetBatchSize(); i++) {
      indexMatrix.emplace_back(this->GetDepth(), this->GetNLocalViews());
   }
}
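// Note (added, not in the original source): with zero padding, the TConvLayer
// base class computes the output geometry of this layer as
//    outputHeight = (inputHeight - filterHeight) / strideRows + 1
//    outputWidth  = (inputWidth  - filterWidth ) / strideCols + 1
//    nLocalViews  = outputHeight * outputWidth
// while the depth stays equal to inputDepth, since pooling acts on each
// channel independently.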

//______________________________________________________________________________
template <typename Architecture_t>
TMaxPoolLayer<Architecture_t>::TMaxPoolLayer(TMaxPoolLayer<Architecture_t> *layer)
   : TConvLayer<Architecture_t>(layer), indexMatrix()
{
   for (size_t i = 0; i < layer->GetBatchSize(); i++) {
      indexMatrix.emplace_back(layer->GetDepth(), layer->GetNLocalViews());
   }
}

//______________________________________________________________________________
template <typename Architecture_t>
TMaxPoolLayer<Architecture_t>::TMaxPoolLayer(const TMaxPoolLayer &layer)
   : TConvLayer<Architecture_t>(layer), indexMatrix()
{
   for (size_t i = 0; i < layer.fBatchSize; i++) {
      indexMatrix.emplace_back(layer.fDepth, layer.fNLocalViews);
   }
}

//______________________________________________________________________________
template <typename Architecture_t>
TMaxPoolLayer<Architecture_t>::~TMaxPoolLayer()
{
}

//______________________________________________________________________________
template <typename Architecture_t>
auto TMaxPoolLayer<Architecture_t>::Forward(std::vector<Matrix_t> &input, bool applyDropout) -> void
{
   for (size_t i = 0; i < this->GetBatchSize(); i++) {

      if (applyDropout && (this->GetDropoutProbability() != 1.0)) {
         Architecture_t::Dropout(input[i], this->GetDropoutProbability());
      }

      Architecture_t::Downsample(this->GetOutputAt(i), indexMatrix[i], input[i], this->GetInputHeight(),
                                 this->GetInputWidth(), this->GetFilterHeight(), this->GetFilterWidth(),
                                 this->GetStrideRows(), this->GetStrideCols());
   }
}
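// Added note (an inference from the calls above, not an original comment):
// input[i] has one row per input channel and inputHeight*inputWidth columns.
// Architecture_t::Downsample fills this->GetOutputAt(i), of size depth x
// nLocalViews, with the per-view maxima and records in indexMatrix[i] the
// column of each winning unit, which Backward reuses.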

//______________________________________________________________________________
template <typename Architecture_t>
auto TMaxPoolLayer<Architecture_t>::Backward(std::vector<Matrix_t> &gradients_backward,
                                             const std::vector<Matrix_t> & /*activations_backward*/,
                                             std::vector<Matrix_t> & /*inp1*/, std::vector<Matrix_t> &
                                             /*inp2*/) -> void
{
   for (size_t i = 0; i < this->GetBatchSize(); i++) {
      Architecture_t::MaxPoolLayerBackward(gradients_backward[i], this->GetActivationGradients()[i],
                                           this->GetIndexMatrix()[i],
                                           this->GetInputHeight(), this->GetInputWidth(),
                                           this->GetFilterHeight(), this->GetFilterWidth(),
                                           this->GetStrideRows(), this->GetStrideCols(), this->GetNLocalViews());
   }
}
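// Added note: MaxPoolLayerBackward scatters each activation gradient back to
// the single input column stored in indexMatrix during Forward; all other
// units of the receptive field receive a zero gradient, which is why the layer
// needs no weight or bias gradients of its own.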

//______________________________________________________________________________
template <typename Architecture_t>
auto TMaxPoolLayer<Architecture_t>::Print() const -> void
{
   std::cout << " POOL Layer: \t";
   std::cout << "( W = " << this->GetWidth() << " , ";
   std::cout << " H = " << this->GetHeight() << " , ";
   std::cout << " D = " << this->GetDepth() << " ) ";

   std::cout << "\t Filter ( W = " << this->GetFilterWidth() << " , ";
   std::cout << " H = " << this->GetFilterHeight() << " ) ";

   if (this->GetOutput().size() > 0) {
      std::cout << "\tOutput = ( " << this->GetOutput().size() << " , " << this->GetOutput()[0].GetNrows() << " , " << this->GetOutput()[0].GetNcols() << " ) ";
   }
   std::cout << std::endl;
}
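// For the 4x4 single-channel example sketched above, this prints something like
// (illustrative only; spacing and tabs come from the stream calls above):
//    POOL Layer:   ( W = 2 ,  H = 2 ,  D = 1 )    Filter ( W = 2 ,  H = 2 )   Output = ( 1 , 1 , 4 )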

//______________________________________________________________________________
template <typename Architecture_t>
void TMaxPoolLayer<Architecture_t>::AddWeightsXMLTo(void *parent)
{
   auto layerxml = gTools().xmlengine().NewChild(parent, 0, "MaxPoolLayer");

   // write maxpool layer info
   gTools().xmlengine().NewAttr(layerxml, 0, "FilterHeight", gTools().StringFromInt(this->GetFilterHeight()));
   gTools().xmlengine().NewAttr(layerxml, 0, "FilterWidth", gTools().StringFromInt(this->GetFilterWidth()));
   gTools().xmlengine().NewAttr(layerxml, 0, "StrideRows", gTools().StringFromInt(this->GetStrideRows()));
   gTools().xmlengine().NewAttr(layerxml, 0, "StrideCols", gTools().StringFromInt(this->GetStrideCols()));

}
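// The resulting XML node looks roughly like this (sketch with example values;
// only the pooling geometry is stored, since the layer has no weights):
//    <MaxPoolLayer FilterHeight="2" FilterWidth="2" StrideRows="2" StrideCols="2"/>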

//______________________________________________________________________________
template <typename Architecture_t>
void TMaxPoolLayer<Architecture_t>::ReadWeightsFromXML(void * /*parent*/)
{
   // all info is read before - nothing to do
}

} // namespace CNN
} // namespace DNN
} // namespace TMVA

#endif