Regularization.cxx

// @(#)root/tmva/tmva/dnn:$Id$
// Author: Simon Pfreundschuh 21/07/16

/*************************************************************************
 * Copyright (C) 2016, Simon Pfreundschuh                                *
 * All rights reserved.                                                  *
 *                                                                       *
 * For the licensing terms see $ROOTSYS/LICENSE.                         *
 * For the list of contributors see $ROOTSYS/README/CREDITS.             *
 *************************************************************************/

////////////////////////////////////////////////////////////////////////////
// Implementation of the regularization functionals and their gradients  //
// for the multi-threaded CPU backend, using ROOT's TThreadExecutor.     //
////////////////////////////////////////////////////////////////////////////

#include "TMVA/DNN/Architectures/Cpu.h"

namespace TMVA
{
namespace DNN
{

//______________________________________________________________________________
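/// L1 regularization functional: returns the sum of the absolute values of
/// all weight matrix elements, sum_i |w_i|. The elements are processed in
/// chunks of GetNWorkItems(nElements); each worker accumulates its chunk into
/// one slot of temp, and the partial sums are then reduced to the final value.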
template<typename AFloat>
AFloat TCpu<AFloat>::L1Regularization(const TCpuMatrix<AFloat> &Weights)
{
   const AFloat *data = Weights.GetRawDataPointer();

   size_t nElements = Weights.GetNoElements();
   size_t nSteps = TCpuMatrix<AFloat>::GetNWorkItems(nElements);

   // One partial sum per worker chunk of nSteps elements.
   std::vector<AFloat> temp(nElements / nSteps + 1);

   auto f = [&data, &temp, nElements, nSteps](UInt_t workerID)
   {
      size_t iMax = std::min(workerID + nSteps, nElements);
      size_t iWorker = workerID / nSteps;
      for (size_t i = workerID; i < iMax; ++i) {
         temp[iWorker] += fabs(data[i]);
      }
   };

   auto reduction = [](const std::vector<AFloat> &v)
   {
      return std::accumulate(v.begin(), v.end(), AFloat{});
   };
   // auto reduction = [](AFloat sum1, AFloat sum2)
   // {
   //    return sum1 + sum2;
   // };
   Weights.GetThreadExecutor().Foreach(f, ROOT::TSeqI(0, nElements, nSteps));
   return Weights.GetThreadExecutor().Reduce(temp, reduction);
}

//______________________________________________________________________________
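/// Adds the gradient of the L1 regularization term to the gradient matrix B,
/// element-wise: B_i += weightDecay * sign(A_i), where A holds the weights.
/// For matrices smaller than one work chunk the loop runs directly; otherwise
/// it is distributed over the thread executor if DL_USE_MTE is defined, and
/// executed serially chunk by chunk if not.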
template<typename AFloat>
void TCpu<AFloat>::AddL1RegularizationGradients(TCpuMatrix<AFloat> &B,
                                                const TCpuMatrix<AFloat> &A,
                                                AFloat weightDecay)
{
   AFloat *dataB = B.GetRawDataPointer();
   const AFloat *dataA = A.GetRawDataPointer();

   size_t nElements = B.GetNoElements();
   R__ASSERT(A.GetNoElements() == nElements);
   size_t nSteps = TCpuMatrix<AFloat>::GetNWorkItems(nElements);

   auto f = [&dataA, &dataB, weightDecay, nElements, nSteps](UInt_t workerID)
   {
      size_t iMax = std::min(workerID + nSteps, nElements);
      for (size_t i = workerID; i < iMax; ++i) {
         // d|w|/dw = sign(w); zero is mapped to +1.
         AFloat sign = (dataA[i] < 0.0) ? -1.0 : 1.0;
         dataB[i] += weightDecay * sign;
      }
      return 0;
   };

   if (nSteps < nElements) {
#ifdef DL_USE_MTE
      B.GetThreadExecutor().Foreach(f, ROOT::TSeqI(0, nElements, nSteps));
#else
      for (size_t i = 0; i < nElements; i += nSteps)
         f(i);
#endif
   } else {
      f(0);
   }
}

//______________________________________________________________________________
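/// L2 regularization functional: returns the sum of the squared weight matrix
/// elements, sum_i w_i^2, computed with the same chunked map-reduce scheme as
/// L1Regularization.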
template<typename AFloat>
AFloat TCpu<AFloat>::L2Regularization(const TCpuMatrix<AFloat> &Weights)
{
   const AFloat *data = Weights.GetRawDataPointer();

   size_t nElements = Weights.GetNoElements();
   size_t nSteps = TCpuMatrix<AFloat>::GetNWorkItems(nElements);

   // One partial sum per worker chunk of nSteps elements.
   std::vector<AFloat> temp(nElements / nSteps + 1);

   auto f = [&data, &temp, nElements, nSteps](UInt_t workerID)
   {
      size_t iMax = std::min(workerID + nSteps, nElements);
      size_t iWorker = workerID / nSteps;

      for (size_t i = workerID; i < iMax; ++i) {
         temp[iWorker] += data[i] * data[i];
      }
   };

   auto reduction = [](const std::vector<AFloat> &v)
   {
      return std::accumulate(v.begin(), v.end(), AFloat{});
   };
   // auto reduction = [](AFloat sum1, AFloat sum2)
   // {
   //    return sum1 + sum2;
   // };

   Weights.GetThreadExecutor().Foreach(f, ROOT::TSeqI(0, nElements, nSteps));
   return Weights.GetThreadExecutor().Reduce(temp, reduction);
}

//______________________________________________________________________________
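/// Adds the gradient of the L2 regularization term to the gradient matrix B,
/// element-wise: B_i += 2 * weightDecay * A_i, where A holds the weights.
/// Dispatch mirrors AddL1RegularizationGradients.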
template<typename AFloat>
void TCpu<AFloat>::AddL2RegularizationGradients(TCpuMatrix<AFloat> &B,
                                                const TCpuMatrix<AFloat> &A,
                                                AFloat weightDecay)
{
   AFloat *dataB = B.GetRawDataPointer();
   const AFloat *dataA = A.GetRawDataPointer();

   size_t nElements = B.GetNoElements();
   R__ASSERT(A.GetNoElements() == nElements);
   size_t nSteps = TCpuMatrix<AFloat>::GetNWorkItems(nElements);

   auto f = [&dataA, &dataB, weightDecay, nElements, nSteps](UInt_t workerID)
   {
      size_t iMax = std::min(workerID + nSteps, nElements);
      for (size_t i = workerID; i < iMax; ++i) {
         // d(w^2)/dw = 2w.
         dataB[i] += 2.0 * weightDecay * dataA[i];
      }
      return 0;
   };

   if (nSteps < nElements) {
#ifdef DL_USE_MTE
      B.GetThreadExecutor().Foreach(f, ROOT::TSeqI(0, nElements, nSteps));
#else
      for (size_t i = 0; i < nElements; i += nSteps)
         f(i);
#endif
   } else {
      f(0);
   }
}

} // namespace DNN
} // namespace TMVA
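
For orientation, the following is a minimal usage sketch, not part of the file above. It assumes the TCpu architecture class declared in TMVA/DNN/Architectures/Cpu.h (instantiated for Double_t, as in the ROOT build); the function name RegularizationExample is hypothetical, and the matrices would normally be filled by the surrounding training code.

// Minimal usage sketch (illustrative; names above the TCpu/TCpuMatrix API
// are assumptions, not part of Regularization.cxx).
#include "TMVA/DNN/Architectures/Cpu.h"
#include "TROOT.h"

using TMVA::DNN::TCpu;
using TMVA::DNN::TCpuMatrix;

void RegularizationExample()
{
   ROOT::EnableImplicitMT();         // give TThreadExecutor a thread pool

   TCpuMatrix<Double_t> W(100, 50);  // weights, filled by the training code
   TCpuMatrix<Double_t> G(100, 50);  // accumulated loss gradients

   // Penalty terms that would be added to the loss.
   Double_t l1 = TCpu<Double_t>::L1Regularization(W);
   Double_t l2 = TCpu<Double_t>::L2Regularization(W);

   // Add the decay-term derivatives to the existing gradients in place.
   TCpu<Double_t>::AddL1RegularizationGradients(G, W, 1.e-4);
   TCpu<Double_t>::AddL2RegularizationGradients(G, W, 1.e-4);

   (void)l1; (void)l2;
}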