#include <array>
#include <cmath>
#include <iostream>
#include <string>
#include <vector>

#ifndef IClassifierReader__def
#define IClassifierReader__def

class IClassifierReader {

 public:

   // constructor
   IClassifierReader() : fStatusIsClean( true ) {}
   virtual ~IClassifierReader() {}

   // return classifier response
   virtual double GetMvaValue( const std::vector<double>& inputValues ) const = 0;

   // returns classifier status
   bool IsStatusClean() const { return fStatusIsClean; }

 protected:

   bool fStatusIsClean; // set to false by derived classes if a problem is detected

};

#endif
class ReadMLPBNN : public IClassifierReader {

 public:

   // constructor
   ReadMLPBNN( std::vector<std::string>& theInputVars )
      : IClassifierReader(),
        fClassName( "ReadMLPBNN" ),
        fNvars( 4 ),
        fIsNormalised( false )
   {
      // the training input variables
      const char* inputVars[] = { "var1+var2", "var1-var2", "var3", "var4" };

      // sanity checks
      if (theInputVars.size() <= 0) {
         std::cout << "Problem in class \"" << fClassName << "\": empty input vector" << std::endl;
         fStatusIsClean = false;
      }

      if (theInputVars.size() != fNvars) {
         std::cout << "Problem in class \"" << fClassName << "\": mismatch in number of input values: "
                   << theInputVars.size() << " != " << fNvars << std::endl;
         fStatusIsClean = false;
      }

      // validate input variable names
      for (size_t ivar = 0; ivar < theInputVars.size(); ivar++) {
         if (theInputVars[ivar] != inputVars[ivar]) {
            std::cout << "Problem in class \"" << fClassName << "\": mismatch in input variable names" << std::endl
                      << " for variable [" << ivar << "]: " << theInputVars[ivar].c_str()
                      << " != " << inputVars[ivar] << std::endl;
            fStatusIsClean = false;
         }
      }

      // normalisation limits (unused here, since fIsNormalised is false)
      for (size_t ivar = 0; ivar < fNvars; ivar++) {
         fVmin[ivar] = 0;
         fVmax[ivar] = 0;
      }

      // initialize network weights and the input variable transformation
      Initialize();
      InitTransform();
   }
   // destructor
   virtual ~ReadMLPBNN() {
      Clear(); // method-specific clean-up
   }

   // the classifier response
   // "inputValues" is a vector of input values in the same order as the
   // variables given to the constructor
   double GetMvaValue( const std::vector<double>& inputValues ) const;
 private:

   // method-specific destructor
   void Clear();

   // input variable transformation
   double fMin_1[3][4];
   double fMax_1[3][4];
   void InitTransform_1();
   void Transform_1( std::vector<double> & iv, int sigOrBgd ) const;
   void InitTransform();
   void Transform( std::vector<double> & iv, int sigOrBgd ) const;
   // common member variables
   const char* fClassName;

   const size_t fNvars;
   size_t GetNvar() const { return fNvars; }
   // normalisation of input variables
   const bool fIsNormalised;
   bool IsNormalised() const { return fIsNormalised; }

   double fVmin[4];
   double fVmax[4];
   double NormVariable( double x, double xmin, double xmax ) const {
      // normalise to output range: [-1, 1]
      return 2*(x - xmin)/(xmax - xmin) - 1.0;
   }
   // initialize internal variables
   void Initialize();
   double GetMvaValue__( const std::vector<double>& inputValues ) const;

   // private members (method specific)
   double ActivationFnc(double x) const;
   double OutputActivationFnc(double x) const;

   // network geometry and weights
   int fLayerSize[3];
   double fWeightMatrix0to1[10][5];   // weight matrix from layer 0 to 1
   double fWeightMatrix1to2[1][10];   // weight matrix from layer 1 to 2
};

inline void ReadMLPBNN::Initialize()
{
   // network structure: 4 inputs + bias, 9 hidden neurons + bias, 1 output
   fLayerSize[0] = 5;
   fLayerSize[1] = 10;
   fLayerSize[2] = 1;

   // weight matrix from layer 0 to 1
   fWeightMatrix0to1[0][0] = -0.0500313211768323;
   fWeightMatrix0to1[1][0] = 1.7094566516032;
   fWeightMatrix0to1[2][0] = 0.93080142586609;
   fWeightMatrix0to1[3][0] = -0.480727310995468;
   fWeightMatrix0to1[4][0] = -2.43366441685054;
   fWeightMatrix0to1[5][0] = -2.5252185908736;
   fWeightMatrix0to1[6][0] = -0.792746559485682;
   fWeightMatrix0to1[7][0] = 2.12833556673629;
   fWeightMatrix0to1[8][0] = -2.17383777190916;
   fWeightMatrix0to1[0][1] = -1.13105790138881;
   fWeightMatrix0to1[1][1] = -1.2627263837014;
   fWeightMatrix0to1[2][1] = -0.399373485376653;
   fWeightMatrix0to1[3][1] = -0.546688911449644;
   fWeightMatrix0to1[4][1] = -0.977739357549186;
   fWeightMatrix0to1[5][1] = -0.316705000878231;
   fWeightMatrix0to1[6][1] = 0.460688878476911;
   fWeightMatrix0to1[7][1] = -0.600184930633648;
   fWeightMatrix0to1[8][1] = 1.36008294033747;
   fWeightMatrix0to1[0][2] = 0.0164823350100334;
   fWeightMatrix0to1[1][2] = 1.22292494112964;
   fWeightMatrix0to1[2][2] = -0.0739797017126651;
   fWeightMatrix0to1[3][2] = -2.20881551110752;
   fWeightMatrix0to1[4][2] = -0.699094360711102;
   fWeightMatrix0to1[5][2] = -0.617450611502115;
   fWeightMatrix0to1[6][2] = -1.09406069992126;
   fWeightMatrix0to1[7][2] = -0.244288559372016;
   fWeightMatrix0to1[8][2] = 0.851765786859981;
   fWeightMatrix0to1[0][3] = -3.05457127456281;
   fWeightMatrix0to1[1][3] = -1.57468318053161;
   fWeightMatrix0to1[2][3] = 0.734731808606291;
   fWeightMatrix0to1[3][3] = 2.54004306064239;
   fWeightMatrix0to1[4][3] = 4.41806952492387;
   fWeightMatrix0to1[5][3] = 0.739452971127836;
   fWeightMatrix0to1[6][3] = 0.196992348311525;
   fWeightMatrix0to1[7][3] = -0.563671368370465;
   fWeightMatrix0to1[8][3] = 1.33179184844964;
   fWeightMatrix0to1[0][4] = 0.274652139714499;
   fWeightMatrix0to1[1][4] = 3.0367778346693;
   fWeightMatrix0to1[2][4] = -1.03526804982809;
   fWeightMatrix0to1[3][4] = -0.519272686570194;
   fWeightMatrix0to1[4][4] = -0.728282952959732;
   fWeightMatrix0to1[5][4] = 0.147779857805122;
   fWeightMatrix0to1[6][4] = 2.00157488875002;
   fWeightMatrix0to1[7][4] = 0.338330302768732;
   fWeightMatrix0to1[8][4] = -1.06817074486656;
   // weight matrix from layer 1 to 2
   fWeightMatrix1to2[0][0] = -3.07156048041761;
   fWeightMatrix1to2[0][1] = -0.0691376707884641;
   fWeightMatrix1to2[0][2] = -0.722861328862227;
   fWeightMatrix1to2[0][3] = 1.63802320738992;
   fWeightMatrix1to2[0][4] = 5.34057061614789;
   fWeightMatrix1to2[0][5] = 2.85130125091591;
   fWeightMatrix1to2[0][6] = 2.18425548381207;
   fWeightMatrix1to2[0][7] = -0.884293411297697;
   fWeightMatrix1to2[0][8] = 0.494073713733155;
   fWeightMatrix1to2[0][9] = -0.770031394310278;
}
inline double ReadMLPBNN::GetMvaValue__( const std::vector<double>& inputValues ) const
{
   if (inputValues.size() != (unsigned int)fLayerSize[0]-1) {
      std::cout << "Input vector needs to be of size " << fLayerSize[0]-1 << std::endl;
      return 0;
   }

   std::array<double, 5>  fWeights0 {{}};
   std::array<double, 10> fWeights1 {{}};
   std::array<double, 1>  fWeights2 {{}};
   fWeights0.back() = 1.;   // bias node of the input layer
   fWeights1.back() = 1.;   // bias node of the hidden layer

   // copy the input values into the input layer
   for (int i = 0; i < fLayerSize[0]-1; i++)
      fWeights0[i] = inputValues[i];

   // layer 0 to 1
   for (int o = 0; o < fLayerSize[1]-1; o++) {
      for (int i = 0; i < fLayerSize[0]; i++) {
         double inputVal = fWeightMatrix0to1[o][i] * fWeights0[i];
         fWeights1[o] += inputVal;
      }
      fWeights1[o] = ActivationFnc(fWeights1[o]);
   }
   // layer 1 to 2
   for (int o = 0; o < fLayerSize[2]; o++) {
      for (int i = 0; i < fLayerSize[1]; i++) {
         double inputVal = fWeightMatrix1to2[o][i] * fWeights1[i];
         fWeights2[o] += inputVal;
      }
      fWeights2[o] = OutputActivationFnc(fWeights2[o]);
   }

   return fWeights2[0];
}
double ReadMLPBNN::ActivationFnc(double x) const {
   // hidden-layer activation function (tanh assumed, the common TMVA MLP choice)
   return tanh(x);
}
double ReadMLPBNN::OutputActivationFnc(double x) const {
   // sigmoid output activation
   return 1.0/(1.0+exp(-x));
}
inline void ReadMLPBNN::Clear()
{
   // method-specific clean-up (nothing to clear for this network)
}
inline double ReadMLPBNN::GetMvaValue( const std::vector<double>& inputValues ) const
{
   // classifier response value
   double retval = 0;

   // classifier response, sanity check first
   if (!IsStatusClean()) {
      std::cout << "Problem in class \"" << fClassName << "\": cannot return classifier response"
                << " because status is dirty" << std::endl;
   }
   else {
      if (IsNormalised()) {
         // normalise variables
         std::vector<double> iV;
         iV.reserve(inputValues.size());
         int ivar = 0;
         for (std::vector<double>::const_iterator varIt = inputValues.begin();
              varIt != inputValues.end(); varIt++, ivar++) {
            iV.push_back(NormVariable( *varIt, fVmin[ivar], fVmax[ivar] ));
         }
         Transform( iV, -1 );
         retval = GetMvaValue__( iV );
      }
      else {
         std::vector<double> iV;
         int ivar = 0;
         for (std::vector<double>::const_iterator varIt = inputValues.begin();
              varIt != inputValues.end(); varIt++, ivar++) {
            iV.push_back(*varIt);
         }
         Transform( iV, -1 );
         retval = GetMvaValue__( iV );
      }
   }

   return retval;
}
inline void ReadMLPBNN::InitTransform_1()
{
   // Normalization transformation, initialisation
   fMin_1[0][0] = -4.94358778;
   fMax_1[0][0] = 6.3994679451;
   fMin_1[1][0] = -8.14423561096;
   fMax_1[1][0] = 7.26972866058;
   fMin_1[2][0] = -8.14423561096;
   fMax_1[2][0] = 7.26972866058;
   fMin_1[0][1] = -3.96643972397;
   fMax_1[0][1] = 3.11266636848;
   fMin_1[1][1] = -3.25508260727;
   fMax_1[1][1] = 4.0258936882;
   fMin_1[2][1] = -3.96643972397;
   fMax_1[2][1] = 4.0258936882;
   fMin_1[0][2] = -2.78645992279;
   fMax_1[0][2] = 3.50111722946;
   fMin_1[1][2] = -5.03730010986;
   fMax_1[1][2] = 4.27845287323;
   fMin_1[2][2] = -5.03730010986;
   fMax_1[2][2] = 4.27845287323;
   fMin_1[0][3] = -2.42712664604;
   fMax_1[0][3] = 4.5351858139;
   fMin_1[1][3] = -5.95050764084;
   fMax_1[1][3] = 4.64035463333;
   fMin_1[2][3] = -5.95050764084;
   fMax_1[2][3] = 4.64035463333;
}
inline void ReadMLPBNN::Transform_1( std::vector<double>& iv, int cls) const
{
   // Normalization transformation
   if (cls < 0 || cls > 2) {
      // fall back to the last entry, which holds the min/max over all classes
      cls = 2;
   }

   const int nVar = 4;

   // get indices of used variables
   static std::vector<int> indicesGet;
   static std::vector<int> indicesPut;

   if ( indicesGet.empty() ) {
      indicesGet.reserve(fNvars);
      indicesGet.push_back( 0);
      indicesGet.push_back( 1);
      indicesGet.push_back( 2);
      indicesGet.push_back( 3);
   }
   if ( indicesPut.empty() ) {
      indicesPut.reserve(fNvars);
      indicesPut.push_back( 0);
      indicesPut.push_back( 1);
      indicesPut.push_back( 2);
      indicesPut.push_back( 3);
   }

   static std::vector<double> dv;
   dv.resize(nVar);
   for (int ivar = 0; ivar < nVar; ivar++) dv[ivar] = iv[indicesGet.at(ivar)];

   // map each variable to [-1, 1] using the class-dependent min/max
   for (int ivar = 0; ivar < 4; ivar++) {
      double offset = fMin_1[cls][ivar];
      double scale  = 1.0/(fMax_1[cls][ivar]-fMin_1[cls][ivar]);
      iv[indicesPut.at(ivar)] = (dv[ivar]-offset)*scale * 2 - 1;
   }
}
inline void ReadMLPBNN::InitTransform()
{
   InitTransform_1();
}

inline void ReadMLPBNN::Transform( std::vector<double>& iv, int sigOrBgd ) const
{
   Transform_1( iv, sigOrBgd );
}
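// Example driver: a minimal sketch (not part of the TMVA-generated reader) showing how
// the standalone class is typically used. The variable names passed to the constructor
// must match those used at training time; the event values below are made up purely
// for illustration.
int main()
{
   std::vector<std::string> inputVars = { "var1+var2", "var1-var2", "var3", "var4" };
   ReadMLPBNN reader( inputVars );

   std::vector<double> event = { 0.5, -1.2, 0.3, 2.1 };   // illustrative input values
   double mva = reader.GetMvaValue( event );
   std::cout << "MLPBNN response: " << mva << std::endl;

   return 0;
}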