#ifndef IClassifierReader__def
#define IClassifierReader__def

// Abstract interface for TMVA standalone classifier readers.
// Generated method classes (e.g. ReadMLPBNN below) derive from this and
// implement GetMvaValue().
class IClassifierReader {

 public:

   // constructor: a freshly constructed reader starts in a clean state
   IClassifierReader() : fStatusIsClean( true ) {}
   virtual ~IClassifierReader() {}

   // return classifier response for one event
   virtual double GetMvaValue( const std::vector<double>& inputValues ) const = 0;

   // returns classifier status: false once a subclass has flagged an
   // input-validation failure
   bool IsStatusClean() const { return fStatusIsClean; }

 protected:

   bool fStatusIsClean; // set to false by subclasses on failed sanity checks

};

#endif
// ReadMLPBNN: TMVA-generated standalone response class for the MLPBNN method.
// NOTE(review): this chunk is an extraction-mangled copy of a generated TMVA
// weights class: the stray leading integers are original-file line numbers
// fused into the text, and several lines (closing braces, access labels)
// were dropped.
102 class ReadMLPBNN :
public IClassifierReader {
// Constructor: records the class name, marks the inputs as un-normalised,
// and cross-checks the caller-supplied variable list against training.
107 ReadMLPBNN( std::vector<std::string>& theInputVars )
108 : IClassifierReader(),
109 fClassName(
"ReadMLPBNN" ),
111 fIsNormalised(
false )
// Names of the four input variables, in training order.
114 const char* inputVars[] = {
"var1+var2",
"var1-var2",
"var3",
"var4" };
// Sanity check: an empty input vector dirties the reader status.
117 if (theInputVars.size() <= 0) {
118 std::cout <<
"Problem in class \"" << fClassName <<
"\": empty input vector" << std::endl;
119 fStatusIsClean =
false;
// Sanity check: the number of supplied variables must match fNvars.
122 if (theInputVars.size() != fNvars) {
123 std::cout <<
"Problem in class \"" << fClassName <<
"\": mismatch in number of input values: " 124 << theInputVars.size() <<
" != " << fNvars << std::endl;
125 fStatusIsClean =
false;
// Each supplied variable name must match the training name at that index.
129 for (
size_t ivar = 0; ivar < theInputVars.size(); ivar++) {
130 if (theInputVars[ivar] != inputVars[ivar]) {
131 std::cout <<
"Problem in class \"" << fClassName <<
"\": mismatch in input variable names" << std::endl
132 <<
" for variable [" << ivar <<
"]: " << theInputVars[ivar].c_str() <<
" != " << inputVars[ivar] << std::endl;
133 fStatusIsClean =
false;
// NOTE(review): the constructor's tail (min/max/type initialisation and the
// Initialize()/InitTransform() calls typical of TMVA readers) was dropped by
// the extraction — confirm against the original generated file.
// Destructor. NOTE(review): its body was dropped by the extraction; stock
// TMVA readers call Clear() here (which frees fWeights) — confirm.
161 virtual ~ReadMLPBNN() {
// Classifier response for one event (implements IClassifierReader).
168 double GetMvaValue(
const std::vector<double>& inputValues )
const;
// Input variable transformation, step 1 (min/max normalisation, see below).
179 void InitTransform_1();
180 void Transform_1( std::vector<double> & iv,
int sigOrBgd )
const;
// Top-level transformation dispatchers.
181 void InitTransform();
182 void Transform( std::vector<double> & iv,
int sigOrBgd )
const;
// Method name, used in diagnostic messages.
185 const char* fClassName;
// Number of input variables.
188 size_t GetNvar()
const {
return fNvars; }
// True if inputs are min/max-normalised before evaluation
// (see the IsNormalised() branch in GetMvaValue below); fixed at
// construction to false for this method.
192 const bool fIsNormalised;
193 bool IsNormalised()
const {
return fIsNormalised; }
196 double NormVariable(
double x,
double xmin,
double xmax )
const {
198 return 2*(x -
xmin)/(xmax - xmin) - 1.0;
// Raw network evaluation (defined out-of-line below).
206 double GetMvaValue__(
const std::vector<double>& inputValues )
const;
// Hidden-layer activation function.
210 double ActivationFnc(
double x)
const;
// Output-layer activation function (logistic sigmoid, see definition below).
211 double OutputActivationFnc(
double x)
const;
// Trained weight matrices, indexed [to-node][from-node].
// Layer sizes are 5 -> 10 -> 1; the last node of each non-output layer is
// the bias node (set to 1 in GetMvaValue__).
215 double fWeightMatrix0to1[10][5];
216 double fWeightMatrix1to2[1][10];
// Per-layer neuron value buffers, allocated with new[] in the
// initialisation code below and released in Clear().
218 double * fWeights[3];
// Interior of the network initialisation routine: allocates the per-layer
// value buffers and fills the trained weight matrices.
// NOTE(review): the enclosing "void Initialize() {" header and closing brace
// were dropped by the extraction — confirm against the original file.
// Layer sizes: input = 4 variables + bias = 5, hidden = 9 neurons + bias = 10,
// output = 1 node.
225 fLayerSize[0] = 5; fWeights[0] =
new double[5];
226 fLayerSize[1] = 10; fWeights[1] =
new double[10];
227 fLayerSize[2] = 1; fWeights[2] =
new double[1];
// Trained weights, input layer -> hidden layer: [hidden node][input node].
229 fWeightMatrix0to1[0][0] = 0.0814937806335718;
230 fWeightMatrix0to1[1][0] = 1.66608493904044;
231 fWeightMatrix0to1[2][0] = 0.907620667288285;
232 fWeightMatrix0to1[3][0] = -0.82104985351804;
233 fWeightMatrix0to1[4][0] = -2.08201625319282;
234 fWeightMatrix0to1[5][0] = -2.67922549404247;
235 fWeightMatrix0to1[6][0] = -0.812128025540134;
236 fWeightMatrix0to1[7][0] = 2.34318790716582;
237 fWeightMatrix0to1[8][0] = -2.24296070100105;
238 fWeightMatrix0to1[0][1] = -1.06657449869395;
239 fWeightMatrix0to1[1][1] = -1.19325408648747;
240 fWeightMatrix0to1[2][1] = -0.355524904352707;
241 fWeightMatrix0to1[3][1] = -0.208303498149552;
242 fWeightMatrix0to1[4][1] = -0.949201145643711;
243 fWeightMatrix0to1[5][1] = -0.50055065217611;
244 fWeightMatrix0to1[6][1] = 0.504887665889982;
245 fWeightMatrix0to1[7][1] = -0.416190783244244;
246 fWeightMatrix0to1[8][1] = 1.1704574908932;
247 fWeightMatrix0to1[0][2] = -0.34301751949142;
248 fWeightMatrix0to1[1][2] = 1.18544046118725;
249 fWeightMatrix0to1[2][2] = -0.0337431028634733;
250 fWeightMatrix0to1[3][2] = -2.10928755356298;
251 fWeightMatrix0to1[4][2] = -1.02354627299503;
252 fWeightMatrix0to1[5][2] = -0.389081350821253;
253 fWeightMatrix0to1[6][2] = -1.09998880400102;
254 fWeightMatrix0to1[7][2] = -0.219992405129252;
255 fWeightMatrix0to1[8][2] = 0.794576815354533;
256 fWeightMatrix0to1[0][3] = -3.16844563782956;
257 fWeightMatrix0to1[1][3] = -1.54039625590493;
258 fWeightMatrix0to1[2][3] = 0.790940325402751;
259 fWeightMatrix0to1[3][3] = 2.88071956340261;
260 fWeightMatrix0to1[4][3] = 4.3908734623996;
261 fWeightMatrix0to1[5][3] = 1.12949208619052;
262 fWeightMatrix0to1[6][3] = 0.185644097902448;
263 fWeightMatrix0to1[7][3] = -0.543046087435345;
264 fWeightMatrix0to1[8][3] = 1.44766636064841;
// Column 4 multiplies the input-layer bias node (always 1).
265 fWeightMatrix0to1[0][4] = 0.33202073563451;
266 fWeightMatrix0to1[1][4] = 3.0800970895301;
267 fWeightMatrix0to1[2][4] = -1.02289215335637;
268 fWeightMatrix0to1[3][4] = -0.155435781193619;
269 fWeightMatrix0to1[4][4] = -0.882861078422949;
270 fWeightMatrix0to1[5][4] = 0.019309808309592;
271 fWeightMatrix0to1[6][4] = 1.91549261652052;
272 fWeightMatrix0to1[7][4] = 0.13021332110229;
273 fWeightMatrix0to1[8][4] = -1.32984315105756;
// Trained weights, hidden layer -> single output node
// (index 9 multiplies the hidden-layer bias node).
275 fWeightMatrix1to2[0][0] = -3.10575533370833;
276 fWeightMatrix1to2[0][1] = -0.124247930938538;
277 fWeightMatrix1to2[0][2] = -0.813882862721867;
278 fWeightMatrix1to2[0][3] = 1.47280482989785;
279 fWeightMatrix1to2[0][4] = 5.44297027047592;
280 fWeightMatrix1to2[0][5] = 3.21864846820081;
281 fWeightMatrix1to2[0][6] = 2.32953640660092;
282 fWeightMatrix1to2[0][7] = -1.01190744015599;
283 fWeightMatrix1to2[0][8] = 0.4327075992155;
284 fWeightMatrix1to2[0][9] = -0.61477579296756;
// Raw network evaluation: forward-propagates inputValues through the
// 5 -> 10 -> 1 network and returns the single output-node value.
// NOTE(review): several braces and the early return after the size check
// were dropped by the extraction — confirm against the original file.
287 inline double ReadMLPBNN::GetMvaValue__(
const std::vector<double>& inputValues )
// Reject input vectors whose size differs from the number of variables
// (fLayerSize[0]-1, i.e. excluding the bias node).
const 289 if (inputValues.size() != (
unsigned int)fLayerSize[0]-1) {
290 std::cout <<
"Input vector needs to be of size " << fLayerSize[0]-1 << std::endl;
// Reset every neuron value to 0.
294 for (
int l=0;
l<fLayers;
l++)
295 for (
int i=0; i<fLayerSize[
l]; i++) fWeights[
l][i]=0;
// Set the bias node (last slot) of each non-output layer to 1.
297 for (
int l=0;
l<fLayers-1;
l++)
298 fWeights[
l][fLayerSize[
l]-1]=1;
// Load the event's variables into the input layer.
300 for (
int i=0; i<fLayerSize[0]-1; i++)
301 fWeights[0][i]=inputValues[i];
// Layer 0 -> 1: weighted sum over all input nodes (incl. bias),
// then hidden-layer activation.
304 for (
int o=0; o<fLayerSize[1]-1; o++) {
305 for (
int i=0; i<fLayerSize[0]; i++) {
306 double inputVal = fWeightMatrix0to1[o][i] * fWeights[0][i];
307 fWeights[1][o] += inputVal;
309 fWeights[1][o] = ActivationFnc(fWeights[1][o]);
// Layer 1 -> 2: weighted sum over all hidden nodes (incl. bias),
// then output activation (sigmoid).
312 for (
int o=0; o<fLayerSize[2]; o++) {
313 for (
int i=0; i<fLayerSize[1]; i++) {
314 double inputVal = fWeightMatrix1to2[o][i] * fWeights[1][i];
315 fWeights[2][o] += inputVal;
317 fWeights[2][o] = OutputActivationFnc(fWeights[2][o]);
// The single output node holds the MVA response.
320 return fWeights[2][0];
// Hidden-layer activation function.
// NOTE(review): the body was dropped by the extraction; TMVA MLP readers
// typically use "return tanh(x);" here — confirm against the original file.
323 double ReadMLPBNN::ActivationFnc(
double x)
const {
327 double ReadMLPBNN::OutputActivationFnc(
double x)
const {
329 return 1.0/(1.0+
exp(-x));
333 inline void ReadMLPBNN::Clear()
336 for (
int lIdx = 0; lIdx < 3; lIdx++) {
337 delete[] fWeights[lIdx];
// Public response entry point: checks the reader status, copies (and, when
// fIsNormalised, min/max-normalises) the inputs, then evaluates the network
// via GetMvaValue__.
// NOTE(review): the "double retval = 0;" declaration, the ivar counters, the
// Transform(iV,...) calls, the else-branch glue and the final
// "return retval;" present in stock TMVA readers were dropped by the
// extraction — confirm against the original file.
340 inline double ReadMLPBNN::GetMvaValue(
const std::vector<double>& inputValues )
// A dirty status (failed constructor sanity checks) means no valid response.
const 346 if (!IsStatusClean()) {
347 std::cout <<
"Problem in class \"" << fClassName <<
"\": cannot return classifier response" 348 <<
" because status is dirty" << std::endl;
// Normalised branch: map each input into [-1,1] via NormVariable.
// (fVmin/fVmax declarations are not visible in this chunk — presumably
// per-variable training ranges; verify against the original file.)
352 if (IsNormalised()) {
354 std::vector<double> iV;
355 iV.reserve(inputValues.size());
357 for (std::vector<double>::const_iterator varIt = inputValues.begin();
358 varIt != inputValues.end(); varIt++, ivar++) {
359 iV.push_back(NormVariable( *varIt, fVmin[ivar], fVmax[ivar] ));
362 retval = GetMvaValue__( iV );
// Un-normalised branch: plain copy of the inputs.
365 std::vector<double> iV;
367 for (std::vector<double>::const_iterator varIt = inputValues.begin();
368 varIt != inputValues.end(); varIt++, ivar++) {
369 iV.push_back(*varIt);
372 retval = GetMvaValue__( iV );
// Fill the per-class min/max tables used by Transform_1:
// fMin_1/fMax_1[cls][ivar] for the 4 input variables.
// NOTE(review): cls index 2 appears to hold the combined range of indices
// 0 and 1 (its values are the element-wise min/max of the other two rows)
// — confirm the class-index convention against the original file.
380 inline void ReadMLPBNN::InitTransform_1()
383 fMin_1[0][0] = -4.94358778;
384 fMax_1[0][0] = 6.3994679451;
385 fMin_1[1][0] = -8.14423561096;
386 fMax_1[1][0] = 7.26972866058;
387 fMin_1[2][0] = -8.14423561096;
388 fMax_1[2][0] = 7.26972866058;
389 fMin_1[0][1] = -3.96643972397;
390 fMax_1[0][1] = 3.11266636848;
391 fMin_1[1][1] = -3.25508260727;
392 fMax_1[1][1] = 4.0258936882;
393 fMin_1[2][1] = -3.96643972397;
394 fMax_1[2][1] = 4.0258936882;
395 fMin_1[0][2] = -2.78645992279;
396 fMax_1[0][2] = 3.50111722946;
397 fMin_1[1][2] = -5.03730010986;
398 fMax_1[1][2] = 4.27845287323;
399 fMin_1[2][2] = -5.03730010986;
400 fMax_1[2][2] = 4.27845287323;
401 fMin_1[0][3] = -2.42712664604;
402 fMax_1[0][3] = 4.5351858139;
403 fMin_1[1][3] = -5.95050764084;
404 fMax_1[1][3] = 4.64035463333;
405 fMin_1[2][3] = -5.95050764084;
406 fMax_1[2][3] = 4.64035463333;
// Normalisation transform: map each of the 4 variables in iv into [-1,1]
// using the min/max recorded for class `cls` in InitTransform_1.
// NOTE(review): the error branch of the cls range check and a
// "dv.resize(nVar);"-style line before the gather loop were dropped by the
// extraction — confirm against the original file.
410 inline void ReadMLPBNN::Transform_1( std::vector<double>& iv,
int cls)
const 413 if (cls < 0 || cls > 2) {
// Identity index maps: variable i is read from and written back to slot i.
// NOTE(review): function-local statics make this non-reentrant and
// thread-unsafe.
422 static std::vector<int> indicesGet;
423 static std::vector<int> indicesPut;
425 if ( indicesGet.empty() ) {
426 indicesGet.reserve(fNvars);
427 indicesGet.push_back( 0);
428 indicesGet.push_back( 1);
429 indicesGet.push_back( 2);
430 indicesGet.push_back( 3);
432 if ( indicesPut.empty() ) {
433 indicesPut.reserve(fNvars);
434 indicesPut.push_back( 0);
435 indicesPut.push_back( 1);
436 indicesPut.push_back( 2);
437 indicesPut.push_back( 3);
// Scratch copy of the gathered variable values.
440 static std::vector<double> dv;
442 for (
int ivar=0; ivar<nVar; ivar++) dv[ivar] = iv[indicesGet.at(ivar)];
// Linear map per variable: (x - min) / (max - min) * 2 - 1.
443 for (
int ivar=0;ivar<4;ivar++) {
444 double offset = fMin_1[cls][ivar];
445 double scale = 1.0/(fMax_1[cls][ivar]-fMin_1[cls][ivar]);
446 iv[indicesPut.at(ivar)] = (dv[ivar]-offset)*scale * 2 - 1;
// Initialise all input transformations.
// NOTE(review): the body was dropped by the extraction; in stock TMVA
// readers it just calls InitTransform_1() — confirm.
451 inline void ReadMLPBNN::InitTransform()
// Apply all input transformations to iv: delegates to Transform_1 with the
// caller-selected class index (sigOrBgd).
457 inline void ReadMLPBNN::Transform( std::vector<double>& iv,
int sigOrBgd )
const 459 Transform_1( iv, sigOrBgd );
// NOTE(review): the two declarations below look like unrelated extraction
// residue from other TMVA sources (they resemble TMVA::Tools::GetType and
// the tmvaglob-style Initialize helper). They are not ReadMLPBNN members,
// lack bodies/terminating semicolons, and should be removed or restored from
// their original files — confirm.
Type GetType(const std::string &Name)
void Initialize(Bool_t useTMVAStyle=kTRUE)