Logo ROOT   6.07/09
Reference Guide
RuleEnsemble.cxx
Go to the documentation of this file.
1 // @(#)root/tmva $Id$
2 // Author: Andreas Hoecker, Joerg Stelzer, Fredrik Tegenfeldt, Helge Voss
3 
4 /**********************************************************************************
5  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis *
6  * Package: TMVA *
7  * Class : RuleEnsemble *
8  * Web : http://tmva.sourceforge.net *
9  * *
10  * Description: *
11  * A class generating an ensemble of rules *
12  * Input: a forest of decision trees *
13  * Output: an ensemble of rules *
14  * *
15  * Authors (alphabetical): *
16  * Fredrik Tegenfeldt <Fredrik.Tegenfeldt@cern.ch> - Iowa State U., USA *
17  * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, GER *
18  * *
19  * Copyright (c) 2005: *
20  * CERN, Switzerland *
21  * Iowa State U. *
22  * MPI-K Heidelberg, Germany *
23  * *
24  * Redistribution and use in source and binary forms, with or without *
25  * modification, are permitted according to the terms listed in LICENSE *
26  * (http://tmva.sourceforge.net/LICENSE) *
27  **********************************************************************************/
28 
29 #include "TMVA/RuleEnsemble.h"
30 
31 #include "TMVA/DataSetInfo.h"
32 #include "TMVA/DecisionTree.h"
33 #include "TMVA/Event.h"
34 #include "TMVA/MethodBase.h"
35 #include "TMVA/MethodRuleFit.h"
36 #include "TMVA/MsgLogger.h"
37 #include "TMVA/RuleFit.h"
38 #include "TMVA/Tools.h"
39 #include "TMVA/Types.h"
40 
41 #include "TRandom3.h"
42 #include "TH1F.h"
43 
44 #include <algorithm>
45 #include <cstdlib>
46 #include <iomanip>
47 #include <list>
48 
49 ////////////////////////////////////////////////////////////////////////////////
50 /// constructor
51 
53  : fLearningModel ( kFull )
54  , fImportanceCut ( 0 )
55  , fLinQuantile ( 0.025 ) // default quantile for killing outliers in linear terms
56  , fOffset ( 0 )
57  , fAverageSupport ( 0.8 )
58  , fAverageRuleSigma( 0.4 ) // default value - used if only linear model is chosen
59  , fRuleFSig ( 0 )
60  , fRuleNCave ( 0 )
61  , fRuleNCsig ( 0 )
62  , fRuleMinDist ( 1e-3 ) // closest allowed 'distance' between two rules
63  , fNRulesGenerated ( 0 )
64  , fEvent ( 0 )
65  , fEventCacheOK ( true )
66  , fRuleMapOK ( true )
67  , fRuleMapInd0 ( 0 )
68  , fRuleMapInd1 ( 0 )
69  , fRuleMapEvents ( 0 )
70  , fLogger( new MsgLogger("RuleFit") )
71 {
72  Initialize( rf );
73 }
74 
75 ////////////////////////////////////////////////////////////////////////////////
76 /// copy constructor
77 
79  : fAverageSupport ( 1 )
80  , fEvent(0)
81  , fRuleMapEvents(0)
82  , fRuleFit(0)
83  , fLogger( new MsgLogger("RuleFit") )
84 {
85  Copy( other );
86 }
87 
88 ////////////////////////////////////////////////////////////////////////////////
89 /// constructor
90 
92  : fLearningModel ( kFull )
93  , fImportanceCut ( 0 )
94  , fLinQuantile ( 0.025 ) // default quantile for killing outliers in linear terms
95  , fOffset ( 0 )
96  , fImportanceRef ( 1.0 )
97  , fAverageSupport ( 0.8 )
98  , fAverageRuleSigma( 0.4 ) // default value - used if only linear model is chosen
99  , fRuleFSig ( 0 )
100  , fRuleNCave ( 0 )
101  , fRuleNCsig ( 0 )
102  , fRuleMinDist ( 1e-3 ) // closest allowed 'distance' between two rules
103  , fNRulesGenerated ( 0 )
104  , fEvent ( 0 )
105  , fEventCacheOK ( true )
106  , fRuleMapOK ( true )
107  , fRuleMapInd0 ( 0 )
108  , fRuleMapInd1 ( 0 )
109  , fRuleMapEvents ( 0 )
110  , fRuleFit ( 0 )
111  , fLogger( new MsgLogger("RuleFit") )
112 {
113 }
114 
115 ////////////////////////////////////////////////////////////////////////////////
116 /// destructor
117 
119 {
120  for ( std::vector<Rule *>::iterator itrRule = fRules.begin(); itrRule != fRules.end(); itrRule++ ) {
121  delete *itrRule;
122  }
 123  // NOTE: Should not delete the histos fLinPDFB/S since they are deleted elsewhere
124  delete fLogger;
125 }
126 
127 ////////////////////////////////////////////////////////////////////////////////
128 /// Initializes all member variables with default values
129 
131 {
132  SetAverageRuleSigma(0.4); // default value - used if only linear model is chosen
133  fRuleFit = rf;
134  UInt_t nvars = GetMethodBase()->GetNvar();
135  fVarImportance.clear();
136  fLinPDFB.clear();
137  fLinPDFS.clear();
138  //
139  fVarImportance.resize( nvars,0.0 );
140  fLinPDFB.resize( nvars,0 );
141  fLinPDFS.resize( nvars,0 );
142  fImportanceRef = 1.0;
143  for (UInt_t i=0; i<nvars; i++) { // a priori all linear terms are equally valid
144  fLinTermOK.push_back(kTRUE);
145  }
146 }
147 
148 ////////////////////////////////////////////////////////////////////////////////
149 
151  fLogger->SetMinType(t);
152 }
153 
154 
155 ////////////////////////////////////////////////////////////////////////////////
156 ///
157 /// Get a pointer to the original MethodRuleFit.
158 ///
159 
161 {
162  return ( fRuleFit==0 ? 0:fRuleFit->GetMethodRuleFit());
163 }
164 
165 ////////////////////////////////////////////////////////////////////////////////
166 ///
167 /// Get a pointer to the original MethodRuleFit.
168 ///
169 
171 {
172  return ( fRuleFit==0 ? 0:fRuleFit->GetMethodBase());
173 }
174 
175 ////////////////////////////////////////////////////////////////////////////////
176 /// create model
177 
179 {
181 
182  MakeLinearTerms();
183 
184  MakeRuleMap();
185 
186  CalcRuleSupport();
187 
188  RuleStatistics();
189 
190  PrintRuleGen();
191 }
192 
193 ////////////////////////////////////////////////////////////////////////////////
194 ///
195 /// Calculates sqrt(Sum(a_i^2)), i=1..N (NOTE do not include a0)
196 ///
197 
199 {
200  Int_t ncoeffs = fRules.size();
201  if (ncoeffs<1) return 0;
202  //
203  Double_t sum2=0;
204  Double_t val;
205  for (Int_t i=0; i<ncoeffs; i++) {
206  val = fRules[i]->GetCoefficient();
207  sum2 += val*val;
208  }
209  return sum2;
210 }
211 
212 ////////////////////////////////////////////////////////////////////////////////
213 /// reset all rule coefficients
214 
216 {
217  fOffset = 0.0;
218  UInt_t nrules = fRules.size();
219  for (UInt_t i=0; i<nrules; i++) {
220  fRules[i]->SetCoefficient(0.0);
221  }
222 }
223 
224 ////////////////////////////////////////////////////////////////////////////////
225 /// set all rule coefficients
226 
227 void TMVA::RuleEnsemble::SetCoefficients( const std::vector< Double_t > & v )
228 {
229  UInt_t nrules = fRules.size();
230  if (v.size()!=nrules) {
231  Log() << kFATAL << "<SetCoefficients> - BUG TRAP - input vector worng size! It is = " << v.size()
232  << " when it should be = " << nrules << Endl;
233  }
234  for (UInt_t i=0; i<nrules; i++) {
235  fRules[i]->SetCoefficient(v[i]);
236  }
237 }
238 
239 ////////////////////////////////////////////////////////////////////////////////
240 /// Retrieve all rule coefficients
241 
242 void TMVA::RuleEnsemble::GetCoefficients( std::vector< Double_t > & v )
243 {
244  UInt_t nrules = fRules.size();
245  v.resize(nrules);
246  if (nrules==0) return;
247  //
248  for (UInt_t i=0; i<nrules; i++) {
249  v[i] = (fRules[i]->GetCoefficient());
250  }
251 }
252 
253 ////////////////////////////////////////////////////////////////////////////////
254 /// get list of training events from the rule fitter
255 
256 const std::vector<const TMVA::Event*>* TMVA::RuleEnsemble::GetTrainingEvents() const
257 {
258  return &(fRuleFit->GetTrainingEvents());
259 }
260 
261 ////////////////////////////////////////////////////////////////////////////////
262 /// get the training event from the rule fitter
263 
265 {
266  return fRuleFit->GetTrainingEvent(i);
267 }
268 
269 ////////////////////////////////////////////////////////////////////////////////
270 /// remove rules that behave similar
271 
273 {
274  Log() << kVERBOSE << "Removing similar rules; distance = " << fRuleMinDist << Endl;
275 
276  UInt_t nrulesIn = fRules.size();
277  TMVA::Rule *first, *second;
278  std::vector< Char_t > removeMe( nrulesIn,false ); // <--- stores boolean
279 
280  Int_t nrem = 0;
281  Int_t remind=-1;
282  Double_t r;
283 
284  for (UInt_t i=0; i<nrulesIn; i++) {
285  if (!removeMe[i]) {
286  first = fRules[i];
287  for (UInt_t k=i+1; k<nrulesIn; k++) {
288  if (!removeMe[k]) {
289  second = fRules[k];
290  Bool_t equal = first->Equal(*second,kTRUE,fRuleMinDist);
291  if (equal) {
292  r = gRandom->Rndm();
293  remind = (r>0.5 ? k:i); // randomly select rule
294  }
295  else {
296  remind = -1;
297  }
298 
299  if (remind>-1) {
300  if (!removeMe[remind]) {
301  removeMe[remind] = true;
302  nrem++;
303  }
304  }
305  }
306  }
307  }
308  }
309  UInt_t ind = 0;
310  Rule *theRule;
311  for (UInt_t i=0; i<nrulesIn; i++) {
312  if (removeMe[i]) {
313  theRule = fRules[ind];
314 #if _MSC_VER >= 1400
315  fRules.erase( std::vector<Rule *>::iterator(&fRules[ind], &fRules) );
316 #else
317  fRules.erase( fRules.begin() + ind );
318 #endif
319  delete theRule;
320  ind--;
321  }
322  ind++;
323  }
324  UInt_t nrulesOut = fRules.size();
325  Log() << kVERBOSE << "Removed " << nrulesIn - nrulesOut << " out of " << nrulesIn << " rules" << Endl;
326 }
327 
328 ////////////////////////////////////////////////////////////////////////////////
329 /// cleanup rules
330 
332 {
333  UInt_t nrules = fRules.size();
334  if (nrules==0) return;
335  Log() << kVERBOSE << "Removing rules with relative importance < " << fImportanceCut << Endl;
336  if (fImportanceCut<=0) return;
337  //
338  // Mark rules to be removed
339  //
340  Rule *therule;
341  Int_t ind=0;
342  for (UInt_t i=0; i<nrules; i++) {
343  if (fRules[ind]->GetRelImportance()<fImportanceCut) {
344  therule = fRules[ind];
345 #if _MSC_VER >= 1400
346  fRules.erase( std::vector<Rule *>::iterator(&fRules[ind], &fRules) );
347 #else
348  fRules.erase( fRules.begin() + ind );
349 #endif
350  delete therule;
351  ind--;
352  }
353  ind++;
354  }
355  Log() << kINFO << "Removed " << nrules-ind << " out of a total of " << nrules
356  << " rules with importance < " << fImportanceCut << Endl;
357 }
358 
359 ////////////////////////////////////////////////////////////////////////////////
360 /// cleanup linear model
361 
363 {
364  UInt_t nlin = fLinNorm.size();
365  if (nlin==0) return;
366  Log() << kVERBOSE << "Removing linear terms with relative importance < " << fImportanceCut << Endl;
367  //
368  fLinTermOK.clear();
369  for (UInt_t i=0; i<nlin; i++) {
371  }
372 }
373 
374 ////////////////////////////////////////////////////////////////////////////////
375 /// calculate the support for all rules
376 
378 {
379  Log() << kVERBOSE << "Evaluating Rule support" << Endl;
380  Double_t s,t,stot,ttot,ssb;
381  Double_t ssig, sbkg, ssum;
382  Int_t indrule=0;
383  stot = 0;
384  ttot = 0;
385  // reset to default values
386  SetAverageRuleSigma(0.4);
387  const std::vector<const Event *> *events = GetTrainingEvents();
388  Double_t nrules = static_cast<Double_t>(fRules.size());
389  Double_t ew;
390  //
391  if ((nrules>0) && (events->size()>0)) {
392  for ( std::vector< Rule * >::iterator itrRule=fRules.begin(); itrRule!=fRules.end(); itrRule++ ) {
393  s=0.0;
394  ssig=0.0;
395  sbkg=0.0;
396  for ( std::vector<const Event * >::const_iterator itrEvent=events->begin(); itrEvent!=events->end(); itrEvent++ ) {
397  if ((*itrRule)->EvalEvent( *(*itrEvent) )) {
398  ew = (*itrEvent)->GetWeight();
399  s += ew;
400  if (GetMethodRuleFit()->DataInfo().IsSignal(*itrEvent)) ssig += ew;
401  else sbkg += ew;
402  }
403  }
404  //
405  s = s/fRuleFit->GetNEveEff();
406  t = s*(1.0-s);
407  t = (t<0 ? 0:sqrt(t));
408  stot += s;
409  ttot += t;
410  ssum = ssig+sbkg;
411  ssb = (ssum>0 ? Double_t(ssig)/Double_t(ssig+sbkg) : 0.0 );
412  (*itrRule)->SetSupport(s);
413  (*itrRule)->SetNorm(t);
414  (*itrRule)->SetSSB( ssb );
415  (*itrRule)->SetSSBNeve(Double_t(ssig+sbkg));
416  indrule++;
417  }
418  fAverageSupport = stot/nrules;
420  Log() << kVERBOSE << "Standard deviation of support = " << fAverageRuleSigma << Endl;
421  Log() << kVERBOSE << "Average rule support = " << fAverageSupport << Endl;
422  }
423 }
424 
425 ////////////////////////////////////////////////////////////////////////////////
426 /// calculate the importance of each rule
427 
429 {
430  Double_t maxRuleImp = CalcRuleImportance();
431  Double_t maxLinImp = CalcLinImportance();
432  Double_t maxImp = (maxRuleImp>maxLinImp ? maxRuleImp : maxLinImp);
433  SetImportanceRef( maxImp );
434 }
435 
436 ////////////////////////////////////////////////////////////////////////////////
437 /// set reference importance
438 
440 {
441  for ( UInt_t i=0; i<fRules.size(); i++ ) {
442  fRules[i]->SetImportanceRef(impref);
443  }
444  fImportanceRef = impref;
445 }
446 ////////////////////////////////////////////////////////////////////////////////
447 /// calculate importance of each rule
448 
450 {
451  Double_t maxImp=-1.0;
452  Double_t imp;
453  Int_t nrules = fRules.size();
454  for ( int i=0; i<nrules; i++ ) {
455  fRules[i]->CalcImportance();
456  imp = fRules[i]->GetImportance();
457  if (imp>maxImp) maxImp = imp;
458  }
459  for ( Int_t i=0; i<nrules; i++ ) {
460  fRules[i]->SetImportanceRef(maxImp);
461  }
462 
463  return maxImp;
464 }
465 
466 ////////////////////////////////////////////////////////////////////////////////
467 /// calculate the linear importance for each rule
468 
470 {
471  Double_t maxImp=-1.0;
472  UInt_t nvars = fLinCoefficients.size();
473  fLinImportance.resize(nvars,0.0);
474  if (!DoLinear()) return maxImp;
475  //
476  // The linear importance is:
477  // I = |b_x|*sigma(x)
478  // Note that the coefficients in fLinCoefficients are for the normalized x
479  // => b'_x * x' = b'_x * sigma(r)*x/sigma(x)
480  // => b_x = b'_x*sigma(r)/sigma(x)
481  // => I = |b'_x|*sigma(r)
482  //
483  Double_t imp;
484  for ( UInt_t i=0; i<nvars; i++ ) {
486  fLinImportance[i] = imp;
487  if (imp>maxImp) maxImp = imp;
488  }
489  return maxImp;
490 }
491 
492 ////////////////////////////////////////////////////////////////////////////////
493 ///
494 /// Calculates variable importance using eq (35) in RuleFit paper by Friedman et.al
495 ///
496 
498 {
499  Log() << kVERBOSE << "Compute variable importance" << Endl;
500  Double_t rimp;
501  UInt_t nrules = fRules.size();
502  if (GetMethodBase()==0) Log() << kFATAL << "RuleEnsemble::CalcVarImportance() - should not be here!" << Endl;
503  UInt_t nvars = GetMethodBase()->GetNvar();
504  UInt_t nvarsUsed;
505  Double_t rimpN;
506  fVarImportance.resize(nvars,0);
507  // rules
508  if (DoRules()) {
509  for ( UInt_t ind=0; ind<nrules; ind++ ) {
510  rimp = fRules[ind]->GetImportance();
511  nvarsUsed = fRules[ind]->GetNumVarsUsed();
512  if (nvarsUsed<1)
513  Log() << kFATAL << "<CalcVarImportance> Variables for importance calc!!!??? A BUG!" << Endl;
514  rimpN = (nvarsUsed > 0 ? rimp/nvarsUsed:0.0);
515  for ( UInt_t iv=0; iv<nvars; iv++ ) {
516  if (fRules[ind]->ContainsVariable(iv)) {
517  fVarImportance[iv] += rimpN;
518  }
519  }
520  }
521  }
522  // linear terms
523  if (DoLinear()) {
524  for ( UInt_t iv=0; iv<fLinTermOK.size(); iv++ ) {
525  if (fLinTermOK[iv]) fVarImportance[iv] += fLinImportance[iv];
526  }
527  }
528  //
529  // Make variable importance relative the strongest variable
530  //
531  Double_t maximp = 0.0;
532  for ( UInt_t iv=0; iv<nvars; iv++ ) {
533  if ( fVarImportance[iv] > maximp ) maximp = fVarImportance[iv];
534  }
535  if (maximp>0) {
536  for ( UInt_t iv=0; iv<nvars; iv++ ) {
537  fVarImportance[iv] *= 1.0/maximp;
538  }
539  }
540 }
541 
542 ////////////////////////////////////////////////////////////////////////////////
543 /// set rules
544 ///
545 /// first clear all
546 
547 void TMVA::RuleEnsemble::SetRules( const std::vector<Rule *> & rules )
548 {
549  DeleteRules();
550  //
551  fRules.resize(rules.size());
552  for (UInt_t i=0; i<fRules.size(); i++) {
553  fRules[i] = rules[i];
554  }
556 }
557 
558 ////////////////////////////////////////////////////////////////////////////////
559 ///
560 /// Makes rules from the given decision tree.
561 /// First node in all rules is ALWAYS the root node.
562 ///
563 
564 void TMVA::RuleEnsemble::MakeRules( const std::vector< const DecisionTree *> & forest )
565 {
566  fRules.clear();
567  if (!DoRules()) return;
568  //
569  Int_t nrulesCheck=0;
570  Int_t nrules;
571  Int_t nendn;
572  Double_t sumnendn=0;
573  Double_t sumn2=0;
574  //
575  // UInt_t prevs;
576  UInt_t ntrees = forest.size();
577  for ( UInt_t ind=0; ind<ntrees; ind++ ) {
578  // prevs = fRules.size();
579  MakeRulesFromTree( forest[ind] );
580  nrules = CalcNRules( forest[ind] );
581  nendn = (nrules/2) + 1;
582  sumnendn += nendn;
583  sumn2 += nendn*nendn;
584  nrulesCheck += nrules;
585  }
586  Double_t nmean = (ntrees>0) ? sumnendn/ntrees : 0;
587  Double_t nsigm = TMath::Sqrt( gTools().ComputeVariance(sumn2,sumnendn,ntrees) );
588  Double_t ndev = 2.0*(nmean-2.0-nsigm)/(nmean-2.0+nsigm);
589  //
590  Log() << kVERBOSE << "Average number of end nodes per tree = " << nmean << Endl;
591  if (ntrees>1) Log() << kVERBOSE << "sigma of ditto ( ~= mean-2 ?) = "
592  << nsigm
593  << Endl;
594  Log() << kVERBOSE << "Deviation from exponential model = " << ndev << Endl;
595  Log() << kVERBOSE << "Corresponds to L (eq. 13, RuleFit ppr) = " << nmean << Endl;
596  // a BUG trap
597  if (nrulesCheck != static_cast<Int_t>(fRules.size())) {
598  Log() << kFATAL
599  << "BUG! number of generated and possible rules do not match! N(rules) = " << fRules.size()
600  << " != " << nrulesCheck << Endl;
601  }
602  Log() << kVERBOSE << "Number of generated rules: " << fRules.size() << Endl;
603 
604  // save initial number of rules
605  fNRulesGenerated = fRules.size();
606 
608 
610 
611 }
612 
613 ////////////////////////////////////////////////////////////////////////////////
614 ///
615 /// Make the linear terms as in eq 25, ref 2
 616 /// For this the b and (1-b) quantiles are needed
617 ///
618 
620 {
621  if (!DoLinear()) return;
622 
623  const std::vector<const Event *> *events = GetTrainingEvents();
624  UInt_t neve = events->size();
625  UInt_t nvars = ((*events)[0])->GetNVariables(); // Event -> GetNVariables();
626  Double_t val,ew;
627  typedef std::pair< Double_t, Int_t> dataType;
628  typedef std::pair< Double_t, dataType > dataPoint;
629 
630  std::vector< std::vector<dataPoint> > vardata(nvars);
631  std::vector< Double_t > varsum(nvars,0.0);
632  std::vector< Double_t > varsum2(nvars,0.0);
633  // first find stats of all variables
634  // vardata[v][i].first -> value of var <v> in event <i>
635  // vardata[v][i].second.first -> the event weight
636  // vardata[v][i].second.second -> the event type
637  for (UInt_t i=0; i<neve; i++) {
638  ew = ((*events)[i])->GetWeight();
639  for (UInt_t v=0; v<nvars; v++) {
640  val = ((*events)[i])->GetValue(v);
641  vardata[v].push_back( dataPoint( val, dataType(ew,((*events)[i])->GetClass()) ) );
642  }
643  }
644  //
645  fLinDP.clear();
646  fLinDM.clear();
647  fLinCoefficients.clear();
648  fLinNorm.clear();
649  fLinDP.resize(nvars,0);
650  fLinDM.resize(nvars,0);
651  fLinCoefficients.resize(nvars,0);
652  fLinNorm.resize(nvars,0);
653 
654  Double_t averageWeight = neve ? fRuleFit->GetNEveEff()/static_cast<Double_t>(neve) : 0;
655  // sort and find limits
656  Double_t stdl;
657 
658  // find normalisation given in ref 2 after eq 26
659  Double_t lx;
660  Double_t nquant;
661  Double_t neff;
662  UInt_t indquantM;
663  UInt_t indquantP;
664 
665  MethodBase *fMB=const_cast<MethodBase *>(fRuleFit->GetMethodBase());
666 
667  for (UInt_t v=0; v<nvars; v++) {
668  varsum[v] = 0;
669  varsum2[v] = 0;
670  //
671  std::sort( vardata[v].begin(),vardata[v].end() );
672  nquant = fLinQuantile*fRuleFit->GetNEveEff(); // quantile = 0.025
673  neff=0;
674  UInt_t ie=0;
675  // first scan for lower quantile (including weights)
676  while ( (ie<neve) && (neff<nquant) ) {
677  neff += vardata[v][ie].second.first;
678  ie++;
679  }
680  indquantM = (ie==0 ? 0:ie-1);
681  // now for upper quantile
682  ie = neve;
683  neff=0;
684  while ( (ie>0) && (neff<nquant) ) {
685  ie--;
686  neff += vardata[v][ie].second.first;
687  }
688  indquantP = (ie==neve ? ie=neve-1:ie);
689  //
690  fLinDM[v] = vardata[v][indquantM].first; // delta-
691  fLinDP[v] = vardata[v][indquantP].first; // delta+
692 
693  if(!fMB->IsSilentFile())
694  {
695  if (fLinPDFB[v]) delete fLinPDFB[v];
696  if (fLinPDFS[v]) delete fLinPDFS[v];
697  fLinPDFB[v] = new TH1F(Form("bkgvar%d",v),"bkg temphist",40,fLinDM[v],fLinDP[v]);
698  fLinPDFS[v] = new TH1F(Form("sigvar%d",v),"sig temphist",40,fLinDM[v],fLinDP[v]);
699  fLinPDFB[v]->Sumw2();
700  fLinPDFS[v]->Sumw2();
701  }
702  //
703  Int_t type;
704  const Double_t w = 1.0/fRuleFit->GetNEveEff();
705  for (ie=0; ie<neve; ie++) {
706  val = vardata[v][ie].first;
707  ew = vardata[v][ie].second.first;
708  type = vardata[v][ie].second.second;
709  lx = TMath::Min( fLinDP[v], TMath::Max( fLinDM[v], val ) );
710  varsum[v] += ew*lx;
711  varsum2[v] += ew*lx*lx;
712  if(!fMB->IsSilentFile())
713  {
714  if (type==1) fLinPDFS[v]->Fill(lx,w*ew);
715  else fLinPDFB[v]->Fill(lx,w*ew);
716  }
717  }
718  //
719  // Get normalization.
720  //
721  stdl = TMath::Sqrt( (varsum2[v] - (varsum[v]*varsum[v]/fRuleFit->GetNEveEff()))/(fRuleFit->GetNEveEff()-averageWeight) );
722  fLinNorm[v] = CalcLinNorm(stdl);
723  }
724  // Save PDFs - for debugging purpose
725  if(!fMB->IsSilentFile())
726  {
727  for (UInt_t v=0; v<nvars; v++) {
728  fLinPDFS[v]->Write();
729  fLinPDFB[v]->Write();
730  }
731  }
732 }
733 
734 
735 ////////////////////////////////////////////////////////////////////////////////
736 ///
737 /// This function returns Pr( y = 1 | x ) for the linear terms.
738 ///
739 
741 {
742  UInt_t nvars=fLinDP.size();
743 
744  Double_t fstot=0;
745  Double_t fbtot=0;
746  nsig = 0;
747  ntot = nvars;
748  for (UInt_t v=0; v<nvars; v++) {
749  Double_t val = fEventLinearVal[v];
750  Int_t bin = fLinPDFS[v]->FindBin(val);
751  fstot += fLinPDFS[v]->GetBinContent(bin);
752  fbtot += fLinPDFB[v]->GetBinContent(bin);
753  }
754  if (nvars<1) return 0;
755  ntot = (fstot+fbtot)/Double_t(nvars);
756  nsig = (fstot)/Double_t(nvars);
757  return fstot/(fstot+fbtot);
758 }
759 
760 ////////////////////////////////////////////////////////////////////////////////
761 ///
762 /// This function returns Pr( y = 1 | x ) for rules.
763 /// The probability returned is normalized against the number of rules which are actually passed
764 ///
765 
767 {
768  Double_t sump = 0;
769  Double_t sumok = 0;
770  Double_t sumz = 0;
771  Double_t ssb;
772  Double_t neve;
773  //
774  UInt_t nrules = fRules.size();
775  for (UInt_t ir=0; ir<nrules; ir++) {
776  if (fEventRuleVal[ir]>0) {
777  ssb = fEventRuleVal[ir]*GetRulesConst(ir)->GetSSB(); // S/(S+B) is evaluated in CalcRuleSupport() using ALL training events
778  neve = GetRulesConst(ir)->GetSSBNeve(); // number of events accepted by the rule
779  sump += ssb*neve; // number of signal events
780  sumok += neve; // total number of events passed
781  }
782  else sumz += 1.0; // all events
783  }
784 
785  nsig = sump;
786  ntot = sumok;
787  //
788  if (ntot>0) return nsig/ntot;
789  return 0.0;
790 }
791 
792 ////////////////////////////////////////////////////////////////////////////////
793 ///
794 /// We want to estimate F* = argmin Eyx( L(y,F(x) ), min wrt F(x)
795 /// F(x) = FL(x) + FR(x) , linear and rule part
796 ///
797 ///
798 
800 {
801  SetEvent(e);
802  UpdateEventVal();
803  return FStar();
804 }
805 
806 ////////////////////////////////////////////////////////////////////////////////
807 ///
808 /// We want to estimate F* = argmin Eyx( L(y,F(x) ), min wrt F(x)
809 /// F(x) = FL(x) + FR(x) , linear and rule part
810 ///
811 ///
812 
814 {
815  Double_t p=0;
816  Double_t nrs=0, nrt=0;
817  Double_t nls=0, nlt=0;
818  Double_t nt;
819  Double_t pr=0;
820  Double_t pl=0;
821 
822  // first calculate Pr(y=1|X) for rules and linear terms
823  if (DoLinear()) pl = PdfLinear(nls, nlt);
824  if (DoRules()) pr = PdfRule(nrs, nrt);
825  // nr(l)t=0 or 1
826  if ((nlt>0) && (nrt>0)) nt=2.0;
827  else nt=1.0;
828  p = (pl+pr)/nt;
829  return 2.0*p-1.0;
830 }
831 
832 ////////////////////////////////////////////////////////////////////////////////
833 /// calculate various statistics for this rule
834 
836 {
837  // TODO: NOT YET UPDATED FOR WEIGHTS
838  const std::vector<const Event *> *events = GetTrainingEvents();
839  const UInt_t neve = events->size();
840  const UInt_t nvars = GetMethodBase()->GetNvar();
841  const UInt_t nrules = fRules.size();
842  const Event *eveData;
843  // Flags
844  Bool_t sigRule;
845  Bool_t sigTag;
846  Bool_t bkgTag;
847  // Bool_t noTag;
848  Bool_t sigTrue;
849  Bool_t tagged;
850  // Counters
851  Int_t nsig=0;
852  Int_t nbkg=0;
853  Int_t ntag=0;
854  Int_t nss=0;
855  Int_t nsb=0;
856  Int_t nbb=0;
857  Int_t nbs=0;
858  std::vector<Int_t> varcnt;
859  // Clear vectors
860  fRulePSS.clear();
861  fRulePSB.clear();
862  fRulePBS.clear();
863  fRulePBB.clear();
864  fRulePTag.clear();
865  //
866  varcnt.resize(nvars,0);
867  fRuleVarFrac.clear();
868  fRuleVarFrac.resize(nvars,0);
869  //
870  for ( UInt_t i=0; i<nrules; i++ ) {
871  for ( UInt_t v=0; v<nvars; v++) {
872  if (fRules[i]->ContainsVariable(v)) varcnt[v]++; // count how often a variable occurs
873  }
874  sigRule = fRules[i]->IsSignalRule();
875  if (sigRule) { // rule is a signal rule (ie s/(s+b)>0.5)
876  nsig++;
877  }
878  else {
879  nbkg++;
880  }
881  // reset counters
882  nss=0;
883  nsb=0;
884  nbs=0;
885  nbb=0;
886  ntag=0;
887  // loop over all events
888  for (UInt_t e=0; e<neve; e++) {
889  eveData = (*events)[e];
890  tagged = fRules[i]->EvalEvent(*eveData);
891  sigTag = (tagged && sigRule); // it's tagged as a signal
892  bkgTag = (tagged && (!sigRule)); // ... as bkg
893  // noTag = !(sigTag || bkgTag); // ... not tagged
894  sigTrue = (eveData->GetClass() == 0); // true if event is true signal
895  if (tagged) {
896  ntag++;
897  if (sigTag && sigTrue) nss++;
898  if (sigTag && !sigTrue) nsb++;
899  if (bkgTag && sigTrue) nbs++;
900  if (bkgTag && !sigTrue) nbb++;
901  }
902  }
903  // Fill tagging probabilities
904  if (ntag>0 && neve > 0) { // should always be the case, but let's make sure and keep coverity quiet
905  fRulePTag.push_back(Double_t(ntag)/Double_t(neve));
906  fRulePSS.push_back(Double_t(nss)/Double_t(ntag));
907  fRulePSB.push_back(Double_t(nsb)/Double_t(ntag));
908  fRulePBS.push_back(Double_t(nbs)/Double_t(ntag));
909  fRulePBB.push_back(Double_t(nbb)/Double_t(ntag));
910  }
911  //
912  }
913  fRuleFSig = (nsig>0) ? static_cast<Double_t>(nsig)/static_cast<Double_t>(nsig+nbkg) : 0;
914  for ( UInt_t v=0; v<nvars; v++) {
915  fRuleVarFrac[v] = (nrules>0) ? Double_t(varcnt[v])/Double_t(nrules) : 0;
916  }
917 }
918 
919 ////////////////////////////////////////////////////////////////////////////////
920 /// calculate various statistics for this rule
921 
923 {
924  const UInt_t nrules = fRules.size();
925  Double_t nc;
926  Double_t sumNc =0;
927  Double_t sumNc2=0;
928  for ( UInt_t i=0; i<nrules; i++ ) {
929  nc = static_cast<Double_t>(fRules[i]->GetNcuts());
930  sumNc += nc;
931  sumNc2 += nc*nc;
932  }
933  fRuleNCave = 0.0;
934  fRuleNCsig = 0.0;
935  if (nrules>0) {
936  fRuleNCave = sumNc/nrules;
937  fRuleNCsig = TMath::Sqrt(gTools().ComputeVariance(sumNc2,sumNc,nrules));
938  }
939 }
940 
941 ////////////////////////////////////////////////////////////////////////////////
942 /// print rule generation info
943 
945 {
946  Log() << kHEADER << "-------------------RULE ENSEMBLE SUMMARY------------------------" << Endl;
947  const MethodRuleFit *mrf = GetMethodRuleFit();
948  if (mrf) Log() << kINFO << "Tree training method : " << (mrf->UseBoost() ? "AdaBoost":"Random") << Endl;
949  Log() << kINFO << "Number of events per tree : " << fRuleFit->GetNTreeSample() << Endl;
950  Log() << kINFO << "Number of trees : " << fRuleFit->GetForest().size() << Endl;
951  Log() << kINFO << "Number of generated rules : " << fNRulesGenerated << Endl;
952  Log() << kINFO << "Idem, after cleanup : " << fRules.size() << Endl;
953  Log() << kINFO << "Average number of cuts per rule : " << Form("%8.2f",fRuleNCave) << Endl;
954  Log() << kINFO << "Spread in number of cuts per rules : " << Form("%8.2f",fRuleNCsig) << Endl;
955  Log() << kVERBOSE << "Complexity : " << Form("%8.2f",fRuleNCave*fRuleNCsig) << Endl;
956  Log() << kINFO << "----------------------------------------------------------------" << Endl;
957  Log() << kINFO << Endl;
958 }
959 
960 ////////////////////////////////////////////////////////////////////////////////
961 /// print function
962 
964 {
965  const EMsgType kmtype=kINFO;
966  const Bool_t isDebug = (fLogger->GetMinType()<=kDEBUG);
967  //
968  Log() << kmtype << Endl;
969  Log() << kmtype << "================================================================" << Endl;
970  Log() << kmtype << " M o d e l " << Endl;
971  Log() << kmtype << "================================================================" << Endl;
972 
973  Int_t ind;
974  const UInt_t nvars = GetMethodBase()->GetNvar();
975  const Int_t nrules = fRules.size();
976  const Int_t printN = TMath::Min(10,nrules); //nrules+1;
977  Int_t maxL = 0;
978  for (UInt_t iv = 0; iv<fVarImportance.size(); iv++) {
979  if (GetMethodBase()->GetInputLabel(iv).Length() > maxL) maxL = GetMethodBase()->GetInputLabel(iv).Length();
980  }
981  //
982  if (isDebug) {
983  Log() << kDEBUG << "Variable importance:" << Endl;
984  for (UInt_t iv = 0; iv<fVarImportance.size(); iv++) {
985  Log() << kDEBUG << std::setw(maxL) << GetMethodBase()->GetInputLabel(iv)
986  << std::resetiosflags(std::ios::right)
987  << " : " << Form(" %3.3f",fVarImportance[iv]) << Endl;
988  }
989  }
990  //
991  Log() << kHEADER << "Offset (a0) = " << fOffset << Endl;
992  //
993  if (DoLinear()) {
994  if (fLinNorm.size() > 0) {
995  Log() << kmtype << "------------------------------------" << Endl;
996  Log() << kmtype << "Linear model (weights unnormalised)" << Endl;
997  Log() << kmtype << "------------------------------------" << Endl;
998  Log() << kmtype << std::setw(maxL) << "Variable"
999  << std::resetiosflags(std::ios::right) << " : "
1000  << std::setw(11) << " Weights"
1001  << std::resetiosflags(std::ios::right) << " : "
1002  << "Importance"
1003  << std::resetiosflags(std::ios::right)
1004  << Endl;
1005  Log() << kmtype << "------------------------------------" << Endl;
1006  for ( UInt_t i=0; i<fLinNorm.size(); i++ ) {
1007  Log() << kmtype << std::setw(std::max(maxL,8)) << GetMethodBase()->GetInputLabel(i);
1008  if (fLinTermOK[i]) {
1009  Log() << kmtype
1010  << std::resetiosflags(std::ios::right)
1011  << " : " << Form(" %10.3e",fLinCoefficients[i]*fLinNorm[i])
1012  << " : " << Form(" %3.3f",fLinImportance[i]/fImportanceRef) << Endl;
1013  }
1014  else {
1015  Log() << kmtype << "-> importance below threshhold = "
1016  << Form(" %3.3f",fLinImportance[i]/fImportanceRef) << Endl;
1017  }
1018  }
1019  Log() << kmtype << "------------------------------------" << Endl;
1020  }
1021  }
1022  else Log() << kmtype << "Linear terms were disabled" << Endl;
1023 
1024  if ((!DoRules()) || (nrules==0)) {
1025  if (!DoRules()) {
1026  Log() << kmtype << "Rule terms were disabled" << Endl;
1027  }
1028  else {
1029  Log() << kmtype << "Eventhough rules were included in the model, none passed! " << nrules << Endl;
1030  }
1031  }
1032  else {
1033  Log() << kmtype << "Number of rules = " << nrules << Endl;
1034  if (isDebug) {
1035  Log() << kmtype << "N(cuts) in rules, average = " << fRuleNCave << Endl;
1036  Log() << kmtype << " RMS = " << fRuleNCsig << Endl;
1037  Log() << kmtype << "Fraction of signal rules = " << fRuleFSig << Endl;
1038  Log() << kmtype << "Fraction of rules containing a variable (%):" << Endl;
1039  for ( UInt_t v=0; v<nvars; v++) {
1040  Log() << kmtype << " " << std::setw(maxL) << GetMethodBase()->GetInputLabel(v);
1041  Log() << kmtype << Form(" = %2.2f",fRuleVarFrac[v]*100.0) << " %" << Endl;
1042  }
1043  }
1044  //
1045  // Print out all rules sorted in importance
1046  //
1047  std::list< std::pair<double,int> > sortedImp;
1048  for (Int_t i=0; i<nrules; i++) {
1049  sortedImp.push_back( std::pair<double,int>( fRules[i]->GetImportance(),i ) );
1050  }
1051  sortedImp.sort();
1052  //
1053  Log() << kmtype << "Printing the first " << printN << " rules, ordered in importance." << Endl;
1054  int pind=0;
1055  for ( std::list< std::pair<double,int> >::reverse_iterator itpair = sortedImp.rbegin();
1056  itpair != sortedImp.rend(); itpair++ ) {
1057  ind = itpair->second;
1058  // if (pind==0) impref =
1059  // Log() << kmtype << "Rule #" <<
1060  // Log() << kmtype << *fRules[ind] << Endl;
1061  fRules[ind]->PrintLogger(Form("Rule %4d : ",pind+1));
1062  pind++;
1063  if (pind==printN) {
1064  if (nrules==printN) {
1065  Log() << kmtype << "All rules printed" << Endl;
1066  }
1067  else {
1068  Log() << kmtype << "Skipping the next " << nrules-printN << " rules" << Endl;
1069  }
1070  break;
1071  }
1072  }
1073  }
1074  Log() << kmtype << "================================================================" << Endl;
1075  Log() << kmtype << Endl;
1076 }
1077 
1078 ////////////////////////////////////////////////////////////////////////////////
1079 /// write rules to stream
1080 
1081 void TMVA::RuleEnsemble::PrintRaw( std::ostream & os ) const
1082 {
1083  Int_t dp = os.precision();
1084  UInt_t nrules = fRules.size();
1085  // std::sort(fRules.begin(),fRules.end());
1086  //
1087  os << "ImportanceCut= " << fImportanceCut << std::endl;
1088  os << "LinQuantile= " << fLinQuantile << std::endl;
1089  os << "AverageSupport= " << fAverageSupport << std::endl;
1090  os << "AverageRuleSigma= " << fAverageRuleSigma << std::endl;
1091  os << "Offset= " << fOffset << std::endl;
1092  os << "NRules= " << nrules << std::endl;
1093  for (UInt_t i=0; i<nrules; i++){
1094  os << "***Rule " << i << std::endl;
1095  (fRules[i])->PrintRaw(os);
1096  }
1097  UInt_t nlinear = fLinNorm.size();
1098  //
1099  os << "NLinear= " << fLinTermOK.size() << std::endl;
1100  for (UInt_t i=0; i<nlinear; i++) {
1101  os << "***Linear " << i << std::endl;
1102  os << std::setprecision(10) << (fLinTermOK[i] ? 1:0) << " "
1103  << fLinCoefficients[i] << " "
1104  << fLinNorm[i] << " "
1105  << fLinDM[i] << " "
1106  << fLinDP[i] << " "
1107  << fLinImportance[i] << " " << std::endl;
1108  }
1109  os << std::setprecision(dp);
1110 }
1111 
1112 ////////////////////////////////////////////////////////////////////////////////
1113 /// write rules to XML
1114 
1115 void* TMVA::RuleEnsemble::AddXMLTo(void* parent) const
1116 {
1117  void* re = gTools().AddChild( parent, "Weights" ); // this is the "RuleEnsemble"
1118 
1119  UInt_t nrules = fRules.size();
1120  UInt_t nlinear = fLinNorm.size();
1121  gTools().AddAttr( re, "NRules", nrules );
1122  gTools().AddAttr( re, "NLinear", nlinear );
1123  gTools().AddAttr( re, "LearningModel", (int)fLearningModel );
1124  gTools().AddAttr( re, "ImportanceCut", fImportanceCut );
1125  gTools().AddAttr( re, "LinQuantile", fLinQuantile );
1126  gTools().AddAttr( re, "AverageSupport", fAverageSupport );
1127  gTools().AddAttr( re, "AverageRuleSigma", fAverageRuleSigma );
1128  gTools().AddAttr( re, "Offset", fOffset );
1129  for (UInt_t i=0; i<nrules; i++) fRules[i]->AddXMLTo(re);
1130 
1131  for (UInt_t i=0; i<nlinear; i++) {
1132  void* lin = gTools().AddChild( re, "Linear" );
1133  gTools().AddAttr( lin, "OK", (fLinTermOK[i] ? 1:0) );
1134  gTools().AddAttr( lin, "Coeff", fLinCoefficients[i] );
1135  gTools().AddAttr( lin, "Norm", fLinNorm[i] );
1136  gTools().AddAttr( lin, "DM", fLinDM[i] );
1137  gTools().AddAttr( lin, "DP", fLinDP[i] );
1138  gTools().AddAttr( lin, "Importance", fLinImportance[i] );
1139  }
1140  return re;
1141 }
1142 
1143 ////////////////////////////////////////////////////////////////////////////////
1144 /// read rules from XML
1145 
1146 void TMVA::RuleEnsemble::ReadFromXML( void* wghtnode )
1147 {
1148  UInt_t nrules, nlinear;
1149  gTools().ReadAttr( wghtnode, "NRules", nrules );
1150  gTools().ReadAttr( wghtnode, "NLinear", nlinear );
1151  Int_t iLearningModel;
1152  gTools().ReadAttr( wghtnode, "LearningModel", iLearningModel );
1153  fLearningModel = (ELearningModel) iLearningModel;
1154  gTools().ReadAttr( wghtnode, "ImportanceCut", fImportanceCut );
1155  gTools().ReadAttr( wghtnode, "LinQuantile", fLinQuantile );
1156  gTools().ReadAttr( wghtnode, "AverageSupport", fAverageSupport );
1157  gTools().ReadAttr( wghtnode, "AverageRuleSigma", fAverageRuleSigma );
1158  gTools().ReadAttr( wghtnode, "Offset", fOffset );
1159 
1160  // read rules
1161  DeleteRules();
1162 
1163  UInt_t i = 0;
1164  fRules.resize( nrules );
1165  void* ch = gTools().GetChild( wghtnode );
1166  for (i=0; i<nrules; i++) {
1167  fRules[i] = new Rule();
1168  fRules[i]->SetRuleEnsemble( this );
1169  fRules[i]->ReadFromXML( ch );
1170 
1171  ch = gTools().GetNextChild(ch);
1172  }
1173 
1174  // read linear classifier (Fisher)
1175  fLinNorm .resize( nlinear );
1176  fLinTermOK .resize( nlinear );
1177  fLinCoefficients.resize( nlinear );
1178  fLinDP .resize( nlinear );
1179  fLinDM .resize( nlinear );
1180  fLinImportance .resize( nlinear );
1181 
1182  Int_t iok;
1183  i=0;
1184  while(ch) {
1185  gTools().ReadAttr( ch, "OK", iok );
1186  fLinTermOK[i] = (iok == 1);
1187  gTools().ReadAttr( ch, "Coeff", fLinCoefficients[i] );
1188  gTools().ReadAttr( ch, "Norm", fLinNorm[i] );
1189  gTools().ReadAttr( ch, "DM", fLinDM[i] );
1190  gTools().ReadAttr( ch, "DP", fLinDP[i] );
1191  gTools().ReadAttr( ch, "Importance", fLinImportance[i] );
1192 
1193  i++;
1194  ch = gTools().GetNextChild(ch);
1195  }
1196 }
1197 
1198 ////////////////////////////////////////////////////////////////////////////////
1199 /// read rule ensemble from stream
1200 
1201 void TMVA::RuleEnsemble::ReadRaw( std::istream & istr )
1202 {
1203  UInt_t nrules;
1204  //
1205  std::string dummy;
1206  Int_t idum;
1207  //
1208  // First block is general stuff
1209  //
1210  istr >> dummy >> fImportanceCut;
1211  istr >> dummy >> fLinQuantile;
1212  istr >> dummy >> fAverageSupport;
1213  istr >> dummy >> fAverageRuleSigma;
1214  istr >> dummy >> fOffset;
1215  istr >> dummy >> nrules;
1216  //
1217  // Now read in the rules
1218  //
1219  DeleteRules();
1220  //
1221  for (UInt_t i=0; i<nrules; i++){
1222  istr >> dummy >> idum; // read line "***Rule <ind>"
1223  fRules.push_back( new Rule() );
1224  (fRules.back())->SetRuleEnsemble( this );
1225  (fRules.back())->ReadRaw(istr);
1226  }
1227  //
1228  // and now the linear terms
1229  //
1230  UInt_t nlinear;
1231  //
1232  // coverity[tainted_data_argument]
1233  istr >> dummy >> nlinear;
1234  //
1235  fLinNorm .resize( nlinear );
1236  fLinTermOK .resize( nlinear );
1237  fLinCoefficients.resize( nlinear );
1238  fLinDP .resize( nlinear );
1239  fLinDM .resize( nlinear );
1240  fLinImportance .resize( nlinear );
1241  //
1242 
1243  Int_t iok;
1244  for (UInt_t i=0; i<nlinear; i++) {
1245  istr >> dummy >> idum;
1246  istr >> iok;
1247  fLinTermOK[i] = (iok==1);
1248  istr >> fLinCoefficients[i];
1249  istr >> fLinNorm[i];
1250  istr >> fLinDM[i];
1251  istr >> fLinDP[i];
1252  istr >> fLinImportance[i];
1253  }
1254 }
1255 
1256 ////////////////////////////////////////////////////////////////////////////////
1257 /// copy function
1258 
1260 {
1261  if(this != &other) {
1262  fRuleFit = other.GetRuleFit();
1263  fRuleMinDist = other.GetRuleMinDist();
1264  fOffset = other.GetOffset();
1265  fRules = other.GetRulesConst();
1266  fImportanceCut = other.GetImportanceCut();
1267  fVarImportance = other.GetVarImportance();
1268  fLearningModel = other.GetLearningModel();
1269  fLinQuantile = other.GetLinQuantile();
1270  fRuleNCsig = other.fRuleNCsig;
1272  fEventCacheOK = other.fEventCacheOK;
1275  fRuleFSig = other.fRuleFSig;
1276  fRuleMapInd0 = other.fRuleMapInd0;
1277  fRuleMapInd1 = other.fRuleMapInd1;
1278  fRuleMapOK = other.fRuleMapOK;
1279  fRuleNCave = other.fRuleNCave;
1280  }
1281 }
1282 
1283 ////////////////////////////////////////////////////////////////////////////////
1284 /// calculate the number of rules
1285 
1287 {
1288  if (dtree==0) return 0;
1289  Node *node = dtree->GetRoot();
1290  Int_t nendnodes = 0;
1291  FindNEndNodes( node, nendnodes );
1292  return 2*(nendnodes-1);
1293 }
1294 
1295 ////////////////////////////////////////////////////////////////////////////////
1296 /// find the number of leaf nodes
1297 
1298 void TMVA::RuleEnsemble::FindNEndNodes( const Node *node, Int_t & nendnodes )
1299 {
1300  if (node==0) return;
1301  if ((node->GetRight()==0) && (node->GetLeft()==0)) {
1302  ++nendnodes;
1303  return;
1304  }
1305  const Node *nodeR = node->GetRight();
1306  const Node *nodeL = node->GetLeft();
1307  FindNEndNodes( nodeR, nendnodes );
1308  FindNEndNodes( nodeL, nendnodes );
1309 }
1310 
1311 ////////////////////////////////////////////////////////////////////////////////
1312 /// create rules from the decsision tree structure
1313 
1315 {
1316  Node *node = dtree->GetRoot();
1317  AddRule( node );
1318 }
1319 
1320 ////////////////////////////////////////////////////////////////////////////////
1321 /// add a new rule to the tree
1322 
1324 {
1325  if (node==0) return;
1326  if (node->GetParent()==0) { // it's a root node, don't make a rule
1327  AddRule( node->GetRight() );
1328  AddRule( node->GetLeft() );
1329  }
1330  else {
1331  Rule *rule = MakeTheRule(node);
1332  if (rule) {
1333  fRules.push_back( rule );
1334  AddRule( node->GetRight() );
1335  AddRule( node->GetLeft() );
1336  }
1337  else {
1338  Log() << kFATAL << "<AddRule> - ERROR failed in creating a rule! BUG!" << Endl;
1339  }
1340  }
1341 }
1342 
1343 ////////////////////////////////////////////////////////////////////////////////
1344 ///
1345 /// Make a Rule from a given Node.
1346 /// The root node (ie no parent) does not generate a Rule.
1347 /// The first node in a rule is always the root node => fNodes.size()>=2
1348 /// Each node corresponds to a cut and the cut value is given by the parent node.
1349 ///
1350 ///
1351 
1353 {
1354  if (node==0) {
1355  Log() << kFATAL << "<MakeTheRule> Input node is NULL. Should not happen. BUG!" << Endl;
1356  return 0;
1357  }
1358 
1359  if (node->GetParent()==0) { // a root node - ignore
1360  return 0;
1361  }
1362  //
1363  std::vector< const Node * > nodeVec;
1364  const Node *parent = node;
1365  //
1366  // Make list with the input node at the end:
1367  // <root node> <node1> <node2> ... <node given as argument>
1368  //
1369  nodeVec.push_back( node );
1370  while (parent!=0) {
1371  parent = parent->GetParent();
1372  if (!parent) continue;
1373  const DecisionTreeNode* dtn = dynamic_cast<const DecisionTreeNode*>(parent);
1374  if (dtn && dtn->GetSelector()>=0)
1375  nodeVec.insert( nodeVec.begin(), parent );
1376 
1377  }
1378  if (nodeVec.size()<2) {
1379  Log() << kFATAL << "<MakeTheRule> BUG! Inconsistent Rule!" << Endl;
1380  return 0;
1381  }
1382  Rule *rule = new Rule( this, nodeVec );
1383  rule->SetMsgType( Log().GetMinType() );
1384  return rule;
1385 }
1386 
1387 ////////////////////////////////////////////////////////////////////////////////
1388 /// Makes rule map for all events
1389 
1390 void TMVA::RuleEnsemble::MakeRuleMap(const std::vector<const Event *> *events, UInt_t ifirst, UInt_t ilast)
1391 {
1392  Log() << kVERBOSE << "Making Rule map for all events" << Endl;
1393  // make rule response map
1394  if (events==0) events = GetTrainingEvents();
1395  if ((ifirst==0) || (ilast==0) || (ifirst>ilast)) {
1396  ifirst = 0;
1397  ilast = events->size()-1;
1398  }
1399  // check if identical to previous call
1400  if ((events!=fRuleMapEvents) ||
1401  (ifirst!=fRuleMapInd0) ||
1402  (ilast !=fRuleMapInd1)) {
1403  fRuleMapOK = kFALSE;
1404  }
1405  //
1406  if (fRuleMapOK) {
1407  Log() << kVERBOSE << "<MakeRuleMap> Map is already valid" << Endl;
1408  return; // already cached
1409  }
1410  fRuleMapEvents = events;
1411  fRuleMapInd0 = ifirst;
1412  fRuleMapInd1 = ilast;
1413  // check number of rules
1414  UInt_t nrules = GetNRules();
1415  if (nrules==0) {
1416  Log() << kVERBOSE << "No rules found in MakeRuleMap()" << Endl;
1417  fRuleMapOK = kTRUE;
1418  return;
1419  }
1420  //
1421  // init map
1422  //
1423  std::vector<UInt_t> ruleind;
1424  fRuleMap.clear();
1425  for (UInt_t i=ifirst; i<=ilast; i++) {
1426  ruleind.clear();
1427  fRuleMap.push_back( ruleind );
1428  for (UInt_t r=0; r<nrules; r++) {
1429  if (fRules[r]->EvalEvent(*((*events)[i]))) {
1430  fRuleMap.back().push_back(r); // save only rules that are accepted
1431  }
1432  }
1433  }
1434  fRuleMapOK = kTRUE;
1435  Log() << kVERBOSE << "Made rule map for event# " << ifirst << " : " << ilast << Endl;
1436 }
1437 
1438 ////////////////////////////////////////////////////////////////////////////////
1439 /// std::ostream operator
1440 
std::ostream& TMVA::operator<< ( std::ostream& os, const RuleEnsemble & rules )
{
   // Deprecated stream operator: writes only a warning to 'os'; the actual
   // ensemble dump goes through the internal logger via Print(), not the stream.
   os << "DON'T USE THIS - TO BE REMOVED" << std::endl;
   rules.Print();
   return os;
}
const std::vector< const TMVA::Event * > * GetTrainingEvents() const
get list of training events from the rule fitter
Double_t fAverageSupport
Definition: RuleEnsemble.h:366
MsgLogger * fLogger
Definition: RuleEnsemble.h:395
Double_t GetImportanceCut() const
Definition: RuleEnsemble.h:273
Double_t PdfLinear(Double_t &nsig, Double_t &ntot) const
This function returns Pr( y = 1 | x ) for the linear terms.
MsgLogger & Endl(MsgLogger &ml)
Definition: MsgLogger.h:162
ELearningModel GetLearningModel() const
Definition: RuleEnsemble.h:272
EMsgType GetMinType() const
Definition: MsgLogger.h:75
const std::vector< Double_t > & GetVarImportance() const
Definition: RuleEnsemble.h:282
const std::vector< const TMVA::Event * > & GetTrainingEvents() const
Definition: RuleFit.h:141
void SetEvent(const Event &e)
Definition: RuleEnsemble.h:155
Ssiz_t Length() const
Definition: TString.h:390
std::vector< TH1F * > fLinPDFS
Definition: RuleEnsemble.h:362
bool equal(double d1, double d2, double stol=10000)
RuleEnsemble()
constructor
Rule * MakeTheRule(const Node *node)
Make a Rule from a given Node.
THist< 1, float, THistStatContent, THistStatUncertainty > TH1F
Definition: THist.hxx:302
Int_t CalcNRules(const TMVA::DecisionTree *dtree)
calculate the number of rules
Double_t CalcLinNorm(Double_t stdev)
Definition: RuleEnsemble.h:130
UInt_t GetNvar() const
Definition: MethodBase.h:340
virtual ~RuleEnsemble()
destructor
std::vector< Double_t > fLinDP
Definition: RuleEnsemble.h:357
Short_t Min(Short_t a, Short_t b)
Definition: TMathBase.h:170
int Int_t
Definition: RtypesCore.h:41
bool Bool_t
Definition: RtypesCore.h:59
Double_t GetLinQuantile() const
Definition: RuleEnsemble.h:284
const Bool_t kFALSE
Definition: Rtypes.h:92
std::vector< Double_t > fRulePBB
Definition: RuleEnsemble.h:373
void Print() const
print function
void PrintRuleGen() const
print rule generation info
void CleanupLinear()
cleanup linear model
void AddAttr(void *node, const char *, const T &value, Int_t precision=16)
Definition: Tools.h:309
void MakeRuleMap(const std::vector< const TMVA::Event * > *events=0, UInt_t ifirst=0, UInt_t ilast=0)
Makes rule map for all events.
void * AddChild(void *parent, const char *childname, const char *content=0, bool isRootNode=false)
add child node
Definition: Tools.cxx:1134
Short_t Abs(Short_t d)
Definition: TMathBase.h:110
MsgLogger & Log() const
message logger
Definition: RuleEnsemble.h:396
std::vector< TMVA::Rule * > fRules
Definition: RuleEnsemble.h:355
virtual DecisionTreeNode * GetRoot() const
Definition: DecisionTree.h:102
std::vector< Char_t > fLinTermOK
Definition: RuleEnsemble.h:356
Bool_t Equal(const Rule &other, Bool_t useCutValue, Double_t maxdist) const
Compare two rules.
Definition: Rule.cxx:168
void SetAverageRuleSigma(Double_t v)
Definition: RuleEnsemble.h:147
void SetMsgType(EMsgType t)
const Event * GetTrainingEvent(UInt_t i) const
get the training event from the rule fitter
void SetImportanceRef(Double_t impref)
set reference importance
void RuleResponseStats()
calculate various statistics for this rule
double sqrt(double)
TClass * GetClass(T *)
Definition: TClass.h:555
Tools & gTools()
Definition: Tools.cxx:79
std::vector< Double_t > fLinNorm
Definition: RuleEnsemble.h:360
Double_t PdfRule(Double_t &nsig, Double_t &ntot) const
This function returns Pr( y = 1 | x ) for rules.
void RemoveSimilarRules()
remove rules that behave similar
void Copy(RuleEnsemble const &other)
copy function
const MethodBase * GetMethodBase() const
Definition: RuleFit.h:153
void PrintRaw(std::ostream &os) const
write rules to stream
void * GetChild(void *parent, const char *childname=0)
get child node
Definition: Tools.cxx:1158
const MethodRuleFit * GetMethodRuleFit() const
Get a pointer to the original MethodRuleFit.
std::vector< TH1F * > fLinPDFB
Definition: RuleEnsemble.h:361
void SetMinType(EMsgType minType)
Definition: MsgLogger.h:76
std::vector< Double_t > fRulePSB
Definition: RuleEnsemble.h:371
void CalcImportance()
calculate the importance of each rule
std::vector< Double_t > fLinCoefficients
Definition: RuleEnsemble.h:359
virtual Double_t Rndm()
Machine independent random number generator.
Definition: TRandom.cxx:512
void CleanupRules()
cleanup rules
const RuleFit * fRuleFit
Definition: RuleEnsemble.h:393
const std::vector< const TMVA::DecisionTree * > & GetForest() const
Definition: RuleFit.h:147
Double_t FStar() const
We want to estimate F* = argmin Eyx( L(y,F(x) ), min wrt F(x) F(x) = FL(x) + FR(x) ...
Bool_t DoRules() const
Definition: RuleEnsemble.h:268
void CalcVarImportance()
Calculates variable importance using eq (35) in RuleFit paper by Friedman et.al.
TRandom2 r(17)
SVector< double, 2 > v
Definition: Dict.h:5
const RuleFit * GetRuleFit() const
Definition: RuleEnsemble.h:261
ELearningModel fLearningModel
Definition: RuleEnsemble.h:351
Double_t CalcRuleImportance()
calculate importance of each rule
EMsgType
Definition: Types.h:61
std::vector< Double_t > fLinDM
Definition: RuleEnsemble.h:358
void MakeRulesFromTree(const DecisionTree *dtree)
create rules from the decsision tree structure
Double_t CoefficientRadius()
Calculates sqrt(Sum(a_i^2)), i=1..N (NOTE do not include a0)
void AddRule(const Node *node)
add a new rule to the tree
unsigned int UInt_t
Definition: RtypesCore.h:42
UInt_t GetNTreeSample() const
Definition: RuleFit.h:134
char * Form(const char *fmt,...)
void RuleStatistics()
calculate various statistics for this rule
void MakeRules(const std::vector< const TMVA::DecisionTree * > &forest)
Makes rules from the given decision tree.
void ReadAttr(void *node, const char *, T &value)
Definition: Tools.h:296
const std::vector< TMVA::Rule * > & GetRulesConst() const
Definition: RuleEnsemble.h:277
const MethodRuleFit * GetMethodRuleFit() const
Definition: RuleFit.h:152
R__EXTERN TRandom * gRandom
Definition: TRandom.h:66
Bool_t IsSilentFile()
Definition: MethodBase.h:375
std::ostream & operator<<(std::ostream &os, const BinaryTree &tree)
print the tree recursinvely using the << operator
Definition: BinaryTree.cxx:159
Double_t fImportanceRef
Definition: RuleEnsemble.h:365
void ReadFromXML(void *wghtnode)
read rules from XML
std::vector< Char_t > fEventRuleVal
Definition: RuleEnsemble.h:384
void FindNEndNodes(const TMVA::Node *node, Int_t &nendnodes)
find the number of leaf nodes
void Initialize(const RuleFit *rf)
Initializes all member variables with default values.
void SetCoefficients(const std::vector< Double_t > &v)
set all rule coefficients
double Double_t
Definition: RtypesCore.h:55
std::vector< Double_t > fVarImportance
Definition: RuleEnsemble.h:364
std::vector< Double_t > fEventLinearVal
Definition: RuleEnsemble.h:385
int type
Definition: TGX11.cxx:120
static RooMathCoreReg dummy
void * GetNextChild(void *prevchild, const char *childname=0)
XML helpers.
Definition: Tools.cxx:1170
virtual Node * GetParent() const
Definition: Node.h:93
void MakeLinearTerms()
Make the linear terms as in eq 25, ref 2 For this the b and (1-b) quatiles are needed.
std::vector< Double_t > fLinImportance
Definition: RuleEnsemble.h:363
you should not use this method at all Int_t Int_t Double_t Double_t Double_t e
Definition: TRolke.cxx:630
virtual Node * GetRight() const
Definition: Node.h:92
const TString & GetInputLabel(Int_t i) const
Definition: MethodBase.h:346
Double_t fAverageRuleSigma
Definition: RuleEnsemble.h:367
UInt_t GetClass() const
Definition: Event.h:89
std::vector< Double_t > fRulePBS
Definition: RuleEnsemble.h:372
void CalcRuleSupport()
calculate the support for all rules
const std::vector< const TMVA::Event * > * fRuleMapEvents
Definition: RuleEnsemble.h:391
std::vector< Double_t > fRulePSS
Definition: RuleEnsemble.h:370
Double_t GetRuleMinDist() const
Definition: RuleEnsemble.h:290
void MakeModel()
create model
Short_t GetSelector() const
void SetRules(const std::vector< TMVA::Rule * > &rules)
set rules
Bool_t UseBoost() const
Definition: MethodRuleFit.h:94
UInt_t GetNRules() const
Definition: RuleEnsemble.h:276
void SetMsgType(EMsgType t)
Definition: Rule.cxx:152
Short_t Max(Short_t a, Short_t b)
Definition: TMathBase.h:202
void ResetCoefficients()
reset all rule coefficients
void * AddXMLTo(void *parent) const
write rules to XML
void GetCoefficients(std::vector< Double_t > &v)
Retrieve all rule coefficients.
Definition: first.py:1
Double_t Sqrt(Double_t x)
Definition: TMath.h:464
Double_t CalcLinImportance()
calculate the linear importance for each rule
Double_t EvalEvent() const
Definition: RuleEnsemble.h:426
const Bool_t kTRUE
Definition: Rtypes.h:91
const MethodBase * GetMethodBase() const
Get a pointer to the original MethodRuleFit.
Double_t GetOffset() const
Definition: RuleEnsemble.h:275
std::vector< Double_t > fRulePTag
Definition: RuleEnsemble.h:374
const Event * GetTrainingEvent(UInt_t i) const
Definition: RuleFit.h:136
Bool_t DoLinear() const
Definition: RuleEnsemble.h:267
Double_t fImportanceCut
Definition: RuleEnsemble.h:352
Double_t GetNEveEff() const
Definition: RuleFit.h:135
virtual Node * GetLeft() const
Definition: Node.h:91
std::vector< Double_t > fRuleVarFrac
Definition: RuleEnsemble.h:369
std::vector< std::vector< UInt_t > > fRuleMap
Definition: RuleEnsemble.h:388
const Event * fEvent
Definition: RuleEnsemble.h:382
void ReadRaw(std::istream &istr)
read rule ensemble from stream