Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
RuleEnsemble.cxx
Go to the documentation of this file.
1// @(#)root/tmva $Id$
2// Author: Andreas Hoecker, Joerg Stelzer, Fredrik Tegenfeldt, Helge Voss
3
4/**********************************************************************************
5 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis *
6 * Package: TMVA *
7 * Class : RuleEnsemble *
8 * Web : http://tmva.sourceforge.net *
9 * *
10 * Description: *
11 * A class generating an ensemble of rules *
12 * Input: a forest of decision trees *
13 * Output: an ensemble of rules *
14 * *
15 * Authors (alphabetical): *
16 * Fredrik Tegenfeldt <Fredrik.Tegenfeldt@cern.ch> - Iowa State U., USA *
17 * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, GER *
18 * *
19 * Copyright (c) 2005: *
20 * CERN, Switzerland *
21 * Iowa State U. *
22 * MPI-K Heidelberg, Germany *
23 * *
24 * Redistribution and use in source and binary forms, with or without *
25 * modification, are permitted according to the terms listed in LICENSE *
26 * (http://tmva.sourceforge.net/LICENSE) *
27 **********************************************************************************/
28
29/*! \class TMVA::RuleEnsemble
30\ingroup TMVA
31
32*/
33#include "TMVA/RuleEnsemble.h"
34
35#include "TMVA/DataSetInfo.h"
36#include "TMVA/DecisionTree.h"
37#include "TMVA/Event.h"
38#include "TMVA/MethodBase.h"
39#include "TMVA/MethodRuleFit.h"
40#include "TMVA/MsgLogger.h"
41#include "TMVA/RuleFit.h"
42#include "TMVA/Tools.h"
43#include "TMVA/Types.h"
44
45#include "TRandom3.h"
46#include "TH1F.h"
47
48#include <algorithm>
49#include <cstdlib>
50#include <iomanip>
51#include <list>
52
53////////////////////////////////////////////////////////////////////////////////
54/// constructor
55
57 : fLearningModel ( kFull )
58 , fImportanceCut ( 0 )
59 , fLinQuantile ( 0.025 ) // default quantile for killing outliers in linear terms
60 , fOffset ( 0 )
61 , fAverageSupport ( 0.8 )
62 , fAverageRuleSigma( 0.4 ) // default value - used if only linear model is chosen
63 , fRuleFSig ( 0 )
64 , fRuleNCave ( 0 )
65 , fRuleNCsig ( 0 )
66 , fRuleMinDist ( 1e-3 ) // closest allowed 'distance' between two rules
67 , fNRulesGenerated ( 0 )
68 , fEvent ( 0 )
69 , fEventCacheOK ( true )
70 , fRuleMapOK ( true )
71 , fRuleMapInd0 ( 0 )
72 , fRuleMapInd1 ( 0 )
73 , fRuleMapEvents ( 0 )
74 , fLogger( new MsgLogger("RuleFit") )
75{
76 Initialize( rf );
77}
78
79////////////////////////////////////////////////////////////////////////////////
80/// copy constructor
81
83 : fAverageSupport ( 1 )
84 , fEvent(0)
85 , fRuleMapEvents(0)
86 , fRuleFit(0)
87 , fLogger( new MsgLogger("RuleFit") )
88{
89 Copy( other );
90}
91
92////////////////////////////////////////////////////////////////////////////////
93/// constructor
94
96 : fLearningModel ( kFull )
97 , fImportanceCut ( 0 )
98 , fLinQuantile ( 0.025 ) // default quantile for killing outliers in linear terms
99 , fOffset ( 0 )
100 , fImportanceRef ( 1.0 )
101 , fAverageSupport ( 0.8 )
102 , fAverageRuleSigma( 0.4 ) // default value - used if only linear model is chosen
103 , fRuleFSig ( 0 )
104 , fRuleNCave ( 0 )
105 , fRuleNCsig ( 0 )
106 , fRuleMinDist ( 1e-3 ) // closest allowed 'distance' between two rules
107 , fNRulesGenerated ( 0 )
108 , fEvent ( 0 )
109 , fEventCacheOK ( true )
110 , fRuleMapOK ( true )
111 , fRuleMapInd0 ( 0 )
112 , fRuleMapInd1 ( 0 )
113 , fRuleMapEvents ( 0 )
114 , fRuleFit ( 0 )
115 , fLogger( new MsgLogger("RuleFit") )
116{
117}
118
119////////////////////////////////////////////////////////////////////////////////
120/// destructor
121
123{
124 for ( std::vector<Rule *>::iterator itrRule = fRules.begin(); itrRule != fRules.end(); ++itrRule ) {
125 delete *itrRule;
126 }
127 // NOTE: Should not delete the histos fLinPDFB/S since they are delete elsewhere
128 delete fLogger;
129}
130
131////////////////////////////////////////////////////////////////////////////////
132/// Initializes all member variables with default values
133
135{
136 SetAverageRuleSigma(0.4); // default value - used if only linear model is chosen
137 fRuleFit = rf;
138 UInt_t nvars = GetMethodBase()->GetNvar();
139 fVarImportance.clear();
140 fLinPDFB.clear();
141 fLinPDFS.clear();
142 //
143 fVarImportance.resize( nvars,0.0 );
144 fLinPDFB.resize( nvars,0 );
145 fLinPDFS.resize( nvars,0 );
146 fImportanceRef = 1.0;
147 for (UInt_t i=0; i<nvars; i++) { // a priori all linear terms are equally valid
148 fLinTermOK.push_back(kTRUE);
149 }
150}
151
152////////////////////////////////////////////////////////////////////////////////
153
155 fLogger->SetMinType(t);
156}
157
158////////////////////////////////////////////////////////////////////////////////
159///
160/// Get a pointer to the original MethodRuleFit.
161
163{
164 return ( fRuleFit==0 ? 0:fRuleFit->GetMethodRuleFit());
165}
166
167////////////////////////////////////////////////////////////////////////////////
168///
169/// Get a pointer to the original MethodRuleFit.
170
172{
173 return ( fRuleFit==0 ? 0:fRuleFit->GetMethodBase());
174}
175
176////////////////////////////////////////////////////////////////////////////////
177/// create model
178
180{
181 MakeRules( fRuleFit->GetForest() );
182
183 MakeLinearTerms();
184
185 MakeRuleMap();
186
187 CalcRuleSupport();
188
189 RuleStatistics();
190
191 PrintRuleGen();
192}
193
194////////////////////////////////////////////////////////////////////////////////
195///
196/// Calculates sqrt(Sum(a_i^2)), i=1..N (NOTE do not include a0)
197
199{
200 Int_t ncoeffs = fRules.size();
201 if (ncoeffs<1) return 0;
202 //
203 Double_t sum2=0;
204 Double_t val;
205 for (Int_t i=0; i<ncoeffs; i++) {
206 val = fRules[i]->GetCoefficient();
207 sum2 += val*val;
208 }
209 return sum2;
210}
211
212////////////////////////////////////////////////////////////////////////////////
213/// reset all rule coefficients
214
216{
217 fOffset = 0.0;
218 UInt_t nrules = fRules.size();
219 for (UInt_t i=0; i<nrules; i++) {
220 fRules[i]->SetCoefficient(0.0);
221 }
222}
223
224////////////////////////////////////////////////////////////////////////////////
225/// set all rule coefficients
226
227void TMVA::RuleEnsemble::SetCoefficients( const std::vector< Double_t > & v )
228{
229 UInt_t nrules = fRules.size();
230 if (v.size()!=nrules) {
231 Log() << kFATAL << "<SetCoefficients> - BUG TRAP - input vector wrong size! It is = " << v.size()
232 << " when it should be = " << nrules << Endl;
233 }
234 for (UInt_t i=0; i<nrules; i++) {
235 fRules[i]->SetCoefficient(v[i]);
236 }
237}
238
239////////////////////////////////////////////////////////////////////////////////
240/// Retrieve all rule coefficients
241
242void TMVA::RuleEnsemble::GetCoefficients( std::vector< Double_t > & v )
243{
244 UInt_t nrules = fRules.size();
245 v.resize(nrules);
246 if (nrules==0) return;
247 //
248 for (UInt_t i=0; i<nrules; i++) {
249 v[i] = (fRules[i]->GetCoefficient());
250 }
251}
252
253////////////////////////////////////////////////////////////////////////////////
254/// get list of training events from the rule fitter
255
256const std::vector<const TMVA::Event*>* TMVA::RuleEnsemble::GetTrainingEvents() const
257{
258 return &(fRuleFit->GetTrainingEvents());
259}
260
261////////////////////////////////////////////////////////////////////////////////
262/// get the training event from the rule fitter
263
265{
266 return fRuleFit->GetTrainingEvent(i);
267}
268
269////////////////////////////////////////////////////////////////////////////////
270/// remove rules that behave similar
271
273{
274 Log() << kVERBOSE << "Removing similar rules; distance = " << fRuleMinDist << Endl;
275
276 UInt_t nrulesIn = fRules.size();
277 TMVA::Rule *first, *second;
278 std::vector< Char_t > removeMe( nrulesIn,false ); // <--- stores boolean
279
280 Int_t nrem = 0;
281 Int_t remind=-1;
282 Double_t r;
283
   // Pass 1: pairwise comparison of all not-yet-marked rules (O(n^2) in the
   // number of rules). When two rules compare Equal() within fRuleMinDist,
   // one of the pair is chosen at random (via gRandom) and marked for removal.
284 for (UInt_t i=0; i<nrulesIn; i++) {
285 if (!removeMe[i]) {
286 first = fRules[i];
287 for (UInt_t k=i+1; k<nrulesIn; k++) {
288 if (!removeMe[k]) {
289 second = fRules[k];
290 Bool_t equal = first->Equal(*second,kTRUE,fRuleMinDist);
291 if (equal) {
292 r = gRandom->Rndm();
293 remind = (r>0.5 ? k:i); // randomly select rule
294 }
295 else {
296 remind = -1;
297 }
298
   // guard against double-marking: a rule may appear in several equal pairs
299 if (remind>-1) {
300 if (!removeMe[remind]) {
301 removeMe[remind] = true;
302 nrem++;
303 }
304 }
305 }
306 }
307 }
308 }
   // Pass 2: physically erase the marked rules. 'ind' is the index into the
   // shrinking fRules vector while 'i' indexes the original (full) list, so
   // ind is decremented after each erase to stay aligned.
309 UInt_t ind = 0;
310 Rule *theRule;
311 for (UInt_t i=0; i<nrulesIn; i++) {
312 if (removeMe[i]) {
313 theRule = fRules[ind];
314 fRules.erase( fRules.begin() + ind );
315 delete theRule;
316 ind--;
317 }
318 ind++;
319 }
320 UInt_t nrulesOut = fRules.size();
321 Log() << kVERBOSE << "Removed " << nrulesIn - nrulesOut << " out of " << nrulesIn << " rules" << Endl;
322}
323
324////////////////////////////////////////////////////////////////////////////////
325/// cleanup rules
326
328{
329 UInt_t nrules = fRules.size();
330 if (nrules==0) return;
331 Log() << kVERBOSE << "Removing rules with relative importance < " << fImportanceCut << Endl;
332 if (fImportanceCut<=0) return;
333 //
334 // Mark rules to be removed
335 //
336 Rule *therule;
   // 'ind' indexes the shrinking fRules vector while 'i' counts the original
   // nrules entries; after an erase, ind is decremented so that the next
   // iteration re-examines the element that slid into the erased slot.
337 Int_t ind=0;
338 for (UInt_t i=0; i<nrules; i++) {
339 if (fRules[ind]->GetRelImportance()<fImportanceCut) {
340 therule = fRules[ind];
341 fRules.erase( fRules.begin() + ind );
342 delete therule;
343 ind--;
344 }
345 ind++;
346 }
   // at loop exit 'ind' equals the number of surviving rules,
   // hence nrules-ind is the number removed
347 Log() << kINFO << "Removed " << nrules-ind << " out of a total of " << nrules
348 << " rules with importance < " << fImportanceCut << Endl;
349}
350
351////////////////////////////////////////////////////////////////////////////////
352/// cleanup linear model
353
355{
356 UInt_t nlin = fLinNorm.size();
357 if (nlin==0) return;
358 Log() << kVERBOSE << "Removing linear terms with relative importance < " << fImportanceCut << Endl;
359 //
360 fLinTermOK.clear();
361 for (UInt_t i=0; i<nlin; i++) {
362 fLinTermOK.push_back( (fLinImportance[i]/fImportanceRef > fImportanceCut) );
363 }
364}
365
366////////////////////////////////////////////////////////////////////////////////
367/// calculate the support for all rules
368
370{
371 Log() << kVERBOSE << "Evaluating Rule support" << Endl;
372 Double_t s,t,stot,ssb;
373 Double_t ssig, sbkg, ssum;
374 Int_t indrule=0;
375 stot = 0;
376 // reset to default values
377 SetAverageRuleSigma(0.4);
378 const std::vector<const Event *> *events = GetTrainingEvents();
379 Double_t nrules = static_cast<Double_t>(fRules.size());
380 Double_t ew;
381 //
382 if ((nrules>0) && (events->size()>0)) {
383 for ( std::vector< Rule * >::iterator itrRule=fRules.begin(); itrRule!=fRules.end(); ++itrRule ) {
384 s=0.0;
385 ssig=0.0;
386 sbkg=0.0;
387 for ( std::vector<const Event * >::const_iterator itrEvent=events->begin(); itrEvent!=events->end(); ++itrEvent ) {
388 if ((*itrRule)->EvalEvent( *(*itrEvent) )) {
389 ew = (*itrEvent)->GetWeight();
390 s += ew;
391 if (GetMethodRuleFit()->DataInfo().IsSignal(*itrEvent)) ssig += ew;
392 else sbkg += ew;
393 }
394 }
395 //
396 s = s/fRuleFit->GetNEveEff();
397 t = s*(1.0-s);
398 t = (t<0 ? 0:sqrt(t));
399 stot += s;
400 ssum = ssig+sbkg;
401 ssb = (ssum>0 ? Double_t(ssig)/Double_t(ssig+sbkg) : 0.0 );
402 (*itrRule)->SetSupport(s);
403 (*itrRule)->SetNorm(t);
404 (*itrRule)->SetSSB( ssb );
405 (*itrRule)->SetSSBNeve(Double_t(ssig+sbkg));
406 indrule++;
407 }
408 fAverageSupport = stot/nrules;
409 fAverageRuleSigma = TMath::Sqrt(fAverageSupport*(1.0-fAverageSupport));
410 Log() << kVERBOSE << "Standard deviation of support = " << fAverageRuleSigma << Endl;
411 Log() << kVERBOSE << "Average rule support = " << fAverageSupport << Endl;
412 }
413}
414
415////////////////////////////////////////////////////////////////////////////////
416/// calculate the importance of each rule
417
419{
420 Double_t maxRuleImp = CalcRuleImportance();
421 Double_t maxLinImp = CalcLinImportance();
422 Double_t maxImp = (maxRuleImp>maxLinImp ? maxRuleImp : maxLinImp);
423 SetImportanceRef( maxImp );
424}
425
426////////////////////////////////////////////////////////////////////////////////
427/// set reference importance
428
430{
431 for ( UInt_t i=0; i<fRules.size(); i++ ) {
432 fRules[i]->SetImportanceRef(impref);
433 }
434 fImportanceRef = impref;
435}
436////////////////////////////////////////////////////////////////////////////////
437/// calculate importance of each rule
438
440{
441 Double_t maxImp=-1.0;
442 Double_t imp;
443 Int_t nrules = fRules.size();
444 for ( int i=0; i<nrules; i++ ) {
445 fRules[i]->CalcImportance();
446 imp = fRules[i]->GetImportance();
447 if (imp>maxImp) maxImp = imp;
448 }
449 for ( Int_t i=0; i<nrules; i++ ) {
450 fRules[i]->SetImportanceRef(maxImp);
451 }
452
453 return maxImp;
454}
455
456////////////////////////////////////////////////////////////////////////////////
457/// calculate the linear importance for each rule
458
460{
461 Double_t maxImp=-1.0;
462 UInt_t nvars = fLinCoefficients.size();
463 fLinImportance.resize(nvars,0.0);
464 if (!DoLinear()) return maxImp;
465 //
466 // The linear importance is:
467 // I = |b_x|*sigma(x)
468 // Note that the coefficients in fLinCoefficients are for the normalized x
469 // => b'_x * x' = b'_x * sigma(r)*x/sigma(x)
470 // => b_x = b'_x*sigma(r)/sigma(x)
471 // => I = |b'_x|*sigma(r)
472 //
473 Double_t imp;
474 for ( UInt_t i=0; i<nvars; i++ ) {
475 imp = fAverageRuleSigma*TMath::Abs(fLinCoefficients[i]);
476 fLinImportance[i] = imp;
477 if (imp>maxImp) maxImp = imp;
478 }
479 return maxImp;
480}
481
482////////////////////////////////////////////////////////////////////////////////
483/// Calculates variable importance using eq (35) in RuleFit paper by Friedman et.al
484
486{
487 Log() << kVERBOSE << "Compute variable importance" << Endl;
488 Double_t rimp;
489 UInt_t nrules = fRules.size();
490 if (GetMethodBase()==0) Log() << kFATAL << "RuleEnsemble::CalcVarImportance() - should not be here!" << Endl;
491 UInt_t nvars = GetMethodBase()->GetNvar();
492 UInt_t nvarsUsed;
493 Double_t rimpN;
494 fVarImportance.resize(nvars,0);
495 // rules
496 if (DoRules()) {
497 for ( UInt_t ind=0; ind<nrules; ind++ ) {
498 rimp = fRules[ind]->GetImportance();
499 nvarsUsed = fRules[ind]->GetNumVarsUsed();
500 if (nvarsUsed<1)
501 Log() << kFATAL << "<CalcVarImportance> Variables for importance calc!!!??? A BUG!" << Endl;
502 rimpN = (nvarsUsed > 0 ? rimp/nvarsUsed:0.0);
503 for ( UInt_t iv=0; iv<nvars; iv++ ) {
504 if (fRules[ind]->ContainsVariable(iv)) {
505 fVarImportance[iv] += rimpN;
506 }
507 }
508 }
509 }
510 // linear terms
511 if (DoLinear()) {
512 for ( UInt_t iv=0; iv<fLinTermOK.size(); iv++ ) {
513 if (fLinTermOK[iv]) fVarImportance[iv] += fLinImportance[iv];
514 }
515 }
516 //
517 // Make variable importance relative the strongest variable
518 //
519 Double_t maximp = 0.0;
520 for ( UInt_t iv=0; iv<nvars; iv++ ) {
521 if ( fVarImportance[iv] > maximp ) maximp = fVarImportance[iv];
522 }
523 if (maximp>0) {
524 for ( UInt_t iv=0; iv<nvars; iv++ ) {
525 fVarImportance[iv] *= 1.0/maximp;
526 }
527 }
528}
529
530////////////////////////////////////////////////////////////////////////////////
531/// set rules
532///
533/// first clear all
534
535void TMVA::RuleEnsemble::SetRules( const std::vector<Rule *> & rules )
536{
537 DeleteRules();
538 //
539 fRules.resize(rules.size());
540 for (UInt_t i=0; i<fRules.size(); i++) {
541 fRules[i] = rules[i];
542 }
543 fEventCacheOK = kFALSE;
544}
545
546////////////////////////////////////////////////////////////////////////////////
547/// Makes rules from the given decision tree.
548/// First node in all rules is ALWAYS the root node.
549
550void TMVA::RuleEnsemble::MakeRules( const std::vector< const DecisionTree *> & forest )
551{
552 fRules.clear();
553 if (!DoRules()) return;
554 //
555 Int_t nrulesCheck=0;
556 Int_t nrules;
557 Int_t nendn;
558 Double_t sumnendn=0;
559 Double_t sumn2=0;
560 //
561 // UInt_t prevs;
562 UInt_t ntrees = forest.size();
563 for ( UInt_t ind=0; ind<ntrees; ind++ ) {
564 // prevs = fRules.size();
565 MakeRulesFromTree( forest[ind] );
566 nrules = CalcNRules( forest[ind] );
567 nendn = (nrules/2) + 1;
568 sumnendn += nendn;
569 sumn2 += nendn*nendn;
570 nrulesCheck += nrules;
571 }
572 Double_t nmean = (ntrees>0) ? sumnendn/ntrees : 0;
573 Double_t nsigm = TMath::Sqrt( gTools().ComputeVariance(sumn2,sumnendn,ntrees) );
574 Double_t ndev = 2.0*(nmean-2.0-nsigm)/(nmean-2.0+nsigm);
575 //
576 Log() << kVERBOSE << "Average number of end nodes per tree = " << nmean << Endl;
577 if (ntrees>1) Log() << kVERBOSE << "sigma of ditto ( ~= mean-2 ?) = "
578 << nsigm
579 << Endl;
580 Log() << kVERBOSE << "Deviation from exponential model = " << ndev << Endl;
581 Log() << kVERBOSE << "Corresponds to L (eq. 13, RuleFit ppr) = " << nmean << Endl;
582 // a BUG trap
583 if (nrulesCheck != static_cast<Int_t>(fRules.size())) {
584 Log() << kFATAL
585 << "BUG! number of generated and possible rules do not match! N(rules) = " << fRules.size()
586 << " != " << nrulesCheck << Endl;
587 }
588 Log() << kVERBOSE << "Number of generated rules: " << fRules.size() << Endl;
589
590 // save initial number of rules
591 fNRulesGenerated = fRules.size();
592
593 RemoveSimilarRules();
594
595 ResetCoefficients();
596
597}
598
599////////////////////////////////////////////////////////////////////////////////
600/// Make the linear terms as in eq 25, ref 2
601/// For this the b and (1-b) quantiles are needed
602
604{
605 if (!DoLinear()) return;
606
607 const std::vector<const Event *> *events = GetTrainingEvents();
608 UInt_t neve = events->size();
609 UInt_t nvars = ((*events)[0])->GetNVariables(); // Event -> GetNVariables();
610 Double_t val,ew;
611 typedef std::pair< Double_t, Int_t> dataType;
612 typedef std::pair< Double_t, dataType > dataPoint;
613
614 std::vector< std::vector<dataPoint> > vardata(nvars);
615 std::vector< Double_t > varsum(nvars,0.0);
616 std::vector< Double_t > varsum2(nvars,0.0);
617 // first find stats of all variables
618 // vardata[v][i].first -> value of var <v> in event <i>
619 // vardata[v][i].second.first -> the event weight
620 // vardata[v][i].second.second -> the event type
621 for (UInt_t i=0; i<neve; i++) {
622 ew = ((*events)[i])->GetWeight();
623 for (UInt_t v=0; v<nvars; v++) {
624 val = ((*events)[i])->GetValue(v);
625 vardata[v].push_back( dataPoint( val, dataType(ew,((*events)[i])->GetClass()) ) );
626 }
627 }
628 //
629 fLinDP.clear();
630 fLinDM.clear();
631 fLinCoefficients.clear();
632 fLinNorm.clear();
633 fLinDP.resize(nvars,0);
634 fLinDM.resize(nvars,0);
635 fLinCoefficients.resize(nvars,0);
636 fLinNorm.resize(nvars,0);
637
638 Double_t averageWeight = neve ? fRuleFit->GetNEveEff()/static_cast<Double_t>(neve) : 0;
639 // sort and find limits
640 Double_t stdl;
641
642 // find normalisation given in ref 2 after eq 26
643 Double_t lx;
644 Double_t nquant;
645 Double_t neff;
646 UInt_t indquantM;
647 UInt_t indquantP;
648
649 MethodBase *fMB=const_cast<MethodBase *>(fRuleFit->GetMethodBase());
650
651 for (UInt_t v=0; v<nvars; v++) {
652 varsum[v] = 0;
653 varsum2[v] = 0;
654 //
655 std::sort( vardata[v].begin(),vardata[v].end() );
656 nquant = fLinQuantile*fRuleFit->GetNEveEff(); // quantile = 0.025
657 neff=0;
658 UInt_t ie=0;
659 // first scan for lower quantile (including weights)
660 while ( (ie<neve) && (neff<nquant) ) {
661 neff += vardata[v][ie].second.first;
662 ie++;
663 }
664 indquantM = (ie==0 ? 0:ie-1);
665 // now for upper quantile
666 ie = neve;
667 neff=0;
668 while ( (ie>0) && (neff<nquant) ) {
669 ie--;
670 neff += vardata[v][ie].second.first;
671 }
672 indquantP = (ie==neve ? ie=neve-1:ie);
673 //
674 fLinDM[v] = vardata[v][indquantM].first; // delta-
675 fLinDP[v] = vardata[v][indquantP].first; // delta+
676
677 if(!fMB->IsSilentFile())
678 {
679 if (fLinPDFB[v]) delete fLinPDFB[v];
680 if (fLinPDFS[v]) delete fLinPDFS[v];
681 fLinPDFB[v] = new TH1F(Form("bkgvar%d",v),"bkg temphist",40,fLinDM[v],fLinDP[v]);
682 fLinPDFS[v] = new TH1F(Form("sigvar%d",v),"sig temphist",40,fLinDM[v],fLinDP[v]);
683 fLinPDFB[v]->Sumw2();
684 fLinPDFS[v]->Sumw2();
685 }
686 //
687 Int_t type;
688 const Double_t w = 1.0/fRuleFit->GetNEveEff();
689 for (ie=0; ie<neve; ie++) {
690 val = vardata[v][ie].first;
691 ew = vardata[v][ie].second.first;
692 type = vardata[v][ie].second.second;
693 lx = TMath::Min( fLinDP[v], TMath::Max( fLinDM[v], val ) );
694 varsum[v] += ew*lx;
695 varsum2[v] += ew*lx*lx;
696 if(!fMB->IsSilentFile())
697 {
698 if (type==1) fLinPDFS[v]->Fill(lx,w*ew);
699 else fLinPDFB[v]->Fill(lx,w*ew);
700 }
701 }
702 //
703 // Get normalization.
704 //
705 stdl = TMath::Sqrt( (varsum2[v] - (varsum[v]*varsum[v]/fRuleFit->GetNEveEff()))/(fRuleFit->GetNEveEff()-averageWeight) );
706 fLinNorm[v] = CalcLinNorm(stdl);
707 }
708 // Save PDFs - for debugging purpose
709 if(!fMB->IsSilentFile())
710 {
711 for (UInt_t v=0; v<nvars; v++) {
712 fLinPDFS[v]->Write();
713 fLinPDFB[v]->Write();
714 }
715 }
716}
717
718////////////////////////////////////////////////////////////////////////////////
719/// This function returns Pr( y = 1 | x ) for the linear terms.
720
722{
723 UInt_t nvars=fLinDP.size();
724
725 Double_t fstot=0;
726 Double_t fbtot=0;
727 nsig = 0;
728 ntot = nvars;
729 for (UInt_t v=0; v<nvars; v++) {
730 Double_t val = fEventLinearVal[v];
731 Int_t bin = fLinPDFS[v]->FindBin(val);
732 fstot += fLinPDFS[v]->GetBinContent(bin);
733 fbtot += fLinPDFB[v]->GetBinContent(bin);
734 }
735 if (nvars<1) return 0;
736 ntot = (fstot+fbtot)/Double_t(nvars);
737 nsig = (fstot)/Double_t(nvars);
738 return fstot/(fstot+fbtot);
739}
740
741////////////////////////////////////////////////////////////////////////////////
742/// This function returns Pr( y = 1 | x ) for rules.
743/// The probability returned is normalized against the number of rules which are actually passed
744
746{
747 Double_t sump = 0;
748 Double_t sumok = 0;
749 Double_t ssb;
750 Double_t neve;
751 //
752 UInt_t nrules = fRules.size();
753 for (UInt_t ir=0; ir<nrules; ir++) {
754 if (fEventRuleVal[ir]>0) {
755 ssb = fEventRuleVal[ir]*GetRulesConst(ir)->GetSSB(); // S/(S+B) is evaluated in CalcRuleSupport() using ALL training events
756 neve = GetRulesConst(ir)->GetSSBNeve(); // number of events accepted by the rule
757 sump += ssb*neve; // number of signal events
758 sumok += neve; // total number of events passed
759 }
760 }
761
762 nsig = sump;
763 ntot = sumok;
764 //
765 if (ntot>0) return nsig/ntot;
766 return 0.0;
767}
768
769////////////////////////////////////////////////////////////////////////////////
770/// We want to estimate F* = argmin Eyx( L(y,F(x) ), min wrt F(x)
771/// F(x) = FL(x) + FR(x) , linear and rule part
772
774{
775 SetEvent(e);
776 UpdateEventVal();
777 return FStar();
778}
779
780////////////////////////////////////////////////////////////////////////////////
781/// We want to estimate F* = argmin Eyx( L(y,F(x) ), min wrt F(x)
782/// F(x) = FL(x) + FR(x) , linear and rule part
783
785{
786 Double_t p=0;
787 Double_t nrs=0, nrt=0;
788 Double_t nls=0, nlt=0;
789 Double_t nt;
790 Double_t pr=0;
791 Double_t pl=0;
792
793 // first calculate Pr(y=1|X) for rules and linear terms
794 if (DoLinear()) pl = PdfLinear(nls, nlt);
795 if (DoRules()) pr = PdfRule(nrs, nrt);
796 // nr(l)t=0 or 1
797 if ((nlt>0) && (nrt>0)) nt=2.0;
798 else nt=1.0;
799 p = (pl+pr)/nt;
800 return 2.0*p-1.0;
801}
802
803////////////////////////////////////////////////////////////////////////////////
804/// calculate various statistics for this rule
805
807{
808 // TODO: NOT YET UPDATED FOR WEIGHTS
809 const std::vector<const Event *> *events = GetTrainingEvents();
810 const UInt_t neve = events->size();
811 const UInt_t nvars = GetMethodBase()->GetNvar();
812 const UInt_t nrules = fRules.size();
813 const Event *eveData;
814 // Flags
815 Bool_t sigRule;
816 Bool_t sigTag;
817 Bool_t bkgTag;
818 // Bool_t noTag;
819 Bool_t sigTrue;
820 Bool_t tagged;
821 // Counters
822 Int_t nsig=0;
823 Int_t nbkg=0;
824 Int_t ntag=0;
825 Int_t nss=0;
826 Int_t nsb=0;
827 Int_t nbb=0;
828 Int_t nbs=0;
829 std::vector<Int_t> varcnt;
830 // Clear vectors
831 fRulePSS.clear();
832 fRulePSB.clear();
833 fRulePBS.clear();
834 fRulePBB.clear();
835 fRulePTag.clear();
836 //
837 varcnt.resize(nvars,0);
838 fRuleVarFrac.clear();
839 fRuleVarFrac.resize(nvars,0);
840 //
841 for ( UInt_t i=0; i<nrules; i++ ) {
842 for ( UInt_t v=0; v<nvars; v++) {
843 if (fRules[i]->ContainsVariable(v)) varcnt[v]++; // count how often a variable occurs
844 }
845 sigRule = fRules[i]->IsSignalRule();
846 if (sigRule) { // rule is a signal rule (ie s/(s+b)>0.5)
847 nsig++;
848 }
849 else {
850 nbkg++;
851 }
852 // reset counters
853 nss=0;
854 nsb=0;
855 nbs=0;
856 nbb=0;
857 ntag=0;
858 // loop over all events
859 for (UInt_t e=0; e<neve; e++) {
860 eveData = (*events)[e];
861 tagged = fRules[i]->EvalEvent(*eveData);
862 sigTag = (tagged && sigRule); // it's tagged as a signal
863 bkgTag = (tagged && (!sigRule)); // ... as bkg
864 // noTag = !(sigTag || bkgTag); // ... not tagged
865 sigTrue = (eveData->GetClass() == 0); // true if event is true signal
866 if (tagged) {
867 ntag++;
868 if (sigTag && sigTrue) nss++;
869 if (sigTag && !sigTrue) nsb++;
870 if (bkgTag && sigTrue) nbs++;
871 if (bkgTag && !sigTrue) nbb++;
872 }
873 }
874 // Fill tagging probabilities
875 if (ntag>0 && neve > 0) { // should always be the case, but let's make sure and keep coverity quiet
876 fRulePTag.push_back(Double_t(ntag)/Double_t(neve));
877 fRulePSS.push_back(Double_t(nss)/Double_t(ntag));
878 fRulePSB.push_back(Double_t(nsb)/Double_t(ntag));
879 fRulePBS.push_back(Double_t(nbs)/Double_t(ntag));
880 fRulePBB.push_back(Double_t(nbb)/Double_t(ntag));
881 }
882 //
883 }
884 fRuleFSig = (nsig>0) ? static_cast<Double_t>(nsig)/static_cast<Double_t>(nsig+nbkg) : 0;
885 for ( UInt_t v=0; v<nvars; v++) {
886 fRuleVarFrac[v] = (nrules>0) ? Double_t(varcnt[v])/Double_t(nrules) : 0;
887 }
888}
889
890////////////////////////////////////////////////////////////////////////////////
891/// calculate various statistics for this rule
892
894{
895 const UInt_t nrules = fRules.size();
896 Double_t nc;
897 Double_t sumNc =0;
898 Double_t sumNc2=0;
899 for ( UInt_t i=0; i<nrules; i++ ) {
900 nc = static_cast<Double_t>(fRules[i]->GetNcuts());
901 sumNc += nc;
902 sumNc2 += nc*nc;
903 }
904 fRuleNCave = 0.0;
905 fRuleNCsig = 0.0;
906 if (nrules>0) {
907 fRuleNCave = sumNc/nrules;
908 fRuleNCsig = TMath::Sqrt(gTools().ComputeVariance(sumNc2,sumNc,nrules));
909 }
910}
911
912////////////////////////////////////////////////////////////////////////////////
913/// print rule generation info
914
916{
917 Log() << kHEADER << "-------------------RULE ENSEMBLE SUMMARY------------------------" << Endl;
918 const MethodRuleFit *mrf = GetMethodRuleFit();
919 if (mrf) Log() << kINFO << "Tree training method : " << (mrf->UseBoost() ? "AdaBoost":"Random") << Endl;
920 Log() << kINFO << "Number of events per tree : " << fRuleFit->GetNTreeSample() << Endl;
921 Log() << kINFO << "Number of trees : " << fRuleFit->GetForest().size() << Endl;
922 Log() << kINFO << "Number of generated rules : " << fNRulesGenerated << Endl;
923 Log() << kINFO << "Idem, after cleanup : " << fRules.size() << Endl;
924 Log() << kINFO << "Average number of cuts per rule : " << Form("%8.2f",fRuleNCave) << Endl;
925 Log() << kINFO << "Spread in number of cuts per rules : " << Form("%8.2f",fRuleNCsig) << Endl;
926 Log() << kVERBOSE << "Complexity : " << Form("%8.2f",fRuleNCave*fRuleNCsig) << Endl;
927 Log() << kINFO << "----------------------------------------------------------------" << Endl;
928 Log() << kINFO << Endl;
929}
930
931////////////////////////////////////////////////////////////////////////////////
932/// print function
933
935{
936 const EMsgType kmtype=kINFO;
937 const Bool_t isDebug = (fLogger->GetMinType()<=kDEBUG);
938 //
939 Log() << kmtype << Endl;
940 Log() << kmtype << "================================================================" << Endl;
941 Log() << kmtype << " M o d e l " << Endl;
942 Log() << kmtype << "================================================================" << Endl;
943
944 Int_t ind;
945 const UInt_t nvars = GetMethodBase()->GetNvar();
946 const Int_t nrules = fRules.size();
947 const Int_t printN = TMath::Min(10,nrules); //nrules+1;
948 Int_t maxL = 0;
949 for (UInt_t iv = 0; iv<fVarImportance.size(); iv++) {
950 if (GetMethodBase()->GetInputLabel(iv).Length() > maxL) maxL = GetMethodBase()->GetInputLabel(iv).Length();
951 }
952 //
953 if (isDebug) {
954 Log() << kDEBUG << "Variable importance:" << Endl;
955 for (UInt_t iv = 0; iv<fVarImportance.size(); iv++) {
956 Log() << kDEBUG << std::setw(maxL) << GetMethodBase()->GetInputLabel(iv)
957 << std::resetiosflags(std::ios::right)
958 << " : " << Form(" %3.3f",fVarImportance[iv]) << Endl;
959 }
960 }
961 //
962 Log() << kHEADER << "Offset (a0) = " << fOffset << Endl;
963 //
964 if (DoLinear()) {
965 if (fLinNorm.size() > 0) {
966 Log() << kmtype << "------------------------------------" << Endl;
967 Log() << kmtype << "Linear model (weights unnormalised)" << Endl;
968 Log() << kmtype << "------------------------------------" << Endl;
969 Log() << kmtype << std::setw(maxL) << "Variable"
970 << std::resetiosflags(std::ios::right) << " : "
971 << std::setw(11) << " Weights"
972 << std::resetiosflags(std::ios::right) << " : "
973 << "Importance"
974 << std::resetiosflags(std::ios::right)
975 << Endl;
976 Log() << kmtype << "------------------------------------" << Endl;
977 for ( UInt_t i=0; i<fLinNorm.size(); i++ ) {
978 Log() << kmtype << std::setw(std::max(maxL,8)) << GetMethodBase()->GetInputLabel(i);
979 if (fLinTermOK[i]) {
980 Log() << kmtype
981 << std::resetiosflags(std::ios::right)
982 << " : " << Form(" %10.3e",fLinCoefficients[i]*fLinNorm[i])
983 << " : " << Form(" %3.3f",fLinImportance[i]/fImportanceRef) << Endl;
984 }
985 else {
986 Log() << kmtype << "-> importance below threshold = "
987 << Form(" %3.3f",fLinImportance[i]/fImportanceRef) << Endl;
988 }
989 }
990 Log() << kmtype << "------------------------------------" << Endl;
991 }
992 }
993 else Log() << kmtype << "Linear terms were disabled" << Endl;
994
995 if ((!DoRules()) || (nrules==0)) {
996 if (!DoRules()) {
997 Log() << kmtype << "Rule terms were disabled" << Endl;
998 }
999 else {
1000 Log() << kmtype << "Even though rules were included in the model, none passed! " << nrules << Endl;
1001 }
1002 }
1003 else {
1004 Log() << kmtype << "Number of rules = " << nrules << Endl;
1005 if (isDebug) {
1006 Log() << kmtype << "N(cuts) in rules, average = " << fRuleNCave << Endl;
1007 Log() << kmtype << " RMS = " << fRuleNCsig << Endl;
1008 Log() << kmtype << "Fraction of signal rules = " << fRuleFSig << Endl;
1009 Log() << kmtype << "Fraction of rules containing a variable (%):" << Endl;
1010 for ( UInt_t v=0; v<nvars; v++) {
1011 Log() << kmtype << " " << std::setw(maxL) << GetMethodBase()->GetInputLabel(v);
1012 Log() << kmtype << Form(" = %2.2f",fRuleVarFrac[v]*100.0) << " %" << Endl;
1013 }
1014 }
1015 //
1016 // Print out all rules sorted in importance
1017 //
1018 std::list< std::pair<double,int> > sortedImp;
1019 for (Int_t i=0; i<nrules; i++) {
1020 sortedImp.push_back( std::pair<double,int>( fRules[i]->GetImportance(),i ) );
1021 }
1022 sortedImp.sort();
1023 //
1024 Log() << kmtype << "Printing the first " << printN << " rules, ordered in importance." << Endl;
1025 int pind=0;
1026 for ( std::list< std::pair<double,int> >::reverse_iterator itpair = sortedImp.rbegin();
1027 itpair != sortedImp.rend(); ++itpair ) {
1028 ind = itpair->second;
1029 // if (pind==0) impref =
1030 // Log() << kmtype << "Rule #" <<
1031 // Log() << kmtype << *fRules[ind] << Endl;
1032 fRules[ind]->PrintLogger(Form("Rule %4d : ",pind+1));
1033 pind++;
1034 if (pind==printN) {
1035 if (nrules==printN) {
1036 Log() << kmtype << "All rules printed" << Endl;
1037 }
1038 else {
1039 Log() << kmtype << "Skipping the next " << nrules-printN << " rules" << Endl;
1040 }
1041 break;
1042 }
1043 }
1044 }
1045 Log() << kmtype << "================================================================" << Endl;
1046 Log() << kmtype << Endl;
1047}
1048
1049////////////////////////////////////////////////////////////////////////////////
1050/// write rules to stream
1051
1052void TMVA::RuleEnsemble::PrintRaw( std::ostream & os ) const
1053{
1054 Int_t dp = os.precision();
1055 UInt_t nrules = fRules.size();
1056 // std::sort(fRules.begin(),fRules.end());
1057 //
1058 os << "ImportanceCut= " << fImportanceCut << std::endl;
1059 os << "LinQuantile= " << fLinQuantile << std::endl;
1060 os << "AverageSupport= " << fAverageSupport << std::endl;
1061 os << "AverageRuleSigma= " << fAverageRuleSigma << std::endl;
1062 os << "Offset= " << fOffset << std::endl;
1063 os << "NRules= " << nrules << std::endl;
1064 for (UInt_t i=0; i<nrules; i++){
1065 os << "***Rule " << i << std::endl;
1066 (fRules[i])->PrintRaw(os);
1067 }
1068 UInt_t nlinear = fLinNorm.size();
1069 //
1070 os << "NLinear= " << fLinTermOK.size() << std::endl;
1071 for (UInt_t i=0; i<nlinear; i++) {
1072 os << "***Linear " << i << std::endl;
1073 os << std::setprecision(10) << (fLinTermOK[i] ? 1:0) << " "
1074 << fLinCoefficients[i] << " "
1075 << fLinNorm[i] << " "
1076 << fLinDM[i] << " "
1077 << fLinDP[i] << " "
1078 << fLinImportance[i] << " " << std::endl;
1079 }
1080 os << std::setprecision(dp);
1081}
1082
1083////////////////////////////////////////////////////////////////////////////////
1084/// write rules to XML
1085
1086void* TMVA::RuleEnsemble::AddXMLTo(void* parent) const
1087{
1088 void* re = gTools().AddChild( parent, "Weights" ); // this is the "RuleEnsemble"
1089
1090 UInt_t nrules = fRules.size();
1091 UInt_t nlinear = fLinNorm.size();
1092 gTools().AddAttr( re, "NRules", nrules );
1093 gTools().AddAttr( re, "NLinear", nlinear );
1094 gTools().AddAttr( re, "LearningModel", (int)fLearningModel );
1095 gTools().AddAttr( re, "ImportanceCut", fImportanceCut );
1096 gTools().AddAttr( re, "LinQuantile", fLinQuantile );
1097 gTools().AddAttr( re, "AverageSupport", fAverageSupport );
1098 gTools().AddAttr( re, "AverageRuleSigma", fAverageRuleSigma );
1099 gTools().AddAttr( re, "Offset", fOffset );
1100 for (UInt_t i=0; i<nrules; i++) fRules[i]->AddXMLTo(re);
1101
1102 for (UInt_t i=0; i<nlinear; i++) {
1103 void* lin = gTools().AddChild( re, "Linear" );
1104 gTools().AddAttr( lin, "OK", (fLinTermOK[i] ? 1:0) );
1105 gTools().AddAttr( lin, "Coeff", fLinCoefficients[i] );
1106 gTools().AddAttr( lin, "Norm", fLinNorm[i] );
1107 gTools().AddAttr( lin, "DM", fLinDM[i] );
1108 gTools().AddAttr( lin, "DP", fLinDP[i] );
1109 gTools().AddAttr( lin, "Importance", fLinImportance[i] );
1110 }
1111 return re;
1112}
1113
1114////////////////////////////////////////////////////////////////////////////////
1115/// read rules from XML
1116
1118{
1119 UInt_t nrules, nlinear;
1120 gTools().ReadAttr( wghtnode, "NRules", nrules );
1121 gTools().ReadAttr( wghtnode, "NLinear", nlinear );
1122 Int_t iLearningModel;
1123 gTools().ReadAttr( wghtnode, "LearningModel", iLearningModel );
1124 fLearningModel = (ELearningModel) iLearningModel;
1125 gTools().ReadAttr( wghtnode, "ImportanceCut", fImportanceCut );
1126 gTools().ReadAttr( wghtnode, "LinQuantile", fLinQuantile );
1127 gTools().ReadAttr( wghtnode, "AverageSupport", fAverageSupport );
1128 gTools().ReadAttr( wghtnode, "AverageRuleSigma", fAverageRuleSigma );
1129 gTools().ReadAttr( wghtnode, "Offset", fOffset );
1130
1131 // read rules
1132 DeleteRules();
1133
1134 UInt_t i = 0;
1135 fRules.resize( nrules );
1136 void* ch = gTools().GetChild( wghtnode );
1137 for (i=0; i<nrules; i++) {
1138 fRules[i] = new Rule();
1139 fRules[i]->SetRuleEnsemble( this );
1140 fRules[i]->ReadFromXML( ch );
1141
1142 ch = gTools().GetNextChild(ch);
1143 }
1144
1145 // read linear classifier (Fisher)
1146 fLinNorm .resize( nlinear );
1147 fLinTermOK .resize( nlinear );
1148 fLinCoefficients.resize( nlinear );
1149 fLinDP .resize( nlinear );
1150 fLinDM .resize( nlinear );
1151 fLinImportance .resize( nlinear );
1152
1153 Int_t iok;
1154 i=0;
1155 while(ch) {
1156 gTools().ReadAttr( ch, "OK", iok );
1157 fLinTermOK[i] = (iok == 1);
1158 gTools().ReadAttr( ch, "Coeff", fLinCoefficients[i] );
1159 gTools().ReadAttr( ch, "Norm", fLinNorm[i] );
1160 gTools().ReadAttr( ch, "DM", fLinDM[i] );
1161 gTools().ReadAttr( ch, "DP", fLinDP[i] );
1162 gTools().ReadAttr( ch, "Importance", fLinImportance[i] );
1163
1164 i++;
1165 ch = gTools().GetNextChild(ch);
1166 }
1167}
1168
1169////////////////////////////////////////////////////////////////////////////////
1170/// read rule ensemble from stream
1171
1172void TMVA::RuleEnsemble::ReadRaw( std::istream & istr )
1173{
1174 UInt_t nrules;
1175 //
1176 std::string dummy;
1177 Int_t idum;
1178 //
1179 // First block is general stuff
1180 //
1181 istr >> dummy >> fImportanceCut;
1182 istr >> dummy >> fLinQuantile;
1183 istr >> dummy >> fAverageSupport;
1184 istr >> dummy >> fAverageRuleSigma;
1185 istr >> dummy >> fOffset;
1186 istr >> dummy >> nrules;
1187 //
1188 // Now read in the rules
1189 //
1190 DeleteRules();
1191 //
1192 for (UInt_t i=0; i<nrules; i++){
1193 istr >> dummy >> idum; // read line "***Rule <ind>"
1194 fRules.push_back( new Rule() );
1195 (fRules.back())->SetRuleEnsemble( this );
1196 (fRules.back())->ReadRaw(istr);
1197 }
1198 //
1199 // and now the linear terms
1200 //
1201 UInt_t nlinear;
1202 //
1203 // coverity[tainted_data_argument]
1204 istr >> dummy >> nlinear;
1205 //
1206 fLinNorm .resize( nlinear );
1207 fLinTermOK .resize( nlinear );
1208 fLinCoefficients.resize( nlinear );
1209 fLinDP .resize( nlinear );
1210 fLinDM .resize( nlinear );
1211 fLinImportance .resize( nlinear );
1212 //
1213
1214 Int_t iok;
1215 for (UInt_t i=0; i<nlinear; i++) {
1216 istr >> dummy >> idum;
1217 istr >> iok;
1218 fLinTermOK[i] = (iok==1);
1219 istr >> fLinCoefficients[i];
1220 istr >> fLinNorm[i];
1221 istr >> fLinDM[i];
1222 istr >> fLinDP[i];
1223 istr >> fLinImportance[i];
1224 }
1225}
1226
1227////////////////////////////////////////////////////////////////////////////////
1228/// copy function
1229
1231{
1232 if(this != &other) {
1233 fRuleFit = other.GetRuleFit();
1234 fRuleMinDist = other.GetRuleMinDist();
1235 fOffset = other.GetOffset();
1236 fRules = other.GetRulesConst();
1237 fImportanceCut = other.GetImportanceCut();
1238 fVarImportance = other.GetVarImportance();
1239 fLearningModel = other.GetLearningModel();
1240 fLinQuantile = other.GetLinQuantile();
1241 fRuleNCsig = other.fRuleNCsig;
1242 fAverageRuleSigma = other.fAverageRuleSigma;
1243 fEventCacheOK = other.fEventCacheOK;
1244 fImportanceRef = other.fImportanceRef;
1245 fNRulesGenerated = other.fNRulesGenerated;
1246 fRuleFSig = other.fRuleFSig;
1247 fRuleMapInd0 = other.fRuleMapInd0;
1248 fRuleMapInd1 = other.fRuleMapInd1;
1249 fRuleMapOK = other.fRuleMapOK;
1250 fRuleNCave = other.fRuleNCave;
1251 }
1252}
1253
1254////////////////////////////////////////////////////////////////////////////////
1255/// calculate the number of rules
1256
1258{
1259 if (dtree==0) return 0;
1260 Node *node = dtree->GetRoot();
1261 Int_t nendnodes = 0;
1262 FindNEndNodes( node, nendnodes );
1263 return 2*(nendnodes-1);
1264}
1265
1266////////////////////////////////////////////////////////////////////////////////
1267/// find the number of leaf nodes
1268
1269void TMVA::RuleEnsemble::FindNEndNodes( const Node *node, Int_t & nendnodes )
1270{
1271 if (node==0) return;
1272 if ((node->GetRight()==0) && (node->GetLeft()==0)) {
1273 ++nendnodes;
1274 return;
1275 }
1276 const Node *nodeR = node->GetRight();
1277 const Node *nodeL = node->GetLeft();
1278 FindNEndNodes( nodeR, nendnodes );
1279 FindNEndNodes( nodeL, nendnodes );
1280}
1281
1282////////////////////////////////////////////////////////////////////////////////
1283/// create rules from the decision tree structure
1284
1286{
1287 Node *node = dtree->GetRoot();
1288 AddRule( node );
1289}
1290
1291////////////////////////////////////////////////////////////////////////////////
1292/// add a new rule to the tree
1293
1295{
1296 if (node==0) return;
1297 if (node->GetParent()==0) { // it's a root node, don't make a rule
1298 AddRule( node->GetRight() );
1299 AddRule( node->GetLeft() );
1300 }
1301 else {
1302 Rule *rule = MakeTheRule(node);
1303 if (rule) {
1304 fRules.push_back( rule );
1305 AddRule( node->GetRight() );
1306 AddRule( node->GetLeft() );
1307 }
1308 else {
1309 Log() << kFATAL << "<AddRule> - ERROR failed in creating a rule! BUG!" << Endl;
1310 }
1311 }
1312}
1313
1314////////////////////////////////////////////////////////////////////////////////
1315/// Make a Rule from a given Node.
1316/// The root node (ie no parent) does not generate a Rule.
1317/// The first node in a rule is always the root node => fNodes.size()>=2
1318/// Each node corresponds to a cut and the cut value is given by the parent node.
1319
1321{
1322 if (node==0) {
1323 Log() << kFATAL << "<MakeTheRule> Input node is NULL. Should not happen. BUG!" << Endl;
1324 return 0;
1325 }
1326
1327 if (node->GetParent()==0) { // a root node - ignore
1328 return 0;
1329 }
1330 //
1331 std::vector< const Node * > nodeVec;
1332 const Node *parent = node;
1333 //
1334 // Make list with the input node at the end:
1335 // <root node> <node1> <node2> ... <node given as argument>
1336 //
1337 nodeVec.push_back( node );
1338 while (parent!=0) {
1339 parent = parent->GetParent();
1340 if (!parent) continue;
1341 const DecisionTreeNode* dtn = dynamic_cast<const DecisionTreeNode*>(parent);
1342 if (dtn && dtn->GetSelector()>=0)
1343 nodeVec.insert( nodeVec.begin(), parent );
1344
1345 }
1346 if (nodeVec.size()<2) {
1347 Log() << kFATAL << "<MakeTheRule> BUG! Inconsistent Rule!" << Endl;
1348 return 0;
1349 }
1350 Rule *rule = new Rule( this, nodeVec );
1351 rule->SetMsgType( Log().GetMinType() );
1352 return rule;
1353}
1354
1355////////////////////////////////////////////////////////////////////////////////
1356/// Makes rule map for all events
1357
1358void TMVA::RuleEnsemble::MakeRuleMap(const std::vector<const Event *> *events, UInt_t ifirst, UInt_t ilast)
1359{
1360 Log() << kVERBOSE << "Making Rule map for all events" << Endl;
1361 // make rule response map
1362 if (events==0) events = GetTrainingEvents();
1363 if ((ifirst==0) || (ilast==0) || (ifirst>ilast)) {
1364 ifirst = 0;
1365 ilast = events->size()-1;
1366 }
1367 // check if identical to previous call
1368 if ((events!=fRuleMapEvents) ||
1369 (ifirst!=fRuleMapInd0) ||
1370 (ilast !=fRuleMapInd1)) {
1371 fRuleMapOK = kFALSE;
1372 }
1373 //
1374 if (fRuleMapOK) {
1375 Log() << kVERBOSE << "<MakeRuleMap> Map is already valid" << Endl;
1376 return; // already cached
1377 }
1378 fRuleMapEvents = events;
1379 fRuleMapInd0 = ifirst;
1380 fRuleMapInd1 = ilast;
1381 // check number of rules
1382 UInt_t nrules = GetNRules();
1383 if (nrules==0) {
1384 Log() << kVERBOSE << "No rules found in MakeRuleMap()" << Endl;
1385 fRuleMapOK = kTRUE;
1386 return;
1387 }
1388 //
1389 // init map
1390 //
1391 std::vector<UInt_t> ruleind;
1392 fRuleMap.clear();
1393 for (UInt_t i=ifirst; i<=ilast; i++) {
1394 ruleind.clear();
1395 fRuleMap.push_back( ruleind );
1396 for (UInt_t r=0; r<nrules; r++) {
1397 if (fRules[r]->EvalEvent(*((*events)[i]))) {
1398 fRuleMap.back().push_back(r); // save only rules that are accepted
1399 }
1400 }
1401 }
1402 fRuleMapOK = kTRUE;
1403 Log() << kVERBOSE << "Made rule map for event# " << ifirst << " : " << ilast << Endl;
1404}
1405
1406////////////////////////////////////////////////////////////////////////////////
1407/// std::ostream operator
1408
std::ostream& TMVA::operator<< ( std::ostream& os, const RuleEnsemble & rules )
{
   // Deprecated stream operator: warns on 'os' and forwards to
   // RuleEnsemble::Print(), which writes via the logger rather than 'os'.
   os << "DON'T USE THIS - TO BE REMOVED" << std::endl;
   rules.Print();
   return os;
}
ROOT::R::TRInterface & r
Definition Object.C:4
#define e(i)
Definition RSha256.hxx:103
const Bool_t kFALSE
Definition RtypesCore.h:101
double Double_t
Definition RtypesCore.h:59
const Bool_t kTRUE
Definition RtypesCore.h:100
int type
Definition TGX11.cxx:121
R__EXTERN TRandom * gRandom
Definition TRandom.h:62
char * Form(const char *fmt,...)
1-D histogram with a float per channel (see TH1 documentation)}
Definition TH1.h:575
Short_t GetSelector() const
Implementation of a Decision Tree.
virtual DecisionTreeNode * GetRoot() const
UInt_t GetClass() const
Definition Event.h:86
Virtual base Class for all MVA method.
Definition MethodBase.h:111
Bool_t IsSilentFile() const
Definition MethodBase.h:379
J Friedman's RuleFit method.
Bool_t UseBoost() const
ostringstream derivative to redirect and format output
Definition MsgLogger.h:57
Node for the BinarySearch or Decision Trees.
Definition Node.h:58
virtual Node * GetLeft() const
Definition Node.h:89
virtual Node * GetParent() const
Definition Node.h:91
virtual Node * GetRight() const
Definition Node.h:90
virtual ~RuleEnsemble()
destructor
void CalcVarImportance()
Calculates variable importance using eq (35) in RuleFit paper by Friedman et.al.
void SetImportanceRef(Double_t impref)
set reference importance
void CalcImportance()
calculate the importance of each rule
void PrintRuleGen() const
print rule generation info
void MakeRuleMap(const std::vector< const TMVA::Event * > *events=0, UInt_t ifirst=0, UInt_t ilast=0)
Makes rule map for all events.
Int_t CalcNRules(const TMVA::DecisionTree *dtree)
calculate the number of rules
void ResetCoefficients()
reset all rule coefficients
void SetMsgType(EMsgType t)
Double_t GetLinQuantile() const
void ReadRaw(std::istream &istr)
read rule ensemble from stream
void AddRule(const Node *node)
add a new rule to the tree
void ReadFromXML(void *wghtnode)
read rules from XML
Double_t GetImportanceCut() const
const Event * GetTrainingEvent(UInt_t i) const
get the training event from the rule fitter
const std::vector< const TMVA::Event * > * GetTrainingEvents() const
get list of training events from the rule fitter
Double_t GetRuleMinDist() const
void SetRules(const std::vector< TMVA::Rule * > &rules)
set rules
void MakeRules(const std::vector< const TMVA::DecisionTree * > &forest)
Makes rules from the given decision tree.
void RemoveSimilarRules()
remove rules that behave similar
void FindNEndNodes(const TMVA::Node *node, Int_t &nendnodes)
find the number of leaf nodes
RuleEnsemble()
constructor
const std::vector< Double_t > & GetVarImportance() const
void CleanupRules()
cleanup rules
void Initialize(const RuleFit *rf)
Initializes all member variables with default values.
void CleanupLinear()
cleanup linear model
void RuleResponseStats()
calculate various statistics for this rule
const RuleFit * GetRuleFit() const
void * AddXMLTo(void *parent) const
write rules to XML
const std::vector< TMVA::Rule * > & GetRulesConst() const
const MethodRuleFit * GetMethodRuleFit() const
Get a pointer to the original MethodRuleFit.
void MakeModel()
create model
void RuleStatistics()
calculate various statistics for this rule
void SetCoefficients(const std::vector< Double_t > &v)
set all rule coefficients
void Print() const
print function
Double_t PdfRule(Double_t &nsig, Double_t &ntot) const
This function returns Pr( y = 1 | x ) for rules.
const MethodBase * GetMethodBase() const
Get a pointer to the original MethodRuleFit.
Double_t GetOffset() const
void Copy(RuleEnsemble const &other)
copy function
Double_t CalcLinImportance()
calculate the linear importance for each rule
Double_t CalcRuleImportance()
calculate importance of each rule
void PrintRaw(std::ostream &os) const
write rules to stream
Double_t fAverageRuleSigma
void CalcRuleSupport()
calculate the support for all rules
ELearningModel GetLearningModel() const
Double_t PdfLinear(Double_t &nsig, Double_t &ntot) const
This function returns Pr( y = 1 | x ) for the linear terms.
Double_t CoefficientRadius()
Calculates sqrt(Sum(a_i^2)), i=1..N (NOTE do not include a0)
void MakeRulesFromTree(const DecisionTree *dtree)
create rules from the decision tree structure
void MakeLinearTerms()
Make the linear terms as in eq 25, ref 2 For this the b and (1-b) quantiles are needed.
Rule * MakeTheRule(const Node *node)
Make a Rule from a given Node.
void GetCoefficients(std::vector< Double_t > &v)
Retrieve all rule coefficients.
Double_t FStar() const
We want to estimate F* = argmin Eyx( L(y,F(x) ), min wrt F(x) F(x) = FL(x) + FR(x) ,...
A class implementing various fits of rule ensembles.
Definition RuleFit.h:46
Implementation of a rule.
Definition Rule.h:50
void SetMsgType(EMsgType t)
Definition Rule.cxx:156
void * GetNextChild(void *prevchild, const char *childname=0)
XML helpers.
Definition Tools.cxx:1162
void * AddChild(void *parent, const char *childname, const char *content=0, bool isRootNode=false)
add child node
Definition Tools.cxx:1124
void * GetChild(void *parent, const char *childname=0)
get child node
Definition Tools.cxx:1150
void ReadAttr(void *node, const char *, T &value)
read attribute from xml
Definition Tools.h:329
void AddAttr(void *node, const char *, const T &value, Int_t precision=16)
add attribute to xml
Definition Tools.h:347
virtual Double_t Rndm()
Machine independent random number generator.
Definition TRandom.cxx:552
Tools & gTools()
std::ostream & operator<<(std::ostream &os, const BinaryTree &tree)
MsgLogger & Endl(MsgLogger &ml)
Definition MsgLogger.h:148
Short_t Max(Short_t a, Short_t b)
Definition TMathBase.h:208
Double_t Sqrt(Double_t x)
Definition TMath.h:641
Short_t Min(Short_t a, Short_t b)
Definition TMathBase.h:176
Short_t Abs(Short_t d)
Definition TMathBase.h:120
Definition first.py:1