: fLearningModel   ( kFull )
, fImportanceCut   ( 0 )
, fLinQuantile     ( 0.025 )
, fAverageSupport  ( 0.8 )
, fAverageRuleSigma( 0.4 )
, fRuleMinDist     ( 1e-3 )
, fNRulesGenerated ( 0 )
, fEventCacheOK    ( true )
, fRuleMapEvents   ( 0 )
: fAverageSupport ( 1 )
: fLearningModel   ( kFull )
, fImportanceCut   ( 0 )
, fLinQuantile     ( 0.025 )
, fImportanceRef   ( 1.0 )
, fAverageSupport  ( 0.8 )
, fAverageRuleSigma( 0.4 )
, fRuleMinDist     ( 1e-3 )
, fNRulesGenerated ( 0 )
, fEventCacheOK    ( true )
, fRuleMapEvents   ( 0 )
for ( std::vector<Rule *>::iterator itrRule = fRules.begin(); itrRule != fRules.end(); ++itrRule ) {
SetAverageRuleSigma(0.4); // default value - used if only the linear model is chosen
UInt_t nvars = GetMethodBase()->GetNvar();
fVarImportance.clear();
fVarImportance.resize( nvars, 0.0 );
fLinPDFB.resize( nvars, 0 );
fLinPDFS.resize( nvars, 0 );
fImportanceRef = 1.0;
for (UInt_t i=0; i<nvars; i++) { // a priori, all linear terms are valid
   fLinTermOK.push_back(kTRUE);
}
fLogger->SetMinType(t);
return ( fRuleFit==0 ? 0 : fRuleFit->GetMethodRuleFit() );
return ( fRuleFit==0 ? 0 : fRuleFit->GetMethodBase() );
MakeRules( fRuleFit->GetForest() );
Int_t ncoeffs = fRules.size();
if (ncoeffs<1) return 0;
for (Int_t i=0; i<ncoeffs; i++) {
   val = fRules[i]->GetCoefficient();
UInt_t nrules = fRules.size();
for (UInt_t i=0; i<nrules; i++) {
   fRules[i]->SetCoefficient(0.0);
}
UInt_t nrules = fRules.size();
if (v.size()!=nrules) {
   Log() << kFATAL << "<SetCoefficients> - BUG TRAP - input vector wrong size! It is = " << v.size()
         << " when it should be = " << nrules << Endl;
}
for (UInt_t i=0; i<nrules; i++) {
   fRules[i]->SetCoefficient(v[i]);
}
UInt_t nrules = fRules.size();
if (nrules==0) return;
for (UInt_t i=0; i<nrules; i++) {
   v[i] = fRules[i]->GetCoefficient();
}
return &(fRuleFit->GetTrainingEvents());
return fRuleFit->GetTrainingEvent(i);
Log() << kVERBOSE << "Removing similar rules; distance = " << fRuleMinDist << Endl;
UInt_t nrulesIn = fRules.size();
std::vector< Char_t > removeMe( nrulesIn, false ); // flags rules to be removed
for (UInt_t i=0; i<nrulesIn; i++) {
   for (UInt_t k=i+1; k<nrulesIn; k++) {
      remind = (r>0.5 ? k : i); // pick one of the two similar rules at random
      if (!removeMe[remind]) {
         removeMe[remind] = true;
for (UInt_t i=0; i<nrulesIn; i++) {
   theRule = fRules[ind];
#if _MSC_VER >= 1400
   fRules.erase( std::vector<Rule *>::iterator(&fRules[ind], &fRules) );
#else
   fRules.erase( fRules.begin() + ind );
#endif
UInt_t nrulesOut = fRules.size();
Log() << kVERBOSE << "Removed " << nrulesIn - nrulesOut << " out of " << nrulesIn << " rules" << Endl;
UInt_t nrules = fRules.size();
if (nrules==0) return;
Log() << kVERBOSE << "Removing rules with relative importance < " << fImportanceCut << Endl;
if (fImportanceCut<=0) return;
for (UInt_t i=0; i<nrules; i++) {
   if (fRules[ind]->GetRelImportance()<fImportanceCut) {
      therule = fRules[ind];
#if _MSC_VER >= 1400
      fRules.erase( std::vector<Rule *>::iterator(&fRules[ind], &fRules) );
#else
      fRules.erase( fRules.begin() + ind );
#endif
Log() << kINFO << "Removed " << nrules-ind << " out of a total of " << nrules
      << " rules with importance < " << fImportanceCut << Endl;
UInt_t nlin = fLinNorm.size();
Log() << kVERBOSE << "Removing linear terms with relative importance < " << fImportanceCut << Endl;
for (UInt_t i=0; i<nlin; i++) {
   fLinTermOK.push_back( fLinImportance[i]/fImportanceRef > fImportanceCut );
}
SetAverageRuleSigma(0.4); // reset to the default value
const std::vector<const Event *> *events = GetTrainingEvents();
if ((nrules>0) && (events->size()>0)) {
   for ( std::vector< Rule * >::iterator itrRule=fRules.begin(); itrRule!=fRules.end(); ++itrRule ) {
      for ( std::vector<const Event * >::const_iterator itrEvent=events->begin(); itrEvent!=events->end(); ++itrEvent ) {
         if ((*itrRule)->EvalEvent( *(*itrEvent) )) {
            ew = (*itrEvent)->GetWeight();
            if (GetMethodRuleFit()->DataInfo().IsSignal(*itrEvent)) ssig += ew;
      s = s/fRuleFit->GetNEveEff();
      t = (t<0 ? 0 : sqrt(t));
      (*itrRule)->SetSupport(s);
      (*itrRule)->SetNorm(t);
      (*itrRule)->SetSSB( ssb );
      (*itrRule)->SetSSBNeve( Double_t(ssig+sbkg) );
   fAverageSupport   = stot/nrules;
   fAverageRuleSigma = TMath::Sqrt( fAverageSupport*(1.0-fAverageSupport) );
   Log() << kVERBOSE << "Standard deviation of support = " << fAverageRuleSigma << Endl;
   Log() << kVERBOSE << "Average rule support          = " << fAverageSupport   << Endl;
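// Note: the support s of a rule is the effective fraction of training events
// it accepts, s = (sum of accepted weights)/N_eff, and its norm t = sqrt(s*(1-s))
// is the binomial spread of a response with acceptance probability s.
// fAverageRuleSigma is that spread evaluated at the average support; it sets
// the scale used when rule coefficients are converted into importances.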
Double_t maxRuleImp = CalcRuleImportance();
Double_t maxLinImp  = CalcLinImportance();
Double_t maxImp     = (maxRuleImp>maxLinImp ? maxRuleImp : maxLinImp);
SetImportanceRef( maxImp );
for (UInt_t i=0; i<fRules.size(); i++) {
   fRules[i]->SetImportanceRef(impref);
}
fImportanceRef = impref;
Int_t nrules = fRules.size();
for (Int_t i=0; i<nrules; i++) {
   fRules[i]->CalcImportance();
   imp = fRules[i]->GetImportance();
   if (imp>maxImp) maxImp = imp;
}
for (Int_t i=0; i<nrules; i++) {
   fRules[i]->SetImportanceRef(maxImp);
}
UInt_t nvars = fLinCoefficients.size();
fLinImportance.resize(nvars,0.0);
if (!DoLinear()) return maxImp;
for (UInt_t i=0; i<nvars; i++) {
   imp = fAverageRuleSigma*TMath::Abs(fLinCoefficients[i]);
   fLinImportance[i] = imp;
   if (imp>maxImp) maxImp = imp;
}
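// Note: a linear term's importance is taken as I_i = sigma * |b_i|, the
// coefficient magnitude scaled by a spread, analogous to the rule importance
// definition in the RuleFit paper. The coefficients stored here refer to the
// normalised inputs, so fAverageRuleSigma serves as the common scale factor.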
UInt_t nrules = fRules.size();
if (GetMethodBase()==0) Log() << kFATAL << "RuleEnsemble::CalcVarImportance() - should not be here!" << Endl;
UInt_t nvars = GetMethodBase()->GetNvar();
fVarImportance.resize(nvars,0);
for (UInt_t ind=0; ind<nrules; ind++) {
   rimp      = fRules[ind]->GetImportance();
   nvarsUsed = fRules[ind]->GetNumVarsUsed();
   if (nvarsUsed<1)
      Log() << kFATAL << "<CalcVarImportance> Rule with no variables in importance calculation - a BUG!" << Endl;
   rimpN = (nvarsUsed > 0 ? rimp/nvarsUsed : 0.0);
   for (UInt_t iv=0; iv<nvars; iv++) {
      if (fRules[ind]->ContainsVariable(iv)) {
         fVarImportance[iv] += rimpN;
for (UInt_t iv=0; iv<fLinTermOK.size(); iv++) {
   if (fLinTermOK[iv]) fVarImportance[iv] += fLinImportance[iv];
for (UInt_t iv=0; iv<nvars; iv++) {
   if (fVarImportance[iv] > maximp) maximp = fVarImportance[iv];
for (UInt_t iv=0; iv<nvars; iv++) {
   fVarImportance[iv] *= 1.0/maximp;
}
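// Note: this implements eq (35) of the RuleFit paper (Friedman & Popescu):
// each rule donates its importance in equal shares I_r/m_r to the m_r
// variables it cuts on, each surviving linear term adds its own importance to
// its variable, and the result is normalised so the strongest variable is 1.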
fRules.resize(rules.size());
for (UInt_t i=0; i<fRules.size(); i++) {
   fRules[i] = rules[i];
}
if (!DoRules()) return;
UInt_t ntrees = forest.size();
for (UInt_t ind=0; ind<ntrees; ind++) {
   MakeRulesFromTree( forest[ind] );
   nrules = CalcNRules( forest[ind] );
   nendn  = (nrules/2) + 1; // inverts nrules = 2*(nendn-1)
   sumn2 += nendn*nendn;
   nrulesCheck += nrules;
}
Double_t nmean = (ntrees>0) ? sumnendn/ntrees : 0;
Double_t ndev  = 2.0*(nmean-2.0-nsigm)/(nmean-2.0+nsigm);
Log() << kVERBOSE << "Average number of end nodes per tree     = " << nmean << Endl;
if (ntrees>1) Log() << kVERBOSE << "Sigma of end-node count ( ~= mean-2 ? )  = "
                    << nsigm << Endl;
Log() << kVERBOSE << "Deviation from exponential model         = " << ndev << Endl;
Log() << kVERBOSE << "Corresponds to L (eq. 13, RuleFit paper) = " << nmean << Endl;
if (nrulesCheck != static_cast<Int_t>(fRules.size())) {
   Log() << kFATAL
         << "BUG! Number of generated and possible rules do not match! N(rules) = " << fRules.size()
         << " != " << nrulesCheck << Endl;
}
Log() << kVERBOSE << "Number of generated rules: " << fRules.size() << Endl;
fNRulesGenerated = fRules.size();
RemoveSimilarRules();
if (!DoLinear()) return;
const std::vector<const Event *> *events = GetTrainingEvents();
UInt_t neve  = events->size();
UInt_t nvars = ((*events)[0])->GetNVariables();
typedef std::pair< Double_t, Int_t >    dataType;  // (event weight, event class)
typedef std::pair< Double_t, dataType > dataPoint; // (variable value, dataType)
std::vector< std::vector<dataPoint> > vardata(nvars);
std::vector< Double_t > varsum (nvars,0.0);
std::vector< Double_t > varsum2(nvars,0.0);
for (UInt_t i=0; i<neve; i++) {
   val = ((*events)[i])->GetValue(v);
   vardata[v].push_back( dataPoint( val, dataType(ew,((*events)[i])->GetClass()) ) );
fLinCoefficients.clear();
fLinDP.resize(nvars,0);
fLinDM.resize(nvars,0);
fLinCoefficients.resize(nvars,0);
fLinNorm.resize(nvars,0);
Double_t averageWeight = neve ? fRuleFit->GetNEveEff()/static_cast<Double_t>(neve) : 0;
std::sort( vardata[v].begin(), vardata[v].end() );
nquant = fLinQuantile*fRuleFit->GetNEveEff(); // effective event count in each tail
while ( (ie<neve) && (neff<nquant) ) { // scan up for the lower quantile
   neff += vardata[v][ie].second.first;
indquantM = (ie==0 ? 0 : ie-1);
while ( (ie>0) && (neff<nquant) ) {    // scan down for the upper quantile
   neff += vardata[v][ie].second.first;
indquantP = (ie==neve ? neve-1 : ie);
fLinDM[v] = vardata[v][indquantM].first; // lower cutoff, delta-
fLinDP[v] = vardata[v][indquantP].first; // upper cutoff, delta+
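// Note: delta- and delta+ are weighted quantile cutoffs; each variable later
// enters the linear part only through its clipped value lx, with x limited to
// the interval [delta-, delta+]. With the default fLinQuantile = 0.025 this
// trims the lowest and highest 2.5% (by event weight) of each variable's
// range, making the linear terms robust against outliers.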
if (fLinPDFB[v]) delete fLinPDFB[v];
if (fLinPDFS[v]) delete fLinPDFS[v];
fLinPDFB[v] = new TH1F(Form("bkgvar%d",v),"bkg temphist",40,fLinDM[v],fLinDP[v]);
fLinPDFS[v] = new TH1F(Form("sigvar%d",v),"sig temphist",40,fLinDM[v],fLinDP[v]);
fLinPDFB[v]->Sumw2();
fLinPDFS[v]->Sumw2();
const Double_t w = 1.0/fRuleFit->GetNEveEff();
for (ie=0; ie<neve; ie++) {
   val  = vardata[v][ie].first;
   ew   = vardata[v][ie].second.first;
   type = vardata[v][ie].second.second;
   varsum2[v] += ew*lx*lx;
   if (type==1) fLinPDFS[v]->Fill(lx,w*ew);
   else         fLinPDFB[v]->Fill(lx,w*ew);
stdl = TMath::Sqrt( (varsum2[v] - (varsum[v]*varsum[v]/fRuleFit->GetNEveEff()))/(fRuleFit->GetNEveEff()-averageWeight) );
fLinNorm[v] = CalcLinNorm(stdl);
fLinPDFS[v]->Write();
fLinPDFB[v]->Write();
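// Note: stdl is the weighted standard deviation of the clipped variable,
// sqrt( (sum w*lx^2 - (sum w*lx)^2/N_eff) / (N_eff - <w>) ), i.e. the usual
// variance estimate generalised to event weights and an effective event
// count; CalcLinNorm(stdl) then fixes the normalisation of the linear term.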
UInt_t nvars = fLinDP.size();
Int_t bin = fLinPDFS[v]->FindBin(val);
fstot += fLinPDFS[v]->GetBinContent(bin);
fbtot += fLinPDFB[v]->GetBinContent(bin);
if (nvars<1) return 0;
ntot = (fstot+fbtot)/Double_t(nvars);
return fstot/(fstot+fbtot);
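// Note: PdfLinear() estimates Pr(y=1|x) for the linear part as s/(s+b), where
// s and b are the summed signal and background PDF bin contents (one bin per
// variable) at the current event's clipped variable values; nsig and ntot
// return the corresponding per-variable averages to the caller.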
UInt_t nrules = fRules.size();
for (UInt_t ir=0; ir<nrules; ir++) {
   if (fEventRuleVal[ir]>0) {
      ssb  = fEventRuleVal[ir]*GetRulesConst(ir)->GetSSB(); // S/(S+B), evaluated in CalcRuleSupport()
      neve = GetRulesConst(ir)->GetSSBNeve(); // number of events accepted by the rule
if (ntot>0) return nsig/ntot;
if (DoLinear()) pl = PdfLinear(nls, nlt);
if (DoRules())  pr = PdfRule(nrs, nrt);
if ((nlt>0) && (nrt>0)) nt = 2.0;
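// Note: FStar() estimates F* = argmin_F E_yx( L(y,F(x)) ) with F split into a
// linear part and a rule part. The two probability estimates are averaged
// (nt = 2 when both parts contribute, 1 otherwise) and the resulting p is
// returned as the symmetric score 2*p - 1, mapping [0,1] onto [-1,1].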
const std::vector<const Event *> *events = GetTrainingEvents();
const UInt_t neve   = events->size();
const UInt_t nvars  = GetMethodBase()->GetNvar();
const UInt_t nrules = fRules.size();
const Event *eveData;
std::vector<Int_t> varcnt;
varcnt.resize(nvars,0);
fRuleVarFrac.clear();
fRuleVarFrac.resize(nvars,0);
for (UInt_t i=0; i<nrules; i++) {
   if (fRules[i]->ContainsVariable(v)) varcnt[v]++; // count how often each variable occurs
   sigRule = fRules[i]->IsSignalRule();
   for (UInt_t e=0; e<neve; e++) {
      eveData = (*events)[e];
      tagged  = fRules[i]->EvalEvent(*eveData);
      sigTag  = (tagged && sigRule);
      bkgTag  = (tagged && (!sigRule));
      sigTrue = (eveData->GetClass() == 0);
      if (sigTag &&  sigTrue) nss++;
      if (sigTag && !sigTrue) nsb++;
      if (bkgTag &&  sigTrue) nbs++;
      if (bkgTag && !sigTrue) nbb++;
if (ntag>0 && neve > 0) {
fRuleFSig = (nsig>0) ? static_cast<Double_t>(nsig)/static_cast<Double_t>(nsig+nbkg) : 0;
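// Note: nss/nsb count events tagged by a signal rule that are truly signal or
// background, and nbs/nbb the same for background rules; divided by the number
// of tagged events they give the per-rule tagging probabilities. fRuleFSig is
// the fraction of rules whose accepted sample is signal-dominated.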
const UInt_t nrules = fRules.size();
for (UInt_t i=0; i<nrules; i++) {
   nc = static_cast<Double_t>(fRules[i]->GetNcuts());
fRuleNCave = sumNc/nrules;
Log() << kINFO << "-------------------RULE ENSEMBLE SUMMARY------------------------" << Endl;
if (mrf) Log() << kINFO << "Tree training method               : " << (mrf->UseBoost() ? "AdaBoost" : "Random") << Endl;
Log() << kINFO << "Number of events per tree          : " << fRuleFit->GetNTreeSample() << Endl;
Log() << kINFO << "Number of trees                    : " << fRuleFit->GetForest().size() << Endl;
Log() << kINFO << "Number of generated rules          : " << fNRulesGenerated << Endl;
Log() << kINFO << "Number of rules after cleanup      : " << fRules.size() << Endl;
Log() << kINFO << "Average number of cuts per rule    : " << Form("%8.2f",fRuleNCave) << Endl;
Log() << kINFO << "Spread in number of cuts per rule  : " << Form("%8.2f",fRuleNCsig) << Endl;
Log() << kINFO << "----------------------------------------------------------------" << Endl;
Log() << kmtype << "================================================================" << Endl;
Log() << kmtype << "                          M o d e l                             " << Endl;
Log() << kmtype << "================================================================" << Endl;
const UInt_t nvars  = GetMethodBase()->GetNvar();
const Int_t  nrules = fRules.size();
for (UInt_t iv = 0; iv<fVarImportance.size(); iv++) {
   if (GetMethodBase()->GetInputLabel(iv).Length() > maxL) maxL = GetMethodBase()->GetInputLabel(iv).Length();
}
for (UInt_t iv = 0; iv<fVarImportance.size(); iv++) {
   Log() << kDEBUG << std::setw(maxL) << GetMethodBase()->GetInputLabel(iv)
         << std::resetiosflags(std::ios::right)
         << " : " << Form(" %3.3f",fVarImportance[iv]) << Endl;
}
Log() << kmtype << "Offset (a0) = " << fOffset << Endl;
if (fLinNorm.size() > 0) {
   Log() << kmtype << "------------------------------------" << Endl;
   Log() << kmtype << "Linear model (weights unnormalised)" << Endl;
   Log() << kmtype << "------------------------------------" << Endl;
   Log() << kmtype << std::setw(maxL) << "Variable"
         << std::resetiosflags(std::ios::right) << " : "
         << std::setw(11) << " Weights"
         << std::resetiosflags(std::ios::right) << " : "
         << "Importance"
         << std::resetiosflags(std::ios::right)
         << Endl;
   Log() << kmtype << "------------------------------------" << Endl;
   for (UInt_t i=0; i<fLinNorm.size(); i++) {
      Log() << kmtype << std::setw(std::max(maxL,8)) << GetMethodBase()->GetInputLabel(i);
      if (fLinTermOK[i]) {
         Log() << kmtype
               << std::resetiosflags(std::ios::right)
               << " : " << Form(" %10.3e",fLinCoefficients[i]*fLinNorm[i])
               << " : " << Form(" %3.3f",fLinImportance[i]/fImportanceRef) << Endl;
      }
      else {
         Log() << kmtype << "-> importance below threshold = "
               << Form(" %3.3f",fLinImportance[i]/fImportanceRef) << Endl;
      }
   }
   Log() << kmtype << "------------------------------------" << Endl;
}
else Log() << kmtype << "Linear terms were disabled" << Endl;
if ((!DoRules()) || (nrules==0)) {
   Log() << kmtype << "Rule terms were disabled" << Endl;
   Log() << kmtype << "Even though rules were included in the model, none passed! " << nrules << Endl;
Log() << kmtype << "Number of rules = " << nrules << Endl;
Log() << kmtype << "N(cuts) in rules, average = " << fRuleNCave << Endl;
Log() << kmtype << "                      RMS = " << fRuleNCsig << Endl;
Log() << kmtype << "Fraction of signal rules  = " << fRuleFSig << Endl;
Log() << kmtype << "Fraction of rules containing a variable (%):" << Endl;
Log() << kmtype << "   " << std::setw(maxL) << GetMethodBase()->GetInputLabel(v);
Log() << kmtype << Form(" = %2.2f",fRuleVarFrac[v]*100.0) << " %" << Endl;
std::list< std::pair<double,int> > sortedImp;
for (Int_t i=0; i<nrules; i++) {
   sortedImp.push_back( std::pair<double,int>( fRules[i]->GetImportance(), i ) );
}
Log() << kmtype << "Printing the first " << printN << " rules, ordered in importance." << Endl;
for ( std::list< std::pair<double,int> >::reverse_iterator itpair = sortedImp.rbegin();
      itpair != sortedImp.rend(); ++itpair ) {
   ind = itpair->second;
   fRules[ind]->PrintLogger( Form("Rule %4d : ", pind+1) );
if (nrules==printN) {
   Log() << kmtype << "All rules printed" << Endl;
}
else {
   Log() << kmtype << "Skipping the next " << nrules-printN << " rules" << Endl;
}
Log() << kmtype << "================================================================" << Endl;
Int_t dp = os.precision();
UInt_t nrules = fRules.size();
os << "ImportanceCut= "    << fImportanceCut    << std::endl;
os << "LinQuantile= "      << fLinQuantile      << std::endl;
os << "AverageSupport= "   << fAverageSupport   << std::endl;
os << "AverageRuleSigma= " << fAverageRuleSigma << std::endl;
os << "Offset= "           << fOffset           << std::endl;
os << "NRules= "           << nrules            << std::endl;
for (UInt_t i=0; i<nrules; i++) {
   os << "***Rule " << i << std::endl;
   fRules[i]->PrintRaw(os);
}
UInt_t nlinear = fLinNorm.size();
os << "NLinear= " << fLinTermOK.size() << std::endl;
for (UInt_t i=0; i<nlinear; i++) {
   os << "***Linear " << i << std::endl;
   os << std::setprecision(10) << (fLinTermOK[i] ? 1:0) << " "
      << fLinCoefficients[i] << " "
      << fLinNorm[i] << " "
      << fLinImportance[i] << " " << std::endl;
}
os << std::setprecision(dp);
UInt_t nrules  = fRules.size();
UInt_t nlinear = fLinNorm.size();
gTools().AddAttr( re, "LearningModel",    (int)fLearningModel );
gTools().AddAttr( re, "AverageRuleSigma", fAverageRuleSigma );
for (UInt_t i=0; i<nrules; i++) fRules[i]->AddXMLTo(re);
for (UInt_t i=0; i<nlinear; i++) {
Int_t iLearningModel;
gTools().ReadAttr( wghtnode, "AverageSupport",   fAverageSupport );
gTools().ReadAttr( wghtnode, "AverageRuleSigma", fAverageRuleSigma );
fRules.resize( nrules );
for (i=0; i<nrules; i++) {
   fRules[i] = new Rule();
   fRules[i]->SetRuleEnsemble( this );
   fRules[i]->ReadFromXML( ch );
}
fLinNorm        .resize( nlinear );
fLinTermOK      .resize( nlinear );
fLinCoefficients.resize( nlinear );
fLinDP          .resize( nlinear );
fLinDM          .resize( nlinear );
fLinImportance  .resize( nlinear );
fLinTermOK[i] = (iok == 1);
istr >> dummy >> fImportanceCut;
istr >> dummy >> fLinQuantile;
istr >> dummy >> fAverageSupport;
istr >> dummy >> fAverageRuleSigma;
istr >> dummy >> fOffset;
istr >> dummy >> nrules;
for (UInt_t i=0; i<nrules; i++) {
   istr >> dummy >> idum; // reads the line "***Rule <i>"
   fRules.push_back( new Rule() );
   fRules.back()->SetRuleEnsemble( this );
   fRules.back()->ReadRaw(istr);
}
istr >> dummy >> nlinear;
fLinNorm        .resize( nlinear );
fLinTermOK      .resize( nlinear );
fLinCoefficients.resize( nlinear );
fLinDP          .resize( nlinear );
fLinDM          .resize( nlinear );
fLinImportance  .resize( nlinear );
for (UInt_t i=0; i<nlinear; i++) {
   istr >> dummy >> idum; // reads the line "***Linear <i>"
   istr >> iok;
   fLinTermOK[i] = (iok==1);
   istr >> fLinCoefficients[i];
   istr >> fLinNorm[i];
   istr >> fLinImportance[i];
if (this != &other) {
if (dtree==0) return 0;
Node *node = dtree->GetRoot();
Int_t nendnodes = 0;
FindNEndNodes( node, nendnodes );
return 2*(nendnodes-1);
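// Note on the count: a binary tree with n end nodes has 2*(n-1) descendant
// (non-root) nodes in total, and AddRule() turns every non-root node into
// exactly one rule (the conjunction of cuts along the path from the root),
// so each tree contributes 2*(nendnodes-1) rules.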
if (node==0) return;
const Node *nodeR = node->GetRight();
const Node *nodeL = node->GetLeft();
FindNEndNodes( nodeR, nendnodes );
FindNEndNodes( nodeL, nendnodes );
if (node==0) return;
Rule *rule = MakeTheRule(node);
if (rule) {
   fRules.push_back( rule );
}
else {
   Log() << kFATAL << "<AddRule> - ERROR failed in creating a rule! BUG!" << Endl;
}
Log() << kFATAL << "<MakeTheRule> Input node is NULL. Should not happen. BUG!" << Endl;
std::vector< const Node * > nodeVec;
const Node *parent = node;
nodeVec.push_back( node );
while (parent!=0) {
   parent = parent->GetParent();
   if (!parent) continue;
   nodeVec.insert( nodeVec.begin(), parent ); // prepend, so the root ends up first
}
if (nodeVec.size()<2) {
   Log() << kFATAL << "<MakeTheRule> BUG! Inconsistent Rule!" << Endl;
}
Rule *rule = new Rule( this, nodeVec );
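// Note: the cut sequence is collected by walking from the given node back up
// to the root, inserting each parent at the front of nodeVec; the final list
// therefore runs <root> <node_1> ... <given node>. A valid rule must contain
// at least the root plus one cut node, hence the size()<2 consistency check.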
if (events==0) events = GetTrainingEvents();
if ((ifirst==0) || (ilast==0) || (ifirst>ilast)) {
   ilast = events->size()-1;
}
if ((events!=fRuleMapEvents) ||
    (ifirst!=fRuleMapInd0)   ||
    (ilast !=fRuleMapInd1)) {
fRuleMapEvents = events;
fRuleMapInd0   = ifirst;
fRuleMapInd1   = ilast;
UInt_t nrules = GetNRules();
std::vector<UInt_t> ruleind;
for (UInt_t i=ifirst; i<=ilast; i++) {
   fRuleMap.push_back( ruleind );
   if (fRules[r]->EvalEvent(*((*events)[i]))) {
      fRuleMap.back().push_back(r); // store only the rules that fire
Log() << kVERBOSE << "Made rule map for event# " << ifirst << " : " << ilast << Endl;
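// Note: fRuleMap caches the rule responses; entry i holds the indices of all
// rules that fire for event ifirst+i, so the coefficient fit can revisit the
// same event range without re-evaluating every rule on every event. The
// comparison against fRuleMapEvents/fRuleMapInd0/fRuleMapInd1 above skips the
// rebuild when the requested range is unchanged.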
os << "DON'T USE THIS - TO BE REMOVED" << std::endl;