////////////////////////////////////////////////////////////////////////////////
/// constructor from weight file
MethodRuleFit::MethodRuleFit( DataSetInfo& theData,
                              const TString& theWeightFile ) :
   // ... (base-class and member initializers)
////////////////////////////////////////////////////////////////////////////////
/// destructor
MethodRuleFit::~MethodRuleFit( void )
{
   for (UInt_t i=0; i<fEventSample.size(); i++) delete fEventSample[i];
   for (UInt_t i=0; i<fForest.size();      i++) delete fForest[i];
}
////////////////////////////////////////////////////////////////////////////////
/// define the options (their key words) that can be set in the option string

void MethodRuleFit::DeclareOptions()
{
   DeclareOptionRef(fGDTau=-1,             "GDTau",           "Gradient-directed (GD) path: default fit cut-off");
   DeclareOptionRef(fGDTauPrec=0.01,       "GDTauPrec",       "GD path: precision of tau");
   DeclareOptionRef(fGDPathStep=0.01,      "GDStep",          "GD path: step size");
   DeclareOptionRef(fGDNPathSteps=10000,   "GDNSteps",        "GD path: number of steps");
   DeclareOptionRef(fGDErrScale=1.1,       "GDErrScale",      "Stop scan when error > scale*errmin");
   DeclareOptionRef(fLinQuantile,          "LinQuantile",     "Quantile of linear terms (removes outliers)");
   DeclareOptionRef(fGDPathEveFrac=0.5,    "GDPathEveFrac",   "Fraction of events used for the path search");
   DeclareOptionRef(fGDValidEveFrac=0.5,   "GDValidEveFrac",  "Fraction of events used for the validation");

   // tree generation
   DeclareOptionRef(fMinFracNEve=0.1,      "fEventsMin",      "Minimum fraction of events in a splittable node");
   DeclareOptionRef(fMaxFracNEve=0.9,      "fEventsMax",      "Maximum fraction of events in a splittable node");
   DeclareOptionRef(fNTrees=20,            "nTrees",          "Number of trees in forest.");

   DeclareOptionRef(fForestTypeS="AdaBoost", "ForestType",    "Method to use for forest generation (AdaBoost or RandomForest)");
   AddPreDefVal(TString("AdaBoost"));
   AddPreDefVal(TString("Random"));

   // rule cleanup
   DeclareOptionRef(fRuleMinDist=0.001,    "RuleMinDist",     "Minimum distance between rules");
   DeclareOptionRef(fMinimp=0.01,          "MinImp",          "Minimum rule importance accepted");

   // rule model
   DeclareOptionRef(fModelTypeS="ModRuleLinear", "Model",     "Model to be used");
   AddPreDefVal(TString("ModRule"));
   AddPreDefVal(TString("ModRuleLinear"));
   AddPreDefVal(TString("ModLinear"));

   DeclareOptionRef(fRuleFitModuleS="RFTMVA", "RuleFitModule", "Which RuleFit module to use");
   AddPreDefVal(TString("RFTMVA"));
   AddPreDefVal(TString("RFFriedman"));

   DeclareOptionRef(fRFWorkDir="./rulefit", "RFWorkDir",      "Friedman's RuleFit module (RFF): working dir");
   DeclareOptionRef(fRFNrules=2000,        "RFNrules",        "RFF: Maximum number of rules");
   DeclareOptionRef(fRFNendnodes=4,        "RFNendnodes",     "RFF: Average number of end nodes");
}
////////////////////////////////////////////////////////////////////////////////
/// process the options specified by the user

void MethodRuleFit::ProcessOptions()
{
   if (IgnoreEventsWithNegWeightsInTraining()) {
      Log() << kFATAL << "Mechanism to ignore events with negative weights in training not yet available for method: "
            << GetMethodTypeName()
            << " --> please remove \"IgnoreNegWeightsInTraining\" option from booking string."
            << Endl;
   }

   fRuleFitModuleS.ToLower();
   if      (fRuleFitModuleS == "rftmva")     fUseRuleFitJF = kFALSE;
   else if (fRuleFitModuleS == "rffriedman") fUseRuleFitJF = kTRUE;
   else                                      fUseRuleFitJF = kTRUE;
   // ...
   else if (fSepTypeS == "giniindex")    fSepType = new GiniIndex();
   else if (fSepTypeS == "crossentropy") fSepType = new CrossEntropy();
   // ...

   fModelTypeS.ToLower();
   if      (fModelTypeS == "modlinear") fRuleFit.SetModelLinear();
   else if (fModelTypeS == "modrule")   fRuleFit.SetModelRules();
   else                                 fRuleFit.SetModelFull();

   fPruneMethodS.ToLower();
   // ...

   fForestTypeS.ToLower();
   if      (fForestTypeS == "random")   fUseBoost = kFALSE;
   else if (fForestTypeS == "adaboost") fUseBoost = kTRUE;
   else                                 fUseBoost = kTRUE;
   // with the TMVA module, boosted trees are grown on the full event sample
   if (fUseBoost && (!fUseRuleFitJF)) fTreeEveFrac = 1.0;

   // if the tree event fraction is not set, derive it from the sample size
   if (fTreeEveFrac<=0) {
      Int_t nevents = Data()->GetNTrainingEvents();
      Double_t n = static_cast<Double_t>(nevents);
      fTreeEveFrac = min( 0.5, (100.0 + 6.0*sqrt(n))/n );
   }
   VerifyRange(Log(), "nTrees",         fNTrees,         0,    100000, 20);
   VerifyRange(Log(), "MinImp",         fMinimp,         0.0,  1.0,    0.0);
   VerifyRange(Log(), "GDTauPrec",      fGDTauPrec,      1e-5, 5e-1);
   VerifyRange(Log(), "GDTauMin",       fGDTauMin,       0.0,  1.0);
   VerifyRange(Log(), "GDTauMax",       fGDTauMax,       fGDTauMin, 1.0);
   VerifyRange(Log(), "GDPathStep",     fGDPathStep,     0.0,  100.0,  0.01);
   VerifyRange(Log(), "GDErrScale",     fGDErrScale,     1.0,  100.0,  1.1);
   VerifyRange(Log(), "GDPathEveFrac",  fGDPathEveFrac,  0.01, 0.9,    0.5);
   VerifyRange(Log(), "GDValidEveFrac", fGDValidEveFrac, 0.01, 1.0-fGDPathEveFrac, 1.0-fGDPathEveFrac);
   VerifyRange(Log(), "fEventsMin",     fMinFracNEve,    0.0,  1.0);
   VerifyRange(Log(), "fEventsMax",     fMaxFracNEve,    fMinFracNEve, 1.0);
   fRuleFit.GetRuleEnsemblePtr()->SetLinQuantile(fLinQuantile);
   fRuleFit.GetRuleFitParamsPtr()->SetGDTauRange(fGDTauMin,fGDTauMax);
   fRuleFit.GetRuleFitParamsPtr()->SetGDTau(fGDTau);
   fRuleFit.GetRuleFitParamsPtr()->SetGDTauPrec(fGDTauPrec);
   fRuleFit.GetRuleFitParamsPtr()->SetGDTauScan(fGDTauScan);
   fRuleFit.GetRuleFitParamsPtr()->SetGDPathStep(fGDPathStep);
   fRuleFit.GetRuleFitParamsPtr()->SetGDNPathSteps(fGDNPathSteps);
   fRuleFit.GetRuleFitParamsPtr()->SetGDErrScale(fGDErrScale);
   fRuleFit.SetImportanceCut(fMinimp);
   fRuleFit.SetRuleMinDist(fRuleMinDist);
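
   // Schematic of the gradient-directed (GD) path search configured above -- a
   // sketch of the idea from Friedman & Popescu's RuleFit, not the actual
   // RuleFitParams implementation:
   //
   //   start with all rule/linear coefficients at zero;
   //   for (step = 0; step < GDNSteps; ++step) {
   //      compute the risk gradient per coefficient on the path sample
   //      (fraction GDPathEveFrac of the training events);
   //      move every coefficient whose |gradient| is within a fraction tau
   //      of the largest |gradient| by a step of size GDStep;
   //      periodically evaluate the error on the validation sample
   //      (fraction GDValidEveFrac) and stop when err > GDErrScale*errmin.
   //   }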
   if (fUseRuleFitJF) {
      Log() << kINFO << "--------------------------------------" << Endl;
      Log() << kINFO << "Friedman's RuleFit module is selected." << Endl;
      Log() << kINFO << "Only the following options are used:" << Endl;
      // ... (printout of the RFF-specific options)
      Log() << kINFO << "--------------------------------------" << Endl;
   }
   // ...

   fRuleFit.UseImportanceVisHists();

   fRuleFit.SetMsgType( Log().GetMinType() );

   if (HasTrainingTree()) InitEventSample();
}
////////////////////////////////////////////////////////////////////////////////
/// initialize the monitoring ntuple

void MethodRuleFit::InitMonitorNtuple()
{
   fMonitorNtuple = new TTree("MonitorNtuple_RuleFit","RuleFit variables");
   fMonitorNtuple->Branch("importance", &fNTImportance, "importance/D");
   fMonitorNtuple->Branch("support",    &fNTSupport,    "support/D");
   fMonitorNtuple->Branch("coefficient",&fNTCoefficient,"coefficient/D");
   fMonitorNtuple->Branch("ncuts",      &fNTNcuts,      "ncuts/I");
   fMonitorNtuple->Branch("nvars",      &fNTNvars,      "nvars/I");
   fMonitorNtuple->Branch("type",       &fNTType,       "type/I");
   fMonitorNtuple->Branch("ptag",       &fNTPtag,       "ptag/D");
   fMonitorNtuple->Branch("pss",        &fNTPss,        "pss/D");
   fMonitorNtuple->Branch("psb",        &fNTPsb,        "psb/D");
   fMonitorNtuple->Branch("pbs",        &fNTPbs,        "pbs/D");
   fMonitorNtuple->Branch("pbb",        &fNTPbb,        "pbb/D");
   fMonitorNtuple->Branch("soversb",    &fNTSSB,        "soversb/D");
}
////////////////////////////////////////////////////////////////////////////////
/// default initialization

void MethodRuleFit::Init()
{
   // the minimum requirement to declare an event signal-like
   SetSignalReferenceCut( 0.0 );
   // ...
   fLinQuantile   = 0.025;   // default quantile for removing outliers in linear terms
   // ...
   fSepTypeS      = "GiniIndex";
   fPruneMethodS  = "NONE";
   fPruneStrength = 3.5;
   // ... (remaining defaults)
}
////////////////////////////////////////////////////////////////////////////////
/// write all events from the training tree into a vector of Event objects,
/// which are more easily manipulated

void MethodRuleFit::InitEventSample( void )
{
   if (Data()->GetNEvents()==0) Log() << kFATAL << "<Init> Data().TrainingTree() is zero pointer" << Endl;

   Int_t nevents = Data()->GetNEvents();
   for (Int_t ievt=0; ievt<nevents; ievt++){
      const Event * ev = GetEvent(ievt);
      fEventSample.push_back( new Event(*ev) );
   }
   if (fTreeEveFrac<=0) {
      Double_t n = static_cast<Double_t>(nevents);
      fTreeEveFrac = min( 0.5, (100.0 + 6.0*sqrt(n))/n );
   }
   if (fTreeEveFrac>1.0) fTreeEveFrac = 1.0;

   std::shuffle(fEventSample.begin(), fEventSample.end(), std::default_random_engine{});

   Log() << kDEBUG << "Set sub-sample fraction to " << fTreeEveFrac << Endl;
}
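
// Worked example of the automatic fraction above: with n = 10000 training
// events, fTreeEveFrac = min(0.5, (100 + 6*sqrt(10000))/10000)
// = min(0.5, 0.07) = 0.07, i.e. each tree is grown on roughly 7% of the
// sample. Only below roughly n = 460 events does the cap of 0.5 take over.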
////////////////////////////////////////////////////////////////////////////////
void MethodRuleFit::Train( void )
{
   if (!IsSilentFile()) InitMonitorNtuple();

   // fill the STL vector with the event sample
   this->InitEventSample();

   // ... (calls TrainJFRuleFit() or TrainTMVARuleFit(), depending on fUseRuleFitJF)

   fRuleFit.GetRuleEnsemblePtr()->ClearRuleMap();
}
////////////////////////////////////////////////////////////////////////////////
/// training of rules using the TMVA implementation

void MethodRuleFit::TrainTMVARuleFit( void )
{
   if (IsNormalised()) Log() << kFATAL << "\"Normalise\" option cannot be used with RuleFit; "
                             << "please remove the option from the configuration string, or "
                             << "use \"!Normalise\""
                             << Endl;
   // ...

   // initialize the RuleFit object and create the rule ensemble
   fRuleFit.Initialize( this );

   // fit the rule coefficients
   Log() << kDEBUG << "Fitting rule coefficients ..." << Endl;
   fRuleFit.FitCoefficients();

   // calculate rule and variable importance
   Log() << kDEBUG << "Computing rule and variable importance" << Endl;
   fRuleFit.CalcImportance();

   // print the rule ensemble
   fRuleFit.GetRuleEnsemblePtr()->Print();

   // fill the monitoring ntuple, one entry per rule
   Log() << kDEBUG << "Filling rule ntuple" << Endl;
   UInt_t nrules = fRuleFit.GetRuleEnsemble().GetRulesConst().size();
   for (UInt_t i=0; i<nrules; i++ ) {
      // ... (importance, support, coefficient, ncuts, nvars, type)
      fNTPtag = fRuleFit.GetRuleEnsemble().GetRulePTag(i);
      fNTPss  = fRuleFit.GetRuleEnsemble().GetRulePSS(i);
      fNTPsb  = fRuleFit.GetRuleEnsemble().GetRulePSB(i);
      fNTPbs  = fRuleFit.GetRuleEnsemble().GetRulePBS(i);
      fNTPbb  = fRuleFit.GetRuleEnsemble().GetRulePBB(i);
      // ...
      fMonitorNtuple->Fill();
   }

   fRuleFit.MakeVisHists();
   fRuleFit.MakeDebugHists();

   Log() << kDEBUG << "Training done" << Endl;
}
////////////////////////////////////////////////////////////////////////////////
/// training of rules using Jerome Friedman's implementation

void MethodRuleFit::TrainJFRuleFit( void )
{
   fRuleFit.InitPtrs( this );
   // ...
   UInt_t nevents = Data()->GetNTrainingEvents();
   std::vector<const TMVA::Event*> tmp;
   for (UInt_t ievt=0; ievt<nevents; ievt++) {
      const Event *event = GetEvent(ievt);
      tmp.push_back(event);
   }
   fRuleFit.SetTrainingEvents( tmp );

   // ... (runs Friedman's rf_go.exe module)
   Log() << kINFO << "Training ..." << Endl;

   Log() << kDEBUG << "reading model summary from rf_go.exe output" << Endl;
   // ...

   // calculate rule and variable importance
   Log() << kDEBUG << "calculating rule and variable importance" << Endl;
   fRuleFit.CalcImportance();

   // print the rule ensemble
   fRuleFit.GetRuleEnsemblePtr()->Print();
   // ...
   if (!IsSilentFile()) fRuleFit.MakeVisHists();
   // ...
   Log() << kDEBUG << "done training" << Endl;
}
////////////////////////////////////////////////////////////////////////////////
/// computes ranking of input variables

const Ranking* MethodRuleFit::CreateRanking()
{
   // ...
   for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
      fRanking->AddRank( Rank( GetInputLabel(ivar), fRuleFit.GetRuleEnsemble().GetVarImportance(ivar) ) );
   }

   return fRanking;
}
////////////////////////////////////////////////////////////////////////////////
/// add the rules to an XML node

void MethodRuleFit::AddWeightsXMLTo( void* parent ) const
{
   fRuleFit.GetRuleEnsemble().AddXMLTo( parent );
}

////////////////////////////////////////////////////////////////////////////////
/// read rules from an std::istream

void MethodRuleFit::ReadWeightsFromStream( std::istream & istr )
{
   fRuleFit.GetRuleEnsemblePtr()->ReadRaw( istr );
}

////////////////////////////////////////////////////////////////////////////////
/// read rules from an XML node

void MethodRuleFit::ReadWeightsFromXML( void* wghtnode )
{
   fRuleFit.GetRuleEnsemblePtr()->ReadFromXML( wghtnode );
}

////////////////////////////////////////////////////////////////////////////////
/// returns MVA value for given event

Double_t MethodRuleFit::GetMvaValue( Double_t* err, Double_t* errUpper )
{
   // cannot determine error
   NoErrorCalc(err, errUpper);

   return fRuleFit.EvalEvent( *GetEvent() );
}
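
// Application sketch (not part of this file): evaluating a trained RuleFit
// classifier through the standard TMVA::Reader interface. The variable names
// and the weight-file path are assumptions; they must match the training setup.
//
//   TMVA::Reader reader( "!Color:!Silent" );
//   Float_t var1, var2;
//   reader.AddVariable( "var1", &var1 );
//   reader.AddVariable( "var2", &var2 );
//   reader.BookMVA( "RuleFit", "dataset/weights/TMVAClassification_RuleFit.weights.xml" );
//   var1 = 0.5f; var2 = -1.2f;                       // this event's input values
//   Double_t mva = reader.EvaluateMVA( "RuleFit" );  // > 0 is signal-like (cf. SetSignalReferenceCut above)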
////////////////////////////////////////////////////////////////////////////////
/// write special monitoring histograms to file (here: the monitoring ntuple)

void MethodRuleFit::WriteMonitoringHistosToFile( void ) const
{
   Log() << kINFO << "Write monitoring ntuple to file: " << BaseDir()->GetPath() << Endl;
   fMonitorNtuple->Write();
}
////////////////////////////////////////////////////////////////////////////////
/// write specific classifier response

void MethodRuleFit::MakeClassSpecific( std::ostream& fout, const TString& className ) const
{
   Int_t dp = fout.precision();
   fout << "   // not implemented for class: \"" << className << "\"" << std::endl;
   fout << "};" << std::endl;
   fout << "void " << className << "::Initialize(){}" << std::endl;
   fout << "void " << className << "::Clear(){}" << std::endl;
   fout << "double " << className << "::GetMvaValue__( const std::vector<double>& inputValues ) const {" << std::endl;
   fout << "   double rval=" << std::setprecision(10) << fRuleFit.GetRuleEnsemble().GetOffset() << ";" << std::endl;
   MakeClassRuleCuts(fout);
   MakeClassLinear(fout);
   fout << "   return rval;" << std::endl;
   fout << "}" << std::endl;
   fout << std::setprecision(dp);
}
////////////////////////////////////////////////////////////////////////////////
/// print out the rule cuts

void MethodRuleFit::MakeClassRuleCuts( std::ostream& fout ) const
{
   Int_t dp = fout.precision();
   if (!fRuleFit.GetRuleEnsemble().DoRules()) {
      fout << "   //" << std::endl;
      fout << "   // ==> MODEL CONTAINS NO RULES <==" << std::endl;
      fout << "   //" << std::endl;
      return;
   }
   const RuleEnsemble *rens = &(fRuleFit.GetRuleEnsemble());
   const std::vector< Rule* > *rules = &(rens->GetRulesConst());

   // sort rules in ascending importance
   std::list< std::pair<Double_t,Int_t> > sortedRules;
   for (UInt_t ir=0; ir<rules->size(); ir++) {
      sortedRules.push_back( std::pair<Double_t,Int_t>( (*rules)[ir]->GetImportance()/rens->GetImportanceRef(), ir ) );
   }
   sortedRules.sort();

   fout << "   //" << std::endl;
   fout << "   // here follow all rules ordered in importance (most important first)" << std::endl;
   fout << "   // at the end of each line, the relative importance of the rule is given" << std::endl;
   fout << "   //" << std::endl;

   for ( std::list< std::pair<double,int> >::reverse_iterator itpair = sortedRules.rbegin();
         itpair != sortedRules.rend(); ++itpair ) {
      UInt_t   ir   = itpair->second;
      Double_t impr = itpair->first;
      const RuleCut *ruleCut = (*rules)[ir]->GetRuleCut();
      if (impr<rens->GetImportanceCut()) fout << "   //" << std::endl;
      fout << "   if (" << std::flush;
      // one cut range per selected variable
      for (UInt_t ic=0; ic<ruleCut->GetNcuts(); ic++) {
         UInt_t   sel    = ruleCut->GetSelector(ic);
         Double_t valmin = ruleCut->GetCutMin(ic);
         Double_t valmax = ruleCut->GetCutMax(ic);
         Bool_t   domin  = ruleCut->GetCutDoMin(ic);
         Bool_t   domax  = ruleCut->GetCutDoMax(ic);
         //
         if (ic>0) fout << "&&" << std::flush;
         if (domin) {
            fout << "(" << std::setprecision(10) << valmin << std::flush;
            fout << "<inputValues[" << sel << "])" << std::flush;
         }
         if (domax) {
            if (domin) fout << "&&" << std::flush;
            fout << "(inputValues[" << sel << "]" << std::flush;
            fout << "<" << std::setprecision(10) << valmax << ")" << std::flush;
         }
      }
      fout << ") rval+=" << std::setprecision(10) << (*rules)[ir]->GetCoefficient() << ";" << std::flush;
      fout << "   // importance = " << Form("%3.3f",impr) << std::endl;
   }
   fout << std::setprecision(dp);
}
////////////////////////////////////////////////////////////////////////////////
/// print out the linear terms

void MethodRuleFit::MakeClassLinear( std::ostream& fout ) const
{
   if (!fRuleFit.GetRuleEnsemble().DoLinear()) {
      fout << "   //" << std::endl;
      fout << "   // ==> MODEL CONTAINS NO LINEAR TERMS <==" << std::endl;
      fout << "   //" << std::endl;
      return;
   }
   fout << "   //" << std::endl;
   fout << "   // here follow all linear terms" << std::endl;
   fout << "   // at the end of each line, the relative importance of the term is given" << std::endl;
   fout << "   //" << std::endl;
   const RuleEnsemble *rens = &(fRuleFit.GetRuleEnsemble());
   UInt_t nlin = rens->GetNLinear();
   for (UInt_t il=0; il<nlin; il++) {
      if (!rens->IsLinTermOK(il)) continue;
      Double_t imp = rens->GetLinImportance()[il]/rens->GetImportanceRef();
      fout << "   rval+="
           << std::setprecision(10) << rens->GetLinNorm()[il]*rens->GetLinCoefficients()[il]
           << "*std::min( double(" << std::setprecision(10) << rens->GetLinDP(il)
           << "), std::max( double(inputValues[" << il << "]), double(" << std::setprecision(10) << rens->GetLinDM(il) << ")));"
           << std::flush;
      fout << "   // importance = " << Form("%3.3f",imp) << std::endl;
   }
}
////////////////////////////////////////////////////////////////////////////////
/// get help message text

void MethodRuleFit::GetHelpMessage() const
{
   TString col    = gConfig().WriteOptionsReference() ? TString() : gTools().Color("bold");
   TString colres = gConfig().WriteOptionsReference() ? TString() : gTools().Color("reset");
   TString brk    = gConfig().WriteOptionsReference() ? "<br>" : "";

   Log() << Endl;
   Log() << col << "--- Short description:" << colres << Endl;
   Log() << Endl;
   Log() << "This method uses a collection of so called rules to create a" << Endl;
   Log() << "discriminating scoring function. Each rule consists of a series" << Endl;
   Log() << "of cuts in parameter space. The ensemble of rules is created" << Endl;
   Log() << "from a forest of decision trees, trained using the training data." << Endl;
   Log() << "Each node (apart from the root) corresponds to one rule." << Endl;
   Log() << "The scoring function is then obtained by linearly combining" << Endl;
   Log() << "the rules. A fitting procedure is applied to find the optimum" << Endl;
   Log() << "set of coefficients. The goal is to find a model with few rules" << Endl;
   Log() << "but with a strong discriminating power." << Endl;
   Log() << Endl;
   Log() << col << "--- Performance optimisation:" << colres << Endl;
   Log() << Endl;
   Log() << "There are two important considerations to make when optimising:" << Endl;
   Log() << Endl;
   Log() << "  1. Topology of the decision tree forest" << brk << Endl;
   Log() << "  2. Fitting of the coefficients" << Endl;
   Log() << Endl;
   Log() << "The maximum complexity of the rules is defined by the size of" << Endl;
   Log() << "the trees. Large trees will yield many complex rules and capture" << Endl;
   Log() << "higher order correlations. On the other hand, small trees will" << Endl;
   Log() << "lead to a smaller ensemble with simple rules, only capable of" << Endl;
   Log() << "modeling simple structures." << Endl;
   Log() << "Several parameters exist for controlling the complexity of the" << Endl;
   Log() << "rule ensemble." << Endl;
   Log() << Endl;
   Log() << "The fitting procedure searches for a minimum using a gradient" << Endl;
   Log() << "directed path. Apart from step size and number of steps, the" << Endl;
   Log() << "evolution of the path is defined by a cut-off parameter, tau." << Endl;
   Log() << "This parameter is unknown and depends on the training data." << Endl;
   Log() << "A large value will tend to give large weights to a few rules." << Endl;
   Log() << "Similarly, a small value will lead to a large set of rules" << Endl;
   Log() << "with similar weights." << Endl;
   Log() << Endl;
   Log() << "A final point is the model used: rules and/or linear terms." << Endl;
   Log() << "For a given training sample, the result may improve by adding" << Endl;
   Log() << "linear terms. If best performance is obtained using only linear" << Endl;
   Log() << "terms, it is very likely that the Fisher discriminant would be" << Endl;
   Log() << "a better choice. Ideally the fitting procedure should be able to" << Endl;
   Log() << "make this choice by giving appropriate weights to either kind of term." << Endl;
   Log() << Endl;
   Log() << col << "--- Performance tuning via configuration options:" << colres << Endl;
   Log() << Endl;
   Log() << "I. TUNING OF RULE ENSEMBLE:" << Endl;
   Log() << Endl;
   Log() << "   " << col << "ForestType  " << colres
         << ": It is recommended to use the default \"AdaBoost\"." << brk << Endl;
   Log() << "   " << col << "nTrees      " << colres
         << ": More trees lead to more rules, but also slower" << Endl;
   Log() << "     performance. With too few trees the risk is" << Endl;
   Log() << "     that the rule ensemble becomes too simple." << brk << Endl;
   Log() << "   " << col << "fEventsMin  " << colres << brk << Endl;
   Log() << "   " << col << "fEventsMax  " << colres
         << ": With a lower min, more large trees will be generated," << Endl;
   Log() << "     leading to more complex rules." << Endl;
   Log() << "     With a higher max, more small trees will be" << Endl;
   Log() << "     generated, leading to simpler rules." << Endl;
   Log() << "     By changing this range, the average complexity" << Endl;
   Log() << "     of the rule ensemble can be controlled." << brk << Endl;
   Log() << "   " << col << "RuleMinDist " << colres
         << ": By increasing the minimum distance between" << Endl;
   Log() << "     rules, fewer and more diverse rules will remain." << Endl;
   Log() << "     Initially it is a good idea to keep this small" << Endl;
   Log() << "     or zero and let the fitting do the selection of" << Endl;
   Log() << "     rules. In order to reduce the ensemble size," << Endl;
   Log() << "     the value can then be increased." << Endl;
   Log() << Endl;
   Log() << "II. TUNING OF THE FITTING:" << Endl;
   Log() << Endl;
   Log() << "   " << col << "GDPathEveFrac " << colres
         << ": fraction of events in path evaluation" << Endl;
   Log() << "     Increasing this fraction will improve the path" << Endl;
   Log() << "     finding. However, too high a value will leave few" << Endl;
   Log() << "     unique events available for error estimation." << Endl;
   Log() << "     It is recommended to use the default = 0.5." << brk << Endl;
   Log() << "   " << col << "GDTau         " << colres
         << ": cut-off parameter tau" << Endl;
   Log() << "     By default this value is set to -1.0." << Endl;
   Log() << "     This means that the cut-off parameter is" << Endl;
   Log() << "     automatically estimated. In most cases" << Endl;
   Log() << "     this should be fine. However, you may want" << Endl;
   Log() << "     to fix this value if you already know it" << Endl;
   Log() << "     and want to reduce the training time." << brk << Endl;
   Log() << "   " << col << "GDTauPrec     " << colres
         << ": precision of estimated tau" << Endl;
   Log() << "     Increase this precision to find a more" << Endl;
   Log() << "     optimal cut-off parameter." << brk << Endl;
   Log() << "   " << col << "GDNSteps      " << colres
         << ": number of steps in path search" << Endl;
   Log() << "     If the number of steps is too small, then" << Endl;
   Log() << "     the program will give a warning message." << Endl;
   Log() << Endl;
   Log() << "III. WARNING MESSAGES" << Endl;
   Log() << Endl;
   Log() << col << "Risk(i+1)>=Risk(i) in path" << colres << brk << Endl;
   Log() << col << "Chaotic behaviour of risk evolution." << colres << Endl;
   Log() << Endl;
   Log() << "    By construction the Risk should always decrease." << Endl;
   Log() << "    However, if the training sample is too small or" << Endl;
   Log() << "    the model is overtrained, such warnings can" << Endl;
   Log() << "    occur." << brk << Endl;
   Log() << "    The warnings can safely be ignored if only a" << Endl;
   Log() << "    few (<3) occur. If more warnings are generated," << Endl;
   Log() << "    the fitting fails." << Endl;
   Log() << "    A remedy may be to increase the value" << brk << Endl;
   Log() << "    "
         << col << "GDValidEveFrac" << colres
         << " to 1.0 (or a larger value)." << brk << Endl;
   Log() << "    In addition, if "
         << col << "GDPathEveFrac" << colres
         << " is too high" << Endl;
   Log() << "    the same warnings may occur since the events" << Endl;
   Log() << "    used for error estimation are also used for" << Endl;
   Log() << "    path estimation." << Endl;
   Log() << "    Another possibility is to modify the model -" << Endl;
   Log() << "    see above on tuning the rule ensemble." << Endl;
   Log() << Endl;
   Log() << col << "The error rate was still decreasing at the end of the path"
         << colres << Endl;
   Log() << Endl;
   Log() << "    Too few steps in path! Increase "
         << col << "GDNSteps" << colres << "." << Endl;
   Log() << Endl;
   Log() << col << "Reached minimum early in the search" << colres << Endl;
   Log() << Endl;
   Log() << "    Minimum was found early in the fitting. This" << Endl;
   Log() << "    may indicate that the used step size "
         << col << "GDStep" << colres << Endl;
   Log() << "    was too large. Reduce it and rerun." << Endl;
   Log() << "    If the results still are not OK, modify the" << Endl;
   Log() << "    model either by modifying the rule ensemble" << Endl;
   Log() << "    or by adding/removing linear terms." << Endl;
}