#ifdef MethodMLP_UseMinuit__
   fPrior(0.0), fPriorDev(0), fUpdateLimit(0),
   fTrainingMethod(kBFGS), fTrainMethodS("BFGS"),
   fSamplingFraction(1.0), fSamplingEpoch(0.0), fSamplingWeight(0.0),
   fSamplingTraining(false), fSamplingTesting(false),
   fLastAlpha(0.0), fTau(0.),
   fResetStep(0), fLearnRate(0.0), fDecayRate(0.0),
   fBPMode(kSequential), fBpModeS("None"),
   fBatchSize(0), fTestRate(0), fEpochMon(false),
   fGA_nsteps(0), fGA_preCalc(0), fGA_SC_steps(0),
   fGA_SC_rate(0), fGA_SC_factor(0.0),
   fDeviationsFromTargets(0),

   fUseRegulator(false), fCalculateErrors(false),
   fPrior(0.0), fPriorDev(0), fUpdateLimit(0),
   fTrainingMethod(kBFGS), fTrainMethodS("BFGS"),
   fSamplingFraction(1.0), fSamplingEpoch(0.0), fSamplingWeight(0.0),
   fSamplingTraining(false), fSamplingTesting(false),
   fLastAlpha(0.0), fTau(0.),
   fResetStep(0), fLearnRate(0.0), fDecayRate(0.0),
   fBPMode(kSequential), fBpModeS("None"),
   fBatchSize(0), fTestRate(0), fEpochMon(false),
   fGA_nsteps(0), fGA_preCalc(0), fGA_SC_steps(0),
   fGA_SC_rate(0), fGA_SC_factor(0.0),
   fDeviationsFromTargets(0),
   SetSignalReferenceCut( 0.5 );

#ifdef MethodMLP_UseMinuit__
   DeclareOptionRef(fTrainMethodS="BP", "TrainingMethod",
                    "Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)");

   DeclareOptionRef(fLearnRate=0.02,    "LearningRate",    "ANN learning rate parameter");
   DeclareOptionRef(fDecayRate=0.01,    "DecayRate",       "Decay rate for learning parameter");
   DeclareOptionRef(fTestRate =10,      "TestRate",        "Test for overtraining performed at each #th epochs");
   DeclareOptionRef(fEpochMon = kFALSE, "EpochMonitoring", "Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)");

   DeclareOptionRef(fSamplingFraction=1.0, "Sampling",      "Only 'Sampling' (randomly selected) events are trained each epoch");
   DeclareOptionRef(fSamplingEpoch=1.0,    "SamplingEpoch", "Sampling is used for the first 'SamplingEpoch' epochs, afterwards, all events are taken for training");
   DeclareOptionRef(fSamplingWeight=1.0, "SamplingImportance",
                    "The sampling weights of events in epochs which were successful (worse estimator than before) are multiplied with SamplingImportance, else they are divided.");
   DeclareOptionRef(fSamplingTraining=kTRUE,  "SamplingTraining", "The training sample is sampled");
   DeclareOptionRef(fSamplingTesting= kFALSE, "SamplingTesting",  "The testing sample is sampled");

   DeclareOptionRef(fResetStep=50,  "ResetStep", "How often BFGS should reset history");
   DeclareOptionRef(fTau      =3.0, "Tau",       "LineSearch \"size step\"");

   DeclareOptionRef(fBpModeS="sequential", "BPMode",
                    "Back-propagation learning mode: sequential or batch");
   AddPreDefVal(TString("sequential"));
   AddPreDefVal(TString("batch"));

   DeclareOptionRef(fBatchSize=-1, "BatchSize",
                    "Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events");

   DeclareOptionRef(fImprovement=1e-30, "ConvergenceImprove",
                    "Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)");

   DeclareOptionRef(fSteps=-1, "ConvergenceTests",
                    "Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)");

   DeclareOptionRef(fUseRegulator=kFALSE, "UseRegulator",
                    "Use regulator to avoid over-training");
   DeclareOptionRef(fUpdateLimit=10000, "UpdateLimit",
                    "Maximum times of regulator update");
   DeclareOptionRef(fCalculateErrors=kFALSE, "CalculateErrors",
                    "Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value");

   DeclareOptionRef(fWeightRange=1.0, "WeightRange",
                    "Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range");
   if (IgnoreEventsWithNegWeightsInTraining()) {
      Log() << kINFO << "Will ignore negative events in training!" << Endl;
   }
   if      (fTrainMethodS == "BP"  ) fTrainingMethod = kBP;
   else if (fTrainMethodS == "BFGS") fTrainingMethod = kBFGS;
   else if (fTrainMethodS == "GA"  ) fTrainingMethod = kGA;

   if      (fBpModeS == "sequential") fBPMode = kSequential;
   else if (fBpModeS == "batch")      fBPMode = kBatch;
   if (fBPMode == kBatch) {
      Int_t numEvents = Data()->GetNEvents();

   Log() << kDEBUG << "Initialize learning rates" << Endl;
   synapse->SetLearningRate(fLearnRate);
   Log() << kFATAL << "<CalculateEstimator> fatal error: wrong tree type: " << treeType << Endl;

   if (fEpochMon && iEpoch >= 0 && !DoRegression()) {
   Int_t nEvents = GetNEvents();

   if (fWeightRange < 1.f) {
      fDeviationsFromTargets = new std::vector<std::pair<Float_t,Float_t> >(nEvents);

   for (Int_t i = 0; i < nEvents; i++) {
      if ((ev->GetWeight() < 0) && IgnoreEventsWithNegWeightsInTraining())
         continue;   // skip negative-weight events when requested

      ForceNetworkInputs( ev );
      ForceNetworkCalculations();
      if (DoRegression()) {
         v = GetOutputNeuron( itgt )->GetActivationValue();
      }
      else if (DoMulticlass()) {
         if (fEstimator == kCE) {
            v = GetOutputNeuron( icls )->GetActivationValue();

      v = GetOutputNeuron()->GetActivationValue();

      if (fDeviationsFromTargets)
         fDeviationsFromTargets->push_back(std::pair<Float_t,Float_t>(d, w));
      if (DataInfo().IsSignal(ev) && histS != 0) histS->Fill( float(v), float(w) );
   if (fDeviationsFromTargets) {
      std::sort(fDeviationsFromTargets->begin(), fDeviationsFromTargets->end());

      delete fDeviationsFromTargets;

   if (histS != 0) fEpochMonHistS.push_back( histS );
   if (histB != 0) fEpochMonHistB.push_back( histB );
   CreateWeightMonitoringHists( TString::Format("epochmonitoring___epoch_%04i_weights_hist", iEpoch), &fEpochMonHistW );
   Log() << kFATAL << "ANN Network is not initialized, doing it now!" << Endl;
   SetAnalysisType(GetAnalysisType());

   Log() << kDEBUG << "reinitialize learning rates" << Endl;
   InitializeLearningRates();

   PrintMessage("Training Network");
   Int_t nEvents = GetNEvents();

   Log() << kWARNING << "ANN too complicated: #events=" << nEvents << "\t#synapses=" << nSynapses << Endl;
   if (fInteractive && fInteractive->NotInitialized()) {
      std::vector<TString> titles = {"Error on training set", "Error on test set"};
      fInteractive->Init(titles);

#ifdef MethodMLP_UseMinuit__
   if      (fTrainingMethod == kGA)   GeneticMinimize();
   else if (fTrainingMethod == kBFGS) BFGSMinimize(nEpochs);
   else                               BackPropagationMinimize(nEpochs);
   Log() << kINFO << "Finalizing handling of Regulator terms, trainE=" << trainE << " testE=" << testE << Endl;
   Log() << kINFO << "Done with handling of Regulator terms" << Endl;

   if (fCalculateErrors || fUseRegulator)

      GetApproxInvHessian( fInvHessian, false );
   fEstimatorHistTrain = new TH1F( "estimatorHistTrain", "training estimator",
   fEstimatorHistTest  = new TH1F( "estimatorHistTest",  "test estimator",

   std::vector<Double_t> buffer( nWeights );
   if (fSamplingTraining || fSamplingTesting)
      Data()->InitSampling(1.0, 1.0, fRandomSeed);

   if (fSteps > 0) Log() << kINFO << "Inaccurate progress timing for MLP... " << Endl;
   timer.DrawProgressBar( 0 );

      if (fExitFromTraining) break;
      if ((i+1)%fTestRate == 0 || (i == 0)) {
         if (fSamplingTraining) {
            Data()->InitSampling(fSamplingFraction, fSamplingWeight);
            Data()->CreateSampling();
         if (fSamplingTesting) {
            Data()->InitSampling(fSamplingFraction, fSamplingWeight);
            Data()->CreateSampling();

         Data()->InitSampling(1.0, 1.0);
         Data()->InitSampling(1.0, 1.0);
      SetGammaDelta( Gamma, Delta, buffer );

      if (i % fResetStep == 0 && i < 0.5*nEpochs) {

      if (GetHessian( Hessian, Gamma, Delta )) {
      else SetDir( Hessian, Dir );

      if (DerivDir( Dir ) > 0) {

      if (LineSearch( Dir, buffer, &dError )) {
      if (LineSearch( Dir, buffer, &dError )) {
         Log() << kFATAL << "Line search failed! Huge troubles somewhere..." << Endl;
      if ((i+1)%fTestRate == 0) {

         if (fInteractive) fInteractive->AddPoint(i+1, trainE, testE);

         fEstimatorHistTrain->Fill( i+1, trainE );
         fEstimatorHistTest ->Fill( i+1, testE );

         if ((testE < GetCurrentValue()) || (GetCurrentValue() < 1e-100)) {

         Data()->EventResult( success );

         SetCurrentValue( testE );
         if (HasConverged()) {

         ResetConvergenceCounter();
         progress = Progress()*fSamplingFraction*100*fSamplingEpoch;
         progress = 100.0*(fSamplingFraction*fSamplingEpoch + (1.0-fSamplingEpoch)*Progress());

      if (progress < i) progress = i;
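For orientation only: Gamma holds the change of the error gradient and Delta the change of the weights between iterations, which is exactly the bookkeeping a BFGS-type update of the inverse Hessian needs. A sketch of the standard BFGS formulas this corresponds to (notation mine; the exact expressions used live in GetHessian and SetDir):

\delta_k = w_{k+1} - w_k , \qquad \gamma_k = g_{k+1} - g_k ,

H_{k+1} = \Bigl(I - \frac{\delta_k \gamma_k^{T}}{\gamma_k^{T}\delta_k}\Bigr) H_k
          \Bigl(I - \frac{\gamma_k \delta_k^{T}}{\gamma_k^{T}\delta_k}\Bigr)
        + \frac{\delta_k \delta_k^{T}}{\gamma_k^{T}\delta_k} ,
\qquad \mathrm{Dir} = -H_{k+1}\, g_{k+1} .

The ResetStep option declared above controls how often this accumulated history is discarded.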
   Int_t nEvents = GetNEvents();

   for (Int_t i=0; i<nEvents; i++) {
      if ((ev->GetWeight() < 0) && IgnoreEventsWithNegWeightsInTraining())
         continue;   // skip negative-weight events when requested

      if (fUseRegulator) DEDw += fPriorDev[i];

   ForceNetworkInputs( ev );
   ForceNetworkCalculations();
   if (DoRegression()) {
      GetOutputNeuron( itgt )->SetError(error);
   }
   else if (DoMulticlass()) {
      GetOutputNeuron( icls )->SetError(error);

   if      (fEstimator == kMSE) error = ( GetOutputNeuron()->GetActivationValue() - desired )*eventWeight;
   else if (fEstimator == kCE)  error = -eventWeight/(GetOutputNeuron()->GetActivationValue() - 1 + desired);
   GetOutputNeuron()->SetError(error);

   CalculateNeuronDeltas();
   for (Int_t j=0; j<fSynapses->GetEntriesFast(); j++) {
   for (Int_t i=0; i<100; i++) {

   SetDirWeights( Origin, Dir, 0. );

   for (Int_t i=0; i<100; i++) {

   Log() << kWARNING << "linesearch, starting to investigate direction opposite of steepestDIR" << Endl;

   SetDirWeights( Origin, Dir, 0. );
   Log() << kWARNING << "linesearch, failed even in opposite direction of steepestDIR" << Endl;
   fLastAlpha = fLastAlpha < 10000 ? fLastAlpha : 10000;

   SetDirWeights( Origin, Dir, fLastAlpha );

   Log() << kWARNING << "Line search increased error! Something is wrong."
         << " fLastAlpha=" << fLastAlpha << " al123=" << alpha1 << " "
   if (fUseRegulator) UpdatePriors();

   Int_t nEvents = GetNEvents();

   for (Int_t i=0; i<nEvents; i++) {

      if ((ev->GetWeight() < 0) && IgnoreEventsWithNegWeightsInTraining())
         continue;   // skip negative-weight events when requested
      if (DoRegression()) {
         error += GetMSEErr( ev, itgt );
      }
      else if (DoMulticlass()) {
         error += GetMSEErr( ev, icls );

      if      (fEstimator == kMSE) error = GetMSEErr( ev );
      else if (fEstimator == kCE)  error = GetCEErr( ev );

      Result += error * ev->GetWeight();

   if (fUseRegulator) Result += fPrior;
   if (Result < 0) Log() << kWARNING << "\nNegative Error!!! :" << Result-fPrior << "+" << fPrior << Endl;
   else if (DoMulticlass()) target = ( ev->GetClass() == index ? 1.0 : 0.0 );
   else                     target = GetDesiredOutput( ev );

   else if (DoMulticlass()) target = ( ev->GetClass() == index ? 1.0 : 0.0 );
   else                     target = GetDesiredOutput( ev );
   fEstimatorHistTrain = new TH1F( "estimatorHistTrain", "training estimator",
   fEstimatorHistTest  = new TH1F( "estimatorHistTest",  "test estimator",

   if (fSamplingTraining || fSamplingTesting)
      Data()->InitSampling(1.0, 1.0, fRandomSeed);

   if (fSteps > 0) Log() << kINFO << "Inaccurate progress timing for MLP... " << Endl;
   timer.DrawProgressBar(0);

      if (fExitFromTraining) break;
      fIPyCurrentIter = i;
      if ((i+1)%fTestRate == 0 || (i == 0)) {
         if (fSamplingTraining) {
            Data()->InitSampling(fSamplingFraction, fSamplingWeight);
            Data()->CreateSampling();
         if (fSamplingTesting) {
            Data()->InitSampling(fSamplingFraction, fSamplingWeight);
            Data()->CreateSampling();

         Data()->InitSampling(1.0, 1.0);
         Data()->InitSampling(1.0, 1.0);
      if ((i+1)%fTestRate == 0) {

         if (fInteractive) fInteractive->AddPoint(i+1, trainE, testE);

         fEstimatorHistTrain->Fill( i+1, trainE );
         fEstimatorHistTest ->Fill( i+1, testE );

         if ((testE < GetCurrentValue()) || (GetCurrentValue() < 1e-100)) {

         Data()->EventResult( success );

         SetCurrentValue( testE );
         if (HasConverged()) {

         ResetConvergenceCounter();
         progress = Progress()*fSamplingEpoch*fSamplingFraction*100;
         progress = 100*(fSamplingEpoch*fSamplingFraction + (1.0-fSamplingFraction*fSamplingEpoch)*Progress());
   Int_t nEvents = Data()->GetNEvents();

   for (Int_t i = 0; i < nEvents; i++) index[i] = i;
   Shuffle(index, nEvents);

   for (Int_t i = 0; i < nEvents; i++) {
      if ((ev->GetWeight() < 0) && IgnoreEventsWithNegWeightsInTraining())
         continue;   // skip negative-weight events when requested

      TrainOneEvent(index[i]);
      if (fBPMode == kBatch && (i+1)%fBatchSize == 0) {
         AdjustSynapseWeights();
         if (fgPRINT_BATCH) {
   for (Int_t i = 0; i < n; i++) {
      j = (Int_t) (frgen->Rndm() * a);

   else synapse->DecayLearningRate(fDecayRate);
   else desired = fOutput->GetMax();

   for (UInt_t j = 0; j < GetNvar(); j++) {
      neuron = GetInputNeuron(j);

   ForceNetworkCalculations();
   UpdateNetwork(desired, eventWeight);
   ForceNetworkInputs( ev );
   ForceNetworkCalculations();
   if (DoRegression()) UpdateNetwork( ev->GetTargets(), eventWeight );
   if (DoMulticlass()) UpdateNetwork( *DataInfo().GetTargetsForMulticlass(ev), eventWeight );
   else                UpdateNetwork( GetDesiredOutput(ev), eventWeight );

   return DataInfo().IsSignal(ev) ? fOutput->GetMax() : fOutput->GetMin();
   if      (fEstimator == kMSE) error = GetOutputNeuron()->GetActivationValue() - desired;
   else if (fEstimator == kCE)  error = -1./(GetOutputNeuron()->GetActivationValue() - 1 + desired);
   else    Log() << kFATAL << "Estimator type unspecified!!" << Endl;
   error *= eventWeight;
   GetOutputNeuron()->SetError(error);
   CalculateNeuronDeltas();
   Double_t act = GetOutputNeuron(i)->GetActivationValue();

   Double_t act = GetOutputNeuron(i)->GetActivationValue();

   error *= eventWeight;
   GetOutputNeuron(i)->SetError(error);

   CalculateNeuronDeltas();
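The two error assignments above are simply the derivatives of the per-event loss with respect to the output activation a, scaled afterwards by the event weight. A short check, assuming a binary desired output d in {0,1} for the cross-entropy case:

E_{\mathrm{MSE}} = \tfrac{1}{2}(a - d)^{2} \;\Rightarrow\; \frac{\partial E}{\partial a} = a - d ,
\qquad
E_{\mathrm{CE}} = -\bigl[\, d \ln a + (1 - d)\ln(1 - a) \,\bigr] \;\Rightarrow\;
\frac{\partial E}{\partial a} = -\frac{1}{a - 1 + d} \quad (d \in \{0, 1\}) .

For d = 1 this gives -1/a and for d = 0 it gives 1/(1-a), which is exactly the -1./(activation - 1 + desired) expression set on the output neuron.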
   PrintMessage("Minimizing Estimator with GA");

   fGA_SC_factor = 0.95;

   std::vector<Interval*> ranges;

   Int_t numWeights = fSynapses->GetEntriesFast();
   Log() << kINFO << "GA: estimator after optimization: " << estimator << Endl;

   return ComputeEstimator( parameters );

   synapse->SetWeight(parameters.at(i));

   if (fUseRegulator) UpdatePriors();
      fPrior += 0.5*fRegulators[fRegulatorIdx[i]]*(synapse->GetWeight())*(synapse->GetWeight());
      fPriorDev.push_back(fRegulators[fRegulatorIdx[i]]*(synapse->GetWeight()));
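In other words, UpdatePriors accumulates a quadratic (Gaussian) weight prior and its gradient, with one regulator strength per group of synapses. Written out, with alpha_i = fRegulators[fRegulatorIdx[i]] and w_i the synapse weight:

E_{\mathrm{prior}} = \sum_i \tfrac{1}{2}\, \alpha_i\, w_i^{2} ,
\qquad
\frac{\partial E_{\mathrm{prior}}}{\partial w_i} = \alpha_i\, w_i .

fPrior is the term added to the estimator when UseRegulator is on, and fPriorDev[i] is the contribution added to DEDw in ComputeDEDw above.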
   GetApproxInvHessian(InvH);

   Int_t idx = fRegulatorIdx[i];

   gamma += 1 - fRegulators[idx]*InvH[i][i];

   if (fEstimator == kMSE) {

   if (fRegulators[i] < 0) fRegulators[i] = 0;
   Log() << kDEBUG << "R" << i << ":" << fRegulators[i] << "\t";
   Int_t nEvents = GetNEvents();
   for (Int_t i=0; i<nEvents; i++) {

      GetOutputNeuron()->SetError(1./fOutput->EvalDerivative(GetOutputNeuron()->GetValue()));
      CalculateNeuronDeltas();

      if (fEstimator == kMSE) InvHessian += sens*sensT;

   InvHessian[i][i] += fRegulators[fRegulatorIdx[i]];

   InvHessian[i][i] += 1e-6;
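So the "rank-1 approximation" mentioned in the method documentation is an outer-product (Gauss-Newton-like) estimate of the Hessian, regularised on the diagonal before inversion. Schematically, with s_n the per-event sensitivity vector of the network output with respect to the weights:

H \;\approx\; \sum_{n \in \mathrm{events}} s_n\, s_n^{T}
  \;+\; \mathrm{diag}(\alpha_i)
  \;+\; 10^{-6}\, I .

The regulator terms enter only when the regulate flag is set, and the small diagonal jitter keeps the matrix safely invertible.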
   Log() << kWARNING << "inconsistent dimension " << fInvHessian.GetNcols() << " vs " << numSynapses << Endl;

   GetOutputNeuron()->SetError(1./fOutput->EvalDerivative(GetOutputNeuron()->GetValue()));

   CalculateNeuronDeltas();

   median = GetOutputNeuron()->GetValue();

   Log() << kWARNING << "Negative variance!!! median=" << median << "\tvariance(sigma^2)=" << variance << Endl;
#ifdef MethodMLP_UseMinuit__

void TMVA::MethodMLP::MinuitMinimize()
   tfitter->ExecuteCommand( "SET PRINTOUT",   args, 1 );
   tfitter->ExecuteCommand( "SET NOWARNINGS", args, 0 );

   tfitter->ExecuteCommand( "SET STRATEGY", args, 1 );

   tfitter->ExecuteCommand( "MIGRAD", args, 1 );

   tfitter->ExecuteCommand( "IMPROVE", args, 1 );

   tfitter->ExecuteCommand( "MINOS", args, 1 );
   f = CalculateEstimator();

   Log() << kDEBUG << "***** New estimator: " << f << " min: " << minf << " --> ncalls: " << nc << Endl;
   Log() << col << "--- Short description:" << colres << Endl;
   Log() << "The MLP artificial neural network (ANN) is a traditional feed-" << Endl;
   Log() << "forward multilayer perceptron implementation. The MLP has a user-" << Endl;
   Log() << "defined hidden layer architecture, while the number of input (output)" << Endl;
   Log() << "nodes is determined by the input variables (output classes, i.e., " << Endl;
   Log() << "signal and one background). " << Endl;
   Log() << col << "--- Performance optimisation:" << colres << Endl;
   Log() << "Neural networks are stable and performing for a large variety of " << Endl;
   Log() << "linear and non-linear classification problems. However, in contrast" << Endl;
   Log() << "to (e.g.) boosted decision trees, the user is advised to reduce the " << Endl;
   Log() << "number of input variables that have only little discrimination power. " << Endl;
   Log() << "" << Endl;
   Log() << "In the tests we have carried out so far, the MLP and ROOT networks" << Endl;
   Log() << "(TMlpANN, interfaced via TMVA) performed equally well, with however" << Endl;
   Log() << "a clear speed advantage for the MLP. The Clermont-Ferrand neural " << Endl;
   Log() << "net (CFMlpANN) exhibited worse classification performance in these" << Endl;
   Log() << "tests, which is partly due to the slow convergence of its training" << Endl;
   Log() << "(at least 10k training cycles are required to achieve approximately" << Endl;
   Log() << "competitive results)." << Endl;
   Log() << col << "Overtraining: " << colres
         << "only the TMlpANN performs an explicit separation of the" << Endl;
   Log() << "full training sample into independent training and validation samples." << Endl;
   Log() << "We have found that in most high-energy physics applications the " << Endl;
   Log() << "available degrees of freedom (training events) are sufficient to " << Endl;
   Log() << "constrain the weights of the relatively simple architectures required" << Endl;
   Log() << "to achieve good performance. Hence no overtraining should occur, and " << Endl;
   Log() << "the use of validation samples would only reduce the available training" << Endl;
   Log() << "information. However, if the performance on the training sample is " << Endl;
   Log() << "found to be significantly better than the one found with the inde-" << Endl;
   Log() << "pendent test sample, caution is needed. The results for these samples " << Endl;
   Log() << "are printed to standard output at the end of each training job." << Endl;
   Log() << col << "--- Performance tuning via configuration options:" << colres << Endl;
   Log() << "The hidden layer architecture for all ANNs is defined by the option" << Endl;
   Log() << "\"HiddenLayers=N+1,N,...\", where here the first hidden layer has N+1" << Endl;
   Log() << "neurons and the second N neurons (and so on), and where N is the number " << Endl;
   Log() << "of input variables. Excessive numbers of hidden layers should be avoided," << Endl;
   Log() << "in favour of more neurons in the first hidden layer." << Endl;
   Log() << "" << Endl;
   Log() << "The number of cycles should be above 500. As said, if the number of" << Endl;
   Log() << "adjustable weights is small compared to the training sample size," << Endl;
   Log() << "using a large number of training samples should not lead to overtraining." << Endl;
Multilayer Perceptron class built off of MethodANNBase. Members of TMVA::MethodMLP referenced above:

   MethodMLP(const TString &jobName, const TString &methodTitle, DataSetInfo &theData, const TString &theOption) -- standard constructor
   virtual ~MethodMLP() -- destructor, nothing to be done
   virtual Bool_t HasAnalysisType(Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets) -- MLP can handle classification with 2 classes and regression with one regression-target
   void Init() -- default initializations
   void DeclareOptions() -- define the options (their key words) that can be set in the option string
   void ProcessOptions() -- process user options
   void InitializeLearningRates() -- initialize learning rates of synapses, used only by back propagation
   Double_t CalculateEstimator(Types::ETreeType treeType=Types::kTraining, Int_t iEpoch=-1) -- calculate the estimator that training is attempting to minimize
   void BFGSMinimize(Int_t nEpochs) -- train network with BFGS algorithm
   void BackPropagationMinimize(Int_t nEpochs) -- minimize estimator / train network with back propagation algorithm
   void TrainOneEpoch() -- train network over a single epoch/cycle of events
   void TrainOneEvent(Int_t ievt) -- train network over a single event; this uses the new event model
   void TrainOneEventFast(Int_t ievt, Float_t *&branchVar, Int_t &type) -- fast per-event training
   void Shuffle(Int_t *index, Int_t n) -- Input:
   void SimulateEvent(const Event *ev)
   Double_t GetDesiredOutput(const Event *ev) -- get the desired output of this event
   void UpdateNetwork(Double_t desired, Double_t eventWeight=1.0) -- update the network based on how closely the output matched the desired output
   void CalculateNeuronDeltas() -- have each neuron calculate its delta by back propagation
   void UpdateSynapses() -- update synapse error fields and adjust the weights (if in sequential mode)
   void AdjustSynapseWeights() -- just adjust the synapse weights (should be called in batch mode)
   void DecaySynapseWeights(Bool_t lateEpoch) -- decay synapse weights in last 10 epochs; lower learning rate even more to find a good minimum
   void SteepestDir(TMatrixD &Dir)
   Bool_t GetHessian(TMatrixD &Hessian, TMatrixD &Gamma, TMatrixD &Delta)
   void SetDir(TMatrixD &Hessian, TMatrixD &Dir)
   Double_t DerivDir(TMatrixD &Dir)
   Bool_t LineSearch(TMatrixD &Dir, std::vector< Double_t > &Buffer, Double_t *dError=nullptr) -- zjh
   void SetDirWeights(std::vector< Double_t > &Origin, TMatrixD &Dir, Double_t alpha)
   void SetGammaDelta(TMatrixD &Gamma, TMatrixD &Delta, std::vector< Double_t > &Buffer)
   Double_t GetMSEErr(const Event *ev, UInt_t index=0) -- zjh
   Double_t GetCEErr(const Event *ev, UInt_t index=0) -- zjh
   void UpdateRegulators() -- zjh
   void GetApproxInvHessian(TMatrixD &InvHessian, bool regulate=true) -- rank-1 approximation, neglect 2nd derivatives; zjh
   Double_t GetMvaValue(Double_t *err=nullptr, Double_t *errUpper=nullptr) -- get the mva value generated by the NN
   void GeneticMinimize() -- create genetics class similar to GeneticCut, give it vector of parameter ranges (parameters = weights)...
   Double_t ComputeEstimator(std::vector< Double_t > &parameters) -- this function is called by GeneticANN for GA optimization
   Double_t EstimatorFunction(std::vector< Double_t > &parameters) -- interface to the estimate
   void GetHelpMessage() const -- get help message text
   void MakeClassSpecific(std::ostream &, const TString &) const -- write specific classifier response
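Given the GetMvaValue signature listed above, reading back the response together with its uncertainty (available when CalculateErrors was enabled for the training) might look like the following sketch; the mlp pointer is assumed to come from the usual factory/reader machinery and is not defined in this file:

// Hypothetical usage of the documented signature; the error estimates are derived from the stored inverse Hessian.
Double_t errLow = 0., errUp = 0.;
Double_t mvaValue = mlp->GetMvaValue(&errLow, &errUp);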