#ifdef MethodMLP_UseMinuit__
// ...

// Initializer-list fragment from the first MethodMLP constructor:
   fPrior(0.0), fPriorDev(0), fUpdateLimit(0),
   fTrainingMethod(kBFGS), fTrainMethodS("BFGS"),
   fSamplingFraction(1.0), fSamplingEpoch(0.0), fSamplingWeight(0.0),
   fSamplingTraining(false), fSamplingTesting(false),
   fLastAlpha(0.0), fTau(0.),
   fResetStep(0), fLearnRate(0.0), fDecayRate(0.0),
   fBPMode(kSequential), fBpModeS("None"),
   fBatchSize(0), fTestRate(0), fEpochMon(false),
   fGA_nsteps(0), fGA_preCalc(0), fGA_SC_steps(0),
   fGA_SC_rate(0), fGA_SC_factor(0.0),
   fDeviationsFromTargets(0),

// Initializer-list fragment from the second MethodMLP constructor:
   fUseRegulator(false), fCalculateErrors(false),
   fPrior(0.0), fPriorDev(0), fUpdateLimit(0),
   fTrainingMethod(kBFGS), fTrainMethodS("BFGS"),
   fSamplingFraction(1.0), fSamplingEpoch(0.0), fSamplingWeight(0.0),
   fSamplingTraining(false), fSamplingTesting(false),
   fLastAlpha(0.0), fTau(0.),
   fResetStep(0), fLearnRate(0.0), fDecayRate(0.0),
   fBPMode(kSequential), fBpModeS("None"),
   fBatchSize(0), fTestRate(0), fEpochMon(false),
   fGA_nsteps(0), fGA_preCalc(0), fGA_SC_steps(0),
   fGA_SC_rate(0), fGA_SC_factor(0.0),
   fDeviationsFromTargets(0),
   SetSignalReferenceCut( 0.5 );
#ifdef MethodMLP_UseMinuit__
   DeclareOptionRef(fTrainMethodS="BP", "TrainingMethod",
                    "Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)");

   DeclareOptionRef(fLearnRate=0.02,    "LearningRate",    "ANN learning rate parameter");
   DeclareOptionRef(fDecayRate=0.01,    "DecayRate",       "Decay rate for learning parameter");
   DeclareOptionRef(fTestRate =10,      "TestRate",        "Test for overtraining performed at each #th epoch");
   DeclareOptionRef(fEpochMon = kFALSE, "EpochMonitoring", "Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)");

   DeclareOptionRef(fSamplingFraction=1.0, "Sampling",           "Only 'Sampling' (randomly selected) events are trained each epoch");
   DeclareOptionRef(fSamplingEpoch=1.0,    "SamplingEpoch",      "Sampling is used for the first 'SamplingEpoch' epochs; afterwards, all events are taken for training");
   DeclareOptionRef(fSamplingWeight=1.0,   "SamplingImportance", "The sampling weights of events in epochs which are successful (worse estimator than before) are multiplied with SamplingImportance, otherwise they are divided");

   DeclareOptionRef(fSamplingTraining=kTRUE, "SamplingTraining", "The training sample is sampled");
   DeclareOptionRef(fSamplingTesting=kFALSE, "SamplingTesting",  "The testing sample is sampled");

   DeclareOptionRef(fResetStep=50,  "ResetStep", "How often BFGS should reset history");
   DeclareOptionRef(fTau      =3.0, "Tau",       "LineSearch \"size step\"");

   DeclareOptionRef(fBpModeS="sequential", "BPMode",
                    "Back-propagation learning mode: sequential or batch");
   AddPreDefVal(TString("sequential"));
   AddPreDefVal(TString("batch"));

   DeclareOptionRef(fBatchSize=-1, "BatchSize",
                    "Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events");

   DeclareOptionRef(fImprovement=1e-30, "ConvergenceImprove",
                    "Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)");

   DeclareOptionRef(fSteps=-1, "ConvergenceTests",
                    "Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)");

   DeclareOptionRef(fUseRegulator=kFALSE, "UseRegulator",
                    "Use regulator to avoid over-training");
   DeclareOptionRef(fUpdateLimit=10000, "UpdateLimit",
                    "Maximum times of regulator update");
   DeclareOptionRef(fCalculateErrors=kFALSE, "CalculateErrors",
                    "Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value");

   DeclareOptionRef(fWeightRange=1.0, "WeightRange",
                    "Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range");
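These options are normally supplied through the option string when the method is booked rather than set in code. A minimal, hedged usage sketch (assuming the usual TMVA::Factory / TMVA::DataLoader setup from the TMVA user's guide, with factory and dataloader already created; NCycles and HiddenLayers are options declared in the MethodANNBase base class, not above):

// Sketch only: book an MLP that trains with BFGS and the Bayesian regulator.
// The option names correspond to the DeclareOptionRef calls above.
factory->BookMethod( dataloader, TMVA::Types::kMLP, "MLP",
                     "!H:!V:NCycles=600:HiddenLayers=N+5:TestRate=5:"
                     "TrainingMethod=BFGS:UseRegulator=True" );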
   if (IgnoreEventsWithNegWeightsInTraining()) {
      // ...
         << "Will ignore negative events in training!"
   // ...
   if      (fTrainMethodS == "BP"  ) fTrainingMethod = kBP;
   else if (fTrainMethodS == "BFGS") fTrainingMethod = kBFGS;
   else if (fTrainMethodS == "GA"  ) fTrainingMethod = kGA;

   if      (fBpModeS == "sequential") fBPMode = kSequential;
   else if (fBpModeS == "batch")      fBPMode = kBatch;

   if (fBPMode == kBatch) {
   Log() << kDEBUG << "Initialize learning rates" << Endl;
   // ...
      synapse->SetLearningRate(fLearnRate);
      Log() << kFATAL << "<CalculateEstimator> fatal error: wrong tree type: " << treeType << Endl;
   // ...
   if (fEpochMon && iEpoch >= 0 && !DoRegression()) {
   // ...
   Int_t nEvents = GetNEvents();
   // ...
   if (fWeightRange < 1.f) {
      fDeviationsFromTargets = new std::vector<std::pair<Float_t,Float_t> >(nEvents);
   // ...
   for (Int_t i = 0; i < nEvents; i++) {
      // ...
      if ((ev->GetWeight() < 0) && IgnoreEventsWithNegWeightsInTraining()
      // ...
      ForceNetworkInputs( ev );
      ForceNetworkCalculations();
      // ...
      if (DoRegression()) {
         // ...
         v = GetOutputNeuron( itgt )->GetActivationValue();
         // ...
      }
      else if (DoMulticlass()) {
         // ...
         if (fEstimator==kCE) {
         // ...
         v = GetOutputNeuron( icls )->GetActivationValue();
         // ...
         v = GetOutputNeuron()->GetActivationValue();
      // ...
      if (fDeviationsFromTargets)
         fDeviationsFromTargets->push_back(std::pair<Float_t,Float_t>(d, w));
      // ...
      if (DataInfo().IsSignal(ev) && histS != 0) histS->Fill( float(v), float(w) );
   // ...
   if (fDeviationsFromTargets) {
      std::sort(fDeviationsFromTargets->begin(), fDeviationsFromTargets->end());
      // ...
      delete fDeviationsFromTargets;
   // ...
   if (histS != 0) fEpochMonHistS.push_back( histS );
   if (histB != 0) fEpochMonHistB.push_back( histB );
   // ...
   CreateWeightMonitoringHists( TString::Format("epochmonitoring___epoch_%04i_weights_hist", iEpoch),
                                &fEpochMonHistW );
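For orientation, the estimator assembled in the loop above is, schematically, the weighted average deviation of the network response v from its target: with w the event weight and d(v,t) either the squared deviation or the cross-entropy term (depending on fEstimator),

\mathrm{estimator} \;\approx\; \frac{\sum_{\mathrm{events}} w \, d(v,t)}{\sum_{\mathrm{events}} w}\,.

When WeightRange < 1, only the fraction of events with the smallest deviations, selected via the std::sort of fDeviationsFromTargets above, enters the sums; the exact normalisation lives in the elided lines and is not reproduced here.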
      Log() << kFATAL << "ANN Network is not initialized, doing it now!" << Endl;
      SetAnalysisType(GetAnalysisType());
   // ...
   Log() << kDEBUG << "reinitialize learning rates" << Endl;
   InitializeLearningRates();
   // ...
   PrintMessage("Training Network");
   // ...
   Int_t nEvents = GetNEvents();
   // ...
      Log() << kWARNING << "ANN too complicated: #events=" << nEvents << "\t#synapses=" << nSynapses << Endl;
   // ...
   if (fInteractive && fInteractive->NotInitialized()) {
      std::vector<TString> titles = {"Error on training set", "Error on test set"};
      fInteractive->Init(titles);
   // ...
#ifdef MethodMLP_UseMinuit__
// ...
   if      (fTrainingMethod == kGA)   GeneticMinimize();
   else if (fTrainingMethod == kBFGS) BFGSMinimize(nEpochs);
   else                               BackPropagationMinimize(nEpochs);
   // ...
      Log() << kINFO << "Finalizing handling of Regulator terms, trainE=" << trainE << " testE=" << testE << Endl;
      // ...
      Log() << kINFO << "Done with handling of Regulator terms" << Endl;
   // ...
   if (fCalculateErrors || fUseRegulator)
      // ...
      GetApproxInvHessian( fInvHessian, false );
   fEstimatorHistTrain = new TH1F( "estimatorHistTrain", "training estimator",
   // ...
   fEstimatorHistTest  = new TH1F( "estimatorHistTest",  "test estimator",
   // ...
   std::vector<Double_t> buffer( nWeights );
   // ...
   if (fSamplingTraining || fSamplingTesting)
      Data()->InitSampling(1.0, 1.0, fRandomSeed);
   // ...
   if (fSteps > 0) Log() << kINFO << "Inaccurate progress timing for MLP... " << Endl;
   timer.DrawProgressBar( 0 );
   // ...
      if (fExitFromTraining) break;
      // ...
      if ((i+1)%fTestRate == 0 || (i == 0)) {
         if (fSamplingTraining) {
            // ...
            Data()->InitSampling(fSamplingFraction, fSamplingWeight);
            Data()->CreateSampling();
         // ...
         if (fSamplingTesting) {
            // ...
            Data()->InitSampling(fSamplingFraction, fSamplingWeight);
            Data()->CreateSampling();
      // ...
         Data()->InitSampling(1.0, 1.0);
         // ...
         Data()->InitSampling(1.0, 1.0);
      SetGammaDelta( Gamma, Delta, buffer );
      // ...
      if (i % fResetStep == 0 && i < 0.5*nEpochs) {
      // ...
      if (GetHessian( Hessian, Gamma, Delta )) {
      // ...
      else SetDir( Hessian, Dir );
      // ...
      if (DerivDir( Dir ) > 0) {
      // ...
      if (LineSearch( Dir, buffer, &dError )) {
         // ...
         Log() << kFATAL << "Line search failed! Huge troubles somewhere..." << Endl;
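As a reading aid for the block above: Gamma holds the change of the error gradient between iterations, Delta the corresponding change of the weight vector, and the search direction Dir is derived from the approximate (inverse) Hessian before LineSearch steps along it; the ResetStep condition simply restarts this history periodically during the first half of the training, and a positive DerivDir signals that Dir is not a descent direction. In textbook BFGS notation (the standard update, quoted here as background rather than as a transcription of GetHessian):

\gamma_k = \nabla E(w_{k+1}) - \nabla E(w_k), \qquad \delta_k = w_{k+1} - w_k,

H_{k+1}^{-1} = \Big(I - \tfrac{\delta_k \gamma_k^{T}}{\gamma_k^{T}\delta_k}\Big)\, H_k^{-1}\, \Big(I - \tfrac{\gamma_k \delta_k^{T}}{\gamma_k^{T}\delta_k}\Big) + \tfrac{\delta_k \delta_k^{T}}{\gamma_k^{T}\delta_k}, \qquad d_k = -H_k^{-1}\,\nabla E(w_k).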
      if ((i+1)%fTestRate == 0) {
         // ...
         if (fInteractive) fInteractive->AddPoint(i+1, trainE, testE);
         // ...
         fEstimatorHistTrain->Fill( i+1, trainE );
         fEstimatorHistTest ->Fill( i+1, testE );
         // ...
         if ((testE < GetCurrentValue()) || (GetCurrentValue() < 1e-100)) {
            // ...
            Data()->EventResult( success );
            // ...
            SetCurrentValue( testE );
            if (HasConverged()) {
            // ...
            ResetConvergenceCounter();
      // ...
         progress = Progress()*fSamplingFraction*100*fSamplingEpoch;
      // ...
         progress = 100.0*(fSamplingFraction*fSamplingEpoch + (1.0-fSamplingEpoch)*Progress());
      // ...
      if (progress < i) progress = i;
   Int_t nEvents = GetNEvents();
   // ...
   for (Int_t i = 0; i < nEvents; i++) {
      // ...
      if ((ev->GetWeight() < 0) && IgnoreEventsWithNegWeightsInTraining()
      // ...
   if (fUseRegulator) DEDw += fPriorDev[i];
   ForceNetworkInputs( ev );
   ForceNetworkCalculations();
   // ...
   if (DoRegression()) {
      // ...
      GetOutputNeuron( itgt )->SetError(error);
      // ...
   }
   else if (DoMulticlass()) {
      // ...
      GetOutputNeuron( icls )->SetError(error);
      // ...
      if      (fEstimator==kMSE) error = ( GetOutputNeuron()->GetActivationValue() - desired )*eventWeight;
      else if (fEstimator==kCE)  error = -eventWeight/(GetOutputNeuron()->GetActivationValue() - 1 + desired);
      GetOutputNeuron()->SetError(error);
   // ...
   CalculateNeuronDeltas();
   for (Int_t j = 0; j < fSynapses->GetEntriesFast(); j++) {
   dir = Hessian * DEDw;
   for (Int_t i = 0; i < IDX; i++) dir[i][0] = -dir[i][0];
   for (Int_t i = 0; i < 100; i++) {
   // ...
   for (Int_t i = 0; i < 100; i++) {
      // ...
      Log() << kWARNING << "linesearch, starting to investigate direction opposite of steepestDIR" << Endl;
   // ...
      Log() << kWARNING << "linesearch, failed even in opposite direction of steepestDIR" << Endl;
   // ...
   fLastAlpha = fLastAlpha < 10000 ? fLastAlpha : 10000;
   // ...
      Log() << kWARNING << "Line search increased error! Something is wrong."
            << "fLastAlpha=" << fLastAlpha << "al123=" << alpha1 << " "
   // ...
   if (fUseRegulator) UpdatePriors();
   Int_t nEvents = GetNEvents();
   // ...
   for (Int_t i = 0; i < nEvents; i++) {
      // ...
      if ((ev->GetWeight() < 0) && IgnoreEventsWithNegWeightsInTraining()
      // ...
      if (DoRegression()) {
         // ...
         error += GetMSEErr( ev, itgt );
         // ...
      }
      else if (DoMulticlass()) {
         // ...
         error += GetMSEErr( ev, icls );
         // ...
      if      (fEstimator==kMSE) error = GetMSEErr( ev );
      else if (fEstimator==kCE)  error = GetCEErr( ev );
      // ...
      Result += error * ev->GetWeight();
   // ...
   if (fUseRegulator) Result += fPrior;
   if (Result < 0) Log() << kWARNING << "\nNegative Error!!! :" << Result-fPrior << "+" << fPrior << Endl;
   else if (DoMulticlass()) target = ( ev->GetClass() == index ? 1.0 : 0.0 );
   else                     target = GetDesiredOutput( ev );
   // ...
   else if (DoMulticlass()) target = ( ev->GetClass() == index ? 1.0 : 0.0 );
   else                     target = GetDesiredOutput( ev );
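For reference, the per-output terms that GetMSEErr and GetCEErr return are the standard ones (stated here up to the overall normalisation handled in the elided lines), with o the activation of the corresponding output neuron and t the target selected above:

E_{\mathrm{MSE}} = (o - t)^2, \qquad E_{\mathrm{CE}} = -\big[\, t \ln o + (1 - t)\,\ln(1 - o) \,\big].

Each per-event term is then multiplied by ev->GetWeight() before entering the total error above.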
   fEstimatorHistTrain = new TH1F( "estimatorHistTrain", "training estimator",
   // ...
   fEstimatorHistTest  = new TH1F( "estimatorHistTest",  "test estimator",
   // ...
   if (fSamplingTraining || fSamplingTesting)
      Data()->InitSampling(1.0, 1.0, fRandomSeed);
   // ...
   if (fSteps > 0) Log() << kINFO << "Inaccurate progress timing for MLP... " << Endl;
   timer.DrawProgressBar(0);
   // ...
      if (fExitFromTraining) break;
      fIPyCurrentIter = i;
      // ...
      if ((i+1)%fTestRate == 0 || (i == 0)) {
         if (fSamplingTraining) {
            // ...
            Data()->InitSampling(fSamplingFraction, fSamplingWeight);
            Data()->CreateSampling();
         // ...
         if (fSamplingTesting) {
            // ...
            Data()->InitSampling(fSamplingFraction, fSamplingWeight);
            Data()->CreateSampling();
      // ...
         Data()->InitSampling(1.0, 1.0);
         // ...
         Data()->InitSampling(1.0, 1.0);
      // ...
      if ((i+1)%fTestRate == 0) {
         // ...
         if (fInteractive) fInteractive->AddPoint(i+1, trainE, testE);
         // ...
         fEstimatorHistTrain->Fill( i+1, trainE );
         fEstimatorHistTest ->Fill( i+1, testE );
         // ...
         if ((testE < GetCurrentValue()) || (GetCurrentValue() < 1e-100)) {
            // ...
            Data()->EventResult( success );
            // ...
            SetCurrentValue( testE );
            if (HasConverged()) {
            // ...
            ResetConvergenceCounter();
      // ...
         progress = Progress()*fSamplingEpoch*fSamplingFraction*100;
      // ...
         progress = 100*(fSamplingEpoch*fSamplingFraction + (1.0 - fSamplingFraction*fSamplingEpoch)*Progress());
   Int_t nEvents = Data()->GetNEvents();
   // ...
   for (Int_t i = 0; i < nEvents; i++) index[i] = i;
   Shuffle(index, nEvents);
   // ...
   for (Int_t i = 0; i < nEvents; i++) {
      // ...
      if ((ev->GetWeight() < 0) && IgnoreEventsWithNegWeightsInTraining()
      // ...
      TrainOneEvent(index[i]);
      // ...
      if (fBPMode == kBatch && (i+1)%fBatchSize == 0) {
         AdjustSynapseWeights();
         if (fgPRINT_BATCH) {
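A note on the two learning modes visible in this loop: in sequential mode each TrainOneEvent call adjusts the synapse weights immediately, while in batch mode the per-event gradients are only accumulated and AdjustSynapseWeights() applies the combined step once every fBatchSize events, as shown above. In both cases the elementary update is ordinary gradient descent,

w \;\leftarrow\; w - \eta\, \frac{\partial E}{\partial w},

with the learning rate \eta initialised from LearningRate and reduced between epochs through DecayLearningRate(fDecayRate), cf. the DecaySynapseWeights fragment below.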
   for (Int_t i = 0; i < n; i++) {
      j = (Int_t) (frgen->Rndm() * a);
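The two lines above are the core of the random permutation of the event index array, with frgen the method's random generator and a the range of the draw. A self-contained sketch of the same idea, written as a textbook Fisher-Yates shuffle with a local TRandom3 standing in for frgen (the helper name ShuffleIndices is purely illustrative):

#include <algorithm>   // std::swap
#include "TRandom3.h"

// Shuffle an index array in place so that events are visited in random
// order during an epoch.
void ShuffleIndices(Int_t* index, Int_t n, TRandom3& rng)
{
   for (Int_t i = n - 1; i > 0; --i) {
      // Rndm() is uniform in (0,1), so j is uniform over [0, i]
      Int_t j = static_cast<Int_t>(rng.Rndm() * (i + 1));
      std::swap(index[i], index[j]);
   }
}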
   else synapse->DecayLearningRate(fDecayRate);
   else desired = fOutput->GetMax();
   // ...
   for (UInt_t j = 0; j < GetNvar(); j++) {
      // ...
      neuron = GetInputNeuron(j);
   // ...
   ForceNetworkCalculations();
   UpdateNetwork( desired, eventWeight );
   ForceNetworkInputs( ev );
   ForceNetworkCalculations();
   if (DoRegression()) UpdateNetwork( ev->GetTargets(), eventWeight );
   if (DoMulticlass()) UpdateNetwork( *DataInfo().GetTargetsForMulticlass( ev ), eventWeight );
   else                UpdateNetwork( GetDesiredOutput( ev ), eventWeight );
   // ...
   return DataInfo().IsSignal( ev ) ? fOutput->GetMax() : fOutput->GetMin();
   if      (fEstimator==kMSE) error = GetOutputNeuron()->GetActivationValue() - desired;
   else if (fEstimator==kCE)  error = -1./(GetOutputNeuron()->GetActivationValue() - 1 + desired);
   else    Log() << kFATAL << "Estimator type unspecified!!" << Endl;
   error *= eventWeight;
   GetOutputNeuron()->SetError(error);
   CalculateNeuronDeltas();
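A short derivation for the cross-entropy branch above: with output o = GetOutputNeuron()->GetActivationValue() and desired d in {0,1}, the per-event loss is E = -[ d ln o + (1-d) ln(1-o) ], and its derivative with respect to o is

\frac{\partial E}{\partial o} = -\Big[\frac{d}{o} - \frac{1-d}{1-o}\Big] = -\frac{1}{\,o - 1 + d\,} \qquad (d \in \{0,1\}),

which is exactly the expression stored via SetError() before the eventWeight factor is applied. The MSE branch analogously stores o - d, the derivative of (o - d)^2/2.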
   Double_t act = GetOutputNeuron(i)->GetActivationValue();
   // ...
   Double_t act = GetOutputNeuron(i)->GetActivationValue();
   // ...
   error *= eventWeight;
   GetOutputNeuron(i)->SetError(error);
   // ...
   CalculateNeuronDeltas();
   PrintMessage("Minimizing Estimator with GA");
   // ...
   fGA_SC_factor = 0.95;
   // ...
   std::vector<Interval*> ranges;
   // ...
   Int_t numWeights = fSynapses->GetEntriesFast();
   // ...
   Log() << kINFO << "GA: estimator after optimization: " << estimator << Endl;
   return ComputeEstimator( parameters );
   // ...
      synapse->SetWeight(parameters.at(i));
   // ...
   if (fUseRegulator) UpdatePriors();
      fPrior += 0.5*fRegulators[fRegulatorIdx[i]]*(synapse->GetWeight())*(synapse->GetWeight());
      fPriorDev.push_back(fRegulators[fRegulatorIdx[i]]*(synapse->GetWeight()));
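In other words, the regulator implements a Gaussian weight prior (weight decay): with \alpha_i = fRegulators[fRegulatorIdx[i]] the strength assigned to synapse weight w_i, the two lines above accumulate

E_W = \sum_i \frac{\alpha_i}{2}\, w_i^2 \quad (\mathrm{fPrior}), \qquad \frac{\partial E_W}{\partial w_i} = \alpha_i\, w_i \quad (\mathrm{fPriorDev}),

which are added to the total error and to its gradient DEDw, respectively, as seen earlier in this listing. The gamma bookkeeping in UpdateRegulators below then re-estimates each \alpha_i from the effective number of well-determined weights, in the spirit of the usual Bayesian evidence approximation, with at most UpdateLimit such updates.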
   GetApproxInvHessian( InvH );
   // ...
      Int_t idx = fRegulatorIdx[i];
      // ...
      gamma += 1 - fRegulators[idx]*InvH[i][i];
   // ...
   if (fEstimator==kMSE) {
   // ...
      if (fRegulators[i] < 0) fRegulators[i] = 0;
      Log() << kDEBUG << "R" << i << ":" << fRegulators[i] << "\t";
   Int_t nEvents = GetNEvents();
   for (Int_t i = 0; i < nEvents; i++) {
      // ...
      GetOutputNeuron()->SetError(1./fOutput->EvalDerivative(GetOutputNeuron()->GetValue()));
      CalculateNeuronDeltas();
      // ...
      if (fEstimator==kMSE) InvHessian += sens*sensT;
   // ...
      InvHessian[i][i] += fRegulators[fRegulatorIdx[i]];
   // ...
      InvHessian[i][i] += 1e-6;
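Spelled out, the "rank-1 approximation, neglect 2nd derivatives" mentioned in the member documentation means that the Hessian of the error is approximated by a sum of outer products of per-event sensitivity vectors s: setting the output error to 1/f'(value) and running CalculateNeuronDeltas, as above, effectively makes the back-propagated deltas the derivatives of the network output with respect to each synapse weight, and the matrix accumulated in InvHessian is

H \;\approx\; \sum_{\mathrm{events}} s\, s^{T} \;+\; \mathrm{diag}(\alpha_i) \;+\; 10^{-6}\, I .

The regulator strengths on the diagonal and the small 10^{-6} term keep the matrix well conditioned; the inversion itself (presumably a TMatrixD::Invert call) happens outside this excerpt.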
      Log() << kWARNING << "inconsistent dimension " << fInvHessian.GetNcols() << " vs " << numSynapses << Endl;
   // ...
   GetOutputNeuron()->SetError(1./fOutput->EvalDerivative(GetOutputNeuron()->GetValue()));
   // ...
   CalculateNeuronDeltas();
   // ...
   median = GetOutputNeuron()->GetValue();
   // ...
      Log() << kWARNING << "Negative variance!!! median=" << median << "\tvariance(sigma^2)=" << variance << Endl;
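With that matrix available, the uncertainty returned when CalculateErrors is set follows standard linear error propagation: if s is again the sensitivity of the network output with respect to the weights for the event at hand, the variance of the MVA value is estimated as

\sigma^2 \;\approx\; s^{T} H^{-1} s ,

and the quoted error is its square root; the "Negative variance" warning above guards against a numerically ill-conditioned inverse Hessian. This is the construction implied by the surrounding code; the exact expressions sit in the elided lines.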
#ifdef MethodMLP_UseMinuit__
// ...
void TMVA::MethodMLP::MinuitMinimize()
// ...
   tfitter->ExecuteCommand( "SET PRINTOUT",   args, 1 );
   tfitter->ExecuteCommand( "SET NOWARNINGS", args, 0 );
   // ...
   tfitter->ExecuteCommand( "SET STRATEGY", args, 1 );
   // ...
   tfitter->ExecuteCommand( "MIGRAD",  args, 1 );
   // ...
   tfitter->ExecuteCommand( "IMPROVE", args, 1 );
   // ...
   tfitter->ExecuteCommand( "MINOS",   args, 1 );
// ...
   f = CalculateEstimator();
   // ...
   Log() << kDEBUG << "***** New estimator: " << f << " min: " << minf << " --> ncalls: " << nc << Endl;
   Log() << col << "--- Short description:" << colres << Endl;
   Log() << "The MLP artificial neural network (ANN) is a traditional feed-" << Endl;
   Log() << "forward multilayer perceptron implementation. The MLP has a user-" << Endl;
   Log() << "defined hidden layer architecture, while the number of input (output)" << Endl;
   Log() << "nodes is determined by the input variables (output classes, i.e., " << Endl;
   Log() << "signal and one background). " << Endl;
   Log() << col << "--- Performance optimisation:" << colres << Endl;
   Log() << "Neural networks are stable and perform well for a large variety of " << Endl;
   Log() << "linear and non-linear classification problems. However, in contrast" << Endl;
   Log() << "to (e.g.) boosted decision trees, the user is advised to reduce the " << Endl;
   Log() << "number of input variables that have only little discrimination power. " << Endl;
   Log() << "" << Endl;
   Log() << "In the tests we have carried out so far, the MLP and ROOT networks" << Endl;
   Log() << "(TMlpANN, interfaced via TMVA) performed equally well, with however" << Endl;
   Log() << "a clear speed advantage for the MLP. The Clermont-Ferrand neural " << Endl;
   Log() << "net (CFMlpANN) exhibited worse classification performance in these" << Endl;
   Log() << "tests, which is partly due to the slow convergence of its training" << Endl;
   Log() << "(at least 10k training cycles are required to achieve approximately" << Endl;
   Log() << "competitive results)." << Endl;
   Log() << col << "Overtraining: " << colres
         << "only the TMlpANN performs an explicit separation of the" << Endl;
   Log() << "full training sample into independent training and validation samples." << Endl;
   Log() << "We have found that in most high-energy physics applications the " << Endl;
   Log() << "available degrees of freedom (training events) are sufficient to " << Endl;
   Log() << "constrain the weights of the relatively simple architectures required" << Endl;
   Log() << "to achieve good performance. Hence no overtraining should occur, and " << Endl;
   Log() << "the use of validation samples would only reduce the available training" << Endl;
   Log() << "information. However, if the performance on the training sample is " << Endl;
   Log() << "found to be significantly better than the one found with the inde-" << Endl;
   Log() << "pendent test sample, caution is needed. The results for these samples " << Endl;
   Log() << "are printed to standard output at the end of each training job." << Endl;
   Log() << col << "--- Performance tuning via configuration options:" << colres << Endl;
   Log() << "The hidden layer architecture for all ANNs is defined by the option" << Endl;
   Log() << "\"HiddenLayers=N+1,N,...\", where the first hidden layer has N+1" << Endl;
   Log() << "neurons and the second N neurons (and so on), and where N is the number " << Endl;
   Log() << "of input variables. Excessive numbers of hidden layers should be avoided," << Endl;
   Log() << "in favour of more neurons in the first hidden layer." << Endl;
   Log() << "" << Endl;
   Log() << "The number of cycles should be above 500. As said, if the number of" << Endl;
   Log() << "adjustable weights is small compared to the training sample size," << Endl;
   Log() << "using a large number of training samples should not lead to overtraining." << Endl;
Referenced TMVA classes and members (from the generated documentation):

Base class for all TMVA methods using artificial neural networks.
   void ProcessOptions() override   // do nothing specific at this moment
   void MakeClassSpecific(std::ostream &, const TString &) const override   // write specific classifier response
   Double_t GetMvaValue(Double_t *err=nullptr, Double_t *errUpper=nullptr) override   // get the mva value generated by the NN
Multilayer Perceptron class built off of MethodANNBase.
   void GetHelpMessage() const override   // get help message text
   void BackPropagationMinimize(Int_t nEpochs)   // minimize estimator / train network with back propagation algorithm
   Double_t GetMSEErr(const Event *ev, UInt_t index=0)
   void MakeClassSpecific(std::ostream &, const TString &) const override   // write specific classifier response
   void DeclareOptions() override   // define the options (their key words) that can be set in the option string
   void AdjustSynapseWeights()   // just adjust the synapse weights (should be called in batch mode)
   void SteepestDir(TMatrixD &Dir)
   void TrainOneEpoch()   // train network over a single epoch/cycle of events
   Bool_t GetHessian(TMatrixD &Hessian, TMatrixD &Gamma, TMatrixD &Delta)
   Double_t ComputeEstimator(std::vector< Double_t > &parameters)   // this function is called by GeneticANN for GA optimization
   void InitializeLearningRates()   // initialize learning rates of synapses, used only by back propagation
   void CalculateNeuronDeltas()   // have each neuron calculate its delta by back propagation
   Double_t EstimatorFunction(std::vector< Double_t > &parameters) override   // interface to the estimator
   Double_t DerivDir(TMatrixD &Dir)
   Double_t GetCEErr(const Event *ev, UInt_t index=0)
   virtual ~MethodMLP()   // destructor, nothing to be done
   void SetDir(TMatrixD &Hessian, TMatrixD &Dir)
   void Shuffle(Int_t *index, Int_t n)   // Input:
   void SimulateEvent(const Event *ev)
   void SetDirWeights(std::vector< Double_t > &Origin, TMatrixD &Dir, Double_t alpha)
   void SetGammaDelta(TMatrixD &Gamma, TMatrixD &Delta, std::vector< Double_t > &Buffer)
   void GetApproxInvHessian(TMatrixD &InvHessian, bool regulate=true)   // rank-1 approximation, neglect 2nd derivatives
   void BFGSMinimize(Int_t nEpochs)   // train network with BFGS algorithm
   void UpdateSynapses()   // update synapse error fields and adjust the weights (if in sequential mode)
   void TrainOneEvent(Int_t ievt)   // train network over a single event; this uses the new event model
   Double_t GetDesiredOutput(const Event *ev)   // get the desired output of this event
   void GeneticMinimize()   // create genetics class similar to GeneticCut; give it vector of parameter ranges (parameters = weights)...
   void Init() override   // default initializations
   Bool_t HasAnalysisType(Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets) override   // MLP can handle classification with 2 classes and regression with one regression-target
   void DecaySynapseWeights(Bool_t lateEpoch)   // decay synapse weights in last 10 epochs, lower learning rate even more to find a good minimum
   void TrainOneEventFast(Int_t ievt, Float_t *&branchVar, Int_t &type)   // fast per-event training
   void UpdateNetwork(Double_t desired, Double_t eventWeight=1.0)   // update the network based on how closely the output matched the desired output
   MethodMLP(const TString &jobName, const TString &methodTitle, DataSetInfo &theData, const TString &theOption)   // standard constructor
   void UpdateRegulators()
   Bool_t LineSearch(TMatrixD &Dir, std::vector< Double_t > &Buffer, Double_t *dError=nullptr)
   Double_t GetMvaValue(Double_t *err=nullptr, Double_t *errUpper=nullptr) override   // get the mva value generated by the NN
   Double_t CalculateEstimator(Types::ETreeType treeType=Types::kTraining, Int_t iEpoch=-1)   // calculate the estimator that training is attempting to minimize
   void ProcessOptions() override   // process user options
Neuron class used by TMVA artificial neural network methods.
   void AdjustSynapseWeights()   // adjust the pre-synapses' weights for each neuron (input neuron has no pre-synapse); this method should...
   void ForceValue(Double_t value)   // force the value, typically for input and bias neurons
   void UpdateSynapsesSequential()   // update the pre-synapses for each neuron (input neuron has no pre-synapse); this method should only be ...
   void UpdateSynapsesBatch()   // update and adjust the pre-synapses for each neuron (input neuron has no pre-synapse); this method shou...
   void CalculateDelta()   // calculate error field
Synapse class used by TMVA artificial neural network methods.