87:
TMVA::
MethodBase( jobName, methodType, methodTitle, theData, theOption)
127 DeclareOptionRef( fNcycles = 500,
"NCycles",
"Number of training cycles" );
128 DeclareOptionRef( fLayerSpec =
"N,N-1",
"HiddenLayers",
"Specification of hidden layer architecture" );
129 DeclareOptionRef( fNeuronType =
"sigmoid",
"NeuronType",
"Neuron activation function type" );
130 DeclareOptionRef( fRandomSeed = 1,
"RandomSeed",
"Random seed for initial synapse weights (0 means unique seed for each run; default value '1')");
132 DeclareOptionRef(fEstimatorS=
"MSE",
"EstimatorType",
133 "MSE (Mean Square Estimator) for Gaussian Likelihood or CE(Cross-Entropy) for Bernoulli Likelihood" );
140 Int_t nTypes = names->size();
141 for (
Int_t i = 0; i < nTypes; i++)
142 AddPreDefVal(names->at(i));
145 DeclareOptionRef(fNeuronInputType=
"sum",
"NeuronInputType",
"Neuron input function type");
148 nTypes = names->size();
149 for (
Int_t i = 0; i < nTypes; i++) AddPreDefVal(names->at(i));
159 if ( DoRegression() || DoMulticlass()) fEstimatorS =
"MSE";
160 else fEstimatorS =
"CE" ;
161 if (fEstimatorS ==
"MSE" ) fEstimator = kMSE;
162 else if (fEstimatorS ==
"CE") fEstimator = kCE;
163 std::vector<Int_t>* layout = ParseLayoutString(fLayerSpec);
164 BuildNetwork(layout);
174 std::vector<Int_t>* layout =
new std::vector<Int_t>();
175 layout->push_back((
Int_t)GetNvar());
176 while(layerSpec.
Length()>0) {
178 if (layerSpec.
First(
',')<0) {
183 sToAdd = layerSpec(0,layerSpec.
First(
','));
184 layerSpec = layerSpec(layerSpec.
First(
',')+1,layerSpec.
Length());
188 nNodes += atoi(sToAdd);
189 layout->push_back(nNodes);
192 layout->push_back( DataInfo().GetNTargets() );
193 else if( DoMulticlass() )
194 layout->push_back( DataInfo().GetNClasses() );
196 layout->push_back(1);
199 for( std::vector<Int_t>::iterator it = layout->begin(); it != layout->end(); ++it ){
216 fInputCalculator = NULL;
218 fEstimatorHistTrain = NULL;
219 fEstimatorHistTest = NULL;
222 fEpochMonHistS.clear();
223 fEpochMonHistB.clear();
224 fEpochMonHistW.clear();
228 fOutputNeurons.clear();
248 if (fNetwork != NULL) {
250 Int_t numLayers = fNetwork->GetEntriesFast();
251 for (
Int_t i = 0; i < numLayers; i++) {
253 DeleteNetworkLayer(layer);
258 if (frgen != NULL)
delete frgen;
259 if (fActivation != NULL)
delete fActivation;
260 if (fOutput != NULL)
delete fOutput;
261 if (fIdentity != NULL)
delete fIdentity;
262 if (fInputCalculator != NULL)
delete fInputCalculator;
263 if (fSynapses != NULL)
delete fSynapses;
270 fInputCalculator = NULL;
281 for (
Int_t i = 0; i < numNeurons; i++) {
295 if (fEstimatorS ==
"MSE") fEstimator = kMSE;
296 else if (fEstimatorS ==
"CE") fEstimator = kCE;
297 else Log()<<kWARNING<<
"fEstimator="<<fEstimator<<
"\tfEstimatorS="<<fEstimatorS<<
Endl;
298 if (fEstimator!=kMSE && fEstimator!=kCE) Log()<<kWARNING<<
"Estimator type unspecified \t"<<
Endl;
301 Log() << kHEADER <<
"Building Network. " <<
Endl;
316 fRegulatorIdx.clear();
318 BuildLayers( layout, fromFile );
321 fInputLayer = (
TObjArray*)fNetwork->At(0);
323 fOutputNeurons.clear();
325 fOutputNeurons.push_back( (
TNeuron*)outputLayer->
At(i) );
328 if (weights == NULL) InitWeights();
329 else ForceWeights(weights);
340 Int_t numLayers = layout->size();
342 for (
Int_t i = 0; i < numLayers; i++) {
344 BuildLayer(layout->at(i), curLayer, prevLayer, i, numLayers, fromFile);
345 prevLayer = curLayer;
346 fNetwork->
Add(curLayer);
350 for (
Int_t i = 0; i < numLayers; i++) {
353 if (i!=0 && i!=numLayers-1) fRegulators.push_back(0.);
354 for (
Int_t j = 0; j < numNeurons; j++) {
355 if (i==0) fRegulators.push_back(0.);
358 for (
Int_t k = 0; k < numSynapses; k++) {
360 fSynapses->Add(synapse);
361 fRegulatorIdx.push_back(fRegulators.size()-1);
376 for (
Int_t j = 0; j < numNeurons; j++) {
377 if (fromFile && (layerIndex != numLayers-1) && (j==numNeurons-1)){
382 curLayer->
Add(neuron);
389 if (layerIndex == 0) {
395 if (layerIndex == numLayers-1) {
401 AddPreLinks(neuron, prevLayer);
404 curLayer->
Add(neuron);
410 if (layerIndex != numLayers-1) {
415 curLayer->
Add(neuron);
429 for (
Int_t i = 0; i < numNeurons; i++) {
444 PrintMessage(
"Initializing weights");
447 Int_t numSynapses = fSynapses->GetEntriesFast();
449 for (
Int_t i = 0; i < numSynapses; i++) {
450 synapse = (
TSynapse*)fSynapses->At(i);
451 synapse->
SetWeight(4.0*frgen->Rndm() - 2.0);
460 PrintMessage(
"Forcing weights");
462 Int_t numSynapses = fSynapses->GetEntriesFast();
464 for (
Int_t i = 0; i < numSynapses; i++) {
465 synapse = (
TSynapse*)fSynapses->At(i);
480 for (
UInt_t j = 0; j < GetNvar(); j++) {
484 neuron = GetInputNeuron(j);
496 Int_t numLayers = fNetwork->GetEntriesFast();
499 for (
Int_t i = 0; i < numLayers; i++) {
503 for (
Int_t j = 0; j < numNeurons; j++) {
517 if (Verbose() ||
Debug() || force) Log() << kINFO << message <<
Endl;
526 Log() << kINFO <<
"***Type anything to continue (q to quit): ";
527 std::getline(std::cin, dummy);
528 if (dummy ==
"q" || dummy ==
"Q") {
529 PrintMessage(
"quit" );
540 if (!
Debug())
return;
542 Log() << kINFO <<
Endl;
543 PrintMessage(
"Printing network " );
544 Log() << kINFO <<
"-------------------------------------------------------------------" <<
Endl;
547 Int_t numLayers = fNetwork->GetEntriesFast();
549 for (
Int_t i = 0; i < numLayers; i++) {
554 Log() << kINFO <<
"Layer #" << i <<
" (" << numNeurons <<
" neurons):" <<
Endl;
555 PrintLayer( curLayer );
567 for (
Int_t j = 0; j < numNeurons; j++) {
569 Log() << kINFO <<
"\tNeuron #" << j <<
" (LinksIn: " << neuron->
NumPreLinks()
571 PrintNeuron( neuron );
581 <<
"\t\tValue:\t" << neuron->
GetValue()
584 Log() << kINFO <<
"\t\tActivationEquation:\t";
586 Log() << kINFO <<
"\t\tLinksIn:" <<
Endl;
588 Log() << kINFO <<
"\t\tLinksOut:" <<
Endl;
601 const Event * ev = GetEvent();
603 for (
UInt_t i = 0; i < GetNvar(); i++) {
607 ForceNetworkCalculations();
614 NoErrorCalc(err, errUpper);
628 const Event * ev = GetEvent();
630 for (
UInt_t i = 0; i < GetNvar(); i++) {
634 ForceNetworkCalculations();
639 if (fRegressionReturnVal == NULL) fRegressionReturnVal =
new std::vector<Float_t>();
640 fRegressionReturnVal->clear();
644 for (
UInt_t itgt = 0; itgt < ntgts; itgt++) {
648 const Event* evT2 = GetTransformationHandler().InverseTransform( evT );
649 for (
UInt_t itgt = 0; itgt < ntgts; itgt++) {
650 fRegressionReturnVal->push_back( evT2->
GetTarget(itgt) );
655 return *fRegressionReturnVal;
667 const Event * ev = GetEvent();
669 for (
UInt_t i = 0; i < GetNvar(); i++) {
673 ForceNetworkCalculations();
677 if (fMulticlassReturnVal == NULL) fMulticlassReturnVal =
new std::vector<Float_t>();
678 fMulticlassReturnVal->clear();
679 std::vector<Float_t> temp;
681 UInt_t nClasses = DataInfo().GetNClasses();
682 for (
UInt_t icls = 0; icls < nClasses; icls++) {
683 temp.push_back(GetOutputNeuron( icls )->GetActivationValue() );
686 for(
UInt_t iClass=0; iClass<nClasses; iClass++){
688 for(
UInt_t j=0;j<nClasses;j++){
690 norm+=exp(temp[j]-temp[iClass]);
692 (*fMulticlassReturnVal).push_back(1.0/(1.0+norm));
697 return *fMulticlassReturnVal;
706 Int_t numLayers = fNetwork->GetEntriesFast();
711 for (
Int_t i = 0; i < numLayers; i++) {
717 for (
Int_t j = 0; j < numNeurons; j++) {
722 if(numSynapses==0)
continue;
723 std::stringstream s(
"");
725 for (
Int_t k = 0; k < numSynapses; k++) {
727 s << std::scientific << synapse->
GetWeight() <<
" ";
734 if( fInvHessian.GetNcols()>0 ){
738 Int_t nElements = fInvHessian.GetNoElements();
739 Int_t nRows = fInvHessian.GetNrows();
740 Int_t nCols = fInvHessian.GetNcols();
747 fInvHessian.GetMatrix2Array( elements );
751 for(
Int_t row = 0; row < nRows; ++row ){
756 std::stringstream s(
"");
758 for(
Int_t col = 0; col < nCols; ++col ){
759 s << std::scientific << (*(elements+index)) <<
" ";
776 std::vector<Int_t>* layout =
new std::vector<Int_t>();
778 void* xmlLayout = NULL;
781 xmlLayout = wghtnode;
785 layout->resize( nLayers );
793 layout->at(index) = nNeurons;
797 BuildNetwork( layout, NULL, fromFile );
800 if (GetTrainingTMVAVersionCode() <
TMVA_VERSION(4,2,1) && fActivation->GetExpression().Contains(
"tanh")){
822 std::stringstream s(content);
823 for (
UInt_t iSyn = 0; iSyn<nSyn; iSyn++) {
840 void* xmlInvHessian = NULL;
846 fUseRegulator =
kTRUE;
856 fInvHessian.ResizeTo( nRows, nCols );
860 if (nElements > std::numeric_limits<int>::max()-100){
861 Log() << kFATAL <<
"you tried to read a hessian matrix with " << nElements <<
" elements, --> too large, guess s.th. went wrong reading from the weight file" <<
Endl;
864 elements =
new Double_t[nElements+10];
877 std::stringstream s(content);
878 for (
Int_t iCol = 0; iCol<nCols; iCol++) {
879 s >> (*(elements+index));
886 fInvHessian.SetMatrixArray( elements );
902 std::vector<Double_t>* weights =
new std::vector<Double_t>();
904 while (istr>> dummy >> weight) weights->push_back(weight);
906 ForceWeights(weights);
918 fRanking =
new Ranking( GetName(),
"Importance" );
925 for (
UInt_t ivar = 0; ivar < GetNvar(); ivar++) {
927 neuron = GetInputNeuron(ivar);
930 varName = GetInputVar(ivar);
935 meanS, meanB, rmsS, rmsB,
xmin,
xmax );
939 if (avgVal<meanrms) avgVal = meanrms;
940 if (IsNormalised()) avgVal = 0.5*(1 +
gTools().
NormVariable( avgVal, GetXmin( ivar ), GetXmax( ivar )));
942 for (
Int_t j = 0; j < numSynapses; j++) {
947 importance *= avgVal * avgVal;
949 fRanking->AddRank(
Rank( varName, importance ) );
958 std::vector<TH1*>* hv )
const
961 Int_t numLayers = fNetwork->GetEntriesFast();
963 for (
Int_t i = 0; i < numLayers-1; i++) {
972 numNeurons1, 0, numNeurons1, numNeurons2, 0, numNeurons2);
974 for (
Int_t j = 0; j < numNeurons1; j++) {
979 for (
Int_t k = 0; k < numSynapses; k++) {
987 if (hv) hv->push_back( hist );
1000 PrintMessage(
Form(
"Write special histos to file: %s", BaseDir()->GetPath()),
kTRUE);
1002 if (fEstimatorHistTrain) fEstimatorHistTrain->Write();
1003 if (fEstimatorHistTest ) fEstimatorHistTest ->Write();
1006 CreateWeightMonitoringHists(
"weights_hist" );
1009 static std::atomic<int> epochMonitoringDirectoryNumber{0};
1010 int epochVal = epochMonitoringDirectoryNumber++;
1013 epochdir = BaseDir()->
mkdir(
"EpochMonitoring" );
1015 epochdir = BaseDir()->
mkdir(
Form(
"EpochMonitoring_%4d",epochVal) );
1018 for (std::vector<TH1*>::const_iterator it = fEpochMonHistS.begin(); it != fEpochMonHistS.end(); ++it) {
1022 for (std::vector<TH1*>::const_iterator it = fEpochMonHistB.begin(); it != fEpochMonHistB.end(); ++it) {
1026 for (std::vector<TH1*>::const_iterator it = fEpochMonHistW.begin(); it != fEpochMonHistW.end(); ++it) {
1038 Int_t numLayers = fNetwork->GetEntries();
1041 fout <<
" double ActivationFnc(double x) const;" << std::endl;
1042 fout <<
" double OutputActivationFnc(double x) const;" << std::endl;
1044 int numNodesFrom = -1;
1045 for (
Int_t lIdx = 0; lIdx < numLayers; lIdx++) {
1046 int numNodesTo = ((
TObjArray*)fNetwork->At(lIdx))->GetEntries();
1047 if (numNodesFrom<0) { numNodesFrom=numNodesTo;
continue; }
1048 fout <<
" double fWeightMatrix" << lIdx-1 <<
"to" << lIdx <<
"[" << numNodesTo <<
"][" << numNodesFrom <<
"];";
1049 fout <<
" // weight matrix from layer " << lIdx-1 <<
" to " << lIdx << std::endl;
1050 numNodesFrom = numNodesTo;
1053 fout <<
"};" << std::endl;
1057 fout <<
"inline void " << className <<
"::Initialize()" << std::endl;
1058 fout <<
"{" << std::endl;
1059 fout <<
" // build network structure" << std::endl;
1061 for (
Int_t i = 0; i < numLayers-1; i++) {
1062 fout <<
" // weight matrix from layer " << i <<
" to " << i+1 << std::endl;
1065 for (
Int_t j = 0; j < numNeurons; j++) {
1068 for (
Int_t k = 0; k < numSynapses; k++) {
1070 fout <<
" fWeightMatrix" << i <<
"to" << i+1 <<
"[" << k <<
"][" << j <<
"] = " << synapse->
GetWeight() <<
";" << std::endl;
1075 fout <<
"}" << std::endl;
1079 fout <<
"inline double " << className <<
"::GetMvaValue__( const std::vector<double>& inputValues ) const" << std::endl;
1080 fout <<
"{" << std::endl;
1081 fout <<
" if (inputValues.size() != (unsigned int)" << ((
TObjArray *)fNetwork->At(0))->GetEntries() - 1 <<
") {"
1083 fout <<
" std::cout << \"Input vector needs to be of size \" << "
1084 << ((
TObjArray *)fNetwork->At(0))->GetEntries() - 1 <<
" << std::endl;" << std::endl;
1085 fout <<
" return 0;" << std::endl;
1086 fout <<
" }" << std::endl;
1088 for (
Int_t lIdx = 1; lIdx < numLayers; lIdx++) {
1091 fout <<
" std::array<double, " << numNodes <<
"> fWeights" << lIdx <<
" {{}};" << std::endl;
1093 for (
Int_t lIdx = 1; lIdx < numLayers - 1; lIdx++) {
1094 fout <<
" fWeights" << lIdx <<
".back() = 1.;" << std::endl;
1097 for (
Int_t i = 0; i < numLayers - 1; i++) {
1098 fout <<
" // layer " << i <<
" to " << i + 1 << std::endl;
1099 if (i + 1 == numLayers - 1) {
1100 fout <<
" for (int o=0; o<" << ((
TObjArray *)fNetwork->At(i + 1))->GetEntries() <<
"; o++) {" << std::endl;
1102 fout <<
" for (int o=0; o<" << ((
TObjArray *)fNetwork->At(i + 1))->GetEntries() - 1 <<
"; o++) {"
1106 fout <<
" std::array<double, " << ((
TObjArray *)fNetwork->At(i))->GetEntries()
1107 <<
"> buffer; // no need to initialise" << std::endl;
1108 fout <<
" for (int i = 0; i<" << ((
TObjArray *)fNetwork->At(i))->GetEntries() <<
" - 1; i++) {"
1110 fout <<
" buffer[i] = fWeightMatrix" << i <<
"to" << i + 1 <<
"[o][i] * inputValues[i];" << std::endl;
1111 fout <<
" } // loop over i" << std::endl;
1112 fout <<
" buffer.back() = fWeightMatrix" << i <<
"to" << i + 1 <<
"[o]["
1113 << ((
TObjArray *)fNetwork->At(i))->GetEntries() - 1 <<
"];" << std::endl;
1115 fout <<
" std::array<double, " << ((
TObjArray *)fNetwork->At(i))->GetEntries()
1116 <<
"> buffer; // no need to initialise" << std::endl;
1117 fout <<
" for (int i=0; i<" << ((
TObjArray *)fNetwork->At(i))->GetEntries() <<
"; i++) {" << std::endl;
1118 fout <<
" buffer[i] = fWeightMatrix" << i <<
"to" << i + 1 <<
"[o][i] * fWeights" << i <<
"[i];"
1120 fout <<
" } // loop over i" << std::endl;
1122 fout <<
" for (int i=0; i<" << ((
TObjArray *)fNetwork->At(i))->GetEntries() <<
"; i++) {" << std::endl;
1123 if (fNeuronInputType ==
"sum") {
1124 fout <<
" fWeights" << i + 1 <<
"[o] += buffer[i];" << std::endl;
1125 }
else if (fNeuronInputType ==
"sqsum") {
1126 fout <<
" fWeights" << i + 1 <<
"[o] += buffer[i]*buffer[i];" << std::endl;
1128 fout <<
" fWeights" << i + 1 <<
"[o] += fabs(buffer[i]);" << std::endl;
1130 fout <<
" } // loop over i" << std::endl;
1131 fout <<
" } // loop over o" << std::endl;
1132 if (i + 1 == numLayers - 1) {
1133 fout <<
" for (int o=0; o<" << ((
TObjArray *)fNetwork->At(i + 1))->GetEntries() <<
"; o++) {" << std::endl;
1135 fout <<
" for (int o=0; o<" << ((
TObjArray *)fNetwork->At(i + 1))->GetEntries() - 1 <<
"; o++) {"
1138 if (i+1 != numLayers-1)
1139 fout <<
" fWeights" << i + 1 <<
"[o] = ActivationFnc(fWeights" << i + 1 <<
"[o]);" << std::endl;
1141 fout <<
" fWeights" << i + 1 <<
"[o] = OutputActivationFnc(fWeights" << i + 1 <<
"[o]);"
1143 fout <<
" } // loop over o" << std::endl;
1146 fout <<
" return fWeights" << numLayers - 1 <<
"[0];" << std::endl;
1147 fout <<
"}" << std::endl;
1150 TString fncName = className+
"::ActivationFnc";
1151 fActivation->MakeFunction(fout, fncName);
1152 fncName = className+
"::OutputActivationFnc";
1153 fOutput->MakeFunction(fout, fncName);
1156 fout <<
"// Clean up" << std::endl;
1157 fout <<
"inline void " << className <<
"::Clear()" << std::endl;
1158 fout <<
"{" << std::endl;
1159 fout <<
"}" << std::endl;
char * Form(const char *fmt,...)
void Debug(Int_t level, const char *va_(fmt),...)
#define TMVA_VERSION(a, b, c)
Describe directory structure in memory.
virtual Bool_t cd()
Change current directory to "this" directory.
virtual TDirectory * mkdir(const char *name, const char *title="", Bool_t returnExistingDirectory=kFALSE)
Create a sub-directory "a" or a hierarchy of sub-directories "a/b/c/...".
2-D histogram with a float per channel (see TH1 documentation)
virtual void SetBinContent(Int_t bin, Double_t content)
Set bin content.
Class that contains all the data information.
Float_t GetValue(UInt_t ivar) const
return value of i'th variable
void SetTarget(UInt_t itgt, Float_t value)
set the target value (dimension itgt) to value
Float_t GetTarget(UInt_t itgt) const
Base class for all TMVA methods using artificial neural networks.
std::vector< Int_t > * ParseLayoutString(TString layerSpec)
parse layout specification string and return a vector, each entry containing the number of neurons to...
virtual void ProcessOptions()
do nothing specific at this moment
virtual ~MethodANNBase()
destructor
void DeleteNetworkLayer(TObjArray *&layer)
delete a network layer
const Ranking * CreateRanking()
compute ranking of input variables by summing function of weights
void DeleteNetwork()
delete/clear network
void WaitForKeyboard()
wait for keyboard input, for debugging
MethodANNBase(const TString &jobName, Types::EMVA methodType, const TString &methodTitle, DataSetInfo &theData, const TString &theOption)
standard constructor Note: Right now it is an option to choose the neuron input function,...
void AddPreLinks(TNeuron *neuron, TObjArray *prevLayer)
add synapses connecting a neuron to its preceding layer
void CreateWeightMonitoringHists(const TString &bulkname, std::vector< TH1 * > *hv=0) const
void PrintNeuron(TNeuron *neuron) const
print a neuron, for debugging
void PrintMessage(TString message, Bool_t force=kFALSE) const
print messages, turn off printing by setting verbose and debug flag appropriately
void AddWeightsXMLTo(void *parent) const
create XML description of ANN classifier
void InitANNBase()
initialize ANNBase object
void PrintLayer(TObjArray *layer) const
print a single layer, for debugging
virtual void BuildNetwork(std::vector< Int_t > *layout, std::vector< Double_t > *weights=NULL, Bool_t fromFile=kFALSE)
build network given a layout (number of neurons in each layer) and optional weights array
void InitWeights()
initialize the synapse weights randomly
virtual void DeclareOptions()
define the options (their key words) that can be set in the option string here the options valid for ...
virtual void ReadWeightsFromStream(std::istream &istr)
destroy/clear the network then read it back in from the weights file
void BuildLayers(std::vector< Int_t > *layout, Bool_t from_file=false)
build the network layers
virtual void MakeClassSpecific(std::ostream &, const TString &) const
write specific classifier response
void ForceWeights(std::vector< Double_t > *weights)
force the synapse weights
virtual Double_t GetMvaValue(Double_t *err=0, Double_t *errUpper=0)
get the mva value generated by the NN
void BuildLayer(Int_t numNeurons, TObjArray *curLayer, TObjArray *prevLayer, Int_t layerIndex, Int_t numLayers, Bool_t from_file=false)
build a single layer with neurons and synapses connecting this layer to the previous layer
void ForceNetworkCalculations()
calculate input values to each neuron
void ForceNetworkInputs(const Event *ev, Int_t ignoreIndex=-1)
force the input values of the input neurons force the value for each input neuron
virtual const std::vector< Float_t > & GetMulticlassValues()
get the multiclass classification values generated by the NN
void ReadWeightsFromXML(void *wghtnode)
read MLP from xml weight file
Bool_t Debug() const
return the debug flag; note that this flag is unusually stored and accessed via a global pointer
virtual void WriteMonitoringHistosToFile() const
write histograms to file
virtual const std::vector< Float_t > & GetRegressionValues()
get the regression value generated by the NN
virtual void PrintNetwork() const
print network representation, for debugging
Virtual base Class for all MVA method.
Ranking for variables in method (implementation)
Class for easily choosing activation functions.
std::vector< TString > * GetAllActivationNames() const
returns the names of all known activation functions
TActivation * CreateActivation(EActivationType type) const
instantiate the correct activation object according to the type chosen (given as the enumeration type...
Tanh activation function for ANN.
Neuron class used by TMVA artificial neural network methods.
Double_t GetActivationValue() const
void ForceValue(Double_t value)
force the value, typically for input and bias neurons
TSynapse * PostLinkAt(Int_t index) const
void SetActivationEqn(TActivation *activation)
set activation equation
Double_t GetDelta() const
void AddPostLink(TSynapse *post)
add synapse as a post-link to this neuron
void SetInputCalculator(TNeuronInput *calculator)
set input calculator
Int_t NumPreLinks() const
void PrintActivationEqn()
print activation equation, for debugging
void CalculateValue()
calculate neuron input
void CalculateActivationValue()
calculate neuron activation/output
void PrintPostLinks() const
Int_t NumPostLinks() const
void AddPreLink(TSynapse *pre)
add synapse as a pre-link to this neuron
Double_t GetValue() const
void DeletePreLinks()
delete all pre-links
void PrintPreLinks() const
Synapse class used by TMVA artificial neural network methods.
void SetWeight(Double_t weight)
set synapse weight
void SetPostNeuron(TNeuron *post)
void SetPreNeuron(TNeuron *pre)
Int_t GetEntriesFast() const
Int_t GetEntries() const
Return the number of objects in array (i.e. number of non-empty slots).
TObject * At(Int_t idx) const
virtual Int_t Write(const char *name=0, Int_t option=0, Int_t bufsize=0)
Write this object to the current directory.
Random number generator class based on M.
Ssiz_t First(char c) const
Find first occurrence of a character c.
const char * Data() const
Bool_t BeginsWith(const char *s, ECaseCompare cmp=kExact) const
TString & Remove(Ssiz_t pos)
Bool_t AddRawLine(XMLNodePointer_t parent, const char *line)
Add a single line into the XML file. The line should have correct XML syntax so that it can later be decoded by the XML parser.
XMLNodePointer_t NewChild(XMLNodePointer_t parent, XMLNsPointer_t ns, const char *name, const char *content=nullptr)
create new child element for parent node
XMLNodePointer_t GetChild(XMLNodePointer_t xmlnode, Bool_t realnode=kTRUE)
returns first child of xmlnode
XMLAttrPointer_t NewAttr(XMLNodePointer_t xmlnode, XMLNsPointer_t, const char *name, const char *value)
creates new attribute for xmlnode, namespaces are not supported for attributes
const char * GetNodeContent(XMLNodePointer_t xmlnode)
get contents (if any) of xmlnode
XMLNodePointer_t GetNext(XMLNodePointer_t xmlnode, Bool_t realnode=kTRUE)
return next to xmlnode node if realnode==kTRUE, any special nodes in between will be skipped
create variable transformations
MsgLogger & Endl(MsgLogger &ml)