using std::stringstream;
   , fSVKernelFunction(0)
   , fDoubleSigmaSquared(0)
   fNumVars = theData.GetVariableInfos().size();
   for (int i = 0; i < fNumVars; i++) {
      fVarNames.push_back(theData.GetVariableInfos().at(i).GetTitle());
   , fSVKernelFunction(0)
   , fDoubleSigmaSquared(0)
   fSupportVectors->clear();
   for (UInt_t i = 0; i < fInputData->size(); i++) {
      delete fInputData->at(i);
   if (fWgSet != 0)            { delete fWgSet; fWgSet = 0; }
   if (fSVKernelFunction != 0) { delete fSVKernelFunction; fSVKernelFunction = 0; }
   fSupportVectors->clear();
   for (UInt_t i = 0; i < fInputData->size(); i++) {
      delete fInputData->at(i);
   if (fWgSet != 0)            { fWgSet = 0; }
   if (fSVKernelFunction != 0) { fSVKernelFunction = 0; }
   Data()->DeleteResults(GetMethodName(), Types::kTraining, GetAnalysisType());
   Log() << kDEBUG << " successfully(?) reset the method " << Endl;
   SetNormalised( kTRUE );
   fInputData      = new std::vector<TMVA::SVEvent*>(0);
   fSupportVectors = new std::vector<TMVA::SVEvent*>(0);
   DeclareOptionRef( fTheKernel = "RBF", "Kernel", "Pick which kernel ( RBF or MultiGauss )");
   DeclareOptionRef( fGamma = 1., "Gamma", "RBF kernel parameter: Gamma (size of the Kernel)");
   DeclareOptionRef( fOrder = 3, "Order", "Polynomial Kernel parameter: polynomial order");
   DeclareOptionRef( fTheta = 1., "Theta", "Polynomial Kernel parameter: polynomial theta");
   DeclareOptionRef( fGammas = "", "GammaList", "MultiGauss parameters" );
   DeclareOptionRef( fTune = "All", "Tune", "Tune Parameters");
   DeclareOptionRef( fMultiKernels = "None", "KernelList", "Sum or product of kernels");
   DeclareOptionRef( fLoss = "hinge", "Loss", "Loss function");
   DeclareOptionRef( fCost, "C", "Cost parameter" );
   if (DoRegression()) {
   DeclareOptionRef( fTolerance = 0.01, "Tol", "Tolerance parameter" );
   DeclareOptionRef( fMaxIter = 1000, "MaxIter", "Maximum number of training loops" );
   DeclareOptionRef( fNSubSets = 1, "NSubSets", "Number of training subsets" );
   DeclareOptionRef( fTheKernel = "Gauss", "Kernel", "Uses kernel function");
   DeclareOptionRef( fDoubleSigmaSquared = 2., "Sigma", "Kernel parameter: sigma");
   DeclareOptionRef( fOrder = 3, "Order", "Polynomial Kernel parameter: polynomial order");
   DeclareOptionRef( fTheta = 1., "Theta", "Sigmoid Kernel parameter: theta");
   DeclareOptionRef( fKappa = 1., "Kappa", "Sigmoid Kernel parameter: kappa");
   if (IgnoreEventsWithNegWeightsInTraining()) {
      Log() << kFATAL << "Mechanism to ignore events with negative weights in training not yet available for method: "
            << GetMethodTypeName()
            << " --> please remove \"IgnoreNegWeightsInTraining\" option from booking string."
   fIPyMaxIter = fMaxIter;
   Log() << kDEBUG << "Create event vector" << Endl;
   fDataSize = Data()->GetNEvents();
   if (GetEvent(ievnt)->GetWeight() != 0){
      if (DataInfo().IsSignal(GetEvent(ievnt))){
         fInputData->push_back(new SVEvent(GetEvent(ievnt), CSig, DataInfo().IsSignal(GetEvent(ievnt))));
   fInputData->push_back(new SVEvent(GetEvent(ievnt), CBkg, DataInfo().IsSignal(GetEvent(ievnt))));
   if( fTheKernel == "RBF"){
   else if( fTheKernel == "MultiGauss" ){
      if(fmGamma.size()!=0){ GetMGamma(fmGamma); }
      fmGamma.push_back(1.0);
   else if( fTheKernel == "Polynomial" ){
   else if( fTheKernel == "Prod" ){
      if(fmGamma.size()!=0){ GetMGamma(fmGamma); }
   else if( fTheKernel == "Sum" ){
      if(fmGamma.size()!=0){ GetMGamma(fmGamma); }
   Log() << kWARNING << fTheKernel << " is not a recognised kernel function." << Endl;
   Log() << kINFO << "Building SVM Working Set...with " << fInputData->size() << " event instances" << Endl;
   fWgSet = new SVWorkingSet( fInputData, fSVKernelFunction, fTolerance, DoRegression() );
   Log() << kINFO << "Elapsed time for Working Set build: " << bldwstime.GetElapsedTime() << Endl;
   Log() << kINFO << "Sorry, no computing time forecast available for SVM, please wait ..." << Endl;
   if (fInteractive) fWgSet->SetIPythonInteractive(&fExitFromTraining, &fIPyCurrentIter);
   fWgSet->Train(fMaxIter);
   Log() << kINFO << "Elapsed time: " << timer.GetElapsedTime()
   fBparm          = fWgSet->GetBpar();
   fSupportVectors = fWgSet->GetSupportVectors();
   if (!fExitFromTraining) fIPyMaxIter = fIPyCurrentIter;
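// ------------------------------------------------------------------------
// Editor's note: SVWorkingSet implements Platt's SMO classifier with the
// Keerthi & Shevade improvements. For classification it maximises the
// textbook SVM dual (stated here for orientation, not taken from this
// file):
//
//    max_{alpha}  sum_i alpha_i
//                 - (1/2) sum_{i,j} alpha_i alpha_j y_i y_j K(x_i, x_j)
//    subject to   0 <= alpha_i <= C   and   sum_i alpha_i y_i = 0,
//
// with the Tol option passed to the working set as convergence tolerance
// and MaxIter capping the number of optimisation loops.
// ------------------------------------------------------------------------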
   for (std::vector<TMVA::SVEvent*>::iterator veciter = fSupportVectors->begin();
      temp[0] = (*veciter)->GetNs();
      temp[1] = (*veciter)->GetTypeFlag();
      temp[2] = (*veciter)->GetAlpha();
      temp[3] = (*veciter)->GetAlpha_p();
      temp[ivar+4] = (*(*veciter)->GetDataVector())[ivar];
   std::vector<Float_t>* svector = new std::vector<Float_t>(GetNvar());
   if (fMaxVars!=0) delete fMaxVars;
   fMaxVars = new TVectorD( GetNvar() );
   if (fMinVars!=0) delete fMinVars;
   fMinVars = new TVectorD( GetNvar() );
   if (fSupportVectors!=0) {
      for (vector< SVEvent* >::iterator it = fSupportVectors->begin(); it!=fSupportVectors->end(); ++it)
      delete fSupportVectors;
   fSupportVectors = new std::vector<TMVA::SVEvent*>(0);
   if (fSVKernelFunction!=0) delete fSVKernelFunction;
   if( fTheKernel == "RBF" ){
   else if( fTheKernel == "MultiGauss" ){
      SetMGamma(fGammaList);
   else if( fTheKernel == "Polynomial" ){
   else if( fTheKernel == "Prod" ){
      SetMGamma(fGammaList);
   else if( fTheKernel == "Sum" ){
      SetMGamma(fGammaList);
   Log() << kWARNING << fTheKernel << " is not a recognised kernel function." << Endl;
   if (fSupportVectors != 0) { delete fSupportVectors; fSupportVectors = 0; }
   fSupportVectors = new std::vector<TMVA::SVEvent*>(0);
   fSupportVectors->reserve(fNsupv);
   std::vector<Float_t>* svector = new std::vector<Float_t>(GetNvar());
   fMaxVars = new TVectorD( GetNvar() );
   fMinVars = new TVectorD( GetNvar() );
   delete fSVKernelFunction;
   if (fTheKernel == "Gauss" ) {
   Log() << kFATAL << "Unknown kernel function found in weight file!" << Endl;
   fSVKernelFunction->setCompatibilityParams(k, fOrder, fTheta, fKappa);
   myMVA += ( fSupportVectors->at(ievt)->GetAlpha()
            * fSupportVectors->at(ievt)->GetTypeFlag()
            * fSVKernelFunction->Evaluate( fSupportVectors->at(ievt), ev ) );
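// ------------------------------------------------------------------------
// Editor's note: the loop above accumulates the standard SVM decision
// function over the support vectors,
//
//    f(x) = sum_i alpha_i y_i K(x_i, x) - b,
//
// where y_i is the type flag (+1/-1 for signal/background) and b is
// fBparm. As in the generated standalone code below, the classifier
// output is the sigmoid-mapped value 1 / (1 + exp(f(x))).
// ------------------------------------------------------------------------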
   if (fRegressionReturnVal == NULL)
      fRegressionReturnVal = new std::vector<Float_t>();
   fRegressionReturnVal->clear();
   myMVA += ( fSupportVectors->at(ievt)->GetDeltaAlpha()
            * fSVKernelFunction->Evaluate( fSupportVectors->at(ievt), ev ) );
   const Event* evT2 = GetTransformationHandler().InverseTransform( evT );
   fRegressionReturnVal->push_back(evT2->GetTarget(0));
   return *fRegressionReturnVal;
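// ------------------------------------------------------------------------
// Editor's note: for regression the expansion uses the difference of the
// two Lagrange multiplier sets (GetDeltaAlpha() = alpha - alpha*, cf. the
// alpha and alpha_p values written to the weight file above),
//
//    f(x) = sum_i (alpha_i - alpha*_i) K(x_i, x),
//
// offset by the bias fBparm; the raw value is then mapped back to the
// original target scale through the inverse variable transformation
// before being returned.
// ------------------------------------------------------------------------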
   const int fNsupv = fSupportVectors->size();
   fout << "   // not implemented for class: \"" << className << "\"" << std::endl;
   fout << "   float        fBparameter;" << std::endl;
   fout << "   int          fNOfSuppVec;" << std::endl;
   fout << "   static float fAllSuppVectors[][" << fNsupv << "];" << std::endl;
   fout << "   static float fAlphaTypeCoef[" << fNsupv << "];" << std::endl;
   fout << "   // Kernel parameter(s) " << std::endl;
   fout << "   float fGamma;" << std::endl;
   fout << "};" << std::endl;
   fout << "" << std::endl;
   fout << "inline void " << className << "::Initialize() " << std::endl;
   fout << "{" << std::endl;
   fout << "   fBparameter = " << fBparm << ";" << std::endl;
   fout << "   fNOfSuppVec = " << fNsupv << ";" << std::endl;
   fout << "   fGamma = " << fGamma << ";" << std::endl;
   fout << "}" << std::endl;
   fout << "inline double " << className << "::GetMvaValue__(const std::vector<double>& inputValues ) const" << std::endl;
   fout << "{" << std::endl;
   fout << "   double mvaval = 0; " << std::endl;
   fout << "   double temp = 0; " << std::endl;
   fout << "   for (int ievt = 0; ievt < fNOfSuppVec; ievt++ ){" << std::endl;
   fout << "      temp = 0;" << std::endl;
   fout << "      for ( unsigned int ivar = 0; ivar < GetNvar(); ivar++ ) {" << std::endl;
   fout << "         temp += (fAllSuppVectors[ivar][ievt] - inputValues[ivar]) " << std::endl;
   fout << "               * (fAllSuppVectors[ivar][ievt] - inputValues[ivar]); " << std::endl;
   fout << "      }" << std::endl;
   fout << "      mvaval += fAlphaTypeCoef[ievt] * exp( -fGamma * temp ); " << std::endl;
   fout << "   }" << std::endl;
   fout << "   mvaval -= fBparameter;" << std::endl;
   fout << "   return 1./(1. + exp(mvaval));" << std::endl;
   fout << "}" << std::endl;
   fout << "// Clean up" << std::endl;
   fout << "inline void " << className << "::Clear() " << std::endl;
   fout << "{" << std::endl;
   fout << "   // nothing to clear " << std::endl;
   fout << "}" << std::endl;
   fout << "" << std::endl;
   fout << "float " << className << "::fAlphaTypeCoef[] =" << std::endl;
   fout << fSupportVectors->at(isv)->GetDeltaAlpha() * fSupportVectors->at(isv)->GetTypeFlag();
   fout << " };" << std::endl << std::endl;
   fout << "float " << className << "::fAllSuppVectors[][" << fNsupv << "] =" << std::endl;
   fout << fSupportVectors->at(isv)->GetDataVector()->at(ivar);
   if (ivar < GetNvar()-1) fout << ", " << std::endl;
   else                    fout << std::endl;
   fout << "};" << std::endl << std::endl;
   Log() << "The Support Vector Machine (SVM) builds a hyperplane separating" << Endl;
   Log() << "signal and background events (vectors) using the minimal subset of " << Endl;
   Log() << "all vectors used for training (support vectors). The extension to" << Endl;
   Log() << "the non-linear case is performed by mapping input vectors into a " << Endl;
   Log() << "higher-dimensional feature space in which linear separation is " << Endl;
   Log() << "possible. The use of the kernel functions thereby eliminates the " << Endl;
   Log() << "explicit transformation to the feature space. The implemented SVM " << Endl;
   Log() << "algorithm performs the classification tasks using linear, polynomial, " << Endl;
   Log() << "Gaussian and sigmoidal kernel functions. The Gaussian kernel allows " << Endl;
   Log() << "one to apply any discriminant shape in the input space." << Endl;
   Log() << "SVM is a general purpose non-linear classification method, which " << Endl;
   Log() << "does not require data preprocessing like decorrelation or Principal " << Endl;
   Log() << "Component Analysis. It generalises quite well and can handle analyses " << Endl;
   Log() << "with large numbers of input variables." << Endl;
   Log() << "Optimal performance requires primarily a proper choice of the kernel " << Endl;
   Log() << "parameters (the width \"Sigma\" in case of the Gaussian kernel) and the " << Endl;
   Log() << "cost parameter \"C\". The user must optimise them empirically by running " << Endl;
   Log() << "SVM several times with different parameter sets. The time needed for " << Endl;
   Log() << "each evaluation scales like the square of the number of training " << Endl;
   Log() << "events, so a coarse preliminary tuning should be performed on " << Endl;
   Log() << "reduced data sets." << Endl;
   std::map< TString, std::vector<Double_t> > optVars;
   std::map< TString, std::vector<Double_t> >::iterator iter;
   Log() << kINFO << "Using the " << fTheKernel << " kernel." << Endl;
   if( fTheKernel == "RBF" ){
      if( iter->first == "Gamma" || iter->first == "C"){
         tuneParameters.insert(std::pair<TString,Interval*>(iter->first, new Interval(iter->second.at(0), iter->second.at(1), iter->second.at(2))));
      Log() << kWARNING << iter->first << " is not a recognised tuneable parameter." << Endl;
   else if( fTheKernel == "Polynomial" ){
      if( iter->first == "Theta" || iter->first == "C"){
         tuneParameters.insert(std::pair<TString,Interval*>(iter->first, new Interval(iter->second.at(0), iter->second.at(1), iter->second.at(2))));
      else if( iter->first == "Order"){
         tuneParameters.insert(std::pair<TString,Interval*>(iter->first, new Interval(iter->second.at(0), iter->second.at(1), iter->second.at(2))));
      Log() << kWARNING << iter->first << " is not a recognised tuneable parameter." << Endl;
   else if( fTheKernel == "MultiGauss" ){
      for( int i=0; i<fNumVars; i++){
         s << fVarNames.at(i);
         string str = "Gamma_" + s.str();
      if( iter->first == "GammaList"){
         for( int j=0; j<fNumVars; j++){
            s << fVarNames.at(j);
            string str = "Gamma_" + s.str();
            tuneParameters.insert(std::pair<TString,Interval*>(str, new Interval(iter->second.at(0), iter->second.at(1), iter->second.at(2))));
      else if( iter->first == "C"){
         tuneParameters.insert(std::pair<TString,Interval*>(iter->first, new Interval(iter->second.at(0), iter->second.at(1), iter->second.at(2))));
      Log() << kWARNING << iter->first << " is not a recognised tuneable parameter." << Endl;
   else if( fTheKernel == "Prod" ){
      else if( value == "MultiGauss"){
         for( int i=0; i<fNumVars; i++){
            s << fVarNames.at(i);
            string str = "Gamma_" + s.str();
      else if( value == "Polynomial"){
      Log() << kWARNING << value << " is not a recognised kernel function." << Endl;
   else if( fTheKernel == "Sum" ){
      else if( value == "MultiGauss"){
         for( int i=0; i<fNumVars; i++){
            s << fVarNames.at(i);
            string str = "Gamma_" + s.str();
      else if( value == "Polynomial"){
      Log() << kWARNING << value << " is not a recognised kernel function." << Endl;
   Log() << kWARNING << fTheKernel << " is not a recognised kernel function." << Endl;
   Log() << kINFO << " the following SVM parameters will be tuned on the respective *grid*\n" << Endl;
   std::map<TString,TMVA::Interval*>::iterator it;
   Log() << kWARNING << it->first << Endl;
   std::ostringstream oss;
   std::map<TString, Double_t>::iterator it;
   if( fTheKernel == "RBF" ){
      Log() << kWARNING << it->first << " = " << it->second << Endl;
      if (it->first == "Gamma"){
         SetGamma (it->second);
      else if(it->first == "C"){
         SetCost (it->second);
      Log() << kFATAL << " SetParameter for " << it->first << " not implemented " << Endl;
   else if( fTheKernel == "MultiGauss" ){
      for( int i=0; i<fNumVars; i++){
         s << fVarNames.at(i);
         string str = "Gamma_" + s.str();
      if (it->first == "C"){
         Log() << kWARNING << it->first << " = " << it->second << Endl;
   else if( fTheKernel == "Polynomial" ){
      Log() << kWARNING << it->first << " = " << it->second << Endl;
      if (it->first == "Order"){
         SetOrder(it->second);
      else if (it->first == "Theta"){
         SetTheta(it->second);
      else if(it->first == "C"){ SetCost (it->second);
      else if(it->first == "Mult"){
      Log() << kFATAL << " SetParameter for " << it->first << " not implemented " << Endl;
   else if( fTheKernel == "Prod" || fTheKernel == "Sum"){
      Log() << kWARNING << it->first << " = " << it->second << Endl;
      for( int i=0; i<fNumVars; i++){
         s << fVarNames.at(i);
         string str = "Gamma_" + s.str();
         if(it->first == str){
            fmGamma.push_back(it->second);
      if (it->first == "Gamma"){
         SetGamma (it->second);
      else if (it->first == "Order"){
         SetOrder (it->second);
      else if (it->first == "Theta"){
         SetTheta (it->second);
      else if (it->first == "C"){
         SetCost (it->second);
      Log() << kFATAL << " SetParameter for " << it->first << " not implemented " << Endl;
   Log() << kWARNING << fTheKernel << " is not a recognised kernel function." << Endl;
   fmGamma.push_back(value);
   if (i != (gammas.size()-1)){
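// ------------------------------------------------------------------------
// Editor's sketch, not from this file: a standalone equivalent of the
// SetMGamma()/GetMGamma() round trip, assuming the comma-separated
// "GammaList" format implied by the separator logic above.
#include <sstream>
#include <string>
#include <vector>

std::vector<float> parseGammaList(const std::string& mg)
{
   std::vector<float> gammas;                 // one gamma per input variable
   std::stringstream ss(mg);
   std::string tok;
   while (std::getline(ss, tok, ','))         // split on the assumed ',' delimiter
      gammas.push_back(std::stof(tok));
   return gammas;                             // e.g. "0.1,0.2,0.3" -> {0.1f, 0.2f, 0.3f}
}
// ------------------------------------------------------------------------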
   std::vector<TMVA::SVKernelFunction::EKernelType> kernelsList;
   else if( value == "MultiGauss"){
   Log() << kWARNING << value << " is not a recognised kernel function." << Endl;
   else if( value == "MultiGauss"){
   Log() << kWARNING << value << " is not a recognised kernel function." << Endl;
   Log() << kWARNING << "Unable to split MultiKernels. Delimiters */+ required." << Endl;
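// ------------------------------------------------------------------------
// Editor's note: the '*' and '+' delimiters required by MakeKernelList()
// correspond to the Prod and Sum kernel modes, so a hypothetical booking
// string combining two kernels could look like
//
//    "Kernel=Prod:KernelList=RBF*Polynomial:Gamma=0.25:Order=2:Theta=1.0"
//
// or, for a sum of kernels, "Kernel=Sum:KernelList=RBF+Polynomial". The
// surrounding option values are illustrative only.
// ------------------------------------------------------------------------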
   std::map< TString, std::vector<Double_t> > optVars;
   unsigned first = value.find('[') + 1;
   unsigned last  = value.find_last_of(']');
   std::stringstream strNew (value.substr(first, last-first));
   std::vector<Double_t> tempVec;
   if (strNew.peek() == ';'){
   if (i != 3 && i == tempVec.size()){
   Log() << kWARNING << optParam << " is not a recognised tuneable parameter." << Endl;
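// ------------------------------------------------------------------------
// Editor's sketch, not from this file: GetTuningOptions() extracts the
// text between '[' and ']' for each parameter, skips ';' while reading
// numbers, and expects exactly three values (min, max, steps). A
// hypothetical tuning request could therefore look like
//
//    "Kernel=RBF:Tune=Gamma[0.01;1.0;100],C[0.1;10.0;100]"
//
// though the exact delimiter conventions should be checked against the
// parsing above.
// ------------------------------------------------------------------------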
   if (DataInfo().IsSignal(ev)){
   Log() << kWARNING << lossFunction << " is not a recognised loss function." << Endl;
   Log() << kWARNING << lossFunction << " is not a recognised loss function." << Endl;
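// ------------------------------------------------------------------------
// Editor's note: "hinge" is the default Loss option declared above. For a
// label y = +1 (signal) or -1 (background) and decision value f(x), the
// textbook hinge loss is
//
//    L(y, f(x)) = max(0, 1 - y * f(x)),
//
// which is zero for events classified correctly with margin >= 1 and
// grows linearly with the margin violation otherwise.
// ------------------------------------------------------------------------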