// MethodLD standard constructor: initializer list forwarding to MethodBase
MethodBase( jobName, Types::kLD, methodTitle, dsi, theOption, theTargetDir ),
// MethodLD::Init -- default initialization called by all constructors
if (DataInfo().GetNTargets() != 0) fNRegOut = DataInfo().GetNTargets();
else                               fNRegOut = 1;

fLDCoeff = new vector< vector< Double_t >* >( fNRegOut );
for (Int_t iout = 0; iout < fNRegOut; iout++) {
   (*fLDCoeff)[iout] = new std::vector<Double_t>( GetNvar()+1 );
}

// the minimum requirement to declare an event signal-like
SetSignalReferenceCut( 0.0 );
// MethodLD::~MethodLD -- destructor
if (fSumMatx)    { delete fSumMatx;    fSumMatx    = 0; }
if (fSumValMatx) { delete fSumValMatx; fSumValMatx = 0; }
if (fCoeffMatx)  { delete fCoeffMatx;  fCoeffMatx  = 0; }
if (fLDCoeff) {
   for (vector< vector< Double_t >* >::iterator vi = fLDCoeff->begin(); vi != fLDCoeff->end(); ++vi) {
      if (*vi) { delete *vi; *vi = 0; }
   }
   delete fLDCoeff; fLDCoeff = 0;
}
// MethodLD::HasAnalysisType -- LD handles classification with two classes
// and regression with one regression target
Log() << "regression with " << numberTargets << " targets." << Endl;
// MethodLD::GetMvaValue -- returns the MVA classification output
const Event* ev = GetEvent();

if (fRegressionReturnVal == NULL) fRegressionReturnVal = new vector< Float_t >();
fRegressionReturnVal->resize( fNRegOut );

for (Int_t iout = 0; iout < fNRegOut; iout++) {
   (*fRegressionReturnVal)[iout] = (*(*fLDCoeff)[iout])[0];

   int icoeff = 0;
   for (std::vector<Float_t>::const_iterator it = ev->GetValues().begin(); it != ev->GetValues().end(); ++it) {
      (*fRegressionReturnVal)[iout] += (*(*fLDCoeff)[iout])[++icoeff] * (*it);
   }
}

// cannot determine error
NoErrorCalc( err, errUpper );

return (*fRegressionReturnVal)[0];
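// The value assembled above is the linear form y(x) = c_0 + sum_i c_i * x_i:
// coefficient 0 is the offset, and icoeff is pre-incremented so that
// coefficient i pairs with input variable i-1.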
// MethodLD::GetRegressionValues -- calculates the regression output
const Event* ev = GetEvent();

if (fRegressionReturnVal == NULL) fRegressionReturnVal = new vector< Float_t >();
fRegressionReturnVal->resize( fNRegOut );

for (Int_t iout = 0; iout < fNRegOut; iout++) {
   (*fRegressionReturnVal)[iout] = (*(*fLDCoeff)[iout])[0];

   int icoeff = 0;
   for (std::vector<Float_t>::const_iterator it = ev->GetValues().begin(); it != ev->GetValues().end(); ++it) {
      (*fRegressionReturnVal)[iout] += (*(*fLDCoeff)[iout])[++icoeff] * (*it);
   }
}

// perform inverse transformation on a copy of the event
Event* evT = new Event( *ev );
for (Int_t iout = 0; iout < fNRegOut; iout++) evT->SetTarget( iout, (*fRegressionReturnVal)[iout] );

const Event* evT2 = GetTransformationHandler().InverseTransform( evT );
fRegressionReturnVal->clear();
for (Int_t iout = 0; iout < fNRegOut; iout++) fRegressionReturnVal->push_back( evT2->GetTarget(iout) );

delete evT;
return (*fRegressionReturnVal);
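// Unlike GetMvaValue above, the regression outputs are written into a copy of
// the event and passed through the inverse target transformation, so the
// returned values live on the original (untransformed) target scale.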
// MethodLD::InitMatrices -- initialization method; creates global matrices
// and vectors
fSumMatx    = new TMatrixD( GetNvar()+1, GetNvar()+1 );
fSumValMatx = new TMatrixD( GetNvar()+1, fNRegOut );
fCoeffMatx  = new TMatrixD( GetNvar()+1, fNRegOut );
// MethodLD::GetSum -- calculates the matrix transposed(X)*W*X, with W the
// diagonal matrix of event weights and X the coordinate matrix
const UInt_t nvar = DataInfo().GetNVariables();

for (UInt_t ivar = 0; ivar <= nvar; ivar++) {
   for (UInt_t jvar = 0; jvar <= nvar; jvar++) (*fSumMatx)( ivar, jvar ) = 0;
}

// compute sample sums
Int_t nevts = Data()->GetNEvents();
for (Int_t ievt = 0; ievt < nevts; ievt++) {
   const Event* ev = GetEvent(ievt);
   Double_t weight = ev->GetWeight();

   // in case events with negative weights are to be ignored
   if (IgnoreEventsWithNegWeightsInTraining() && weight <= 0) continue;

   // sum of weights
   (*fSumMatx)( 0, 0 ) += weight;

   // sum of coordinates
   for (UInt_t ivar = 0; ivar < nvar; ivar++) {
      (*fSumMatx)( ivar+1, 0 ) += ev->GetValue( ivar ) * weight;
      (*fSumMatx)( 0, ivar+1 ) += ev->GetValue( ivar ) * weight;
   }

   // sum of products of coordinates
   for (UInt_t ivar = 0; ivar < nvar; ivar++) {
      for (UInt_t jvar = 0; jvar < nvar; jvar++) {
         (*fSumMatx)( ivar+1, jvar+1 ) += ev->GetValue( ivar ) * ev->GetValue( jvar ) * weight;
      }
   }
}
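// In matrix form, GetSum accumulates transposed(X)*W*X: X is the
// event-by-coordinate matrix with a leading column of ones, W the diagonal
// matrix of event weights. Entry (0,0) is the sum of weights, row/column 0
// hold the weighted sums of each variable, and the inner block holds the
// weighted sums of variable products.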
// MethodLD::GetSumVal -- calculates the vector transposed(X)*W*Y, with Y
// being the target vector
const UInt_t nvar = DataInfo().GetNVariables();

for (Int_t ivar = 0; ivar < fNRegOut; ivar++) {
   for (UInt_t jvar = 0; jvar <= nvar; jvar++) {
      (*fSumValMatx)( jvar, ivar ) = 0;
   }
}

// sum of coordinates multiplied by values
for (Int_t ievt = 0; ievt < Data()->GetNEvents(); ievt++) {

   // retrieve the event
   const Event* ev = GetEvent(ievt);
   Double_t weight = ev->GetWeight();

   // in case events with negative weights are to be ignored
   if (IgnoreEventsWithNegWeightsInTraining() && weight <= 0) continue;

   for (Int_t ivar = 0; ivar < fNRegOut; ivar++) {

      Double_t val = weight;

      // classification: use the class label as target;
      // regression: use the true target value
      if (!DoRegression()) val *= DataInfo().IsSignal(ev);
      else                 val *= ev->GetTarget( ivar );

      (*fSumValMatx)( 0, ivar ) += val;
      for (UInt_t jvar = 0; jvar < nvar; jvar++) {
         (*fSumValMatx)( jvar+1, ivar ) += ev->GetValue(jvar) * val;
      }
   }
}
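// GetSumVal accumulates the right-hand side transposed(X)*W*Y, where the
// "target" Y is the class label (IsSignal) for classification and the true
// target value for regression, weighted by the event weight in both cases.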
// MethodLD::GetLDCoeff -- calculates the coefficients used for
// classification/regression
const UInt_t nvar = DataInfo().GetNVariables();

for (Int_t ivar = 0; ivar < fNRegOut; ivar++) {
   TMatrixD invSum( *fSumMatx );
   if (TMath::Abs(invSum.Determinant()) < 10E-24) {
      Log() << kWARNING << "<GetCoeff> matrix is almost singular with determinant="
            << TMath::Abs(invSum.Determinant())
            << "; did you use variables that are linear combinations or highly correlated?"
            << Endl;
   }
   if (TMath::Abs(invSum.Determinant()) < 10E-120) {
      Log() << kFATAL << "<GetCoeff> matrix is singular with determinant="
            << TMath::Abs(invSum.Determinant())
            << "; did you use variables that are linear combinations?"
            << Endl;
   }
   invSum.Invert();

   fCoeffMatx = new TMatrixD( invSum * (*fSumValMatx) );
   for (UInt_t jvar = 0; jvar < nvar+1; jvar++) {
      (*(*fLDCoeff)[ivar])[jvar] = (*fCoeffMatx)( jvar, ivar );
   }
   if (!DoRegression()) {
      (*(*fLDCoeff)[ivar])[0] = 0.0;
      for (UInt_t jvar = 1; jvar < nvar+1; jvar++) {
         (*(*fLDCoeff)[ivar])[0] += (*fCoeffMatx)( jvar, ivar ) * (*fSumMatx)( 0, jvar ) / (*fSumMatx)( 0, 0 );
      }
      (*(*fLDCoeff)[ivar])[0] /= -2.0;
   }
}
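// The coefficients thus solve the weighted least-squares normal equations
//    c = (X^T W X)^(-1) X^T W Y,
// one column of fCoeffMatx per output. For classification the offset c_0 is
// then recomputed from the weighted variable means stored in row 0 of
// fSumMatx, apparently to centre the response around the zero signal
// reference cut set in Init().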
// MethodLD::ReadWeightsFromStream -- read LD coefficients from weight file
for (Int_t iout = 0; iout < fNRegOut; iout++) {
   for (UInt_t icoeff = 0; icoeff < GetNvar()+1; icoeff++) {
      istr >> (*(*fLDCoeff)[iout])[icoeff];
   }
}
// MethodLD::AddWeightsXMLTo -- create XML description for LD classification
// and regression (for an arbitrary number of output classes/targets)
for (Int_t iout = 0; iout < fNRegOut; iout++) {
   for (UInt_t icoeff = 0; icoeff < GetNvar()+1; icoeff++) {
      void* coeffxml = gTools().AddChild( wght, "Coefficient" );
      gTools().AddAttr( coeffxml, "IndexOut",   iout   );
      gTools().AddAttr( coeffxml, "IndexCoeff", icoeff );
      gTools().AddAttr( coeffxml, "Value",      (*(*fLDCoeff)[iout])[icoeff] );
   }
}
// MethodLD::ReadWeightsFromXML -- read coefficients from XML weight file
if (ncoeff != GetNvar()+1) Log() << kFATAL << "Mismatch in number of output variables/coefficients: "
                                 << ncoeff << " != " << GetNvar()+1 << Endl;

// create vector with coefficients (double vector due to arbitrary output dimension)
if (fLDCoeff) {
   for (vector< vector< Double_t >* >::iterator vi = fLDCoeff->begin(); vi != fLDCoeff->end(); ++vi) {
      if (*vi) { delete *vi; *vi = 0; }
   }
   delete fLDCoeff; fLDCoeff = 0;
}
fLDCoeff = new vector< vector< Double_t >* >( fNRegOut );
for (Int_t ivar = 0; ivar < fNRegOut; ivar++) (*fLDCoeff)[ivar] = new std::vector<Double_t>( ncoeff );

// read the coefficients from the XML child nodes
void* ch = gTools().GetChild( wghtnode );
Double_t coeff;
Int_t iout, icoeff;
while (ch) {
   gTools().ReadAttr( ch, "IndexOut",   iout   );
   gTools().ReadAttr( ch, "IndexCoeff", icoeff );
   gTools().ReadAttr( ch, "Value",      coeff  );
   (*(*fLDCoeff)[iout])[icoeff] = coeff;
   ch = gTools().GetNextChild(ch);
}
// MethodLD::MakeClassSpecific -- write LD-specific classifier response
fout << "   std::vector<double> fLDCoefficients;" << std::endl;
fout << "};" << std::endl;
fout << "" << std::endl;
fout << "inline void " << className << "::Initialize() " << std::endl;
fout << "{" << std::endl;
for (UInt_t ivar = 0; ivar < GetNvar()+1; ivar++) {
   Int_t dp = fout.precision();
   fout << "   fLDCoefficients.push_back( "
        << std::setprecision(12) << (*(*fLDCoeff)[0])[ivar]
        << std::setprecision(dp) << " );" << std::endl;
}
fout << std::endl;
fout << "   // sanity check" << std::endl;
fout << "   if (fLDCoefficients.size() != fNvars+1) {" << std::endl;
fout << "      std::cout << \"Problem in class \\\"\" << fClassName << \"\\\"::Initialize: mismatch in number of input values\"" << std::endl;
fout << "                << fLDCoefficients.size() << \" != \" << fNvars+1 << std::endl;" << std::endl;
fout << "      fStatusIsClean = false;" << std::endl;
fout << "   }" << std::endl;
fout << "}" << std::endl;
fout << std::endl;
fout << "inline double " << className << "::GetMvaValue__( const std::vector<double>& inputValues ) const" << std::endl;
fout << "{" << std::endl;
fout << "   double retval = fLDCoefficients[0];" << std::endl;
fout << "   for (size_t ivar = 1; ivar < fNvars+1; ivar++) {" << std::endl;
fout << "      retval += fLDCoefficients[ivar]*inputValues[ivar-1];" << std::endl;
fout << "   }" << std::endl;
fout << std::endl;
fout << "   return retval;" << std::endl;
fout << "}" << std::endl;
fout << std::endl;
fout << "// Clean up" << std::endl;
fout << "inline void " << className << "::Clear() " << std::endl;
fout << "{" << std::endl;
fout << "   // clear coefficients" << std::endl;
fout << "   fLDCoefficients.clear();" << std::endl;
fout << "}" << std::endl;
// MethodLD::CreateRanking -- computes ranking of input variables
fRanking = new Ranking( GetName(), "Discr. power" );

for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) {
   fRanking->AddRank( Rank( GetInputLabel(ivar), TMath::Abs( (*(*fLDCoeff)[0])[ivar+1] ) ) );
}

return fRanking;
// MethodLD::ProcessOptions -- preparation for training
if (HasTrainingTree()) InitMatrices();
// MethodLD::PrintCoefficients -- display the classification/regression
// coefficients for each variable
Log() << kINFO << "Results for LD coefficients:" << Endl;

if (GetTransformationHandler().GetTransformationList().GetSize() != 0) {
   Log() << kINFO << "NOTE: The coefficients must be applied to TRANSFORMED variables" << Endl;
   Log() << kINFO << "      List of the transformations: " << Endl;
   TListIter trIt( &GetTransformationHandler().GetTransformationList() );
   while (VariableTransformBase* trf = (VariableTransformBase*) trIt())
      Log() << kINFO << "  -- " << trf->GetName() << Endl;
}

std::vector<TString>  vars;
std::vector<Double_t> coeffs;
for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) {
   vars  .push_back( GetInputLabel(ivar) );
   coeffs.push_back( (*(*fLDCoeff)[0])[ivar+1] );
}
vars  .push_back( "(offset)" );
coeffs.push_back( (*(*fLDCoeff)[0])[0] );

if (IsNormalised()) {
   Log() << kINFO << "NOTE: You have chosen to use the \"Normalise\" booking option. Hence, the" << Endl;
   Log() << kINFO << "      coefficients must be applied to NORMALISED (') variables as follows:" << Endl;
   Int_t maxL = 0;
   for (UInt_t ivar = 0; ivar < GetNvar(); ivar++)
      if (GetInputLabel(ivar).Length() > maxL) maxL = GetInputLabel(ivar).Length();

   // print the normalisation expression for each variable
   for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) {
      Log() << kINFO
            << std::setw(maxL+9) << TString("[") + GetInputLabel(ivar) + "]' = 2*("
            << std::setw(maxL+2) << TString("[") + GetInputLabel(ivar) + "]"
            << std::setw(3)      << (GetXmin(ivar) > 0 ? " - " : " + ")
            << std::setw(6)      << TMath::Abs(GetXmin(ivar)) << std::setw(3) << ")/"
            << std::setw(6)      << (GetXmax(ivar) - GetXmin(ivar))
            << std::setw(3)      << " - 1"
            << Endl;
   }
   Log() << kINFO << "The TMVA Reader will properly account for this normalisation, but if the" << Endl;
   Log() << kINFO << "LD classifier is applied outside the Reader, the transformation must be" << Endl;
   Log() << kINFO << "implemented -- or the \"Normalise\" option is removed and LD retrained." << Endl;
}
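// The printed normalisation as a plain helper, for applying the coefficients
// outside the Reader (a sketch, not part of TMVA):
//
//    double Normalise( double x, double xmin, double xmax )
//    {
//       return 2.0*( x - xmin )/( xmax - xmin ) - 1.0;   // maps [xmin,xmax] -> [-1,1]
//    }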
// MethodLD::GetHelpMessage -- get help message text
Log() << "Linear discriminants select events by distinguishing the mean" << Endl;
Log() << "values of the signal and background distributions in a trans-" << Endl;
Log() << "formed variable space where linear correlations are removed." << Endl;
Log() << "The LD implementation here is equivalent to the \"Fisher\" discriminant" << Endl;
Log() << "for classification, but also provides linear regression." << Endl;
Log() << Endl;
Log() << "   (More precisely: the \"linear discriminator\" determines" << Endl;
Log() << "   an axis in the (correlated) hyperspace of the input" << Endl;
Log() << "   variables such that, when projecting the output classes" << Endl;
Log() << "   (signal and background) upon this axis, they are pushed" << Endl;
Log() << "   as far as possible away from each other, while events" << Endl;
Log() << "   of the same class are confined to a close vicinity. The" << Endl;
Log() << "   linearity property of this classifier is reflected in the" << Endl;
Log() << "   metric with which \"far apart\" and \"close vicinity\" are" << Endl;
Log() << "   determined: the covariance matrix of the discriminating" << Endl;
Log() << "   variable space.)" << Endl;
Log() << Endl;
Log() << "Optimal performance for the linear discriminant is obtained for" << Endl;
Log() << "linearly correlated Gaussian-distributed variables. Any deviation" << Endl;
Log() << "from this ideal reduces the achievable separation power. In" << Endl;
Log() << "particular, no discrimination at all is achieved for a variable" << Endl;
Log() << "that has the same sample mean for signal and background, even if" << Endl;
Log() << "the shapes of the distributions are very different. Thus, the linear" << Endl;
Log() << "discriminant often benefits from a suitable transformation of the" << Endl;
Log() << "input variables. For example, if a variable x in [-1,1] has a" << Endl;
Log() << "parabolic signal distribution and a uniform background" << Endl;
Log() << "distribution, their mean value is zero in both cases, leading" << Endl;
Log() << "to no separation. The simple transformation x -> |x| renders this" << Endl;
Log() << "variable powerful for use in a linear discriminant." << Endl;
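// The x -> |x| remedy from the help text, expressed with the standard Factory
// interface (the variable name "x" is illustrative):
//
//    factory->AddVariable( "abs(x)", 'F' );   // feed |x| instead of x to LD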