LossFunction.cxx
// @(#)root/tmva $Id$
// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss, Jan Therhaag

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis      *
 * Package: TMVA                                                                  *
 * Class  : LossFunction                                                          *
 * Web    : http://tmva.sourceforge.net                                           *
 *                                                                                *
 * Description:                                                                   *
 *      Implementation (see header for description)                               *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Andreas Hoecker  <Andreas.Hocker@cern.ch>   - CERN, Switzerland           *
 *      Peter Speckmayer <Peter.Speckmayer@cern.ch> - CERN, Switzerland           *
 *      Joerg Stelzer    <Joerg.Stelzer@cern.ch>    - CERN, Switzerland           *
 *      Jan Therhaag     <Jan.Therhaag@cern.ch>     - U of Bonn, Germany          *
 *      Helge Voss       <Helge.Voss@cern.ch>       - MPI-K Heidelberg, Germany   *
 *                                                                                *
 * Copyright (c) 2005-2011:                                                       *
 *      CERN, Switzerland                                                         *
 *      U. of Victoria, Canada                                                    *
 *      MPI-K Heidelberg, Germany                                                 *
 *      U. of Bonn, Germany                                                       *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without             *
 * modification, are permitted according to the terms listed in LICENSE           *
 * (http://tmva.sourceforge.net/license.txt)                                      *
 **********************************************************************************/

#include "TMVA/LossFunction.h"

#include "TMVA/MsgLogger.h"

#include "Rtypes.h"
#include "TMath.h"

#include <algorithm>
#include <iostream>

////////////////////////////////////////////////////////////////////////////////
//-----------------------------------------------------------------------------
// Huber Loss Function
//-----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////


////////////////////////////////////////////////////////////////////////////////
/// huber constructor

TMVA::HuberLossFunction::HuberLossFunction(){
   fTransitionPoint = -9999;
   fSumOfWeights = -9999;
   fQuantile = 0.7;      // the quantile value determines the bulk of the data, e.g. 0.7 defines
                         // the core as the first 70% and the tails as the last 30%
}

TMVA::HuberLossFunction::HuberLossFunction(Double_t quantile){
   fSumOfWeights = -9999;
   fTransitionPoint = -9999;
   fQuantile = quantile;
}

////////////////////////////////////////////////////////////////////////////////
/// huber destructor

TMVA::HuberLossFunction::~HuberLossFunction(){

}

////////////////////////////////////////////////////////////////////////////////
/// figure out the residual that determines the separation between the
/// "core" and the "tails" of the residuals distribution

void TMVA::HuberLossFunction::Init(std::vector<LossFunctionEventInfo>& evs){

   // Calculate the residual that separates the core and the tails
   SetSumOfWeights(evs);
   SetTransitionPoint(evs);
}

////////////////////////////////////////////////////////////////////////////////
/// huber, calculate the sum of weights for a collection of events

Double_t TMVA::HuberLossFunction::CalculateSumOfWeights(std::vector<LossFunctionEventInfo>& evs){

   // Calculate the sum of the weights
   Double_t sumOfWeights = 0;
   for(UInt_t i = 0; i<evs.size(); i++)
      sumOfWeights+=evs[i].weight;

   return sumOfWeights;
}

////////////////////////////////////////////////////////////////////////////////
/// huber, determine the quantile for a given input

Double_t TMVA::HuberLossFunction::CalculateQuantile(std::vector<LossFunctionEventInfo>& evs, Double_t whichQuantile, Double_t sumOfWeights, bool abs){

   // use a lambda function to tell the vector how to sort the LossFunctionEventInfo data structures
   // (sort them in ascending order of residual magnitude) if abs is true,
   // otherwise sort them in ascending order of residual
   if(abs)
      std::sort(evs.begin(), evs.end(), [](LossFunctionEventInfo a, LossFunctionEventInfo b){
                   return TMath::Abs(a.trueValue-a.predictedValue) < TMath::Abs(b.trueValue-b.predictedValue); });
   else
      std::sort(evs.begin(), evs.end(), [](LossFunctionEventInfo a, LossFunctionEventInfo b){
                   return (a.trueValue-a.predictedValue) < (b.trueValue-b.predictedValue); });

   // walk up the sorted events, accumulating weight, until the requested
   // fraction (whichQuantile) of the total weight has been passed
   UInt_t i = 0;
   Double_t temp = 0.0;
   while(i<evs.size()-1 && temp <= sumOfWeights*whichQuantile){
      temp += evs[i].weight;
      i++;
   }
   // edge cases
   if(whichQuantile == 0) i=0; // assume 0th quantile to mean the 0th entry in the ordered series

   // usual returns
   if(abs) return TMath::Abs(evs[i].trueValue-evs[i].predictedValue);
   else return evs[i].trueValue-evs[i].predictedValue;
}
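
// The weighted quantile above is the workhorse of this class: the transition
// point is the fQuantile-th quantile of the |residual| distribution, and the
// BDT initialization further down reuses the same routine with whichQuantile = 0.5
// to get a weighted median. A minimal standalone sketch of the idea follows; the
// function lives in an anonymous namespace, its name is illustrative, and it is
// not part of the TMVA API.
namespace {

   // Sort by |residual| and walk up the events, accumulating weight, until the
   // requested fraction of the total weight has been passed; return that |residual|.
   double WeightedAbsResidualQuantileSketch(std::vector<TMVA::LossFunctionEventInfo> evs, double quantile)
   {
      if (evs.empty()) return 0.0;
      std::sort(evs.begin(), evs.end(),
                [](const TMVA::LossFunctionEventInfo& a, const TMVA::LossFunctionEventInfo& b){
                   return TMath::Abs(a.trueValue-a.predictedValue) < TMath::Abs(b.trueValue-b.predictedValue); });

      double total = 0;
      for (UInt_t i = 0; i < evs.size(); i++) total += evs[i].weight;

      double running = 0;
      for (UInt_t i = 0; i < evs.size(); i++) {
         running += evs[i].weight;
         if (running > quantile*total)
            return TMath::Abs(evs[i].trueValue - evs[i].predictedValue);
      }
      return TMath::Abs(evs.back().trueValue - evs.back().predictedValue);
   }

} // anonymous namespace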

////////////////////////////////////////////////////////////////////////////////
/// huber, determine the transition point using the values for fQuantile and fSumOfWeights
/// which presumably have already been set

void TMVA::HuberLossFunction::SetTransitionPoint(std::vector<LossFunctionEventInfo>& evs){
   fTransitionPoint = CalculateQuantile(evs, fQuantile, fSumOfWeights, true);

   // if the transition point corresponding to the quantile is 0 then the loss function will not function;
   // the quantile was chosen too low. Let's use the first nonzero residual as the transition point instead.
   if(fTransitionPoint == 0){
      // evs should already be sorted according to the magnitude of the residuals, since CalculateQuantile does this
      for(UInt_t i=0; i<evs.size(); i++){
         Double_t residual = TMath::Abs(evs[i].trueValue - evs[i].predictedValue);
         if(residual != 0){
            fTransitionPoint = residual;
            break;
         }
      }
   }

   // Let the user know that the transition point is zero and the loss function won't work properly
   if(fTransitionPoint == 0){
      //std::cout << "The residual transition point for the Huber loss function corresponding to quantile, " << fQuantile << ", is zero."
      //<< " This implies that all of the residuals are zero and the events have been predicted perfectly. Perhaps the regression is too complex"
      //<< " for the amount of data." << std::endl;
   }
}

////////////////////////////////////////////////////////////////////////////////
/// huber, set the sum of weights given a collection of events

void TMVA::HuberLossFunction::SetSumOfWeights(std::vector<LossFunctionEventInfo>& evs){
   fSumOfWeights = CalculateSumOfWeights(evs);
}

////////////////////////////////////////////////////////////////////////////////
/// huber, determine the loss for a single event

Double_t TMVA::HuberLossFunction::CalculateLoss(LossFunctionEventInfo& e){
   // If the huber loss function is uninitialized then assume a group of one
   // and initialize the transition point and weights for this single event
   if(fSumOfWeights == -9999){
      std::vector<LossFunctionEventInfo> evs;
      evs.push_back(e);

      SetSumOfWeights(evs);
      SetTransitionPoint(evs);
   }

   Double_t residual = TMath::Abs(e.trueValue - e.predictedValue);
   Double_t loss = 0;
   // Quadratic loss in terms of the residual for small residuals
   if(residual <= fTransitionPoint) loss = 0.5*residual*residual;
   // Linear loss for large residuals, so that the tails don't dominate the net loss calculation
   else loss = fQuantile*residual - 0.5*fQuantile*fQuantile;
   return e.weight*loss;
}
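
// For reference, the textbook Huber loss with transition point delta is
// quadratic for |r| <= delta and linear for |r| > delta, with the linear branch
// offset so the two pieces join continuously at |r| = delta. A standalone
// sketch of that definition is given below; it is an illustration only and the
// function name is not part of TMVA.
namespace {

   // Textbook Huber loss:
   //   0.5*r^2                  for |r| <= delta
   //   delta*(|r| - 0.5*delta)  for |r| >  delta
   // Both branches evaluate to 0.5*delta^2 at |r| = delta, so the loss is continuous.
   double HuberLossSketch(double residual, double delta)
   {
      double r = TMath::Abs(residual);
      if (r <= delta) return 0.5*r*r;
      return delta*(r - 0.5*delta);
   }

} // anonymous namespace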

////////////////////////////////////////////////////////////////////////////////
/// huber, determine the net loss for a collection of events

Double_t TMVA::HuberLossFunction::CalculateNetLoss(std::vector<LossFunctionEventInfo>& evs){
   // Initialize the Huber Loss Function so that we can calculate the loss.
   // The loss for each event depends on the other events in the group
   // that define the cutoff quantile (fTransitionPoint).
   SetSumOfWeights(evs);
   SetTransitionPoint(evs);

   Double_t netloss = 0;
   for(UInt_t i=0; i<evs.size(); i++)
      netloss+=CalculateLoss(evs[i]);
   return netloss;
   // should get a function to return the average loss as well
   // return netloss/fSumOfWeights
}

////////////////////////////////////////////////////////////////////////////////
/// huber, determine the mean loss for a collection of events

Double_t TMVA::HuberLossFunction::CalculateMeanLoss(std::vector<LossFunctionEventInfo>& evs){
   // Initialize the Huber Loss Function so that we can calculate the loss.
   // The loss for each event depends on the other events in the group
   // that define the cutoff quantile (fTransitionPoint).
   SetSumOfWeights(evs);
   SetTransitionPoint(evs);

   Double_t netloss = 0;
   for(UInt_t i=0; i<evs.size(); i++)
      netloss+=CalculateLoss(evs[i]);
   return netloss/fSumOfWeights;
}
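
// How the pieces above fit together for a toy sample: CalculateNetLoss and
// CalculateMeanLoss first derive the transition point from the |residual|
// distribution of the group, then sum (or average by weight) the per-event
// losses. A small usage sketch follows; the event values are invented for
// illustration and the printout is not something TMVA itself produces.
namespace {

   void HuberLossUsageSketch()
   {
      std::vector<TMVA::LossFunctionEventInfo> evs;
      evs.push_back(TMVA::LossFunctionEventInfo(1.0, 0.9, 1.0)); // residual 0.1
      evs.push_back(TMVA::LossFunctionEventInfo(2.0, 1.8, 1.0)); // residual 0.2
      evs.push_back(TMVA::LossFunctionEventInfo(3.0, 0.0, 1.0)); // residual 3.0

      TMVA::HuberLossFunction huber;                // quantile defaults to 0.7
      Double_t net  = huber.CalculateNetLoss(evs);  // weighted sum of per-event losses
      Double_t mean = huber.CalculateMeanLoss(evs); // net loss divided by the sum of weights
      std::cout << "Huber net loss: " << net << ", mean loss: " << mean << std::endl;
   }

} // anonymous namespace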

////////////////////////////////////////////////////////////////////////////////
//-----------------------------------------------------------------------------
// Huber BDT Loss Function
//-----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////

TMVA::HuberLossFunctionBDT::HuberLossFunctionBDT(){
}

////////////////////////////////////////////////////////////////////////////////
/// huber BDT, initialize the targets and prepare for the regression

void TMVA::HuberLossFunctionBDT::Init(std::map<const TMVA::Event*, LossFunctionEventInfo>& evinfomap, std::vector<double>& boostWeights){
// Run this once before building the forest. Set initial prediction to weightedMedian.

   std::vector<LossFunctionEventInfo> evinfovec;
   for (auto &e: evinfomap){
      evinfovec.push_back(LossFunctionEventInfo(e.second.trueValue, e.second.predictedValue, e.first->GetWeight()));
   }

   // Calculates fSumOfWeights and fTransitionPoint with the current residuals
   SetSumOfWeights(evinfovec);
   Double_t weightedMedian = CalculateQuantile(evinfovec, 0.5, fSumOfWeights, false);

   // Store the weighted median as a first boost weight for later use
   boostWeights.push_back(weightedMedian);
   for (auto &e: evinfomap ) {
      // set the initial prediction for all events to the median
      e.second.predictedValue += weightedMedian;
   }
}

////////////////////////////////////////////////////////////////////////////////
/// huber BDT, set the targets for a collection of events

void TMVA::HuberLossFunctionBDT::SetTargets(std::vector<const TMVA::Event*>& evs, std::map< const TMVA::Event*, LossFunctionEventInfo >& evinfomap){

   std::vector<LossFunctionEventInfo> eventvec;
   for (std::vector<const TMVA::Event*>::const_iterator e=evs.begin(); e!=evs.end();e++){
      eventvec.push_back(LossFunctionEventInfo(evinfomap[*e].trueValue, evinfomap[*e].predictedValue, (*e)->GetWeight()));
   }

   // Recalculate the residual that separates the "core" of the data and the "tails".
   // This residual is the quantile given by fQuantile, defaulted to 0.7;
   // the quantile corresponding to 0.5 would be the usual median.
   SetSumOfWeights(eventvec); // This was already set in Init, but may change if there is subsampling for each tree
   SetTransitionPoint(eventvec);

   for (std::vector<const TMVA::Event*>::const_iterator e=evs.begin(); e!=evs.end();e++) {
      const_cast<TMVA::Event*>(*e)->SetTarget(0,Target(evinfomap[*e]));
   }
}

////////////////////////////////////////////////////////////////////////////////
/// huber BDT, set the target for a single event

Double_t TMVA::HuberLossFunctionBDT::Target(LossFunctionEventInfo& e){
   Double_t residual = e.trueValue - e.predictedValue;
   // The weight/target relationships are taken care of in the tmva decision tree operations so we don't need to worry about that here
   if(TMath::Abs(residual) <= fTransitionPoint) return residual;
   else return fTransitionPoint*(residual<0?-1.0:1.0);
}
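
// The target above matches the negative gradient of the standard Huber loss
// with respect to the current prediction: the residual itself in the quadratic
// region, saturating at +-fTransitionPoint in the linear region. A standalone
// sketch of that gradient is shown below; the function name is illustrative
// and it is not part of TMVA.
namespace {

   // Negative gradient of the textbook Huber loss with transition point delta.
   double HuberNegativeGradientSketch(double trueValue, double predictedValue, double delta)
   {
      double r = trueValue - predictedValue;
      if (TMath::Abs(r) <= delta) return r; // quadratic region: gradient is the residual
      return (r < 0 ? -delta : delta);      // linear region: clipped at +-delta
   }

} // anonymous namespace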

////////////////////////////////////////////////////////////////////////////////
/// huber BDT, determine the fit value for the terminal node based upon the
/// events in the terminal node

Double_t TMVA::HuberLossFunctionBDT::Fit(std::vector<LossFunctionEventInfo>& evs){
// The fit in the terminal node for huber is basically the median of the residuals.
// Then you add the average difference from the median to that.
// The tails are discounted. If a residual is in the tails then we just use the
// cutoff residual that sets the "core" and the "tails" instead of the large residual.
// So we get something between least squares (mean as fit) and absolute deviation (median as fit).
   Double_t sumOfWeights = CalculateSumOfWeights(evs);
   Double_t shift=0,diff= 0;
   Double_t residualMedian = CalculateQuantile(evs,0.5,sumOfWeights, false);
   for(UInt_t j=0;j<evs.size();j++){
      Double_t residual = evs[j].trueValue - evs[j].predictedValue;
      diff = residual-residualMedian;
      // if we are using weights then I'm not sure why this isn't weighted
      shift+=1.0/evs.size()*((diff<0)?-1.0:1.0)*TMath::Min(fTransitionPoint,fabs(diff));
      // I think this should be
      // shift+=evs[j].weight/sumOfWeights*((diff<0)?-1.0:1.0)*TMath::Min(fTransitionPoint,fabs(diff));
      // not sure why it was originally coded like this
   }
   return (residualMedian + shift);

}
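
// The comment inside Fit() above suggests a weighted version of the shift, where
// each event contributes in proportion to its weight instead of 1/N. A sketch of
// that alternative is given below for comparison only; the function name is
// illustrative, and the version TMVA actually uses is the unweighted one above.
namespace {

   double HuberFitWeightedShiftSketch(std::vector<TMVA::LossFunctionEventInfo>& evs,
                                      double residualMedian, double transitionPoint,
                                      double sumOfWeights)
   {
      double shift = 0;
      for (UInt_t j = 0; j < evs.size(); j++) {
         double diff = (evs[j].trueValue - evs[j].predictedValue) - residualMedian;
         // weight-proportional contribution, clipped at the transition point
         shift += evs[j].weight/sumOfWeights * ((diff < 0) ? -1.0 : 1.0) * TMath::Min(transitionPoint, TMath::Abs(diff));
      }
      return residualMedian + shift;
   }

} // anonymous namespace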

////////////////////////////////////////////////////////////////////////////////
//-----------------------------------------------------------------------------
// Least Squares Loss Function
//-----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////

// Constructor and destructor are in the header file. They don't do anything.

////////////////////////////////////////////////////////////////////////////////
/// least squares, determine the loss for a single event

Double_t TMVA::LeastSquaresLossFunction::CalculateLoss(LossFunctionEventInfo& e){
   Double_t residual = (e.trueValue - e.predictedValue);
   Double_t loss = 0;
   loss = residual*residual;
   return e.weight*loss;
}

////////////////////////////////////////////////////////////////////////////////
/// least squares, determine the net loss for a collection of events

Double_t TMVA::LeastSquaresLossFunction::CalculateNetLoss(std::vector<LossFunctionEventInfo>& evs){
   Double_t netloss = 0;
   for(UInt_t i=0; i<evs.size(); i++)
      netloss+=CalculateLoss(evs[i]);
   return netloss;
   // should get a function to return the average loss as well
   // return netloss/fSumOfWeights
}

////////////////////////////////////////////////////////////////////////////////
/// least squares, determine the mean loss for a collection of events

Double_t TMVA::LeastSquaresLossFunction::CalculateMeanLoss(std::vector<LossFunctionEventInfo>& evs){
   Double_t netloss = 0;
   Double_t sumOfWeights = 0;
   for(UInt_t i=0; i<evs.size(); i++){
      sumOfWeights+=evs[i].weight;
      netloss+=CalculateLoss(evs[i]);
   }
   // return the weighted mean
   return netloss/sumOfWeights;
}

////////////////////////////////////////////////////////////////////////////////
//-----------------------------------------------------------------------------
// Least Squares BDT Loss Function
//-----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////

// Constructor and destructor are defined in the header. They don't do anything.

////////////////////////////////////////////////////////////////////////////////
/// least squares BDT, initialize the targets and prepare for the regression

void TMVA::LeastSquaresLossFunctionBDT::Init(std::map<const TMVA::Event*, LossFunctionEventInfo>& evinfomap, std::vector<double>& boostWeights){
// Run this once before building the forest. Set initial prediction to the weighted mean.

   std::vector<LossFunctionEventInfo> evinfovec;
   for (auto &e: evinfomap){
      evinfovec.push_back(LossFunctionEventInfo(e.second.trueValue, e.second.predictedValue, e.first->GetWeight()));
   }

   // Initial prediction for least squares is the weighted mean
   Double_t weightedMean = Fit(evinfovec);

   // Store the weighted mean as a first boost weight for later use
   boostWeights.push_back(weightedMean);
   for (auto &e: evinfomap ) {
      // set the initial prediction for all events to the mean
      e.second.predictedValue += weightedMean;
   }
}

////////////////////////////////////////////////////////////////////////////////
/// least squares BDT, set the targets for a collection of events

void TMVA::LeastSquaresLossFunctionBDT::SetTargets(std::vector<const TMVA::Event*>& evs, std::map< const TMVA::Event*, LossFunctionEventInfo >& evinfomap){

   std::vector<LossFunctionEventInfo> eventvec;
   for (std::vector<const TMVA::Event*>::const_iterator e=evs.begin(); e!=evs.end();e++){
      eventvec.push_back(LossFunctionEventInfo(evinfomap[*e].trueValue, evinfomap[*e].predictedValue, (*e)->GetWeight()));
   }

   for (std::vector<const TMVA::Event*>::const_iterator e=evs.begin(); e!=evs.end();e++) {
      const_cast<TMVA::Event*>(*e)->SetTarget(0,Target(evinfomap[*e]));
   }
}

////////////////////////////////////////////////////////////////////////////////
/// least squares BDT, set the target for a single event

Double_t TMVA::LeastSquaresLossFunctionBDT::Target(LossFunctionEventInfo& e){
   Double_t residual = e.trueValue - e.predictedValue;
   // The weight/target relationships are taken care of in the tmva decision tree operations. We don't need to worry about that here
   // and we return the residual instead of the weight*residual.
   return residual;
}

////////////////////////////////////////////////////////////////////////////////
/// least squares BDT, determine the fit value for the terminal node based upon the
/// events in the terminal node

Double_t TMVA::LeastSquaresLossFunctionBDT::Fit(std::vector<LossFunctionEventInfo>& evs){
// The fit in the terminal node for least squares is the weighted average of the residuals.
   Double_t sumOfWeights = 0;
   Double_t weightedResidualSum = 0;
   for(UInt_t j=0;j<evs.size();j++){
      sumOfWeights += evs[j].weight;
      Double_t residual = evs[j].trueValue - evs[j].predictedValue;
      weightedResidualSum += evs[j].weight*residual;
   }
   Double_t weightedMean = weightedResidualSum/sumOfWeights;

   // return the weighted mean
   return weightedMean;
}
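
// Why the weighted mean: the terminal-node value gamma that minimizes
// sum_j w_j*(r_j - gamma)^2 satisfies sum_j w_j*(r_j - gamma) = 0, i.e.
// gamma = sum_j w_j*r_j / sum_j w_j, which is exactly what Fit() returns.
// A standalone sketch on plain residual/weight arrays follows; the function
// name is illustrative and not part of TMVA.
namespace {

   double WeightedMeanSketch(const std::vector<double>& residuals, const std::vector<double>& weights)
   {
      double sumW = 0, sumWR = 0;
      for (UInt_t j = 0; j < residuals.size(); j++) {
         sumW  += weights[j];
         sumWR += weights[j]*residuals[j];
      }
      return sumW > 0 ? sumWR/sumW : 0.0;
   }

} // anonymous namespace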

////////////////////////////////////////////////////////////////////////////////
//-----------------------------------------------------------------------------
// Absolute Deviation Loss Function
//-----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////

// Constructors are in the header. They don't do anything.

////////////////////////////////////////////////////////////////////////////////
/// absolute deviation, determine the loss for a single event

Double_t TMVA::AbsoluteDeviationLossFunction::CalculateLoss(LossFunctionEventInfo& e){
   Double_t residual = e.trueValue - e.predictedValue;
   return e.weight*TMath::Abs(residual);
}

////////////////////////////////////////////////////////////////////////////////
/// absolute deviation, determine the net loss for a collection of events

Double_t TMVA::AbsoluteDeviationLossFunction::CalculateNetLoss(std::vector<LossFunctionEventInfo>& evs){

   Double_t netloss = 0;
   for(UInt_t i=0; i<evs.size(); i++)
      netloss+=CalculateLoss(evs[i]);
   return netloss;
}

////////////////////////////////////////////////////////////////////////////////
/// absolute deviation, determine the mean loss for a collection of events

Double_t TMVA::AbsoluteDeviationLossFunction::CalculateMeanLoss(std::vector<LossFunctionEventInfo>& evs){
   Double_t sumOfWeights = 0;
   Double_t netloss = 0;
   for(UInt_t i=0; i<evs.size(); i++){
      sumOfWeights+=evs[i].weight;
      netloss+=CalculateLoss(evs[i]);
   }
   return netloss/sumOfWeights;
}

////////////////////////////////////////////////////////////////////////////////
//-----------------------------------------------------------------------------
// Absolute Deviation BDT Loss Function
//-----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////////////
/// absolute deviation BDT, initialize the targets and prepare for the regression

void TMVA::AbsoluteDeviationLossFunctionBDT::Init(std::map<const TMVA::Event*, LossFunctionEventInfo>& evinfomap, std::vector<double>& boostWeights){
// Run this once before building the forest. Set initial prediction to weightedMedian.

   std::vector<LossFunctionEventInfo> evinfovec;
   for (auto &e: evinfomap){
      evinfovec.push_back(LossFunctionEventInfo(e.second.trueValue, e.second.predictedValue, e.first->GetWeight()));
   }

   Double_t weightedMedian = Fit(evinfovec);

   // Store the weighted median as a first boost weight for later use
   boostWeights.push_back(weightedMedian);
   for (auto &e: evinfomap ) {
      // set the initial prediction for all events to the median
      e.second.predictedValue += weightedMedian;
   }
}

////////////////////////////////////////////////////////////////////////////////
/// absolute deviation BDT, set the targets for a collection of events

void TMVA::AbsoluteDeviationLossFunctionBDT::SetTargets(std::vector<const TMVA::Event*>& evs, std::map< const TMVA::Event*, LossFunctionEventInfo >& evinfomap){

   std::vector<LossFunctionEventInfo> eventvec;
   for (std::vector<const TMVA::Event*>::const_iterator e=evs.begin(); e!=evs.end();e++){
      eventvec.push_back(LossFunctionEventInfo(evinfomap[*e].trueValue, evinfomap[*e].predictedValue, (*e)->GetWeight()));
   }

   for (std::vector<const TMVA::Event*>::const_iterator e=evs.begin(); e!=evs.end();e++) {
      const_cast<TMVA::Event*>(*e)->SetTarget(0,Target(evinfomap[*e]));
   }
}

////////////////////////////////////////////////////////////////////////////////
/// absolute deviation BDT, set the target for a single event

Double_t TMVA::AbsoluteDeviationLossFunctionBDT::Target(LossFunctionEventInfo& e){
// The target is the sign of the residual.
   Double_t residual = e.trueValue - e.predictedValue;
   // The weight/target relationships are taken care of in the tmva decision tree operations so we don't need to worry about that here
   return (residual<0?-1.0:1.0);
}

////////////////////////////////////////////////////////////////////////////////
/// absolute deviation BDT, determine the fit value for the terminal node based upon the
/// events in the terminal node

Double_t TMVA::AbsoluteDeviationLossFunctionBDT::Fit(std::vector<LossFunctionEventInfo>& evs){
// For Absolute Deviation, the fit in each terminal node is the weighted residual median.

   // use a lambda function to tell the vector how to sort the LossFunctionEventInfo data structures
   // sort in ascending order of residual value
   std::sort(evs.begin(), evs.end(), [](LossFunctionEventInfo a, LossFunctionEventInfo b){
                return (a.trueValue-a.predictedValue) < (b.trueValue-b.predictedValue); });

   // calculate the sum of weights, used in the weighted median calculation
   Double_t sumOfWeights = 0;
   for(UInt_t j=0; j<evs.size(); j++)
      sumOfWeights+=evs[j].weight;

   // get the index of the weighted median
   UInt_t i = 0;
   Double_t temp = 0.0;
   while(i<evs.size() && temp <= sumOfWeights*0.5){
      temp += evs[i].weight;
      i++;
   }
   if (i >= evs.size()) return 0.; // prevent uncontrolled memory access in return value calculation

   // return the median residual
   return evs[i].trueValue-evs[i].predictedValue;
}
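
// Why the weighted median: the terminal-node value gamma that minimizes
// sum_j w_j*|r_j - gamma| is the point where the accumulated weight below gamma
// first exceeds half of the total weight, i.e. the weighted median of the
// residuals, which is what Fit() above computes. A standalone sketch of that
// rule follows; the function name is illustrative and not part of TMVA.
namespace {

   double WeightedMedianResidualSketch(std::vector<TMVA::LossFunctionEventInfo> evs)
   {
      if (evs.empty()) return 0.0;
      std::sort(evs.begin(), evs.end(),
                [](const TMVA::LossFunctionEventInfo& a, const TMVA::LossFunctionEventInfo& b){
                   return (a.trueValue-a.predictedValue) < (b.trueValue-b.predictedValue); });

      double total = 0;
      for (UInt_t j = 0; j < evs.size(); j++) total += evs[j].weight;

      double running = 0;
      for (UInt_t j = 0; j < evs.size(); j++) {
         running += evs[j].weight;
         if (running > 0.5*total)
            return evs[j].trueValue - evs[j].predictedValue;
      }
      return evs.back().trueValue - evs.back().predictedValue;
   }

} // anonymous namespace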