ROOT  6.06/09
Reference Guide
Fitter.cxx
Go to the documentation of this file.
1 // @(#)root/mathcore:$Id$
2 // Author: L. Moneta Mon Sep 4 17:00:10 2006
3 
4 /**********************************************************************
5  * *
6  * Copyright (c) 2006 LCG ROOT Math Team, CERN/PH-SFT *
7  * *
8  * *
9  **********************************************************************/
10 
11 // Implementation file for class Fitter
12 
13 
14 #include "Fit/Fitter.h"
15 #include "Fit/Chi2FCN.h"
17 #include "Fit/LogLikelihoodFCN.h"
18 #include "Math/Minimizer.h"
19 #include "Math/MinimizerOptions.h"
20 #include "Fit/BinData.h"
21 #include "Fit/UnBinData.h"
22 #include "Fit/FcnAdapter.h"
23 #include "Math/Error.h"
24 
25 #include <memory>
26 
27 #include "Math/IParamFunction.h"
28 
30 
31 // #include "TMatrixDSym.h"
32 // for debugging
33 //#include "TMatrixD.h"
34 // #include <iomanip>
35 
36 namespace ROOT {
37 
38  namespace Fit {
39 
40 // use a static variable to get default minimizer options for error def
41 // to see if user has changed it later on. If it has not been changed we set
42 // for the likelihood method an error def of 0.5
43 // t.b.d : multiply likelihood by 2 so have same error def definition as chi2
// NOTE(review): captured once at static-initialization time; comparing the
// current FitConfig error def against this value detects a user override.
44  double gDefaultErrorDef = ROOT::Math::MinimizerOptions::DefaultErrorDef();
45 
46 
// Default constructor: member-initializer list setting neutral state — no
// gradient use, unbinned, fit type 0 (unknown), empty data size.
// NOTE(review): the "Fitter::Fitter() :" signature line (orig. line 47) is
// missing from this extract (Doxygen scrape dropped hyperlinked lines).
48  fUseGradient(false),
49  fBinFit(false),
50  fFitType(0),
51  fDataSize(0)
52 {}
53 
// Construct a Fitter from a previously filled FitResult: when present, the
// stored model function, objective function and fit data are adopted via
// their shared_ptr members; SetFunction() also recreates the FitConfig
// parameter settings from the model function.
54 Fitter::Fitter(const std::shared_ptr<FitResult> & result) :
55  fUseGradient(false),
56  fBinFit(false),
57  fFitType(0),
58  fDataSize(0),
59  fResult(result)
60 {
61  if (result->fFitFunc) SetFunction(*fResult->fFitFunc); // this will create also the configuration
62  if (result->fObjFunc) fObjFunction = fResult->fObjFunc;
63  if (result->fFitData) fData = fResult->fFitData;
64 }
65 
// Destructor body — empty by design: every owned object (result, data,
// functions, minimizer) is held through shared_ptr and released automatically.
// NOTE(review): the "Fitter::~Fitter()" signature line (orig. line 66) is
// missing from this extract.
67 {
68  // Destructor implementation.
69 
70  // nothing to do since we use shared_ptr now
71 }
72 
// Copy constructor: forwards to the assignment operator.
// NOTE(review): operator= below is a stub whose copying logic is commented
// out, so no members are actually copied here — presumably copying is
// deliberately disabled (operator= is documented as private); confirm this
// constructor is unreachable for external callers.
73 Fitter::Fitter(const Fitter & rhs)
74 {
75  // Implementation of copy constructor.
76  // copy FitResult, FitConfig and clone fit function
77  (*this) = rhs;
78 }
79 
// Assignment operator — intentionally a stub (class is documented as not
// copyable; the real copy logic below is commented out).
// NOTE(review): the "Fitter & Fitter::operator=(const Fitter & rhs)"
// signature line (orig. line 80) is missing from this extract.
81 {
82  // Implementation of assignment operator.
83  // dummy implementation, since it is private
84  if (this == &rhs) return *this; // time saving self-test
85 // fUseGradient = rhs.fUseGradient;
86 // fBinFit = rhs.fBinFit;
87 // fResult = rhs.fResult;
88 // fConfig = rhs.fConfig;
89 // // function is copied and managed by FitResult (maybe should use an auto_ptr)
90 // fFunc = fResult.ModelFunction();
91 // if (rhs.fFunc != 0 && fResult.ModelFunction() == 0) { // case no fit has been done yet - then clone
92 // if (fFunc) delete fFunc;
93 // fFunc = dynamic_cast<IModelFunction *>( (rhs.fFunc)->Clone() );
94 // assert(fFunc != 0);
95 // }
96  return *this;
97 }
98 
// SetFunction(const IModelFunction & func, bool useGradient): set the fit
// model function. If gradient use was requested, try to downcast to a
// gradient-providing interface and delegate to the gradient overload;
// otherwise clone the function and store it as a plain model function.
// NOTE(review): the signature line (orig. line 99) and the
// fConfig.CreateParamsSettings(...) call (orig. line 122) are missing from
// this extract — the trailing comment refers to that dropped call.
100 {
101 
103  if (fUseGradient) {
104  const IGradModelFunction * gradFunc = dynamic_cast<const IGradModelFunction*>(&func);
105  if (gradFunc) {
106  SetFunction(*gradFunc, true);
107  return;
108  }
109  else {
110  MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
111  }
112  }
113  fUseGradient = false;
114 
115  // set the fit model function (clone the given one and keep a copy )
116  //std::cout << "set a non-grad function" << std::endl;
117 
118  fFunc = std::shared_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone() ) );
119  assert(fFunc);
120 
121  // creates the parameter settings
123 }
124 
125 
// SetFunction for a one-dimensional parametric function: optionally route to
// the 1D-gradient overload, otherwise wrap the function in a
// MultiDimParamFunctionAdapter (which clones it) so the rest of the fitting
// machinery can treat it as multi-dimensional.
// NOTE(review): the signature line (orig. line 126) and the
// fConfig.CreateParamsSettings(...) call (orig. line 146) are missing from
// this extract.
127 {
129  if (fUseGradient) {
130  const IGradModel1DFunction * gradFunc = dynamic_cast<const IGradModel1DFunction*>(&func);
131  if (gradFunc) {
132  SetFunction(*gradFunc, true);
133  return;
134  }
135  else {
136  MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
137  }
138  }
139  fUseGradient = false;
140  //std::cout << "set a 1d function" << std::endl;
141 
142  // function is cloned when creating the adapter
143  fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamFunctionAdapter(func));
144 
145  // creates the parameter settings
147 }
148 
// SetFunction for a gradient-providing model function: clone it and keep the
// clone as the model function (gradient capability is preserved by the
// stored dynamic type).
// NOTE(review): the signature line (orig. lines 149/151) and the
// fConfig.CreateParamsSettings(...) call (orig. line 158) are missing from
// this extract.
150 {
152  //std::cout << "set a grad function" << std::endl;
153  // set the fit model function (clone the given one and keep a copy )
154  fFunc = std::shared_ptr<IModelFunction>( dynamic_cast<IGradModelFunction *> ( func.Clone() ) );
155  assert(fFunc);
156 
157  // creates the parameter settings
159 }
160 
161 
// SetFunction for a one-dimensional gradient-providing function: wrap it in a
// MultiDimParamGradFunctionAdapter (which clones it), preserving the gradient.
// NOTE(review): the signature line (orig. line 162) and the
// fConfig.CreateParamsSettings(...) call (orig. line 170) are missing from
// this extract.
163 {
164  //std::cout << "set a 1d grad function" << std::endl;
166  // function is cloned when creating the adapter
167  fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamGradFunctionAdapter(func));
168 
169  // creates the parameter settings
171 }
172 
173 
174 bool Fitter::SetFCN(const ROOT::Math::IMultiGenFunction & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
175  // set the objective function for the fit
176  // if params is not NULL create the parameter settings
177  fUseGradient = false;
178  unsigned int npar = fcn.NDim();
179  if (npar == 0) {
180  MATH_ERROR_MSG("Fitter::SetFCN","FCN function has zero parameters ");
181  return false;
182  }
183  if (params != 0 )
184  fConfig.SetParamsSettings(npar, params);
185  else {
186  if ( fConfig.ParamsSettings().size() != npar) {
187  MATH_ERROR_MSG("Fitter::SetFCN","wrong fit parameter settings");
188  return false;
189  }
190  }
191 
192  fBinFit = chi2fit;
193  fDataSize = dataSize;
194 
195  // keep also a copy of FCN function and set this in minimizer so they will be managed together
196  // (remember that cloned copy will still depends on data and model function pointers)
197  fObjFunction = std::auto_ptr<ROOT::Math::IMultiGenFunction> ( fcn.Clone() );
198 
199  return true;
200 }
201 
202 bool Fitter::SetFCN(const ROOT::Math::IMultiGradFunction & fcn, const double * params, unsigned int dataSize , bool chi2fit) {
203  // set the objective function for the fit
204  // if params is not NULL create the parameter settings
205  if (!SetFCN(static_cast<const ROOT::Math::IMultiGenFunction &>(fcn),params, dataSize, chi2fit) ) return false;
206  fUseGradient = true;
207  return true;
208 }
209 
210 bool Fitter::SetFCN(const ROOT::Math::FitMethodFunction & fcn, const double * params) {
211  // set the objective function for the fit
212  // if params is not NULL create the parameter settings
213  bool chi2fit = (fcn.Type() == ROOT::Math::FitMethodFunction::kLeastSquare );
214  if (!SetFCN(fcn,params,fcn.NPoints(), chi2fit) ) return false;
215  fUseGradient = false;
216  fFitType = fcn.Type();
217  return true;
218 }
219 
220 bool Fitter::SetFCN(const ROOT::Math::FitMethodGradFunction & fcn, const double * params) {
221  // set the objective function for the fit
222  // if params is not NULL create the parameter settings
223  bool chi2fit = (fcn.Type() == ROOT::Math::FitMethodGradFunction::kLeastSquare );
224  if (!SetFCN(fcn,params,fcn.NPoints(), chi2fit) ) return false;
225  fUseGradient = true;
226  fFitType = fcn.Type();
227  return true;
228 }
229 
230 
231 
232 bool Fitter::FitFCN(const BaseFunc & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
233  // fit a user provided FCN function
234  // create fit parameter settings
235  if (!SetFCN(fcn, params,dataSize,chi2fit) ) return false;
236  return FitFCN();
237 }
238 
239 
240 
241 bool Fitter::FitFCN(const BaseGradFunc & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
242  // fit a user provided FCN gradient function
243 
244  if (!SetFCN(fcn, params,dataSize, chi2fit) ) return false;
245  return FitFCN();
246 }
247 
248 bool Fitter::FitFCN(const ROOT::Math::FitMethodFunction & fcn, const double * params) {
249  // fit using the passed objective function for the fit
250  if (!SetFCN(fcn, params) ) return false;
251  return FitFCN();
252 }
253 
254 bool Fitter::FitFCN(const ROOT::Math::FitMethodGradFunction & fcn, const double * params) {
255  // fit using the passed objective function for the fit
256  if (!SetFCN(fcn, params) ) return false;
257  return FitFCN();
258 }
259 
260 
261 bool Fitter::SetFCN(MinuitFCN_t fcn, int npar, const double * params , unsigned int dataSize , bool chi2fit ){
262  // set TMinuit style FCN type (global function pointer)
263  // create corresponfing objective function from that function
264 
265  if (npar == 0) {
266  npar = fConfig.ParamsSettings().size();
267  if (npar == 0) {
268  MATH_ERROR_MSG("Fitter::FitFCN","Fit Parameter settings have not been created ");
269  return false;
270  }
271  }
272 
273  ROOT::Fit::FcnAdapter newFcn(fcn,npar);
274  return SetFCN(newFcn,params,dataSize,chi2fit);
275 }
276 
277 bool Fitter::FitFCN(MinuitFCN_t fcn, int npar, const double * params , unsigned int dataSize , bool chi2fit ) {
278  // fit using Minuit style FCN type (global function pointer)
279  // create corresponfing objective function from that function
280  if (!SetFCN(fcn, npar, params, dataSize, chi2fit)) return false;
281  fUseGradient = false;
282  return FitFCN();
283 }
284 
// FitFCN(): run the minimization on the previously set objective function.
// NOTE(review): the "bool Fitter::FitFCN() {" signature line (orig. line 285)
// is missing from this extract. The first statement dereferences fResult —
// presumably fResult is guaranteed non-null whenever fFunc is set; confirm.
286  // fit using the previously set FCN function
287 
288  // in case a model function exists from a previous fit - reset shared-ptr
289  if (fFunc && fResult->FittedFunction() == 0) fFunc.reset();
290 
291  if (!fObjFunction) {
292  MATH_ERROR_MSG("Fitter::FitFCN","Objective function has not been set");
293  return false;
294  }
295  // look if FCN is of a known type and we can get some model function and data objects
296  ExamineFCN();
297  // init the minimizer
298  if (!DoInitMinimizer() ) return false;
299  // perform the minimization
300  return DoMinimization();
301 }
302 
// EvalFCN(): evaluate the objective function once at the parameter values
// stored in fConfig, creating a fresh FitResult that records the value and
// a single call; no minimization is performed.
// NOTE(review): the "bool Fitter::EvalFCN() {" signature line (orig. line
// 303) is missing from this extract.
304  // evaluate the FCN using the stored values in fConfig
305 
306  if (fFunc && fResult->FittedFunction() == 0) fFunc.reset();
307 
308  if (!fObjFunction) {
309  MATH_ERROR_MSG("Fitter::FitFCN","Objective function has not been set");
310  return false;
311  }
312  // create a Fit result from the fit configuration
313  fResult = std::shared_ptr<ROOT::Fit::FitResult>(new ROOT::Fit::FitResult(fConfig) );
314  // evaluate one time the FCN
315  double fcnval = (*fObjFunction)(fResult->GetParams() );
316  // update fit result
317  fResult->fVal = fcnval;
318  fResult->fNCalls++;
319  return true;
320 }
321 
322 
// DoLeastSquareFit(): perform a chi2 (least-square) fit on binned data.
// Builds a Chi2FCN from the stored BinData and model function — using the
// gradient variant when fUseGradient is set and the function supports it —
// and hands it to DoMinimization.
// NOTE(review): the "bool Fitter::DoLeastSquareFit() {" signature line
// (orig. line 323) is missing from this extract.
324 
325  // perform a chi2 fit on a set of binned data
326  std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
327  assert(data);
328 
329  // check function
330  if (!fFunc) {
331  MATH_ERROR_MSG("Fitter::DoLeastSquareFit","model function is not set");
332  return false;
333  }
334 
335 #ifdef DEBUG
336  std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[3].IsBound() << " lower limit " << Config().ParamsSettings()[3].LowerLimit() << " upper limit " << Config().ParamsSettings()[3].UpperLimit() << std::endl;
337 #endif
338 
339  fBinFit = true;
340  fDataSize = data->Size();
341 
342  // check if fFunc provides gradient
343  if (!fUseGradient) {
344  // do minimization without using the gradient
345  Chi2FCN<BaseFunc> chi2(data,fFunc);
346  fFitType = chi2.Type();
347  return DoMinimization (chi2);
348  }
349  else {
350  // use gradient
351  if (fConfig.MinimizerOptions().PrintLevel() > 0)
352  MATH_INFO_MSG("Fitter::DoLeastSquareFit","use gradient from model function");
353  std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
354  if (gradFun) {
355  Chi2FCN<BaseGradFunc> chi2(data,gradFun);
356  fFitType = chi2.Type();
357  return DoMinimization (chi2);
358  }
359  MATH_ERROR_MSG("Fitter::DoLeastSquareFit","wrong type of function - it does not provide gradient");
360  }
361  return false;
362 }
363 
// NOTE(review): orig. lines 380-381 (the block that sets the minimizer error
// definition to 0.5 for likelihood fits when the user kept the default) are
// missing from this extract — that is why the closing brace on line 382
// below appears unmatched. Code left untouched.
364 bool Fitter::DoBinnedLikelihoodFit(bool extended) {
365  // perform a likelihood fit on a set of binned data
366  // The fit is extended (Poisson logl) by default
367 
368  std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
369  assert(data);
370 
371  bool useWeight = fConfig.UseWeightCorrection();
372 
373  // check function
374  if (!fFunc) {
375  MATH_ERROR_MSG("Fitter::DoLikelihoodFit","model function is not set");
376  return false;
377  }
378 
379  // logl fit (error should be 0.5) set if different than default values (of 1)
382  }
383 
384  if (useWeight && fConfig.MinosErrors() ) {
385  MATH_INFO_MSG("Fitter::DoLikelihoodFit","MINOS errors cannot be computed in weighted likelihood fits");
386  fConfig.SetMinosErrors(false);
387  }
388 
389 
390  fBinFit = true;
391  fDataSize = data->Size();
392 
393  // create a chi2 function to be used for the equivalent chi-square
394  Chi2FCN<BaseFunc> chi2(data,fFunc);
395 
396  if (!fUseGradient) {
397  // do minimization without using the gradient
398  PoissonLikelihoodFCN<BaseFunc> logl(data,fFunc, useWeight, extended);
399  fFitType = logl.Type();
400  // do minimization
401  if (!DoMinimization (logl, &chi2) ) return false;
402  if (useWeight) {
403  logl.UseSumOfWeightSquare();
404  if (!ApplyWeightCorrection(logl) ) return false;
405  }
406  }
407  else {
408  if (fConfig.MinimizerOptions().PrintLevel() > 0)
409  MATH_INFO_MSG("Fitter::DoLikelihoodFit","use gradient from model function");
410  // check if fFunc provides gradient
411  std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
412  if (!gradFun) {
413  MATH_ERROR_MSG("Fitter::DoLikelihoodFit","wrong type of function - it does not provide gradient");
414  return false;
415  }
416  // use gradient for minimization
417  // not-extended is not implemented in this case
418  if (!extended) {
419  MATH_WARN_MSG("Fitter::DoLikelihoodFit","Not-extended binned fit with gradient not yet supported - do an extended fit");
420  }
421  PoissonLikelihoodFCN<BaseGradFunc> logl(data,gradFun, useWeight, true);
422  fFitType = logl.Type();
423  // do minimization
424  if (!DoMinimization (logl, &chi2) ) return false;
425  if (useWeight) {
426  logl.UseSumOfWeightSquare();
427  if (!ApplyWeightCorrection(logl) ) return false;
428  }
429  }
430  return true;
431 }
432 
433 
// NOTE(review): orig. lines 462-463 (the block that sets the minimizer error
// definition to 0.5 for likelihood fits when the user kept the default) are
// missing from this extract — hence the unmatched closing brace on line 464
// below. Code left untouched.
434 bool Fitter::DoUnbinnedLikelihoodFit(bool extended) {
435  // perform a likelihood fit on a set of unbinned data
436 
437  std::shared_ptr<UnBinData> data = std::dynamic_pointer_cast<UnBinData>(fData);
438  assert(data);
439 
440  bool useWeight = fConfig.UseWeightCorrection();
441 
442  if (!fFunc) {
443  MATH_ERROR_MSG("Fitter::DoLikelihoodFit","model function is not set");
444  return false;
445  }
446 
447  if (useWeight && fConfig.MinosErrors() ) {
448  MATH_INFO_MSG("Fitter::DoLikelihoodFit","MINOS errors cannot be computed in weighted likelihood fits");
449  fConfig.SetMinosErrors(false);
450  }
451 
452 
453  fBinFit = false;
454  fDataSize = data->Size();
455 
456 #ifdef DEBUG
457  int ipar = 0;
458  std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[ipar].IsBound() << " lower limit " << Config().ParamsSettings()[ipar].LowerLimit() << " upper limit " << Config().ParamsSettings()[ipar].UpperLimit() << std::endl;
459 #endif
460 
461  // logl fit (error should be 0.5) set if different than default values (of 1)
464  }
465 
466  if (!fUseGradient) {
467  // do minimization without using the gradient
468  LogLikelihoodFCN<BaseFunc> logl(data,fFunc, useWeight, extended);
469  fFitType = logl.Type();
470  if (!DoMinimization (logl) ) return false;
471  if (useWeight) {
472  logl.UseSumOfWeightSquare();
473  if (!ApplyWeightCorrection(logl) ) return false;
474  }
475  return true;
476  }
477  else {
478  // use gradient : check if fFunc provides gradient
479  if (fConfig.MinimizerOptions().PrintLevel() > 0)
480  MATH_INFO_MSG("Fitter::DoLikelihoodFit","use gradient from model function");
481  std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
482  if (gradFun) {
483  if (extended) {
484  MATH_WARN_MSG("Fitter::DoLikelihoodFit","Extended unbinned fit with gradient not yet supported - do a not-extended fit");
485  }
486  LogLikelihoodFCN<BaseGradFunc> logl(data,gradFun,useWeight, extended);
487  fFitType = logl.Type();
488  if (!DoMinimization (logl) ) return false;
489  if (useWeight) {
490  logl.UseSumOfWeightSquare();
491  if (!ApplyWeightCorrection(logl) ) return false;
492  }
493  return true;
494  }
495  MATH_ERROR_MSG("Fitter::DoLikelihoodFit","wrong type of function - it does not provide gradient");
496  }
497  return false;
498 }
499 
500 
// DoLinearFit(): perform a linear least-square fit on binned data by
// temporarily switching the configured minimizer to "Linear", delegating to
// DoLeastSquareFit(), then restoring the previous minimizer choice.
// NOTE(review): the "bool Fitter::DoLinearFit() {" signature line (orig.
// line 501) is missing from this extract.
502 
503  std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
504  assert(data);
505 
506  // perform a linear fit on a set of binned data
507  std::string prevminimizer = fConfig.MinimizerType();
508  fConfig.SetMinimizer("Linear");
509 
510  fBinFit = true;
511 
512  bool ret = DoLeastSquareFit();
513  fConfig.SetMinimizer(prevminimizer.c_str());
514  return ret;
515 }
516 
517 
// CalculateHessErrors(): recompute parameter errors with HESSE after a fit,
// updating the stored FitResult and (optionally) the FitConfig.
// NOTE(review): the "bool Fitter::CalculateHessErrors() {" signature line
// (orig. line 518) and the condition on orig. line 583 guarding the
// GetNCallsFromFCN() assignment are missing from this extract — hence the
// unmatched "}" on line 585 below. Also note the std::auto_ptr on line 576:
// deprecated (removed in C++17); elsewhere fResult is a shared_ptr.
519  // compute the Hesse errors according to configuration
520  // set in the parameters and append value in fit result
521  if (fObjFunction.get() == 0) {
522  MATH_ERROR_MSG("Fitter::CalculateHessErrors","Objective function has not been set");
523  return false;
524  }
525  // assume a fResult pointer always exists
526  assert (fResult.get() );
527 
528  // need a special treatment in case of weighted likelihood fit
529  // (not yet implemented)
530  if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
531  MATH_ERROR_MSG("Fitter::CalculateHessErrors","Re-computation of Hesse errors not implemented for weighted likelihood fits");
532  MATH_INFO_MSG("Fitter::CalculateHessErrors","Do the Fit using configure option FitConfig::SetParabErrors()");
533  return false;
534  }
535  // if (!fUseGradient ) {
536  // ROOT::Math::FitMethodFunction * fcn = dynamic_cast< ROOT::Math::FitMethodFunction *>(fObjFunction.get());
537  // if (fcn && fcn->Type() == ROOT::Math::FitMethodFunction::kLogLikelihood) {
538  // if (!fBinFit) {
539  // ROOT::Math::LogLikelihoodFunction * nll = dynamic_cast< ROOT::Math::LogLikelihoodFunction *>(fcn);
540  // assert(nll);
541  // nll->UseSumOfWeightSquare(false);
542  // }
543  // else {
544  // ROOT::Math::PoissonLikelihoodFunction * nll = dynamic_cast< ROOT::Math::PoissonLikelihoodFunction *>(fcn);
545  // assert(nll);
546  // nll->UseSumOfWeightSquare(false);
547  // }
548  // // reset fcn in minimizer
549  // }
550 
551 
552  // create minimizer if not done or if name has changed
553  if ( !fMinimizer ||
554  fResult->MinimizerType().find(fConfig.MinimizerType()) == std::string::npos ) {
555  bool ret = DoInitMinimizer();
556  if (!ret) {
557  MATH_ERROR_MSG("Fitter::CalculateHessErrors","Error initializing the minimizer");
558  return false;
559  }
560  }
561 
562 
563  if (!fMinimizer ) {
564  MATH_ERROR_MSG("Fitter::CalculateHessErrors","Need to do a fit before calculating the errors");
565  return false;
566  }
567 
568  //run Hesse
569  bool ret = fMinimizer->Hesse();
570  if (!ret) MATH_WARN_MSG("Fitter::CalculateHessErrors","Error when calculating Hessian");
571 
572 
573  // update minimizer results with what comes out from Hesse
574  // in case is empty - create from a FitConfig
575  if (fResult->IsEmpty() )
576  fResult = std::auto_ptr<ROOT::Fit::FitResult>(new ROOT::Fit::FitResult(fConfig) );
577 
578 
579  // re-give a minimizer instance in case it has been changed
580  ret |= fResult->Update(fMinimizer, ret);
581 
582  // when possible get ncalls from FCN and set in fit result
584  fResult->fNCalls = GetNCallsFromFCN();
585  }
586 
587  // set also new errors in FitConfig
588  if (fConfig.UpdateAfterFit() && ret) DoUpdateFitConfig();
589 
590  return ret;
591 }
592 
593 
// CalculateMinosErrors(): run MINOS on the selected parameters (all of them
// when FitConfig::MinosParams() is empty) and store the asymmetric errors in
// the FitResult. Returns true if MINOS succeeded for at least one parameter.
// NOTE(review): the "bool Fitter::CalculateMinosErrors() {" signature line
// (orig. line 594) is missing from this extract.
595  // compute the Minos errors according to configuration
596  // set in the parameters and append value in fit result
597  // normally Minos errors are computed just after the minimization
598  // (in DoMinimization) when creating the FitResult if the
599  // FitConfig::MinosErrors() flag is set
600 
601  if (!fMinimizer.get() ) {
602  MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minimizer does not exist - cannot calculate Minos errors");
603  return false;
604  }
605 
606  if (!fResult.get() || fResult->IsEmpty() ) {
607  MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Invalid Fit Result - cannot calculate Minos errors");
608  return false;
609  }
610 
611  if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
612  MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Computation of MINOS errors not implemented for weighted likelihood fits");
613  return false;
614  }
615 
616  // set flag to compute Minos error to false in FitConfig to avoid that
617  // following minimization calls perform unwanted Minos error calculations
618  fConfig.SetMinosErrors(false);
619 
620 
621  const std::vector<unsigned int> & ipars = fConfig.MinosParams();
622  unsigned int n = (ipars.size() > 0) ? ipars.size() : fResult->Parameters().size();
623  bool ok = false;
624  for (unsigned int i = 0; i < n; ++i) {
625  double elow, eup;
626  unsigned int index = (ipars.size() > 0) ? ipars[i] : i;
627  bool ret = fMinimizer->GetMinosError(index, elow, eup);
628  if (ret) fResult->SetMinosError(index, elow, eup);
629  ok |= ret;
630  }
631  if (!ok)
632  MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minos error calculation failed for all parameters");
633 
634  return ok;
635 }
636 
637 
638 
639 // traits for distinhuishing fit methods functions from generic objective functions
640 template<class Func>
641 struct ObjFuncTrait {
642  static unsigned int NCalls(const Func & ) { return 0; }
643  static int Type(const Func & ) { return -1; }
644  static bool IsGrad() { return false; }
645 };
646 template<>
647 struct ObjFuncTrait<ROOT::Math::FitMethodFunction> {
648  static unsigned int NCalls(const ROOT::Math::FitMethodFunction & f ) { return f.NCalls(); }
649  static int Type(const ROOT::Math::FitMethodFunction & f) { return f.Type(); }
650  static bool IsGrad() { return false; }
651 };
652 template<>
653 struct ObjFuncTrait<ROOT::Math::FitMethodGradFunction> {
654  static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction & f ) { return f.NCalls(); }
655  static int Type(const ROOT::Math::FitMethodGradFunction & f) { return f.Type(); }
656  static bool IsGrad() { return true; }
657 };
658 
// DoInitMinimizer(): create the minimizer from the FitConfig and load it with
// the stored objective function (downcast to the gradient interface when
// gradient use was requested) and the parameter settings.
// NOTE(review): the "bool Fitter::DoInitMinimizer() {" signature line (orig.
// line 659) is missing from this extract. The comment on line 672 predates
// the shared_ptr migration: assigning the shared_ptr releases any previously
// held minimizer.
660  //initialize minimizer by creating it
661  // and set there the objective function
662  // obj function must have been copied before
663  assert(fObjFunction.get() );
664 
665  // check configuration and objective function
666  if ( fConfig.ParamsSettings().size() != fObjFunction->NDim() ) {
667  MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong function dimension or wrong size for FitConfig");
668  return false;
669  }
670 
671  // create first Minimizer
672  // assigning the smart pointer will delete the previous existing one
673  fMinimizer = std::shared_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
674  if (fMinimizer.get() == 0) {
675  MATH_ERROR_MSG("Fitter::FitFCN","Minimizer cannot be created");
676  return false;
677  }
678 
679  // in case of gradient function one needs to downcast the pointer
680  if (fUseGradient) {
681  const ROOT::Math::IMultiGradFunction * gradfcn = dynamic_cast<const ROOT::Math::IMultiGradFunction *> (fObjFunction.get() );
682  if (!gradfcn) {
683  MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong type of function - it does not provide gradient");
684  return false;
685  }
686  fMinimizer->SetFunction( *gradfcn);
687  }
688  else
689  fMinimizer->SetFunction( *fObjFunction);
690 
691 
692  fMinimizer->SetVariables(fConfig.ParamsSettings().begin(), fConfig.ParamsSettings().end() );
693 
694  // if requested parabolic error do correct error analysis by the minimizer (call HESSE)
695  if (fConfig.ParabErrors()) fMinimizer->SetValidError(true);
696 
697  return true;
698 
699 }
700 
701 
// DoMinimization(const ROOT::Math::IMultiGenFunction * chi2func): run the
// already-initialized minimizer and fill the FitResult (including objective
// function, data, call count and optionally normalized errors).
// NOTE(review): the signature line (orig. line 702) and two condition lines
// are missing from this extract: orig. line 716 (guard for the
// GetNCallsFromFCN() assignment — hence the unmatched "}" on line 718) and
// orig. line 729 (guard for NormalizeErrors(), presumably checking
// FitConfig::NormalizeErrors(); confirm against the full source).
703  // perform the minimization (assume we have already initialized the minimizer)
704 
705  assert(fMinimizer );
706 
707  bool ret = fMinimizer->Minimize();
708 
709  // unsigned int ncalls = ObjFuncTrait<ObjFunc>::NCalls(*fcn);
710  // int fitType = ObjFuncTrait<ObjFunc>::Type(objFunc);
711 
712  if (!fResult) fResult = std::shared_ptr<FitResult> ( new FitResult() );
713  fResult->FillResult(fMinimizer,fConfig, fFunc, ret, fDataSize, fBinFit, chi2func );
714 
715  // when possible get ncalls from FCN and set in fit result
717  fResult->fNCalls = GetNCallsFromFCN();
718  }
719 
720  // fill information in fit result
721  fResult->fObjFunc = fObjFunction;
722  fResult->fFitData = fData;
723 
724 
725 #ifdef DEBUG
726  std::cout << "ROOT::Fit::Fitter::DoMinimization : ncalls = " << fResult->fNCalls << " type of objfunc " << fFitFitResType << " typeid: " << typeid(*fObjFunction).name() << " use gradient " << fUseGradient << std::endl;
727 #endif
728 
730  fResult->NormalizeErrors();
731 
732 
733 
734  // set also new parameter values and errors in FitConfig
735  if (fConfig.UpdateAfterFit() && ret) DoUpdateFitConfig();
736 
737  return ret;
738 }
739 
740 bool Fitter::DoMinimization(const BaseFunc & objFunc, const ROOT::Math::IMultiGenFunction * chi2func) {
741  // perform the minimization initializing the minimizer starting from a given obj function
742 
743  // keep also a copy of FCN function and set this in minimizer so they will be managed together
744  // (remember that cloned copy will still depends on data and model function pointers)
745  fObjFunction = std::auto_ptr<ROOT::Math::IMultiGenFunction> ( objFunc.Clone() );
746  if (!DoInitMinimizer()) return false;
747  return DoMinimization(chi2func);
748 }
749 
750 
// DoUpdateFitConfig(): after a valid fit, copy the fitted parameter values
// (and use the errors as new step sizes) back into the FitConfig.
// NOTE(review): the "void Fitter::DoUpdateFitConfig() {" signature line
// (orig. line 751) and orig. line 755 — which declares `par`, presumably
// "ParameterSettings & par = fConfig.ParSettings(i);" — are missing from
// this extract; confirm against the full source.
752  // update the fit configuration after a fit using the obtained result
753  if (fResult->IsEmpty() || !fResult->IsValid() ) return;
754  for (unsigned int i = 0; i < fConfig.NPar(); ++i) {
756  par.SetValue( fResult->Value(i) );
757  if (fResult->Error(i) > 0) par.SetStepSize( fResult->Error(i) );
758  }
759 }
760 
// GetNCallsFromFCN(): return the number of function calls recorded by the
// stored objective function, when it is a fit-method function (gradient or
// not); 0 otherwise.
// NOTE(review): the "int Fitter::GetNCallsFromFCN() {" signature line (orig.
// line 761) is missing from this extract.
762  // retrieve ncalls from the fit method functions
763  // this function is called when minimizer does not provide a way of returning the number of function calls
764  int ncalls = 0;
765  if (!fUseGradient) {
766  const ROOT::Math::FitMethodFunction * fcn = dynamic_cast<const ROOT::Math::FitMethodFunction *>(fObjFunction.get());
767  if (fcn) ncalls = fcn->NCalls();
768  }
769  else {
770  const ROOT::Math::FitMethodGradFunction * fcn = dynamic_cast<const ROOT::Math::FitMethodGradFunction*>(fObjFunction.get());
771  if (fcn) ncalls = fcn->NCalls();
772  }
773  return ncalls;
774 }
775 
776 
777 bool Fitter::ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction & loglw2, bool minimizeW2L) {
778  // apply correction for weight square
779  // Compute Hessian of the loglikelihood function using the sum of the weight squared
780  // This method assumes:
781  // - a fit has been done before and a covariance matrix exists
782  // - the objective function is a likelihood function and Likelihood::UseSumOfWeightSquare()
783  // has been called before
784 
785  if (fMinimizer.get() == 0) {
786  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Must perform first a fit before applying the correction");
787  return false;
788  }
789 
790  unsigned int n = loglw2.NDim();
791  // correct errors for weight squared
792  std::vector<double> cov(n*n);
793  bool ret = fMinimizer->GetCovMatrix(&cov[0] );
794  if (!ret) {
795  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Previous fit has no valid Covariance matrix");
796  return false;
797  }
798  // need to re-init the minimizer and set w2
799  fObjFunction = std::auto_ptr<ROOT::Math::IMultiGenFunction> ( loglw2.Clone() );
800  // need to re-initialize the minimizer for the changes applied in the
801  // objective functions
802  if (!DoInitMinimizer()) return false;
803 
804  //std::cout << "Running Hesse ..." << std::endl;
805 
806  // run eventually before a minimization
807  // ignore its error
808  if (minimizeW2L) fMinimizer->Minimize();
809  // run Hesse on the log-likelihood build using sum of weight squared
810  ret = fMinimizer->Hesse();
811  if (!ret) {
812  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error running Hesse on weight2 likelihood - cannot compute errors");
813  return false;
814  }
815 
816  if (fMinimizer->CovMatrixStatus() != 3) {
817  MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not accurate, the errors may be not reliable");
818  if (fMinimizer->CovMatrixStatus() == 2)
819  MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood was forced to be defined positive");
820  if (fMinimizer->CovMatrixStatus() <= 0)
821  // probably should have failed before
822  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not valid !");
823  }
824 
825  // std::vector<double> c(n*n);
826  // ret = fMinimizer->GetCovMatrix(&c2[0] );
827  // if (!ret) std::cout << "Error reading cov matrix " << fMinimizer->Status() << std::endl;
828  // TMatrixDSym cmat2(n,&c2[0]);
829  // std::cout << "Cov matrix of w2 " << std::endl;
830  // cmat2.Print();
831  // cmat2.Invert();
832  // std::cout << "Hessian of w2 " << std::endl;
833  // cmat2.Print();
834 
835  // get Hessian matrix from weight-square likelihood
836  std::vector<double> hes(n*n);
837  ret = fMinimizer->GetHessianMatrix(&hes[0] );
838  if (!ret) {
839  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error retrieving Hesse on weight2 likelihood - cannot compute errors");
840  return false;
841  }
842 
843  // for debug
844  // std::cout << "Hessian W2 matrix " << std::endl;
845  // for (unsigned int i = 0; i < n; ++i) {
846  // for (unsigned int j = 0; j < n; ++j) {
847  // std::cout << std::setw(12) << hes[i*n + j] << " , ";
848  // }
849  // std::cout << std::endl;
850  // }
851 
852  // perform product of matvrix cov * hes * cov
853  // since we do not want to add matrix dependence do product by hand
854  // first do hes * cov
855  std::vector<double> tmp(n*n);
856  for (unsigned int i = 0; i < n; ++i) {
857  for (unsigned int j = 0; j < n; ++j) {
858  for (unsigned int k = 0; k < n; ++k)
859  tmp[i*n+j] += hes[i*n + k] * cov[k*n + j];
860  }
861  }
862  // do multiplication now cov * tmp save result
863  std::vector<double> newCov(n*n);
864  for (unsigned int i = 0; i < n; ++i) {
865  for (unsigned int j = 0; j < n; ++j) {
866  for (unsigned int k = 0; k < n; ++k)
867  newCov[i*n+j] += cov[i*n + k] * tmp[k*n + j];
868  }
869  }
870  // update fit result with new corrected covariance matrix
871  unsigned int k = 0;
872  for (unsigned int i = 0; i < n; ++i) {
873  fResult->fErrors[i] = std::sqrt( newCov[i*(n+1)] );
874  for (unsigned int j = 0; j <= i; ++j)
875  fResult->fCovMatrix[k++] = newCov[i *n + j];
876  }
877  //fResult->PrintCovMatrix(std::cout);
878 
879  return true;
880 }
881 
882 
883 
// ExamineFCN(): inspect the user-provided FCN; when it is of a known type
// (Chi2FCN or PoissonLikelihoodFCN) the fit data and model function can be
// extracted from it. The function also sets the model function if unset.
// NOTE(review): the "void Fitter::ExamineFCN() {" signature line (orig. line
// 884) and the body lines performing the actual extraction (orig. lines
// 890-895) are missing from this extract — only the fall-through return is
// visible here. Code left untouched.
885  // return a pointer to the binned data used in the fit
886  // works only for chi2 or binned likelihood fits
887  // thus when the objective function stored is a Chi2Func or a PoissonLikelihood
888  // The function also sets the model function correctly if it has not been set
889 
892 
895 
896  //MATH_INFO_MSG("Fitter::ExamineFCN","Objective function is not of a known type - FitData and ModelFunction objects are not available");
897  return;
898 }
899 
900 
901 
902 
903  } // end namespace Fit
904 
905 } // end namespace ROOT
906 
const FitConfig & Config() const
access to the fit configuration (const method)
Definition: Fitter.h:382
Interface (abstract class) for multi-dimensional functions providing a gradient calculation.
Definition: IFunction.h:322
double par[1]
Definition: unuranDistr.cxx:38
const std::string & MinimizerType() const
return type of minimizer package
Definition: FitConfig.h:160
void ExamineFCN()
look at the user provided FCN and get data and model function if they derive from ROOT::Fit FCN class...
Definition: Fitter.cxx:884
Namespace for new ROOT classes and functions.
Definition: ROOT.py:1
ROOT::Math::Minimizer * CreateMinimizer()
create a new minimizer according to chosen configuration
Definition: FitConfig.cxx:203
bool CalculateMinosErrors()
perform an error analysis on the result using MINOS. To be called only after fitting and when a minimi...
Definition: Fitter.cxx:594
virtual Type_t Type() const
return the type of method, override if needed
Fitter & operator=(const Fitter &rhs)
Assignment operator (disabled, class is not copyable)
Definition: Fitter.cxx:80
bool EvalFCN()
Perform a simple FCN evaluation.
Definition: Fitter.cxx:303
#define assert(cond)
Definition: unittest.h:542
Class, describing value, limits and step size of the parameters Provides functionality also to set/re...
virtual BaseObjFunction::Type_t Type() const
get type of fit method function
bool DoBinnedLikelihoodFit(bool extended=true)
binned likelihood fit
Definition: Fitter.cxx:364
LogLikelihoodFCN class for likelihood fits.
virtual unsigned int NPoints() const
return the number of data points used in evaluating the function
bool ParabErrors() const
do analysis for parabolic errors
Definition: FitConfig.h:174
const std::vector< unsigned int > & MinosParams() const
return vector of parameter indices for which the Minos Error will be computed
Definition: FitConfig.h:187
std::shared_ptr< IModelFunction > fFunc
Definition: Fitter.h:494
Class describing the unbinned data sets (just x coordinates values) of any dimensions.
Definition: UnBinData.h:47
double ErrorDef() const
error definition
bool DoLinearFit()
linear least square fit
Definition: Fitter.cxx:501
ROOT::Math::MinimizerOptions & MinimizerOptions()
access to the minimizer control parameter (non const method)
Definition: FitConfig.h:138
void SetErrorDef(double err)
set error def
#define MATH_WARN_MSG(loc, str)
Definition: Error.h:47
ClassImp(TIterator) Bool_t TIterator return false
Compare two iterator objects.
Definition: TIterator.cxx:20
std::shared_ptr< ROOT::Fit::FitResult > fResult
pointer to the object containing the result of the fit
Definition: Fitter.h:496
MultiDimParamFunctionAdapter class to wrap a one-dimensional parametric function in a multi dimension...
bool CalculateHessErrors()
perform an error analysis on the result using the Hessian. Errors are obtained from the inverse of the ...
Definition: Fitter.cxx:518
void SetValue(double val)
set the value
virtual BaseObjFunction::Type_t Type() const
get type of fit method function
void CreateParamsSettings(const ROOT::Math::IParamMultiFunction &func)
set the parameter settings from a model function.
Definition: FitConfig.cxx:174
double sqrt(double)
Fitter()
Default constructor.
Definition: Fitter.cxx:47
bool UpdateAfterFit() const
Update configuration after a fit using the FitResult.
Definition: FitConfig.h:180
bool DoInitMinimizer()
Definition: Fitter.cxx:659
class evaluating the log likelihood for binned Poisson likelihood fits it is template to distinguish ...
const std::vector< ROOT::Fit::ParameterSettings > & ParamsSettings() const
get the vector of parameter settings (const method)
Definition: FitConfig.h:90
bool NormalizeErrors() const
flag to check if resulting errors are to be normalized according to chi2/ndf
Definition: FitConfig.h:171
bool ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction &loglw2, bool minimizeW2L=false)
apply correction in the error matrix for the weights for likelihood fits. This method can be called on...
Definition: Fitter.cxx:777
#define MATH_ERROR_MSG(loc, str)
Definition: Error.h:50
int PrintLevel() const
non-static methods for retrieving options
void SetMinosErrors(bool on=true)
set Minos errors computation to be performed after fitting
Definition: FitConfig.h:198
Chi2FCN class for binned fits using the least square methods.
Definition: Chi2FCN.h:66
~Fitter()
Destructor.
Definition: Fitter.cxx:66
std::shared_ptr< ROOT::Fit::FitData > fData
pointer to the fit data (binned or unbinned data)
Definition: Fitter.h:500
FitConfig fConfig
Definition: Fitter.h:492
#define MATH_INFO_MSG(loc, str)
Definition: Error.h:44
void SetMinimizer(const char *type, const char *algo=0)
set minimizer type
Definition: FitConfig.h:152
void SetStepSize(double err)
set the step size
IParamFunction interface (abstract class) describing multi-dimensional parameteric functions It is a ...
BasicFCN class: base class for the objective functions used in the fits It has a reference to the dat...
Definition: BasicFCN.h:43
Fitter class, entry point for performing all type of fits.
Definition: Fitter.h:94
virtual unsigned int NDim() const =0
Retrieve the dimension of the function.
const ParameterSettings & ParSettings(unsigned int i) const
get the parameter settings for the i-th parameter (const method)
Definition: FitConfig.h:80
unsigned int NPar() const
number of parameters settings
Definition: FitConfig.h:100
bool FitFCN()
Perform a fit with the previously set FCN function.
Definition: Fitter.cxx:285
void UseSumOfWeightSquare(bool on=true)
Class describing the binned data sets : vectors of x coordinates, y values and optionally error on y ...
Definition: BinData.h:61
void SetFunction(const IModelFunction &func, bool useGradient=false)
Set the fitted function (model function) from a parametric function interface.
Definition: Fitter.cxx:99
std::shared_ptr< ROOT::Math::Minimizer > fMinimizer
pointer to the used minimizer
Definition: Fitter.h:498
Type
enumeration specifying the integration types.
bool UseWeightCorrection() const
Apply Weight correction for error matrix computation.
Definition: FitConfig.h:183
Interface (abstract class) for parametric one-dimensional gradient functions providing in addition to...
class containg the result of the fit and all the related information (fitted parameter values...
Definition: FitResult.h:52
Specialized IParamFunction interface (abstract class) for one-dimensional parametric functions It is ...
FitMethodFunction class Interface for objective functions (like chi2 and likelihood used in the fit) ...
Definition: Fitter.h:57
TFitResultPtr Fit(FitObject *h1, TF1 *f1, Foption_t &option, const ROOT::Math::MinimizerOptions &moption, const char *goption, ROOT::Fit::DataRange &range)
Definition: HFitImpl.cxx:132
Interface (abstract class) for parametric gradient multi-dimensional functions providing in addition ...
double func(double *x, double *p)
Definition: stressTF1.cxx:213
std::shared_ptr< ROOT::Math::IMultiGenFunction > fObjFunction
pointer to the used objective function
Definition: Fitter.h:502
bool GetDataFromFCN()
internal functions to get data set and model function from FCN useful for fits done with customized F...
Definition: Fitter.h:510
bool DoLeastSquareFit()
least square fit
Definition: Fitter.cxx:323
bool useGradient
bool fUseGradient
Definition: Fitter.h:482
BasicFitMethodFunction< ROOT::Math::IMultiGradFunction > FitMethodGradFunction
Definition: Fitter.h:61
bool SetFCN(unsigned int npar, Function &fcn, const double *params=0, unsigned int dataSize=0, bool chi2fit=false)
Set a generic FCN function as a C++ callable object implementing double () (const double *) Note that...
Definition: Fitter.h:543
MultiDimParamGradFunctionAdapter class to wrap a one-dimensional parametric gradient function in a mu...
void SetParamsSettings(unsigned int npar, const double *params, const double *vstep=0)
set the parameter settings from number of parameters and a vector of values and optionally step value...
Definition: FitConfig.cxx:136
int GetNCallsFromFCN()
Definition: Fitter.cxx:761
void DoUpdateFitConfig()
Definition: Fitter.cxx:751
double result[121]
virtual unsigned int NCalls() const
return the total number of function calls (overrided if needed)
bool MinosErrors() const
do Minos errors analysis on the parameters
Definition: FitConfig.h:177
Documentation for the abstract class IBaseFunctionMultiDim.
Definition: IFunction.h:63
virtual BaseObjFunction::Type_t Type() const
get type of fit method function
Definition: Chi2FCN.h:146
bool DoMinimization(const BaseFunc &f, const ROOT::Math::IMultiGenFunction *chifunc=0)
do minimization
Definition: Fitter.cxx:740
const Int_t n
Definition: legend1.C:16
double gDefaultErrorDef
Definition: Fitter.cxx:44
virtual IBaseFunctionMultiDim * Clone() const =0
Clone a function.
bool DoUnbinnedLikelihoodFit(bool extended=false)
un-binned likelihood fit
Definition: Fitter.cxx:434
BasicFitMethodFunction< ROOT::Math::IMultiGenFunction > FitMethodFunction
Definition: Fitter.h:57