Logo ROOT   6.16/01
Reference Guide
Fitter.cxx
Go to the documentation of this file.
1// @(#)root/mathcore:$Id$
2// Author: L. Moneta Mon Sep 4 17:00:10 2006
3
4/**********************************************************************
5 * *
6 * Copyright (c) 2006 LCG ROOT Math Team, CERN/PH-SFT *
7 * *
8 * *
9 **********************************************************************/
10
11// Implementation file for class Fitter
12
13
14#include "Fit/Fitter.h"
15#include "Fit/Chi2FCN.h"
18#include "Math/Minimizer.h"
21#include "Fit/BasicFCN.h"
22#include "Fit/BinData.h"
23#include "Fit/UnBinData.h"
24#include "Fit/FcnAdapter.h"
25#include "Fit/FitConfig.h"
26#include "Fit/FitResult.h"
27#include "Math/Error.h"
28
29#include <memory>
30
31#include "Math/IParamFunction.h"
32
34
35// #include "TMatrixDSym.h"
36// for debugging
37//#include "TMatrixD.h"
38// #include <iomanip>
39
40namespace ROOT {
41
42 namespace Fit {
43
44// use a static variable to get default minimizer options for error def
45// to see if user has changed it later on. If it has not been changed we set
46// for the likelihood method an error def of 0.5
47// t.b.d : multiply likelihood by 2 so have same error def definition as chi2
49
// Default constructor: no gradient use, unbinned, no fit type, no data yet.
// NOTE(review): the constructor signature line (`Fitter::Fitter() :`) appears
// to have been lost in extraction — confirm against upstream ROOT source.
   fUseGradient(false),
   fBinFit(false),
   fFitType(0),
   fDataSize(0)
{}
57
58Fitter::Fitter(const std::shared_ptr<FitResult> & result) :
59 fUseGradient(false),
60 fBinFit(false),
61 fFitType(0),
62 fDataSize(0),
63 fResult(result)
64{
65 if (result->fFitFunc) SetFunction(*fResult->fFitFunc); // this will create also the configuration
66 if (result->fObjFunc) fObjFunction = fResult->fObjFunc;
67 if (result->fFitData) fData = fResult->fFitData;
68}
69
// NOTE(review): the destructor signature line (`Fitter::~Fitter()`) appears
// to have been lost in extraction — confirm against upstream ROOT source.
{
   // Destructor implementation.

   // nothing to do since we use shared_ptr now
}
76
// NOTE(review): the copy-constructor signature line appears to have been lost
// in extraction — confirm against upstream ROOT source.
{
   // Implementation of copy constructor.
   // copy FitResult, FitConfig and clone fit function
   (*this) = rhs;
}
83
// NOTE(review): the assignment-operator signature line appears to have been
// lost in extraction — confirm against upstream ROOT source.
{
   // Implementation of assignment operator.
   // dummy implementation, since it is private
   if (this == &rhs) return *this; // time saving self-test
   // The member-wise copy below is intentionally disabled (dummy assignment):
// fUseGradient = rhs.fUseGradient;
// fBinFit = rhs.fBinFit;
// fResult = rhs.fResult;
// fConfig = rhs.fConfig;
// // function is copied and managed by FitResult (maybe should use an unique_ptr)
// fFunc = fResult.ModelFunction();
// if (rhs.fFunc != 0 && fResult.ModelFunction() == 0) { // case no fit has been done yet - then clone
// if (fFunc) delete fFunc;
// fFunc = dynamic_cast<IModelFunction *>( (rhs.fFunc)->Clone() );
// assert(fFunc != 0);
// }
   return *this;
}
102
void Fitter::SetFunction(const IModelFunction & func, bool useGradient)
{
   // Set the model (fit) function, optionally requesting use of its gradient.

   fUseGradient = useGradient;
   if (fUseGradient) {
      // honour the gradient request only if the function actually provides one
      const IGradModelFunction * gradFunc = dynamic_cast<const IGradModelFunction*>(&func);
      if (gradFunc) {
         SetFunction(*gradFunc, true);
         return;
      }
      else {
         MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
      }
   }
   fUseGradient = false;

   // set the fit model function (clone the given one and keep a copy )
   //std::cout << "set a non-grad function" << std::endl;

   fFunc = std::shared_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone() ) );
   assert(fFunc);

   // creates the parameter settings
   // NOTE(review): a statement appears to be missing here in this extraction
   // (presumably the one creating parameter settings from fFunc) — confirm
   // against upstream ROOT source.
   fFunc_v.reset();
}
129
void Fitter::SetFunction(const IModel1DFunction & func, bool useGradient)
{
   // Set a 1D model function; it is wrapped into a multi-dim adapter below.
   fUseGradient = useGradient;
   if (fUseGradient) {
      // honour the gradient request only if the function actually provides one
      const IGradModel1DFunction * gradFunc = dynamic_cast<const IGradModel1DFunction*>(&func);
      if (gradFunc) {
         SetFunction(*gradFunc, true);
         return;
      }
      else {
         MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
      }
   }
   fUseGradient = false;
   //std::cout << "set a 1d function" << std::endl;

   // function is cloned when creating the adapter
   fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamFunctionAdapter(func));

   // creates the parameter settings
   // NOTE(review): a statement appears to be missing here in this extraction
   // (presumably the one creating parameter settings from fFunc) — confirm
   // against upstream ROOT source.
   fFunc_v.reset();
}
153
void Fitter::SetFunction(const IGradModelFunction & func, bool useGradient)
{
   // Set a model function that provides its own gradient.
   fUseGradient = useGradient;
   //std::cout << "set a grad function" << std::endl;
   // set the fit model function (clone the given one and keep a copy )
   fFunc = std::shared_ptr<IModelFunction>( dynamic_cast<IGradModelFunction *> ( func.Clone() ) );
   assert(fFunc);

   // creates the parameter settings
   // NOTE(review): a statement appears to be missing here in this extraction
   // (presumably the one creating parameter settings from fFunc) — confirm
   // against upstream ROOT source.
   fFunc_v.reset();
}
166
167
void Fitter::SetFunction(const IGradModel1DFunction & func, bool useGradient)
{
   //std::cout << "set a 1d grad function" << std::endl;
   // Set a gradient-providing 1D model function; wrapped into a multi-dim
   // gradient adapter below.
   fUseGradient = useGradient;
   // function is cloned when creating the adapter
   fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamGradFunctionAdapter(func));

   // creates the parameter settings
   // NOTE(review): a statement appears to be missing here in this extraction
   // (presumably the one creating parameter settings from fFunc) — confirm
   // against upstream ROOT source.
   fFunc_v.reset();
}
179
180
181bool Fitter::SetFCN(const ROOT::Math::IMultiGenFunction & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
182 // set the objective function for the fit
183 // if params is not NULL create the parameter settings
184 fUseGradient = false;
185 unsigned int npar = fcn.NDim();
186 if (npar == 0) {
187 MATH_ERROR_MSG("Fitter::SetFCN","FCN function has zero parameters ");
188 return false;
189 }
190 if (params != 0 )
191 fConfig.SetParamsSettings(npar, params);
192 else {
193 if ( fConfig.ParamsSettings().size() != npar) {
194 MATH_ERROR_MSG("Fitter::SetFCN","wrong fit parameter settings");
195 return false;
196 }
197 }
198
199 fBinFit = chi2fit;
200 fDataSize = dataSize;
201
202 // keep also a copy of FCN function and set this in minimizer so they will be managed together
203 // (remember that cloned copy will still depends on data and model function pointers)
204 fObjFunction = std::unique_ptr<ROOT::Math::IMultiGenFunction> ( fcn.Clone() );
205
206 return true;
207}
208
209bool Fitter::SetFCN(const ROOT::Math::IMultiGradFunction & fcn, const double * params, unsigned int dataSize , bool chi2fit) {
210 // set the objective function for the fit
211 // if params is not NULL create the parameter settings
212 if (!SetFCN(static_cast<const ROOT::Math::IMultiGenFunction &>(fcn),params, dataSize, chi2fit) ) return false;
213 fUseGradient = true;
214 return true;
215}
216
217bool Fitter::SetFCN(const ROOT::Math::FitMethodFunction & fcn, const double * params) {
218 // set the objective function for the fit
219 // if params is not NULL create the parameter settings
220 bool chi2fit = (fcn.Type() == ROOT::Math::FitMethodFunction::kLeastSquare );
221 if (!SetFCN(fcn,params,fcn.NPoints(), chi2fit) ) return false;
222 fUseGradient = false;
223 fFitType = fcn.Type();
224 return true;
225}
226
227bool Fitter::SetFCN(const ROOT::Math::FitMethodGradFunction & fcn, const double * params) {
228 // set the objective function for the fit
229 // if params is not NULL create the parameter settings
230 bool chi2fit = (fcn.Type() == ROOT::Math::FitMethodGradFunction::kLeastSquare );
231 if (!SetFCN(fcn,params,fcn.NPoints(), chi2fit) ) return false;
232 fUseGradient = true;
233 fFitType = fcn.Type();
234 return true;
235}
236
237
238
239bool Fitter::FitFCN(const BaseFunc & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
240 // fit a user provided FCN function
241 // create fit parameter settings
242 if (!SetFCN(fcn, params,dataSize,chi2fit) ) return false;
243 return FitFCN();
244}
245
246
247
248bool Fitter::FitFCN(const BaseGradFunc & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
249 // fit a user provided FCN gradient function
250
251 if (!SetFCN(fcn, params,dataSize, chi2fit) ) return false;
252 return FitFCN();
253}
254
255bool Fitter::FitFCN(const ROOT::Math::FitMethodFunction & fcn, const double * params) {
256 // fit using the passed objective function for the fit
257 if (!SetFCN(fcn, params) ) return false;
258 return FitFCN();
259}
260
261bool Fitter::FitFCN(const ROOT::Math::FitMethodGradFunction & fcn, const double * params) {
262 // fit using the passed objective function for the fit
263 if (!SetFCN(fcn, params) ) return false;
264 return FitFCN();
265}
266
267
268bool Fitter::SetFCN(MinuitFCN_t fcn, int npar, const double * params , unsigned int dataSize , bool chi2fit ){
269 // set TMinuit style FCN type (global function pointer)
270 // create corresponfing objective function from that function
271
272 if (npar == 0) {
273 npar = fConfig.ParamsSettings().size();
274 if (npar == 0) {
275 MATH_ERROR_MSG("Fitter::FitFCN","Fit Parameter settings have not been created ");
276 return false;
277 }
278 }
279
280 ROOT::Fit::FcnAdapter newFcn(fcn,npar);
281 return SetFCN(newFcn,params,dataSize,chi2fit);
282}
283
284bool Fitter::FitFCN(MinuitFCN_t fcn, int npar, const double * params , unsigned int dataSize , bool chi2fit ) {
285 // fit using Minuit style FCN type (global function pointer)
286 // create corresponfing objective function from that function
287 if (!SetFCN(fcn, npar, params, dataSize, chi2fit)) return false;
288 fUseGradient = false;
289 return FitFCN();
290}
291
// NOTE(review): the function signature line was lost in extraction here (this
// is the body of the no-argument FitFCN overload) — confirm against upstream
// ROOT source.
   // fit using the previously set FCN function

   // in case a model function exists from a previous fit - reset shared-ptr
   // NOTE(review): fResult is dereferenced without a null check here;
   // presumably fResult always exists when fFunc is set — confirm upstream.
   if (fFunc && fResult->FittedFunction() == 0) fFunc.reset();

   if (!fObjFunction) {
      MATH_ERROR_MSG("Fitter::FitFCN","Objective function has not been set");
      return false;
   }
   // look if FCN s of a known type and we can get some modelfunction and data objects
   ExamineFCN();
   // init the minimizer
   if (!DoInitMinimizer() ) return false;
   // perform the minimization
   return DoMinimization();
}
309
// NOTE(review): the function signature line was lost in extraction here (this
// is the body of EvalFCN) — confirm against upstream ROOT source.
   // evaluate the FCN using the stored values in fConfig

   // in case a model function exists from a previous fit - reset shared-ptr
   if (fFunc && fResult->FittedFunction() == 0) fFunc.reset();

   if (!fObjFunction) {
      MATH_ERROR_MSG("Fitter::FitFCN","Objective function has not been set");
      return false;
   }
   // create a Fit result from the fit configuration
   fResult = std::make_shared<ROOT::Fit::FitResult>(fConfig);
   // evaluate one time the FCN
   double fcnval = (*fObjFunction)(fResult->GetParams() );
   // update fit result
   fResult->fVal = fcnval;
   fResult->fNCalls++;
   return true;
}
328
// NOTE(review): the function signature line was lost in extraction here (this
// is the body of DoLeastSquareFit, whose `executionPolicy` parameter is used
// below); two further interior lines are missing — confirm against upstream
// ROOT source.
{

   // perform a chi2 fit on a set of binned data
   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
   assert(data);

   // check function
   if (!fFunc && !fFunc_v) {
      MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "model function is not set");
      return false;
   } else {

#ifdef DEBUG
      std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[3].IsBound() << " lower limit "
                << Config().ParamsSettings()[3].LowerLimit() << " upper limit "
                << Config().ParamsSettings()[3].UpperLimit() << std::endl;
#endif

      fBinFit = true;
      fDataSize = data->Size();
      // check if fFunc provides gradient
      if (!fUseGradient) {
         // do minimzation without using the gradient
         if (fFunc_v) {
            Chi2FCN<BaseFunc, IModelFunction_v> chi2(data, fFunc_v, executionPolicy);
            fFitType = chi2.Type();
            return DoMinimization(chi2);
         } else {
            Chi2FCN<BaseFunc> chi2(data, fFunc, executionPolicy);
            fFitType = chi2.Type();
            return DoMinimization(chi2);
         }
      } else {
         // use gradient
         MATH_INFO_MSG("Fitter::DoLeastSquareFit", "use gradient from model function");

         if (fFunc_v) {
            std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
            if (gradFun) {
               // NOTE(review): the declaration of `chi2` (a gradient Chi2FCN
               // built from gradFun) was lost in extraction here.
               fFitType = chi2.Type();
               return DoMinimization(chi2);
            }
         } else {
            std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
            if (gradFun) {
               Chi2FCN<BaseGradFunc> chi2(data, gradFun);
               fFitType = chi2.Type();
               return DoMinimization(chi2);
            }
         }
         MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "wrong type of function - it does not provide gradient");
      }
   }
   return false;
}
387
bool Fitter::DoBinnedLikelihoodFit(bool extended, const ROOT::Fit::ExecutionPolicy &executionPolicy)
{
   // perform a likelihood fit on a set of binned data
   // The fit is extended (Poisson logl) by default
   // NOTE(review): several interior lines of this function were lost in
   // extraction (the error-def setup whose closing brace survives below, the
   // declarations of the equivalent chi-square `chi2` objects passed to
   // DoMinimization, and statements inside the `if (useWeight)` blocks) —
   // confirm against upstream ROOT source before relying on this listing.

   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
   assert(data);

   // flag for applying the weight-square correction to the errors
   bool useWeight = fConfig.UseWeightCorrection();

   // check function
   if (!fFunc && !fFunc_v) {
      MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "model function is not set");
      return false;
   }

   // logl fit (error should be 0.5) set if different than default values (of 1)
   // NOTE(review): the opening of this conditional was lost in extraction;
   // only its closing brace survives.
   }

   if (useWeight && fConfig.MinosErrors()) {
      MATH_INFO_MSG("Fitter::DoBinnedLikelihoodFit", "MINOS errors cannot be computed in weighted likelihood fits");
      fConfig.SetMinosErrors(false);
   }

   fBinFit = true;
   fDataSize = data->Size();

   if (!fUseGradient) {
      // do minimization without using the gradient
      if (fFunc_v) {
         // create a chi2 function to be used for the equivalent chi-square
         // NOTE(review): declaration of `chi2` lost in extraction
         PoissonLikelihoodFCN<BaseFunc, IModelFunction_v> logl(data, fFunc_v, useWeight, extended, executionPolicy);
         fFitType = logl.Type();
         // do minimization
         if (!DoMinimization(logl, &chi2))
            return false;
         if (useWeight) {
            // NOTE(review): statement lost in extraction
            if (!ApplyWeightCorrection(logl))
               return false;
         }
      } else {
         // create a chi2 function to be used for the equivalent chi-square
         // NOTE(review): declaration of `chi2` lost in extraction
         PoissonLikelihoodFCN<BaseFunc> logl(data, fFunc, useWeight, extended, executionPolicy);
         fFitType = logl.Type();
         // do minimization
         if (!DoMinimization(logl, &chi2))
            return false;
         if (useWeight) {
            // NOTE(review): statement lost in extraction
            if (!ApplyWeightCorrection(logl))
               return false;
         }
      }
   } else {
      if (fFunc_v) {
         // create a chi2 function to be used for the equivalent chi-square
         // NOTE(review): declaration of `chi2` lost in extraction
         std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
         if (!gradFun) {
            MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
            return false;
         }
         PoissonLikelihoodFCN<BaseGradFunc, IModelFunction_v> logl(data, gradFun, useWeight, true, executionPolicy);
         fFitType = logl.Type();
         // do minimization
         if (!DoMinimization(logl, &chi2))
            return false;
         if (useWeight) {
            // NOTE(review): statement lost in extraction
            if (!ApplyWeightCorrection(logl))
               return false;
         }
      } else {
         // create a chi2 function to be used for the equivalent chi-square
         // NOTE(review): line(s) lost in extraction here
         MATH_INFO_MSG("Fitter::DoLikelihoodFit", "use gradient from model function");
         // check if fFunc provides gradient
         std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
         if (!gradFun) {
            MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
            return false;
         }
         // use gradient for minimization
         // not-extended is not implemented in this case
         if (!extended) {
            MATH_WARN_MSG("Fitter::DoBinnedLikelihoodFit",
                          "Not-extended binned fit with gradient not yet supported - do an extended fit");
         }
         PoissonLikelihoodFCN<BaseGradFunc> logl(data, gradFun, useWeight, true, executionPolicy);
         fFitType = logl.Type();
         // do minimization
         if (!DoMinimization(logl, &chi2))
            return false;
         if (useWeight) {
            // NOTE(review): statement lost in extraction
            if (!ApplyWeightCorrection(logl))
               return false;
         }
      }
   }
   return true;
}
496
bool Fitter::DoUnbinnedLikelihoodFit(bool extended, const ROOT::Fit::ExecutionPolicy &executionPolicy) {
   // perform a likelihood fit on a set of unbinned data
   // NOTE(review): several interior lines of this function were lost in
   // extraction (the error-def setup whose closing brace survives below and
   // statements inside the `if (useWeight)` blocks) — confirm against
   // upstream ROOT source before relying on this listing.

   std::shared_ptr<UnBinData> data = std::dynamic_pointer_cast<UnBinData>(fData);
   assert(data);

   // flag for applying the weight-square correction to the errors
   bool useWeight = fConfig.UseWeightCorrection();

   if (!fFunc && !fFunc_v) {
      MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit","model function is not set");
      return false;
   }

   if (useWeight && fConfig.MinosErrors() ) {
      MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit","MINOS errors cannot be computed in weighted likelihood fits");
      fConfig.SetMinosErrors(false);
   }


   fBinFit = false;
   fDataSize = data->Size();

#ifdef DEBUG
   int ipar = 0;
   std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[ipar].IsBound() << " lower limit " << Config().ParamsSettings()[ipar].LowerLimit() << " upper limit " << Config().ParamsSettings()[ipar].UpperLimit() << std::endl;
#endif

   // logl fit (error should be 0.5) set if different than default values (of 1)
   // NOTE(review): the opening of this conditional was lost in extraction;
   // only its closing brace survives.
   }

   if (!fUseGradient) {
      // do minimization without using the gradient
      if (fFunc_v ){
         LogLikelihoodFCN<BaseFunc, IModelFunction_v> logl(data, fFunc_v, useWeight, extended, executionPolicy);
         fFitType = logl.Type();
         if (!DoMinimization (logl) ) return false;
         if (useWeight) {
            // NOTE(review): statement lost in extraction
            if (!ApplyWeightCorrection(logl) ) return false;
         }
         return true;
      } else {
         LogLikelihoodFCN<BaseFunc> logl(data, fFunc, useWeight, extended, executionPolicy);

         fFitType = logl.Type();
         if (!DoMinimization (logl) ) return false;
         if (useWeight) {
            // NOTE(review): statement lost in extraction
            if (!ApplyWeightCorrection(logl) ) return false;
         }
         return true;
      }
   } else {
      // use gradient : check if fFunc provides gradient
      if (fFunc_v) {
         // NOTE(review): line lost in extraction here
         MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "use gradient from model function");
         std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
         if (gradFun) {
            if (extended) {
               MATH_WARN_MSG("Fitter::DoUnbinnedLikelihoodFit",
                             "Extended unbinned fit with gradient not yet supported - do a not-extended fit");
            }
            LogLikelihoodFCN<BaseGradFunc, IModelFunction_v> logl(data, gradFun, useWeight, extended);
            fFitType = logl.Type();
            if (!DoMinimization(logl))
               return false;
            if (useWeight) {
               // NOTE(review): statement lost in extraction
               if (!ApplyWeightCorrection(logl))
                  return false;
            }
            return true;
         }
         MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");

      } else {
         // NOTE(review): line lost in extraction here
         MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "use gradient from model function");
         std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
         if (gradFun) {
            if (extended) {
               MATH_WARN_MSG("Fitter::DoUnbinnedLikelihoodFit",
                             "Extended unbinned fit with gradient not yet supported - do a not-extended fit");
            }
            LogLikelihoodFCN<BaseGradFunc> logl(data, gradFun, useWeight, extended);
            fFitType = logl.Type();
            if (!DoMinimization(logl))
               return false;
            if (useWeight) {
               // NOTE(review): statement lost in extraction
               if (!ApplyWeightCorrection(logl))
                  return false;
            }
            return true;
         }
         MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
      }
   }
   return false;
}
600
601
// NOTE(review): the function signature line was lost in extraction here (this
// is the body of DoLinearFit) — confirm against upstream ROOT source.

   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
   assert(data);

   // perform a linear fit on a set of binned data
   // temporarily switch to the "Linear" minimizer and restore the previous
   // one after the least-square fit
   std::string prevminimizer = fConfig.MinimizerType();
   fConfig.SetMinimizer("Linear");

   fBinFit = true;

   bool ret = DoLeastSquareFit();
   fConfig.SetMinimizer(prevminimizer.c_str());
   return ret;
}
617
618
// NOTE(review): the function signature line was lost in extraction here (this
// is the body of CalculateHessErrors); a few interior lines are also missing
// below — confirm against upstream ROOT source.
   // compute the Hesse errors according to configuration
   // set in the parameters and append value in fit result
   if (fObjFunction.get() == 0) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Objective function has not been set");
      return false;
   }
   // assume a fResult pointer always exists
   assert (fResult.get() );

   // need a special treatment in case of weighted likelihood fit
   // (not yet implemented)
   if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Re-computation of Hesse errors not implemented for weighted likelihood fits");
      MATH_INFO_MSG("Fitter::CalculateHessErrors","Do the Fit using configure option FitConfig::SetParabErrors()");
      return false;
   }
   // if (!fUseGradient ) {
   // ROOT::Math::FitMethodFunction * fcn = dynamic_cast< ROOT::Math::FitMethodFunction *>(fObjFunction.get());
   // if (fcn && fcn->Type() == ROOT::Math::FitMethodFunction::kLogLikelihood) {
   // if (!fBinFit) {
   // ROOT::Math::LogLikelihoodFunction * nll = dynamic_cast< ROOT::Math::LogLikelihoodFunction *>(fcn);
   // assert(nll);
   // nll->UseSumOfWeightSquare(false);
   // }
   // else {
   // ROOT::Math::PoissonLikelihoodFunction * nll = dynamic_cast< ROOT::Math::PoissonLikelihoodFunction *>(fcn);
   // assert(nll);
   // nll->UseSumOfWeightSquare(false);
   // }
   // // reset fcn in minimizer
   // }


   // create minimizer if not done or if name has changed
   if ( !fMinimizer ||
        fResult->MinimizerType().find(fConfig.MinimizerType()) == std::string::npos ) {
      bool ret = DoInitMinimizer();
      if (!ret) {
         MATH_ERROR_MSG("Fitter::CalculateHessErrors","Error initializing the minimizer");
         return false;
      }
   }


   if (!fMinimizer ) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Need to do a fit before calculating the errors");
      return false;
   }

   //run Hesse
   bool ret = fMinimizer->Hesse();
   if (!ret) MATH_WARN_MSG("Fitter::CalculateHessErrors","Error when calculating Hessian");


   // update minimizer results with what comes out from Hesse
   // in case is empty - create from a FitConfig
   if (fResult->IsEmpty() )
      fResult = std::unique_ptr<ROOT::Fit::FitResult>(new ROOT::Fit::FitResult(fConfig) );


   // re-give a minimizer instance in case it has been changed
   ret |= fResult->Update(fMinimizer, ret);

   // when possible get ncalls from FCN and set in fit result
   // NOTE(review): the opening `if (...) {` of this conditional was lost in
   // extraction; only its body and closing brace survive.
      fResult->fNCalls = GetNCallsFromFCN();
   }

   // set also new errors in FitConfig
   // NOTE(review): the statement updating the FitConfig was lost in extraction.

   return ret;
}
693
694
// NOTE(review): the function signature line was lost in extraction here (this
// is the body of CalculateMinosErrors) — confirm against upstream ROOT source.
   // compute the Minos errors according to configuration
   // set in the parameters and append value in fit result
   // normally Minos errors are computed just after the minimization
   // (in DoMinimization) when creating the FItResult if the
   // FitConfig::MinosErrors() flag is set

   if (!fMinimizer.get() ) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minimizer does not exist - cannot calculate Minos errors");
      return false;
   }

   if (!fResult.get() || fResult->IsEmpty() ) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Invalid Fit Result - cannot calculate Minos errors");
      return false;
   }

   if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Computation of MINOS errors not implemented for weighted likelihood fits");
      return false;
   }

   // set flag to compute Minos error to false in FitConfig to avoid that
   // following minimizaiton calls perform unwanted Minos error calculations
   fConfig.SetMinosErrors(false);


   // run Minos either on the user-selected parameters or, when none were
   // selected, on all fitted parameters
   const std::vector<unsigned int> & ipars = fConfig.MinosParams();
   unsigned int n = (ipars.size() > 0) ? ipars.size() : fResult->Parameters().size();
   bool ok = false;
   for (unsigned int i = 0; i < n; ++i) {
      double elow, eup;
      unsigned int index = (ipars.size() > 0) ? ipars[i] : i;
      bool ret = fMinimizer->GetMinosError(index, elow, eup);
      if (ret) fResult->SetMinosError(index, elow, eup);
      ok |= ret;   // overall success if at least one parameter succeeded
   }
   if (!ok)
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minos error calculation failed for all parameters");

   return ok;
}
737
738
739
740// traits for distinhuishing fit methods functions from generic objective functions
741template<class Func>
742struct ObjFuncTrait {
743 static unsigned int NCalls(const Func & ) { return 0; }
744 static int Type(const Func & ) { return -1; }
745 static bool IsGrad() { return false; }
746};
747template<>
748struct ObjFuncTrait<ROOT::Math::FitMethodFunction> {
749 static unsigned int NCalls(const ROOT::Math::FitMethodFunction & f ) { return f.NCalls(); }
750 static int Type(const ROOT::Math::FitMethodFunction & f) { return f.Type(); }
751 static bool IsGrad() { return false; }
752};
753template<>
754struct ObjFuncTrait<ROOT::Math::FitMethodGradFunction> {
755 static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction & f ) { return f.NCalls(); }
756 static int Type(const ROOT::Math::FitMethodGradFunction & f) { return f.Type(); }
757 static bool IsGrad() { return true; }
758};
759
// NOTE(review): the function signature line was lost in extraction here (this
// is the body of DoInitMinimizer) — confirm against upstream ROOT source.
   //initialize minimizer by creating it
   // and set there the objective function
   // obj function must have been copied before
   assert(fObjFunction.get() );

   // check configuration and objective function
   if ( fConfig.ParamsSettings().size() != fObjFunction->NDim() ) {
      MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong function dimension or wrong size for FitConfig");
      return false;
   }

   // create first Minimizer
   // using an auto_Ptr will delete the previous existing one
   fMinimizer = std::shared_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
   if (fMinimizer.get() == 0) {
      // NOTE(review): the location tag says "Fitter::FitFCN" although this is
      // DoInitMinimizer — looks like a mislabel; confirm before changing.
      MATH_ERROR_MSG("Fitter::FitFCN","Minimizer cannot be created");
      return false;
   }

   // in case of gradient function one needs to downcast the pointer
   if (fUseGradient) {
      const ROOT::Math::IMultiGradFunction * gradfcn = dynamic_cast<const ROOT::Math::IMultiGradFunction *> (fObjFunction.get() );
      if (!gradfcn) {
         MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong type of function - it does not provide gradient");
         return false;
      }
      fMinimizer->SetFunction( *gradfcn);
   }
   else
      fMinimizer->SetFunction( *fObjFunction);


   fMinimizer->SetVariables(fConfig.ParamsSettings().begin(), fConfig.ParamsSettings().end() );

   // if requested parabolic error do correct error analysis by the minimizer (call HESSE)
   if (fConfig.ParabErrors()) fMinimizer->SetValidError(true);

   return true;

}
801
802
// NOTE(review): the function signature line was lost in extraction here (this
// is the body of the internal DoMinimization overload taking an optional
// chi2-function pointer); a few interior lines are missing below — confirm
// against upstream ROOT source.
   // perform the minimization (assume we have already initialized the minimizer)

   assert(fMinimizer );

   bool ret = fMinimizer->Minimize();

   // unsigned int ncalls = ObjFuncTrait<ObjFunc>::NCalls(*fcn);
   // int fitType = ObjFuncTrait<ObjFunc>::Type(objFunc);

   if (!fResult) fResult = std::make_shared<FitResult>();
   fResult->FillResult(fMinimizer,fConfig, fFunc, ret, fDataSize, fBinFit, chi2func );

   // when possible get ncalls from FCN and set in fit result
   // NOTE(review): the opening `if (...) {` of this conditional was lost in
   // extraction; only its body and closing brace survive.
      fResult->fNCalls = GetNCallsFromFCN();
   }

   // fill information in fit result
   fResult->fObjFunc = fObjFunction;
   fResult->fFitData = fData;


#ifdef DEBUG
   std::cout << "ROOT::Fit::Fitter::DoMinimization : ncalls = " << fResult->fNCalls << " type of objfunc " << fFitFitResType << " typeid: " << typeid(*fObjFunction).name() << " use gradient " << fUseGradient << std::endl;
#endif

   // NOTE(review): the condition guarding error normalization was lost in
   // extraction; only the call survives.
   fResult->NormalizeErrors();

   // set also new parameter values and errors in FitConfig
   // NOTE(review): the statement updating the FitConfig was lost in extraction.

   return ret;
}
838
839bool Fitter::DoMinimization(const BaseFunc & objFunc, const ROOT::Math::IMultiGenFunction * chi2func) {
840 // perform the minimization initializing the minimizer starting from a given obj function
841
842 // keep also a copy of FCN function and set this in minimizer so they will be managed together
843 // (remember that cloned copy will still depends on data and model function pointers)
844 fObjFunction = std::unique_ptr<ROOT::Math::IMultiGenFunction> ( objFunc.Clone() );
845 if (!DoInitMinimizer()) return false;
846 return DoMinimization(chi2func);
847}
848
849
// NOTE(review): the function signature line was lost in extraction here (this
// is the body of DoUpdateFitConfig) — confirm against upstream ROOT source.
   // update the fit configuration after a fit using the obtained result
   if (fResult->IsEmpty() || !fResult->IsValid() ) return;
   for (unsigned int i = 0; i < fConfig.NPar(); ++i) {
      // NOTE(review): the declaration of `par` (presumably a reference to the
      // i-th parameter settings in fConfig) was lost in extraction.
      par.SetValue( fResult->Value(i) );
      if (fResult->Error(i) > 0) par.SetStepSize( fResult->Error(i) );
   }
}
859
// NOTE(review): the function signature line was lost in extraction here (this
// is the body of GetNCallsFromFCN) — confirm against upstream ROOT source.
   // retrieve ncalls from the fit method functions
   // this function is called when minimizer does not provide a way of returning the nnumber of function calls
   int ncalls = 0;
   if (!fUseGradient) {
      const ROOT::Math::FitMethodFunction * fcn = dynamic_cast<const ROOT::Math::FitMethodFunction *>(fObjFunction.get());
      if (fcn) ncalls = fcn->NCalls();
   }
   else {
      // NOTE(review): the declaration of `fcn` (presumably a dynamic_cast of
      // the stored objective function to the gradient fit-method type) was
      // lost in extraction.
      if (fcn) ncalls = fcn->NCalls();
   }
   return ncalls;
}
874
875
876bool Fitter::ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction & loglw2, bool minimizeW2L) {
877 // apply correction for weight square
878 // Compute Hessian of the loglikelihood function using the sum of the weight squared
879 // This method assumes:
880 // - a fit has been done before and a covariance matrix exists
881 // - the objective function is a likelihood function and Likelihood::UseSumOfWeightSquare()
882 // has been called before
883
884 if (fMinimizer.get() == 0) {
885 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Must perform first a fit before applying the correction");
886 return false;
887 }
888
889 unsigned int n = loglw2.NDim();
890 // correct errors for weight squared
891 std::vector<double> cov(n*n);
892 bool ret = fMinimizer->GetCovMatrix(&cov[0] );
893 if (!ret) {
894 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Previous fit has no valid Covariance matrix");
895 return false;
896 }
897 // need to re-init the minimizer and set w2
898 fObjFunction = std::unique_ptr<ROOT::Math::IMultiGenFunction> ( loglw2.Clone() );
899 // need to re-initialize the minimizer for the changes applied in the
900 // objective functions
901 if (!DoInitMinimizer()) return false;
902
903 //std::cout << "Running Hesse ..." << std::endl;
904
905 // run eventually before a minimization
906 // ignore its error
907 if (minimizeW2L) fMinimizer->Minimize();
908 // run Hesse on the log-likelihood build using sum of weight squared
909 ret = fMinimizer->Hesse();
910 if (!ret) {
911 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error running Hesse on weight2 likelihood - cannot compute errors");
912 return false;
913 }
914
915 if (fMinimizer->CovMatrixStatus() != 3) {
916 MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not accurate, the errors may be not reliable");
917 if (fMinimizer->CovMatrixStatus() == 2)
918 MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood was forced to be defined positive");
919 if (fMinimizer->CovMatrixStatus() <= 0)
920 // probably should have failed before
921 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not valid !");
922 }
923
924 // std::vector<double> c(n*n);
925 // ret = fMinimizer->GetCovMatrix(&c2[0] );
926 // if (!ret) std::cout << "Error reading cov matrix " << fMinimizer->Status() << std::endl;
927 // TMatrixDSym cmat2(n,&c2[0]);
928 // std::cout << "Cov matrix of w2 " << std::endl;
929 // cmat2.Print();
930 // cmat2.Invert();
931 // std::cout << "Hessian of w2 " << std::endl;
932 // cmat2.Print();
933
934 // get Hessian matrix from weight-square likelihood
935 std::vector<double> hes(n*n);
936 ret = fMinimizer->GetHessianMatrix(&hes[0] );
937 if (!ret) {
938 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error retrieving Hesse on weight2 likelihood - cannot compute errors");
939 return false;
940 }
941
942 // for debug
943 // std::cout << "Hessian W2 matrix " << std::endl;
944 // for (unsigned int i = 0; i < n; ++i) {
945 // for (unsigned int j = 0; j < n; ++j) {
946 // std::cout << std::setw(12) << hes[i*n + j] << " , ";
947 // }
948 // std::cout << std::endl;
949 // }
950
951 // perform product of matvrix cov * hes * cov
952 // since we do not want to add matrix dependence do product by hand
953 // first do hes * cov
954 std::vector<double> tmp(n*n);
955 for (unsigned int i = 0; i < n; ++i) {
956 for (unsigned int j = 0; j < n; ++j) {
957 for (unsigned int k = 0; k < n; ++k)
958 tmp[i*n+j] += hes[i*n + k] * cov[k*n + j];
959 }
960 }
961 // do multiplication now cov * tmp save result
962 std::vector<double> newCov(n*n);
963 for (unsigned int i = 0; i < n; ++i) {
964 for (unsigned int j = 0; j < n; ++j) {
965 for (unsigned int k = 0; k < n; ++k)
966 newCov[i*n+j] += cov[i*n + k] * tmp[k*n + j];
967 }
968 }
969 // update fit result with new corrected covariance matrix
970 unsigned int k = 0;
971 for (unsigned int i = 0; i < n; ++i) {
972 fResult->fErrors[i] = std::sqrt( newCov[i*(n+1)] );
973 for (unsigned int j = 0; j <= i; ++j)
974 fResult->fCovMatrix[k++] = newCov[i *n + j];
975 }
976 //fResult->PrintCovMatrix(std::cout);
977
978 return true;
979}
980
981
982
// NOTE(review): the function signature line and most of the body were lost in
// extraction here (this is ExamineFCN, which inspects the stored FCN type to
// recover the fit data and model-function objects) — confirm against upstream
// ROOT source.
   // return a pointer to the binned data used in the fit
   // works only for chi2 or binned likelihood fits
   // thus when the objective function stored is a Chi2Func or a PoissonLikelihood
   // The funciton also set the model function correctly if it has not been set


   //MATH_INFO_MSG("Fitter::ExamineFCN","Objective function is not of a known type - FitData and ModelFunction objects are not available");
   return;
}
998
999 } // end namespace Fit
1000
1001} // end namespace ROOT
#define MATH_INFO_MSG(loc, str)
Pre-processor macro to report messages which can be configured to use ROOT error or simply an std::io...
Definition: Error.h:76
#define MATH_ERROR_MSG(loc, str)
Definition: Error.h:82
#define MATH_WARN_MSG(loc, str)
Definition: Error.h:79
#define f(i)
Definition: RSha256.hxx:104
double sqrt(double)
BasicFCN class: base class for the objective functions used in the fits It has a reference to the dat...
Definition: BasicFCN.h:40
Chi2FCN class for binned fits using the least square methods.
Definition: Chi2FCN.h:49
virtual BaseObjFunction::Type_t Type() const
get type of fit method function
Definition: Chi2FCN.h:134
const std::vector< unsigned int > & MinosParams() const
return vector of parameter indices for which the Minos Error will be computed
Definition: FitConfig.h:215
void SetParamsSettings(unsigned int npar, const double *params, const double *vstep=0)
set the parameter settings from number of parameters and a vector of values and optionally step value...
Definition: FitConfig.cxx:136
bool UpdateAfterFit() const
Update configuration after a fit using the FitResult.
Definition: FitConfig.h:208
void SetMinosErrors(bool on=true)
set Minos errors computation to be performed after fitting
Definition: FitConfig.h:226
void SetMinimizer(const char *type, const char *algo=0)
set minimizer type
Definition: FitConfig.h:180
bool NormalizeErrors() const
flag to check if resulting errors are to be normalized according to chi2/ndf
Definition: FitConfig.h:199
bool ParabErrors() const
do analysis for parabolic errors
Definition: FitConfig.h:202
unsigned int NPar() const
number of parameters settings
Definition: FitConfig.h:95
bool UseWeightCorrection() const
Apply Weight correction for error matrix computation.
Definition: FitConfig.h:211
const std::vector< ROOT::Fit::ParameterSettings > & ParamsSettings() const
get the vector of parameter settings (const method)
Definition: FitConfig.h:85
ROOT::Math::Minimizer * CreateMinimizer()
create a new minimizer according to chosen configuration
Definition: FitConfig.cxx:174
void CreateParamsSettings(const ROOT::Math::IParamMultiFunctionTempl< T > &func)
set the parameter settings from a model function.
Definition: FitConfig.h:108
const std::string & MinimizerType() const
return type of minimizer package
Definition: FitConfig.h:188
const ParameterSettings & ParSettings(unsigned int i) const
get the parameter settings for the i-th parameter (const method)
Definition: FitConfig.h:75
ROOT::Math::MinimizerOptions & MinimizerOptions()
access to the minimizer control parameter (non const method)
Definition: FitConfig.h:166
bool MinosErrors() const
do Minos errors analysis on the parameters
Definition: FitConfig.h:205
class containing the result of the fit and all the related information (fitted parameter values,...
Definition: FitResult.h:48
Fitter class, entry point for performing all type of fits.
Definition: Fitter.h:77
bool EvalFCN()
Perform a simple FCN evaluation.
Definition: Fitter.cxx:310
bool FitFCN()
Perform a fit with the previously set FCN function.
Definition: Fitter.cxx:292
void DoUpdateFitConfig()
Definition: Fitter.cxx:850
std::shared_ptr< ROOT::Math::Minimizer > fMinimizer
pointer to the object containing the result of the fit
Definition: Fitter.h:512
bool DoBinnedLikelihoodFit(bool extended=true, const ROOT::Fit::ExecutionPolicy &executionPolicy=ROOT::Fit::ExecutionPolicy::kSerial)
binned likelihood fit
Definition: Fitter.cxx:388
std::shared_ptr< ROOT::Fit::FitData > fData
pointer to used minimizer
Definition: Fitter.h:514
bool fUseGradient
Definition: Fitter.h:494
bool SetFCN(unsigned int npar, Function &fcn, const double *params=0, unsigned int dataSize=0, bool chi2fit=false)
Set a generic FCN function as a C++ callable object implementing double () (const double *) Note that...
Definition: Fitter.h:595
bool DoMinimization(const BaseFunc &f, const ROOT::Math::IMultiGenFunction *chifunc=0)
do minimization
Definition: Fitter.cxx:839
std::shared_ptr< ROOT::Math::IMultiGenFunction > fObjFunction
pointer to the fit data (binned or unbinned data)
Definition: Fitter.h:516
bool ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction &loglw2, bool minimizeW2L=false)
apply correction in the error matrix for the weights for likelihood fits This method can be called on...
Definition: Fitter.cxx:876
void ExamineFCN()
look at the user provided FCN and get data and model function if they derive from ROOT::Fit FCN class...
Definition: Fitter.cxx:983
const FitConfig & Config() const
access to the fit configuration (const method)
Definition: Fitter.h:393
std::shared_ptr< IModelFunction_v > fFunc_v
Definition: Fitter.h:506
bool DoUnbinnedLikelihoodFit(bool extended=false, const ROOT::Fit::ExecutionPolicy &executionPolicy=ROOT::Fit::ExecutionPolicy::kSerial)
un-binned likelihood fit
Definition: Fitter.cxx:497
Fitter & operator=(const Fitter &rhs)
Assignment operator (disabled, class is not copyable)
Definition: Fitter.cxx:84
std::shared_ptr< ROOT::Fit::FitResult > fResult
copy of the fitted function containing on output the fit result
Definition: Fitter.h:510
bool GetDataFromFCN()
internal functions to get data set and model function from FCN useful for fits done with customized F...
Definition: Fitter.h:524
bool CalculateMinosErrors()
perform an error analysis on the result using MINOS To be called only after fitting and when a minimi...
Definition: Fitter.cxx:695
~Fitter()
Destructor.
Definition: Fitter.cxx:70
void SetFunction(const IModelFunction &func, bool useGradient=false)
Set the fitted function (model function) from a parametric function interface.
Definition: Fitter.cxx:103
bool CalculateHessErrors()
perform an error analysis on the result using the Hessian Errors are obtained from the inverse of the ...
Definition: Fitter.cxx:619
FitConfig fConfig
Definition: Fitter.h:504
Fitter()
Default constructor.
Definition: Fitter.cxx:51
std::shared_ptr< IModelFunction > fFunc
copy of the fitted function containing on output the fit result
Definition: Fitter.h:508
bool DoLinearFit()
linear least square fit
Definition: Fitter.cxx:602
bool DoInitMinimizer()
Definition: Fitter.cxx:760
int GetNCallsFromFCN()
Definition: Fitter.cxx:860
bool DoLeastSquareFit(const ROOT::Fit::ExecutionPolicy &executionPolicy=ROOT::Fit::ExecutionPolicy::kSerial)
least square fit
Definition: Fitter.cxx:329
LogLikelihoodFCN class for likelihood fits.
virtual BaseObjFunction::Type_t Type() const
get type of fit method function
void UseSumOfWeightSquare(bool on=true)
Class, describing value, limits and step size of the parameters Provides functionality also to set/re...
void SetValue(double val)
set the value
void SetStepSize(double err)
set the step size
class evaluating the log likelihood for binned Poisson likelihood fits it is templated to distinguish ...
virtual BaseObjFunction::Type_t Type() const
get type of fit method function
FitMethodFunction class Interface for objective functions (like chi2 and likelihood used in the fit) ...
virtual Type_t Type() const
return the type of method, override if needed
virtual unsigned int NPoints() const
return the number of data points used in evaluating the function
virtual unsigned int NCalls() const
return the total number of function calls (overridden if needed)
Documentation for the abstract class IBaseFunctionMultiDim.
Definition: IFunction.h:62
virtual IBaseFunctionMultiDimTempl< T > * Clone() const =0
Clone a function.
virtual unsigned int NDim() const =0
Retrieve the dimension of the function.
Interface (abstract class) for multi-dimensional functions providing a gradient calculation.
Definition: IFunction.h:327
Specialized IParamFunction interface (abstract class) for one-dimensional parametric functions It is ...
Interface (abstract class) for parametric gradient multi-dimensional functions providing in addition ...
Interface (abstract class) for parametric one-dimensional gradient functions providing in addition to...
double ErrorDef() const
error definition
int PrintLevel() const
non-static methods for retrieving options
void SetErrorDef(double err)
set error def
MultiDimParamFunctionAdapter class to wrap a one-dimensional parametric function in a multi dimension...
MultiDimParamGradFunctionAdapter class to wrap a one-dimensional parametric gradient function in a mu...
Type
enumeration specifying the integration types.
const Int_t n
Definition: legend1.C:16
TFitResultPtr Fit(FitObject *h1, TF1 *f1, Foption_t &option, const ROOT::Math::MinimizerOptions &moption, const char *goption, ROOT::Fit::DataRange &range)
Definition: HFitImpl.cxx:134
double gDefaultErrorDef
Definition: Fitter.cxx:48
BasicFitMethodFunction< ROOT::Math::IMultiGenFunction > FitMethodFunction
Definition: Fitter.h:40
BasicFitMethodFunction< ROOT::Math::IMultiGradFunction > FitMethodGradFunction
Definition: Fitter.h:44
Namespace for new ROOT classes and functions.
Definition: StringConv.hxx:21