Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
Fitter.cxx
Go to the documentation of this file.
1// @(#)root/mathcore:$Id$
2// Author: L. Moneta Mon Sep 4 17:00:10 2006
3
4/**********************************************************************
5 * *
6 * Copyright (c) 2006 LCG ROOT Math Team, CERN/PH-SFT *
7 * *
8 * *
9 **********************************************************************/
10
11// Implementation file for class Fitter
12
13
14#include "Fit/Fitter.h"
15#include "Fit/Chi2FCN.h"
18#include "Math/Minimizer.h"
21#include "Fit/BasicFCN.h"
22#include "Fit/BinData.h"
23#include "Fit/UnBinData.h"
24#include "Fit/FcnAdapter.h"
25#include "Fit/FitConfig.h"
26#include "Fit/FitResult.h"
27#include "Math/Error.h"
28
29#include <memory>
30
31#include "Math/IParamFunction.h"
32
34
35// #include "TMatrixDSym.h"
36// for debugging
37//#include "TMatrixD.h"
38// #include <iomanip>
39
40namespace ROOT {
41
42 namespace Fit {
43
44// use a static variable to get default minimizer options for error def
45// to see if user has changed it later on. If it has not been changed we set
46// for the likelihood method an error def of 0.5
47// t.b.d : multiply likelihood by 2 so have same error def definition as chi2
49
50
51Fitter::Fitter(const std::shared_ptr<FitResult> & result) :
52 fResult(result)
53{
54 if (result->fFitFunc) SetFunction(*fResult->fFitFunc); // this will create also the configuration
55 if (result->fObjFunc) fObjFunction = fResult->fObjFunc;
56 if (result->fFitData) fData = fResult->fFitData;
57}
58
59void Fitter::SetFunction(const IModelFunction & func, bool useGradient)
60{
61
62 fUseGradient = useGradient;
63 if (fUseGradient) {
64 const IGradModelFunction * gradFunc = dynamic_cast<const IGradModelFunction*>(&func);
65 if (gradFunc) {
66 SetFunction(*gradFunc, true);
67 return;
68 }
69 else {
70 MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
71 }
72 }
73 fUseGradient = false;
74
75 // set the fit model function (clone the given one and keep a copy )
76 //std::cout << "set a non-grad function" << std::endl;
77
78 fFunc = std::shared_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone() ) );
80
81 // creates the parameter settings
83 fFunc_v.reset();
84}
85
86void Fitter::SetFunction(const IModel1DFunction & func, bool useGradient)
87{
88 fUseGradient = useGradient;
89 if (fUseGradient) {
90 const IGradModel1DFunction * gradFunc = dynamic_cast<const IGradModel1DFunction*>(&func);
91 if (gradFunc) {
92 SetFunction(*gradFunc, true);
93 return;
94 }
95 else {
96 MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
97 }
98 }
99 fUseGradient = false;
100 //std::cout << "set a 1d function" << std::endl;
101
102 // function is cloned when creating the adapter
103 fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamFunctionAdapter(func));
104
105 // creates the parameter settings
107 fFunc_v.reset();
108}
109
110void Fitter::SetFunction(const IGradModelFunction & func, bool useGradient)
111{
112 fUseGradient = useGradient;
113 //std::cout << "set a grad function" << std::endl;
114 // set the fit model function (clone the given one and keep a copy )
115 fFunc = std::shared_ptr<IModelFunction>( dynamic_cast<IGradModelFunction *> ( func.Clone() ) );
116 assert(fFunc);
117
118 // creates the parameter settings
120 fFunc_v.reset();
121}
122
123
124void Fitter::SetFunction(const IGradModel1DFunction & func, bool useGradient)
125{
126 //std::cout << "set a 1d grad function" << std::endl;
127 fUseGradient = useGradient;
128 // function is cloned when creating the adapter
129 fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamGradFunctionAdapter(func));
130
131 // creates the parameter settings
133 fFunc_v.reset();
134}
135
136
137bool Fitter::DoSetFCN(bool extFcn, const ROOT::Math::IMultiGenFunction & fcn, const double * params, unsigned int dataSize, int fitType) {
138 // Set the objective function for the fit. First parameter specifies if function object is managed external or internal.
139 // In case of an internal function object we need to clone because it is a temporary one
140 // if params is not NULL create the parameter settings
141 fUseGradient = false;
142 unsigned int npar = fcn.NDim();
143 if (npar == 0) {
144 MATH_ERROR_MSG("Fitter::SetFCN","FCN function has zero parameters ");
145 return false;
146 }
147 if (params != nullptr || fConfig.ParamsSettings().empty())
149 else {
150 if ( fConfig.ParamsSettings().size() != npar) {
151 MATH_ERROR_MSG("Fitter::SetFCN","wrong fit parameter settings");
152 return false;
153 }
154 }
157
159
160 // store external provided FCN without cloning it
161 // it will be cloned in fObjFunc after the fit
162 if (extFcn) {
164 fObjFunction.reset();
165 }
166 else {
167 // case FCN is built from Minuit interface so function object is created internally in Fitter class
168 // and needs to be cloned and managed
169 fExtObjFunction = nullptr;
170 fObjFunction.reset(fcn.Clone());
171 }
172
173 // in case a model function and data exists from a previous fit - reset shared-ptr
174 if (fResult && fResult->FittedFunction() == nullptr && fFunc) fFunc.reset();
175 if (fData) fData.reset();
176
177 return true;
178}
179bool Fitter::SetFCN(const ROOT::Math::IMultiGenFunction & fcn, const double * params, unsigned int dataSize, int fitType) {
180 // set the objective function for the fit
181 return DoSetFCN(true, fcn, params, dataSize, fitType);
182}
183bool Fitter::SetFCN(const ROOT::Math::IMultiGenFunction &fcn, const IModelFunction & func, const double *params, unsigned int dataSize, int fitType) {
184 // set the objective function for the fit and a model function
185 if (!SetFCN(fcn, params, dataSize, fitType) ) return false;
186 // need to set fFunc afterwards because SetFCN could reset fFunc
187 fFunc = std::unique_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone()));
188 if(fFunc) {
189 fUseGradient = fcn.HasGradient();
190 return true;
191 }
192 return false;
193}
194
195bool Fitter::SetFCN(const ROOT::Math::FitMethodFunction &fcn, const double *params)
196{
197 // set the objective function for the fit
198 // if params is not NULL create the parameter settings
199 int fitType = static_cast<int>(fcn.Type());
200 if (!SetFCN(fcn, params, fcn.NPoints(), fitType))
201 return false;
202 fUseGradient = false;
203 return true;
204}
205
206bool Fitter::SetFCN(const ROOT::Math::FitMethodGradFunction &fcn, const double *params)
207{
208 // set the objective function for the fit
209 // if params is not NULL create the parameter settings
210 int fitType = static_cast<int>(fcn.Type());
211 if (!SetFCN(fcn, params, fcn.NPoints(), fitType))
212 return false;
213 fUseGradient = true;
214 return true;
215}
216
217bool Fitter::FitFCN(const BaseFunc &fcn, const double *params, unsigned int dataSize, int fitType)
218{
219 // fit a user provided FCN function
220 // create fit parameter settings
221 if (!SetFCN(fcn, params, dataSize, fitType))
222 return false;
223 return FitFCN();
224}
225
226bool Fitter::FitFCN(const ROOT::Math::FitMethodFunction &fcn, const double *params)
227{
228 // fit using the passed objective function for the fit
229 if (!SetFCN(fcn, params))
230 return false;
231 return FitFCN();
232}
233
234bool Fitter::FitFCN(const ROOT::Math::FitMethodGradFunction &fcn, const double *params)
235{
236 // fit using the passed objective function for the fit
237 if (!SetFCN(fcn, params))
238 return false;
239 return FitFCN();
240}
241
242bool Fitter::SetFCN(MinuitFCN_t fcn, int npar, const double *params, unsigned int dataSize, int fitType)
243{
244 // set TMinuit style FCN type (global function pointer)
245 // create corresponding objective function from that function
246
247 if (npar == 0) {
248 npar = fConfig.ParamsSettings().size();
249 if (npar == 0) {
250 MATH_ERROR_MSG("Fitter::FitFCN", "Fit Parameter settings have not been created ");
251 return false;
252 }
253 }
254
256 return DoSetFCN(false,newFcn, params, dataSize, fitType);
257}
258
259bool Fitter::FitFCN(MinuitFCN_t fcn, int npar, const double *params, unsigned int dataSize, int fitType)
260{
261 // fit using Minuit style FCN type (global function pointer)
262 // create corresponding objective function from that function
263 if (!SetFCN(fcn, npar, params, dataSize, fitType))
264 return false;
265 fUseGradient = false;
266 return FitFCN();
267}
268
270{
271 // fit using the previously set FCN function
272
273
274 if (!fExtObjFunction && !fObjFunction) {
275 MATH_ERROR_MSG("Fitter::FitFCN", "Objective function has not been set");
276 return false;
277 }
278 // init the minimizer
279 if (!DoInitMinimizer())
280 return false;
281 // perform the minimization
282 return DoMinimization();
283}
284
286{
287 // evaluate the FCN using the stored values in fConfig
288
289 if (fFunc && fResult->FittedFunction() == nullptr)
290 fFunc.reset();
291
292 if (!ObjFunction()) {
293 MATH_ERROR_MSG("Fitter::FitFCN", "Objective function has not been set");
294 return false;
295 }
296 // create a Fit result from the fit configuration
297 fResult = std::make_unique<ROOT::Fit::FitResult>(fConfig);
298 // evaluate one time the FCN
299 double fcnval = (*ObjFunction())(fResult->GetParams());
300 // update fit result
301 fResult->fVal = fcnval;
302 fResult->fNCalls++;
303 return true;
304}
305
307{
308
309 // perform a chi2 fit on a set of binned data
310 std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
311 assert(data);
312
313 // check function
314 if (!fFunc && !fFunc_v) {
315 MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "model function is not set");
316 return false;
317 } else {
318
319#ifdef DEBUG
320 std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[3].IsBound() << " lower limit "
321 << Config().ParamsSettings()[3].LowerLimit() << " upper limit "
322 << Config().ParamsSettings()[3].UpperLimit() << std::endl;
323#endif
324
325 fBinFit = true;
326 fDataSize = data->Size();
327 // check if fFunc provides gradient
328 if (!fUseGradient) {
329 // do minimization without using the gradient
330 if (fFunc_v) {
332 } else {
333 return DoMinimization(std::make_unique<Chi2FCN<BaseFunc>>(data, fFunc, executionPolicy));
334 }
335 } else {
336 // use gradient
338 MATH_INFO_MSG("Fitter::DoLeastSquareFit", "use gradient from model function");
339
340 if (fFunc_v) {
341 std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
342 if (gradFun) {
344 }
345 } else {
346 std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
347 if (gradFun) {
349 }
350 }
351 MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "wrong type of function - it does not provide gradient");
352 }
353 }
354 return false;
355}
356
{
   // perform a likelihood fit on a set of binned data
   // The fit is extended (Poisson log-likelihood) by default
   // NOTE(review): the signature line was lost in this copy; upstream
   // declares bool Fitter::DoBinnedLikelihoodFit(bool extended,
   // const ROOT::EExecutionPolicy & executionPolicy) - confirm upstream.

   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
   assert(data);

   // NOTE(review): a declaration of `useWeight` (presumably from
   // fConfig.UseWeightCorrection()) was dropped here in this copy.

   // check function
   if (!fFunc && !fFunc_v) {
      MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "model function is not set");
      return false;
   }

   // logl fit (error should be 0.5) set if different than default values (of 1)
   // NOTE(review): the block setting the minimizer error definition to 0.5
   // was dropped here in this copy; the stray closing brace below belongs
   // to that lost block.
   }

   if (useWeight && fConfig.MinosErrors()) {
      MATH_INFO_MSG("Fitter::DoBinnedLikelihoodFit", "MINOS errors cannot be computed in weighted likelihood fits");
      fConfig.SetMinosErrors(false);
   }

   // binned likelihood fit: record the data size for the result
   fBinFit = true;
   fDataSize = data->Size();

   if (!fUseGradient) {
      // do minimization without using the gradient
      if (fFunc_v) {
         // create a chi2 function to be used for the equivalent chi-square
         // NOTE(review): the construction of the local `chi2` object was
         // dropped here in this copy.
         auto logl = std::make_unique<PoissonLikelihoodFCN<BaseFunc, IModelFunction_v>>(data, fFunc_v, useWeight, extended, executionPolicy);
         return (useWeight) ? DoWeightMinimization(std::move(logl),&chi2) : DoMinimization(std::move(logl),&chi2);
      } else {
         // create a chi2 function to be used for the equivalent chi-square
         // NOTE(review): the construction of the local `chi2` object was
         // dropped here in this copy.
         auto logl = std::make_unique<PoissonLikelihoodFCN<BaseFunc>>(data, fFunc, useWeight, extended, executionPolicy);
         return (useWeight) ? DoWeightMinimization(std::move(logl),&chi2) : DoMinimization(std::move(logl),&chi2);
      }
   } else {
      // use the gradient provided by the model function
      MATH_INFO_MSG("Fitter::DoLikelihoodFit", "use gradient from model function");
      // not-extended is not implemented in this case
      if (!extended) {
         MATH_WARN_MSG("Fitter::DoBinnedLikelihoodFit",
                       "Not-extended binned fit with gradient not yet supported - do an extended fit");
         extended = true;
      }
      if (fFunc_v) {
         // create a chi2 function to be used for the equivalent chi-square
         // NOTE(review): the construction of the local `chi2` object was
         // dropped here in this copy.
         std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
         if (!gradFun) {
            MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
            return false;
         }
         auto logl = std::make_unique<PoissonLikelihoodFCN<BaseGradFunc, IModelFunction_v>>(data, gradFun, useWeight, extended, executionPolicy);
         // do minimization
         return (useWeight) ? DoWeightMinimization(std::move(logl),&chi2) : DoMinimization(std::move(logl),&chi2);
      } else {
         // create a chi2 function to be used for the equivalent chi-square
         // NOTE(review): the construction of the local `chi2` object was
         // dropped here in this copy.
         // check if fFunc provides gradient
         std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
         if (!gradFun) {
            MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
            return false;
         }
         // use gradient for minimization
         auto logl = std::make_unique<PoissonLikelihoodFCN<BaseGradFunc>>(data, gradFun, useWeight, extended, executionPolicy);
         // do minimization
         return (useWeight) ? DoWeightMinimization(std::move(logl),&chi2) : DoMinimization(std::move(logl),&chi2);
      }
   }
   return false;
}
436
// NOTE(review): the signature and opening brace were lost in this copy;
// upstream declares bool Fitter::DoUnbinnedLikelihoodFit(bool extended,
// const ROOT::EExecutionPolicy & executionPolicy) { - confirm upstream.
   // perform a likelihood fit on a set of unbinned data

   std::shared_ptr<UnBinData> data = std::dynamic_pointer_cast<UnBinData>(fData);
   assert(data);

   // NOTE(review): a declaration of `useWeight` (presumably from
   // fConfig.UseWeightCorrection()) was dropped here in this copy.

   if (!fFunc && !fFunc_v) {
      MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit","model function is not set");
      return false;
   }

   if (useWeight && fConfig.MinosErrors() ) {
      MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit","MINOS errors cannot be computed in weighted likelihood fits");
      fConfig.SetMinosErrors(false);
   }


   // unbinned fit: record the data size for the result
   fBinFit = false;
   fDataSize = data->Size();

#ifdef DEBUG
   int ipar = 0;
   std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[ipar].IsBound() << " lower limit " << Config().ParamsSettings()[ipar].LowerLimit() << " upper limit " << Config().ParamsSettings()[ipar].UpperLimit() << std::endl;
#endif

   // logl fit (error should be 0.5) set if different than default values (of 1)
   // NOTE(review): the block setting the minimizer error definition to 0.5
   // was dropped here in this copy; the stray closing brace below belongs
   // to that lost block.
   }

   if (!fUseGradient) {
      // do minimization without using the gradient
      if (fFunc_v ){
         auto logl = std::make_unique<LogLikelihoodFCN<BaseFunc, IModelFunction_v>>(data, fFunc_v, useWeight, extended, executionPolicy);
         // do minimization
         return (useWeight) ? DoWeightMinimization(std::move(logl)) : DoMinimization(std::move(logl));
      } else {
         auto logl = std::make_unique<LogLikelihoodFCN<BaseFunc>>(data, fFunc, useWeight, extended, executionPolicy);
         return (useWeight) ? DoWeightMinimization(std::move(logl)) : DoMinimization(std::move(logl));
      }
   } else {
      // use gradient : check if fFunc provides gradient
      MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "use gradient from model function");
      if (extended) {
         MATH_WARN_MSG("Fitter::DoUnbinnedLikelihoodFit",
                       "Extended unbinned fit with gradient not yet supported - do a not-extended fit");
         extended = false;
      }
      if (fFunc_v) {
         std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
         if (!gradFun) {
            MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
            return false;
         }
         auto logl = std::make_unique<LogLikelihoodFCN<BaseGradFunc, IModelFunction_v>>(data, gradFun, useWeight, extended, executionPolicy);
         return (useWeight) ? DoWeightMinimization(std::move(logl)) : DoMinimization(std::move(logl));
      } else {
         std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
         if (!gradFun) {
            MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
            return false;
         }
         auto logl = std::make_unique<LogLikelihoodFCN<BaseGradFunc>>(data, gradFun, useWeight, extended, executionPolicy);
         return (useWeight) ? DoWeightMinimization(std::move(logl)) : DoMinimization(std::move(logl));
      }
   }
   return false;
}
508
509
511
512 std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
513 assert(data);
514
515 // perform a linear fit on a set of binned data
516 std::string prevminimizer = fConfig.MinimizerType();
517 fConfig.SetMinimizer("Linear");
518
519 fBinFit = true;
520
521 bool ret = DoLeastSquareFit();
523 return ret;
524}
525
526
// NOTE(review): the signature and opening brace were lost in this copy;
// upstream declares `bool Fitter::CalculateHessErrors() {`.
   // compute the Hesse errors according to configuration
   // set in the parameters and append value in fit result
   if (!ObjFunction()) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Objective function has not been set");
      return false;
   }

   // need a special treatment in case of weighted likelihood fit
   // (not yet implemented)
   // NOTE(review): fFitType == 2 presumably denotes a log-likelihood fit
   // (FitMethodFunction::kLogLikelihood) - confirm against the enum.
   if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Re-computation of Hesse errors not implemented for weighted likelihood fits");
      MATH_INFO_MSG("Fitter::CalculateHessErrors","Do the Fit using configure option FitConfig::SetParabErrors()");
      return false;
   }

   // a fit Result pointer must exist when a minimizer exists
   if (fMinimizer && !fResult ) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors", "FitResult has not been created");
      return false;
   }

   // update minimizer (recreate if not done or if name has changed)
   // NOTE(review): the condition line (presumably a failed call to
   // DoUpdateMinimizerOptions()) was dropped here in this copy.
      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Error re-initializing the minimizer");
      return false;
   }

   if (!fMinimizer ) {
      // this should not happen
      MATH_ERROR_MSG("Fitter::CalculateHessErrors", "Need to do a fit before calculating the errors");
      assert(false);
      return false;
   }

   //run Hesse
   bool ret = fMinimizer->Hesse();
   if (!ret) MATH_WARN_MSG("Fitter::CalculateHessErrors","Error when calculating Hessian");

   // update minimizer results with what comes out from Hesse
   // in case is empty - create from a FitConfig
   if (fResult->IsEmpty() )
      fResult = std::make_unique<ROOT::Fit::FitResult>(fConfig );

   // update obj function in case it was an external one
   if (fExtObjFunction) fObjFunction.reset(fExtObjFunction->Clone());
   fResult->fObjFunc = fObjFunction;

   // re-give a minimizer instance in case it has been changed
   ret |= fResult->Update(fMinimizer, fConfig, ret);

   // when possible get ncalls from FCN and set in fit result
   // NOTE(review): the guarding condition line was dropped here in this copy.
      fResult->fNCalls = GetNCallsFromFCN();
   }

   // set also new errors in FitConfig
   // NOTE(review): the statement updating the FitConfig was dropped here.

   return ret;
}
588
589
// NOTE(review): the signature and opening brace were lost in this copy;
// upstream declares `bool Fitter::CalculateMinosErrors() {`.
   // compute the Minos errors according to configuration
   // set in the parameters and append value in fit result
   // normally Minos errors are computed just after the minimization
   // (in DoMinimization) after minimizing if the
   // FitConfig::MinosErrors() flag is set

   if (!fMinimizer) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minimizer does not exist - cannot calculate Minos errors");
      return false;
   }

   if (!fResult || fResult->IsEmpty() ) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Invalid Fit Result - cannot calculate Minos errors");
      return false;
   }

   // NOTE(review): the guarding condition (presumably checking for a
   // weighted likelihood fit) was dropped here in this copy.
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Computation of MINOS errors not implemented for weighted likelihood fits");
      return false;
   }

   // update minimizer (but cannot re-create in this case). Must use an existing one
   if (!DoUpdateMinimizerOptions(false)) {
      // NOTE(review): the location string says CalculateHessErrors - likely
      // an upstream copy-paste slip; kept as-is (runtime string).
      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Error re-initializing the minimizer");
      return false;
   }

   // set flag to compute Minos error to false in FitConfig to avoid that
   // following minimization calls perform unwanted Minos error calculations
   /// fConfig.SetMinosErrors(false);


   const std::vector<unsigned int> & ipars = fConfig.MinosParams();
   unsigned int n = (!ipars.empty()) ? ipars.size() : fResult->Parameters().size();
   bool ok = false;

   int iparNewMin = 0;
   int iparMax = n;
   int iter = 0;
   // rerun minos for the parameters run before a new Minimum has been found
   do {
      if (iparNewMin > 0)
         MATH_INFO_MSG("Fitter::CalculateMinosErrors","Run again Minos for some parameters because a new Minimum has been found");
      iparNewMin = 0;
      for (int i = 0; i < iparMax; ++i) {
         double elow, eup;
         unsigned int index = (!ipars.empty()) ? ipars[i] : i;
         bool ret = fMinimizer->GetMinosError(index, elow, eup);
         // flags case when a new minimum has been found
         if ((fMinimizer->MinosStatus() & 8) != 0) {
            iparNewMin = i;
         }
         if (ret)
            fResult->SetMinosError(index, elow, eup);
         ok |= ret;
      }

      // NOTE(review): a statement (presumably restricting iparMax to the
      // parameters run before the new minimum) was dropped here in this copy.
      iter++; // to avoid infinite looping
   }
   while( iparNewMin > 0 && iter < 10);
   if (!ok) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minos error calculation failed for all the selected parameters");
   }

   // update obj function in case it was an external one
   if (fExtObjFunction) fObjFunction.reset(fExtObjFunction->Clone());
   fResult->fObjFunc = fObjFunction;

   // re-give a minimizer instance in case it has been changed
   // but maintain previous valid status. Do not set result to false if minos failed
   ok &= fResult->Update(fMinimizer, fConfig, fResult->IsValid());

   return ok;
}
666
667
668
669// traits for distinguishing fit methods functions from generic objective functions
670template<class Func>
672 static unsigned int NCalls(const Func & ) { return 0; }
673 static int Type(const Func & ) { return -1; }
674 static bool IsGrad() { return false; }
675};
676template<>
678 static unsigned int NCalls(const ROOT::Math::FitMethodFunction & f ) { return f.NCalls(); }
679 static int Type(const ROOT::Math::FitMethodFunction & f) { return f.Type(); }
680 static bool IsGrad() { return false; }
681};
682template<>
684 static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction & f ) { return f.NCalls(); }
685 static int Type(const ROOT::Math::FitMethodGradFunction & f) { return f.Type(); }
686 static bool IsGrad() { return true; }
687};
688
// NOTE(review): the signature and opening brace were lost in this copy;
// upstream declares `bool Fitter::DoInitMinimizer() {`.
   //initialize minimizer by creating it
   // and set there the objective function
   // obj function must have been set before
   auto objFunction = ObjFunction();
   if (!objFunction) {
      MATH_ERROR_MSG("Fitter::DoInitMinimizer","Objective function has not been set");
      return false;
   }

   // check configuration and objective function
   if ( fConfig.ParamsSettings().size() != objFunction->NDim() ) {
      MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong function dimension or wrong size for FitConfig");
      return false;
   }

   // create first Minimizer
   // using an auto_Ptr will delete the previous existing one
   fMinimizer = std::shared_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
   if (fMinimizer == nullptr) {
      MATH_ERROR_MSG("Fitter::DoInitMinimizer","Minimizer cannot be created");
      return false;
   }

   // in case of gradient function one needs to downcast the pointer
   if (fUseGradient) {
      // NOTE(review): the declaration of `gradfcn` (presumably a
      // dynamic_cast of objFunction to the gradient interface) was
      // dropped here in this copy.
      if (!gradfcn) {
         MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong type of function - it does not provide gradient");
         return false;
      }
      fMinimizer->SetFunction( *gradfcn);
      // set also Hessian if available
      if (Config().MinimizerType() == "Minuit2") {
         // NOTE(review): the first line of the declaration of `fitGradFcn`
         // was dropped here; the dangling dynamic_cast below is its
         // initializer.
            dynamic_cast<const ROOT::Math::FitMethodGradFunction *>(gradfcn);
         if (fitGradFcn && fitGradFcn->HasHessian()) {
            // adapter: expand the packed lower-triangular Hessian returned
            // by the FCN into the full symmetric ndim x ndim matrix the
            // minimizer expects
            auto hessFcn = [=](std::span<const double> x, double *hess) {
               unsigned int ndim = x.size();
               unsigned int nh = ndim * (ndim + 1) / 2;
               std::vector<double> h(nh);
               bool ret = fitGradFcn->Hessian(x.data(), h.data());
               if (!ret) return false;
               for (unsigned int i = 0; i < ndim; i++) {
                  for (unsigned int j = 0; j <= i; j++) {
                     unsigned int index = j + i * (i + 1) / 2; // formula for j < i
                     hess[ndim * i + j] = h[index];
                     if (j != i)
                        hess[ndim * j + i] = h[index];
                  }
               }
               return true;
            };

            fMinimizer->SetHessianFunction(hessFcn);
         }
      }
   }
   else
      fMinimizer->SetFunction( *objFunction);


   // NOTE(review): a statement (presumably fMinimizer->SetVariables(...)
   // passing the parameter settings) was dropped here in this copy.

   // if requested parabolic error do correct error analysis by the minimizer (call HESSE)
   if (fConfig.ParabErrors()) fMinimizer->SetValidError(true);

   return true;

}
759
761 // update minimizer options when re-doing a Fit or computing Hesse or Minos errors
762
763
764 // create a new minimizer if it is different type
765 // minimizer type string stored in FitResult is "minimizer name" + " / " + minimizer algo
766 std::string newMinimType = fConfig.MinimizerName();
767 if (fMinimizer && fResult && newMinimType != fResult->MinimizerType()) {
768 // if a different minimizer is allowed (e.g. when calling Hesse)
769 if (canDifferentMinim) {
770 std::string msg = "Using now " + newMinimType;
771 MATH_INFO_MSG("Fitter::DoUpdateMinimizerOptions: ", msg.c_str());
772 if (!DoInitMinimizer() )
773 return false;
774 }
775 else {
776 std::string msg = "Cannot change minimizer. Continue using " + fResult->MinimizerType();
777 MATH_WARN_MSG("Fitter::DoUpdateMinimizerOptions",msg.c_str());
778 }
779 }
780
781 // create minimizer if it was not done before
782 if (!fMinimizer) {
783 if (!DoInitMinimizer())
784 return false;
785 }
786
787 // set new minimizer options (but not functions and parameters)
788 fMinimizer->SetOptions(fConfig.MinimizerOptions());
789 return true;
790}
791
// NOTE(review): the signature and opening brace were lost in this copy;
// upstream declares `bool Fitter::DoMinimization(const
// ROOT::Math::IMultiGenFunction * chi2func) {`.
   // perform the minimization (assume we have already initialized the minimizer)

   // NOTE(review): a line (presumably an assert on fMinimizer) was dropped
   // here in this copy.

   bool isValid = fMinimizer->Minimize();

   // create the result object if it does not exist yet
   if (!fResult) fResult = std::make_unique<FitResult>();

   fResult->FillResult(fMinimizer,fConfig, fFunc, isValid, fDataSize, fFitType, chi2func );

   // if requested run Minos after minimization
   if (isValid && fConfig.MinosErrors()) {
      // minos error calculation will update also FitResult
      // NOTE(review): the call (presumably CalculateMinosErrors()) was
      // dropped here in this copy.
   }

   // when possible get number of calls from FCN and set in fit result
   // NOTE(review): the guarding condition line was dropped here in this copy.
      fResult->fNCalls = GetNCallsFromFCN();
   }

   // fill information in fit result
   // if using an external obj function clone it for storing in FitResult
   if (fExtObjFunction) fObjFunction.reset(fExtObjFunction->Clone());
   fResult->fObjFunc = fObjFunction;
   fResult->fFitData = fData;

#ifdef DEBUG
   std::cout << "ROOT::Fit::Fitter::DoMinimization : ncalls = " << fResult->fNCalls << " type of objfunc " << fFitFitResType << " typeid: " << typeid(*fObjFunction).name() << " use gradient " << fUseGradient << std::endl;
#endif

   // NOTE(review): the condition line (presumably checking
   // fConfig.NormalizeErrors() for a least-square fit) was dropped here.
      fResult->NormalizeErrors();

   // set also new parameter values and errors in FitConfig
   if (fConfig.UpdateAfterFit() && isValid) DoUpdateFitConfig();

   return isValid;
}
832template<class ObjFunc_t>
833bool Fitter::DoMinimization(std::unique_ptr<ObjFunc_t> objFunc, const ROOT::Math::IMultiGenFunction * chi2func) {
834 // perform the minimization initializing the minimizer starting from a given obj function
835 fFitType = objFunc->Type();
836 fExtObjFunction = nullptr;
837 fObjFunction = std::move(objFunc);
838 if (!DoInitMinimizer()) return false;
839 return DoMinimization(chi2func);
840}
841template<class ObjFunc_t>
843 // perform the minimization initializing the minimizer starting from a given obj function
844 // and apply afterwards the correction for weights. This applies only for logL fitting
845 this->fFitType = objFunc->Type();
846 fExtObjFunction = nullptr;
847 // need to use a temporary shared pointer to the objective function since we cannot use the unique pointer when it has been moved
848 std::shared_ptr<ObjFunc_t> sObjFunc{ std::move(objFunc)};
850 if (!DoInitMinimizer()) return false;
851 if (!DoMinimization(chi2func)) return false;
852 sObjFunc->UseSumOfWeightSquare();
854}
855
856
858 // update the fit configuration after a fit using the obtained result
859 if (fResult->IsEmpty() || !fResult->IsValid() ) return;
860 for (unsigned int i = 0; i < fConfig.NPar(); ++i) {
862 par.SetValue( fResult->Value(i) );
863 if (fResult->Error(i) > 0) par.SetStepSize( fResult->Error(i) );
864 }
865}
866
868 // retrieve ncalls from the fit method functions
869 // this function is called when minimizer does not provide a way of returning the number of function calls
870 int ncalls = 0;
871 if (!fUseGradient) {
873 if (fcn) ncalls = fcn->NCalls();
874 }
875 else {
877 if (fcn) ncalls = fcn->NCalls();
878 }
879 return ncalls;
880}
881
882
884 // apply correction for weight square
885 // Compute Hessian of the loglikelihood function using the sum of the weight squared
886 // This method assumes:
887 // - a fit has been done before and a covariance matrix exists
888 // - the objective function is a likelihood function and Likelihood::UseSumOfWeightSquare()
889 // has been called before
890
891 if (fMinimizer == nullptr) {
892 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Must perform first a fit before applying the correction");
893 return false;
894 }
895
896 unsigned int n = loglw2.NDim();
897 // correct errors for weight squared
898 std::vector<double> cov(n*n);
899 bool ret = fMinimizer->GetCovMatrix(&cov[0] );
900 if (!ret) {
901 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Previous fit has no valid Covariance matrix");
902 return false;
903 }
904 // need to use new obj function computed with weight-square
905 std::shared_ptr<ROOT::Math::IMultiGenFunction> objFunc(loglw2.Clone());
906 fObjFunction.swap( objFunc );
907
908 // need to re-initialize the minimizer for the changes applied in the
909 // objective functions
910 if (!DoInitMinimizer()) return false;
911
912 //std::cout << "Running Hesse ..." << std::endl;
913
914 // run eventually before a minimization
915 // ignore its error
916 if (minimizeW2L) fMinimizer->Minimize();
917 // run Hesse on the log-likelihood build using sum of weight squared
918 ret = fMinimizer->Hesse();
919 if (!ret) {
920 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error running Hesse on weight2 likelihood - cannot compute errors");
921 return false;
922 }
923
924 if (fMinimizer->CovMatrixStatus() != 3) {
925 MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not accurate, the errors may be not reliable");
926 if (fMinimizer->CovMatrixStatus() == 2)
927 MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood was forced to be defined positive");
928 if (fMinimizer->CovMatrixStatus() <= 0)
929 // probably should have failed before
930 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not valid !");
931 }
932
933 // get Hessian matrix from weight-square likelihood
934 std::vector<double> hes(n*n);
935 ret = fMinimizer->GetHessianMatrix(&hes[0] );
936 if (!ret) {
937 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error retrieving Hesse on weight2 likelihood - cannot compute errors");
938 return false;
939 }
940
941
942 // perform product of matrix cov * hes * cov
943 // since we do not want to add matrix dependence do product by hand
944 // first do hes * cov
945 std::vector<double> tmp(n*n);
946 for (unsigned int i = 0; i < n; ++i) {
947 for (unsigned int j = 0; j < n; ++j) {
948 for (unsigned int k = 0; k < n; ++k)
949 tmp[i*n+j] += hes[i*n + k] * cov[k*n + j];
950 }
951 }
952 // do multiplication now cov * tmp save result
953 std::vector<double> newCov(n*n);
954 for (unsigned int i = 0; i < n; ++i) {
955 for (unsigned int j = 0; j < n; ++j) {
956 for (unsigned int k = 0; k < n; ++k)
957 newCov[i*n+j] += cov[i*n + k] * tmp[k*n + j];
958 }
959 }
960 // update fit result with new corrected covariance matrix
961 unsigned int k = 0;
962 for (unsigned int i = 0; i < n; ++i) {
963 fResult->fErrors[i] = std::sqrt( newCov[i*(n+1)] );
964 for (unsigned int j = 0; j <= i; ++j)
965 fResult->fCovMatrix[k++] = newCov[i *n + j];
966 }
967
968 // restore previous used objective function
969 fObjFunction.swap( objFunc );
970
971 return true;
972}
973
974 } // end namespace Fit
975
976} // end namespace ROOT
#define MATH_INFO_MSG(loc, str)
Pre-processor macro to report messages which can be configured to use ROOT error or simply an std::io...
Definition Error.h:77
#define MATH_ERROR_MSG(loc, str)
Definition Error.h:83
#define MATH_WARN_MSG(loc, str)
Definition Error.h:80
#define f(i)
Definition RSha256.hxx:104
#define h(i)
Definition RSha256.hxx:106
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows the typed iteration through a TCollection.
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void data
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t result
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t index
const std::vector< unsigned int > & MinosParams() const
return vector of parameter indices for which the Minos Error will be computed
Definition FitConfig.h:222
bool UpdateAfterFit() const
Update configuration after a fit using the FitResult.
Definition FitConfig.h:215
void SetMinimizer(const char *type, const char *algo=nullptr)
set minimizer type and algorithm
Definition FitConfig.h:183
void SetMinosErrors(bool on=true)
set Minos errors computation to be performed after fitting
Definition FitConfig.h:233
bool NormalizeErrors() const
flag to check if resulting errors are be normalized according to chi2/ndf
Definition FitConfig.h:206
bool ParabErrors() const
do analysis for parabolic errors
Definition FitConfig.h:209
unsigned int NPar() const
number of parameters settings
Definition FitConfig.h:98
void SetParamsSettings(unsigned int npar, const double *params, const double *vstep=nullptr)
set the parameter settings from number of parameters and a vector of values and optionally step value...
std::string MinimizerName() const
return Minimizer full name (type / algorithm)
bool UseWeightCorrection() const
Apply Weight correction for error matrix computation.
Definition FitConfig.h:218
const std::vector< ROOT::Fit::ParameterSettings > & ParamsSettings() const
get the vector of parameter settings (const method)
Definition FitConfig.h:88
ROOT::Math::Minimizer * CreateMinimizer()
create a new minimizer according to chosen configuration
void CreateParamsSettings(const ROOT::Math::IParamMultiFunctionTempl< T > &func)
set the parameter settings from a model function.
Definition FitConfig.h:111
const std::string & MinimizerType() const
return type of minimizer package
Definition FitConfig.h:191
const ParameterSettings & ParSettings(unsigned int i) const
get the parameter settings for the i-th parameter (const method)
Definition FitConfig.h:78
ROOT::Math::MinimizerOptions & MinimizerOptions()
access to the minimizer control parameter (non const method)
Definition FitConfig.h:169
bool MinosErrors() const
do minos errors analysis on the parameters
Definition FitConfig.h:212
bool EvalFCN()
Perform a simple FCN evaluation.
Definition Fitter.cxx:285
const ROOT::Math::IMultiGenFunction * fExtObjFunction
! pointer to an external FCN
Definition Fitter.h:574
bool FitFCN()
Perform a fit with the previously set FCN function.
Definition Fitter.cxx:269
void DoUpdateFitConfig()
Definition Fitter.cxx:857
bool DoMinimization(std::unique_ptr< ObjFunc_t > f, const ROOT::Math::IMultiGenFunction *chifunc=nullptr)
do minimization
Definition Fitter.cxx:833
bool DoSetFCN(bool useExtFCN, const ROOT::Math::IMultiGenFunction &fcn, const double *params, unsigned int dataSize, int fitType)
Set Objective function.
Definition Fitter.cxx:137
int fDataSize
size of data sets (need for Fumili or LM fitters)
Definition Fitter.h:558
bool DoUnbinnedLikelihoodFit(bool extended=false, const ROOT::EExecutionPolicy &executionPolicy=ROOT::EExecutionPolicy::kSequential)
un-binned likelihood fit
Definition Fitter.cxx:437
const ROOT::Math::IBaseFunctionMultiDimTempl< double > * ObjFunction() const
Return pointer to the used objective function for fitting.
Definition Fitter.h:542
std::shared_ptr< ROOT::Math::Minimizer > fMinimizer
! pointer to used minimizer
Definition Fitter.h:568
bool DoWeightMinimization(std::unique_ptr< ObjFunc_t > f, const ROOT::Math::IMultiGenFunction *chifunc=nullptr)
Definition Fitter.cxx:842
bool DoBinnedLikelihoodFit(bool extended=true, const ROOT::EExecutionPolicy &executionPolicy=ROOT::EExecutionPolicy::kSequential)
binned likelihood fit
Definition Fitter.cxx:357
int fFitType
type of fit (0 undefined, 1 least square, 2 likelihood, 3 binned likelihood)
Definition Fitter.h:556
std::shared_ptr< ROOT::Fit::FitData > fData
! pointer to the fit data (binned or unbinned data)
Definition Fitter.h:570
bool fUseGradient
flag to indicate if using gradient or not
Definition Fitter.h:550
bool fBinFit
flag to indicate if fit is binned in case of false the fit is unbinned or undefined) flag it is used ...
Definition Fitter.h:552
std::shared_ptr< ROOT::Math::IMultiGenFunction > fObjFunction
! pointer to used objective function
Definition Fitter.h:572
bool ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction &loglw2, bool minimizeW2L=false)
apply correction in the error matrix for the weights for likelihood fits This method can be called on...
Definition Fitter.cxx:883
const FitConfig & Config() const
access to the fit configuration (const method)
Definition Fitter.h:425
bool DoLeastSquareFit(const ROOT::EExecutionPolicy &executionPolicy=ROOT::EExecutionPolicy::kSequential)
least square fit
Definition Fitter.cxx:306
bool SetFCN(unsigned int npar, Function &fcn, const double *params=nullptr, unsigned int dataSize=0, int fitType=0)
Set a generic FCN function as a C++ callable object implementing double () (const double *) Note that...
Definition Fitter.h:654
std::shared_ptr< IModelFunction_v > fFunc_v
! copy of the fitted function containing on output the fit result
Definition Fitter.h:562
std::shared_ptr< ROOT::Fit::FitResult > fResult
! pointer to the object containing the result of the fit
Definition Fitter.h:566
bool CalculateMinosErrors()
perform an error analysis on the result using MINOS To be called only after fitting and when a minimi...
Definition Fitter.cxx:590
bool DoUpdateMinimizerOptions(bool canDifferentMinim=true)
Definition Fitter.cxx:760
void SetFunction(const IModelFunction &func, bool useGradient=false)
Set the fitted function (model function) from a parametric function interface.
Definition Fitter.cxx:59
bool CalculateHessErrors()
perform an error analysis on the result using the Hessian Errors are obtained from the inverse of the...
Definition Fitter.cxx:527
FitConfig fConfig
fitter configuration (options and parameter settings)
Definition Fitter.h:560
Fitter()
Default constructor.
Definition Fitter.h:104
std::shared_ptr< IModelFunction > fFunc
! copy of the fitted function containing on output the fit result
Definition Fitter.h:564
bool DoLinearFit()
linear least square fit
Definition Fitter.cxx:510
bool DoInitMinimizer()
Definition Fitter.cxx:689
int GetNCallsFromFCN()
Definition Fitter.cxx:867
Class, describing value, limits and step size of the parameters Provides functionality also to set/re...
void SetValue(double val)
set the value
void SetStepSize(double err)
set the step size
FitMethodFunction class Interface for objective functions (like chi2 and likelihood used in the fit) ...
Documentation for the abstract class IBaseFunctionMultiDim.
Definition IFunction.h:63
virtual IBaseFunctionMultiDimTempl< T > * Clone() const =0
Clone a function.
Interface (abstract class) for multi-dimensional functions providing a gradient calculation.
Definition IFunction.h:252
Specialized IParamFunction interface (abstract class) for one-dimensional parametric functions It is ...
Interface (abstract class) for parametric gradient multi-dimensional functions providing in addition ...
Interface (abstract class) for parametric one-dimensional gradient functions providing in addition to...
double ErrorDef() const
error definition
int PrintLevel() const
non-static methods for retrieving options
void SetErrorDef(double err)
set error def
MultiDimParamFunctionAdapter class to wrap a one-dimensional parametric function in a multi dimension...
MultiDimParamGradFunctionAdapter class to wrap a one-dimensional parametric gradient function in a mu...
const_iterator begin() const
const_iterator end() const
Double_t x[n]
Definition legend1.C:17
const Int_t n
Definition legend1.C:16
TFitResultPtr Fit(FitObject *h1, TF1 *f1, Foption_t &option, const ROOT::Math::MinimizerOptions &moption, const char *goption, ROOT::Fit::DataRange &range)
Definition HFitImpl.cxx:133
double gDefaultErrorDef
Definition Fitter.cxx:48
Namespace for new ROOT classes and functions.
static int Type(const ROOT::Math::FitMethodFunction &f)
Definition Fitter.cxx:679
static unsigned int NCalls(const ROOT::Math::FitMethodFunction &f)
Definition Fitter.cxx:678
static int Type(const ROOT::Math::FitMethodGradFunction &f)
Definition Fitter.cxx:685
static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction &f)
Definition Fitter.cxx:684
static bool IsGrad()
Definition Fitter.cxx:674
static unsigned int NCalls(const Func &)
Definition Fitter.cxx:672
static int Type(const Func &)
Definition Fitter.cxx:673