Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
Fitter.cxx
Go to the documentation of this file.
1// @(#)root/mathcore:$Id$
2// Author: L. Moneta Mon Sep 4 17:00:10 2006
3
4/**********************************************************************
5 * *
6 * Copyright (c) 2006 LCG ROOT Math Team, CERN/PH-SFT *
7 * *
8 * *
9 **********************************************************************/
10
11// Implementation file for class Fitter
12
13
14#include "Fit/Fitter.h"
15#include "Fit/Chi2FCN.h"
18#include "Math/Minimizer.h"
21#include "Fit/BasicFCN.h"
22#include "Fit/BinData.h"
23#include "Fit/UnBinData.h"
24#include "Fit/FcnAdapter.h"
25#include "Fit/FitConfig.h"
26#include "Fit/FitResult.h"
27#include "Math/Error.h"
28
29#include <memory>
30
31#include "Math/IParamFunction.h"
32
34
35// #include "TMatrixDSym.h"
36// for debugging
37//#include "TMatrixD.h"
38// #include <iomanip>
39
40namespace ROOT {
41
42 namespace Fit {
43
44// use a static variable to get default minimizer options for error def
45// to see if user has changed it later on. If it has not been changed we set
46// for the likelihood method an error def of 0.5
47// t.b.d : multiply likelihood by 2 so have same error def definition as chi2
49
50
51Fitter::Fitter(const std::shared_ptr<FitResult> & result) :
52 fResult(result)
53{
54 if (result->fFitFunc) SetFunction(*fResult->fFitFunc); // this will create also the configuration
55 if (result->fObjFunc) fObjFunction = fResult->fObjFunc;
56 if (result->fFitData) fData = fResult->fFitData;
57}
58
/// Set the model (fit) function, cloning it and keeping an owned copy.
/// When a gradient is requested but the function does not implement one,
/// falls back to the non-gradient path with a warning.
59void Fitter::SetFunction(const IModelFunction & func, bool useGradient)
60{
61
62   fUseGradient = useGradient;
63   if (fUseGradient) {
64      const IGradModelFunction * gradFunc = dynamic_cast<const IGradModelFunction*>(&func);
65      if (gradFunc) {
66         SetFunction(*gradFunc, true);
67         return;
68      }
69      else {
70         MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
71      }
72   }
73   fUseGradient = false;
74
75   // set the fit model function (clone the given one and keep a copy )
76   //std::cout << "set a non-grad function" << std::endl;
77
78   fFunc = std::shared_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone() ) );
79   assert(fFunc);
80
81   // creates the parameter settings
// NOTE(review): original source line 82 is missing from this extraction —
// presumably the call that creates the parameter settings from fFunc; confirm upstream.
83   fFunc_v.reset();
84}
85
/// Set a 1-D model function; the function is wrapped (and cloned) into a
/// multi-dimensional adapter. Falls back to non-gradient use when a gradient
/// is requested but not provided.
86void Fitter::SetFunction(const IModel1DFunction & func, bool useGradient)
87{
88   fUseGradient = useGradient;
89   if (fUseGradient) {
90      const IGradModel1DFunction * gradFunc = dynamic_cast<const IGradModel1DFunction*>(&func);
91      if (gradFunc) {
92         SetFunction(*gradFunc, true);
93         return;
94      }
95      else {
96         MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
97      }
98   }
99   fUseGradient = false;
100   //std::cout << "set a 1d function" << std::endl;
101
102   // function is cloned when creating the adapter
103   fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamFunctionAdapter(func));
104
105   // creates the parameter settings
// NOTE(review): original source line 106 is missing from this extraction —
// presumably the call that creates the parameter settings from fFunc; confirm upstream.
107   fFunc_v.reset();
108}
109
/// Set a gradient-providing model function (cloned and owned).
110void Fitter::SetFunction(const IGradModelFunction & func, bool useGradient)
111{
112   fUseGradient = useGradient;
113   //std::cout << "set a grad function" << std::endl;
114   // set the fit model function (clone the given one and keep a copy )
115   fFunc = std::shared_ptr<IModelFunction>( dynamic_cast<IGradModelFunction *> ( func.Clone() ) );
116   assert(fFunc);
117
118   // creates the parameter settings
// NOTE(review): original source line 119 is missing from this extraction —
// presumably the call that creates the parameter settings from fFunc; confirm upstream.
120   fFunc_v.reset();
121}
122
123
/// Set a 1-D gradient-providing model function; wrapped (and cloned) into a
/// multi-dimensional gradient adapter.
124void Fitter::SetFunction(const IGradModel1DFunction & func, bool useGradient)
125{
126   //std::cout << "set a 1d grad function" << std::endl;
127   fUseGradient = useGradient;
128   // function is cloned when creating the adapter
129   fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamGradFunctionAdapter(func));
130
131   // creates the parameter settings
// NOTE(review): original source line 132 is missing from this extraction —
// presumably the call that creates the parameter settings from fFunc; confirm upstream.
133   fFunc_v.reset();
134}
135
136
137bool Fitter::DoSetFCN(bool extFcn, const ROOT::Math::IMultiGenFunction & fcn, const double * params, unsigned int dataSize, int fitType) {
138 // Set the objective function for the fit. First parameter specifies if function object is managed external or internal.
139 // In case of an internal function object we need to clone because it is a temporary one
140 // if params is not NULL create the parameter settings
141 fUseGradient = false;
142 unsigned int npar = fcn.NDim();
143 if (npar == 0) {
144 MATH_ERROR_MSG("Fitter::SetFCN","FCN function has zero parameters ");
145 return false;
146 }
147 if (params != nullptr || fConfig.ParamsSettings().size() == 0)
148 fConfig.SetParamsSettings(npar, params);
149 else {
150 if ( fConfig.ParamsSettings().size() != npar) {
151 MATH_ERROR_MSG("Fitter::SetFCN","wrong fit parameter settings");
152 return false;
153 }
154 }
155 fFitType = fitType;
157
158 fDataSize = dataSize;
159
160 // store external provided FCN without cloning it
161 // it will be cloned in fObjFunc after the fit
162 if (extFcn) {
163 fExtObjFunction = &fcn;
164 fObjFunction.reset();
165 }
166 else {
167 // case FCN is built from Minuit interface so function object is created internally in Fitter class
168 // and needs to be cloned and managed
169 fExtObjFunction = nullptr;
170 fObjFunction.reset(fcn.Clone());
171 }
172
173 // in case a model function and data exists from a previous fit - reset shared-ptr
174 if (fResult && fResult->FittedFunction() == nullptr && fFunc) fFunc.reset();
175 if (fData) fData.reset();
176
177 return true;
178}
179bool Fitter::SetFCN(const ROOT::Math::IMultiGenFunction & fcn, const double * params, unsigned int dataSize, int fitType) {
180 // set the objective function for the fit
181 return DoSetFCN(true, fcn, params, dataSize, fitType);
182}
/// Set the objective function for the fit together with a model function
/// (the model function is cloned and owned).
183bool Fitter::SetFCN(const ROOT::Math::IMultiGenFunction &fcn, const IModelFunction & func, const double *params, unsigned int dataSize, int fitType) {
184   // set the objective function for the fit and a model function
185   if (!SetFCN(fcn, params, dataSize, fitType) ) return false;
186   // need to set fFunc afterwards because SetFCN could reset fFunc
187   fFunc = std::unique_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone()));
188   if(fFunc) {
// NOTE(review): original source line 189 is missing from this extraction —
// likely an action performed on the successfully cloned function; confirm upstream.
190      return true;
191   }
192   return false;
193}
194
195bool Fitter::SetFCN(const ROOT::Math::FitMethodFunction &fcn, const double *params)
196{
197 // set the objective function for the fit
198 // if params is not NULL create the parameter settings
199 int fitType = static_cast<int>(fcn.Type());
200 if (!SetFCN(fcn, params, fcn.NPoints(), fitType))
201 return false;
202 fUseGradient = false;
203 return true;
204}
205
206bool Fitter::SetFCN(const ROOT::Math::FitMethodGradFunction &fcn, const double *params)
207{
208 // set the objective function for the fit
209 // if params is not NULL create the parameter settings
210 int fitType = static_cast<int>(fcn.Type());
211 if (!SetFCN(fcn, params, fcn.NPoints(), fitType))
212 return false;
213 fUseGradient = true;
214 return true;
215}
216
217bool Fitter::FitFCN(const BaseFunc &fcn, const double *params, unsigned int dataSize, int fitType)
218{
219 // fit a user provided FCN function
220 // create fit parameter settings
221 if (!SetFCN(fcn, params, dataSize, fitType))
222 return false;
223 return FitFCN();
224}
225
226bool Fitter::FitFCN(const ROOT::Math::FitMethodFunction &fcn, const double *params)
227{
228 // fit using the passed objective function for the fit
229 if (!SetFCN(fcn, params))
230 return false;
231 return FitFCN();
232}
233
234bool Fitter::FitFCN(const ROOT::Math::FitMethodGradFunction &fcn, const double *params)
235{
236 // fit using the passed objective function for the fit
237 if (!SetFCN(fcn, params))
238 return false;
239 return FitFCN();
240}
241
242bool Fitter::SetFCN(MinuitFCN_t fcn, int npar, const double *params, unsigned int dataSize, int fitType)
243{
244 // set TMinuit style FCN type (global function pointer)
245 // create corresponding objective function from that function
246
247 if (npar == 0) {
248 npar = fConfig.ParamsSettings().size();
249 if (npar == 0) {
250 MATH_ERROR_MSG("Fitter::FitFCN", "Fit Parameter settings have not been created ");
251 return false;
252 }
253 }
254
255 ROOT::Fit::FcnAdapter newFcn(fcn, npar);
256 return DoSetFCN(false,newFcn, params, dataSize, fitType);
257}
258
259bool Fitter::FitFCN(MinuitFCN_t fcn, int npar, const double *params, unsigned int dataSize, int fitType)
260{
261 // fit using Minuit style FCN type (global function pointer)
262 // create corresponding objective function from that function
263 if (!SetFCN(fcn, npar, params, dataSize, fitType))
264 return false;
265 fUseGradient = false;
266 return FitFCN();
267}
268
/// Run the fit using the previously set FCN.
// NOTE(review): the function header (original source line 269, presumably
// `bool Fitter::FitFCN()`) was lost in this extraction.
270{
271   // fit using the previously set FCN function
272
273
274   if (!fExtObjFunction && !fObjFunction) {
275      MATH_ERROR_MSG("Fitter::FitFCN", "Objective function has not been set");
276      return false;
277   }
278   // look if FCN is of a known type and we can get retrieve the model function and data objects
279   if (!fFunc || !fData)
280      ExamineFCN();
281   // init the minimizer
282   if (!DoInitMinimizer())
283      return false;
284   // perform the minimization
285   return DoMinimization();
286}
287
/// Evaluate the objective function once at the current parameter values and
/// store the value (and one call) in a freshly created FitResult.
// NOTE(review): the function header (original source line 288, presumably
// `bool Fitter::EvalFCN()`) was lost in this extraction.
289{
290   // evaluate the FCN using the stored values in fConfig
291
// NOTE(review): fResult is dereferenced here without a null check —
// presumably a previous fit/result is required; verify against callers.
292   if (fFunc && fResult->FittedFunction() == nullptr)
293      fFunc.reset();
294
295   if (!ObjFunction()) {
296      MATH_ERROR_MSG("Fitter::FitFCN", "Objective function has not been set");
297      return false;
298   }
299   // create a Fit result from the fit configuration
300   fResult = std::make_shared<ROOT::Fit::FitResult>(fConfig);
301   // evaluate one time the FCN
302   double fcnval = (*ObjFunction())(fResult->GetParams());
303   // update fit result
304   fResult->fVal = fcnval;
305   fResult->fNCalls++;
306   return true;
307}
308
/// Perform a chi-square (least squares) fit on binned data, choosing the
/// scalar/vectorized and gradient/non-gradient Chi2FCN variant.
// NOTE(review): the function header (original source line 309, presumably
// `bool Fitter::DoLeastSquareFit(const ROOT::EExecutionPolicy &executionPolicy)`)
// was lost in this extraction.
310{
311
312   // perform a chi2 fit on a set of binned data
313   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
314   assert(data);
315
316   // check function
317   if (!fFunc && !fFunc_v) {
318      MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "model function is not set");
319      return false;
320   } else {
321
322#ifdef DEBUG
323      std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[3].IsBound() << " lower limit "
324                << Config().ParamsSettings()[3].LowerLimit() << " upper limit "
325                << Config().ParamsSettings()[3].UpperLimit() << std::endl;
326#endif
327
328      fBinFit = true;
329      fDataSize = data->Size();
330      // check if fFunc provides gradient
331      if (!fUseGradient) {
332         // do minimization without using the gradient
333         if (fFunc_v) {
334            return DoMinimization(std::make_unique<Chi2FCN<BaseFunc, IModelFunction_v>>(data, fFunc_v, executionPolicy));
335         } else {
336            return DoMinimization(std::make_unique<Chi2FCN<BaseFunc>>(data, fFunc, executionPolicy));
337         }
338      } else {
339         // use gradient
// NOTE(review): original source line 340 is missing from this extraction —
// possibly a condition guarding the INFO message below; confirm upstream.
341         MATH_INFO_MSG("Fitter::DoLeastSquareFit", "use gradient from model function");
342
343         if (fFunc_v) {
344            std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
345            if (gradFun) {
346               return DoMinimization(std::make_unique<Chi2FCN<BaseGradFunc, IModelFunction_v>>(data, gradFun, executionPolicy));
347            }
348         } else {
349            std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
350            if (gradFun) {
351               return DoMinimization(std::make_unique<Chi2FCN<BaseGradFunc>>(data, gradFun, executionPolicy));
352            }
353         }
354         MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "wrong type of function - it does not provide gradient");
355      }
356   }
357   return false;
358}
359
/// Perform a (by default extended, Poisson) likelihood fit on binned data,
/// selecting the appropriate PoissonLikelihoodFCN variant and applying the
/// weight correction when requested.
360bool Fitter::DoBinnedLikelihoodFit(bool extended, const ROOT::EExecutionPolicy &executionPolicy)
361{
362   // perform a likelihood fit on a set of binned data
363   // The fit is extended (Poisson logl) by default
364
365   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
366   assert(data);
367
368   bool useWeight = fConfig.UseWeightCorrection();
369
370   // check function
371   if (!fFunc && !fFunc_v) {
372      MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "model function is not set");
373      return false;
374   }
375
376   // logl fit (error should be 0.5) set if different than default values (of 1)
// NOTE(review): original source lines 377-378 are missing from this extraction —
// presumably the `if (...) { ... SetErrorDef(0.5);` block whose closing brace follows; confirm upstream.
379   }
380
381   if (useWeight && fConfig.MinosErrors()) {
382      MATH_INFO_MSG("Fitter::DoBinnedLikelihoodFit", "MINOS errors cannot be computed in weighted likelihood fits");
383      fConfig.SetMinosErrors(false);
384   }
385
386   fBinFit = true;
387   fDataSize = data->Size();
388
389   if (!fUseGradient) {
390      // do minimization without using the gradient
391      if (fFunc_v) {
392         // create a chi2 function to be used for the equivalent chi-square
// NOTE(review): original source line 393 (the declaration of `chi2`, referenced below) is missing — confirm upstream.
394         auto logl = std::make_unique<PoissonLikelihoodFCN<BaseFunc, IModelFunction_v>>(data, fFunc_v, useWeight, extended, executionPolicy);
395         return (useWeight) ? DoWeightMinimization(std::move(logl),&chi2) : DoMinimization(std::move(logl),&chi2);
396      } else {
397         // create a chi2 function to be used for the equivalent chi-square
// NOTE(review): original source line 398 (the declaration of `chi2`, referenced below) is missing — confirm upstream.
399         auto logl = std::make_unique<PoissonLikelihoodFCN<BaseFunc>>(data, fFunc, useWeight, extended, executionPolicy);
400         return (useWeight) ? DoWeightMinimization(std::move(logl),&chi2) : DoMinimization(std::move(logl),&chi2);
401      }
402   } else {
// NOTE(review): original source line 403 is missing — possibly a condition guarding the INFO message below; confirm upstream.
404      MATH_INFO_MSG("Fitter::DoLikelihoodFit", "use gradient from model function");
405      // not-extended is not implemented in this case
406      if (!extended) {
407         MATH_WARN_MSG("Fitter::DoBinnedLikelihoodFit",
408                       "Not-extended binned fit with gradient not yet supported - do an extended fit");
409         extended = true;
410      }
411      if (fFunc_v) {
412         // create a chi2 function to be used for the equivalent chi-square
// NOTE(review): original source line 413 (the declaration of `chi2`, referenced below) is missing — confirm upstream.
414         std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
415         if (!gradFun) {
416            MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
417            return false;
418         }
419         auto logl = std::make_unique<PoissonLikelihoodFCN<BaseGradFunc, IModelFunction_v>>(data, gradFun, useWeight, extended, executionPolicy);
420         // do minimization
421         return (useWeight) ? DoWeightMinimization(std::move(logl),&chi2) : DoMinimization(std::move(logl),&chi2);
422      } else {
423         // create a chi2 function to be used for the equivalent chi-square
// NOTE(review): original source line 424 (the declaration of `chi2`, referenced below) is missing — confirm upstream.
425         // check if fFunc provides gradient
426         std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
427         if (!gradFun) {
428            MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
429            return false;
430         }
431         // use gradient for minimization
432         auto logl = std::make_unique<PoissonLikelihoodFCN<BaseGradFunc>>(data, gradFun, useWeight, extended, executionPolicy);
433         // do minimization
434         return (useWeight) ? DoWeightMinimization(std::move(logl),&chi2) : DoMinimization(std::move(logl),&chi2);
435      }
436   }
437   return false;
438}
439
/// Perform a likelihood fit on unbinned data, selecting the appropriate
/// LogLikelihoodFCN variant and applying the weight correction when requested.
440bool Fitter::DoUnbinnedLikelihoodFit(bool extended, const ROOT::EExecutionPolicy &executionPolicy) {
441   // perform a likelihood fit on a set of unbinned data
442
443   std::shared_ptr<UnBinData> data = std::dynamic_pointer_cast<UnBinData>(fData);
444   assert(data);
445
446   bool useWeight = fConfig.UseWeightCorrection();
447
448   if (!fFunc && !fFunc_v) {
449      MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit","model function is not set");
450      return false;
451   }
452
453   if (useWeight && fConfig.MinosErrors() ) {
454      MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit","MINOS errors cannot be computed in weighted likelihood fits");
455      fConfig.SetMinosErrors(false);
456   }
457
458
459   fBinFit = false;
460   fDataSize = data->Size();
461
462#ifdef DEBUG
463   int ipar = 0;
464   std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[ipar].IsBound() << " lower limit " << Config().ParamsSettings()[ipar].LowerLimit() << " upper limit " << Config().ParamsSettings()[ipar].UpperLimit() << std::endl;
465#endif
466
467   // logl fit (error should be 0.5) set if different than default values (of 1)
// NOTE(review): original source lines 468-469 are missing from this extraction —
// presumably the `if (...) { ... SetErrorDef(0.5);` block whose closing brace follows; confirm upstream.
470   }
471
472   if (!fUseGradient) {
473      // do minimization without using the gradient
474      if (fFunc_v ){
475         auto logl = std::make_unique<LogLikelihoodFCN<BaseFunc, IModelFunction_v>>(data, fFunc_v, useWeight, extended, executionPolicy);
476         // do minimization
477         return (useWeight) ? DoWeightMinimization(std::move(logl)) : DoMinimization(std::move(logl));
478      } else {
479         auto logl = std::make_unique<LogLikelihoodFCN<BaseFunc>>(data, fFunc, useWeight, extended, executionPolicy);
480         return (useWeight) ? DoWeightMinimization(std::move(logl)) : DoMinimization(std::move(logl));
481      }
482   } else {
483      // use gradient : check if fFunc provides gradient
// NOTE(review): original source line 484 is missing — possibly a condition guarding the INFO message below; confirm upstream.
485      MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "use gradient from model function");
486      if (extended) {
487         MATH_WARN_MSG("Fitter::DoUnbinnedLikelihoodFit",
488                       "Extended unbinned fit with gradient not yet supported - do a not-extended fit");
489         extended = false;
490      }
491      if (fFunc_v) {
492         std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
493         if (!gradFun) {
494            MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
495            return false;
496         }
497         auto logl = std::make_unique<LogLikelihoodFCN<BaseGradFunc, IModelFunction_v>>(data, gradFun, useWeight, extended, executionPolicy);
498         return (useWeight) ? DoWeightMinimization(std::move(logl)) : DoMinimization(std::move(logl));
499      } else {
500         std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
501         if (!gradFun) {
502            MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
503            return false;
504         }
505         auto logl = std::make_unique<LogLikelihoodFCN<BaseGradFunc>>(data, gradFun, useWeight, extended, executionPolicy);
506         return (useWeight) ? DoWeightMinimization(std::move(logl)) : DoMinimization(std::move(logl));
507      }
508   }
509   return false;
510}
511
512
514
515 std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
516 assert(data);
517
518 // perform a linear fit on a set of binned data
519 std::string prevminimizer = fConfig.MinimizerType();
520 fConfig.SetMinimizer("Linear");
521
522 fBinFit = true;
523
524 bool ret = DoLeastSquareFit();
525 fConfig.SetMinimizer(prevminimizer.c_str());
526 return ret;
527}
528
529
/// Compute parabolic (Hesse) errors after a fit and update the fit result.
// NOTE(review): the function header (original source line 530, presumably
// `bool Fitter::CalculateHessErrors()`) was lost in this extraction.
531   // compute the Hesse errors according to configuration
532   // set in the parameters and append value in fit result
533   if (!ObjFunction()) {
534      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Objective function has not been set");
535      return false;
536   }
537
538   // need a special treatment in case of weighted likelihood fit
539   // (not yet implemented)
540   if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
541      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Re-computation of Hesse errors not implemented for weighted likelihood fits");
542      MATH_INFO_MSG("Fitter::CalculateHessErrors","Do the Fit using configure option FitConfig::SetParabErrors()");
543      return false;
544   }
545
546   // a fit Result pointer must exist when a minimizer exists
547   if (fMinimizer && !fResult ) {
548      MATH_ERROR_MSG("Fitter::CalculateHessErrors", "FitResult has not been created");
549      return false;
550   }
551
552   // update minimizer (recreate if not done or if name has changed
// NOTE(review): original source line 553 is missing from this extraction —
// presumably `if (!DoUpdateMinimizerOptions()) {`, matching the closing brace below; confirm upstream.
554      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Error re-initializing the minimizer");
555      return false;
556   }
557
558   if (!fMinimizer ) {
559      // this should not happen
560      MATH_ERROR_MSG("Fitter::CalculateHessErrors", "Need to do a fit before calculating the errors");
561      assert(false);
562      return false;
563   }
564
565   //run Hesse
566   bool ret = fMinimizer->Hesse();
567   if (!ret) MATH_WARN_MSG("Fitter::CalculateHessErrors","Error when calculating Hessian");
568
569   // update minimizer results with what comes out from Hesse
570   // in case is empty - create from a FitConfig
571   if (fResult->IsEmpty() )
// NOTE(review): original source line 572 (the statement executed when the result is empty) is missing — confirm upstream.
573
574   // update obj function in case it was an external one
// NOTE(review): original source line 575 is missing — presumably clones the external FCN into fObjFunction; confirm upstream.
576   fResult->fObjFunc = fObjFunction;
577
578   // re-give a minimizer instance in case it has been changed
579   ret |= fResult->Update(fMinimizer, fConfig, ret);
580
581   // when possible get ncalls from FCN and set in fit result
// NOTE(review): original source line 582 is missing — an `if (...) {` matching the closing brace below; confirm upstream.
583      fResult->fNCalls = GetNCallsFromFCN();
584   }
585
586   // set also new errors in FitConfig
// NOTE(review): original source line 587 (the statement propagating errors to FitConfig) is missing — confirm upstream.
588
589   return ret;
590}
591
592
/// Compute MINOS (asymmetric) errors after a fit, re-running Minos for
/// parameters processed before a new minimum was found, and update the result.
// NOTE(review): the function header (original source line 593, presumably
// `bool Fitter::CalculateMinosErrors()`) was lost in this extraction.
594   // compute the Minos errors according to configuration
595   // set in the parameters and append value in fit result
596   // normally Minos errors are computed just after the minimization
597   // (in DoMinimization) after minimizing if the
598   // FitConfig::MinosErrors() flag is set
599
600   if (!fMinimizer) {
601       MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minimizer does not exist - cannot calculate Minos errors");
602       return false;
603   }
604
605   if (!fResult || fResult->IsEmpty() ) {
606       MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Invalid Fit Result - cannot calculate Minos errors");
607       return false;
608   }
609
// NOTE(review): original source line 610 is missing — presumably the
// `if (fFitType == 2 && fConfig.UseWeightCorrection() ) {` guard for the error below; confirm upstream.
611      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Computation of MINOS errors not implemented for weighted likelihood fits");
612      return false;
613   }
614
615   // update minimizer (but cannot re-create in this case). Must use an existing one
616   if (!DoUpdateMinimizerOptions(false)) {
617      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Error re-initializing the minimizer");
618      return false;
619   }
620
621   // set flag to compute Minos error to false in FitConfig to avoid that
622   // following minimization calls perform unwanted Minos error calculations
623   /// fConfig.SetMinosErrors(false);
624
625
626   const std::vector<unsigned int> & ipars = fConfig.MinosParams();
627   unsigned int n = (!ipars.empty()) ? ipars.size() : fResult->Parameters().size();
628   bool ok = false;
629
630   int iparNewMin = 0;
631   int iparMax = n;
632   int iter = 0;
633   // rerun minos for the parameters run before a new Minimum has been found
634   do {
635      if (iparNewMin > 0)
636         MATH_INFO_MSG("Fitter::CalculateMinosErrors","Run again Minos for some parameters because a new Minimum has been found");
637      iparNewMin = 0;
638      for (int i = 0; i < iparMax; ++i) {
639         double elow, eup;
640         unsigned int index = (!ipars.empty()) ? ipars[i] : i;
641         bool ret = fMinimizer->GetMinosError(index, elow, eup);
642         // flags case when a new minimum has been found
643         if ((fMinimizer->MinosStatus() & 8) != 0) {
644            iparNewMin = i;
645         }
646         if (ret)
647            fResult->SetMinosError(index, elow, eup);
648         ok |= ret;
649      }
650
651      iparMax = iparNewMin;
652      iter++;  // to avoid infinite looping
653   }
654   while( iparNewMin > 0 && iter < 10);
655   if (!ok) {
656       MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minos error calculation failed for all the selected parameters");
657   }
658
659   // update obj function in case it was an external one
// NOTE(review): original source line 660 is missing — presumably clones the external FCN into fObjFunction; confirm upstream.
661   fResult->fObjFunc = fObjFunction;
662
663   // re-give a minimizer instance in case it has been changed
664   // but maintain previous valid status. Do not set result to false if minos failed
665   ok &= fResult->Update(fMinimizer, fConfig, fResult->IsValid());
666
667   return ok;
668}
669
670
671
/// Traits distinguishing fit-method functions (which expose NCalls/Type and a
/// gradient flag) from generic objective functions (defaults: 0 calls,
/// type -1, no gradient).
672// traits for distinguishing fit methods functions from generic objective functions
673template<class Func>
// NOTE(review): the struct header (original source line 674, presumably
// `struct ObjFuncTrait {`) was lost in this extraction.
675   static unsigned int NCalls(const Func & ) { return 0; }
676   static int Type(const Func & ) { return -1; }
677   static bool IsGrad() { return false; }
678};
679template<>
// NOTE(review): the specialization header (original source line 680, for
// ROOT::Math::FitMethodFunction) was lost in this extraction.
681   static unsigned int NCalls(const ROOT::Math::FitMethodFunction & f ) { return f.NCalls(); }
682   static int Type(const ROOT::Math::FitMethodFunction & f) { return f.Type(); }
683   static bool IsGrad() { return false; }
684};
685template<>
// NOTE(review): the specialization header (original source line 686, for
// ROOT::Math::FitMethodGradFunction) was lost in this extraction.
687   static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction & f ) { return f.NCalls(); }
688   static int Type(const ROOT::Math::FitMethodGradFunction & f) { return f.Type(); }
689   static bool IsGrad() { return true; }
690};
691
/// Create the minimizer from the configuration and load into it the objective
/// function (downcast to its gradient interface when needed), the optional
/// Hessian callback (Minuit2 only) and the parameter settings.
// NOTE(review): the function header (original source line 692, presumably
// `bool Fitter::DoInitMinimizer()`) was lost in this extraction.
693   //initialize minimizer by creating it
694   // and set there the objective function
695   // obj function must have been set before
696   auto objFunction = ObjFunction();
697   if (!objFunction) {
698      MATH_ERROR_MSG("Fitter::DoInitMinimizer","Objective function has not been set");
699      return false;
700   }
701
702   // check configuration and objective  function
703   if ( fConfig.ParamsSettings().size() != objFunction->NDim() ) {
704      MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong function dimension or wrong size for FitConfig");
705      return false;
706   }
707
708   // create first Minimizer
709   // using an auto_Ptr will delete the previous existing one
710   fMinimizer = std::shared_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
711   if (fMinimizer.get() == nullptr) {
712      MATH_ERROR_MSG("Fitter::DoInitMinimizer","Minimizer cannot be created");
713      return false;
714   }
715
716   // in case of gradient function one needs to downcast the pointer
717   if (fUseGradient) {
718      const ROOT::Math::IMultiGradFunction * gradfcn = dynamic_cast<const ROOT::Math::IMultiGradFunction *> (objFunction );
719      if (!gradfcn) {
720         MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong type of function - it does not provide gradient");
721         return false;
722      }
723      fMinimizer->SetFunction( *gradfcn);
724      // set also Hessian if available
725      if (Config().MinimizerType() == "Minuit2") {
726         const ROOT::Math::FitMethodGradFunction *fitGradFcn =
727            dynamic_cast<const ROOT::Math::FitMethodGradFunction *>(gradfcn);
728         if (fitGradFcn && fitGradFcn->HasHessian()) {
// Adapter: expand the packed lower-triangle Hessian returned by the FCN into
// the full symmetric ndim x ndim matrix expected by the minimizer.
729            auto hessFcn = [=](const std::vector<double> &x, double *hess) {
730               unsigned int ndim = x.size();
731               unsigned int nh = ndim * (ndim + 1) / 2;
732               std::vector<double> h(nh);
733               bool ret = fitGradFcn->Hessian(x.data(), h.data());
734               if (!ret) return false;
735               for (unsigned int i = 0; i < ndim; i++) {
736                  for (unsigned int j = 0; j <= i; j++) {
737                     unsigned int index = j + i * (i + 1) / 2; // formula for j < i
738                     hess[ndim * i + j] = h[index];
739                     if (j != i)
740                        hess[ndim * j + i] = h[index];
741                  }
742               }
743               return true;
744            };
745
746            fMinimizer->SetHessianFunction(hessFcn);
747         }
748      }
749   }
750   else
751      fMinimizer->SetFunction( *objFunction);
752
753
754   fMinimizer->SetVariables(fConfig.ParamsSettings().begin(), fConfig.ParamsSettings().end() );
755
756   // if requested parabolic error do correct error  analysis by the minimizer (call HESSE)
757   if (fConfig.ParabErrors()) fMinimizer->SetValidError(true);
758
759   return true;
760
761}
762
763bool Fitter::DoUpdateMinimizerOptions(bool canDifferentMinim ) {
764 // update minimizer options when re-doing a Fit or computing Hesse or Minos errors
765
766
767 // create a new minimizer if it is different type
768 // minimizer type string stored in FitResult is "minimizer name" + " / " + minimizer algo
769 std::string newMinimType = fConfig.MinimizerName();
770 if (fMinimizer && fResult && newMinimType != fResult->MinimizerType()) {
771 // if a different minimizer is allowed (e.g. when calling Hesse)
772 if (canDifferentMinim) {
773 std::string msg = "Using now " + newMinimType;
774 MATH_INFO_MSG("Fitter::DoUpdateMinimizerOptions: ", msg.c_str());
775 if (!DoInitMinimizer() )
776 return false;
777 }
778 else {
779 std::string msg = "Cannot change minimizer. Continue using " + fResult->MinimizerType();
780 MATH_WARN_MSG("Fitter::DoUpdateMinimizerOptions",msg.c_str());
781 }
782 }
783
784 // create minimizer if it was not done before
785 if (!fMinimizer) {
786 if (!DoInitMinimizer())
787 return false;
788 }
789
790 // set new minimizer options (but not functions and parameters)
791 fMinimizer->SetOptions(fConfig.MinimizerOptions());
792 return true;
793}
794
/// Run the minimization with the already-initialized minimizer and fill the
/// fit result (optionally attaching an equivalent chi-square function).
// NOTE(review): the function header (original source line 795, presumably
// `bool Fitter::DoMinimization(const ROOT::Math::IMultiGenFunction * chi2func)`)
// was lost in this extraction.
796   // perform the minimization (assume we have already initialized the minimizer)
797
798   assert(fMinimizer );
799
800   bool isValid = fMinimizer->Minimize();
801
802   if (!fResult) fResult = std::make_shared<FitResult>();
803
804   fResult->FillResult(fMinimizer,fConfig, fFunc, isValid, fDataSize, fFitType, chi2func );
805
806   // if requested run Minos after minimization
807   if (isValid && fConfig.MinosErrors()) {
808      // minos error calculation will update also FitResult
// NOTE(review): original source line 809 (presumably the call performing the Minos error calculation) is missing — confirm upstream.
810   }
811
812   // when possible get number of calls from FCN and set in fit result
// NOTE(review): original source line 813 is missing — an `if (...) {` matching the closing brace below; confirm upstream.
814      fResult->fNCalls = GetNCallsFromFCN();
815   }
816
817   // fill information in fit result
818   // if using an external obj function clone it for storing in FitResult
// NOTE(review): original source line 819 is missing — presumably clones the external FCN into fObjFunction; confirm upstream.
820   fResult->fObjFunc = fObjFunction;
821   fResult->fFitData = fData;
822
823#ifdef DEBUG
824      std::cout << "ROOT::Fit::Fitter::DoMinimization : ncalls = " << fResult->fNCalls << " type of objfunc " << fFitFitResType << " typeid: " << typeid(*fObjFunction).name() << " use gradient " << fUseGradient << std::endl;
825#endif
826
// NOTE(review): original source line 827 is missing — presumably the condition guarding the error normalization below; confirm upstream.
828      fResult->NormalizeErrors();
829
830   // set also new parameter values and errors in FitConfig
831   if (fConfig.UpdateAfterFit() && isValid) DoUpdateFitConfig();
832
833   return isValid;
834}
835template<class ObjFunc_t>
836bool Fitter::DoMinimization(std::unique_ptr<ObjFunc_t> objFunc, const ROOT::Math::IMultiGenFunction * chi2func) {
837 // perform the minimization initializing the minimizer starting from a given obj function
838 fFitType = objFunc->Type();
839 fExtObjFunction = nullptr;
840 fObjFunction = std::move(objFunc);
841 if (!DoInitMinimizer()) return false;
842 return DoMinimization(chi2func);
843}
844template<class ObjFunc_t>
845bool Fitter::DoWeightMinimization(std::unique_ptr<ObjFunc_t> objFunc, const ROOT::Math::IMultiGenFunction * chi2func) {
846 // perform the minimization initializing the minimizer starting from a given obj function
847 // and apply afterwards the correction for weights. This applies only for logL fitting
848 this->fFitType = objFunc->Type();
849 fExtObjFunction = nullptr;
850 // need to use a temporary shared pointer to the objective function since we cannot use the unique pointer when it has been moved
851 std::shared_ptr<ObjFunc_t> sObjFunc{ std::move(objFunc)};
852 fObjFunction = sObjFunc;
853 if (!DoInitMinimizer()) return false;
854 if (!DoMinimization(chi2func)) return false;
855 sObjFunc->UseSumOfWeightSquare();
856 return ApplyWeightCorrection(*sObjFunc);
857}
858
859
/// Copy fitted parameter values (and errors as step sizes) back into the
/// fit configuration after a successful fit.
// NOTE(review): the function header (original source line 860, presumably
// `void Fitter::DoUpdateFitConfig()`) was lost in this extraction.
861   // update the fit configuration after a fit using the obtained result
862   if (fResult->IsEmpty() || !fResult->IsValid() ) return;
863   for (unsigned int i = 0; i < fConfig.NPar(); ++i) {
// NOTE(review): original source line 864 (the declaration of `par`, presumably a
// reference to the i-th parameter settings of fConfig) is missing — confirm upstream.
865      par.SetValue( fResult->Value(i) );
866      if (fResult->Error(i) > 0) par.SetStepSize( fResult->Error(i) );
867   }
868}
869
/// Retrieve the number of function calls from the fit-method function; used
/// when the minimizer cannot report the call count itself. Returns 0 when the
/// stored objective function is not of a fit-method type.
// NOTE(review): the function header (original source line 870, presumably
// `int Fitter::GetNCallsFromFCN()`) was lost in this extraction.
871   // retrieve ncalls from the fit method functions
872   // this function is called when minimizer does not provide a way of returning the number of function calls
873   int ncalls = 0;
874   if (!fUseGradient) {
875      const ROOT::Math::FitMethodFunction * fcn = dynamic_cast<const ROOT::Math::FitMethodFunction *>(fObjFunction.get());
876      if (fcn) ncalls = fcn->NCalls();
877   }
878   else {
// NOTE(review): original source line 879 is missing — presumably the analogous
// dynamic_cast of fObjFunction to FitMethodGradFunction declaring `fcn`; confirm upstream.
880      if (fcn) ncalls = fcn->NCalls();
881   }
882   return ncalls;
883}
884
885
886bool Fitter::ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction & loglw2, bool minimizeW2L) {
887 // apply correction for weight square
888 // Compute Hessian of the loglikelihood function using the sum of the weight squared
889 // This method assumes:
890 // - a fit has been done before and a covariance matrix exists
891 // - the objective function is a likelihood function and Likelihood::UseSumOfWeightSquare()
892 // has been called before
893
894 if (fMinimizer.get() == nullptr) {
895 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Must perform first a fit before applying the correction");
896 return false;
897 }
898
899 unsigned int n = loglw2.NDim();
900 // correct errors for weight squared
901 std::vector<double> cov(n*n);
902 bool ret = fMinimizer->GetCovMatrix(&cov[0] );
903 if (!ret) {
904 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Previous fit has no valid Covariance matrix");
905 return false;
906 }
907 // need to use new obj function computed with weight-square
908 std::shared_ptr<ROOT::Math::IMultiGenFunction> objFunc(loglw2.Clone());
909 fObjFunction.swap( objFunc );
910
911 // need to re-initialize the minimizer for the changes applied in the
912 // objective functions
913 if (!DoInitMinimizer()) return false;
914
915 //std::cout << "Running Hesse ..." << std::endl;
916
917 // run eventually before a minimization
918 // ignore its error
919 if (minimizeW2L) fMinimizer->Minimize();
920 // run Hesse on the log-likelihood build using sum of weight squared
921 ret = fMinimizer->Hesse();
922 if (!ret) {
923 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error running Hesse on weight2 likelihood - cannot compute errors");
924 return false;
925 }
926
927 if (fMinimizer->CovMatrixStatus() != 3) {
928 MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not accurate, the errors may be not reliable");
929 if (fMinimizer->CovMatrixStatus() == 2)
930 MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood was forced to be defined positive");
931 if (fMinimizer->CovMatrixStatus() <= 0)
932 // probably should have failed before
933 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not valid !");
934 }
935
936 // get Hessian matrix from weight-square likelihood
937 std::vector<double> hes(n*n);
938 ret = fMinimizer->GetHessianMatrix(&hes[0] );
939 if (!ret) {
940 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error retrieving Hesse on weight2 likelihood - cannot compute errors");
941 return false;
942 }
943
944
945 // perform product of matrix cov * hes * cov
946 // since we do not want to add matrix dependence do product by hand
947 // first do hes * cov
948 std::vector<double> tmp(n*n);
949 for (unsigned int i = 0; i < n; ++i) {
950 for (unsigned int j = 0; j < n; ++j) {
951 for (unsigned int k = 0; k < n; ++k)
952 tmp[i*n+j] += hes[i*n + k] * cov[k*n + j];
953 }
954 }
955 // do multiplication now cov * tmp save result
956 std::vector<double> newCov(n*n);
957 for (unsigned int i = 0; i < n; ++i) {
958 for (unsigned int j = 0; j < n; ++j) {
959 for (unsigned int k = 0; k < n; ++k)
960 newCov[i*n+j] += cov[i*n + k] * tmp[k*n + j];
961 }
962 }
963 // update fit result with new corrected covariance matrix
964 unsigned int k = 0;
965 for (unsigned int i = 0; i < n; ++i) {
966 fResult->fErrors[i] = std::sqrt( newCov[i*(n+1)] );
967 for (unsigned int j = 0; j <= i; ++j)
968 fResult->fCovMatrix[k++] = newCov[i *n + j];
969 }
970
971 // restore previous used objective function
972 fObjFunction.swap( objFunc );
973
974 return true;
975}
976
977
978
/// Inspect the stored objective function: when it is of a known binned type
/// (chi-square or Poisson likelihood), recover from it the fit data and the
/// model function if they have not been set.
// NOTE(review): the function header (original source line 979, presumably
// `void Fitter::ExamineFCN()`) was lost in this extraction.
980   // return a pointer to the binned data used in the fit
981   // works only for chi2 or binned likelihood fits
982   // thus when the objective function stored is a Chi2Func or a PoissonLikelihood
983   // This also set the model function correctly if it has not been set
984
// NOTE(review): original source lines 985-986 and 988-989 are missing from this
// extraction — presumably the dispatch calls that extract data/function from the
// known FCN types, each returning early on success; confirm upstream.
987
990
991   //MATH_INFO_MSG("Fitter::ExamineFCN","Objective function is not of a known type - FitData and ModelFunction objects are not available");
992   return;
993}
994
995 } // end namespace Fit
996
997} // end namespace ROOT
#define MATH_INFO_MSG(loc, str)
Pre-processor macro to report messages which can be configured to use ROOT error or simply an std::io...
Definition Error.h:77
#define MATH_ERROR_MSG(loc, str)
Definition Error.h:83
#define MATH_WARN_MSG(loc, str)
Definition Error.h:80
#define f(i)
Definition RSha256.hxx:104
#define h(i)
Definition RSha256.hxx:106
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void data
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t result
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t index
BasicFCN class: base class for the objective functions used in the fits It has a reference to the dat...
Definition BasicFCN.h:40
Chi2FCN class for binned fits using the least square methods.
Definition Chi2FCN.h:46
const std::vector< unsigned int > & MinosParams() const
return vector of parameter indices for which the Minos Error will be computed
Definition FitConfig.h:218
bool UpdateAfterFit() const
Update configuration after a fit using the FitResult.
Definition FitConfig.h:211
void SetMinimizer(const char *type, const char *algo=nullptr)
set minimizer type
Definition FitConfig.h:179
void SetMinosErrors(bool on=true)
set Minos errors computation to be performed after fitting
Definition FitConfig.h:229
bool NormalizeErrors() const
flag to check if resulting errors are be normalized according to chi2/ndf
Definition FitConfig.h:202
bool ParabErrors() const
do analysis for parabolic errors
Definition FitConfig.h:205
unsigned int NPar() const
number of parameters settings
Definition FitConfig.h:96
void SetParamsSettings(unsigned int npar, const double *params, const double *vstep=nullptr)
set the parameter settings from number of parameters and a vector of values and optionally step value...
std::string MinimizerName() const
return Minimizer full name (type / algorithm)
bool UseWeightCorrection() const
Apply Weight correction for error matrix computation.
Definition FitConfig.h:214
const std::vector< ROOT::Fit::ParameterSettings > & ParamsSettings() const
get the vector of parameter settings (const method)
Definition FitConfig.h:86
ROOT::Math::Minimizer * CreateMinimizer()
create a new minimizer according to chosen configuration
void CreateParamsSettings(const ROOT::Math::IParamMultiFunctionTempl< T > &func)
set the parameter settings from a model function.
Definition FitConfig.h:109
const std::string & MinimizerType() const
return type of minimizer package
Definition FitConfig.h:187
const ParameterSettings & ParSettings(unsigned int i) const
get the parameter settings for the i-th parameter (const method)
Definition FitConfig.h:76
ROOT::Math::MinimizerOptions & MinimizerOptions()
access to the minimizer control parameter (non const method)
Definition FitConfig.h:167
bool MinosErrors() const
do minos errors analysis on the parameters
Definition FitConfig.h:208
class containing the result of the fit and all the related information (fitted parameter values,...
Definition FitResult.h:47
bool EvalFCN()
Perform a simple FCN evaluation.
Definition Fitter.cxx:288
const ROOT::Math::IMultiGenFunction * fExtObjFunction
! pointer to an external FCN
Definition Fitter.h:576
bool FitFCN()
Perform a fit with the previously set FCN function.
Definition Fitter.cxx:269
void DoUpdateFitConfig()
Definition Fitter.cxx:860
bool DoMinimization(std::unique_ptr< ObjFunc_t > f, const ROOT::Math::IMultiGenFunction *chifunc=nullptr)
do minimization
Definition Fitter.cxx:836
bool DoSetFCN(bool useExtFCN, const ROOT::Math::IMultiGenFunction &fcn, const double *params, unsigned int dataSize, int fitType)
Set Objective function.
Definition Fitter.cxx:137
int fDataSize
size of data sets (need for Fumili or LM fitters)
Definition Fitter.h:560
bool DoUnbinnedLikelihoodFit(bool extended=false, const ROOT::EExecutionPolicy &executionPolicy=ROOT::EExecutionPolicy::kSequential)
un-binned likelihood fit
Definition Fitter.cxx:440
const ROOT::Math::IBaseFunctionMultiDimTempl< double > * ObjFunction() const
Return pointer to the used objective function for fitting.
Definition Fitter.h:544
std::shared_ptr< ROOT::Math::Minimizer > fMinimizer
! pointer to used minimizer
Definition Fitter.h:570
bool DoWeightMinimization(std::unique_ptr< ObjFunc_t > f, const ROOT::Math::IMultiGenFunction *chifunc=nullptr)
Definition Fitter.cxx:845
bool DoBinnedLikelihoodFit(bool extended=true, const ROOT::EExecutionPolicy &executionPolicy=ROOT::EExecutionPolicy::kSequential)
binned likelihood fit
Definition Fitter.cxx:360
int fFitType
type of fit (0 undefined, 1 least square, 2 likelihood, 3 binned likelihood)
Definition Fitter.h:558
std::shared_ptr< ROOT::Fit::FitData > fData
! pointer to the fit data (binned or unbinned data)
Definition Fitter.h:572
bool fUseGradient
flag to indicate if using gradient or not
Definition Fitter.h:552
bool fBinFit
flag to indicate if fit is binned in case of false the fit is unbinned or undefined) flag it is used ...
Definition Fitter.h:554
std::shared_ptr< ROOT::Math::IMultiGenFunction > fObjFunction
! pointer to used objective function
Definition Fitter.h:574
bool ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction &loglw2, bool minimizeW2L=false)
apply correction in the error matrix for the weights for likelihood fits This method can be called on...
Definition Fitter.cxx:886
void ExamineFCN()
look at the user-provided FCN and get data and model function if they derive from ROOT::Fit FCN class...
Definition Fitter.cxx:979
const FitConfig & Config() const
access to the fit configuration (const method)
Definition Fitter.h:422
bool DoLeastSquareFit(const ROOT::EExecutionPolicy &executionPolicy=ROOT::EExecutionPolicy::kSequential)
least square fit
Definition Fitter.cxx:309
bool SetFCN(unsigned int npar, Function &fcn, const double *params=nullptr, unsigned int dataSize=0, int fitType=0)
Set a generic FCN function as a C++ callable object implementing double () (const double *) Note that...
Definition Fitter.h:656
std::shared_ptr< IModelFunction_v > fFunc_v
! copy of the fitted function containing on output the fit result
Definition Fitter.h:564
std::shared_ptr< ROOT::Fit::FitResult > fResult
! pointer to the object containing the result of the fit
Definition Fitter.h:568
bool GetDataFromFCN()
internal functions to get data set and model function from FCN useful for fits done with customized F...
Definition Fitter.h:584
bool CalculateMinosErrors()
perform an error analysis on the result using MINOS To be called only after fitting and when a minimi...
Definition Fitter.cxx:593
bool DoUpdateMinimizerOptions(bool canDifferentMinim=true)
Definition Fitter.cxx:763
void SetFunction(const IModelFunction &func, bool useGradient=false)
Set the fitted function (model function) from a parametric function interface.
Definition Fitter.cxx:59
bool CalculateHessErrors()
perform an error analysis on the result using the Hessian Errors are obtained from the inverse of the...
Definition Fitter.cxx:530
FitConfig fConfig
fitter configuration (options and parameter settings)
Definition Fitter.h:562
Fitter()
Default constructor.
Definition Fitter.h:102
std::shared_ptr< IModelFunction > fFunc
! copy of the fitted function containing on output the fit result
Definition Fitter.h:566
bool DoLinearFit()
linear least square fit
Definition Fitter.cxx:513
bool DoInitMinimizer()
Definition Fitter.cxx:692
int GetNCallsFromFCN()
Definition Fitter.cxx:870
Class, describing value, limits and step size of the parameters Provides functionality also to set/re...
void SetValue(double val)
set the value
void SetStepSize(double err)
set the step size
FitMethodFunction class Interface for objective functions (like chi2 and likelihood used in the fit) ...
virtual Type_t Type() const
return the type of method, override if needed
virtual bool Hessian(const double *x, double *hess) const
Computes the full Hessian.
virtual unsigned int NPoints() const
return the number of data points used in evaluating the function
virtual unsigned int NCalls() const
return the total number of function calls (override if needed)
Documentation for the abstract class IBaseFunctionMultiDim.
Definition IFunction.h:61
virtual IBaseFunctionMultiDimTempl< T > * Clone() const =0
Clone a function.
virtual unsigned int NDim() const =0
Retrieve the dimension of the function.
Interface (abstract class) for multi-dimensional functions providing a gradient calculation.
Definition IFunction.h:168
Specialized IParamFunction interface (abstract class) for one-dimensional parametric functions It is ...
Interface (abstract class) for parametric gradient multi-dimensional functions providing in addition ...
Interface (abstract class) for parametric one-dimensional gradient functions providing in addition to...
double ErrorDef() const
error definition
int PrintLevel() const
non-static methods for retrieving options
void SetErrorDef(double err)
set error def
MultiDimParamFunctionAdapter class to wrap a one-dimensional parametric function in a multi dimension...
MultiDimParamGradFunctionAdapter class to wrap a one-dimensional parametric gradient function in a mu...
Double_t x[n]
Definition legend1.C:17
const Int_t n
Definition legend1.C:16
TFitResultPtr Fit(FitObject *h1, TF1 *f1, Foption_t &option, const ROOT::Math::MinimizerOptions &moption, const char *goption, ROOT::Fit::DataRange &range)
Definition HFitImpl.cxx:133
double gDefaultErrorDef
Definition Fitter.cxx:48
This file contains a specialised ROOT message handler to test for diagnostic in unit tests.
static int Type(const ROOT::Math::FitMethodFunction &f)
Definition Fitter.cxx:682
static unsigned int NCalls(const ROOT::Math::FitMethodFunction &f)
Definition Fitter.cxx:681
static int Type(const ROOT::Math::FitMethodGradFunction &f)
Definition Fitter.cxx:688
static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction &f)
Definition Fitter.cxx:687
static bool IsGrad()
Definition Fitter.cxx:677
static unsigned int NCalls(const Func &)
Definition Fitter.cxx:675
static int Type(const Func &)
Definition Fitter.cxx:676