Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
Fitter.cxx
Go to the documentation of this file.
1// @(#)root/mathcore:$Id$
2// Author: L. Moneta Mon Sep 4 17:00:10 2006
3
4/**********************************************************************
5 * *
6 * Copyright (c) 2006 LCG ROOT Math Team, CERN/PH-SFT *
7 * *
8 * *
9 **********************************************************************/
10
11// Implementation file for class Fitter
12
13
14#include "Fit/Fitter.h"
15#include "Fit/Chi2FCN.h"
18#include "Math/Minimizer.h"
21#include "Fit/BasicFCN.h"
22#include "Fit/BinData.h"
23#include "Fit/UnBinData.h"
24#include "Fit/FcnAdapter.h"
25#include "Fit/FitConfig.h"
26#include "Fit/FitResult.h"
27#include "Math/Error.h"
28
29#include <memory>
30
31#include "Math/IParamFunction.h"
32
34
35// #include "TMatrixDSym.h"
36// for debugging
37//#include "TMatrixD.h"
38// #include <iomanip>
39
40namespace ROOT {
41
42 namespace Fit {
43
44// use a static variable to get default minimizer options for error def
45// to see if user has changed it later on. If it has not been changed we set
46// for the likelihood method an error def of 0.5
47// t.b.d : multiply likelihood by 2 so have same error def definition as chi2
49
50
51Fitter::Fitter(const std::shared_ptr<FitResult> & result) :
52 fResult(result)
53{
54 if (result->fFitFunc) SetFunction(*fResult->fFitFunc); // this will create also the configuration
55 if (result->fObjFunc) fObjFunction = fResult->fObjFunc;
56 if (result->fFitData) fData = fResult->fFitData;
57}
58
/// Set the fit model function (the given function is cloned and owned).
/// When useGradient is true the function is checked for a gradient
/// interface; if it does not provide one a warning is emitted and the
/// function is used without gradient.
void Fitter::SetFunction(const IModelFunction & func, bool useGradient)
{

   fUseGradient = useGradient;
   if (fUseGradient) {
      // try to promote to the gradient-providing interface
      const IGradModelFunction * gradFunc = dynamic_cast<const IGradModelFunction*>(&func);
      if (gradFunc) {
         SetFunction(*gradFunc, true);
         return;
      }
      else {
         MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
      }
   }
   fUseGradient = false;

   // set the fit model function (clone the given one and keep a copy )
   //std::cout << "set a non-grad function" << std::endl;

   fFunc = std::shared_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone() ) );
   assert(fFunc);

   // creates the parameter settings
   // NOTE(review): a statement is elided in this extraction (doc line 82) —
   // presumably it creates the parameter settings from fFunc; confirm upstream.
   fFunc_v.reset();   // invalidate any previously set vectorized function
}
85
/// Set a 1-D model function; it is wrapped (and cloned) inside an adapter
/// exposing the multi-dimensional parametric interface used internally.
void Fitter::SetFunction(const IModel1DFunction & func, bool useGradient)
{
   fUseGradient = useGradient;
   if (fUseGradient) {
      // try to promote to the gradient-providing 1-D interface
      const IGradModel1DFunction * gradFunc = dynamic_cast<const IGradModel1DFunction*>(&func);
      if (gradFunc) {
         SetFunction(*gradFunc, true);
         return;
      }
      else {
         MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
      }
   }
   fUseGradient = false;
   //std::cout << "set a 1d function" << std::endl;

   // function is cloned when creating the adapter
   fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamFunctionAdapter(func));

   // creates the parameter settings
   // NOTE(review): a statement is elided in this extraction (doc line 106) —
   // presumably it creates the parameter settings from fFunc; confirm upstream.
   fFunc_v.reset();   // invalidate any previously set vectorized function
}
109
/// Set a gradient-providing model function (cloned and owned by the Fitter).
void Fitter::SetFunction(const IGradModelFunction & func, bool useGradient)
{
   fUseGradient = useGradient;
   //std::cout << "set a grad function" << std::endl;
   // set the fit model function (clone the given one and keep a copy )
   fFunc = std::shared_ptr<IModelFunction>( dynamic_cast<IGradModelFunction *> ( func.Clone() ) );
   assert(fFunc);

   // creates the parameter settings
   // NOTE(review): a statement is elided in this extraction (doc line 119) —
   // presumably it creates the parameter settings from fFunc; confirm upstream.
   fFunc_v.reset();   // invalidate any previously set vectorized function
}
122
123
/// Set a 1-D gradient model function; it is cloned inside the adapter which
/// promotes it to the multi-dimensional gradient interface used internally.
void Fitter::SetFunction(const IGradModel1DFunction & func, bool useGradient)
{
   //std::cout << "set a 1d grad function" << std::endl;
   fUseGradient = useGradient;
   // function is cloned when creating the adapter
   fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamGradFunctionAdapter(func));

   // creates the parameter settings
   // NOTE(review): a statement is elided in this extraction (doc line 132) —
   // presumably it creates the parameter settings from fFunc; confirm upstream.
   fFunc_v.reset();   // invalidate any previously set vectorized function
}
135
136
137bool Fitter::DoSetFCN(bool extFcn, const ROOT::Math::IMultiGenFunction & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
138 // Set the objective function for the fit. First parameter specifies if function object is managed external or internal.
139 // In case of an internal function object we need to clone because it is a temporary one
140 // if params is not NULL create the parameter settings
141 fUseGradient = false;
142 unsigned int npar = fcn.NDim();
143 if (npar == 0) {
144 MATH_ERROR_MSG("Fitter::SetFCN","FCN function has zero parameters ");
145 return false;
146 }
147 if (params != nullptr || fConfig.ParamsSettings().size() == 0)
148 fConfig.SetParamsSettings(npar, params);
149 else {
150 if ( fConfig.ParamsSettings().size() != npar) {
151 MATH_ERROR_MSG("Fitter::SetFCN","wrong fit parameter settings");
152 return false;
153 }
154 }
155
156 fBinFit = chi2fit;
157 fDataSize = dataSize;
158
159 // store external provided FCN without cloning it
160 // it will be cloned in fObjFunc after the fit
161 if (extFcn) {
162 fExtObjFunction = &fcn;
163 fObjFunction.reset();
164 }
165 else {
166 // case FCN is built from Minuit interface so function object is created internally in Fitter class
167 // and needs to be cloned and managed
168 fExtObjFunction = nullptr;
169 fObjFunction.reset(fcn.Clone());
170 }
171
172 // in case a model function and data exists from a previous fit - reset shared-ptr
173 if (fResult && fResult->FittedFunction() == nullptr && fFunc) fFunc.reset();
174 if (fData) fData.reset();
175
176 return true;
177}
178bool Fitter::SetFCN(const ROOT::Math::IMultiGenFunction & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
179 // set the objective function for the fit
180 return DoSetFCN(true, fcn, params, dataSize, chi2fit);
181}
/// Set the objective function for the fit together with a model function
/// (cloned and owned by the Fitter).
bool Fitter::SetFCN(const ROOT::Math::IMultiGenFunction &fcn, const IModelFunction & func, const double *params, unsigned int dataSize, bool chi2fit) {
   // set the objective function for the fit and a model function
   if (!SetFCN(fcn, params, dataSize, chi2fit) ) return false;
   // need to set fFunc afterwards because SetFCN could reset fFunc
   fFunc = std::unique_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone()));
   if(fFunc) {
      // NOTE(review): a statement is elided here in this extraction (doc line 188)
      return true;
   }
   return false;
   // NOTE(review): the following statement is unreachable (dead code after the
   // unconditional return above)
   return (fFunc != nullptr);
}
194
195bool Fitter::SetFCN(const ROOT::Math::FitMethodFunction &fcn, const double *params)
196{
197 // set the objective function for the fit
198 // if params is not NULL create the parameter settings
199 bool chi2fit = (fcn.Type() == ROOT::Math::FitMethodFunction::kLeastSquare);
200 if (!SetFCN(fcn, params, fcn.NPoints(), chi2fit))
201 return false;
202 fUseGradient = false;
203 fFitType = fcn.Type();
204 return true;
205}
206
/// Set the objective function from a gradient-providing fit-method function.
bool Fitter::SetFCN(const ROOT::Math::FitMethodGradFunction &fcn, const double *params)
{
   // set the objective function for the fit
   // if params is not NULL create the parameter settings
   // NOTE(review): the declaration of 'chi2fit' is elided in this extraction
   // (doc line 211); presumably it mirrors the non-gradient overload:
   // chi2fit = (fcn.Type() == kLeastSquare). Confirm against upstream.
   if (!SetFCN(fcn, params, fcn.NPoints(), chi2fit))
      return false;
   fUseGradient = true;   // this overload provides the gradient
   fFitType = fcn.Type();
   return true;
}
218
219bool Fitter::FitFCN(const BaseFunc &fcn, const double *params, unsigned int dataSize, bool chi2fit)
220{
221 // fit a user provided FCN function
222 // create fit parameter settings
223 if (!SetFCN(fcn, params, dataSize, chi2fit))
224 return false;
225 return FitFCN();
226}
227
228bool Fitter::FitFCN(const ROOT::Math::FitMethodFunction &fcn, const double *params)
229{
230 // fit using the passed objective function for the fit
231 if (!SetFCN(fcn, params))
232 return false;
233 return FitFCN();
234}
235
236bool Fitter::FitFCN(const ROOT::Math::FitMethodGradFunction &fcn, const double *params)
237{
238 // fit using the passed objective function for the fit
239 if (!SetFCN(fcn, params))
240 return false;
241 return FitFCN();
242}
243
244bool Fitter::SetFCN(MinuitFCN_t fcn, int npar, const double *params, unsigned int dataSize, bool chi2fit)
245{
246 // set TMinuit style FCN type (global function pointer)
247 // create corresponding objective function from that function
248
249 if (npar == 0) {
250 npar = fConfig.ParamsSettings().size();
251 if (npar == 0) {
252 MATH_ERROR_MSG("Fitter::FitFCN", "Fit Parameter settings have not been created ");
253 return false;
254 }
255 }
256
257 ROOT::Fit::FcnAdapter newFcn(fcn, npar);
258 return DoSetFCN(false,newFcn, params, dataSize, chi2fit);
259}
260
261bool Fitter::FitFCN(MinuitFCN_t fcn, int npar, const double *params, unsigned int dataSize, bool chi2fit)
262{
263 // fit using Minuit style FCN type (global function pointer)
264 // create corresponfing objective function from that function
265 if (!SetFCN(fcn, npar, params, dataSize, chi2fit))
266 return false;
267 fUseGradient = false;
268 return FitFCN();
269}
270
// NOTE(review): the function signature is elided in this extraction (doc line
// 271); this is the argument-less FitFCN() entry point, fitting with the
// previously set objective function.
{
   // fit using the previously set FCN function


   if (!fExtObjFunction && !fObjFunction) {
      MATH_ERROR_MSG("Fitter::FitFCN", "Objective function has not been set");
      return false;
   }
   // look if FCN is of a known type and we can retrieve the model function and data objects
   if (!fFunc || !fData)
      ExamineFCN();
   // init the minimizer
   if (!DoInitMinimizer())
      return false;
   // perform the minimization
   return DoMinimization();
}
289
// NOTE(review): the function signature is elided in this extraction (doc line
// 290); judging from the body this evaluates the objective function once at
// the configured parameter values and stores the value in the fit result —
// confirm name and signature against upstream.
{
   // evaluate the FCN using the stored values in fConfig

   // drop a model function surviving from a previous fit
   if (fFunc && fResult->FittedFunction() == nullptr)
      fFunc.reset();

   if (!ObjFunction()) {
      MATH_ERROR_MSG("Fitter::FitFCN", "Objective function has not been set");
      return false;
   }
   // create a Fit result from the fit configuration
   fResult = std::make_shared<ROOT::Fit::FitResult>(fConfig);
   // evaluate one time the FCN
   double fcnval = (*ObjFunction())(fResult->GetParams());
   // update fit result
   fResult->fVal = fcnval;
   fResult->fNCalls++;
   return true;
}
310
// NOTE(review): the function signature is elided in this extraction (doc line
// 311); this is the least-square (chi2) fit on binned data. One interior line
// is also elided (doc line 342). Confirm against upstream before editing.
{

   // perform a chi2 fit on a set of binned data
   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
   assert(data);

   // check function
   if (!fFunc && !fFunc_v) {
      MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "model function is not set");
      return false;
   } else {

#ifdef DEBUG
      std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[3].IsBound() << " lower limit "
                << Config().ParamsSettings()[3].LowerLimit() << " upper limit "
                << Config().ParamsSettings()[3].UpperLimit() << std::endl;
#endif

      fBinFit = true;
      fDataSize = data->Size();
      // check if fFunc provides gradient
      if (!fUseGradient) {
         // do minimization without using the gradient
         if (fFunc_v) {
            return DoMinimization(std::make_unique<Chi2FCN<BaseFunc, IModelFunction_v>>(data, fFunc_v, executionPolicy));
         } else {
            return DoMinimization(std::make_unique<Chi2FCN<BaseFunc>>(data, fFunc, executionPolicy));
         }
      } else {
         // use gradient
         MATH_INFO_MSG("Fitter::DoLeastSquareFit", "use gradient from model function");

         if (fFunc_v) {
            std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
            if (gradFun) {
               return DoMinimization(std::make_unique<Chi2FCN<BaseGradFunc, IModelFunction_v>>(data, gradFun, executionPolicy));
            }
         } else {
            std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
            if (gradFun) {
               return DoMinimization(std::make_unique<Chi2FCN<BaseGradFunc>>(data, gradFun, executionPolicy));
            }
         }
         // the gradient flag was set but the model function could not be cast
         // to a gradient-providing interface
         MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "wrong type of function - it does not provide gradient");
      }
   }
   return false;
}
361
/// Perform a (by default extended, Poisson) likelihood fit on binned data.
/// NOTE(review): several lines are elided in this extraction (doc lines
/// 379-380, 395, 400-401, 405, 415-416, 426), including the declarations of
/// the 'chi2' helper objects used below and the guard that sets the error
/// definition — a stray closing brace from it survives. Confirm upstream.
bool Fitter::DoBinnedLikelihoodFit(bool extended, const ROOT::EExecutionPolicy &executionPolicy)
{
   // perform a likelihood fit on a set of binned data
   // The fit is extended (Poisson logl) by default

   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
   assert(data);

   // whether the weight correction is requested in the configuration
   bool useWeight = fConfig.UseWeightCorrection();

   // check function
   if (!fFunc && !fFunc_v) {
      MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "model function is not set");
      return false;
   }

   // logl fit (error should be 0.5) set if different than default values (of 1)
   // NOTE(review): the 'if (...) {' line and its body are elided here (doc lines 379-380)
   }

   if (useWeight && fConfig.MinosErrors()) {
      MATH_INFO_MSG("Fitter::DoBinnedLikelihoodFit", "MINOS errors cannot be computed in weighted likelihood fits");
      fConfig.SetMinosErrors(false);
   }

   fBinFit = true;
   fDataSize = data->Size();

   if (!fUseGradient) {
      // do minimization without using the gradient
      if (fFunc_v) {
         // create a chi2 function to be used for the equivalent chi-square
         // NOTE(review): the declaration of 'chi2' is elided here (doc line 395)
         auto logl = std::make_unique<PoissonLikelihoodFCN<BaseFunc, IModelFunction_v>>(data, fFunc_v, useWeight, extended, executionPolicy);
         return (useWeight) ? DoWeightMinimization(std::move(logl),&chi2) : DoMinimization(std::move(logl),&chi2);
      } else {
         // create a chi2 function to be used for the equivalent chi-square
         // NOTE(review): the declaration of 'chi2' is elided here (doc line 400-401)
         auto logl = std::make_unique<PoissonLikelihoodFCN<BaseFunc>>(data, fFunc, useWeight, extended, executionPolicy);
         return (useWeight) ? DoWeightMinimization(std::move(logl),&chi2) : DoMinimization(std::move(logl),&chi2);
      }
   } else {
      // NOTE(review): one line elided here (doc line 405)
      MATH_INFO_MSG("Fitter::DoLikelihoodFit", "use gradient from model function");
      // not-extended is not implemented in this case
      if (!extended) {
         MATH_WARN_MSG("Fitter::DoBinnedLikelihoodFit",
                       "Not-extended binned fit with gradient not yet supported - do an extended fit");
         extended = true;
      }
      if (fFunc_v) {
         // create a chi2 function to be used for the equivalent chi-square
         // NOTE(review): the declaration of 'chi2' is elided here (doc lines 415-416)
         std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
         if (!gradFun) {
            MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
            return false;
         }
         auto logl = std::make_unique<PoissonLikelihoodFCN<BaseGradFunc, IModelFunction_v>>(data, gradFun, useWeight, extended, executionPolicy);
         // do minimization
         return (useWeight) ? DoWeightMinimization(std::move(logl),&chi2) : DoMinimization(std::move(logl),&chi2);
      } else {
         // create a chi2 function to be used for the equivalent chi-square
         // NOTE(review): the declaration of 'chi2' is elided here (doc line 426)
         // check if fFunc provides gradient
         std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
         if (!gradFun) {
            MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
            return false;
         }
         // use gradient for minimization
         auto logl = std::make_unique<PoissonLikelihoodFCN<BaseGradFunc>>(data, gradFun, useWeight, extended, executionPolicy);
         // do minimization
         return (useWeight) ? DoWeightMinimization(std::move(logl),&chi2) : DoMinimization(std::move(logl),&chi2);
      }
   }
   return false;
}
441
/// Perform a likelihood fit on a set of unbinned data.
/// NOTE(review): some lines are elided in this extraction (doc lines 470-471,
/// 486): the guard that sets the error definition is missing and a stray
/// closing brace from it survives below. Confirm against upstream.
bool Fitter::DoUnbinnedLikelihoodFit(bool extended, const ROOT::EExecutionPolicy &executionPolicy) {
   // perform a likelihood fit on a set of unbinned data

   std::shared_ptr<UnBinData> data = std::dynamic_pointer_cast<UnBinData>(fData);
   assert(data);

   // whether the weight correction is requested in the configuration
   bool useWeight = fConfig.UseWeightCorrection();

   if (!fFunc && !fFunc_v) {
      MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit","model function is not set");
      return false;
   }

   if (useWeight && fConfig.MinosErrors() ) {
      MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit","MINOS errors cannot be computed in weighted likelihood fits");
      fConfig.SetMinosErrors(false);
   }


   fBinFit = false;
   fDataSize = data->Size();

#ifdef DEBUG
   int ipar = 0;
   std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[ipar].IsBound() << " lower limit " << Config().ParamsSettings()[ipar].LowerLimit() << " upper limit " << Config().ParamsSettings()[ipar].UpperLimit() << std::endl;
#endif

   // logl fit (error should be 0.5) set if different than default values (of 1)
   // NOTE(review): the 'if (...) {' line and its body are elided here (doc lines 470-471)
   }

   if (!fUseGradient) {
      // do minimization without using the gradient
      if (fFunc_v ){
         auto logl = std::make_unique<LogLikelihoodFCN<BaseFunc, IModelFunction_v>>(data, fFunc_v, useWeight, extended, executionPolicy);
         // do minimization
         return (useWeight) ? DoWeightMinimization(std::move(logl)) : DoMinimization(std::move(logl));
      } else {
         auto logl = std::make_unique<LogLikelihoodFCN<BaseFunc>>(data, fFunc, useWeight, extended, executionPolicy);
         return (useWeight) ? DoWeightMinimization(std::move(logl)) : DoMinimization(std::move(logl));
      }
   } else {
      // use gradient : check if fFunc provides gradient
      // NOTE(review): one line elided here (doc line 486)
      MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "use gradient from model function");
      if (extended) {
         MATH_WARN_MSG("Fitter::DoUnbinnedLikelihoodFit",
                       "Extended unbinned fit with gradient not yet supported - do a not-extended fit");
         extended = false;
      }
      if (fFunc_v) {
         std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
         if (!gradFun) {
            MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
            return false;
         }
         auto logl = std::make_unique<LogLikelihoodFCN<BaseGradFunc, IModelFunction_v>>(data, gradFun, useWeight, extended, executionPolicy);
         return (useWeight) ? DoWeightMinimization(std::move(logl)) : DoMinimization(std::move(logl));
      } else {
         std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
         if (!gradFun) {
            MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
            return false;
         }
         auto logl = std::make_unique<LogLikelihoodFCN<BaseGradFunc>>(data, gradFun, useWeight, extended, executionPolicy);
         return (useWeight) ? DoWeightMinimization(std::move(logl)) : DoMinimization(std::move(logl));
      }
   }
   return false;
}
513
514
516
// NOTE(review): the function signature and opening brace are elided in this
// extraction (doc line 515); this performs a linear least-square fit by
// temporarily switching to the "Linear" minimizer.
   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
   assert(data);

   // perform a linear fit on a set of binned data
   // remember the configured minimizer so it can be restored afterwards
   std::string prevminimizer = fConfig.MinimizerType();
   fConfig.SetMinimizer("Linear");

   fBinFit = true;

   bool ret = DoLeastSquareFit();
   fConfig.SetMinimizer(prevminimizer.c_str());
   return ret;
}
530
531
// NOTE(review): the function signature is elided in this extraction (doc line
// 532); this computes the Hesse errors. Several interior lines are also
// elided (doc lines 555, 574-575, 577, 584-585, 589-590), so some 'if'
// statements below appear body-less and some braces unbalanced — confirm
// against the upstream source before editing.
   // compute the Hesse errors according to configuration
   // set in the parameters and append value in fit result
   if (!ObjFunction()) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Objective function has not been set");
      return false;
   }

   // need a special treatment in case of weighted likelihood fit
   // (not yet implemented)
   if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Re-computation of Hesse errors not implemented for weighted likelihood fits");
      MATH_INFO_MSG("Fitter::CalculateHessErrors","Do the Fit using configure option FitConfig::SetParabErrors()");
      return false;
   }

   // a fit Result pointer must exist when a minimizer exists
   if (fMinimizer && !fResult ) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors", "FitResult has not been created");
      return false;
   }

   // update minimizer (recreate if not done or if name has changed)
   // NOTE(review): the enclosing 'if (...) {' line is elided here (doc line 555)
      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Error re-initializing the minimizer");
      return false;
   }

   if (!fMinimizer ) {
      // this should not happen
      MATH_ERROR_MSG("Fitter::CalculateHessErrors", "Need to do a fit before calculating the errors");
      assert(false);
      return false;
   }

   //run Hesse
   bool ret = fMinimizer->Hesse();
   if (!ret) MATH_WARN_MSG("Fitter::CalculateHessErrors","Error when calculating Hessian");

   // update minimizer results with what comes out from Hesse
   // in case is empty - create from a FitConfig
   if (fResult->IsEmpty() )
   // NOTE(review): the body of this 'if' is elided here (doc lines 574-575)

   // update obj function in case it was an external one
   // NOTE(review): a guard/clone line appears elided here (doc line 577)
   fResult->fObjFunc = fObjFunction;

   // re-give a minimizer instance in case it has been changed
   ret |= fResult->Update(fMinimizer, fConfig, ret);

   // when possible get ncalls from FCN and set in fit result
   // NOTE(review): the enclosing 'if (...) {' line is elided here (doc lines 584-585)
      fResult->fNCalls = GetNCallsFromFCN();
   }

   // set also new errors in FitConfig
   // NOTE(review): the statement is elided here (doc lines 589-590)

   return ret;
}
593
594
// NOTE(review): the function signature is elided in this extraction (doc line
// 595); this computes the Minos (asymmetric) errors. One interior line is
// also elided (doc line 662). Confirm against upstream before editing.
   // compute the Minos errors according to configuration
   // set in the parameters and append value in fit result
   // normally Minos errors are computed just after the minimization
   // (in DoMinimization) after minimizing if the
   // FitConfig::MinosErrors() flag is set

   if (!fMinimizer) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minimizer does not exist - cannot calculate Minos errors");
      return false;
   }

   if (!fResult || fResult->IsEmpty() ) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Invalid Fit Result - cannot calculate Minos errors");
      return false;
   }

   if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Computation of MINOS errors not implemented for weighted likelihood fits");
      return false;
   }

   // update minimizer (but cannot re-create in this case). Must use an existing one
   if (!DoUpdateMinimizerOptions(false)) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Error re-initializing the minimizer");
      return false;
   }

   // set flag to compute Minos error to false in FitConfig to avoid that
   // following minimization calls perform unwanted Minos error calculations
   /// fConfig.SetMinosErrors(false);


   // when MinosParams() is non-empty, only those parameters are processed
   const std::vector<unsigned int> & ipars = fConfig.MinosParams();
   unsigned int n = (ipars.size() > 0) ? ipars.size() : fResult->Parameters().size();
   bool ok = false;

   int iparNewMin = 0;
   int iparMax = n;
   int iter = 0;
   // rerun minos for the parameters run before a new Minimum has been found
   do {
      if (iparNewMin > 0)
         MATH_INFO_MSG("Fitter::CalculateMinosErrors","Run again Minos for some parameters because a new Minimum has been found");
      iparNewMin = 0;
      for (int i = 0; i < iparMax; ++i) {
         double elow, eup;
         unsigned int index = (ipars.size() > 0) ? ipars[i] : i;
         bool ret = fMinimizer->GetMinosError(index, elow, eup);
         // flags case when a new minimum has been found
         if ((fMinimizer->MinosStatus() & 8) != 0) {
            iparNewMin = i;
         }
         if (ret)
            fResult->SetMinosError(index, elow, eup);
         ok |= ret;   // success if Minos worked for at least one parameter
      }

      iparMax = iparNewMin;
      iter++;  // to avoid infinite looping
   }
   while( iparNewMin > 0 && iter < 10);
   if (!ok) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minos error calculation failed for all the selected parameters");
   }

   // update obj function in case it was an external one
   // NOTE(review): a guard/clone line appears elided here (doc line 662)
   fResult->fObjFunc = fObjFunction;

   // re-give a minimizer instance in case it has been changed
   // but maintain previous valid status. Do not set result to false if minos failed
   ok &= fResult->Update(fMinimizer, fConfig, fResult->IsValid());

   return ok;
}
671
672
673
// traits for distinguishing fit methods functions from generic objective functions
// NOTE(review): the 'struct ... {' header lines of these three definitions are
// elided in this extraction (doc lines 676, 682, 688); only the members and
// closing braces survive. Confirm the struct names against upstream.
template<class Func>
   // generic objective function: no call count available, unknown type, no gradient
   static unsigned int NCalls(const Func & ) { return 0; }
   static int Type(const Func & ) { return -1; }
   static bool IsGrad() { return false; }
};
template<>
   // specialization for fit-method functions (no gradient)
   static unsigned int NCalls(const ROOT::Math::FitMethodFunction & f ) { return f.NCalls(); }
   static int Type(const ROOT::Math::FitMethodFunction & f) { return f.Type(); }
   static bool IsGrad() { return false; }
};
template<>
   // specialization for gradient-providing fit-method functions
   static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction & f ) { return f.NCalls(); }
   static int Type(const ROOT::Math::FitMethodGradFunction & f) { return f.Type(); }
   static bool IsGrad() { return true; }
};
693
// NOTE(review): the function signature and opening brace are elided in this
// extraction (doc line 694); this is the minimizer initialization routine.
   //initialize minimizer by creating it
   // and set there the objective function
   // obj function must have been set before
   auto objFunction = ObjFunction();
   if (!objFunction) {
      MATH_ERROR_MSG("Fitter::DoInitMinimizer","Objective function has not been set");
      return false;
   }

   // check configuration and objective function
   if ( fConfig.ParamsSettings().size() != objFunction->NDim() ) {
      MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong function dimension or wrong size for FitConfig");
      return false;
   }

   // create first Minimizer
   // using a shared pointer, assigning it deletes any previously existing one
   fMinimizer = std::shared_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
   if (fMinimizer.get() == nullptr) {
      MATH_ERROR_MSG("Fitter::DoInitMinimizer","Minimizer cannot be created");
      return false;
   }

   // in case of gradient function one needs to downcast the pointer
   if (fUseGradient) {
      const ROOT::Math::IMultiGradFunction * gradfcn = dynamic_cast<const ROOT::Math::IMultiGradFunction *> (objFunction );
      if (!gradfcn) {
         MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong type of function - it does not provide gradient");
         return false;
      }
      fMinimizer->SetFunction( *gradfcn);
      // set also Hessian if available
      if (Config().MinimizerType() == "Minuit2") {
         const ROOT::Math::FitMethodGradFunction *fitGradFcn =
            dynamic_cast<const ROOT::Math::FitMethodGradFunction *>(gradfcn);
         if (fitGradFcn && fitGradFcn->HasHessian()) {
            // adapter: the FCN returns the Hessian packed as the lower
            // triangle; expand it into the full symmetric row-major matrix
            // the minimizer expects
            auto hessFcn = [=](const std::vector<double> &x, double *hess) {
               unsigned int ndim = x.size();
               unsigned int nh = ndim * (ndim + 1) / 2;   // size of the packed triangle
               std::vector<double> h(nh);
               bool ret = fitGradFcn->Hessian(x.data(), h.data());
               if (!ret) return false;
               for (unsigned int i = 0; i < ndim; i++) {
                  for (unsigned int j = 0; j <= i; j++) {
                     unsigned int index = j + i * (i + 1) / 2; // formula for j < i
                     hess[ndim * i + j] = h[index];
                     if (j != i)
                        hess[ndim * j + i] = h[index];   // mirror into the upper triangle
                  }
               }
               return true;
            };

            fMinimizer->SetHessianFunction(hessFcn);
         }
      }
   }
   else
      fMinimizer->SetFunction( *objFunction);


   fMinimizer->SetVariables(fConfig.ParamsSettings().begin(), fConfig.ParamsSettings().end() );

   // if requested parabolic error do correct error analysis by the minimizer (call HESSE)
   if (fConfig.ParabErrors()) fMinimizer->SetValidError(true);

   return true;

}
764
765bool Fitter::DoUpdateMinimizerOptions(bool canDifferentMinim ) {
766 // update minimizer options when re-doing a Fit or computing Hesse or Minos errors
767
768
769 // create a new minimizer if it is different type
770 // minimizer type string stored in FitResult is "minimizer name" + " / " + minimizer algo
771 std::string newMinimType = fConfig.MinimizerName();
772 if (fMinimizer && fResult && newMinimType != fResult->MinimizerType()) {
773 // if a different minimizer is allowed (e.g. when calling Hesse)
774 if (canDifferentMinim) {
775 std::string msg = "Using now " + newMinimType;
776 MATH_INFO_MSG("Fitter::DoUpdateMinimizerOptions: ", msg.c_str());
777 if (!DoInitMinimizer() )
778 return false;
779 }
780 else {
781 std::string msg = "Cannot change minimizer. Continue using " + fResult->MinimizerType();
782 MATH_WARN_MSG("Fitter::DoUpdateMinimizerOptions",msg.c_str());
783 }
784 }
785
786 // create minimizer if it was not done before
787 if (!fMinimizer) {
788 if (!DoInitMinimizer())
789 return false;
790 }
791
792 // set new minimizer options (but not functions and parameters)
793 fMinimizer->SetOptions(fConfig.MinimizerOptions());
794 return true;
795}
796
// NOTE(review): the function signature is elided in this extraction (doc line
// 797); this runs the minimization with an already-initialized minimizer.
// Several interior lines are also elided (doc lines 811, 815, 821, 829), so
// some 'if' statements below appear incomplete — confirm against upstream.
   // perform the minimization (assume we have already initialized the minimizer)

   assert(fMinimizer );

   bool isValid = fMinimizer->Minimize();

   if (!fResult) fResult = std::make_shared<FitResult>();

   fResult->FillResult(fMinimizer,fConfig, fFunc, isValid, fDataSize, fFitType, chi2func );

   // if requested run Minos after minimization
   if (isValid && fConfig.MinosErrors()) {
      // minos error calculation will update also FitResult
      // NOTE(review): the call performing the Minos calculation is elided here (doc lines 811-812)
   }

   // when possible get number of calls from FCN and set in fit result
   // NOTE(review): the enclosing 'if (...) {' line is elided here (doc line 815)
      fResult->fNCalls = GetNCallsFromFCN();
   }

   // fill information in fit result
   // if using an external obj function clone it for storing in FitResult
   // NOTE(review): a guard/clone line appears elided here (doc line 821)
   fResult->fObjFunc = fObjFunction;
   fResult->fFitData = fData;

#ifdef DEBUG
   std::cout << "ROOT::Fit::Fitter::DoMinimization : ncalls = " << fResult->fNCalls << " type of objfunc " << fFitFitResType << " typeid: " << typeid(*fObjFunction).name() << " use gradient " << fUseGradient << std::endl;
#endif

   // NOTE(review): the enclosing 'if (...)' guard is elided here (doc line 829)
   fResult->NormalizeErrors();

   // set also new parameter values and errors in FitConfig
   if (fConfig.UpdateAfterFit() && isValid) DoUpdateFitConfig();

   return isValid;
}
837template<class ObjFunc_t>
838bool Fitter::DoMinimization(std::unique_ptr<ObjFunc_t> objFunc, const ROOT::Math::IMultiGenFunction * chi2func) {
839 // perform the minimization initializing the minimizer starting from a given obj function
840 fFitType = objFunc->Type();
841 fExtObjFunction = nullptr;
842 fObjFunction = std::move(objFunc);
843 if (!DoInitMinimizer()) return false;
844 return DoMinimization(chi2func);
845}
846template<class ObjFunc_t>
847bool Fitter::DoWeightMinimization(std::unique_ptr<ObjFunc_t> objFunc, const ROOT::Math::IMultiGenFunction * chi2func) {
848 // perform the minimization initializing the minimizer starting from a given obj function
849 // and apply afterwards the correction for weights. This applies only for logL fitting
850 this->fFitType = objFunc->Type();
851 fExtObjFunction = nullptr;
852 // need to use a temporary shared pointer to the objective function since we cannot use the unique pointer when it has been moved
853 std::shared_ptr<ObjFunc_t> sObjFunc{ std::move(objFunc)};
854 fObjFunction = sObjFunc;
855 if (!DoInitMinimizer()) return false;
856 if (!DoMinimization(chi2func)) return false;
857 sObjFunc->UseSumOfWeightSquare();
858 return ApplyWeightCorrection(*sObjFunc);
859}
860
861
// NOTE(review): the function signature is elided in this extraction (doc line
// 862); this copies the fit result back into the fit configuration. The
// declaration of 'par' inside the loop is also elided (doc line 866) —
// presumably a reference to the i-th parameter settings. Confirm upstream.
   // update the fit configuration after a fit using the obtained result
   if (fResult->IsEmpty() || !fResult->IsValid() ) return;
   for (unsigned int i = 0; i < fConfig.NPar(); ++i) {
      // fitted value becomes the new start value
      par.SetValue( fResult->Value(i) );
      // a positive parabolic error becomes the new step size
      if (fResult->Error(i) > 0) par.SetStepSize( fResult->Error(i) );
   }
}
871
// NOTE(review): the function signature is elided in this extraction (doc line
// 872); this returns the call count known to the objective function itself.
// The declaration of 'fcn' in the gradient branch is also elided (doc line
// 881) — presumably a dynamic_cast to FitMethodGradFunction. Confirm upstream.
   // retrieve ncalls from the fit method functions
   // this function is called when minimizer does not provide a way of returning the number of function calls
   int ncalls = 0;
   if (!fUseGradient) {
      const ROOT::Math::FitMethodFunction * fcn = dynamic_cast<const ROOT::Math::FitMethodFunction *>(fObjFunction.get());
      if (fcn) ncalls = fcn->NCalls();
   }
   else {
      // NOTE(review): 'fcn' declaration elided here (doc line 881)
      if (fcn) ncalls = fcn->NCalls();
   }
   return ncalls;
}
886
887
888bool Fitter::ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction & loglw2, bool minimizeW2L) {
889 // apply correction for weight square
890 // Compute Hessian of the loglikelihood function using the sum of the weight squared
891 // This method assumes:
892 // - a fit has been done before and a covariance matrix exists
893 // - the objective function is a likelihood function and Likelihood::UseSumOfWeightSquare()
894 // has been called before
895
896 if (fMinimizer.get() == nullptr) {
897 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Must perform first a fit before applying the correction");
898 return false;
899 }
900
901 unsigned int n = loglw2.NDim();
902 // correct errors for weight squared
903 std::vector<double> cov(n*n);
904 bool ret = fMinimizer->GetCovMatrix(&cov[0] );
905 if (!ret) {
906 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Previous fit has no valid Covariance matrix");
907 return false;
908 }
909 // need to use new obj function computed with weight-square
910 std::shared_ptr<ROOT::Math::IMultiGenFunction> objFunc(loglw2.Clone());
911 fObjFunction.swap( objFunc );
912
913 // need to re-initialize the minimizer for the changes applied in the
914 // objective functions
915 if (!DoInitMinimizer()) return false;
916
917 //std::cout << "Running Hesse ..." << std::endl;
918
919 // run eventually before a minimization
920 // ignore its error
921 if (minimizeW2L) fMinimizer->Minimize();
922 // run Hesse on the log-likelihood build using sum of weight squared
923 ret = fMinimizer->Hesse();
924 if (!ret) {
925 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error running Hesse on weight2 likelihood - cannot compute errors");
926 return false;
927 }
928
929 if (fMinimizer->CovMatrixStatus() != 3) {
930 MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not accurate, the errors may be not reliable");
931 if (fMinimizer->CovMatrixStatus() == 2)
932 MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood was forced to be defined positive");
933 if (fMinimizer->CovMatrixStatus() <= 0)
934 // probably should have failed before
935 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not valid !");
936 }
937
938 // get Hessian matrix from weight-square likelihood
939 std::vector<double> hes(n*n);
940 ret = fMinimizer->GetHessianMatrix(&hes[0] );
941 if (!ret) {
942 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error retrieving Hesse on weight2 likelihood - cannot compute errors");
943 return false;
944 }
945
946
947 // perform product of matrix cov * hes * cov
948 // since we do not want to add matrix dependence do product by hand
949 // first do hes * cov
950 std::vector<double> tmp(n*n);
951 for (unsigned int i = 0; i < n; ++i) {
952 for (unsigned int j = 0; j < n; ++j) {
953 for (unsigned int k = 0; k < n; ++k)
954 tmp[i*n+j] += hes[i*n + k] * cov[k*n + j];
955 }
956 }
957 // do multiplication now cov * tmp save result
958 std::vector<double> newCov(n*n);
959 for (unsigned int i = 0; i < n; ++i) {
960 for (unsigned int j = 0; j < n; ++j) {
961 for (unsigned int k = 0; k < n; ++k)
962 newCov[i*n+j] += cov[i*n + k] * tmp[k*n + j];
963 }
964 }
965 // update fit result with new corrected covariance matrix
966 unsigned int k = 0;
967 for (unsigned int i = 0; i < n; ++i) {
968 fResult->fErrors[i] = std::sqrt( newCov[i*(n+1)] );
969 for (unsigned int j = 0; j <= i; ++j)
970 fResult->fCovMatrix[k++] = newCov[i *n + j];
971 }
972
973 // restore previous used objective function
974 fObjFunction.swap( objFunc );
975
976 return true;
977}
978
979
980
// NOTE(review): the function signature (doc line 981) and most of the body
// (doc lines 987-992) are elided in this extraction; this routine inspects
// the stored objective function to recover fit data and model function.
   // return a pointer to the binned data used in the fit
   // works only for chi2 or binned likelihood fits
   // thus when the objective function stored is a Chi2Func or a PoissonLikelihood
   // This also set the model function correctly if it has not been set

   //MATH_INFO_MSG("Fitter::ExamineFCN","Objective function is not of a known type - FitData and ModelFunction objects are not available");
   return;
}
996
997 } // end namespace Fit
998
999} // end namespace ROOT
#define MATH_INFO_MSG(loc, str)
Pre-processor macro to report messages which can be configured to use ROOT error or simply an std::io...
Definition Error.h:77
#define MATH_ERROR_MSG(loc, str)
Definition Error.h:83
#define MATH_WARN_MSG(loc, str)
Definition Error.h:80
#define f(i)
Definition RSha256.hxx:104
#define h(i)
Definition RSha256.hxx:106
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void data
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t result
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t index
BasicFCN class: base class for the objective functions used in the fits It has a reference to the dat...
Definition BasicFCN.h:40
Chi2FCN class for binned fits using the least square methods.
Definition Chi2FCN.h:46
const std::vector< unsigned int > & MinosParams() const
return vector of parameter indices for which the Minos Error will be computed
Definition FitConfig.h:220
bool UpdateAfterFit() const
Update configuration after a fit using the FitResult.
Definition FitConfig.h:213
void SetMinimizer(const char *type, const char *algo=nullptr)
set minimizer type
Definition FitConfig.h:181
void SetMinosErrors(bool on=true)
set Minos errors computation to be performed after fitting
Definition FitConfig.h:231
bool NormalizeErrors() const
flag to check if resulting errors are be normalized according to chi2/ndf
Definition FitConfig.h:204
bool ParabErrors() const
do analysis for parabolic errors
Definition FitConfig.h:207
unsigned int NPar() const
number of parameters settings
Definition FitConfig.h:96
void SetParamsSettings(unsigned int npar, const double *params, const double *vstep=nullptr)
set the parameter settings from number of parameters and a vector of values and optionally step value...
std::string MinimizerName() const
return Minimizer full name (type / algorithm)
bool UseWeightCorrection() const
Apply Weight correction for error matrix computation.
Definition FitConfig.h:216
const std::vector< ROOT::Fit::ParameterSettings > & ParamsSettings() const
get the vector of parameter settings (const method)
Definition FitConfig.h:86
ROOT::Math::Minimizer * CreateMinimizer()
create a new minimizer according to chosen configuration
void CreateParamsSettings(const ROOT::Math::IParamMultiFunctionTempl< T > &func)
set the parameter settings from a model function.
Definition FitConfig.h:109
const std::string & MinimizerType() const
return type of minimizer package
Definition FitConfig.h:189
const ParameterSettings & ParSettings(unsigned int i) const
get the parameter settings for the i-th parameter (const method)
Definition FitConfig.h:76
ROOT::Math::MinimizerOptions & MinimizerOptions()
access to the minimizer control parameter (non const method)
Definition FitConfig.h:167
bool MinosErrors() const
do minos errors analysis on the parameters
Definition FitConfig.h:210
class containing the result of the fit and all the related information (fitted parameter values,...
Definition FitResult.h:47
bool DoSetFCN(bool useExtFCN, const ROOT::Math::IMultiGenFunction &fcn, const double *params, unsigned int dataSize, bool chi2fit)
Set Objective function.
Definition Fitter.cxx:137
bool EvalFCN()
Perform a simple FCN evaluation.
Definition Fitter.cxx:290
const ROOT::Math::IMultiGenFunction * fExtObjFunction
! pointer to an external FCN
Definition Fitter.h:575
bool FitFCN()
Perform a fit with the previously set FCN function.
Definition Fitter.cxx:271
void DoUpdateFitConfig()
Definition Fitter.cxx:862
bool DoMinimization(std::unique_ptr< ObjFunc_t > f, const ROOT::Math::IMultiGenFunction *chifunc=nullptr)
do minimization
Definition Fitter.cxx:838
int fDataSize
size of data sets (need for Fumili or LM fitters)
Definition Fitter.h:559
bool DoUnbinnedLikelihoodFit(bool extended=false, const ROOT::EExecutionPolicy &executionPolicy=ROOT::EExecutionPolicy::kSequential)
un-binned likelihood fit
Definition Fitter.cxx:442
const ROOT::Math::IBaseFunctionMultiDimTempl< double > * ObjFunction() const
Return pointer to the used objective function for fitting.
Definition Fitter.h:543
std::shared_ptr< ROOT::Math::Minimizer > fMinimizer
! pointer to used minimizer
Definition Fitter.h:569
bool DoWeightMinimization(std::unique_ptr< ObjFunc_t > f, const ROOT::Math::IMultiGenFunction *chifunc=nullptr)
Definition Fitter.cxx:847
bool DoBinnedLikelihoodFit(bool extended=true, const ROOT::EExecutionPolicy &executionPolicy=ROOT::EExecutionPolicy::kSequential)
binned likelihood fit
Definition Fitter.cxx:362
int fFitType
type of fit (0 undefined, 1 least square, 2 likelihood, 3 binned likelihood)
Definition Fitter.h:557
std::shared_ptr< ROOT::Fit::FitData > fData
! pointer to the fit data (binned or unbinned data)
Definition Fitter.h:571
bool fUseGradient
flag to indicate if using gradient or not
Definition Fitter.h:551
bool fBinFit
flag to indicate if fit is binned (in case of false the fit is unbinned or undefined); flag it is used ...
Definition Fitter.h:553
std::shared_ptr< ROOT::Math::IMultiGenFunction > fObjFunction
! pointer to used objective function
Definition Fitter.h:573
bool ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction &loglw2, bool minimizeW2L=false)
apply correction in the error matrix for the weights for likelihood fits This method can be called on...
Definition Fitter.cxx:888
bool SetFCN(unsigned int npar, Function &fcn, const double *params=nullptr, unsigned int dataSize=0, bool chi2fit=false)
Set a generic FCN function as a C++ callable object implementing double () (const double *) Note that...
Definition Fitter.h:656
void ExamineFCN()
look at the user provided FCN and get data and model function if they derive from ROOT::Fit FCN class...
Definition Fitter.cxx:981
const FitConfig & Config() const
access to the fit configuration (const method)
Definition Fitter.h:421
bool DoLeastSquareFit(const ROOT::EExecutionPolicy &executionPolicy=ROOT::EExecutionPolicy::kSequential)
least square fit
Definition Fitter.cxx:311
std::shared_ptr< IModelFunction_v > fFunc_v
! copy of the fitted function containing on output the fit result
Definition Fitter.h:563
std::shared_ptr< ROOT::Fit::FitResult > fResult
! pointer to the object containing the result of the fit
Definition Fitter.h:567
bool GetDataFromFCN()
internal functions to get data set and model function from FCN useful for fits done with customized F...
Definition Fitter.h:583
bool CalculateMinosErrors()
perform an error analysis on the result using MINOS To be called only after fitting and when a minimi...
Definition Fitter.cxx:595
bool DoUpdateMinimizerOptions(bool canDifferentMinim=true)
Definition Fitter.cxx:765
void SetFunction(const IModelFunction &func, bool useGradient=false)
Set the fitted function (model function) from a parametric function interface.
Definition Fitter.cxx:59
bool CalculateHessErrors()
perform an error analysis on the result using the Hessian Errors are obtained from the inverse of the...
Definition Fitter.cxx:532
FitConfig fConfig
fitter configuration (options and parameter settings)
Definition Fitter.h:561
Fitter()
Default constructor.
Definition Fitter.h:102
std::shared_ptr< IModelFunction > fFunc
! copy of the fitted function containing on output the fit result
Definition Fitter.h:565
bool DoLinearFit()
linear least square fit
Definition Fitter.cxx:515
bool DoInitMinimizer()
Definition Fitter.cxx:694
int GetNCallsFromFCN()
Definition Fitter.cxx:872
Class, describing value, limits and step size of the parameters Provides functionality also to set/re...
void SetValue(double val)
set the value
void SetStepSize(double err)
set the step size
FitMethodFunction class Interface for objective functions (like chi2 and likelihood used in the fit) ...
virtual Type_t Type() const
return the type of method, override if needed
virtual bool Hessian(const double *x, double *hess) const
Computes the full Hessian.
virtual unsigned int NPoints() const
return the number of data points used in evaluating the function
virtual unsigned int NCalls() const
return the total number of function calls (override if needed)
Documentation for the abstract class IBaseFunctionMultiDim.
Definition IFunction.h:61
virtual IBaseFunctionMultiDimTempl< T > * Clone() const =0
Clone a function.
virtual unsigned int NDim() const =0
Retrieve the dimension of the function.
Interface (abstract class) for multi-dimensional functions providing a gradient calculation.
Definition IFunction.h:168
Specialized IParamFunction interface (abstract class) for one-dimensional parametric functions It is ...
Interface (abstract class) for parametric gradient multi-dimensional functions providing in addition ...
Interface (abstract class) for parametric one-dimensional gradient functions providing in addition to...
double ErrorDef() const
error definition
int PrintLevel() const
non-static methods for retrieving options
void SetErrorDef(double err)
set error def
MultiDimParamFunctionAdapter class to wrap a one-dimensional parametric function in a multi dimension...
MultiDimParamGradFunctionAdapter class to wrap a one-dimensional parametric gradient function in a mu...
Double_t x[n]
Definition legend1.C:17
const Int_t n
Definition legend1.C:16
TFitResultPtr Fit(FitObject *h1, TF1 *f1, Foption_t &option, const ROOT::Math::MinimizerOptions &moption, const char *goption, ROOT::Fit::DataRange &range)
Definition HFitImpl.cxx:133
double gDefaultErrorDef
Definition Fitter.cxx:48
This file contains a specialised ROOT message handler to test for diagnostic in unit tests.
static int Type(const ROOT::Math::FitMethodFunction &f)
Definition Fitter.cxx:684
static unsigned int NCalls(const ROOT::Math::FitMethodFunction &f)
Definition Fitter.cxx:683
static int Type(const ROOT::Math::FitMethodGradFunction &f)
Definition Fitter.cxx:690
static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction &f)
Definition Fitter.cxx:689
static bool IsGrad()
Definition Fitter.cxx:679
static unsigned int NCalls(const Func &)
Definition Fitter.cxx:677
static int Type(const Func &)
Definition Fitter.cxx:678