Logo ROOT   6.18/05
Reference Guide
Fitter.cxx
Go to the documentation of this file.
1// @(#)root/mathcore:$Id$
2// Author: L. Moneta Mon Sep 4 17:00:10 2006
3
4/**********************************************************************
5 * *
6 * Copyright (c) 2006 LCG ROOT Math Team, CERN/PH-SFT *
7 * *
8 * *
9 **********************************************************************/
10
11// Implementation file for class Fitter
12
13
14#include "Fit/Fitter.h"
15#include "Fit/Chi2FCN.h"
18#include "Math/Minimizer.h"
21#include "Fit/BasicFCN.h"
22#include "Fit/BinData.h"
23#include "Fit/UnBinData.h"
24#include "Fit/FcnAdapter.h"
25#include "Fit/FitConfig.h"
26#include "Fit/FitResult.h"
27#include "Math/Error.h"
28
29#include <memory>
30
31#include "Math/IParamFunction.h"
32
34
35// #include "TMatrixDSym.h"
36// for debugging
37//#include "TMatrixD.h"
38// #include <iomanip>
39
40namespace ROOT {
41
42 namespace Fit {
43
44// use a static variable to get default minimizer options for error def
45// to see if user has changed it later on. If it has not been changed we set
46// for the likelihood method an error def of 0.5
47// t.b.d : multiply likelihood by 2 so have same error def definition as chi2
49
50
52 fUseGradient(false),
53 fBinFit(false),
54 fFitType(0),
55 fDataSize(0)
56{}
57
58Fitter::Fitter(const std::shared_ptr<FitResult> & result) :
59 fUseGradient(false),
60 fBinFit(false),
61 fFitType(0),
62 fDataSize(0),
63 fResult(result)
64{
65 if (result->fFitFunc) SetFunction(*fResult->fFitFunc); // this will create also the configuration
66 if (result->fObjFunc) fObjFunction = fResult->fObjFunc;
67 if (result->fFitData) fData = fResult->fFitData;
68}
69
71{
72 // Destructor implementation.
73
74 // nothing to do since we use shared_ptr now
75}
76
78{
79 // Implementation of copy constructor.
80 // copy FitResult, FitConfig and clone fit function
81 (*this) = rhs;
82}
83
85{
86 // Implementation of assignment operator.
87 // dummy implementation, since it is private
88 if (this == &rhs) return *this; // time saving self-test
89// fUseGradient = rhs.fUseGradient;
90// fBinFit = rhs.fBinFit;
91// fResult = rhs.fResult;
92// fConfig = rhs.fConfig;
93// // function is copied and managed by FitResult (maybe should use an unique_ptr)
94// fFunc = fResult.ModelFunction();
95// if (rhs.fFunc != 0 && fResult.ModelFunction() == 0) { // case no fit has been done yet - then clone
96// if (fFunc) delete fFunc;
97// fFunc = dynamic_cast<IModelFunction *>( (rhs.fFunc)->Clone() );
98// assert(fFunc != 0);
99// }
100 return *this;
101}
102
103void Fitter::SetFunction(const IModelFunction & func, bool useGradient)
104{
105
106 fUseGradient = useGradient;
107 if (fUseGradient) {
108 const IGradModelFunction * gradFunc = dynamic_cast<const IGradModelFunction*>(&func);
109 if (gradFunc) {
110 SetFunction(*gradFunc, true);
111 return;
112 }
113 else {
114 MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
115 }
116 }
117 fUseGradient = false;
118
119 // set the fit model function (clone the given one and keep a copy )
120 //std::cout << "set a non-grad function" << std::endl;
121
122 fFunc = std::shared_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone() ) );
123 assert(fFunc);
124
125 // creates the parameter settings
127 fFunc_v.reset();
128}
129
130void Fitter::SetFunction(const IModel1DFunction & func, bool useGradient)
131{
132 fUseGradient = useGradient;
133 if (fUseGradient) {
134 const IGradModel1DFunction * gradFunc = dynamic_cast<const IGradModel1DFunction*>(&func);
135 if (gradFunc) {
136 SetFunction(*gradFunc, true);
137 return;
138 }
139 else {
140 MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
141 }
142 }
143 fUseGradient = false;
144 //std::cout << "set a 1d function" << std::endl;
145
146 // function is cloned when creating the adapter
147 fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamFunctionAdapter(func));
148
149 // creates the parameter settings
151 fFunc_v.reset();
152}
153
154void Fitter::SetFunction(const IGradModelFunction & func, bool useGradient)
155{
156 fUseGradient = useGradient;
157 //std::cout << "set a grad function" << std::endl;
158 // set the fit model function (clone the given one and keep a copy )
159 fFunc = std::shared_ptr<IModelFunction>( dynamic_cast<IGradModelFunction *> ( func.Clone() ) );
160 assert(fFunc);
161
162 // creates the parameter settings
164 fFunc_v.reset();
165}
166
167
168void Fitter::SetFunction(const IGradModel1DFunction & func, bool useGradient)
169{
170 //std::cout << "set a 1d grad function" << std::endl;
171 fUseGradient = useGradient;
172 // function is cloned when creating the adapter
173 fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamGradFunctionAdapter(func));
174
175 // creates the parameter settings
177 fFunc_v.reset();
178}
179
180
181bool Fitter::SetFCN(const ROOT::Math::IMultiGenFunction & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
182 // set the objective function for the fit
183 // if params is not NULL create the parameter settings
184 fUseGradient = false;
185 unsigned int npar = fcn.NDim();
186 if (npar == 0) {
187 MATH_ERROR_MSG("Fitter::SetFCN","FCN function has zero parameters ");
188 return false;
189 }
190 if (params != 0 )
191 fConfig.SetParamsSettings(npar, params);
192 else {
193 if ( fConfig.ParamsSettings().size() != npar) {
194 MATH_ERROR_MSG("Fitter::SetFCN","wrong fit parameter settings");
195 return false;
196 }
197 }
198
199 fBinFit = chi2fit;
200 fDataSize = dataSize;
201
202 // keep also a copy of FCN function and set this in minimizer so they will be managed together
203 // (remember that cloned copy will still depends on data and model function pointers)
204 fObjFunction = std::unique_ptr<ROOT::Math::IMultiGenFunction> ( fcn.Clone() );
205
206 // in case a model function and data exists from a previous fit - reset shared-ptr
207 if (fFunc && fResult->FittedFunction() == 0) fFunc.reset();
208 if (fData) fData.reset();
209
210 return true;
211}
212
213bool Fitter::SetFCN(const ROOT::Math::IMultiGradFunction & fcn, const double * params, unsigned int dataSize , bool chi2fit) {
214 // set the objective function for the fit
215 // if params is not NULL create the parameter settings
216 if (!SetFCN(static_cast<const ROOT::Math::IMultiGenFunction &>(fcn),params, dataSize, chi2fit) ) return false;
217 fUseGradient = true;
218 return true;
219}
220
221bool Fitter::SetFCN(const ROOT::Math::FitMethodFunction & fcn, const double * params) {
222 // set the objective function for the fit
223 // if params is not NULL create the parameter settings
224 bool chi2fit = (fcn.Type() == ROOT::Math::FitMethodFunction::kLeastSquare );
225 if (!SetFCN(fcn,params,fcn.NPoints(), chi2fit) ) return false;
226 fUseGradient = false;
227 fFitType = fcn.Type();
228 return true;
229}
230
231bool Fitter::SetFCN(const ROOT::Math::FitMethodGradFunction & fcn, const double * params) {
232 // set the objective function for the fit
233 // if params is not NULL create the parameter settings
234 bool chi2fit = (fcn.Type() == ROOT::Math::FitMethodGradFunction::kLeastSquare );
235 if (!SetFCN(fcn,params,fcn.NPoints(), chi2fit) ) return false;
236 fUseGradient = true;
237 fFitType = fcn.Type();
238 return true;
239}
240
241
242
243bool Fitter::FitFCN(const BaseFunc & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
244 // fit a user provided FCN function
245 // create fit parameter settings
246 if (!SetFCN(fcn, params,dataSize,chi2fit) ) return false;
247 return FitFCN();
248}
249
250
251
252bool Fitter::FitFCN(const BaseGradFunc & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
253 // fit a user provided FCN gradient function
254
255 if (!SetFCN(fcn, params,dataSize, chi2fit) ) return false;
256 return FitFCN();
257}
258
259bool Fitter::FitFCN(const ROOT::Math::FitMethodFunction & fcn, const double * params) {
260 // fit using the passed objective function for the fit
261 if (!SetFCN(fcn, params) ) return false;
262 return FitFCN();
263}
264
265bool Fitter::FitFCN(const ROOT::Math::FitMethodGradFunction & fcn, const double * params) {
266 // fit using the passed objective function for the fit
267 if (!SetFCN(fcn, params) ) return false;
268 return FitFCN();
269}
270
271
272bool Fitter::SetFCN(MinuitFCN_t fcn, int npar, const double * params , unsigned int dataSize , bool chi2fit ){
273 // set TMinuit style FCN type (global function pointer)
274 // create corresponfing objective function from that function
275
276 if (npar == 0) {
277 npar = fConfig.ParamsSettings().size();
278 if (npar == 0) {
279 MATH_ERROR_MSG("Fitter::FitFCN","Fit Parameter settings have not been created ");
280 return false;
281 }
282 }
283
284 ROOT::Fit::FcnAdapter newFcn(fcn,npar);
285 return SetFCN(newFcn,params,dataSize,chi2fit);
286}
287
288bool Fitter::FitFCN(MinuitFCN_t fcn, int npar, const double * params , unsigned int dataSize , bool chi2fit ) {
289 // fit using Minuit style FCN type (global function pointer)
290 // create corresponfing objective function from that function
291 if (!SetFCN(fcn, npar, params, dataSize, chi2fit)) return false;
292 fUseGradient = false;
293 return FitFCN();
294}
295
297 // fit using the previously set FCN function
298
299 if (!fObjFunction) {
300 MATH_ERROR_MSG("Fitter::FitFCN","Objective function has not been set");
301 return false;
302 }
303 // look if FCN s of a known type and we can get some modelfunction and data objects
304 if (!fFunc || !fData) ExamineFCN();
305 // init the minimizer
306 if (!DoInitMinimizer() ) return false;
307 // perform the minimization
308 return DoMinimization();
309}
310
312 // evaluate the FCN using the stored values in fConfig
313
314 if (fFunc && fResult->FittedFunction() == 0) fFunc.reset();
315
316 if (!fObjFunction) {
317 MATH_ERROR_MSG("Fitter::FitFCN","Objective function has not been set");
318 return false;
319 }
320 // create a Fit result from the fit configuration
321 fResult = std::make_shared<ROOT::Fit::FitResult>(fConfig);
322 // evaluate one time the FCN
323 double fcnval = (*fObjFunction)(fResult->GetParams() );
324 // update fit result
325 fResult->fVal = fcnval;
326 fResult->fNCalls++;
327 return true;
328}
329
331{
332
333 // perform a chi2 fit on a set of binned data
334 std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
335 assert(data);
336
337 // check function
338 if (!fFunc && !fFunc_v) {
339 MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "model function is not set");
340 return false;
341 } else {
342
343#ifdef DEBUG
344 std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[3].IsBound() << " lower limit "
345 << Config().ParamsSettings()[3].LowerLimit() << " upper limit "
346 << Config().ParamsSettings()[3].UpperLimit() << std::endl;
347#endif
348
349 fBinFit = true;
350 fDataSize = data->Size();
351 // check if fFunc provides gradient
352 if (!fUseGradient) {
353 // do minimzation without using the gradient
354 if (fFunc_v) {
355 Chi2FCN<BaseFunc, IModelFunction_v> chi2(data, fFunc_v, executionPolicy);
356 fFitType = chi2.Type();
357 return DoMinimization(chi2);
358 } else {
359 Chi2FCN<BaseFunc> chi2(data, fFunc, executionPolicy);
360 fFitType = chi2.Type();
361 return DoMinimization(chi2);
362 }
363 } else {
364 // use gradient
366 MATH_INFO_MSG("Fitter::DoLeastSquareFit", "use gradient from model function");
367
368 if (fFunc_v) {
369 std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
370 if (gradFun) {
371 Chi2FCN<BaseGradFunc, IModelFunction_v> chi2(data, gradFun);
372 fFitType = chi2.Type();
373 return DoMinimization(chi2);
374 }
375 } else {
376 std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
377 if (gradFun) {
378 Chi2FCN<BaseGradFunc> chi2(data, gradFun);
379 fFitType = chi2.Type();
380 return DoMinimization(chi2);
381 }
382 }
383 MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "wrong type of function - it does not provide gradient");
384 }
385 }
386 return false;
387}
388
389bool Fitter::DoBinnedLikelihoodFit(bool extended, const ROOT::Fit::ExecutionPolicy &executionPolicy)
390{
391 // perform a likelihood fit on a set of binned data
392 // The fit is extended (Poisson logl_ by default
393
394 std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
395 assert(data);
396
397 bool useWeight = fConfig.UseWeightCorrection();
398
399 // check function
400 if (!fFunc && !fFunc_v) {
401 MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "model function is not set");
402 return false;
403 }
404
405 // logl fit (error should be 0.5) set if different than default values (of 1)
408 }
409
410 if (useWeight && fConfig.MinosErrors()) {
411 MATH_INFO_MSG("Fitter::DoBinnedLikelihoodFit", "MINOS errors cannot be computed in weighted likelihood fits");
412 fConfig.SetMinosErrors(false);
413 }
414
415 fBinFit = true;
416 fDataSize = data->Size();
417
418 if (!fUseGradient) {
419 // do minimization without using the gradient
420 if (fFunc_v) {
421 // create a chi2 function to be used for the equivalent chi-square
423 PoissonLikelihoodFCN<BaseFunc, IModelFunction_v> logl(data, fFunc_v, useWeight, extended, executionPolicy);
424 fFitType = logl.Type();
425 // do minimization
426 if (!DoMinimization(logl, &chi2))
427 return false;
428 if (useWeight) {
430 if (!ApplyWeightCorrection(logl))
431 return false;
432 }
433 } else {
434 // create a chi2 function to be used for the equivalent chi-square
435 Chi2FCN<BaseFunc> chi2(data, fFunc);
436 PoissonLikelihoodFCN<BaseFunc> logl(data, fFunc, useWeight, extended, executionPolicy);
437 fFitType = logl.Type();
438 // do minimization
439 if (!DoMinimization(logl, &chi2))
440 return false;
441 if (useWeight) {
443 if (!ApplyWeightCorrection(logl))
444 return false;
445 }
446 }
447 } else {
448 if (fFunc_v) {
449 // create a chi2 function to be used for the equivalent chi-square
451 std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
452 if (!gradFun) {
453 MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
454 return false;
455 }
456 PoissonLikelihoodFCN<BaseGradFunc, IModelFunction_v> logl(data, gradFun, useWeight, true, executionPolicy);
457 fFitType = logl.Type();
458 // do minimization
459 if (!DoMinimization(logl, &chi2))
460 return false;
461 if (useWeight) {
463 if (!ApplyWeightCorrection(logl))
464 return false;
465 }
466 } else {
467 // create a chi2 function to be used for the equivalent chi-square
468 Chi2FCN<BaseFunc> chi2(data, fFunc);
470 MATH_INFO_MSG("Fitter::DoLikelihoodFit", "use gradient from model function");
471 // check if fFunc provides gradient
472 std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
473 if (!gradFun) {
474 MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
475 return false;
476 }
477 // use gradient for minimization
478 // not-extended is not impelemented in this case
479 if (!extended) {
480 MATH_WARN_MSG("Fitter::DoBinnedLikelihoodFit",
481 "Not-extended binned fit with gradient not yet supported - do an extended fit");
482 }
483 PoissonLikelihoodFCN<BaseGradFunc> logl(data, gradFun, useWeight, true, executionPolicy);
484 fFitType = logl.Type();
485 // do minimization
486 if (!DoMinimization(logl, &chi2))
487 return false;
488 if (useWeight) {
490 if (!ApplyWeightCorrection(logl))
491 return false;
492 }
493 }
494 }
495 return true;
496}
497
498bool Fitter::DoUnbinnedLikelihoodFit(bool extended, const ROOT::Fit::ExecutionPolicy &executionPolicy) {
499 // perform a likelihood fit on a set of unbinned data
500
501 std::shared_ptr<UnBinData> data = std::dynamic_pointer_cast<UnBinData>(fData);
502 assert(data);
503
504 bool useWeight = fConfig.UseWeightCorrection();
505
506 if (!fFunc && !fFunc_v) {
507 MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit","model function is not set");
508 return false;
509 }
510
511 if (useWeight && fConfig.MinosErrors() ) {
512 MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit","MINOS errors cannot be computed in weighted likelihood fits");
513 fConfig.SetMinosErrors(false);
514 }
515
516
517 fBinFit = false;
518 fDataSize = data->Size();
519
520#ifdef DEBUG
521 int ipar = 0;
522 std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[ipar].IsBound() << " lower limit " << Config().ParamsSettings()[ipar].LowerLimit() << " upper limit " << Config().ParamsSettings()[ipar].UpperLimit() << std::endl;
523#endif
524
525 // logl fit (error should be 0.5) set if different than default values (of 1)
528 }
529
530 if (!fUseGradient) {
531 // do minimization without using the gradient
532 if (fFunc_v ){
533 LogLikelihoodFCN<BaseFunc, IModelFunction_v> logl(data, fFunc_v, useWeight, extended, executionPolicy);
534 fFitType = logl.Type();
535 if (!DoMinimization (logl) ) return false;
536 if (useWeight) {
538 if (!ApplyWeightCorrection(logl) ) return false;
539 }
540 return true;
541 } else {
542 LogLikelihoodFCN<BaseFunc> logl(data, fFunc, useWeight, extended, executionPolicy);
543
544 fFitType = logl.Type();
545 if (!DoMinimization (logl) ) return false;
546 if (useWeight) {
548 if (!ApplyWeightCorrection(logl) ) return false;
549 }
550 return true;
551 }
552 } else {
553 // use gradient : check if fFunc provides gradient
554 if (fFunc_v) {
556 MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "use gradient from model function");
557 std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
558 if (gradFun) {
559 if (extended) {
560 MATH_WARN_MSG("Fitter::DoUnbinnedLikelihoodFit",
561 "Extended unbinned fit with gradient not yet supported - do a not-extended fit");
562 }
563 LogLikelihoodFCN<BaseGradFunc, IModelFunction_v> logl(data, gradFun, useWeight, extended);
564 fFitType = logl.Type();
565 if (!DoMinimization(logl))
566 return false;
567 if (useWeight) {
569 if (!ApplyWeightCorrection(logl))
570 return false;
571 }
572 return true;
573 }
574 MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
575
576 } else {
578 MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "use gradient from model function");
579 std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
580 if (gradFun) {
581 if (extended) {
582 MATH_WARN_MSG("Fitter::DoUnbinnedLikelihoodFit",
583 "Extended unbinned fit with gradient not yet supported - do a not-extended fit");
584 }
585 LogLikelihoodFCN<BaseGradFunc> logl(data, gradFun, useWeight, extended);
586 fFitType = logl.Type();
587 if (!DoMinimization(logl))
588 return false;
589 if (useWeight) {
591 if (!ApplyWeightCorrection(logl))
592 return false;
593 }
594 return true;
595 }
596 MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
597 }
598 }
599 return false;
600}
601
602
604
605 std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
606 assert(data);
607
608 // perform a linear fit on a set of binned data
609 std::string prevminimizer = fConfig.MinimizerType();
610 fConfig.SetMinimizer("Linear");
611
612 fBinFit = true;
613
614 bool ret = DoLeastSquareFit();
615 fConfig.SetMinimizer(prevminimizer.c_str());
616 return ret;
617}
618
619
621 // compute the Hesse errors according to configuration
622 // set in the parameters and append value in fit result
623 if (fObjFunction.get() == 0) {
624 MATH_ERROR_MSG("Fitter::CalculateHessErrors","Objective function has not been set");
625 return false;
626 }
627 // assume a fResult pointer always exists
628 assert (fResult.get() );
629
630 // need a special treatment in case of weighted likelihood fit
631 // (not yet implemented)
632 if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
633 MATH_ERROR_MSG("Fitter::CalculateHessErrors","Re-computation of Hesse errors not implemented for weighted likelihood fits");
634 MATH_INFO_MSG("Fitter::CalculateHessErrors","Do the Fit using configure option FitConfig::SetParabErrors()");
635 return false;
636 }
637 // if (!fUseGradient ) {
638 // ROOT::Math::FitMethodFunction * fcn = dynamic_cast< ROOT::Math::FitMethodFunction *>(fObjFunction.get());
639 // if (fcn && fcn->Type() == ROOT::Math::FitMethodFunction::kLogLikelihood) {
640 // if (!fBinFit) {
641 // ROOT::Math::LogLikelihoodFunction * nll = dynamic_cast< ROOT::Math::LogLikelihoodFunction *>(fcn);
642 // assert(nll);
643 // nll->UseSumOfWeightSquare(false);
644 // }
645 // else {
646 // ROOT::Math::PoissonLikelihoodFunction * nll = dynamic_cast< ROOT::Math::PoissonLikelihoodFunction *>(fcn);
647 // assert(nll);
648 // nll->UseSumOfWeightSquare(false);
649 // }
650 // // reset fcn in minimizer
651 // }
652
653
654 // create minimizer if not done or if name has changed
655 if ( !fMinimizer ||
656 fResult->MinimizerType().find(fConfig.MinimizerType()) == std::string::npos ) {
657 bool ret = DoInitMinimizer();
658 if (!ret) {
659 MATH_ERROR_MSG("Fitter::CalculateHessErrors","Error initializing the minimizer");
660 return false;
661 }
662 }
663
664
665 if (!fMinimizer ) {
666 MATH_ERROR_MSG("Fitter::CalculateHessErrors","Need to do a fit before calculating the errors");
667 return false;
668 }
669
670 //run Hesse
671 bool ret = fMinimizer->Hesse();
672 if (!ret) MATH_WARN_MSG("Fitter::CalculateHessErrors","Error when calculating Hessian");
673
674
675 // update minimizer results with what comes out from Hesse
676 // in case is empty - create from a FitConfig
677 if (fResult->IsEmpty() )
678 fResult = std::unique_ptr<ROOT::Fit::FitResult>(new ROOT::Fit::FitResult(fConfig) );
679
680
681 // re-give a minimizer instance in case it has been changed
682 ret |= fResult->Update(fMinimizer, ret);
683
684 // when possible get ncalls from FCN and set in fit result
686 fResult->fNCalls = GetNCallsFromFCN();
687 }
688
689 // set also new errors in FitConfig
691
692 return ret;
693}
694
695
697 // compute the Minos errors according to configuration
698 // set in the parameters and append value in fit result
699 // normally Minos errors are computed just after the minimization
700 // (in DoMinimization) when creating the FItResult if the
701 // FitConfig::MinosErrors() flag is set
702
703 if (!fMinimizer.get() ) {
704 MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minimizer does not exist - cannot calculate Minos errors");
705 return false;
706 }
707
708 if (!fResult.get() || fResult->IsEmpty() ) {
709 MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Invalid Fit Result - cannot calculate Minos errors");
710 return false;
711 }
712
713 if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
714 MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Computation of MINOS errors not implemented for weighted likelihood fits");
715 return false;
716 }
717
718 // set flag to compute Minos error to false in FitConfig to avoid that
719 // following minimizaiton calls perform unwanted Minos error calculations
720 fConfig.SetMinosErrors(false);
721
722
723 const std::vector<unsigned int> & ipars = fConfig.MinosParams();
724 unsigned int n = (ipars.size() > 0) ? ipars.size() : fResult->Parameters().size();
725 bool ok = false;
726 for (unsigned int i = 0; i < n; ++i) {
727 double elow, eup;
728 unsigned int index = (ipars.size() > 0) ? ipars[i] : i;
729 bool ret = fMinimizer->GetMinosError(index, elow, eup);
730 if (ret) fResult->SetMinosError(index, elow, eup);
731 ok |= ret;
732 }
733 if (!ok)
734 MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minos error calculation failed for all parameters");
735
736 return ok;
737}
738
739
740
741// traits for distinhuishing fit methods functions from generic objective functions
742template<class Func>
743struct ObjFuncTrait {
744 static unsigned int NCalls(const Func & ) { return 0; }
745 static int Type(const Func & ) { return -1; }
746 static bool IsGrad() { return false; }
747};
748template<>
749struct ObjFuncTrait<ROOT::Math::FitMethodFunction> {
750 static unsigned int NCalls(const ROOT::Math::FitMethodFunction & f ) { return f.NCalls(); }
751 static int Type(const ROOT::Math::FitMethodFunction & f) { return f.Type(); }
752 static bool IsGrad() { return false; }
753};
754template<>
755struct ObjFuncTrait<ROOT::Math::FitMethodGradFunction> {
756 static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction & f ) { return f.NCalls(); }
757 static int Type(const ROOT::Math::FitMethodGradFunction & f) { return f.Type(); }
758 static bool IsGrad() { return true; }
759};
760
762 //initialize minimizer by creating it
763 // and set there the objective function
764 // obj function must have been copied before
765 assert(fObjFunction.get() );
766
767 // check configuration and objective function
768 if ( fConfig.ParamsSettings().size() != fObjFunction->NDim() ) {
769 MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong function dimension or wrong size for FitConfig");
770 return false;
771 }
772
773 // create first Minimizer
774 // using an auto_Ptr will delete the previous existing one
775 fMinimizer = std::shared_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
776 if (fMinimizer.get() == 0) {
777 MATH_ERROR_MSG("Fitter::FitFCN","Minimizer cannot be created");
778 return false;
779 }
780
781 // in case of gradient function one needs to downcast the pointer
782 if (fUseGradient) {
783 const ROOT::Math::IMultiGradFunction * gradfcn = dynamic_cast<const ROOT::Math::IMultiGradFunction *> (fObjFunction.get() );
784 if (!gradfcn) {
785 MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong type of function - it does not provide gradient");
786 return false;
787 }
788 fMinimizer->SetFunction( *gradfcn);
789 }
790 else
791 fMinimizer->SetFunction( *fObjFunction);
792
793
794 fMinimizer->SetVariables(fConfig.ParamsSettings().begin(), fConfig.ParamsSettings().end() );
795
796 // if requested parabolic error do correct error analysis by the minimizer (call HESSE)
797 if (fConfig.ParabErrors()) fMinimizer->SetValidError(true);
798
799 return true;
800
801}
802
803
805 // perform the minimization (assume we have already initialized the minimizer)
806
807 assert(fMinimizer );
808
809 bool ret = fMinimizer->Minimize();
810
811 // unsigned int ncalls = ObjFuncTrait<ObjFunc>::NCalls(*fcn);
812 // int fitType = ObjFuncTrait<ObjFunc>::Type(objFunc);
813
814 if (!fResult) fResult = std::make_shared<FitResult>();
815 fResult->FillResult(fMinimizer,fConfig, fFunc, ret, fDataSize, fBinFit, chi2func );
816
817 // when possible get ncalls from FCN and set in fit result
819 fResult->fNCalls = GetNCallsFromFCN();
820 }
821
822 // fill information in fit result
823 fResult->fObjFunc = fObjFunction;
824 fResult->fFitData = fData;
825
826
827#ifdef DEBUG
828 std::cout << "ROOT::Fit::Fitter::DoMinimization : ncalls = " << fResult->fNCalls << " type of objfunc " << fFitFitResType << " typeid: " << typeid(*fObjFunction).name() << " use gradient " << fUseGradient << std::endl;
829#endif
830
832 fResult->NormalizeErrors();
833
834 // set also new parameter values and errors in FitConfig
836
837 return ret;
838}
839
840bool Fitter::DoMinimization(const BaseFunc & objFunc, const ROOT::Math::IMultiGenFunction * chi2func) {
841 // perform the minimization initializing the minimizer starting from a given obj function
842
843 // keep also a copy of FCN function and set this in minimizer so they will be managed together
844 // (remember that cloned copy will still depends on data and model function pointers)
845 fObjFunction = std::unique_ptr<ROOT::Math::IMultiGenFunction> ( objFunc.Clone() );
846 if (!DoInitMinimizer()) return false;
847 return DoMinimization(chi2func);
848}
849
850
852 // update the fit configuration after a fit using the obtained result
853 if (fResult->IsEmpty() || !fResult->IsValid() ) return;
854 for (unsigned int i = 0; i < fConfig.NPar(); ++i) {
856 par.SetValue( fResult->Value(i) );
857 if (fResult->Error(i) > 0) par.SetStepSize( fResult->Error(i) );
858 }
859}
860
862 // retrieve ncalls from the fit method functions
863 // this function is called when minimizer does not provide a way of returning the nnumber of function calls
864 int ncalls = 0;
865 if (!fUseGradient) {
866 const ROOT::Math::FitMethodFunction * fcn = dynamic_cast<const ROOT::Math::FitMethodFunction *>(fObjFunction.get());
867 if (fcn) ncalls = fcn->NCalls();
868 }
869 else {
871 if (fcn) ncalls = fcn->NCalls();
872 }
873 return ncalls;
874}
875
876
877bool Fitter::ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction & loglw2, bool minimizeW2L) {
878 // apply correction for weight square
879 // Compute Hessian of the loglikelihood function using the sum of the weight squared
880 // This method assumes:
881 // - a fit has been done before and a covariance matrix exists
882 // - the objective function is a likelihood function and Likelihood::UseSumOfWeightSquare()
883 // has been called before
884
885 if (fMinimizer.get() == 0) {
886 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Must perform first a fit before applying the correction");
887 return false;
888 }
889
890 unsigned int n = loglw2.NDim();
891 // correct errors for weight squared
892 std::vector<double> cov(n*n);
893 bool ret = fMinimizer->GetCovMatrix(&cov[0] );
894 if (!ret) {
895 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Previous fit has no valid Covariance matrix");
896 return false;
897 }
898 // need to re-init the minimizer and set w2
899 fObjFunction = std::unique_ptr<ROOT::Math::IMultiGenFunction> ( loglw2.Clone() );
900 // need to re-initialize the minimizer for the changes applied in the
901 // objective functions
902 if (!DoInitMinimizer()) return false;
903
904 //std::cout << "Running Hesse ..." << std::endl;
905
906 // run eventually before a minimization
907 // ignore its error
908 if (minimizeW2L) fMinimizer->Minimize();
909 // run Hesse on the log-likelihood build using sum of weight squared
910 ret = fMinimizer->Hesse();
911 if (!ret) {
912 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error running Hesse on weight2 likelihood - cannot compute errors");
913 return false;
914 }
915
916 if (fMinimizer->CovMatrixStatus() != 3) {
917 MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not accurate, the errors may be not reliable");
918 if (fMinimizer->CovMatrixStatus() == 2)
919 MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood was forced to be defined positive");
920 if (fMinimizer->CovMatrixStatus() <= 0)
921 // probably should have failed before
922 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not valid !");
923 }
924
925 // std::vector<double> c(n*n);
926 // ret = fMinimizer->GetCovMatrix(&c2[0] );
927 // if (!ret) std::cout << "Error reading cov matrix " << fMinimizer->Status() << std::endl;
928 // TMatrixDSym cmat2(n,&c2[0]);
929 // std::cout << "Cov matrix of w2 " << std::endl;
930 // cmat2.Print();
931 // cmat2.Invert();
932 // std::cout << "Hessian of w2 " << std::endl;
933 // cmat2.Print();
934
935 // get Hessian matrix from weight-square likelihood
936 std::vector<double> hes(n*n);
937 ret = fMinimizer->GetHessianMatrix(&hes[0] );
938 if (!ret) {
939 MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error retrieving Hesse on weight2 likelihood - cannot compute errors");
940 return false;
941 }
942
943 // for debug
944 // std::cout << "Hessian W2 matrix " << std::endl;
945 // for (unsigned int i = 0; i < n; ++i) {
946 // for (unsigned int j = 0; j < n; ++j) {
947 // std::cout << std::setw(12) << hes[i*n + j] << " , ";
948 // }
949 // std::cout << std::endl;
950 // }
951
952 // perform product of matvrix cov * hes * cov
953 // since we do not want to add matrix dependence do product by hand
954 // first do hes * cov
955 std::vector<double> tmp(n*n);
956 for (unsigned int i = 0; i < n; ++i) {
957 for (unsigned int j = 0; j < n; ++j) {
958 for (unsigned int k = 0; k < n; ++k)
959 tmp[i*n+j] += hes[i*n + k] * cov[k*n + j];
960 }
961 }
962 // do multiplication now cov * tmp save result
963 std::vector<double> newCov(n*n);
964 for (unsigned int i = 0; i < n; ++i) {
965 for (unsigned int j = 0; j < n; ++j) {
966 for (unsigned int k = 0; k < n; ++k)
967 newCov[i*n+j] += cov[i*n + k] * tmp[k*n + j];
968 }
969 }
970 // update fit result with new corrected covariance matrix
971 unsigned int k = 0;
972 for (unsigned int i = 0; i < n; ++i) {
973 fResult->fErrors[i] = std::sqrt( newCov[i*(n+1)] );
974 for (unsigned int j = 0; j <= i; ++j)
975 fResult->fCovMatrix[k++] = newCov[i *n + j];
976 }
977 //fResult->PrintCovMatrix(std::cout);
978
979 return true;
980}
981
982
983
985 // return a pointer to the binned data used in the fit
986 // works only for chi2 or binned likelihood fits
987 // thus when the objective function stored is a Chi2Func or a PoissonLikelihood
988 // The funciton also set the model function correctly if it has not been set
989
992
995
996 //MATH_INFO_MSG("Fitter::ExamineFCN","Objective function is not of a known type - FitData and ModelFunction objects are not available");
997 return;
998}
999
1000 } // end namespace Fit
1001
1002} // end namespace ROOT
#define MATH_INFO_MSG(loc, str)
Pre-processor macro to report messages which can be configured to use ROOT error or simply an std::io...
Definition: Error.h:76
#define MATH_ERROR_MSG(loc, str)
Definition: Error.h:82
#define MATH_WARN_MSG(loc, str)
Definition: Error.h:79
#define f(i)
Definition: RSha256.hxx:104
double sqrt(double)
BasicFCN class: base class for the objective functions used in the fits It has a reference to the dat...
Definition: BasicFCN.h:40
Chi2FCN class for binned fits using the least square methods.
Definition: Chi2FCN.h:49
virtual BaseObjFunction::Type_t Type() const
get type of fit method function
Definition: Chi2FCN.h:134
const std::vector< unsigned int > & MinosParams() const
return vector of parameter indices for which the Minos Error will be computed
Definition: FitConfig.h:215
void SetParamsSettings(unsigned int npar, const double *params, const double *vstep=0)
set the parameter settings from number of parameters and a vector of values and optionally step value...
Definition: FitConfig.cxx:136
bool UpdateAfterFit() const
Update configuration after a fit using the FitResult.
Definition: FitConfig.h:208
void SetMinosErrors(bool on=true)
set Minos errors computation to be performed after fitting
Definition: FitConfig.h:226
void SetMinimizer(const char *type, const char *algo=0)
set minimizer type
Definition: FitConfig.h:180
bool NormalizeErrors() const
flag to check if resulting errors are be normalized according to chi2/ndf
Definition: FitConfig.h:199
bool ParabErrors() const
do analysis for parabolic errors
Definition: FitConfig.h:202
unsigned int NPar() const
number of parameters settings
Definition: FitConfig.h:95
bool UseWeightCorrection() const
Apply Weight correction for error matrix computation.
Definition: FitConfig.h:211
const std::vector< ROOT::Fit::ParameterSettings > & ParamsSettings() const
get the vector of parameter settings (const method)
Definition: FitConfig.h:85
ROOT::Math::Minimizer * CreateMinimizer()
create a new minimizer according to chosen configuration
Definition: FitConfig.cxx:174
void CreateParamsSettings(const ROOT::Math::IParamMultiFunctionTempl< T > &func)
set the parameter settings from a model function.
Definition: FitConfig.h:108
const std::string & MinimizerType() const
return type of minimizer package
Definition: FitConfig.h:188
const ParameterSettings & ParSettings(unsigned int i) const
get the parameter settings for the i-th parameter (const method)
Definition: FitConfig.h:75
ROOT::Math::MinimizerOptions & MinimizerOptions()
access to the minimizer control parameter (non const method)
Definition: FitConfig.h:166
bool MinosErrors() const
do minos errors analysis on the parameters
Definition: FitConfig.h:205
class containing the result of the fit and all the related information (fitted parameter values,...
Definition: FitResult.h:48
Fitter class, entry point for performing all type of fits.
Definition: Fitter.h:77
bool EvalFCN()
Perform a simple FCN evaluation.
Definition: Fitter.cxx:311
bool FitFCN()
Perform a fit with the previously set FCN function.
Definition: Fitter.cxx:296
void DoUpdateFitConfig()
Definition: Fitter.cxx:851
std::shared_ptr< ROOT::Math::Minimizer > fMinimizer
pointer to the object containing the result of the fit
Definition: Fitter.h:512
bool DoBinnedLikelihoodFit(bool extended=true, const ROOT::Fit::ExecutionPolicy &executionPolicy=ROOT::Fit::ExecutionPolicy::kSerial)
binned likelihood fit
Definition: Fitter.cxx:389
std::shared_ptr< ROOT::Fit::FitData > fData
pointer to used minimizer
Definition: Fitter.h:514
bool fUseGradient
Definition: Fitter.h:494
bool SetFCN(unsigned int npar, Function &fcn, const double *params=0, unsigned int dataSize=0, bool chi2fit=false)
Set a generic FCN function as a C++ callable object implementing double () (const double *) Note that...
Definition: Fitter.h:595
bool DoMinimization(const BaseFunc &f, const ROOT::Math::IMultiGenFunction *chifunc=0)
do minimization
Definition: Fitter.cxx:840
std::shared_ptr< ROOT::Math::IMultiGenFunction > fObjFunction
pointer to the fit data (binned or unbinned data)
Definition: Fitter.h:516
bool ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction &loglw2, bool minimizeW2L=false)
apply correction in the error matrix for the weights for likelihood fits This method can be called on...
Definition: Fitter.cxx:877
bool FitFCN(const ROOT::Math::FitMethodFunction &fcn, const double *params=0)
Fit using a FitMethodFunction interface.
Definition: Fitter.cxx:259
void ExamineFCN()
look at the user provided FCN and get data and model function if they derive from ROOT::Fit FCN class...
Definition: Fitter.cxx:984
const FitConfig & Config() const
access to the fit configuration (const method)
Definition: Fitter.h:393
std::shared_ptr< IModelFunction_v > fFunc_v
Definition: Fitter.h:506
bool DoUnbinnedLikelihoodFit(bool extended=false, const ROOT::Fit::ExecutionPolicy &executionPolicy=ROOT::Fit::ExecutionPolicy::kSerial)
un-binned likelihood fit
Definition: Fitter.cxx:498
Fitter & operator=(const Fitter &rhs)
Assignment operator (disabled, class is not copyable)
Definition: Fitter.cxx:84
std::shared_ptr< ROOT::Fit::FitResult > fResult
copy of the fitted function containing on output the fit result
Definition: Fitter.h:510
bool GetDataFromFCN()
internal functions to get data set and model function from FCN useful for fits done with customized F...
Definition: Fitter.h:524
bool CalculateMinosErrors()
perform an error analysis on the result using MINOS To be called only after fitting and when a minimi...
Definition: Fitter.cxx:696
~Fitter()
Destructor.
Definition: Fitter.cxx:70
void SetFunction(const IModelFunction &func, bool useGradient=false)
Set the fitted function (model function) from a parametric function interface.
Definition: Fitter.cxx:103
bool CalculateHessErrors()
perform an error analysis on the result using the Hessian Errors are obtained from the inverse of the ...
Definition: Fitter.cxx:620
FitConfig fConfig
Definition: Fitter.h:504
Fitter()
Default constructor.
Definition: Fitter.cxx:51
std::shared_ptr< IModelFunction > fFunc
copy of the fitted function containing on output the fit result
Definition: Fitter.h:508
bool DoLinearFit()
linear least square fit
Definition: Fitter.cxx:603
bool DoInitMinimizer()
Definition: Fitter.cxx:761
int GetNCallsFromFCN()
Definition: Fitter.cxx:861
bool DoLeastSquareFit(const ROOT::Fit::ExecutionPolicy &executionPolicy=ROOT::Fit::ExecutionPolicy::kSerial)
least square fit
Definition: Fitter.cxx:330
LogLikelihoodFCN class for likelihood fits.
virtual BaseObjFunction::Type_t Type() const
get type of fit method function
void UseSumOfWeightSquare(bool on=true)
Class, describing value, limits and step size of the parameters Provides functionality also to set/re...
void SetValue(double val)
set the value
void SetStepSize(double err)
set the step size
class evaluating the log likelihood for binned Poisson likelihood fits it is template to distinguish ...
virtual BaseObjFunction::Type_t Type() const
get type of fit method function
FitMethodFunction class Interface for objective functions (like chi2 and likelihood used in the fit) ...
virtual Type_t Type() const
return the type of method, override if needed
virtual unsigned int NPoints() const
return the number of data points used in evaluating the function
virtual unsigned int NCalls() const
return the total number of function calls (overridden if needed)
Documentation for the abstract class IBaseFunctionMultiDim.
Definition: IFunction.h:62
virtual IBaseFunctionMultiDimTempl< T > * Clone() const =0
Clone a function.
virtual unsigned int NDim() const =0
Retrieve the dimension of the function.
Interface (abstract class) for multi-dimensional functions providing a gradient calculation.
Definition: IFunction.h:327
Specialized IParamFunction interface (abstract class) for one-dimensional parametric functions It is ...
Interface (abstract class) for parametric gradient multi-dimensional functions providing in addition ...
Interface (abstract class) for parametric one-dimensional gradient functions providing in addition to...
double ErrorDef() const
error definition
int PrintLevel() const
non-static methods for retrieving options
void SetErrorDef(double err)
set error def
MultiDimParamFunctionAdapter class to wrap a one-dimensional parametric function in a multi dimension...
MultiDimParamGradFunctionAdapter class to wrap a one-dimensional parametric gradient function in a mu...
Type
enumeration specifying the integration types.
const Int_t n
Definition: legend1.C:16
TFitResultPtr Fit(FitObject *h1, TF1 *f1, Foption_t &option, const ROOT::Math::MinimizerOptions &moption, const char *goption, ROOT::Fit::DataRange &range)
Definition: HFitImpl.cxx:134
double gDefaultErrorDef
Definition: Fitter.cxx:48
BasicFitMethodFunction< ROOT::Math::IMultiGenFunction > FitMethodFunction
Definition: Fitter.h:40
BasicFitMethodFunction< ROOT::Math::IMultiGradFunction > FitMethodGradFunction
Definition: Fitter.h:44
Namespace for new ROOT classes and functions.
Definition: StringConv.hxx:21