70    MATH_WARN_MSG("Fitter::SetFunction", "Requested function does not provide gradient - use it as non-gradient function ");
96    MATH_WARN_MSG("Fitter::SetFunction", "Requested function does not provide gradient - use it as non-gradient function ");
142   unsigned int npar = fcn.NDim();
144   MATH_ERROR_MSG("Fitter::SetFCN", "FCN function has zero parameters ");
250   MATH_ERROR_MSG("Fitter::FitFCN", "Fit Parameter settings have not been created ");
275   MATH_ERROR_MSG("Fitter::FitFCN", "Objective function has not been set");
293   MATH_ERROR_MSG("Fitter::FitFCN", "Objective function has not been set");
310   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
315   MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "model function is not set");
320   std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[3].IsBound() << " lower limit "
321             << Config().ParamsSettings()[3].LowerLimit() << " upper limit "
322             << Config().ParamsSettings()[3].UpperLimit() << std::endl;
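The debug printout at source lines 320 to 322 inspects the bound status and limits of one parameter setting. Those accessors only report meaningful limits once bounds have been applied to the corresponding ParameterSettings; a small sketch, where the parameter index and range are placeholders and SetLimits is assumed from ROOT::Fit::ParameterSettings:

   #include "Fit/Fitter.h"
   #include "Fit/ParameterSettings.h"
   #include <iostream>

   void boundParameter(ROOT::Fit::Fitter &fitter)
   {
      ROOT::Fit::ParameterSettings &ps = fitter.Config().ParSettings(3);
      ps.SetLimits(0., 10.);   // assumed ParameterSettings call for a double-sided bound
      // read back the same accessors used in the printout above
      std::cout << "bound: " << ps.IsBound()
                << " lower limit " << ps.LowerLimit()
                << " upper limit " << ps.UpperLimit() << std::endl;
   }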
338   MATH_INFO_MSG("Fitter::DoLeastSquareFit", "use gradient from model function");
341   std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
346   std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
351   MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "wrong type of function - it does not provide gradient");
362   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
369   MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "model function is not set");
379   MATH_INFO_MSG("Fitter::DoBinnedLikelihoodFit", "MINOS errors cannot be computed in weighted likelihood fits");
401   MATH_INFO_MSG("Fitter::DoLikelihoodFit", "use gradient from model function");
405 "Not-extended binned fit with gradient not yet supported - do an extended fit");
411   std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
413   MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
423   std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
425   MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
440   std::shared_ptr<UnBinData> data = std::dynamic_pointer_cast<UnBinData>(fData);
446   MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "model function is not set");
451   MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "MINOS errors cannot be computed in weighted likelihood fits");
461   std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[ipar].IsBound() << " lower limit " << Config().ParamsSettings()[ipar].LowerLimit() << " upper limit " << Config().ParamsSettings()[ipar].UpperLimit() << std::endl;
482   MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "use gradient from model function");
485 "Extended unbinned fit with gradient not yet supported - do a not-extended fit");
489   std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
491   MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
497   std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
499   MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
512   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
531   MATH_ERROR_MSG("Fitter::CalculateHessErrors", "Objective function has not been set");
538   MATH_ERROR_MSG("Fitter::CalculateHessErrors", "Re-computation of Hesse errors not implemented for weighted likelihood fits");
539   MATH_INFO_MSG("Fitter::CalculateHessErrors", "Do the Fit using configure option FitConfig::SetParabErrors()");
545   MATH_ERROR_MSG("Fitter::CalculateHessErrors", "FitResult has not been created");
551   MATH_ERROR_MSG("Fitter::CalculateHessErrors", "Error re-initializing the minimizer");
557   MATH_ERROR_MSG("Fitter::CalculateHessErrors", "Need to do a fit before calculating the errors");
564   if (!ret) MATH_WARN_MSG("Fitter::CalculateHessErrors", "Error when calculating Hessian");
598   MATH_ERROR_MSG("Fitter::CalculateMinosErrors", "Minimizer does not exist - cannot calculate Minos errors");
603   MATH_ERROR_MSG("Fitter::CalculateMinosErrors", "Invalid Fit Result - cannot calculate Minos errors");
608   MATH_ERROR_MSG("Fitter::CalculateMinosErrors", "Computation of MINOS errors not implemented for weighted likelihood fits");
614   MATH_ERROR_MSG("Fitter::CalculateHessErrors", "Error re-initializing the minimizer");
633   MATH_INFO_MSG("Fitter::CalculateMinosErrors", "Run again Minos for some parameters because a new Minimum has been found");
635   for (int i = 0; i < iparMax; ++i) {
653   MATH_ERROR_MSG("Fitter::CalculateMinosErrors", "Minos error calculation failed for all the selected parameters");
672   static unsigned int NCalls(const Func &) { return 0; }
673   static int Type(const Func &) { return -1; }
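Source lines 672 and 673 are the generic fallbacks of a small traits helper used when the objective function is registered: for an arbitrary callable the call count is reported as 0 and the type as -1, while the overloads listed at the bottom of this page specialize the behaviour for FitMethodFunction and FitMethodGradFunction, which do expose that information. A condensed sketch of the same overloading idiom (the struct name is illustrative; FitMethodFunction::NCalls() and Type() are assumed):

   #include "Math/FitMethodFunction.h"

   namespace {
   struct ObjFuncTrait {
      // generic fallback: nothing is known about the objective
      template <class Func>
      static unsigned int NCalls(const Func &) { return 0; }
      template <class Func>
      static int Type(const Func &) { return -1; }

      // fit-method functions expose their call counter and fit type,
      // and the non-template overloads win for those arguments
      static unsigned int NCalls(const ROOT::Math::FitMethodFunction &f) { return f.NCalls(); }
      static int Type(const ROOT::Math::FitMethodFunction &f) { return f.Type(); }
   };
   }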
695   MATH_ERROR_MSG("Fitter::DoInitMinimizer", "Objective function has not been set");
701   MATH_ERROR_MSG("Fitter::DoInitMinimizer", "wrong function dimension or wrong size for FitConfig");
709   MATH_ERROR_MSG("Fitter::DoInitMinimizer", "Minimizer cannot be created");
717   MATH_ERROR_MSG("Fitter::DoInitMinimizer", "wrong type of function - it does not provide gradient");
722   if (Config().MinimizerType() == "Minuit2") {
726   auto hessFcn = [=](std::span<const double> x, double * hess) {
727      unsigned int ndim = x.size();
728      unsigned int nh = ndim * (ndim + 1) / 2;
729      std::vector<double> h(nh);
731      if (!ret) return false;
732      for (unsigned int i = 0; i < ndim; i++) {
733         for (unsigned int j = 0; j <= i; j++) {
734            unsigned int index = j + i * (i + 1) / 2;
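The lambda started at source line 726 supplies an external Hessian to Minuit2. The Hessian comes back as a packed lower triangle of nh = ndim * (ndim + 1) / 2 values, and the element (i, j) with j <= i sits at index = j + i * (i + 1) / 2; the loop then copies it into the full row-major array expected by the caller. A self-contained sketch of just that unpacking step, independent of the Fitter internals:

   #include <vector>

   // Expand a packed lower-triangular symmetric matrix h (size n*(n+1)/2)
   // into a full row-major n x n matrix, mirroring the index formula used above.
   std::vector<double> unpackSymmetric(const std::vector<double> &h, unsigned int n)
   {
      std::vector<double> full(n * n);
      for (unsigned int i = 0; i < n; i++) {
         for (unsigned int j = 0; j <= i; j++) {
            unsigned int index = j + i * (i + 1) / 2;  // position in the packed triangle
            full[i * n + j] = h[index];
            full[j * n + i] = h[index];                // symmetric counterpart
         }
      }
      return full;
   }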
776   std::string msg = "Cannot change minimizer. Continue using " + fResult->MinimizerType();
821   std::cout << "ROOT::Fit::Fitter::DoMinimization : ncalls = " << fResult->fNCalls << " type of objfunc " << fFitFitResType << " typeid: " << typeid(*fObjFunction).name() << " use gradient " << fUseGradient << std::endl;
832   template<class ObjFunc_t>
841   template<class ObjFunc_t>
860   for (unsigned int i = 0; i < fConfig.NPar(); ++i) {
892   MATH_ERROR_MSG("Fitter::ApplyWeightCorrection", "Must perform first a fit before applying the correction");
896   unsigned int n = loglw2.NDim();
898   std::vector<double> cov(n * n);
901   MATH_ERROR_MSG("Fitter::ApplyWeightCorrection", "Previous fit has no valid Covariance matrix");
905   std::shared_ptr<ROOT::Math::IMultiGenFunction> objFunc(loglw2.Clone());
920   MATH_ERROR_MSG("Fitter::ApplyWeightCorrection", "Error running Hesse on weight2 likelihood - cannot compute errors");
925   MATH_WARN_MSG("Fitter::ApplyWeightCorrection", "Covariance matrix for weighted likelihood is not accurate, the errors may be not reliable");
927   MATH_WARN_MSG("Fitter::ApplyWeightCorrection", "Covariance matrix for weighted likelihood was forced to be defined positive");
930   MATH_ERROR_MSG("Fitter::ApplyWeightCorrection", "Covariance matrix for weighted likelihood is not valid !");
934   std::vector<double> hes(n * n);
937   MATH_ERROR_MSG("Fitter::ApplyWeightCorrection", "Error retrieving Hesse on weight2 likelihood - cannot compute errors");
945   std::vector<double> tmp(n * n);
946   for (unsigned int i = 0; i < n; ++i) {
947      for (unsigned int j = 0; j < n; ++j) {
948         for (unsigned int k = 0; k < n; ++k)
954   for (unsigned int i = 0; i < n; ++i) {
955      for (unsigned int j = 0; j < n; ++j) {
956         for (unsigned int k = 0; k < n; ++k)
962   for (unsigned int i = 0; i < n; ++i) {
964      for (unsigned int j = 0; j <= i; ++j)
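The three nested loops at source lines 945 to 964 apply the weight correction to the covariance matrix as a matrix sandwich: with C the covariance from the weighted likelihood fit and Hw2 the Hessian of the likelihood built with squared weights, the corrected covariance is C * Hw2 * C (the first loop forms tmp = C * Hw2, the second multiplies by C again, the last one stores the lower triangle of the result). A standalone sketch of the same computation on flat row-major arrays, assuming symmetric n x n inputs:

   #include <vector>

   // Corrected covariance for weighted likelihood fits: corr = C * Hw2 * C,
   // with C and Hw2 stored as row-major n x n matrices.
   std::vector<double> weightCorrectedCov(const std::vector<double> &cov,
                                          const std::vector<double> &hesW2,
                                          unsigned int n)
   {
      std::vector<double> tmp(n * n), corr(n * n);   // zero-initialized
      for (unsigned int i = 0; i < n; ++i)           // tmp = C * Hw2
         for (unsigned int j = 0; j < n; ++j)
            for (unsigned int k = 0; k < n; ++k)
               tmp[i * n + j] += cov[i * n + k] * hesW2[k * n + j];
      for (unsigned int i = 0; i < n; ++i)           // corr = tmp * C
         for (unsigned int j = 0; j < n; ++j)
            for (unsigned int k = 0; k < n; ++k)
               corr[i * n + j] += tmp[i * n + k] * cov[k * n + j];
      return corr;
   }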
#define MATH_INFO_MSG(loc, str)
Pre-processor macro to report messages which can be configured to use ROOT error or simply an std::io...
#define MATH_ERROR_MSG(loc, str)
#define MATH_WARN_MSG(loc, str)
const std::vector< unsigned int > & MinosParams() const
return vector of parameter indices for which the Minos Error will be computed
bool UpdateAfterFit() const
Update configuration after a fit using the FitResult.
void SetMinimizer(const char *type, const char *algo=nullptr)
set minimizer type and algorithm
void SetMinosErrors(bool on=true)
set Minos errors computation to be performed after fitting
bool NormalizeErrors() const
flag to check if resulting errors are to be normalized according to chi2/ndf
bool ParabErrors() const
do analysis for parabolic errors
unsigned int NPar() const
number of parameters settings
void SetParamsSettings(unsigned int npar, const double *params, const double *vstep=nullptr)
set the parameter settings from number of parameters and a vector of values and optionally step value...
std::string MinimizerName() const
return Minimizer full name (type / algorithm)
bool UseWeightCorrection() const
Apply Weight correction for error matrix computation.
const std::vector< ROOT::Fit::ParameterSettings > & ParamsSettings() const
get the vector of parameter settings (const method)
ROOT::Math::Minimizer * CreateMinimizer()
create a new minimizer according to chosen configuration
void CreateParamsSettings(const ROOT::Math::IParamMultiFunctionTempl< T > &func)
set the parameter settings from a model function.
const std::string & MinimizerType() const
return type of minimizer package
const ParameterSettings & ParSettings(unsigned int i) const
get the parameter settings for the i-th parameter (const method)
ROOT::Math::MinimizerOptions & MinimizerOptions()
access to the minimizer control parameter (non const method)
bool MinosErrors() const
do minos errors analysis on the parameters
bool EvalFCN()
Perform a simple FCN evaluation.
const ROOT::Math::IMultiGenFunction * fExtObjFunction
! pointer to an external FCN
bool FitFCN()
Perform a fit with the previously set FCN function.
bool DoMinimization(std::unique_ptr< ObjFunc_t > f, const ROOT::Math::IMultiGenFunction *chifunc=nullptr)
do minimization
bool DoSetFCN(bool useExtFCN, const ROOT::Math::IMultiGenFunction &fcn, const double *params, unsigned int dataSize, int fitType)
Set Objective function.
int fDataSize
size of data sets (needed for Fumili or LM fitters)
bool DoUnbinnedLikelihoodFit(bool extended=false, const ROOT::EExecutionPolicy &executionPolicy=ROOT::EExecutionPolicy::kSequential)
un-binned likelihood fit
const ROOT::Math::IBaseFunctionMultiDimTempl< double > * ObjFunction() const
Return pointer to the used objective function for fitting.
std::shared_ptr< ROOT::Math::Minimizer > fMinimizer
! pointer to used minimizer
bool DoWeightMinimization(std::unique_ptr< ObjFunc_t > f, const ROOT::Math::IMultiGenFunction *chifunc=nullptr)
bool DoBinnedLikelihoodFit(bool extended=true, const ROOT::EExecutionPolicy &executionPolicy=ROOT::EExecutionPolicy::kSequential)
binned likelihood fit
int fFitType
type of fit (0 undefined, 1 least square, 2 likelihood, 3 binned likelihood)
std::shared_ptr< ROOT::Fit::FitData > fData
! pointer to the fit data (binned or unbinned data)
bool fUseGradient
flag to indicate if using gradient or not
bool fBinFit
flag to indicate if the fit is binned (in case of false the fit is unbinned or undefined); the flag is used ...
std::shared_ptr< ROOT::Math::IMultiGenFunction > fObjFunction
! pointer to used objective function
bool ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction &loglw2, bool minimizeW2L=false)
apply correction in the error matrix for the weights for likelihood fits. This method can be called on...
const FitConfig & Config() const
access to the fit configuration (const method)
bool DoLeastSquareFit(const ROOT::EExecutionPolicy &executionPolicy=ROOT::EExecutionPolicy::kSequential)
least square fit
bool SetFCN(unsigned int npar, Function &fcn, const double *params=nullptr, unsigned int dataSize=0, int fitType=0)
Set a generic FCN function as a C++ callable object implementing double () (const double *). Note that...
std::shared_ptr< IModelFunction_v > fFunc_v
! copy of the fitted function containing on output the fit result
std::shared_ptr< ROOT::Fit::FitResult > fResult
! pointer to the object containing the result of the fit
bool CalculateMinosErrors()
perform an error analysis on the result using MINOS. To be called only after fitting and when a minimi...
bool DoUpdateMinimizerOptions(bool canDifferentMinim=true)
void SetFunction(const IModelFunction &func, bool useGradient=false)
Set the fitted function (model function) from a parametric function interface.
bool CalculateHessErrors()
perform an error analysis on the result using the Hessian. Errors are obtained from the inverse of the...
FitConfig fConfig
fitter configuration (options and parameter settings)
Fitter()
Default constructor.
std::shared_ptr< IModelFunction > fFunc
! copy of the fitted function containing on output the fit result
bool DoLinearFit()
linear least square fit
Class describing value, limits and step size of the parameters. Provides functionality also to set/re...
void SetValue(double val)
set the value
void SetStepSize(double err)
set the step size
FitMethodFunction class. Interface for objective functions (like chi2 and likelihood used in the fit) ...
Documentation for the abstract class IBaseFunctionMultiDim.
virtual IBaseFunctionMultiDimTempl< T > * Clone() const =0
Clone a function.
Interface (abstract class) for multi-dimensional functions providing a gradient calculation.
Specialized IParamFunction interface (abstract class) for one-dimensional parametric functions. It is ...
Interface (abstract class) for parametric gradient multi-dimensional functions providing in addition ...
Interface (abstract class) for parametric one-dimensional gradient functions providing in addition to...
double ErrorDef() const
error definition
int PrintLevel() const
non-static methods for retrieving options
void SetErrorDef(double err)
set error def
static double DefaultErrorDef()
MultiDimParamFunctionAdapter class to wrap a one-dimensional parametric function in a multi dimension...
MultiDimParamGradFunctionAdapter class to wrap a one-dimensional parametric gradient function in a mu...
const_iterator begin() const
const_iterator end() const
TFitResultPtr Fit(FitObject *h1, TF1 *f1, Foption_t &option, const ROOT::Math::MinimizerOptions &moption, const char *goption, ROOT::Fit::DataRange &range)
Namespace for new ROOT classes and functions.
static int Type(const ROOT::Math::FitMethodFunction &f)
static unsigned int NCalls(const ROOT::Math::FitMethodFunction &f)
static int Type(const ROOT::Math::FitMethodGradFunction &f)
static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction &f)
static unsigned int NCalls(const Func &)
static int Type(const Func &)
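Taken together, the members listed above cover the configuration flow around the code fragments shown earlier: choose a minimizer, create and tune the parameter settings, and decide which error analyses run after the minimization. A hedged end-to-end sketch built only from members documented on this page (the parameter index and values are placeholders, and 'model' stands for any parametric model function):

   #include "Fit/Fitter.h"
   #include "Fit/FitConfig.h"
   #include "Fit/ParameterSettings.h"
   #include "Math/IParamFunction.h"

   void configureFit(ROOT::Fit::Fitter &fitter, const ROOT::Math::IParamMultiFunction &model)
   {
      ROOT::Fit::FitConfig &cfg = fitter.Config();
      cfg.SetMinimizer("Minuit2", "Migrad");   // minimizer type and algorithm
      cfg.CreateParamsSettings(model);         // one ParameterSettings entry per model parameter
      cfg.ParSettings(0).SetValue(1.0);        // tune an individual parameter ...
      cfg.ParSettings(0).SetStepSize(0.1);     // ... with the setters listed above
      cfg.SetParabErrors(true);                // run Hesse after the minimization
      cfg.SetMinosErrors(true);                // and request MINOS errors as well
   }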