template<class GradFunc = IGradModelFunction>
struct ParamDerivFunc {
   ParamDerivFunc(const GradFunc & f) : fFunc(f), fIpar(0) {}
   void SetDerivComponent(unsigned int ipar) { fIpar = ipar; }
   double operator() (const double * x, const double * p) const {
      return fFunc.ParameterDerivative( x, p, fIpar );
   }
   unsigned int NDim() const { return fFunc.NDim(); }
   const GradFunc & fFunc;
   unsigned int fIpar;
};
// simple gradient calculator using finite differences
class SimpleGradientCalculator {

public:
   // construct from the dimension of the gradient vector, the model function,
   // the step tolerance and the differentiation strategy
   SimpleGradientCalculator(int gdim, const IModelFunction & func, double eps = 2.E-8, int istrat = 1) :
      // ...
      fVec(std::vector<double>(gdim) )
   {}

   // internal method to compute a single partial derivative w.r.t. parameter k
   double DoParameterDerivative(const double * x, const double * p, double f0, int k) const {
      // ...
      double h = std::max( fEps * std::abs(p0), 8.0 * fPrecision * (std::abs(p0) + fPrecision) );
      // ...
      double f1 = fFunc( x, &fVec.front() );
      // ... (central difference when a second evaluation is done)
      double f2 = fFunc( x, &fVec.front() );
      deriv = 0.5 * ( f2 - f1 ) / h;
      // ... (otherwise forward difference)
      deriv = ( f1 - f0 ) / h;
      // ...
   }

   unsigned int NDim() const {
      // ...
   }

   unsigned int NPar() const {
      // ...
   }

   double ParameterDerivative(const double * x, const double * p, int ipar) const {
      // copy the parameters and evaluate the central value before differentiating
      std::copy(p, p + fN, fVec.begin());
      double f0 = fFunc( x, p);
      return DoParameterDerivative( x, p, f0, ipar);
   }

   // gradient w.r.t. the parameters
   void ParameterGradient(const double * x, const double * p, double f0, double * g) {
      std::copy(p, p + fN, fVec.begin());
      for (unsigned int k = 0; k < fN; ++k) {
         g[k] = DoParameterDerivative( x, p, f0, k);
      }
   }

   // gradient w.r.t. the coordinates
   void Gradient(const double * x, const double * p, double f0, double * g) {
      std::copy( x, x + fN, fVec.begin());
      for (unsigned int k = 0; k < fN; ++k) {
         // ...
         double h = std::max( fEps * std::abs(x0), 8.0 * fPrecision * (std::abs(x0) + fPrecision) );
         // ...
         double f1 = fFunc( &fVec.front(), p );
         // ... (central difference when a second evaluation is done)
         double f2 = fFunc( &fVec.front(), p );
         g[k] = 0.5 * ( f2 - f1 ) / h;
         // ... (otherwise forward difference)
         g[k] = ( f1 - f0 ) / h;
      }
   }

private:
   // ...
   mutable std::vector<double> fVec;
};
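// --- Example (illustration only, not part of FitUtil.cxx) ---------------------
// How the calculator above is typically driven: evaluate the model once at the
// point x with parameters p, then fill grad[k] with the finite-difference
// estimate of d f(x;p) / d p_k. The helper name is ours, not ROOT's.
inline void ExampleParameterGradient(const IModelFunction & func,
                                     const double * x, const double * p, double * grad)
{
   SimpleGradientCalculator gc(func.NPar(), func);
   double f0 = func(x, p);                 // central value f(x;p)
   gc.ParameterGradient(x, p, f0, grad);   // grad[k] ~ d f(x;p) / d p_k
}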
// protect a returned value against +/- infinity (and NaN), clamping it to the
// largest representable double so that one pathological point cannot break the
// minimization
   if (rval > - std::numeric_limits<double>::max() && rval < std::numeric_limits<double>::max() )
      // ...
   return - std::numeric_limits<double>::max();
   // ...
   return + std::numeric_limits<double>::max();

// same protection, but correcting the value in place
   if (rval > - std::numeric_limits<double>::max() && rval < std::numeric_limits<double>::max() )
      // ...
   rval = - std::numeric_limits<double>::max();
   // ...
   rval = + std::numeric_limits<double>::max();
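// --- Example (illustration only, not part of FitUtil.cxx) ---------------------
// Minimal sketch of the protection pattern above: keep finite values unchanged
// and clamp +inf / -inf (and NaN, which fails both comparisons) to the largest
// representable double.
inline double ClampToDoubleMax(double rval)
{
   if (rval > -std::numeric_limits<double>::max() && rval < std::numeric_limits<double>::max())
      return rval;                                  // finite: keep as is
   if (rval < 0)
      return -std::numeric_limits<double>::max();   // -inf
   return +std::numeric_limits<double>::max();      // +inf or NaN
}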
// evaluate the integral of each parameter derivative of the model over the bin
// [x1, x2]; used by the gradient evaluations when the bin-integral option is set
template <class GFunc>
void CalculateGradientIntegral(const GFunc & gfunc,
                               const double * x1, const double * x2, const double * p, double * g) {

   // need to calculate the integral of every partial derivative separately
   ParamDerivFunc<GFunc> pfunc( gfunc);
   // ... (igDerEval integrates pfunc between the bin edges)
   unsigned int npar = gfunc.NPar();
   for (unsigned int k = 0; k < npar; ++k ) {
      pfunc.SetDerivComponent(k);
      g[k] = igDerEval( x1, x2 );
   }
}
// FitUtil::EvaluateChi2 -- excerpts
   unsigned int n = data.Size();
   // ...
   std::cout << "\n\nFit data size = " << n << std::endl;
   std::cout << "evaluate chi2 using function " << &func << " " << p << std::endl;
   std::cout << "use empty bins " << fitOpt.fUseEmpty << std::endl;
   std::cout << "use integral " << fitOpt.fIntegral << std::endl;
   std::cout << "use all error=1 " << fitOpt.fErrors1 << std::endl;
   if (isWeighted) std::cout << "Weighted data set - sumw = " << data.SumOfContent()
                             << " sumw2 = " << data.SumOfError2() << std::endl;
   // ...
   double maxResValue = std::numeric_limits<double>::max() / n;
   double wrefVolume = 1.0;
   // ...
   auto mapFunction = [&](const unsigned i) {
      // ...
      const auto y = data.Value(i);
      // ...
      const double * x = nullptr;
      std::vector<double> xc;
      double binVolume = 1.0;
      // ...
      if (useBinVolume) {
         unsigned int ndim = data.NDim();
         // ...
         xc.resize(data.NDim());
         for (unsigned int j = 0; j < ndim; ++j) {
            // ...
            binVolume *= std::abs(x2[j] - xx);
            xc[j] = 0.5 * (x2[j] + xx);
         }
         // normalize the bin volume using a reference value
         binVolume *= wrefVolume;
      } else if (data.NDim() > 1) {
         xc.resize(data.NDim());
         // ...
         for (unsigned int j = 1; j < data.NDim(); ++j)
            // ...
      }
      // ...
      if (!useBinIntegral) {
         // ...
         fval = func(x, p);
      }
      // ...
      if (useBinVolume) fval *= binVolume;
      // ...
      double invWeight = 1.0;
      // ...
      double invError2 = (fval > 0) ? invWeight / fval : 0.0;
      // ...
      std::cout << x[0] << " " << y << " " << 1. / invError << " params : ";
      for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
         std::cout << p[ipar] << "\t";
      std::cout << "\tfval = " << fval << " bin volume " << binVolume << " ref " << wrefVolume << std::endl;
      // ...
      double tmp = (y - fval) * invError;
      double resval = tmp * tmp;
      // ...
      if (resval < maxResValue)
         // ...
   };

   auto redFunction = [](const std::vector<double> & objs) {
      return std::accumulate(objs.begin(), objs.end(), double{});
   };
   // ...
   Warning("FitUtil::EvaluateChi2",
           "Multithread execution policy requires IMT, which is disabled. Changing "
           "to ROOT::Fit::ExecutionPolicy::kSerial.");
   // ...
   for (unsigned int i = 0; i < n; ++i) {
      res += mapFunction(i);
   }
   // ...
   Error("FitUtil::EvaluateChi2",
         "Execution policy unknown. Available choices:\n ROOT::Fit::ExecutionPolicy::kSerial (default)\n ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
// FitUtil::EvaluateChi2Effective -- excerpts (chi2 with coordinate errors, "effective variance" method)
   unsigned int n = data.Size();
   // ...
   std::cout << "\n\nFit data size = " << n << std::endl;
   std::cout << "evaluate effective chi2 using function " << &func << " " << p << std::endl;
   // ...
   unsigned int ndim = func.NDim();
   // ...
   double maxResValue = std::numeric_limits<double>::max() / n;
   // ...
   for (unsigned int i = 0; i < n; ++i) {
      // ...
      double fval = func(x, p);
      // ...
      double delta_y_func = y - fval;
      // ...
      const double * ex = 0;
      // ...
      double eylow, eyhigh = 0;
      // ...
      if (delta_y_func < 0)
         // ...
      while (j < ndim && ex[j] == 0.) { j++; }
      // ...
      double kPrecision = 1.E-8;
      for (unsigned int icoord = 0; icoord < ndim; ++icoord) {
         // ...
         if (ex[icoord] > 0) {
            // ...
            double x0 = x[icoord];
            double h = std::max(kEps * std::abs(ex[icoord]), 8.0 * kPrecision * (std::abs(x0) + kPrecision));
            // ...
            double edx = ex[icoord] * deriv;
            // ...
            std::cout << "error for coord " << icoord << " = " << ex[icoord] << " deriv " << deriv << std::endl;
            // ...
         }
      }
      double w2 = (e2 > 0) ? 1.0 / e2 : 0;
      double resval = w2 * (y - fval) * (y - fval);
      // ...
      std::cout << x[0] << " " << y << " ex " << ex[0] << " ey " << ey << " params : ";
      for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
         std::cout << p[ipar] << "\t";
      std::cout << "\tfval = " << fval << "\tresval = " << resval << std::endl;
      // ...
      if (resval < maxResValue)
         // ...
   }
   // ...
   std::cout << "chi2 = " << chi2 << " n = " << nPoints << std::endl;
// FitUtil::EvaluateChi2Residual -- excerpts (chi2 contribution of a single bin,
// optionally filling the gradient g of the residual)
   MATH_ERROR_MSG("FitUtil::EvaluateChi2Residual",
                  "Errors on the coordinates are not used in calculating the Chi2 residual");
   // ...
   double y, invError = 0;
   // ...
   unsigned int ndim = data.NDim();
   double binVolume = 1.0;
   const double * x2 = 0;
   if (useBinVolume || useBinIntegral) x2 = data.BinUpEdge(i);
   // ...
   xc = new double[ndim];
   for (unsigned int j = 0; j < ndim; ++j) {
      binVolume *= std::abs( x2[j] - x1[j] );
      xc[j] = 0.5 * ( x2[j] + x1[j] );
   }
   // ...
   const double * x = (useBinVolume) ? xc : x1;
   // ...
   if (!useBinIntegral) {
      fval = func ( x, p );
   }
   // ... (bin-integral case)
   fval = igEval( x1, x2 );
   // ...
   if (useBinVolume) fval *= binVolume;
   // ...
   double invError2 = (fval > 0) ? 1.0 / fval : 0.0;
   // ...
   double resval = ( y - fval ) * invError;
   // ...
   unsigned int npar = func.NPar();
   // ...
   if (!useBinIntegral ) {
      // ...
   }
   // ... (numerical gradient when the model provides no analytical derivatives)
   SimpleGradientCalculator gc( npar, func);
   if (!useBinIntegral )
      gc.ParameterGradient( x, p, fval, g);
   // ...
   for (unsigned int k = 0; k < npar; ++k) {
      // ...
      if (useBinVolume) g[k] *= binVolume;
      // ...
   }
   // ...
   if (useBinVolume) delete [] xc;
649 "Error on the coordinates are not used in calculating Chi2 gradient");
654 assert(fg !=
nullptr);
659 std::cout <<
"\n\nFit data size = " <<
n << std::endl;
660 std::cout <<
"evaluate chi2 using function gradient " << &func <<
" " << p << std::endl;
667 double wrefVolume = 1.0;
674 unsigned int npar = func.
NPar();
675 unsigned initialNPoints = data.
Size();
677 std::vector<bool> isPointRejected(initialNPoints);
679 auto mapFunction = [&](
const unsigned int i) {
681 std::vector<double> gradFunc(npar);
682 std::vector<double> pointContribution(npar);
685 const auto y = data.
Value(i);
686 auto invError = data.
Error(i);
688 invError = (invError != 0.0) ? 1.0 / invError : 1;
692 const double *
x =
nullptr;
693 std::vector<double> xc;
695 unsigned int ndim = data.
NDim();
696 double binVolume = 1;
701 for (
unsigned int j = 0; j < ndim; ++j) {
703 binVolume *= std::abs(
x2[j] - x1_j);
704 xc[j] = 0.5 * (
x2[j] + x1_j);
710 binVolume *= wrefVolume;
711 }
else if (ndim > 1) {
714 for (
unsigned int j = 1; j < ndim; ++j)
721 if (!useBinIntegral) {
728 fval = igEval(
x,
x2);
735 std::cout <<
x[0] <<
" " <<
y <<
" " << 1. / invError <<
" params : ";
736 for (
unsigned int ipar = 0; ipar < npar; ++ipar)
737 std::cout << p[ipar] <<
"\t";
738 std::cout <<
"\tfval = " << fval << std::endl;
741 isPointRejected[i] =
true;
743 return pointContribution;
747 unsigned int ipar = 0;
748 for (; ipar < npar; ++ipar) {
752 gradFunc[ipar] *= binVolume;
756 double dfval = gradFunc[ipar];
762 pointContribution[ipar] = -2.0 * (
y - fval) * invError * invError * gradFunc[ipar];
767 isPointRejected[i] =
true;
770 return pointContribution;
774 auto redFunction = [&](
const std::vector<std::vector<double>> &pointContributions) {
775 std::vector<double> result(npar);
777 for (
auto const &pointContribution : pointContributions) {
778 for (
unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
779 result[parameterIndex] += pointContribution[parameterIndex];
785 std::vector<double>
g(npar);
790 Warning(
"FitUtil::EvaluateChi2Gradient",
"Multithread execution policy requires IMT, which is disabled. Changing "
791 "to ROOT::Fit::ExecutionPolicy::kSerial.");
797 std::vector<std::vector<double>> allGradients(initialNPoints);
798 for (
unsigned int i = 0; i < initialNPoints; ++i) {
799 allGradients[i] = mapFunction(i);
801 g = redFunction(allGradients);
815 Error(
"FitUtil::EvaluateChi2Gradient",
816 "Execution policy unknown. Avalaible choices:\n 0: Serial (default)\n 1: MultiThread (requires IMT)\n");
825 nPoints = initialNPoints;
827 if (std::any_of(isPointRejected.begin(), isPointRejected.end(), [](
bool point) { return point; })) {
828 unsigned nRejected = std::accumulate(isPointRejected.begin(), isPointRejected.end(), 0);
829 assert(nRejected <= initialNPoints);
830 nPoints = initialNPoints - nRejected;
834 "Error - too many points rejected for overflow in gradient calculation");
838 std::copy(
g.begin(),
g.end(), grad);
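// --- Example (illustration only, not part of FitUtil.cxx) ---------------------
// The per-point term assembled by mapFunction above (no bin integral or bin
// volume): each parameter k of the chi2 gradient receives
//   -2 * (y - f(x;p)) / sigma^2 * df(x;p)/dp_k
inline void Chi2GradientContribution(double y, double fval, double invError,
                                     const double * gradFunc, unsigned int npar,
                                     double * contrib)
{
   for (unsigned int k = 0; k < npar; ++k)
      contrib[k] = -2.0 * (y - fval) * invError * invError * gradFunc[k];
}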
// FitUtil::EvaluatePdf -- excerpts (log-pdf contribution of one unbinned point,
// optionally filling its parameter gradient g)
   const double * x = data.Coords(i);
   double fval = func ( x, p );
   // ...
   if (g == 0) return logPdf;
   // ...
   SimpleGradientCalculator gc(func.NPar(), func);
   gc.ParameterGradient( x, p, fval, g );
   // ...
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar) {
      // ...
   }
   // ...
   std::cout << x[i] << "\t";
   std::cout << "\tpar = [ " << func.NPar() << " ] = ";
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
      std::cout << p[ipar] << "\t";
   std::cout << "\tfval = " << fval;
   std::cout << "\tgrad = [ ";
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
      std::cout << g[ipar] << "\t";
   std::cout << " ] " << std::endl;
// FitUtil::EvaluateLogL -- excerpts (negative log-likelihood of an unbinned data set)
double EvaluateLogL(const IModelFunction & func, const UnBinData & data, const double * p,
                    int iWeight, bool extended, unsigned int & nPoints,
                    ROOT::Fit::ExecutionPolicy executionPolicy, unsigned nChunks)
{
   unsigned int n = data.Size();
   // ...
   bool normalizeFunc = false;
   // ...
   (const_cast<IModelFunctionTempl<double> &>(func)).SetParameters(p);
   // ...
   nPoints = data.Size();
   // ...
   if (!normalizeFunc) {
      if (data.NDim() == 1) {
         // ...
      }
      // ...
      std::vector<double> x(data.NDim());
      for (unsigned int j = 0; j < data.NDim(); ++j)
         // ...
   }
   // ... (when the pdf must be normalized, compute its integral over the fit range)
   std::vector<double> xmin(data.NDim());
   std::vector<double> xmax(data.NDim());
   IntegralEvaluator<> igEval(func, p, true);
   // ...
   for (unsigned int ir = 0; ir < data.Range().Size(); ++ir) {
      // ...
      norm += igEval.Integral(xmin.data(), xmax.data());
   }
   // ... (no range set: integrate over +/- inf)
   if (func(xmin.data(), p) != 0 || func(xmax.data(), p) != 0) {
      MATH_ERROR_MSG("FitUtil::EvaluateLogLikelihood",
                     "A range has not been set and the function is not zero at +/- inf");
      // ...
   }
   norm = igEval.Integral(&xmin[0], &xmax[0]);
   // ...

   auto mapFunction = [&](const unsigned i) {
      // ...
      if (data.NDim() > 1) {
         std::vector<double> x(data.NDim());
         for (unsigned int j = 0; j < data.NDim(); ++j)
            // ...
         fval = func(x.data());
         // ...
         fval = func(x.data(), p);
      }
      // ... (when normalizing the pdf)
      fval = fval * (1 / norm);
      // ...
      double weight = data.Weight(i);
      // ...
      W2 = weight * weight;
      // ...
      return LikelihoodAux<double>(logval, W, W2);
   };

   auto redFunction = [](const std::vector<LikelihoodAux<double>> & objs) {
      auto l0 = LikelihoodAux<double>(0.0, 0.0, 0.0);
      for (auto & l : objs) {
         // ...
      }
      // ...
   };
   // ...
   Warning("FitUtil::EvaluateLogL",
           "Multithread execution policy requires IMT, which is disabled. Changing "
           "to ROOT::Fit::ExecutionPolicy::kSerial.");
   // ...
   for (unsigned int i = 0; i < n; ++i) {
      auto resArray = mapFunction(i);
      logl += resArray.logvalue;
      sumW += resArray.weight;
      sumW2 += resArray.weight2;
   }
   // ... (multithreaded case: a single reduced result)
   logl = resArray.logvalue;
   sumW = resArray.weight;
   sumW2 = resArray.weight2;
   // ...
   Error("FitUtil::EvaluateLogL",
         "Execution policy unknown. Available choices:\n ROOT::Fit::ExecutionPolicy::kSerial (default)\n ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
   // ...
   double extendedTerm = 0;
   // ...
   if (!normalizeFunc) {
      IntegralEvaluator<> igEval( func, p, true);
      std::vector<double> xmin(data.NDim());
      std::vector<double> xmax(data.NDim());
      // ...
      for (unsigned int ir = 0; ir < data.Range().Size(); ++ir) {
         // ...
         nuTot += igEval.Integral(xmin.data(), xmax.data());
      }
      // ... (no range set: integrate over +/- inf)
      if (func(xmin.data(), p) != 0 || func(xmax.data(), p) != 0) {
         MATH_ERROR_MSG("FitUtil::EvaluateLogLikelihood",
                        "A range has not been set and the function is not zero at +/- inf");
         // ...
      }
      nuTot = igEval.Integral(&xmin[0], &xmax[0]);
      // ...
      extendedTerm = - nuTot;
      // ... (weighted likelihood case)
      extendedTerm = - (sumW2 / sumW) * nuTot;
      // ...
   }
   logl += extendedTerm;
   // ...
   std::cout << "Evaluated log L for parameters (";
   for (unsigned int ip = 0; ip < func.NPar(); ++ip)
      std::cout << " " << p[ip];
   std::cout << ") fval = " << -logl << std::endl;
// FitUtil::EvaluateLogLGradient -- excerpts (gradient of the unbinned -logL)
   assert(fg != nullptr);
   // ...
   unsigned int npar = func.NPar();
   unsigned initialNPoints = data.Size();
   // ...
   std::cout << "\n===> Evaluate Gradient for parameters ";
   for (unsigned int ip = 0; ip < npar; ++ip)
      std::cout << " " << p[ip];
   // ...
   const double kdmax1 = std::sqrt(std::numeric_limits<double>::max());
   const double kdmax2 = std::numeric_limits<double>::max() / (4 * initialNPoints);

   auto mapFunction = [&](const unsigned int i) {
      std::vector<double> gradFunc(npar);
      std::vector<double> pointContribution(npar);
      // ...
      const double * x = nullptr;
      std::vector<double> xc;
      if (data.NDim() > 1) {
         xc.resize(data.NDim());
         for (unsigned int j = 0; j < data.NDim(); ++j)
            // ...
      }
      // ...
      double fval = func(x, p);
      // ...
      if (i < 5 || (i > data.Size() - 5)) {
         if (data.NDim() > 1)
            std::cout << i << " x " << x[0] << " y " << x[1] << " func " << fval
                      << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
         else
            std::cout << i << " x " << x[0] << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
      }
      // ...
      for (unsigned int kpar = 0; kpar < npar; ++kpar) {
         // ... (when fval is positive)
         pointContribution[kpar] = -1. / fval * gradFunc[kpar];
         // ... (otherwise clamp a non-zero derivative contribution)
         else if (gradFunc[kpar] != 0) {
            double gg = kdmax1 * gradFunc[kpar];
            // ...
            gg = std::min(gg, kdmax2);
            // ...
            gg = std::max(gg, -kdmax2);
            pointContribution[kpar] = -gg;
         }
      }
      // ...
      return pointContribution;
   };

   auto redFunction = [&](const std::vector<std::vector<double>> & pointContributions) {
      std::vector<double> result(npar);
      // ...
      for (auto const & pointContribution : pointContributions) {
         for (unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
            result[parameterIndex] += pointContribution[parameterIndex];
      }
      // ...
   };

   std::vector<double> g(npar);
   // ...
   Warning("FitUtil::EvaluateLogLGradient",
           "Multithread execution policy requires IMT, which is disabled. Changing "
           "to ROOT::Fit::ExecutionPolicy::kSerial.");
   // ...
   std::vector<std::vector<double>> allGradients(initialNPoints);
   for (unsigned int i = 0; i < initialNPoints; ++i) {
      allGradients[i] = mapFunction(i);
   }
   g = redFunction(allGradients);
   // ...
   Error("FitUtil::EvaluateLogLGradient",
         "Execution policy unknown. Available choices:\n "
         "ROOT::Fit::ExecutionPolicy::kSerial (default)\n "
         "ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
   // ...
   std::copy(g.begin(), g.end(), grad);
   // ...
   std::cout << "FitUtil.cxx : Final gradient ";
   for (unsigned int param = 0; param < npar; param++) {
      std::cout << " " << grad[param];
   }
// FitUtil::EvaluatePoissonBinPdf -- excerpts (Poisson log-pdf contribution of one bin,
// optionally filling its parameter gradient g)
   const double * x2 = 0;
   // ...
   double binVolume = 1;
   std::vector<double> xc;
   // ...
   unsigned int ndim = data.NDim();
   // ...
   for (unsigned int j = 0; j < ndim; ++j) {
      binVolume *= std::abs( x2[j] - x1[j] );
      xc[j] = 0.5 * ( x2[j] + x1[j] );
   }
   // ...
   const double * x = (useBinVolume) ? &xc.front() : x1;
   // ...
   if (!useBinIntegral ) {
      fval = func ( x, p );
   }
   // ... (bin-integral case)
   fval = igEval( x1, x2 );
   // ...
   if (useBinVolume) fval *= binVolume;
   // ...
   fval = std::max(fval, 0.0);   // protect against negative function values
   double logPdf = - fval;
   // ...
   if (g == 0) return logPdf;
   // ...
   unsigned int npar = func.NPar();
   // ...
   if (!useBinIntegral )
      // ...
   // ... (numerical gradient when the model provides no analytical derivatives)
   SimpleGradientCalculator gc(func.NPar(), func);
   if (!useBinIntegral )
      gc.ParameterGradient( x, p, fval, g);
   // ...
   for (unsigned int k = 0; k < npar; ++k) {
      // ...
      if (useBinVolume) g[k] *= binVolume;
      // ...
      g[k] *= ( y / fval - 1.);
      // ...
   }
   // ...
   const double kdmax1 = std::sqrt( std::numeric_limits<double>::max() );
   // ...
   std::cout << "x = " << x[0] << " logPdf = " << logPdf << " grad";
   for (unsigned int ipar = 0; ipar < npar; ++ipar)
      std::cout << g[ipar] << "\t";
   std::cout << std::endl;
// FitUtil::EvaluatePoissonLogL -- excerpts (Poisson negative log-likelihood of a binned data set)
   unsigned int n = data.Size();
   // ...
#ifdef USE_PARAMCACHE
   // ...
#endif
   nPoints = data.Size();
   // ...
   bool useW2 = (iWeight == 2);
   // ...
   double wrefVolume = 1.0;
   // ...
   std::cout << "Evaluate PoissonLogL for params = [ ";
   for (unsigned int j = 0; j < func.NPar(); ++j) std::cout << p[j] << " , ";
   std::cout << "] - data size = " << n << " useBinIntegral " << useBinIntegral << " useBinVolume "
             << useBinVolume << " useW2 " << useW2 << " wrefVolume = " << wrefVolume << std::endl;
   // ...
#ifdef USE_PARAMCACHE
   // ...
#endif

   auto mapFunction = [&](const unsigned i) {
      // ...
      const double * x = nullptr;
      std::vector<double> xc;
      // ...
      double binVolume = 1.0;
      // ...
      if (useBinVolume) {
         unsigned int ndim = data.NDim();
         // ...
         xc.resize(data.NDim());
         for (unsigned int j = 0; j < ndim; ++j) {
            // ...
            binVolume *= std::abs(x2[j] - xx);
            xc[j] = 0.5 * (x2[j] + xx);
         }
         // ...
         binVolume *= wrefVolume;
      } else if (data.NDim() > 1) {
         xc.resize(data.NDim());
         // ...
         for (unsigned int j = 1; j < data.NDim(); ++j) {
            // ...
         }
      }
      // ...
      if (!useBinIntegral) {
#ifdef USE_PARAMCACHE
         // ...
#endif
         // ...
      }
      // ...
      if (useBinVolume) fval *= binVolume;
      // ...
      if (i % NSAMPLE == 0) {
         std::cout << "evt " << i << " x = [ ";
         for (unsigned int j = 0; j < func.NDim(); ++j) std::cout << x[j] << " , ";
         // ...
         std::cout << "x2 = [ ";
         for (unsigned int j = 0; j < func.NDim(); ++j) std::cout << data.BinUpEdge(i)[j] << " , ";
         // ...
         std::cout << " y = " << y << " fval = " << fval << std::endl;
      }
      // ...
      fval = std::max(fval, 0.0);   // protect against negative function values
      // ...
      double nloglike = 0;   // negative log-likelihood contribution of this bin
      // ...
      double weight = 1.0;
      // ... (useW2: derive the bin weight from the stored error)
      double error = data.Error(i);
      weight = (error * error) / y;
      // ...
      nloglike += weight * ( fval - y );
      // ...
      // ... (unweighted case)
      if (extended) nloglike = fval - y;
      // ...
   };

   auto redFunction = [](const std::vector<double> & objs) {
      return std::accumulate(objs.begin(), objs.end(), double{});
   };
   // ...
   Warning("FitUtil::EvaluatePoissonLogL",
           "Multithread execution policy requires IMT, which is disabled. Changing "
           "to ROOT::Fit::ExecutionPolicy::kSerial.");
   // ...
   for (unsigned int i = 0; i < n; ++i) {
      res += mapFunction(i);
   }
   // ...
   Error("FitUtil::EvaluatePoissonLogL",
         "Execution policy unknown. Available choices:\n ROOT::Fit::ExecutionPolicy::kSerial (default)\n ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
   // ...
   std::cout << "Loglikelihood = " << res << std::endl;
// FitUtil::EvaluatePoissonLogLGradient -- excerpts (gradient of the Poisson -logL)
   assert(fg != nullptr);
   // ...
#ifdef USE_PARAMCACHE
   // ...
#endif
   // ...
   double wrefVolume = 1.0;
   // ...
   unsigned int npar = func.NPar();
   unsigned initialNPoints = data.Size();

   auto mapFunction = [&](const unsigned int i) {
      // ...
      std::vector<double> gradFunc(npar);
      std::vector<double> pointContribution(npar);
      // ...
      const auto y = data.Value(i);
      auto invError = data.Error(i);
      invError = (invError != 0.0) ? 1.0 / invError : 1;
      // ...
      const double * x = nullptr;
      std::vector<double> xc;
      // ...
      unsigned ndim = data.NDim();
      double binVolume = 1.0;
      // ...
      if (useBinVolume) {
         // ...
         for (unsigned int j = 0; j < ndim; ++j) {
            // ...
            binVolume *= std::abs(x2[j] - x1_j);
            xc[j] = 0.5 * (x2[j] + x1_j);
         }
         // ...
         binVolume *= wrefVolume;
      } else if (ndim > 1) {
         // ...
         for (unsigned int j = 1; j < ndim; ++j)
            // ...
      }
      // ...
      if (!useBinIntegral) {
         // ...
      }
      // ... (bin-integral case)
      fval = igEval(x, x2);
      // ...
      if (i < 5 || (i > data.Size() - 5)) {
         if (data.NDim() > 1)
            std::cout << i << " x " << x[0] << " y " << x[1] << " func " << fval
                      << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
         else
            std::cout << i << " x " << x[0] << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
      }
      // ...
      for (unsigned int ipar = 0; ipar < npar; ++ipar) {
         // ...
         gradFunc[ipar] *= binVolume;
         // ... (when fval is positive)
         pointContribution[ipar] = gradFunc[ipar] * (1. - y / fval);
         // ... (otherwise clamp a non-zero derivative contribution)
         else if (gradFunc[ipar] != 0) {
            const double kdmax1 = std::sqrt(std::numeric_limits<double>::max());
            const double kdmax2 = std::numeric_limits<double>::max() / (4 * initialNPoints);
            double gg = kdmax1 * gradFunc[ipar];
            // ...
            gg = std::min(gg, kdmax2);
            // ...
            gg = std::max(gg, -kdmax2);
            pointContribution[ipar] = -gg;
         }
      }
      // ...
      return pointContribution;
   };

   auto redFunction = [&](const std::vector<std::vector<double>> & pointContributions) {
      std::vector<double> result(npar);
      // ...
      for (auto const & pointContribution : pointContributions) {
         for (unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
            result[parameterIndex] += pointContribution[parameterIndex];
      }
      // ...
   };

   std::vector<double> g(npar);
   // ...
   Warning("FitUtil::EvaluatePoissonLogLGradient",
           "Multithread execution policy requires IMT, which is disabled. Changing "
           "to ROOT::Fit::ExecutionPolicy::kSerial.");
   // ...
   std::vector<std::vector<double>> allGradients(initialNPoints);
   for (unsigned int i = 0; i < initialNPoints; ++i) {
      allGradients[i] = mapFunction(i);
   }
   g = redFunction(allGradients);
   // ...
   Error("FitUtil::EvaluatePoissonLogLGradient",
         "Execution policy unknown. Available choices:\n 0: Serial (default)\n 1: MultiThread (requires IMT)\n");
   // ...
   std::copy(g.begin(), g.end(), grad);
   // ...
   std::cout << "***** Final gradient : ";
   for (unsigned int ii = 0; ii < npar; ++ii) std::cout << grad[ii] << " ";
// FitUtil::setAutomaticChunking -- choose the number of chunks for parallel execution
unsigned setAutomaticChunking(unsigned nEvents)
{
   // ... (ncpu: size of the implicit multi-threading pool)
   if (nEvents / ncpu < 1000) return ncpu;
   return nEvents / 1000;
}
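// --- Example (illustration only, not part of FitUtil.cxx) ---------------------
// Usage sketch: when the caller passes nChunks == 0, the evaluators fall back to
// the automatic chunking above to split the data set for the parallel MapReduce.
inline unsigned ChooseChunking(unsigned nChunks, unsigned nEvents)
{
   return (nChunks != 0) ? nChunks : setAutomaticChunking(nEvents);
}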