template<class GradFunc = IGradModelFunction>
struct ParamDerivFunc {
   ParamDerivFunc(const GradFunc & f) : fFunc(f), fIpar(0) {}
   void SetDerivComponent(unsigned int ipar) { fIpar = ipar; }
   double operator() (const double *x, const double *p) const {
      return fFunc.ParameterDerivative( x, p, fIpar );
   }
   unsigned int NDim() const { return fFunc.NDim(); }

   const GradFunc & fFunc;
   unsigned int fIpar;
};
// simple gradient calculator using the 2-point finite-difference formula
class SimpleGradientCalculator {

public:
   // construct from the gradient dimension, the model function, a tolerance and a strategy flag
   SimpleGradientCalculator(int gdim, const IModelFunction & func, double eps = 2.E-8, int istrat = 1) :
      // ... (store eps, precision, strategy, dimensions and the function reference)
      fVec(std::vector<double>(gdim) )
   {}

   // internal method: derivative with respect to parameter k, with a step size
   // scaled to the parameter value and to the numerical precision
   double DoParameterDerivative(const double *x, const double *p, double f0, int k) const {
      double p0 = p[k];
      double h = std::max( fEps* std::abs(p0), 8.0*fPrecision*(std::abs(p0) + fPrecision) );
      double deriv = 0;
      // ... (evaluate at p[k] + h)
      double f1 = fFunc(x, &fVec.front() );
      // central difference (two evaluations) for the higher strategy:
      double f2 = fFunc(x, &fVec.front() );
      deriv = 0.5 * ( f2 - f1 )/h;
      // one-sided difference otherwise, re-using the cached value f0 = f(x,p):
      deriv = ( f1 - f0 )/h;
      // ... (restore the parameter value and return deriv)
   }

   unsigned int NDim() const { /* ... */ }

   unsigned int NPar() const { /* ... */ }

   double ParameterDerivative(const double *x, const double *p, int ipar) const {
      // copy the parameter values, evaluate the reference value and differentiate
      std::copy(p, p+fN, fVec.begin());
      double f0 = fFunc(x, p);
      return DoParameterDerivative(x,p,f0,ipar);
   }

   // gradient with respect to the parameters
   void ParameterGradient(const double * x, const double * p, double f0, double * g) {
      std::copy(p, p+fN, fVec.begin());
      for (unsigned int k = 0; k < fN; ++k) {
         g[k] = DoParameterDerivative(x,p,f0,k);
      }
   }

   // gradient with respect to the coordinates
   void Gradient(const double * x, const double * p, double f0, double * g) {
      std::copy(x, x+fN, fVec.begin());
      for (unsigned int k = 0; k < fN; ++k) {
         double x0 = x[k];
         double h = std::max( fEps* std::abs(x0), 8.0*fPrecision*(std::abs(x0) + fPrecision) );
         // ... (evaluate at x[k] + h)
         double f1 = fFunc( &fVec.front(), p );
         // central difference:
         double f2 = fFunc( &fVec.front(), p );
         g[k] = 0.5 * ( f2 - f1 )/h;
         // one-sided difference otherwise:
         g[k] = ( f1 - f0 )/h;
      }
   }

private:
   // ... (eps, precision, strategy, dimensions, function reference)
   mutable std::vector<double> fVec;
};
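The derivatives above are plain finite differences with a step size tied to both the parameter value and the numerical precision. A minimal, self-contained sketch of the same scheme (the name, defaults and std::function signature here are illustrative, not the FitUtil members):

#include <algorithm>
#include <cmath>
#include <functional>
#include <limits>

// Numerical first derivative of f at p with the step-size heuristic used above:
// h = max(eps*|p|, 8*precision*(|p| + precision)).
// 'central' plays the role of the strategy flag: two-sided difference when set,
// one-sided difference re-using the already computed value f(p) otherwise.
double NumericalDerivative(const std::function<double(double)> &f, double p,
                           bool central = true, double eps = 2.E-8,
                           double precision = std::numeric_limits<double>::epsilon())
{
   double h = std::max(eps * std::abs(p), 8.0 * precision * (std::abs(p) + precision));
   double fPlus = f(p + h);
   if (central) {
      double fMinus = f(p - h);
      return 0.5 * (fPlus - fMinus) / h; // O(h^2) truncation error
   }
   return (fPlus - f(p)) / h;            // O(h) truncation error
}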
// CorrectValue: protect the evaluated value against (+/-) infinity by
// clamping it to the largest representable double
if (rval > - std::numeric_limits<double>::max() && rval < std::numeric_limits<double>::max() )
   // ... (finite value: nothing to correct)
// ...
return -std::numeric_limits<double>::max();
// ...
return + std::numeric_limits<double>::max();

// CheckInfNaNValue: the same protection, correcting rval in place
if (rval > - std::numeric_limits<double>::max() && rval < std::numeric_limits<double>::max() )
   // ...
// ...
rval = -std::numeric_limits<double>::max();
// ...
rval = + std::numeric_limits<double>::max();
template <class GFunc>
void CalculateGradientIntegral(const GFunc & gfunc,
                               const double * x1, const double * x2, const double * p, double * g) {

   // calculate the gradient of the integral of the model function between x1 and x2,
   // integrating the derivative with respect to one parameter at a time
   ParamDerivFunc<GFunc> pfunc( gfunc);
   // ... (build the integral evaluator igDerEval wrapping pfunc)

   unsigned int npar = gfunc.NPar();
   for (unsigned int k = 0; k < npar; ++k ) {
      pfunc.SetDerivComponent(k);
      g[k] = igDerEval( x1, x2 );
   }
}
   unsigned int n = data.Size();

#ifdef USE_PARAMCACHE
   // ... (cache the parameter values inside the function object)
#endif

   // debug printout
   std::cout << "\n\nFit data size = " << n << std::endl;
   std::cout << "evaluate chi2 using function " << &func << " " << p << std::endl;
   std::cout << "use empty bins " << fitOpt.fUseEmpty << std::endl;
   std::cout << "use integral " << fitOpt.fIntegral << std::endl;
   std::cout << "use all error=1 " << fitOpt.fErrors1 << std::endl;
   if (isWeighted) std::cout << "Weighted data set - sumw = " << data.SumOfContent()
                             << " sumw2 = " << data.SumOfError2() << std::endl;

#ifdef USE_PARAMCACHE
   // ...
#endif

   // residual values are truncated to avoid an overflow of the total chi2
   double maxResValue = std::numeric_limits<double>::max() / n;
   double wrefVolume = 1.0;
   // ...

   auto mapFunction = [&](const unsigned i){

      const auto y = data.Value(i);
      // ...
      const double * x = nullptr;
      std::vector<double> xc;
      double binVolume = 1.0;
      if (useBinVolume) {
         unsigned int ndim = data.NDim();
         // ...
         xc.resize(data.NDim());
         for (unsigned int j = 0; j < ndim; ++j) {
            // xx is the lower edge, x2 the upper edge of the bin in coordinate j
            binVolume *= std::abs(x2[j]- xx);
            xc[j] = 0.5*(x2[j]+ xx);
         }
         // ...
         // normalize the bin volume using a reference value
         binVolume *= wrefVolume;
      } else if(data.NDim() > 1) {
         xc.resize(data.NDim());
         // ...
         for (unsigned int j = 1; j < data.NDim(); ++j)
            // ... (copy the remaining coordinates into xc)
      }
      // ...

      double fval = 0;
      if (!useBinIntegral) {
#ifdef USE_PARAMCACHE
         // ...
#endif
         fval = func ( x, p );
      }
      // ... (otherwise evaluate the integral of the model function over the bin)

      if (useBinVolume) fval *= binVolume;

      // expected-error case: use the function value as the bin variance
      double invWeight = 1.0;
      // ...
      double invError2 = (fval > 0) ? invWeight / fval : 0.0;
      // ...

      // debug printout
      std::cout << x[0] << " " << y << " " << 1./invError << " params : ";
      for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
         std::cout << p[ipar] << "\t";
      std::cout << "\tfval = " << fval << " bin volume " << binVolume << " ref " << wrefVolume << std::endl;

      // chi2 contribution of this bin, truncated to avoid an overflow of the sum
      double tmp = ( y - fval )* invError;
      double resval = tmp * tmp;
      if ( resval < maxResValue )
         // ... (accumulate resval, otherwise accumulate the truncated maximum)
      // ...
   };

   auto redFunction = [](const std::vector<double> & objs){
      return std::accumulate(objs.begin(), objs.end(), double{});
   };

   // ...
   Warning("FitUtil::EvaluateChi2",
           "Multithread execution policy requires IMT, which is disabled. Changing "
           "to ROOT::Fit::ExecutionPolicy::kSerial.");
   // ...

   // serial execution
   for (unsigned int i = 0; i < n; ++i) {
      res += mapFunction(i);
   }
   // ...

   Error("FitUtil::EvaluateChi2",
         "Execution policy unknown. Available choices:\n ROOT::Fit::ExecutionPolicy::kSerial (default)\n "
         "ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
   unsigned int n = data.Size();

   std::cout << "\n\nFit data size = " << n << std::endl;
   std::cout << "evaluate effective chi2 using function " << &func << " " << p << std::endl;

   // ...
   unsigned int ndim = func.NDim();
   // ...
   double maxResValue = std::numeric_limits<double>::max() / n;
   // ...

   for (unsigned int i = 0; i < n; ++ i) {
      // ...
      const double * x = data.GetPoint(i,y);
      // ...
      double fval = func( x, p );
      // ...
      double delta_y_func = y - fval;
      // ...
      const double * ex = 0;
      // ...
      double eylow, eyhigh = 0;
      // ...
      if ( delta_y_func < 0)
         // ...
      // ...
      while ( j < ndim && ex[j] == 0.) { j++; }
      // ...
      double kPrecision = 1.E-8;
      for (unsigned int icoord = 0; icoord < ndim; ++icoord) {
         // ...
         if (ex[icoord] > 0) {
            // ...
            double x0 = x[icoord];
            double h = std::max( kEps* std::abs(ex[icoord]), 8.0*kPrecision*(std::abs(x0) + kPrecision) );
            double deriv = derivator.Derivative1(f1D, x[icoord], h);
            double edx = ex[icoord] * deriv;
            // ...
            std::cout << "error for coord " << icoord << " = " << ex[icoord] << " deriv " << deriv << std::endl;
            // ...
         }
      }
      // ...
      double w2 = (e2 > 0) ? 1.0/e2 : 0;
      double resval = w2 * ( y - fval ) * ( y - fval);
      // ...
      std::cout << x[0] << " " << y << " ex " << ex[0] << " ey " << ey << " params : ";
      for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
         std::cout << p[ipar] << "\t";
      std::cout << "\tfval = " << fval << "\tresval = " << resval << std::endl;
      // ...
      if ( resval < maxResValue )
         // ...
      // ...
   }

   // ...
   std::cout << "chi2 = " << chi2 << " n = " << nPoints << std::endl;
   MATH_ERROR_MSG("FitUtil::EvaluateChi2Residual",
                  "Errors on the coordinates are not used in calculating Chi2 residual");
   // ...

   double y, invError = 0;
   // ...
   const double * x1 = data.GetPoint(i,y, invError);
   // ...
   unsigned int ndim = data.NDim();
   double binVolume = 1.0;
   const double * x2 = 0;
   if (useBinVolume || useBinIntegral) x2 = data.BinUpEdge(i);

   // ...
   xc = new double[ndim];
   for (unsigned int j = 0; j < ndim; ++j) {
      binVolume *= std::abs( x2[j]-x1[j] );
      xc[j] = 0.5*(x2[j]+ x1[j]);
   }
   // ...

   const double * x = (useBinVolume) ? xc : x1;

   if (!useBinIntegral) {
      fval = func ( x, p );
   }
   else {
      // ...
      fval = igEval( x1, x2) ;
   }
   // ...
   if (useBinVolume) fval *= binVolume;

   // expected-error case: use the function value as the bin variance
   double invError2 = (fval > 0) ? 1.0 / fval : 0.0;
   // ...
   double resval = ( y -fval )* invError;
   // ...

   // estimate the gradient of the residual
   unsigned int npar = func.NPar();
   // ...
   if (!useBinIntegral ) {
      // ...
   }
   // ...
   SimpleGradientCalculator gc( npar, func);
   if (!useBinIntegral )
      gc.ParameterGradient(x, p, fval, g);
   // ...
   for (unsigned int k = 0; k < npar; ++k) {
      // ...
      if (useBinVolume) g[k] *= binVolume;
      // ...
   }

   if (useBinVolume) delete [] xc;
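EvaluateChi2Residual returns the signed pull of a single bin rather than its squared contribution, and optionally its parameter gradient. In the notation of the code (invError = 1/sigma_i):

   r_i = \frac{y_i - f(x_i;\,p)}{\sigma_i}

By the chain rule the gradient filled into g is proportional to -\frac{1}{\sigma_i}\,\partial f/\partial p_k, scaled by the bin volume when that option is active (the exact normalization is applied in the elided lines).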
649 "Error on the coordinates are not used in calculating Chi2 gradient");
654 assert(fg !=
nullptr);
659 std::cout <<
"\n\nFit data size = " <<
n << std::endl;
660 std::cout <<
"evaluate chi2 using function gradient " << &func <<
" " << p << std::endl;
667 double wrefVolume = 1.0;
674 unsigned int npar = func.
NPar();
675 unsigned initialNPoints = data.
Size();
677 std::vector<bool> isPointRejected(initialNPoints);
679 auto mapFunction = [&](
const unsigned int i) {
681 std::vector<double> gradFunc(npar);
682 std::vector<double> pointContribution(npar);
685 const auto y = data.
Value(i);
686 auto invError = data.
Error(i);
688 invError = (invError != 0.0) ? 1.0 / invError : 1;
692 const double *x =
nullptr;
693 std::vector<double> xc;
695 unsigned int ndim = data.
NDim();
696 double binVolume = 1;
701 for (
unsigned int j = 0; j < ndim; ++j) {
703 binVolume *= std::abs(x2[j] - x1_j);
704 xc[j] = 0.5 * (x2[j] + x1_j);
710 binVolume *= wrefVolume;
711 }
else if (ndim > 1) {
714 for (
unsigned int j = 1; j < ndim; ++j)
721 if (!useBinIntegral) {
728 fval = igEval(x,
x2);
735 std::cout << x[0] <<
" " <<
y <<
" " << 1. / invError <<
" params : ";
736 for (
unsigned int ipar = 0; ipar < npar; ++ipar)
737 std::cout << p[ipar] <<
"\t";
738 std::cout <<
"\tfval = " << fval << std::endl;
741 isPointRejected[i] =
true;
743 return pointContribution;
747 unsigned int ipar = 0;
748 for (; ipar < npar; ++ipar) {
752 gradFunc[ipar] *= binVolume;
756 double dfval = gradFunc[ipar];
762 pointContribution[ipar] = -2.0 * (
y - fval) * invError * invError * gradFunc[ipar];
767 isPointRejected[i] =
true;
770 return pointContribution;
774 auto redFunction = [&](
const std::vector<std::vector<double>> &pointContributions) {
775 std::vector<double> result(npar);
777 for (
auto const &pointContribution : pointContributions) {
778 for (
unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
779 result[parameterIndex] += pointContribution[parameterIndex];
785 std::vector<double>
g(npar);
790 Warning(
"FitUtil::EvaluateChi2Gradient",
"Multithread execution policy requires IMT, which is disabled. Changing " 791 "to ROOT::Fit::ExecutionPolicy::kSerial.");
797 std::vector<std::vector<double>> allGradients(initialNPoints);
798 for (
unsigned int i = 0; i < initialNPoints; ++i) {
799 allGradients[i] = mapFunction(i);
801 g = redFunction(allGradients);
815 Error(
"FitUtil::EvaluateChi2Gradient",
816 "Execution policy unknown. Avalaible choices:\n 0: Serial (default)\n 1: MultiThread (requires IMT)\n");
825 nPoints = initialNPoints;
827 if (std::any_of(isPointRejected.begin(), isPointRejected.end(), [](
bool point) {
return point; })) {
828 unsigned nRejected = std::accumulate(isPointRejected.begin(), isPointRejected.end(), 0);
829 assert(nRejected <= initialNPoints);
830 nPoints = initialNPoints - nRejected;
834 "Error - too many points rejected for overflow in gradient calculation");
838 std::copy(g.begin(), g.end(), grad);
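The per-point term computed above, -2.0 * (y - fval) * invError * invError * gradFunc[ipar], is the analytic derivative of the chi2 sum:

   \frac{\partial \chi^2}{\partial p_k} = -2 \sum_i \frac{y_i - f(x_i;\,p)}{\sigma_i^{2}}\, \frac{\partial f(x_i;\,p)}{\partial p_k}

Points whose value or derivative is not finite are flagged in isPointRejected and subtracted from nPoints at the end.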
   const double * x = data.Coords(i);
   double fval = func ( x, p );
   // ...
   if (g == 0) return logPdf;
   // ...
   SimpleGradientCalculator gc(func.NPar(), func);
   gc.ParameterGradient(x, p, fval, g );
   // ...
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar) {
      // ...
   }

   // ...
   std::cout << x[i] << "\t";
   std::cout << "\tpar = [ " << func.NPar() << " ] = ";
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
      std::cout << p[ipar] << "\t";
   std::cout << "\tfval = " << fval;
   std::cout << "\tgrad = [ ";
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
      std::cout << g[ipar] << "\t";
   std::cout << " ] " << std::endl;
double EvaluateLogL(const IModelFunction & func, const UnBinData & data, const double * p,
                    int iWeight, bool extended, unsigned int & nPoints,
                    ROOT::Fit::ExecutionPolicy executionPolicy, unsigned nChunks)
{
   // evaluate the log-likelihood
   unsigned int n = data.Size();
   // ...
   bool normalizeFunc = false;
   // ...

#ifdef USE_PARAMCACHE
   // ... (cache the parameter values inside the function object)
#endif

   if (!normalizeFunc) {
      if (data.NDim() == 1) {
         // ...
      } else {
         // multi-dimensional case: evaluate the function once on the first data point
         std::vector<double> x(data.NDim());
         for (unsigned int j = 0; j < data.NDim(); ++j)
            // ... (copy the coordinates of the first point into x)
      }
   }

   // ... (when the function must be normalized, compute its integral over the data range)
   std::vector<double> xmin(data.NDim());
   std::vector<double> xmax(data.NDim());
   // ...
   for (unsigned int ir = 0; ir < data.Range().Size(); ++ir) {
      // ...
      norm += igEval.Integral(xmin.data(), xmax.data());
   }
   // ... (no range set: integrate over the full support)
   if (func(xmin.data(), p) != 0 || func(xmax.data(), p) != 0) {
      MATH_ERROR_MSG("FitUtil::EvaluateLogLikelihood",
                     "A range has not been set and the function is not zero at +/- inf");
      // ...
   }
   norm = igEval.Integral(&xmin[0], &xmax[0]);
   // ...

   auto mapFunction = [&](const unsigned i) {
      // ...
      if (data.NDim() > 1) {
         std::vector<double> x(data.NDim());
         for (unsigned int j = 0; j < data.NDim(); ++j)
            // ... (copy the coordinates of point i into x)
#ifdef USE_PARAMCACHE
         fval = func(x.data());
#else
         fval = func(x.data(), p);
#endif
      }
      // ... (one-dimensional case)

#ifdef USE_PARAMCACHE
      // ...
#endif
      // ... (if normalization is requested)
      fval = fval * (1 / norm);

      // ...
      double weight = data.Weight(i);
      // ...
      W2 = weight * weight;
      // ...
   };

   auto redFunction = [](const std::vector<LikelihoodAux<double>> & objs){
      // ... (sum the log-values, the weights and the squared weights)
      for (auto & l : objs ) {
         // ...
      }
   };

   // ...
   Warning("FitUtil::EvaluateLogL",
           "Multithread execution policy requires IMT, which is disabled. Changing "
           "to ROOT::Fit::ExecutionPolicy::kSerial.");
   // ...

   // serial execution
   for (unsigned int i = 0; i < n; ++i) {
      auto resArray = mapFunction(i);
      logl += resArray.logvalue;
      sumW += resArray.weight;
      sumW2 += resArray.weight2;
   }
   // ... (multithreaded execution: take the single reduced result)
   logl = resArray.logvalue;
   sumW = resArray.weight;
   sumW2 = resArray.weight2;
   // ...

   Error("FitUtil::EvaluateLogL",
         "Execution policy unknown. Available choices:\n ROOT::Fit::ExecutionPolicy::kSerial (default)\n "
         "ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");

   // apply the extended-likelihood term if requested
   double extendedTerm = 0;
   if (extended) {
      // ...
      if (!normalizeFunc) {
         // compute the integral of the function over the data range (the expected number of events)
         std::vector<double> xmin(data.NDim());
         std::vector<double> xmax(data.NDim());
         // ...
         for (unsigned int ir = 0; ir < data.Range().Size(); ++ir) {
            // ...
         }
         // ...
         if (func(xmin.data(), p) != 0 || func(xmax.data(), p) != 0) {
            MATH_ERROR_MSG("FitUtil::EvaluateLogLikelihood",
                           "A range has not been set and the function is not zero at +/- inf");
            // ...
         }
         // ...
         extendedTerm = - nuTot;
         // ... (for a weighted likelihood, rescale by the effective weight)
         extendedTerm = - (sumW2 / sumW) * nuTot;
      }
      // ...
      logl += extendedTerm;
   }

   // debug printout
   std::cout << "Evaluated log L for parameters (";
   for (unsigned int ip = 0; ip < func.NPar(); ++ip)
      std::cout << " " << p[ip];
   std::cout << ") fval = " << -logl << std::endl;
   // evaluate the gradient of the log-likelihood
   assert(fg != nullptr);
   // ...

   unsigned int npar = func.NPar();
   unsigned initialNPoints = data.Size();
   // ...

   // debug printout
   std::cout << "\n===> Evaluate Gradient for parameters ";
   for (unsigned int ip = 0; ip < npar; ++ip)
      std::cout << " " << p[ip];

   // clamping values used to protect the gradient against overflows
   const double kdmax1 = std::sqrt(std::numeric_limits<double>::max());
   const double kdmax2 = std::numeric_limits<double>::max() / (4 * initialNPoints);

   auto mapFunction = [&](const unsigned int i) {
      std::vector<double> gradFunc(npar);
      std::vector<double> pointContribution(npar);
      // ...
      const double * x = nullptr;
      std::vector<double> xc;
      if (data.NDim() > 1) {
         xc.resize(data.NDim() );
         for (unsigned int j = 0; j < data.NDim(); ++j)
            // ... (copy the coordinates of point i into xc and point x at it)
      }
      // ...

      double fval = func(x, p);
      // ... (evaluate also the parameter gradient of the function into gradFunc)

      // debug printout for the first and last few points
      if (i < 5 || (i > data.Size()-5) ) {
         if (data.NDim() > 1)
            std::cout << i << " x " << x[0] << " y " << x[1] << " func " << fval
                      << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
         else
            std::cout << i << " x " << x[0] << " gradient " << gradFunc[0] << " " << gradFunc[1]
                      << " " << gradFunc[3] << std::endl;
      }

      for (unsigned int kpar = 0; kpar < npar; ++kpar) {
         if (fval > 0)
            pointContribution[kpar] = -1. / fval * gradFunc[kpar];
         else if (gradFunc[kpar] != 0) {
            // the function value is zero: clamp the derivative to avoid infinities
            double gg = kdmax1 * gradFunc[kpar];
            // ...
            gg = std::min(gg, kdmax2);
            // ...
            gg = std::max(gg, -kdmax2);
            pointContribution[kpar] = -gg;
         }
         // ...
      }

      return pointContribution;
   };

   auto redFunction = [&](const std::vector<std::vector<double>> &pointContributions) {
      std::vector<double> result(npar);
      // ...
      for (auto const &pointContribution : pointContributions) {
         for (unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
            result[parameterIndex] += pointContribution[parameterIndex];
      }
      // ... (return result)
   };

   std::vector<double> g(npar);
   // ...
   Warning("FitUtil::EvaluateLogLGradient",
           "Multithread execution policy requires IMT, which is disabled. Changing "
           "to ROOT::Fit::ExecutionPolicy::kSerial.");
   // ...

   // serial execution
   std::vector<std::vector<double>> allGradients(initialNPoints);
   for (unsigned int i = 0; i < initialNPoints; ++i) {
      allGradients[i] = mapFunction(i);
   }
   g = redFunction(allGradients);
   // ...

   Error("FitUtil::EvaluateLogLGradient",
         "Execution policy unknown. Available choices:\n "
         "ROOT::Fit::ExecutionPolicy::kSerial (default)\n "
         "ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
   // ...

   std::copy(g.begin(), g.end(), grad);

   // debug printout
   std::cout << "FitUtil.cxx : Final gradient ";
   for (unsigned int param = 0; param < npar; param++) {
      std::cout << " " << grad[param];
   }
   const double * x2 = 0;
   // ...
   double binVolume = 1;
   std::vector<double> xc;
   // ...
   unsigned int ndim = data.NDim();
   // ...
   for (unsigned int j = 0; j < ndim; ++j) {
      binVolume *= std::abs( x2[j]-x1[j] );
      xc[j] = 0.5*(x2[j]+ x1[j]);
   }
   // ...
   const double * x = (useBinVolume) ? &xc.front() : x1;

   if (!useBinIntegral ) {
      fval = func ( x, p );
   }
   else {
      // ...
      fval = igEval( x1, x2 ) ;
   }
   // ...
   if (useBinVolume) fval *= binVolume;

   // ...
   fval = std::max(fval, 0.0);
   double logPdf = - fval;
   // ...
   if (g == 0) return logPdf;

   // ...
   unsigned int npar = func.NPar();
   // ...
   if (!useBinIntegral )
      // ...
   // ...
   SimpleGradientCalculator gc(func.NPar(), func);
   if (!useBinIntegral )
      gc.ParameterGradient(x, p, fval, g);
   // ...
   for (unsigned int k = 0; k < npar; ++k) {
      // ...
      if (useBinVolume) g[k] *= binVolume;
      // ...
      g[k] *= ( y/fval - 1.) ;
      // ...
      const double kdmax1 = std::sqrt( std::numeric_limits<double>::max() );
      // ...
   }

   // ...
   std::cout << "x = " << x[0] << " logPdf = " << logPdf << " grad";
   for (unsigned int ipar = 0; ipar < npar; ++ipar)
      std::cout << g[ipar] << "\t";
   std::cout << std::endl;
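The quantity differentiated here is the Poisson log-pdf of a single bin (up to a term depending only on y), which is what produces the (y/fval - 1) factor applied to g[k]:

   \ln P(y_i \mid f_i) = y_i \ln f_i - f_i + \text{const}, \qquad \frac{\partial \ln P}{\partial p_k} = \left( \frac{y_i}{f_i} - 1 \right) \frac{\partial f_i}{\partial p_k}, \qquad f_i = f(x_i;\,p)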
   unsigned int n = data.Size();

#ifdef USE_PARAMCACHE
   // ...
#endif
   // ...
   bool useW2 = (iWeight == 2);
   // ...
   double wrefVolume = 1.0;
   // ...

   // debug printout
   std::cout << "Evaluate PoissonLogL for params = [ ";
   for (unsigned int j = 0; j < func.NPar(); ++j) std::cout << p[j] << " , ";
   std::cout << "] - data size = " << n << " useBinIntegral " << useBinIntegral << " useBinVolume "
             << useBinVolume << " useW2 " << useW2 << " wrefVolume = " << wrefVolume << std::endl;

#ifdef USE_PARAMCACHE
   // ...
#endif

   auto mapFunction = [&](const unsigned i) {
      // ...
      const double *x = nullptr;
      std::vector<double> xc;
      // ...
      double binVolume = 1.0;
      if (useBinVolume) {
         unsigned int ndim = data.NDim();
         // ...
         xc.resize(data.NDim());
         for (unsigned int j = 0; j < ndim; ++j) {
            // xx is the lower edge, x2 the upper edge of the bin in coordinate j
            binVolume *= std::abs(x2[j] - xx);
            xc[j] = 0.5 * (x2[j] + xx);
         }
         // ...
         // normalize the bin volume using a reference value
         binVolume *= wrefVolume;
      } else if (data.NDim() > 1) {
         xc.resize(data.NDim());
         // ...
         for (unsigned int j = 1; j < data.NDim(); ++j) {
            // ... (copy the remaining coordinates into xc)
         }
      }
      // ...

      if (!useBinIntegral) {
#ifdef USE_PARAMCACHE
         // ... (evaluate the cached function at x)
#endif
      }
      // ... (otherwise evaluate the integral of the function over the bin)

      if (useBinVolume) fval *= binVolume;

      // debug printout (every NSAMPLE-th point)
      if (i % NSAMPLE == 0) {
         std::cout << "evt " << i << " x = [ ";
         for (unsigned int j = 0; j < func.NDim(); ++j) std::cout << x[j] << " , ";
         // ...
         std::cout << "x2 = [ ";
         for (unsigned int j = 0; j < func.NDim(); ++j) std::cout << data.BinUpEdge(i)[j] << " , ";
         // ...
         std::cout << " y = " << y << " fval = " << fval << std::endl;
      }

      // protect against negative function values
      fval = std::max(fval, 0.0);

      double nloglike = 0; // negative log-likelihood contribution of this bin

      // weighted-likelihood case (useW2): weight each term with the bin effective weight
      double weight = 1.0;
      // ...
      double error = data.Error(i);
      weight = (error * error) / y;
      // ...
      nloglike += weight * ( fval - y);
      // ...

      // standard likelihood case
      if (extended) nloglike = fval - y;
      // ... (the remaining y-dependent log term is added for non-empty bins)

      return nloglike;
   };

   auto redFunction = [](const std::vector<double> &objs) {
      return std::accumulate(objs.begin(), objs.end(), double{});
   };

   // ...
   Warning("FitUtil::EvaluatePoissonLogL",
           "Multithread execution policy requires IMT, which is disabled. Changing "
           "to ROOT::Fit::ExecutionPolicy::kSerial.");
   // ...

   // serial execution
   for (unsigned int i = 0; i < n; ++i) {
      res += mapFunction(i);
   }
   // ...

   Error("FitUtil::EvaluatePoissonLogL",
         "Execution policy unknown. Available choices:\n ROOT::Fit::ExecutionPolicy::kSerial (default)\n "
         "ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");

   // ...
   std::cout << "Loglikelihood = " << res << std::endl;
   // evaluate the gradient of the Poisson log-likelihood
   assert(fg != nullptr);
   // ...

#ifdef USE_PARAMCACHE
   // ...
#endif
   // ...
   double wrefVolume = 1.0;
   // ...

   unsigned int npar = func.NPar();
   unsigned initialNPoints = data.Size();
   // ...

   auto mapFunction = [&](const unsigned int i) {
      // ...
      std::vector<double> gradFunc(npar);
      std::vector<double> pointContribution(npar);
      // ...
      const auto y = data.Value(i);
      auto invError = data.Error(i);
      // ...
      invError = (invError != 0.0) ? 1.0 / invError : 1;
      // ...
      const double *x = nullptr;
      std::vector<double> xc;
      // ...
      unsigned ndim = data.NDim();
      double binVolume = 1.0;
      if (useBinVolume) {
         // ...
         for (unsigned int j = 0; j < ndim; ++j) {
            // x1_j is the lower edge, x2 the upper edge of the bin in coordinate j
            binVolume *= std::abs(x2[j] - x1_j);
            xc[j] = 0.5 * (x2[j] + x1_j);
         }
         // ...
         // normalize the bin volume using a reference value
         binVolume *= wrefVolume;
      } else if (ndim > 1) {
         // ...
         for (unsigned int j = 1; j < ndim; ++j)
            // ...
      }
      // ...

      if (!useBinIntegral) {
         // ... (evaluate the function value and its parameter gradient at x)
      }
      else {
         // ...
         fval = igEval(x, x2);
         // ... (and the gradient of the bin integral)
      }

      // debug printout for the first and last few points
      if (i < 5 || (i > data.Size()-5) ) {
         if (data.NDim() > 1)
            std::cout << i << " x " << x[0] << " y " << x[1] << " func " << fval
                      << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
         else
            std::cout << i << " x " << x[0] << " gradient " << gradFunc[0] << " " << gradFunc[1]
                      << " " << gradFunc[3] << std::endl;
      }

      for (unsigned int ipar = 0; ipar < npar; ++ipar) {
         // correct the gradient for the bin volume
         gradFunc[ipar] *= binVolume;
         // ...
         if (fval > 0)
            pointContribution[ipar] = gradFunc[ipar] * (1. - y / fval);
         else if (gradFunc[ipar] != 0) {
            // the function value is zero: clamp the derivative to avoid infinities
            const double kdmax1 = std::sqrt(std::numeric_limits<double>::max());
            const double kdmax2 = std::numeric_limits<double>::max() / (4 * initialNPoints);
            double gg = kdmax1 * gradFunc[ipar];
            // ...
            gg = std::min(gg, kdmax2);
            // ...
            gg = std::max(gg, -kdmax2);
            pointContribution[ipar] = -gg;
         }
         // ...
      }

      return pointContribution;
   };

   auto redFunction = [&](const std::vector<std::vector<double>> &pointContributions) {
      std::vector<double> result(npar);
      // ...
      for (auto const &pointContribution : pointContributions) {
         for (unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
            result[parameterIndex] += pointContribution[parameterIndex];
      }
      // ... (return result)
   };

   std::vector<double> g(npar);
   // ...
   Warning("FitUtil::EvaluatePoissonLogLGradient",
           "Multithread execution policy requires IMT, which is disabled. Changing "
           "to ROOT::Fit::ExecutionPolicy::kSerial.");
   // ...

   // serial execution
   std::vector<std::vector<double>> allGradients(initialNPoints);
   for (unsigned int i = 0; i < initialNPoints; ++i) {
      allGradients[i] = mapFunction(i);
   }
   g = redFunction(allGradients);
   // ...

   Error("FitUtil::EvaluatePoissonLogLGradient",
         "Execution policy unknown. Available choices:\n 0: Serial (default)\n 1: MultiThread (requires IMT)\n");
   // ...

   std::copy(g.begin(), g.end(), grad);

   // debug printout
   std::cout << "***** Final gradient : ";
   for (unsigned int ii = 0; ii < npar; ++ii) std::cout << grad[ii] << " ";
unsigned setAutomaticChunking(unsigned nEvents) {
   // ... (query the system information; s is the SysInfo_t structure)
   auto ncpu = s.fCpus;
   if (nEvents/ncpu < 1000) return ncpu;
   return nEvents/1000;
}
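A small, self-contained sketch of the same chunking rule, useful to see how the chunk count scales with the number of events (AutomaticChunking and the hard-coded core count are illustrative stand-ins, not the FitUtil API):

#include <iostream>

// Same rule as setAutomaticChunking above: with fewer than ~1000 events per
// core, use one chunk per core; otherwise split the work into ~1000-event chunks.
unsigned AutomaticChunking(unsigned nEvents, unsigned ncpu)
{
   if (nEvents / ncpu < 1000) return ncpu;
   return nEvents / 1000;
}

int main()
{
   for (unsigned n : {500u, 10000u, 1000000u})
      std::cout << n << " events -> " << AutomaticChunking(n, /*ncpu=*/8) << " chunks\n";
   return 0;
}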