template<class GradFunc = IGradModelFunction>
struct ParamDerivFunc {
   ParamDerivFunc(const GradFunc & f) : fFunc(f), fIpar(0) {}
   void SetDerivComponent(unsigned int ipar) { fIpar = ipar; }
   double operator() (const double *x, const double *p) const {
      return fFunc.ParameterDerivative( x, p, fIpar );
   }
   unsigned int NDim() const { return fFunc.NDim(); }
   const GradFunc & fFunc;
   unsigned int fIpar;
};
// simple numerical gradient calculator used as a fallback when the model
// function does not provide analytical derivatives
class SimpleGradientCalculator {

   // ...
   SimpleGradientCalculator(int gdim, const IModelFunction & func, double eps = 2.E-8, int istrat = 1) :
      // ...
      fVec(std::vector<double>(gdim) )
      // ...
   {}

   // internal method: numerical derivative w.r.t. parameter k, given the value f0 at (x,p)
   double DoParameterDerivative(const double *x, const double *p, double f0, int k) const {
      double p0 = p[k];
      double h = std::max( fEps* std::abs(p0), 8.0*fPrecision*(std::abs(p0) + fPrecision) );
      // ... (fVec holds a copy of p with its k-th component shifted by the step h)
      double f1 = fFunc( x, &fVec.front() );
      // ... (for the central-difference strategy the opposite shift is evaluated as well)
      double f2 = fFunc( x, &fVec.front() );
      deriv = 0.5 * ( f2 - f1 )/ h;
      // ... (otherwise a one-sided difference is used)
      deriv = ( f1 - f0 )/ h;
      // ...
   }

   unsigned int NDim() const {
      // ...
   }

   unsigned int NPar() const {
      // ...
   }

   double ParameterDerivative(const double *x, const double *p, int ipar) const {
      // copy the parameter values into the work vector and evaluate the reference value
      std::copy(p, p+fN, fVec.begin());
      double f0 = fFunc( x, p);
      return DoParameterDerivative( x, p, f0, ipar);
   }

   // gradient w.r.t. the parameters
   void ParameterGradient(const double *x, const double * p, double f0, double * g) {
      std::copy(p, p+fN, fVec.begin());
      for (unsigned int k = 0; k < fN; ++k) {
         g[k] = DoParameterDerivative( x, p, f0, k);
      }
   }

   // gradient w.r.t. the coordinates
   void Gradient(const double *x, const double * p, double f0, double * g) {
      std::copy( x, x+fN, fVec.begin());
      for (unsigned int k = 0; k < fN; ++k) {
         double x0 = x[k];
         double h = std::max( fEps* std::abs(x0), 8.0*fPrecision*(std::abs(x0) + fPrecision) );
         // ... (fVec[k] shifted by the step h)
         double f1 = fFunc( &fVec.front(), p );
         // ... (opposite shift for the central-difference strategy)
         double f2 = fFunc( &fVec.front(), p );
         g[k] = 0.5 * ( f2 - f1 )/ h;
         // ... (one-sided alternative)
         g[k] = ( f1 - f0 )/ h;
         // ...
      }
   }

   // ...
   mutable std::vector<double> fVec;
   // ...
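
// --- Illustrative sketch (not part of FitUtil.cxx) -----------------------------------
// A minimal, self-contained example of the finite-difference rule used above: the step
// is h = max(eps*|p0|, 8*precision*(|p0|+precision)), and the derivative is taken either
// two-sided, 0.5*(f(p0+h)-f(p0-h))/h, or one-sided, (f(p0+h)-f(p0))/h. The quadratic toy
// model and the default constants are assumptions for illustration only.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <functional>
#include <limits>

static double NumericalParamDerivative(const std::function<double(double)> &f,
                                       double p0, bool twoSided,
                                       double eps = 2.E-8,
                                       double precision = std::numeric_limits<double>::epsilon())
{
   double h = std::max(eps * std::abs(p0), 8.0 * precision * (std::abs(p0) + precision));
   double f1 = f(p0 + h);                 // forward-shifted value
   if (twoSided) {
      double f2 = f(p0 - h);              // backward-shifted value
      return 0.5 * (f1 - f2) / h;         // central difference
   }
   return (f1 - f(p0)) / h;               // one-sided difference
}

int main()
{
   auto model = [](double p) { return 3.0 * p * p + 2.0 * p; }; // d/dp = 6p + 2
   std::printf("central  : %g\n", NumericalParamDerivative(model, 1.5, true));
   std::printf("one-sided: %g\n", NumericalParamDerivative(model, 1.5, false));
   return 0;
}
// --------------------------------------------------------------------------------------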
// CorrectValue: protect the returned objective value against infinities and NaN
if (rval > - std::numeric_limits<double>::max() && rval < std::numeric_limits<double>::max() )
   // ...
return -std::numeric_limits<double>::max();
// ...
return + std::numeric_limits<double>::max();

// CheckInfNaNValue: same protection, correcting the value in place
if (rval > - std::numeric_limits<double>::max() && rval < std::numeric_limits<double>::max() )
   // ...
rval = -std::numeric_limits<double>::max();
// ...
rval = + std::numeric_limits<double>::max();
// calculate the gradient of the integrated model function over a bin [x1,x2]:
// one integral per parameter-derivative component
template <class GFunc>
void CalculateGradientIntegral(const GFunc & gfunc,
                               const double *x1, const double *x2, const double * p, double * g) {

   // needs to calculate the integral for each partial derivative
   ParamDerivFunc<GFunc> pfunc( gfunc);
   // ... (an integral evaluator igDerEval is built on pfunc for the given parameters p)
   unsigned int npar = gfunc.NPar();
   for (unsigned int k = 0; k < npar; ++k ) {
      pfunc.SetDerivComponent(k);
      g[k] = igDerEval( x1, x2 );
   }
}
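
// --- Illustrative sketch (not part of FitUtil.cxx) -----------------------------------
// CalculateGradientIntegral integrates one parameter-derivative component at a time by
// fixing the component index in a ParamDerivFunc-like adapter. The toy gradient function
// (a 1-d line f = p0 + p1*x) and the trapezoid integrator below are assumptions used only
// to show the pattern; ROOT uses its own IntegralEvaluator instead of the helper here.
#include <cstdio>

struct ToyGradFunc {
   // d f / d p_ipar at point x, for f(x;p) = p0 + p1*x
   double ParameterDerivative(const double *x, const double * /*p*/, unsigned int ipar) const {
      return (ipar == 0) ? 1.0 : x[0];
   }
   unsigned int NDim() const { return 1; }
   unsigned int NPar() const { return 2; }
};

template <class GradFunc>
struct DerivComponent {                       // same idea as ParamDerivFunc above
   DerivComponent(const GradFunc &f) : fFunc(f), fIpar(0) {}
   void SetDerivComponent(unsigned int ipar) { fIpar = ipar; }
   double operator()(const double *x, const double *p) const {
      return fFunc.ParameterDerivative(x, p, fIpar);
   }
   const GradFunc &fFunc;
   unsigned int fIpar;
};

// crude 1-d trapezoid integral, standing in for the bin-integral evaluator
template <class F>
double Integrate(const F &f, double a, double b, const double *p, int nstep = 100) {
   double sum = 0, dx = (b - a) / nstep;
   for (int i = 0; i < nstep; ++i) {
      double xl = a + i * dx, xr = xl + dx;
      sum += 0.5 * (f(&xl, p) + f(&xr, p)) * dx;
   }
   return sum;
}

int main() {
   ToyGradFunc gfunc;
   DerivComponent<ToyGradFunc> pfunc(gfunc);
   double p[2] = {1.0, 2.0};
   double g[2];
   for (unsigned int k = 0; k < gfunc.NPar(); ++k) {
      pfunc.SetDerivComponent(k);
      g[k] = Integrate(pfunc, 0.0, 1.0, p);   // integral of df/dp_k over the bin [0,1]
   }
   std::printf("g[0] = %g (expect 1), g[1] = %g (expect 0.5)\n", g[0], g[1]);
   return 0;
}
// --------------------------------------------------------------------------------------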
// FitUtil::EvaluateChi2: evaluate the chi2 given the model function and the binned data

unsigned int n = data.Size();
// ...

// (debug printout)
std::cout << "\n\nFit data size = " << n << std::endl;
std::cout << "evaluate chi2 using function " << &func << " " << p << std::endl;
std::cout << "use empty bins " << fitOpt.fUseEmpty << std::endl;
std::cout << "use integral " << fitOpt.fIntegral << std::endl;
std::cout << "use all error=1 " << fitOpt.fErrors1 << std::endl;
if (isWeighted) std::cout << "Weighted data set - sumw = " << data.SumOfContent()
                          << " sumw2 = " << data.SumOfError2() << std::endl;
// ...

double maxResValue = std::numeric_limits<double>::max() / n;
double wrefVolume = 1.0;
// ...

auto mapFunction = [&](const unsigned i){

   // ...
   const auto y = data.Value(i);
   // ...

   const double * x = nullptr;
   std::vector<double> xc;
   double binVolume = 1.0;
   // ... (when the bin-volume option is used, compute the bin centre and volume from the edges)
   unsigned int ndim = data.NDim();
   // ...
   xc.resize(data.NDim());
   for (unsigned int j = 0; j < ndim; ++j) {
      // ... (xx is the lower bin edge for coordinate j, x2 the upper edge)
      binVolume *= std::abs( x2[j] - xx );
      xc[j] = 0.5*( x2[j] + xx );
      // ...
   }
   // normalize the bin volume using a reference value
   binVolume *= wrefVolume;
   } else if (data.NDim() > 1) {
      xc.resize(data.NDim());
      // ...
      for (unsigned int j = 1; j < data.NDim(); ++j)
         // ...
   }
   // ...

   if (!useBinIntegral) {
      fval = func ( x, p );
   }
   // ... (otherwise the bin integral of the model is used)
   if (useBinVolume) fval *= binVolume;

   // ...
   double invWeight = 1.0;
   // ...
   // expected-error case: the variance is taken from the model value
   double invError2 = (fval > 0) ? invWeight / fval : 0.0;
   // ...

   // (debug printout)
   std::cout << x[0] << " " << y << " " << 1./invError << " params : ";
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
      std::cout << p[ipar] << "\t";
   std::cout << "\tfval = " << fval << " bin volume " << binVolume << " ref " << wrefVolume << std::endl;
   // ...

   double tmp = ( y - fval ) * invError;
   double resval = tmp * tmp;

   // avoid infinity or NaN in the chi2 sum
   if ( resval < maxResValue )
      // ...
};

// reduction: sum the per-bin contributions
auto redFunction = [](const std::vector<double> & objs){
   return std::accumulate(objs.begin(), objs.end(), double{});
};

// ...
Warning("FitUtil::EvaluateChi2", "Multithread execution policy requires IMT, which is disabled. Changing "
                                 "to ROOT::Fit::ExecutionPolicy::kSerial.");
// ...
for (unsigned int i = 0; i < n; ++i) {
   res += mapFunction(i);
}
// ...
Error("FitUtil::EvaluateChi2",
      "Execution policy unknown. Available choices:\n ROOT::Fit::ExecutionPolicy::kSerial (default)\n ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
// FitUtil::EvaluateChi2Effective: chi2 including the effect of the errors on the coordinates

unsigned int n = data.Size();
// ...

// (debug printout)
std::cout << "\n\nFit data size = " << n << std::endl;
std::cout << "evaluate effective chi2 using function " << &func << " " << p << std::endl;
// ...

unsigned int ndim = func.NDim();
// ...
double maxResValue = std::numeric_limits<double>::max() / n;
// ...

for (unsigned int i = 0; i < n; ++i) {

   // ...
   double fval = func( x, p );
   // ...
   double delta_y_func = y - fval;
   // ...
   const double * ex = 0;
   // ...
   double eylow, eyhigh = 0;
   // ... (use the low or high value error depending on the sign of the residual)
   if ( delta_y_func < 0)
      // ...

   // ... (find the first coordinate with a non-zero error)
   while ( j < ndim && ex[j] == 0.) { j++; }
   // ...

   double kPrecision = 1.E-8;
   for (unsigned int icoord = 0; icoord < ndim; ++icoord) {
      // ...
      if (ex[icoord] > 0) {
         // ... (numerical derivative of the model w.r.t. the coordinate)
         double x0 = x[icoord];
         double h = std::max( kEps* std::abs(ex[icoord]), 8.0*kPrecision*(std::abs(x0) + kPrecision) );
         // ...
         double edx = ex[icoord] * deriv;
         // ... (edx is added in quadrature to the value error)

         // (debug printout)
         std::cout << "error for coord " << icoord << " = " << ex[icoord] << " deriv " << deriv << std::endl;
      }
   }
   // ...
   double w2 = (e2 > 0) ? 1.0/e2 : 0;
   double resval = w2 * ( y - fval ) * ( y - fval);

   // (debug printout)
   std::cout << x[0] << " " << y << " ex " << ex[0] << " ey " << ey << " params : ";
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
      std::cout << p[ipar] << "\t";
   std::cout << "\tfval = " << fval << "\tresval = " << resval << std::endl;

   // avoid infinity or NaN in the chi2 sum
   if ( resval < maxResValue )
      // ...
}
// ...
std::cout << "chi2 = " << chi2 << " n = " << nPoints << std::endl;
// FitUtil::EvaluateChi2Residual: residual contribution of one bin to the chi2
// (optionally filling also the gradient of the residual)

MATH_ERROR_MSG("FitUtil::EvaluateChi2Residual",
               "Errors on the coordinates are not used in calculating the Chi2 residual");
// ...

double y, invError = 0;
// ...

unsigned int ndim = data.NDim();
double binVolume = 1.0;
const double * x2 = 0;
if (useBinVolume || useBinIntegral) x2 = data.BinUpEdge(i);
// ...
xc = new double[ndim];
for (unsigned int j = 0; j < ndim; ++j) {
   binVolume *= std::abs( x2[j] - x1[j] );
   xc[j] = 0.5*( x2[j] + x1[j]);
}
// ...

const double * x = (useBinVolume) ? xc : x1;
// ...
if (!useBinIntegral) {
   fval = func ( x, p );
}
else {
   // ... (use the integral of the model over the bin)
   fval = igEval( x1, x2 ) ;
}
// ...
if (useBinVolume) fval *= binVolume;

// ...
// expected-error case: the variance is taken from the model value
double invError2 = (fval > 0) ? 1.0 / fval : 0.0;
// ...

double resval = ( y - fval ) * invError;
// ...

// estimate the gradient of the residual when requested
unsigned int npar = func.NPar();
// ...
if (!useBinIntegral ) {
   // ... (analytical parameter gradient of the model, when available)
}
// ... (otherwise fall back to the numerical gradient)
SimpleGradientCalculator gc( npar, func);
if (!useBinIntegral )
   gc.ParameterGradient( x, p, fval, g);
// ...
for (unsigned int k = 0; k < npar; ++k) {
   // ...
   if (useBinVolume) g[k] *= binVolume;
   // ...
}
// ...
if (useBinVolume) delete [] xc;
654 "Error on the coordinates are not used in calculating Chi2 gradient");
659 assert(fg !=
nullptr);
664 std::cout <<
"\n\nFit data size = " <<
n << std::endl;
665 std::cout <<
"evaluate chi2 using function gradient " << &func <<
" " << p << std::endl;
672 double wrefVolume = 1.0;
684 unsigned int npar = func.
NPar();
685 unsigned initialNPoints = data.
Size();
687 std::vector<bool> isPointRejected(initialNPoints);
689 auto mapFunction = [&](
const unsigned int i) {
691 std::vector<double> gradFunc(npar);
692 std::vector<double> pointContribution(npar);
695 const auto y = data.
Value(i);
696 auto invError = data.
Error(i);
698 invError = (invError != 0.0) ? 1.0 / invError : 1;
702 const double *
x =
nullptr;
703 std::vector<double> xc;
705 unsigned int ndim = data.
NDim();
706 double binVolume = 1;
711 for (
unsigned int j = 0; j < ndim; ++j) {
713 binVolume *= std::abs(
x2[j] - x1_j);
714 xc[j] = 0.5 * (
x2[j] + x1_j);
720 binVolume *= wrefVolume;
721 }
else if (ndim > 1) {
724 for (
unsigned int j = 1; j < ndim; ++j)
731 if (!useBinIntegral) {
738 fval = igEval(
x,
x2);
745 std::cout <<
x[0] <<
" " <<
y <<
" " << 1. / invError <<
" params : ";
746 for (
unsigned int ipar = 0; ipar < npar; ++ipar)
747 std::cout << p[ipar] <<
"\t";
748 std::cout <<
"\tfval = " << fval << std::endl;
751 isPointRejected[i] =
true;
753 return pointContribution;
757 unsigned int ipar = 0;
758 for (; ipar < npar; ++ipar) {
762 gradFunc[ipar] *= binVolume;
766 double dfval = gradFunc[ipar];
772 pointContribution[ipar] = -2.0 * (
y - fval) * invError * invError * gradFunc[ipar];
777 isPointRejected[i] =
true;
780 return pointContribution;
784 auto redFunction = [&](
const std::vector<std::vector<double>> &pointContributions) {
785 std::vector<double> result(npar);
787 for (
auto const &pointContribution : pointContributions) {
788 for (
unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
789 result[parameterIndex] += pointContribution[parameterIndex];
795 std::vector<double>
g(npar);
800 Warning(
"FitUtil::EvaluateChi2Gradient",
"Multithread execution policy requires IMT, which is disabled. Changing "
801 "to ROOT::Fit::ExecutionPolicy::kSerial.");
807 std::vector<std::vector<double>> allGradients(initialNPoints);
808 for (
unsigned int i = 0; i < initialNPoints; ++i) {
809 allGradients[i] = mapFunction(i);
811 g = redFunction(allGradients);
825 Error(
"FitUtil::EvaluateChi2Gradient",
826 "Execution policy unknown. Avalaible choices:\n 0: Serial (default)\n 1: MultiThread (requires IMT)\n");
835 nPoints = initialNPoints;
837 if (std::any_of(isPointRejected.begin(), isPointRejected.end(), [](
bool point) { return point; })) {
838 unsigned nRejected = std::accumulate(isPointRejected.begin(), isPointRejected.end(), 0);
839 assert(nRejected <= initialNPoints);
840 nPoints = initialNPoints - nRejected;
844 "Error - too many points rejected for overflow in gradient calculation");
848 std::copy(
g.begin(),
g.end(), grad);
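
// --- Illustrative sketch (not part of FitUtil.cxx) -----------------------------------
// Each point contributes -2 * (y - f) * invError^2 * df/dp_k to the chi2 gradient, and
// the per-point vectors are summed component by component, as redFunction does above.
// The analytic gradient of the toy linear model is an assumption used to keep the
// example self-contained.
#include <cstdio>
#include <vector>

int main() {
   std::vector<double> x  = {0.0, 1.0, 2.0};
   std::vector<double> y  = {1.2, 3.1, 4.9};
   std::vector<double> ey = {0.1, 0.1, 0.1};
   double p[2] = {1.0, 2.0};                    // f(x;p) = p0 + p1*x
   const unsigned int npar = 2;

   auto mapFunction = [&](unsigned int i) {
      double fval     = p[0] + p[1] * x[i];
      double invError = (ey[i] != 0.0) ? 1.0 / ey[i] : 1.0;
      double gradFunc[2] = {1.0, x[i]};         // df/dp0, df/dp1
      std::vector<double> pointContribution(npar);
      for (unsigned int k = 0; k < npar; ++k)
         pointContribution[k] = -2.0 * (y[i] - fval) * invError * invError * gradFunc[k];
      return pointContribution;
   };

   std::vector<double> g(npar, 0.0);            // reduction: component-wise sum
   for (unsigned int i = 0; i < x.size(); ++i) {
      auto c = mapFunction(i);
      for (unsigned int k = 0; k < npar; ++k) g[k] += c[k];
   }
   std::printf("dchi2/dp0 = %g  dchi2/dp1 = %g\n", g[0], g[1]);
   return 0;
}
// --------------------------------------------------------------------------------------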
// FitUtil::EvaluatePdf: pdf contribution of one unbinned point to the LogL
// (optionally filling also the gradient of the log-pdf)

const double * x = data.Coords(i);
double fval = func ( x, p );
// ...
if (g == 0) return logPdf;
// ...

// ... (use the analytical parameter gradient when the function provides it,
//      otherwise estimate it numerically)
SimpleGradientCalculator gc(func.NPar(), func);
gc.ParameterGradient( x, p, fval, g );
// ...
for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar) {
   // ...
}

// (debug printout)
std::cout << x[i] << "\t";
std::cout << "\tpar = [ " << func.NPar() << " ] = ";
for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
   std::cout << p[ipar] << "\t";
std::cout << "\tfval = " << fval;
std::cout << "\tgrad = [ ";
for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
   std::cout << g[ipar] << "\t";
std::cout << " ] " << std::endl;
// FitUtil::EvaluateLogL: evaluate the unbinned log-likelihood for the parameters p
// (fragment of the signature:)
                             int iWeight, bool extended, unsigned int &nPoints,
// ...

unsigned int n = data.Size();
// ...
bool normalizeFunc = false;
// ...
(const_cast<IModelFunctionTempl<double> &>(func)).SetParameters(p);
// ...
nPoints = data.Size();
// ...

if (!normalizeFunc) {
   // ... (evaluate the function once at the first data point)
   if (data.NDim() == 1) {
      // ...
   }
   // ... (multi-dimensional case)
   std::vector<double> x(data.NDim());
   for (unsigned int j = 0; j < data.NDim(); ++j)
      // ...
}
// ... (when the function has to be normalized, compute its integral)
std::vector<double> xmin(data.NDim());
std::vector<double> xmax(data.NDim());
IntegralEvaluator<> igEval(func, p, true);
// ... (if a data range is set, sum the integrals over the ranges)
for (unsigned int ir = 0; ir < data.Range().Size(); ++ir) {
   // ...
   norm += igEval.Integral( xmin.data(), xmax.data());
}
// ... (otherwise integrate over the full coordinate space, checking that the
//      function vanishes at +/- infinity)
if (func( xmin.data(), p) != 0 || func( xmax.data(), p) != 0) {
   MATH_ERROR_MSG("FitUtil::EvaluateLogLikelihood",
                  "A range has not been set and the function is not zero at +/- inf");
   // ...
}
norm = igEval.Integral(&xmin[0], &xmax[0]);
// ...

auto mapFunction = [&](const unsigned i) {
   // ...
   if (data.NDim() > 1) {
      std::vector<double> x(data.NDim());
      for (unsigned int j = 0; j < data.NDim(); ++j)
         // ...
      fval = func(x.data());
      // ... (alternative without the cached parameters)
      fval = func(x.data(), p);
   }
   // ...
   if (normalizeFunc)
      fval = fval * (1 / norm);
   // ...

   // ... (weighted likelihood: accumulate also the sum of weights and weights squared)
   double weight = data.Weight(i);
   // ...
   W2 = weight * weight;
   // ...
   return LikelihoodAux<double>(logval, W, W2);
};

// reduction: sum the per-point log values and weights
auto redFunction = [](const std::vector<LikelihoodAux<double>> & objs){
   auto l0 = LikelihoodAux<double>(0.0, 0.0, 0.0);
   for (auto & l : objs ) {
      // ...
   }
   // ...
};

// ...
Warning("FitUtil::EvaluateLogL", "Multithread execution policy requires IMT, which is disabled. Changing "
                                 "to ROOT::Fit::ExecutionPolicy::kSerial.");
// ...
for (unsigned int i = 0; i < n; ++i) {
   auto resArray = mapFunction(i);
   logl += resArray.logvalue;
   sumW += resArray.weight;
   sumW2 += resArray.weight2;
}
// ... (multithreaded case: a single reduced result)
logl = resArray.logvalue;
sumW = resArray.weight;
sumW2 = resArray.weight2;
// ...
Error("FitUtil::EvaluateLogL",
      "Execution policy unknown. Available choices:\n ROOT::Fit::ExecutionPolicy::kSerial (default)\n ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
// ...

// compute the extended term for extended fits
double extendedTerm = 0;
// ...
if (!normalizeFunc) {
   IntegralEvaluator<> igEval( func, p, true);
   std::vector<double> xmin(data.NDim());
   std::vector<double> xmax(data.NDim());
   // ... (if a data range is set, sum the integrals over the ranges)
   for (unsigned int ir = 0; ir < data.Range().Size(); ++ir) {
      // ...
      nuTot += igEval.Integral( xmin.data(), xmax.data());
   }
   // ... (otherwise integrate over the full coordinate space)
   if (func( xmin.data(), p) != 0 || func( xmax.data(), p) != 0) {
      MATH_ERROR_MSG("FitUtil::EvaluateLogLikelihood",
                     "A range has not been set and the function is not zero at +/- inf");
      // ...
   }
   nuTot = igEval.Integral(&xmin[0], &xmax[0]);
   // ...
   extendedTerm = - nuTot;
   // ... (weighted likelihood case)
   extendedTerm = - (sumW2 / sumW) * nuTot;
}
// ...
logl += extendedTerm;
// ...

// (debug printout)
std::cout << "Evaluated log L for parameters (";
for (unsigned int ip = 0; ip < func.NPar(); ++ip)
   std::cout << " " << p[ip];
std::cout << ") fval = " << -logl << std::endl;
// FitUtil::EvaluateLogLGradient: fill grad with the gradient of the unbinned log-likelihood

assert(fg != nullptr);
// ...

unsigned int npar = func.NPar();
unsigned initialNPoints = data.Size();
// ...

// (debug printout)
std::cout << "\n===> Evaluate Gradient for parameters ";
for (unsigned int ip = 0; ip < npar; ++ip)
   std::cout << " " << p[ip];
// ...

const double kdmax1 = std::sqrt(std::numeric_limits<double>::max());
const double kdmax2 = std::numeric_limits<double>::max() / (4 * initialNPoints);

auto mapFunction = [&](const unsigned int i) {
   std::vector<double> gradFunc(npar);
   std::vector<double> pointContribution(npar);
   // ...
   const double * x = nullptr;
   std::vector<double> xc;
   if (data.NDim() > 1) {
      xc.resize(data.NDim() );
      for (unsigned int j = 0; j < data.NDim(); ++j)
         // ...
   }
   // ...
   double fval = func(x, p);
   // ... (evaluate the parameter gradient of the function into gradFunc)

   // (debug printout for the first and last few points)
   if (i < 5 || (i > data.Size()-5) ) {
      if (data.NDim() > 1)
         std::cout << i << " x " << x[0] << " y " << x[1] << " func " << fval
                   << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
      else
         std::cout << i << " x " << x[0] << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
   }
   // ...

   for (unsigned int kpar = 0; kpar < npar; ++kpar) {
      // ... (regular case, positive pdf value)
      pointContribution[kpar] = -1. / fval * gradFunc[kpar];
      else if (gradFunc[kpar] != 0) {
         // protection when the pdf value is not positive: use a large, clamped value
         double gg = kdmax1 * gradFunc[kpar];
         gg = std::min(gg, kdmax2);
         // ...
         gg = std::max(gg, -kdmax2);
         pointContribution[kpar] = -gg;
      }
      // ...
   }
   // ...
   return pointContribution;
};

// reduction: component-wise sum of the per-point contributions
auto redFunction = [&](const std::vector<std::vector<double>> &pointContributions) {
   std::vector<double> result(npar);
   for (auto const &pointContribution : pointContributions) {
      for (unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
         result[parameterIndex] += pointContribution[parameterIndex];
   }
   // ...
};

std::vector<double> g(npar);
// ...
Warning("FitUtil::EvaluateLogLGradient", "Multithread execution policy requires IMT, which is disabled. Changing "
                                         "to ROOT::Fit::ExecutionPolicy::kSerial.");
// ...
std::vector<std::vector<double>> allGradients(initialNPoints);
for (unsigned int i = 0; i < initialNPoints; ++i) {
   allGradients[i] = mapFunction(i);
}
g = redFunction(allGradients);
// ...
Error("FitUtil::EvaluateLogLGradient",
      "Execution policy unknown. Available choices:\n "
      "ROOT::Fit::ExecutionPolicy::kSerial (default)\n "
      "ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
// ...

std::copy(g.begin(), g.end(), grad);

// (debug printout)
std::cout << "FitUtil.cxx : Final gradient ";
for (unsigned int param = 0; param < npar; param++) {
   std::cout << " " << grad[param];
}
// FitUtil::EvaluatePoissonBinPdf: Poisson pdf contribution of one bin to the LogL
// (optionally filling also the gradient)

const double * x2 = 0;
// ...
double binVolume = 1;
std::vector<double> xc;
// ... (bin-volume case: compute bin centre and volume from the edges)
unsigned int ndim = data.NDim();
// ...
for (unsigned int j = 0; j < ndim; ++j) {
   binVolume *= std::abs( x2[j] - x1[j] );
   xc[j] = 0.5*( x2[j] + x1[j]);
}
// ...

const double * x = (useBinVolume) ? &xc.front() : x1;
// ...
if (!useBinIntegral ) {
   fval = func ( x, p );
}
else {
   // ... (use the integral of the model over the bin)
   fval = igEval( x1, x2 ) ;
}
if (useBinVolume) fval *= binVolume;

// ...
fval = std::max(fval, 0.0);   // avoid negative values of the model
double logPdf = - fval;
// ...

if (g == 0) return logPdf;
// ...

unsigned int npar = func.NPar();
// ... (use the analytical parameter gradient when the function provides it)
if (!useBinIntegral )
   // ...
// ... (otherwise estimate the gradient numerically)
SimpleGradientCalculator gc(func.NPar(), func);
if (!useBinIntegral )
   gc.ParameterGradient( x, p, fval, g);
// ...

// scale the gradient components to obtain the derivative of the log-pdf
for (unsigned int k = 0; k < npar; ++k) {
   // ...
   if (useBinVolume) g[k] *= binVolume;
   // ... (for non-empty bins)
   g[k] *= ( y/fval - 1.) ;
   // ... (protection when fval vanishes)
   const double kdmax1 = std::sqrt( std::numeric_limits<double>::max() );
   // ...
}

// (debug printout)
std::cout << "x = " << x[0] << " logPdf = " << logPdf << " grad";
for (unsigned int ipar = 0; ipar < npar; ++ipar)
   std::cout << g[ipar] << "\t";
std::cout << std::endl;
// FitUtil::EvaluatePoissonLogL: evaluate the Poisson log-likelihood over the binned data

unsigned int n = data.Size();
// ...
#ifdef USE_PARAMCACHE
// ...
#endif
nPoints = data.Size();
// ...

bool useW2 = (iWeight == 2);
// ...
double wrefVolume = 1.0;
// ...

// (debug printout)
std::cout << "Evaluate PoissonLogL for params = [ ";
for (unsigned int j = 0; j < func.NPar(); ++j) std::cout << p[j] << " , ";
std::cout << "] - data size = " << n << " useBinIntegral " << useBinIntegral << " useBinVolume "
          << useBinVolume << " useW2 " << useW2 << " wrefVolume = " << wrefVolume << std::endl;
// ...
#ifdef USE_PARAMCACHE
// ...
#endif

auto mapFunction = [&](const unsigned i) {
   // ...
   const double * x = nullptr;
   std::vector<double> xc;
   // ...
   double binVolume = 1.0;
   // ... (bin-volume case: compute bin centre and volume from the edges)
   unsigned int ndim = data.NDim();
   // ...
   xc.resize(data.NDim());
   for (unsigned int j = 0; j < ndim; ++j) {
      // ... (xx is the lower bin edge for coordinate j, x2 the upper edge)
      binVolume *= std::abs(x2[j] - xx);
      xc[j] = 0.5 * (x2[j] + xx);
   }
   // ...
   binVolume *= wrefVolume;
   } else if (data.NDim() > 1) {
      xc.resize(data.NDim());
      // ...
      for (unsigned int j = 1; j < data.NDim(); ++j) {
         // ...
      }
   }
   // ...

   if (!useBinIntegral) {
#ifdef USE_PARAMCACHE
      // ...
#endif
      // ...
   }
   // ... (otherwise use the bin integral of the model)
   if (useBinVolume) fval *= binVolume;

   // (debug printout, sampled every NSAMPLE events)
   if (i % NSAMPLE == 0) {
      std::cout << "evt " << i << " x = [ ";
      for (unsigned int j = 0; j < func.NDim(); ++j) std::cout << x[j] << " , ";
      // ...
      std::cout << "x2 = [ ";
      for (unsigned int j = 0; j < func.NDim(); ++j) std::cout << data.BinUpEdge(i)[j] << " , ";
      // ...
      std::cout << " y = " << y << " fval = " << fval << std::endl;
   }
   // ...

   fval = std::max(fval, 0.0);   // avoid negative values of the model
   // ...
   double nloglike = 0;          // negative log-likelihood contribution of the bin
   // ...

   // weighted Poisson likelihood case (useW2): the bin weight is error^2 / y
   double weight = 1.0;
   // ...
   double error = data.Error(i);
   weight = (error * error) / y;
   // ...
   nloglike += weight * ( fval - y);
   // ...

   // standard (unweighted) case
   if (extended) nloglike = fval - y;
   // ...

   // (debug printout)
   std::cout << " nll = " << nloglike << std::endl;
   // ...
};

// reduction: sum the per-bin contributions
auto redFunction = [](const std::vector<double> &objs) {
   return std::accumulate(objs.begin(), objs.end(), double{});
};

// ...
Warning("FitUtil::EvaluatePoissonLogL", "Multithread execution policy requires IMT, which is disabled. Changing "
                                        "to ROOT::Fit::ExecutionPolicy::kSerial.");
// ...
for (unsigned int i = 0; i < n; ++i) {
   res += mapFunction(i);
}
// ...
Error("FitUtil::EvaluatePoissonLogL",
      "Execution policy unknown. Available choices:\n ROOT::Fit::ExecutionPolicy::kSerial (default)\n ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
// ...

// (debug printout)
std::cout << "Loglikelihood = " << res << std::endl;
// FitUtil::EvaluatePoissonLogLGradient: fill grad with the gradient of the Poisson log-likelihood

assert(fg != nullptr);
// ...
#ifdef USE_PARAMCACHE
// ...
#endif

double wrefVolume = 1.0;
// ...

unsigned int npar = func.NPar();
unsigned initialNPoints = data.Size();

auto mapFunction = [&](const unsigned int i) {
   // ...
   std::vector<double> gradFunc(npar);
   std::vector<double> pointContribution(npar);
   // ...
   const auto y = data.Value(i);
   auto invError = data.Error(i);
   // ...
   invError = (invError != 0.0) ? 1.0 / invError : 1;
   // ...

   const double * x = nullptr;
   std::vector<double> xc;
   // ...
   unsigned ndim = data.NDim();
   double binVolume = 1.0;
   // ... (bin-volume case: compute bin centre and volume from the edges)
   for (unsigned int j = 0; j < ndim; ++j) {
      // ... (x1_j is the lower edge for coordinate j, x2 the upper edge)
      binVolume *= std::abs(x2[j] - x1_j);
      xc[j] = 0.5 * (x2[j] + x1_j);
   }
   // ...
   binVolume *= wrefVolume;
   } else if (ndim > 1) {
      // ...
      for (unsigned int j = 1; j < ndim; ++j)
         // ...
   }
   // ...

   if (!useBinIntegral) {
      // ... (model value and analytical parameter gradient)
   }
   // ... (otherwise integrate the model and its gradient over the bin)
   fval = igEval(x, x2);
   // ...

   // (debug printout for the first and last few points)
   if (i < 5 || (i > data.Size()-5) ) {
      if (data.NDim() > 1)
         std::cout << i << " x " << x[0] << " y " << x[1] << " func " << fval
                   << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
      else
         std::cout << i << " x " << x[0] << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
   }
   // ...

   for (unsigned int ipar = 0; ipar < npar; ++ipar) {
      // ... (correct the derivative for the bin volume)
      gradFunc[ipar] *= binVolume;
      // ... (regular case, positive model value)
      pointContribution[ipar] = gradFunc[ipar] * (1. - y / fval);
      else if (gradFunc[ipar] != 0) {
         // protection when the model value vanishes: use a large, clamped value
         const double kdmax1 = std::sqrt(std::numeric_limits<double>::max());
         const double kdmax2 = std::numeric_limits<double>::max() / (4 * initialNPoints);
         double gg = kdmax1 * gradFunc[ipar];
         gg = std::min(gg, kdmax2);
         // ...
         gg = std::max(gg, -kdmax2);
         pointContribution[ipar] = -gg;
      }
      // ...
   }
   // ...
   return pointContribution;
};

// reduction: component-wise sum of the per-point contributions
auto redFunction = [&](const std::vector<std::vector<double>> &pointContributions) {
   std::vector<double> result(npar);
   for (auto const &pointContribution : pointContributions) {
      for (unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
         result[parameterIndex] += pointContribution[parameterIndex];
   }
   // ...
};

std::vector<double> g(npar);
// ...
Warning("FitUtil::EvaluatePoissonLogLGradient",
        "Multithread execution policy requires IMT, which is disabled. Changing "
        "to ROOT::Fit::ExecutionPolicy::kSerial.");
// ...
std::vector<std::vector<double>> allGradients(initialNPoints);
for (unsigned int i = 0; i < initialNPoints; ++i) {
   allGradients[i] = mapFunction(i);
}
g = redFunction(allGradients);
// ...
Error("FitUtil::EvaluatePoissonLogLGradient",
      "Execution policy unknown. Available choices:\n 0: Serial (default)\n 1: MultiThread (requires IMT)\n");
// ...

std::copy(g.begin(), g.end(), grad);

// (debug printout)
std::cout << "***** Final gradient : ";
for (unsigned int ii = 0; ii < npar; ++ii) std::cout << grad[ii] << " ";
// setAutomaticChunking: derive the number of chunks from the number of events
// ... (ncpu is the size of the implicit multi-threading pool)
if (nEvents/ncpu < 1000)
   return ncpu;
return nEvents/1000;