57 template<
class GradFunc = IGradModelFunction>
// Finite-difference derivative fragments (interior lines missing from
// this chunk): central difference 0.5*(f2 - f1)/h on one branch,
// forward difference (f1 - f0)/h on the other. The branch condition is
// not visible here — TODO confirm against the full source.
103 deriv = 0.5 * ( f2 -
f1 )/
h;
106 deriv = (
f1 - f0 )/
h;
122 std::copy(p, p+
fN,
fVec.begin());
130 std::copy(p, p+
fN,
fVec.begin());
131 for (
unsigned int k = 0; k <
fN; ++k) {
// Gradient fragment: fills g[0..fN) with numerical parameter
// derivatives at point x, given a precomputed function value f0.
// Per-parameter step setup (original lines ~141-149) is missing from
// this chunk: central difference 0.5*(f2 - f1)/h on one branch,
// forward difference (f1 - f0)/h on the other — branch condition and
// the definitions of f1/f2/h are outside this view; confirm before
// relying on this description.
137 void Gradient(
const double *
x,
const double * p,
double f0,
double *
g) {
140 for (
unsigned int k = 0; k <
fN; ++k) {
150 g[k] = 0.5 * ( f2 -
f1 )/
h;
153 g[k] = (
f1 - f0 )/
h;
166 mutable std::vector<double>
fVec;
// Overflow-guard fragment (interior lines missing): when rval lies
// strictly inside (-DBL_MAX, DBL_MAX) the normal path is taken;
// otherwise (including NaN, for which both comparisons are false)
// +/-DBL_MAX is returned so that inf/NaN never reaches the minimizer.
173 if (rval > - std::numeric_limits<double>::max() && rval < std::numeric_limits<double>::max() )
177 return -std::numeric_limits<double>::max();
180 return + std::numeric_limits<double>::max();
// Overflow-clamp fragment (interior lines missing): when rval falls
// outside (-DBL_MAX, DBL_MAX), or is NaN (both comparisons false), it
// is clamped in place to -DBL_MAX or +DBL_MAX instead of being
// returned, unlike the sibling guard that returns the sentinel.
187 if (rval > - std::numeric_limits<double>::max() && rval < std::numeric_limits<double>::max() )
191 rval = -std::numeric_limits<double>::max();
196 rval = + std::numeric_limits<double>::max();
205 template <
class GFunc>
207 const double *x1,
const double * x2,
const double * p,
double *
g) {
213 unsigned int npar = gfunc.NPar();
214 for (
unsigned int k = 0; k < npar; ++k ) {
216 g[k] = igDerEval( x1, x2 );
238 unsigned int n = data.Size();
250 bool useBinIntegral = fitOpt.
fIntegral && data.HasBinEdges();
251 bool useBinVolume = (fitOpt.
fBinVolume && data.HasBinEdges());
255 std::cout <<
"\n\nFit data size = " <<
n << std::endl;
256 std::cout <<
"evaluate chi2 using function " << &func <<
" " << p << std::endl;
257 std::cout <<
"use empty bins " << fitOpt.
fUseEmpty << std::endl;
258 std::cout <<
"use integral " << useBinIntegral << std::endl;
259 std::cout <<
"use binvolume " << useBinVolume << std::endl;
260 std::cout <<
"use Exp Errors " << useExpErrors << std::endl;
261 std::cout <<
"use all error=1 " << fitOpt.
fErrors1 << std::endl;
262 if (isWeighted) std::cout <<
"Weighted data set - sumw = " << data.SumOfContent() <<
" sumw2 = " << data.SumOfError2() << std::endl;
275 double maxResValue = std::numeric_limits<double>::max() /
n;
276 double wrefVolume = 1.0;
283 auto mapFunction = [&](
const unsigned i){
288 const auto x1 = data.GetCoordComponent(i, 0);
289 const auto y = data.Value(i);
290 auto invError = data.InvError(i);
294 const double *
x =
nullptr;
295 std::vector<double> xc;
296 double binVolume = 1.0;
298 unsigned int ndim = data.NDim();
299 xc.resize(data.NDim());
300 for (
unsigned int j = 0; j < ndim; ++j) {
301 double xx = *data.GetCoordComponent(i, j);
302 double x2 = data.GetBinUpEdgeComponent(i, j);
303 binVolume *= std::abs(x2 - xx);
304 xc[j] = (useBinIntegral) ? xx : 0.5*(x2 + xx);
308 binVolume *= wrefVolume;
309 }
else if(data.NDim() > 1) {
312 xc.resize(data.NDim());
314 for (
unsigned int j = 1; j < data.NDim(); ++j)
315 xc[j] = *data.GetCoordComponent(i, j);
323 if (!useBinIntegral) {
327 fval = func (
x, p );
333 std::vector<double> x2(data.NDim());
334 data.GetBinUpEdgeCoordinates(i, x2.data());
335 fval = igEval(
x, x2.data());
339 if (useBinVolume) fval *= binVolume;
343 double invWeight = 1.0;
349 invWeight =
y * invError * invError;
353 invWeight = data.SumOfContent()/ data.SumOfError2();
356 double invError2 = (fval > 0) ? invWeight / fval : 0.0;
357 invError = std::sqrt(invError2);
362 std::cout <<
x[0] <<
" " <<
y <<
" " << 1./invError <<
" params : ";
363 for (
unsigned int ipar = 0; ipar < func.
NPar(); ++ipar)
364 std::cout << p[ipar] <<
"\t";
365 std::cout <<
"\tfval = " << fval <<
" bin volume " << binVolume <<
" ref " << wrefVolume << std::endl;
370 double tmp = (
y -fval )* invError;
371 double resval = tmp * tmp;
375 if ( resval < maxResValue )
386 auto redFunction = [](
const std::vector<double> & objs){
387 return std::accumulate(objs.begin(), objs.end(),
double{});
394 Warning(
"FitUtil::EvaluateChi2",
"Multithread execution policy requires IMT, which is disabled. Changing "
395 "to ROOT::EExecutionPolicy::kSequential.");
402 for (
unsigned int i=0; i<
n; ++i) {
403 res += mapFunction(i);
415 Error(
"FitUtil::EvaluateChi2",
"Execution policy unknown. Available choices:\n ROOT::EExecutionPolicy::kSequential (default)\n ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
434 unsigned int n = data.Size();
437 std::cout <<
"\n\nFit data size = " <<
n << std::endl;
438 std::cout <<
"evaluate effective chi2 using function " << &func <<
" " << p << std::endl;
441 assert(data.HaveCoordErrors() || data.HaveAsymErrors());
449 unsigned int ndim = func.
NDim();
454 double maxResValue = std::numeric_limits<double>::max() /
n;
458 for (
unsigned int i = 0; i <
n; ++ i) {
462 const double *
x = data.GetPoint(i,
y);
464 double fval = func(
x, p );
466 double delta_y_func =
y - fval;
470 const double *
ex =
nullptr;
471 if (!data.HaveAsymErrors() )
472 ex = data.GetPointError(i,
ey);
474 double eylow, eyhigh = 0;
475 ex = data.GetPointError(i, eylow, eyhigh);
476 if ( delta_y_func < 0)
484 while ( j < ndim &&
ex[j] == 0.) { j++; }
491 double kPrecision = 1.E-8;
492 for (
unsigned int icoord = 0; icoord < ndim; ++icoord) {
494 if (
ex[icoord] > 0) {
498 double x0=
x[icoord];
499 double h = std::max(
kEps* std::abs(
ex[icoord]), 8.0*kPrecision*(std::abs(x0) + kPrecision) );
501 double edx =
ex[icoord] * deriv;
504 std::cout <<
"error for coord " << icoord <<
" = " <<
ex[icoord] <<
" deriv " << deriv << std::endl;
509 double w2 = (e2 > 0) ? 1.0/e2 : 0;
510 double resval = w2 * (
y - fval ) * (
y - fval);
513 std::cout <<
x[0] <<
" " <<
y <<
" ex " <<
ex[0] <<
" ey " <<
ey <<
" params : ";
514 for (
unsigned int ipar = 0; ipar < func.
NPar(); ++ipar)
515 std::cout << p[ipar] <<
"\t";
516 std::cout <<
"\tfval = " << fval <<
"\tresval = " << resval << std::endl;
521 if ( resval < maxResValue )
534 std::cout <<
"chi2 = " << chi2 <<
" n = " << nPoints << std::endl;
551 MATH_ERROR_MSG(
"FitUtil::EvaluateChi2Residual",
"Error on the coordinates are not used in calculating Chi2 residual");
555 double y, invError = 0;
558 bool useBinIntegral = fitOpt.
fIntegral && data.HasBinEdges();
559 bool useBinVolume = (fitOpt.
fBinVolume && data.HasBinEdges());
563 const double * x1 = data.GetPoint(i,
y, invError);
565 unsigned int ndim = data.NDim();
566 double binVolume = 1.0;
567 const double * x2 =
nullptr;
568 if (useBinVolume || useBinIntegral) x2 = data.BinUpEdge(i);
570 std::vector<double> xc;
573 if (!useBinIntegral) {
575 for (
unsigned int j = 0; j < ndim; ++j) {
576 binVolume *= std::abs( x2[j]-x1[j] );
577 xc[j] = 0.5*(x2[j]+ x1[j]);
581 if (useNormBinVolume) binVolume /= data.RefVolume();
584 const double *
x = (useBinVolume) ? xc.data() : x1;
589 double fval0 = (useBinIntegral) ? igEval( x1, x2) : func (
x, p );
593 if (useBinVolume) fval = fval0*binVolume;
599 double invWeight = 1.0;
600 if (data.IsWeighted() && !fitOpt.
fErrors1 ) {
601 invWeight =
y * invError * invError;
602 if (
y == 0) invWeight = (data.SumOfError2() > 0) ? data.SumOfContent()/ data.SumOfError2() : 1.0;
605 double invError2 = (fval > 0) ? invWeight / fval : 0.0;
606 invError = std::sqrt(invError2);
610 double resval = (
y -fval ) * invError;
618 unsigned int npar = func.
NPar();
624 if (!
h ) useFullHessian =
false;
626 if (useFullHessian && (!gfunc || useBinIntegral || (gfunc && !gfunc->
HasParameterHessian())))
627 return std::numeric_limits<double>::quiet_NaN();
631 if (!useBinIntegral ) {
633 if (useFullHessian) {
644 if (!useBinIntegral ) {
653 for (
unsigned int k = 0; k < npar; ++k) {
655 if (useBinVolume)
g[k] *= binVolume;
657 for (
unsigned int l = 0;
l <= k;
l++) {
658 unsigned int idx =
l + k * (k + 1) / 2;
659 if (useFullHessian) {
660 h[idx] *= 2.* resval * (-invError);
661 if (useBinVolume)
h[idx] *= binVolume;
667 h[idx] += 2. *
g[k]*
g[
l];
687 if (data.HaveCoordErrors()) {
689 "Error on the coordinates are not used in calculating Chi2 gradient");
693 const IGradModelFunction *fg =
dynamic_cast<const IGradModelFunction *
>(&
f);
694 assert(fg !=
nullptr);
696 const IGradModelFunction &func = *fg;
699 std::cout <<
"\n\nFit data size = " << nPoints << std::endl;
700 std::cout <<
"evaluate chi2 using function gradient " << &func <<
" " << p << std::endl;
703 const DataOptions &fitOpt = data.Opt();
704 bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
705 bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());
707 double wrefVolume = 1.0;
709 if (fitOpt.fNormBinVolume) wrefVolume /= data.RefVolume();
717 IntegralEvaluator<> igEval(func, p, useBinIntegral,igType);
719 unsigned int npar = func.NPar();
720 unsigned initialNPoints = data.Size();
722 std::vector<bool> isPointRejected(initialNPoints);
724 auto mapFunction = [&](
const unsigned int i) {
726 std::vector<double> gradFunc(npar);
727 std::vector<double> pointContribution(npar);
729 const auto x1 = data.GetCoordComponent(i, 0);
730 const auto y = data.Value(i);
731 auto invError = data.Error(i);
733 invError = (invError != 0.0) ? 1.0 / invError : 1;
737 const double *
x =
nullptr;
738 std::vector<double> xc;
740 unsigned int ndim = data.NDim();
741 double binVolume = 1;
744 for (
unsigned int j = 0; j < ndim; ++j) {
745 double x1_j = *data.GetCoordComponent(i, j);
746 double x2_j = data.GetBinUpEdgeComponent(i, j);
747 binVolume *= std::abs(x2_j - x1_j);
748 xc[j] = (useBinIntegral) ? x1_j : 0.5 * (x2_j + x1_j);
754 binVolume *= wrefVolume;
755 }
else if (ndim > 1) {
758 for (
unsigned int j = 1; j < ndim; ++j)
759 xc[j] = *data.GetCoordComponent(i, j);
765 if (!useBinIntegral) {
767 func.ParameterGradient(
x, p, &gradFunc[0]);
769 std::vector<double> x2(data.NDim());
770 data.GetBinUpEdgeCoordinates(i, x2.data());
773 fval = igEval(
x, x2.data());
780 std::cout <<
x[0] <<
" " <<
y <<
" " << 1. / invError <<
" params : ";
781 for (
unsigned int ipar = 0; ipar < npar; ++ipar)
782 std::cout << p[ipar] <<
"\t";
783 std::cout <<
"\tfval = " << fval << std::endl;
786 isPointRejected[i] =
true;
788 return pointContribution;
792 unsigned int ipar = 0;
793 for (; ipar < npar; ++ipar) {
797 gradFunc[ipar] *= binVolume;
801 double dfval = gradFunc[ipar];
807 pointContribution[ipar] = -2.0 * (
y - fval) * invError * invError * gradFunc[ipar];
812 isPointRejected[i] =
true;
815 return pointContribution;
819 auto redFunction = [&](
const std::vector<std::vector<double>> &pointContributions) {
820 std::vector<double> result(npar);
822 for (
auto const &pointContribution : pointContributions) {
823 for (
unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
824 result[parameterIndex] += pointContribution[parameterIndex];
830 std::vector<double>
g(npar);
835 Warning(
"FitUtil::EvaluateChi2Gradient",
"Multithread execution policy requires IMT, which is disabled. Changing "
836 "to ROOT::EExecutionPolicy::kSequential.");
842 std::vector<std::vector<double>> allGradients(initialNPoints);
843 for (
unsigned int i = 0; i < initialNPoints; ++i) {
844 allGradients[i] = mapFunction(i);
846 g = redFunction(allGradients);
850 ROOT::TThreadExecutor pool;
852 g = pool.
MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, initialNPoints), redFunction, chunks);
860 Error(
"FitUtil::EvaluateChi2Gradient",
861 "Execution policy unknown. Available choices:\n 0: Serial (default)\n 1: MultiThread (requires IMT)\n");
870 nPoints = initialNPoints;
872 if (std::any_of(isPointRejected.begin(), isPointRejected.end(), [](
bool point) { return point; })) {
873 unsigned nRejected = std::accumulate(isPointRejected.begin(), isPointRejected.end(), 0);
874 assert(nRejected <= initialNPoints);
875 nPoints = initialNPoints - nRejected;
879 "Error - too many points rejected for overflow in gradient calculation");
883 std::copy(
g.begin(),
g.end(), grad);
902 const double *
x = data.Coords(i);
903 double fval = func (
x, p );
906 if (
g ==
nullptr)
return logPdf;
923 for (
unsigned int ipar = 0; ipar < func.
NPar(); ++ipar) {
928 std::cout <<
x[i] <<
"\t";
929 std::cout <<
"\tpar = [ " << func.
NPar() <<
" ] = ";
930 for (
unsigned int ipar = 0; ipar < func.
NPar(); ++ipar)
931 std::cout << p[ipar] <<
"\t";
932 std::cout <<
"\tfval = " << fval;
933 std::cout <<
"\tgrad = [ ";
934 for (
unsigned int ipar = 0; ipar < func.
NPar(); ++ipar)
935 std::cout <<
g[ipar] <<
"\t";
936 std::cout <<
" ] " << std::endl;
944 int iWeight,
bool extended,
unsigned int &nPoints,
949 unsigned int n = data.Size();
953 bool normalizeFunc =
false;
957 (
const_cast<IModelFunctionTempl<double> &
>(func)).
SetParameters(p);
960 nPoints = data.Size();
965 if (!normalizeFunc) {
966 if (data.NDim() == 1) {
967 const double *
x = data.GetCoordComponent(0,0);
971 std::vector<double>
x(data.NDim());
972 for (
unsigned int j = 0; j < data.NDim(); ++j)
973 x[j] = *data.GetCoordComponent(0, j);
982 std::vector<double>
xmin(data.NDim());
983 std::vector<double>
xmax(data.NDim());
984 IntegralEvaluator<> igEval(func, p,
true);
986 if (data.Range().Size() > 0) {
990 norm += igEval.Integral(
xmin.data(),
xmax.data());
996 if (func(
xmin.data(), p) != 0 || func(
xmax.data(), p) != 0) {
998 "A range has not been set and the function is not zero at +/- inf");
1001 norm = igEval.Integral(&
xmin[0], &
xmax[0]);
1007 auto mapFunction = [&](
const unsigned i) {
1012 if (data.NDim() > 1) {
1013 std::vector<double>
x(data.NDim());
1014 for (
unsigned int j = 0; j < data.NDim(); ++j)
1015 x[j] = *data.GetCoordComponent(i, j);
1016#ifdef USE_PARAMCACHE
1017 fval = func(
x.data());
1019 fval = func(
x.data(), p);
1024 const auto x = data.GetCoordComponent(i, 0);
1025#ifdef USE_PARAMCACHE
1033 fval = fval * (1 / norm);
1038 double weight = data.Weight(i);
1045 W2 = weight * weight;
1049 return LikelihoodAux<double>(logval, W, W2);
1060 auto redFunction = [](
const std::vector<LikelihoodAux<double>> & objs){
1061 auto l0 = LikelihoodAux<double>(0.0,0.0,0.0);
1062 for (
auto &
l : objs ) {
1072 Warning(
"FitUtil::EvaluateLogL",
"Multithread execution policy requires IMT, which is disabled. Changing "
1073 "to ROOT::EExecutionPolicy::kSequential.");
1082 for (
unsigned int i=0; i<
n; ++i) {
1083 auto resArray = mapFunction(i);
1084 logl+=resArray.logvalue;
1085 sumW+=resArray.weight;
1086 sumW2+=resArray.weight2;
1090 ROOT::TThreadExecutor pool;
1092 auto resArray = pool.
MapReduce(mapFunction, ROOT::TSeq<unsigned>(0,
n), redFunction, chunks);
1093 logl=resArray.logvalue;
1094 sumW=resArray.weight;
1095 sumW2=resArray.weight2;
1101 Error(
"FitUtil::EvaluateLogL",
"Execution policy unknown. Available choices:\n ROOT::EExecutionPolicy::kSequential (default)\n ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
1106 double extendedTerm = 0;
1110 if (!normalizeFunc) {
1111 IntegralEvaluator<> igEval( func, p,
true);
1112 std::vector<double>
xmin(data.NDim());
1113 std::vector<double>
xmax(data.NDim());
1116 if (data.Range().Size() > 0 ) {
1120 nuTot += igEval.Integral(
xmin.data(),
xmax.data());
1126 if (func(
xmin.data(), p) != 0 || func(
xmax.data(), p) != 0) {
1127 MATH_ERROR_MSG(
"FitUtil::EvaluateLogLikelihood",
"A range has not been set and the function is not zero at +/- inf");
1130 nuTot = igEval.Integral(&
xmin[0],&
xmax[0]);
1136 extendedTerm = - nuTot;
1140 extendedTerm = - (sumW2 / sumW) * nuTot;
1149 logl += extendedTerm;
1154 std::cout <<
"Evaluated log L for parameters (";
1155 for (
unsigned int ip = 0; ip < func.NPar(); ++ip)
1156 std::cout <<
" " << p[ip];
1157 std::cout <<
") fval = " << -logl << std::endl;
1169 assert(fg !=
nullptr);
1173 unsigned int npar = func.
NPar();
1174 unsigned initialNPoints = data.Size();
1179 std::cout <<
"\n===> Evaluate Gradient for parameters ";
1180 for (
unsigned int ip = 0; ip < npar; ++ip)
1181 std::cout <<
" " << p[ip];
1185 const double kdmax1 = std::sqrt(std::numeric_limits<double>::max());
1186 const double kdmax2 = std::numeric_limits<double>::max() / (4 * initialNPoints);
1188 auto mapFunction = [&](
const unsigned int i) {
1189 std::vector<double> gradFunc(npar);
1190 std::vector<double> pointContribution(npar);
1193 const double *
x =
nullptr;
1194 std::vector<double> xc;
1195 if (data.NDim() > 1) {
1196 xc.resize(data.NDim() );
1197 for (
unsigned int j = 0; j < data.NDim(); ++j)
1198 xc[j] = *data.GetCoordComponent(i, j);
1201 x = data.GetCoordComponent(i, 0);
1204 double fval = func(
x, p);
1205 func.ParameterGradient(
x, p, &gradFunc[0]);
1210 if (i < 5 || (i > data.Size()-5) ) {
1212 <<
" gradient " << gradFunc[0] <<
" " << gradFunc[1] <<
" " << gradFunc[3] << std::endl;
1213 else std::cout << i <<
" x " <<
x[0] <<
" gradient " << gradFunc[0] <<
" " << gradFunc[1] <<
" " << gradFunc[3] << std::endl;
1218 for (
unsigned int kpar = 0; kpar < npar; ++kpar) {
1220 pointContribution[kpar] = -1. / fval * gradFunc[kpar];
1221 else if (gradFunc[kpar] != 0) {
1222 double gg = kdmax1 * gradFunc[kpar];
1224 gg = std::min(gg, kdmax2);
1226 gg = std::max(gg, -kdmax2);
1227 pointContribution[kpar] = -gg;
1232 return pointContribution;
1236 auto redFunction = [&](
const std::vector<std::vector<double>> &pointContributions) {
1237 std::vector<double> result(npar);
1239 for (
auto const &pointContribution : pointContributions) {
1240 for (
unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
1241 result[parameterIndex] += pointContribution[parameterIndex];
1247 std::vector<double>
g(npar);
1252 Warning(
"FitUtil::EvaluateLogLGradient",
"Multithread execution policy requires IMT, which is disabled. Changing "
1253 "to ROOT::EExecutionPolicy::kSequential.");
1259 std::vector<std::vector<double>> allGradients(initialNPoints);
1260 for (
unsigned int i = 0; i < initialNPoints; ++i) {
1261 allGradients[i] = mapFunction(i);
1263 g = redFunction(allGradients);
1267 ROOT::TThreadExecutor pool;
1269 g = pool.
MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, initialNPoints), redFunction, chunks);
1273 Error(
"FitUtil::EvaluateLogLGradient",
"Execution policy unknown. Available choices:\n "
1274 "ROOT::EExecutionPolicy::kSequential (default)\n "
1275 "ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
1284 std::copy(
g.begin(),
g.end(), grad);
1285 nPoints = data.Size();
1288 std::cout <<
"FitUtil.cxx : Final gradient ";
1289 for (
unsigned int param = 0; param < npar; param++) {
1290 std::cout <<
" " << grad[param];
1303 const double * x1 = data.GetPoint(i,
y);
1306 bool useBinIntegral = fitOpt.
fIntegral && data.HasBinEdges();
1307 bool useBinVolume = (fitOpt.
fBinVolume && data.HasBinEdges());
1310 const double * x2 =
nullptr;
1312 double binVolume = 1;
1313 std::vector<double> xc;
1315 unsigned int ndim = data.NDim();
1317 for (
unsigned int j = 0; j < ndim; ++j) {
1318 double x2j = data.GetBinUpEdgeComponent(i, j);
1319 binVolume *= std::abs( x2j-x1[j] );
1320 xc[j] = 0.5*(x2j+ x1[j]);
1323 binVolume /= data.RefVolume();
1326 const double *
x = (useBinVolume) ? &xc.front() : x1;
1329 if (!useBinIntegral ) {
1330 fval0 = func (
x, p );
1334 std::vector<double> vx2(data.NDim());
1335 data.GetBinUpEdgeCoordinates(i, vx2.data());
1336 fval0 = igEval( x1, vx2.data() ) ;
1338 double fval = fval0;
1339 if (useBinVolume) fval = fval0*binVolume;
1342 fval = std::max(fval, 0.0);
1343 double nlogPdf = fval;
1349 if (
g ==
nullptr)
return nlogPdf;
1351 unsigned int npar = func.
NPar();
1356 if (useFullHessian && (!gfunc || useBinIntegral || (gfunc && !gfunc->
HasParameterHessian())))
1357 return std::numeric_limits<double>::quiet_NaN();
1362 if (!useBinIntegral ) {
1364 if (useFullHessian &&
h) {
1366 return std::numeric_limits<double>::quiet_NaN();
1368 if (!goodHessFunc) {
1369 return std::numeric_limits<double>::quiet_NaN();
1381 if (!useBinIntegral )
1389 double coeffGrad = (fval > 0) ? (1. -
y/fval) : ( (
y > 0) ? std::sqrt( std::numeric_limits<double>::max() ) : 1. );
1390 double coeffHess = (fval > 0) ?
y/(fval*fval) : ( (
y > 0) ? std::sqrt( std::numeric_limits<double>::max() ) : 0. );
1392 coeffGrad *= binVolume;
1393 coeffHess *= binVolume*binVolume;
1395 for (
unsigned int k = 0; k < npar; ++k) {
1398 for (
unsigned int l = k;
l < npar; ++
l) {
1399 unsigned int idx = k +
l * (
l + 1) / 2;
1400 if (useFullHessian) {
1401 h[idx] *= coeffGrad;
1407 h[idx] += coeffHess *
g[k]*
g[
l];
1418 std::cout <<
"x = " <<
x[0] <<
" y " <<
y <<
" fval " << fval <<
" logPdf = " << nlogPdf <<
" gradient : ";
1419 for (
unsigned int ipar = 0; ipar < npar; ++ipar)
1420 std::cout <<
g[ipar] <<
"\t";
1422 std::cout <<
"\thessian : ";
1423 for (
unsigned int ipar = 0; ipar < npar; ++ipar) {
1425 for (
unsigned int jpar = 0; jpar <= ipar; ++jpar) {
1426 std::cout <<
h[ipar + jpar * (jpar + 1) / 2] <<
"\t";
1431 std::cout << std::endl;
1459 unsigned int n = data.Size();
1461#ifdef USE_PARAMCACHE
1465 nPoints = data.Size();
1470 bool useBinIntegral = fitOpt.
fIntegral && data.HasBinEdges();
1471 bool useBinVolume = (fitOpt.
fBinVolume && data.HasBinEdges());
1472 bool useW2 = (iWeight == 2);
1475 double wrefVolume = 1.0;
1482 std::cout <<
"Evaluate PoissonLogL for params = [ ";
1483 for (
unsigned int j = 0; j < func.NPar(); ++j) std::cout << p[j] <<
" , ";
1484 std::cout <<
"] - data size = " <<
n <<
" useBinIntegral " << useBinIntegral <<
" useBinVolume "
1485 << useBinVolume <<
" useW2 " << useW2 <<
" wrefVolume = " << wrefVolume << std::endl;
1494#ifdef USE_PARAMCACHE
1495 IntegralEvaluator<> igEval(func,
nullptr, useBinIntegral, igType);
1497 IntegralEvaluator<> igEval(func, p, useBinIntegral, igType);
1500 auto mapFunction = [&](
const unsigned i) {
1501 auto x1 = data.GetCoordComponent(i, 0);
1502 auto y = *data.ValuePtr(i);
1504 const double *
x =
nullptr;
1505 std::vector<double> xc;
1507 double binVolume = 1.0;
1510 unsigned int ndim = data.NDim();
1511 xc.resize(data.NDim());
1512 for (
unsigned int j = 0; j < ndim; ++j) {
1513 double xx = *data.GetCoordComponent(i, j);
1514 double x2 = data.GetBinUpEdgeComponent(i, j);
1515 binVolume *= std::abs(x2 - xx);
1516 xc[j] = (useBinIntegral) ? xx : 0.5 * (x2 + xx);
1520 binVolume *= wrefVolume;
1521 }
else if (data.NDim() > 1) {
1522 xc.resize(data.NDim());
1524 for (
unsigned int j = 1; j < data.NDim(); ++j) {
1525 xc[j] = *data.GetCoordComponent(i, j);
1532 if (!useBinIntegral) {
1533#ifdef USE_PARAMCACHE
1541 std::vector<double> x2(data.NDim());
1542 data.GetBinUpEdgeCoordinates(i, x2.data());
1543 fval = igEval(
x, x2.data());
1545 if (useBinVolume) fval *= binVolume;
1551 if (i % NSAMPLE == 0) {
1552 std::cout <<
"evt " << i <<
" x = [ ";
1553 for (
unsigned int j = 0; j < func.NDim(); ++j) std::cout <<
x[j] <<
" , ";
1556 std::cout <<
"x2 = [ ";
1557 for (
unsigned int j = 0; j < func.NDim(); ++j) std::cout << data.GetBinUpEdgeComponent(i, j) <<
" , ";
1560 std::cout <<
" y = " <<
y <<
" fval = " << fval << std::endl;
1567 fval = std::max(fval, 0.0);
1569 double nloglike = 0;
1578 double weight = 1.0;
1580 double error = data.Error(i);
1581 weight = (error * error) /
y;
1589 nloglike += weight * ( fval -
y);
1597 if (extended) nloglike = fval -
y;
1606 std::cout <<
" nll = " << nloglike << std::endl;
1613 auto redFunction = [](
const std::vector<double> &objs) {
1614 return std::accumulate(objs.begin(), objs.end(),
double{});
1621 Warning(
"FitUtil::EvaluatePoissonLogL",
"Multithread execution policy requires IMT, which is disabled. Changing "
1622 "to ROOT::EExecutionPolicy::kSequential.");
1629 for (
unsigned int i = 0; i <
n; ++i) {
1630 res += mapFunction(i);
1634 ROOT::TThreadExecutor pool;
1636 res = pool.
MapReduce(mapFunction, ROOT::TSeq<unsigned>(0,
n), redFunction, chunks);
1642 Error(
"FitUtil::EvaluatePoissonLogL",
1643 "Execution policy unknown. Available choices:\n ROOT::EExecutionPolicy::kSequential (default)\n ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
1647 std::cout <<
"Loglikelihood = " << res << std::endl;
1659 assert(fg !=
nullptr);
1663#ifdef USE_PARAMCACHE
1668 bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
1669 bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());
1671 double wrefVolume = 1.0;
1672 if (useBinVolume && fitOpt.fNormBinVolume)
1673 wrefVolume /= data.RefVolume();
1681 IntegralEvaluator<> igEval(func, p, useBinIntegral, igType);
1683 unsigned int npar = func.NPar();
1684 unsigned initialNPoints = data.Size();
1686 auto mapFunction = [&](
const unsigned int i) {
1688 std::vector<double> gradFunc(npar);
1689 std::vector<double> pointContribution(npar);
1691 const auto x1 = data.GetCoordComponent(i, 0);
1692 const auto y = data.Value(i);
1693 auto invError = data.Error(i);
1695 invError = (invError != 0.0) ? 1.0 / invError : 1;
1699 const double *
x =
nullptr;
1700 std::vector<double> xc;
1702 unsigned ndim = data.NDim();
1703 double binVolume = 1.0;
1708 for (
unsigned int j = 0; j < ndim; ++j) {
1709 double x1_j = *data.GetCoordComponent(i, j);
1710 double x2_j = data.GetBinUpEdgeComponent(i, j);
1711 binVolume *= std::abs(x2_j - x1_j);
1712 xc[j] = (useBinIntegral) ? x1_j : 0.5 * (x2_j + x1_j);
1718 binVolume *= wrefVolume;
1719 }
else if (ndim > 1) {
1722 for (
unsigned int j = 1; j < ndim; ++j)
1723 xc[j] = *data.GetCoordComponent(i, j);
1729 if (!useBinIntegral) {
1731 func.ParameterGradient(
x, p, &gradFunc[0]);
1735 std::vector<double> x2(data.NDim());
1736 data.GetBinUpEdgeCoordinates(i, x2.data());
1737 fval = igEval(
x, x2.data());
1746 if (i < 5 || (i > data.Size()-5) ) {
1748 <<
" gradient " << gradFunc[0] <<
" " << gradFunc[1] <<
" " << gradFunc[3] << std::endl;
1749 else std::cout << i <<
" x " <<
x[0] <<
" gradient " << gradFunc[0] <<
" " << gradFunc[1] <<
" " << gradFunc[3] << std::endl;
1755 for (
unsigned int ipar = 0; ipar < npar; ++ipar) {
1759 gradFunc[ipar] *= binVolume;
1763 pointContribution[ipar] = gradFunc[ipar] * (1. -
y / fval);
1764 else if (gradFunc[ipar] != 0) {
1765 const double kdmax1 = std::sqrt(std::numeric_limits<double>::max());
1766 const double kdmax2 = std::numeric_limits<double>::max() / (4 * initialNPoints);
1767 double gg = kdmax1 * gradFunc[ipar];
1769 gg = std::min(gg, kdmax2);
1771 gg = std::max(gg, -kdmax2);
1772 pointContribution[ipar] = -gg;
1777 return pointContribution;
1781 auto redFunction = [&](
const std::vector<std::vector<double>> &pointContributions) {
1782 std::vector<double> result(npar);
1784 for (
auto const &pointContribution : pointContributions) {
1785 for (
unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
1786 result[parameterIndex] += pointContribution[parameterIndex];
1792 std::vector<double>
g(npar);
1797 Warning(
"FitUtil::EvaluatePoissonLogLGradient",
1798 "Multithread execution policy requires IMT, which is disabled. Changing "
1799 "to ROOT::EExecutionPolicy::kSequential.");
1805 std::vector<std::vector<double>> allGradients(initialNPoints);
1806 for (
unsigned int i = 0; i < initialNPoints; ++i) {
1807 allGradients[i] = mapFunction(i);
1809 g = redFunction(allGradients);
1813 ROOT::TThreadExecutor pool;
1815 g = pool.
MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, initialNPoints), redFunction, chunks);
1824 Error(
"FitUtil::EvaluatePoissonLogLGradient",
1825 "Execution policy unknown. Available choices:\n 0: Serial (default)\n 1: MultiThread (requires IMT)\n");
1834 std::copy(
g.begin(),
g.end(), grad);
1837 std::cout <<
"***** Final gradient : ";
1838 for (
unsigned int ii = 0; ii< npar; ++ii) std::cout << grad[ii] <<
" ";
// Automatic-chunking fragment: returns ncpu when fewer than ~1000
// events would land on each CPU, otherwise nEvents/1000 — presumably
// the chunk count handed to TThreadExecutor::MapReduce (the enclosing
// function signature is outside this chunk; confirm against callers).
1847 if (nEvents/ncpu < 1000)
return ncpu;
1848 return nEvents/1000;
1852#ifdef R__HAS_STD_EXPERIMENTAL_SIMD
// SIMD max-value fragment: constructs a vector T with every lane set to
// the largest representable scalar value (overflow sentinel for the
// vectorized evaluators). Enclosing function signature is outside this
// chunk.
1860 return T{std::numeric_limits<typename T::value_type>::max()};
// Load(v, ptr) fragment: element-wise copy from a scalar array into the
// SIMD vector v, one lane at a time. The loop body (original line
// ~1867) is missing from this chunk — presumably v[i] = ptr[i];
// confirm against the full source.
1863template <
typename V,
typename S>
1864void Load(V &
v, S
const *ptr)
1866 for (
size_t i = 0; i < V::size(); ++i)
// ReduceAdd(v) fragment: horizontal reduction of all SIMD lanes into a
// single scalar accumulator initialized to zero. The loop body and the
// return statement are missing from this chunk — presumably
// result += v[i]; return result; — confirm against the full source.
1870template <
typename T>
1871auto ReduceAdd(
const T &
v)
1873 typename T::value_type result(0);
1874 for (
size_t i = 0; i < T::size(); ++i) {
// MaskEmpty(mask) fragment: scans every lane of a SIMD mask; the loop
// body and return are missing from this chunk. NOTE(review): the name
// suggests it returns true only when no lane is set — confirm against
// the full source before relying on this.
1880template <
typename T>
1881bool MaskEmpty(T mask)
1883 for (
size_t i = 0; i < T::size(); ++i)
// Int2Mask(i) fragment: builds a SIMD mask from an integer lane count
// (loop body missing from this chunk — presumably enables lanes j < i).
// Used to process the tail when the data size is not a multiple of the
// vector width (see the remainingPoints handling in EvaluateLogL).
1889template <
class T = ROOT::Double_v>
1890auto Int2Mask(
unsigned i)
1893 for (
unsigned j = 0; j < T::size(); j++) {
1911 unsigned int n = data.Size();
1912 nPoints = data.Size();
1915#ifdef USE_PARAMCACHE
1922 const DataOptions &fitOpt = data.Opt();
1923 if (fitOpt.fBinVolume || fitOpt.fIntegral || fitOpt.fExpErrors)
1924 Error(
"FitUtil::EvaluateChi2",
1925 "The vectorized implementation doesn't support Integrals, BinVolume or ExpErrors\n. Aborting operation.");
1929 double maxResValue = std::numeric_limits<double>::max() /
n;
1930 std::vector<double> ones{1., 1., 1., 1.};
1931 auto vecSize = Double_v::size();
1933 auto mapFunction = [&](
unsigned int i) {
1935 Double_v x1,
y, invErrorVec;
1936 Load(x1, data.GetCoordComponent(i * vecSize, 0));
1937 Load(
y, data.ValuePtr(i * vecSize));
1938 const auto invError = data.ErrorPtr(i * vecSize);
1939 auto invErrorptr = (invError !=
nullptr) ? invError : &ones.front();
1940 Load(invErrorVec, invErrorptr);
1943 std::vector<Double_v> xc;
1944 if (data.NDim() > 1) {
1945 xc.resize(data.NDim());
1947 for (
unsigned int j = 1; j < data.NDim(); ++j)
1948 Load(xc[j], data.GetCoordComponent(i * vecSize, j));
1956#ifdef USE_PARAMCACHE
1962 Double_v tmp = (
y - fval) * invErrorVec;
1963 Double_v chi2 = tmp * tmp;
1966 where(chi2 > maxResValue, chi2) = maxResValue;
1971 auto redFunction = [](
const std::vector<Double_v> &objs) {
1972 return std::accumulate(objs.begin(), objs.end(), Double_v{});
1980 Warning(
"FitUtil::EvaluateChi2",
"Multithread execution policy requires IMT, which is disabled. Changing "
1981 "to ::ROOT::EExecutionPolicy::kSequential.");
1988 ROOT::TSequentialExecutor pool;
1989 res = pool.
MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, data.Size() / vecSize), redFunction);
1992 ROOT::TThreadExecutor pool;
1994 res = pool.
MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, data.Size() / vecSize), redFunction, chunks);
1997 Error(
"FitUtil::EvaluateChi2",
1998 "Execution policy unknown. Available choices:\n ::ROOT::EExecutionPolicy::kSequential (default)\n "
1999 "::ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
2003 if (data.Size() % vecSize != 0)
2006 return ReduceAdd(res);
2010 const double *
const p,
int iWeight,
bool extended,
unsigned int &nPoints,
2014 unsigned int n = data.Size();
2015 nPoints = data.Size();
2018 bool normalizeFunc =
false;
2021#ifdef USE_PARAMCACHE
2028 if (!normalizeFunc) {
2029 if (data.NDim() == 1) {
2031 Load(
x, data.GetCoordComponent(0, 0));
2034 std::vector<Double_v>
x(data.NDim());
2035 for (
unsigned int j = 0; j < data.NDim(); ++j)
2036 Load(
x[j], data.GetCoordComponent(0, j));
2044 if (normalizeFunc) {
2046 std::vector<double>
xmin(data.NDim());
2047 std::vector<double>
xmax(data.NDim());
2050 if (data.Range().Size() > 0) {
2054 norm += igEval.Integral(
xmin.data(),
xmax.data());
2062 Load(xmin_v,
xmin.data());
2063 Load(xmax_v,
xmax.data());
2064 if (ReduceAdd(func(&xmin_v, p)) != 0 || ReduceAdd(func(&xmax_v, p)) != 0) {
2066 "A range has not been set and the function is not zero at +/- inf");
2069 norm = igEval.Integral(&
xmin[0], &
xmax[0]);
2075 auto vecSize = Double_v::size();
2076 unsigned int numVectors =
n / vecSize;
2078 auto mapFunction = [&, p](
const unsigned i) {
2086 Load(x1, data.GetCoordComponent(i * vecSize, 0));
2087 const Double_v *
x =
nullptr;
2088 unsigned int ndim = data.NDim();
2089 std::vector<Double_v> xc;
2093 for (
unsigned int j = 1; j < ndim; ++j)
2094 Load(xc[j], data.GetCoordComponent(i * vecSize, j));
2100#ifdef USE_PARAMCACHE
2107 if (i < 5 || (i > numVectors - 5)) {
2109 std::cout << i <<
" x " <<
x[0] <<
" fval = " << fval;
2111 std::cout << i <<
" x " <<
x[0] <<
" y " <<
x[1] <<
" fval = " << fval;
2116 fval = fval * (1 / norm);
2122 if (data.WeightsPtr(i) ==
nullptr)
2125 Load(weight, data.WeightsPtr(i * vecSize));
2132 W2 = weight * weight;
2137 if (i < 5 || (i > numVectors - 5)) {
2138 std::cout <<
" " << fval <<
" logfval " << logval << std::endl;
2145 auto redFunction = [](
const std::vector<LikelihoodAux<Double_v>> &objs) {
2146 return std::accumulate(
2156 Warning(
"FitUtil::EvaluateLogL",
"Multithread execution policy requires IMT, which is disabled. Changing "
2157 "to ::ROOT::EExecutionPolicy::kSequential.");
2165 ROOT::Fit::FitUtil::LikelihoodAux<Double_v> resArray;
2167 ROOT::TSequentialExecutor pool;
2168 resArray = pool.
MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, data.Size() / vecSize), redFunction);
2171 ROOT::TThreadExecutor pool;
2173 resArray = pool.
MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, data.Size() / vecSize), redFunction, chunks);
2176 Error(
"FitUtil::EvaluateLogL",
2177 "Execution policy unknown. Available choices:\n ::ROOT::EExecutionPolicy::kSequential (default)\n "
2178 "::ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
2182 sumW_v = resArray.
weight;
2186 unsigned int remainingPoints =
n % vecSize;
2187 if (remainingPoints > 0) {
2188 auto remainingPointsContribution = mapFunction(numVectors);
2190 auto remainingMask = Int2Mask(remainingPoints);
2191 where(remainingMask, logl_v) = logl_v + remainingPointsContribution.logvalue;
2192 where(remainingMask, sumW_v) = sumW_v + remainingPointsContribution.weight;
2193 where(remainingMask, sumW2_v) = sumW2_v + remainingPointsContribution.weight2;
2197 double logl = ReduceAdd(logl_v);
2198 double sumW = ReduceAdd(sumW_v);
2199 double sumW2 = ReduceAdd(sumW2_v);
2203 double extendedTerm = 0;
2207 if (!normalizeFunc) {
2209 std::vector<double>
xmin(data.NDim());
2210 std::vector<double>
xmax(data.NDim());
2213 if (data.Range().Size() > 0) {
2217 nuTot += igEval.Integral(
xmin.data(),
xmax.data());
2223 Double_v xmin_v, xmax_v;
2224 Load(xmin_v,
xmin.data());
2225 Load(xmax_v,
xmax.data());
2226 if (ReduceAdd(func(&xmin_v, p)) != 0 || ReduceAdd(func(&xmax_v, p)) != 0) {
2228 "A range has not been set and the function is not zero at +/- inf");
2231 nuTot = igEval.Integral(&
xmin[0], &
xmax[0]);
2237 extendedTerm = -nuTot;
2241 extendedTerm = -(sumW2 / sumW) * nuTot;
2250 logl += extendedTerm;
2254 std::cout <<
"Evaluated log L for parameters (";
2255 for (
unsigned int ip = 0; ip < func.NPar(); ++ip)
2256 std::cout <<
" " << p[ip];
2257 std::cout <<
") nll = " << -logl << std::endl;
2264 const double *p,
int iWeight,
bool extended,
unsigned int,
2282#ifdef USE_PARAMCACHE
2285 auto vecSize = Double_v::size();
2287 const DataOptions &fitOpt = data.Opt();
2288 if (fitOpt.fExpErrors || fitOpt.fIntegral)
2289 Error(
"FitUtil::EvaluateChi2",
2290 "The vectorized implementation doesn't support Integrals or BinVolume\n. Aborting operation.");
2291 bool useW2 = (iWeight == 2);
2293 auto mapFunction = [&](
unsigned int i) {
2295 Load(
y, data.ValuePtr(i * vecSize));
2298 if (data.NDim() > 1) {
2299 std::vector<Double_v>
x(data.NDim());
2300 for (
unsigned int j = 0; j < data.NDim(); ++j)
2301 Load(
x[j], data.GetCoordComponent(i * vecSize, j));
2302#ifdef USE_PARAMCACHE
2303 fval = func(
x.data());
2305 fval = func(
x.data(), p);
2310 Load(
x, data.GetCoordComponent(i * vecSize, 0));
2311#ifdef USE_PARAMCACHE
2320 where(fval < 0.0, fval) = 0.0;
2322 Double_v nloglike{};
2331 Double_v error = 0.0;
2332 Load(error, data.ErrorPtr(i * vecSize));
2336 where(
m, weight) = (error * error) /
y;
2339 nloglike = weight * (fval -
y);
2341 where(
y != 0, nloglike) =
2350 nloglike = fval -
y;
2359 auto redFunction = [](
const std::vector<Double_v> &objs) {
2360 return std::accumulate(objs.begin(), objs.end(), Double_v{});
2367 Warning(
"FitUtil::Evaluate<T>::EvalPoissonLogL",
2368 "Multithread execution policy requires IMT, which is disabled. Changing "
2369 "to ::ROOT::EExecutionPolicy::kSequential.");
2376 for (
unsigned int i = 0; i < (data.Size() / vecSize); i++) {
2377 res += mapFunction(i);
2381 ROOT::TThreadExecutor pool;
2383 res = pool.
MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, data.Size() / vecSize), redFunction, chunks);
2386 Error(
"FitUtil::Evaluate<T>::EvalPoissonLogL",
2387 "Execution policy unknown. Available choices:\n ::ROOT::EExecutionPolicy::kSequential (default)\n "
2388 "::ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
2392 if (data.Size() % vecSize != 0)
2395 return ReduceAdd(res);
2403auto CheckInfNaNValues(Double_v &rval)
2405 auto mask = rval > -NumericMax<Double_v>() && rval < NumericMax<Double_v>();
2408 where(!mask, rval) = +NumericMax<Double_v>();
2411 where(!mask && rval < 0, rval) = -NumericMax<Double_v>();
2428 if (data.HaveCoordErrors()) {
2430 "Error on the coordinates are not used in calculating Chi2 gradient");
2435 assert(fg !=
nullptr);
2439 const DataOptions &fitOpt = data.Opt();
2440 if (fitOpt.fBinVolume || fitOpt.fIntegral || fitOpt.fExpErrors)
2441 Error(
"FitUtil::EvaluateChi2Gradient",
"The vectorized implementation doesn't support Integrals,"
2442 "BinVolume or ExpErrors\n. Aborting operation.");
2444 unsigned int npar = func.NPar();
2445 auto vecSize = Double_v::size();
2446 unsigned initialNPoints = data.Size();
2447 unsigned numVectors = initialNPoints / vecSize;
2450 std::vector<Double_v::mask_type> validPointsMasks(numVectors + 1);
2452 auto mapFunction = [&](
const unsigned int i) {
2454 std::vector<Double_v> gradFunc(npar);
2455 std::vector<Double_v> pointContributionVec(npar);
2457 Double_v x1,
y, invError;
2459 Load(x1, data.GetCoordComponent(i * vecSize, 0));
2460 Load(
y, data.ValuePtr(i * vecSize));
2461 const auto invErrorPtr = data.ErrorPtr(i * vecSize);
2463 if (invErrorPtr ==
nullptr)
2466 Load(invError, invErrorPtr);
2472 const Double_v *
x =
nullptr;
2474 unsigned int ndim = data.NDim();
2477 std::vector<Double_v> xc;
2481 for (
unsigned int j = 1; j < ndim; ++j)
2482 Load(xc[j], data.GetCoordComponent(i * vecSize, j));
2489 func.ParameterGradient(
x, p, &gradFunc[0]);
2491 validPointsMasks[i] = CheckInfNaNValues(fval);
2492 if (MaskEmpty(validPointsMasks[i])) {
2494 return pointContributionVec;
2498 for (
unsigned int ipar = 0; ipar < npar; ++ipar) {
2501 validPointsMasks[i] = CheckInfNaNValues(gradFunc[ipar]);
2503 if (MaskEmpty(validPointsMasks[i])) {
2508 where(validPointsMasks[i], pointContributionVec[ipar]) =
2509 -2.0 * (
y - fval) * invError * invError * gradFunc[ipar];
2512 return pointContributionVec;
2516 auto redFunction = [&](
const std::vector<std::vector<Double_v>> &partialResults) {
2517 std::vector<Double_v> result(npar);
2519 for (
auto const &pointContributionVec : partialResults) {
2520 for (
unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
2521 result[parameterIndex] += pointContributionVec[parameterIndex];
2527 std::vector<Double_v> gVec(npar);
2528 std::vector<double>
g(npar);
2536 Warning(
"FitUtil::EvaluateChi2Gradient",
"Multithread execution policy requires IMT, which is disabled. Changing "
2537 "to ::ROOT::EExecutionPolicy::kSequential.");
2543 ROOT::TSequentialExecutor pool;
2544 gVec = pool.
MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, numVectors), redFunction);
2548 ROOT::TThreadExecutor pool;
2550 gVec = pool.
MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, numVectors), redFunction, chunks);
2554 Error(
"FitUtil::EvaluateChi2Gradient",
2555 "Execution policy unknown. Available choices:\n 0: Serial (default)\n 1: MultiThread (requires IMT)\n");
2559 unsigned int remainingPoints = initialNPoints % vecSize;
2560 if (remainingPoints > 0) {
2561 auto remainingPointsContribution = mapFunction(numVectors);
2563 auto remainingMask = Int2Mask(remainingPoints);
2564 for (
unsigned int param = 0; param < npar; param++) {
2565 where(remainingMask, gVec[param]) = gVec[param] + remainingPointsContribution[param];
2569 for (
unsigned int param = 0; param < npar; param++) {
2570 grad[param] = ReduceAdd(gVec[param]);
2574 nPoints = initialNPoints;
2576 if (std::any_of(validPointsMasks.begin(), validPointsMasks.end(), [](
auto validPoints) {
2578 for (size_t i = 0; i < Double_v::mask_type::size(); ++i)
2579 if (!validPoints[i])
2583 unsigned nRejected = 0;
2585 for (
const auto &mask : validPointsMasks) {
2586 for (
unsigned int i = 0; i < vecSize; i++) {
2587 nRejected += !mask[i];
2591 assert(nRejected <= initialNPoints);
2592 nPoints = initialNPoints - nRejected;
2594 if (nPoints < npar) {
2596 "Too many points rejected for overflow in gradient calculation");
2602 const double *p,
double *grad,
unsigned int &,
2608 assert(fg !=
nullptr);
2614 const DataOptions &fitOpt = data.Opt();
2615 if (fitOpt.fBinVolume || fitOpt.fIntegral || fitOpt.fExpErrors)
2616 Error(
"FitUtil::EvaluatePoissonLogLGradient",
"The vectorized implementation doesn't support Integrals,"
2617 "BinVolume or ExpErrors\n. Aborting operation.");
2619 unsigned int npar = func.NPar();
2620 auto vecSize = Double_v::size();
2621 unsigned initialNPoints = data.Size();
2622 unsigned numVectors = initialNPoints / vecSize;
2624 auto mapFunction = [&](
const unsigned int i) {
2626 std::vector<Double_v> gradFunc(npar);
2627 std::vector<Double_v> pointContributionVec(npar);
2631 Load(x1, data.GetCoordComponent(i * vecSize, 0));
2632 Load(
y, data.ValuePtr(i * vecSize));
2636 const Double_v *
x =
nullptr;
2638 unsigned ndim = data.NDim();
2639 std::vector<Double_v> xc;
2643 for (
unsigned int j = 1; j < ndim; ++j)
2644 Load(xc[j], data.GetCoordComponent(i * vecSize, j));
2651 func.ParameterGradient(
x, p, &gradFunc[0]);
2654 for (
unsigned int ipar = 0; ipar < npar; ++ipar) {
2655 auto positiveValuesMask = fval > 0;
2658 where(positiveValuesMask, pointContributionVec[ipar]) = gradFunc[ipar] * (1. -
y / fval);
2660 auto validNegativeValuesMask = !positiveValuesMask && gradFunc[ipar] != 0;
2662 if (!MaskEmpty(validNegativeValuesMask)) {
2663 const Double_v kdmax1 =
sqrt(NumericMax<Double_v>());
2664 const Double_v kdmax2 = NumericMax<Double_v>() / (4 * initialNPoints);
2665 Double_v gg = kdmax1 * gradFunc[ipar];
2667 where(mask, pointContributionVec[ipar]) = min(gg, kdmax2);
2668 where(!mask, pointContributionVec[ipar]) =
max(gg, -kdmax2);
2669 pointContributionVec[ipar] = -pointContributionVec[ipar];
2676 if (i < 5 || (i > data.Size() - 5)) {
2677 if (data.NDim() > 1)
2678 std::cout << i <<
" x " <<
x[0] <<
" y " <<
x[1];
2680 std::cout << i <<
" x " <<
x[0];
2681 std::cout <<
" func " << fval <<
" gradient ";
2682 for (
unsigned int ii = 0; ii < npar; ++ii)
2683 std::cout <<
" " << pointContributionVec[ii];
2689 return pointContributionVec;
2693 auto redFunction = [&](
const std::vector<std::vector<Double_v>> &partialResults) {
2694 std::vector<Double_v> result(npar);
2696 for (
auto const &pointContributionVec : partialResults) {
2697 for (
unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
2698 result[parameterIndex] += pointContributionVec[parameterIndex];
2704 std::vector<Double_v> gVec(npar);
2712 Warning(
"FitUtil::EvaluatePoissonLogLGradient",
2713 "Multithread execution policy requires IMT, which is disabled. Changing "
2714 "to ::ROOT::EExecutionPolicy::kSequential.");
2720 ROOT::TSequentialExecutor pool;
2721 gVec = pool.
MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, numVectors), redFunction);
2725 ROOT::TThreadExecutor pool;
2727 gVec = pool.
MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, numVectors), redFunction, chunks);
2731 Error(
"FitUtil::EvaluatePoissonLogLGradient",
"Execution policy unknown. Available choices:\n "
2732 "::ROOT::EExecutionPolicy::kSequential (default)\n "
2733 "::ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
2737 unsigned int remainingPoints = initialNPoints % vecSize;
2738 if (remainingPoints > 0) {
2739 auto remainingPointsContribution = mapFunction(numVectors);
2741 auto remainingMask = Int2Mask(remainingPoints);
2742 for (
unsigned int param = 0; param < npar; param++) {
2743 where(remainingMask, gVec[param]) = gVec[param] + remainingPointsContribution[param];
2747 for (
unsigned int param = 0; param < npar; param++) {
2748 grad[param] = ReduceAdd(gVec[param]);
2752 std::cout <<
"***** Final gradient : ";
2753 for (
unsigned int ii = 0; ii < npar; ++ii)
2754 std::cout << grad[ii] <<
" ";
2760 const double *p,
double *grad,
unsigned int &,
2766 assert(fg !=
nullptr);
2770 unsigned int npar = func.NPar();
2771 auto vecSize = Double_v::size();
2772 unsigned initialNPoints = data.Size();
2773 unsigned numVectors = initialNPoints / vecSize;
2776 std::cout <<
"\n===> Evaluate Gradient for parameters ";
2777 for (
unsigned int ip = 0; ip < npar; ++ip)
2778 std::cout <<
" " << p[ip];
2784 const Double_v kdmax1 =
sqrt(NumericMax<Double_v>());
2785 const Double_v kdmax2 = NumericMax<Double_v>() / (4 * initialNPoints);
2787 auto mapFunction = [&](
const unsigned int i) {
2788 std::vector<Double_v> gradFunc(npar);
2789 std::vector<Double_v> pointContributionVec(npar);
2792 Load(x1, data.GetCoordComponent(i * vecSize, 0));
2794 const Double_v *
x =
nullptr;
2796 unsigned int ndim = data.NDim();
2797 std::vector<Double_v> xc(ndim);
2801 for (
unsigned int j = 1; j < ndim; ++j)
2802 Load(xc[j], data.GetCoordComponent(i * vecSize, j));
2808 Double_v fval = func(
x, p);
2809 func.ParameterGradient(
x, p, &gradFunc[0]);
2812 if (i < 5 || (i > numVectors - 5)) {
2814 std::cout << i <<
" x " <<
x[0] <<
" y " <<
x[1] <<
" gradient " << gradFunc[0] <<
" " << gradFunc[1]
2815 <<
" " << gradFunc[3] << std::endl;
2817 std::cout << i <<
" x " <<
x[0] <<
" gradient " << gradFunc[0] <<
" " << gradFunc[1] <<
" "
2818 << gradFunc[3] << std::endl;
2822 auto positiveValues = fval > 0;
2824 for (
unsigned int kpar = 0; kpar < npar; ++kpar) {
2825 if (!MaskEmpty(positiveValues)) {
2826 where(positiveValues, pointContributionVec[kpar]) = -1. / fval * gradFunc[kpar];
2829 auto nonZeroGradientValues = !positiveValues && gradFunc[kpar] != 0;
2830 if (!MaskEmpty(nonZeroGradientValues)) {
2831 Double_v gg = kdmax1 * gradFunc[kpar];
2832 auto mask = nonZeroGradientValues && gg > 0;
2833 where(mask, pointContributionVec[kpar]) = -min(gg, kdmax2);
2834 where(!mask, pointContributionVec[kpar]) = -
max(gg, -kdmax2);
2839 return pointContributionVec;
2843 auto redFunction = [&](
const std::vector<std::vector<Double_v>> &pointContributions) {
2844 std::vector<Double_v> result(npar);
2846 for (
auto const &pointContributionVec : pointContributions) {
2847 for (
unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
2848 result[parameterIndex] += pointContributionVec[parameterIndex];
2854 std::vector<Double_v> gVec(npar);
2855 std::vector<double>
g(npar);
2863 Warning(
"FitUtil::EvaluateLogLGradient",
"Multithread execution policy requires IMT, which is disabled. Changing "
2864 "to ::ROOT::EExecutionPolicy::kSequential.");
2870 ROOT::TSequentialExecutor pool;
2871 gVec = pool.
MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, numVectors), redFunction);
2875 ROOT::TThreadExecutor pool;
2877 gVec = pool.
MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, numVectors), redFunction, chunks);
2881 Error(
"FitUtil::EvaluateLogLGradient",
"Execution policy unknown. Available choices:\n "
2882 "::ROOT::EExecutionPolicy::kSequential (default)\n "
2883 "::ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
2887 unsigned int remainingPoints = initialNPoints % vecSize;
2888 if (remainingPoints > 0) {
2889 auto remainingPointsContribution = mapFunction(numVectors);
2891 auto remainingMask = Int2Mask(initialNPoints % vecSize);
2892 for (
unsigned int param = 0; param < npar; param++) {
2893 where(remainingMask, gVec[param]) = gVec[param] + remainingPointsContribution[param];
2897 for (
unsigned int param = 0; param < npar; param++) {
2898 grad[param] = ReduceAdd(gVec[param]);
2902 std::cout <<
"Final gradient ";
2903 for (
unsigned int param = 0; param < npar; param++) {
2904 std::cout <<
" " << grad[param];
#define MATH_ERROR_MSG(loc, str)
Error("WriteTObject","The current directory (%s) is not associated with a file. The object (%s) has not been written.", GetName(), objname)
void Warning(const char *location, const char *msgfmt,...)
Use this function in warning situations.
void SetParameters(TFitEditor::FuncParams_t &pars, TF1 *func)
Restore the parameters from pars into the function.
externTVirtualMutex * gROOTMutex
#define R__LOCKGUARD(mutex)
Class describing the binned data sets : vectors of x coordinates, y values and optionally error on y ...
SimpleGradientCalculator(int gdim, const IModelFunction &func, double eps=2.E-8, int istrat=1)
const IModelFunction & fFunc
std::vector< double > fVec
double ParameterDerivative(const double *x, const double *p, int ipar) const
void Gradient(const double *x, const double *p, double f0, double *g)
unsigned int NDim() const
void ParameterGradient(const double *x, const double *p, double f0, double *g)
unsigned int NPar() const
double DoParameterDerivative(const double *x, const double *p, double f0, int k) const
Class describing the un-binned data sets (just x coordinates values) of any dimensions.
virtual unsigned int NDim() const =0
Retrieve the dimension of the function.
virtual unsigned int NPar() const =0
Return the number of Parameters.
virtual void ParameterGradient(const T *x, const double *p, T *grad) const
Evaluate the all the derivatives (gradient vector) of the function with respect to the parameters at ...
virtual bool HasParameterHessian() const
virtual bool ParameterHessian(const T *, const double *, T *) const
Evaluate the all the Hessian (second derivatives matrix) of the function with respect to the paramete...
OneDimMultiFunctionAdapter class to wrap a multidimensional function in one dimensional one.
void SetCoord(int icoord)
User class for calculating the derivatives of a function.
double Derivative1(double x)
Returns the first derivative of the function at point x, computed by Richardson's extrapolation metho...
auto MapReduce(F func, unsigned nTimes, R redfunc) -> InvokeResult_t< F >
Execute a function without arguments several times (Map) and accumulate the results into a single val...
A pseudo container class which is a generator of indices.
This class provides a simple interface to execute the same task multiple times in parallel threads,...
auto MapReduce(F func, unsigned nTimes, R redfunc) -> InvokeResult_t< F >
Execute a function nTimes in parallel (Map) and accumulate the results into a single value (Reduce).
Type
enumeration specifying the integration types.
@ kGAUSS
simple Gauss integration method with fixed rule
@ kDEFAULT
default type specified in the static options
namespace defining utility free functions using in Fit for evaluating the various fit method function...
ROOT::Math::IParamMultiGradFunction IGradModelFunction
ROOT::Math::IParamMultiFunction IModelFunction
void CalculateGradientIntegral(const GFunc &gfunc, const double *x1, const double *x2, const double *p, double *g)
double EvaluatePoissonBinPdf(const IModelFunction &func, const BinData &data, const double *x, unsigned int ipoint, double *g=nullptr, double *h=nullptr, bool hasGrad=false, bool fullHessian=false)
evaluate the pdf contribution to the Poisson LogL given a model function and the BinPoint data.
ROOT::Math::IParamMultiFunctionTempl< T > IModelFunctionTempl
double CorrectValue(double rval)
void EvaluatePoissonLogLGradient(const IModelFunction &func, const BinData &data, const double *p, double *grad, unsigned int &nPoints, ::ROOT::EExecutionPolicy executionPolicy=::ROOT::EExecutionPolicy::kSequential, unsigned nChunks=0)
evaluate the Poisson LogL given a model function and the data at the point p.
double EvaluateChi2Residual(const IModelFunction &func, const BinData &data, const double *p, unsigned int ipoint, double *g=nullptr, double *h=nullptr, bool hasGrad=false, bool fullHessian=false)
evaluate the residual contribution to the Chi2 given a model function and the BinPoint data and if th...
double EvaluatePoissonLogL(const IModelFunction &func, const BinData &data, const double *p, int iWeight, bool extended, unsigned int &nPoints, ::ROOT::EExecutionPolicy executionPolicy, unsigned nChunks=0)
evaluate the Poisson LogL given a model function and the data at the point p.
double EvaluateChi2Effective(const IModelFunction &func, const BinData &data, const double *x, unsigned int &nPoints)
evaluate the effective Chi2 given a model function and the data at the point x.
void EvaluateLogLGradient(const IModelFunction &func, const UnBinData &data, const double *p, double *grad, unsigned int &nPoints, ::ROOT::EExecutionPolicy executionPolicy=::ROOT::EExecutionPolicy::kSequential, unsigned nChunks=0)
evaluate the LogL gradient given a model function and the data at the point p.
double EvaluatePdf(const IModelFunction &func, const UnBinData &data, const double *p, unsigned int ipoint, double *g=nullptr, double *h=nullptr, bool hasGrad=false, bool fullHessian=false)
evaluate the pdf contribution to the LogL given a model function and the BinPoint data.
ROOT::Math::IParamMultiGradFunctionTempl< T > IGradModelFunctionTempl
bool CheckInfNaNValue(double &rval)
unsigned setAutomaticChunking(unsigned nEvents)
double EvaluateLogL(const IModelFunction &func, const UnBinData &data, const double *p, int iWeight, bool extended, unsigned int &nPoints, ::ROOT::EExecutionPolicy executionPolicy, unsigned nChunks=0)
evaluate the LogL given a model function and the data at the point x.
double EvaluateChi2(const IModelFunction &func, const BinData &data, const double *p, unsigned int &nPoints, ::ROOT::EExecutionPolicy executionPolicy, unsigned nChunks=0)
Chi2 Functions.
void EvaluateChi2Gradient(const IModelFunction &func, const BinData &data, const double *p, double *grad, unsigned int &nPoints, ::ROOT::EExecutionPolicy executionPolicy=::ROOT::EExecutionPolicy::kSequential, unsigned nChunks=0)
evaluate the Chi2 gradient given a model function and the data at the point p.
Namespace for the fitting classes.
void(off) SmallVectorTemplateBase< T
T EvalLog(T x)
safe evaluation of log(x) with a protections against negative or zero argument to the log smooth line...
VecExpr< UnaryOp< Sqrt< T >, VecExpr< A, T, D >, T >, T, D > sqrt(const VecExpr< A, T, D > &rhs)
UInt_t GetThreadPoolSize()
Returns the size of ROOT's thread pool.
__device__ AFloat max(AFloat x, AFloat y)
DataOptions : simple structure holding the options on how the data are filled.
bool fErrors1
use all errors equal to 1, i.e. fit without errors (default is false)
bool fNormBinVolume
normalize data by a normalized the bin volume (bin volume divided by a reference value)
bool fUseEmpty
use empty bins (default is false) with a fixed error of 1
bool fIntegral
use integral of bin content instead of bin center (default is false)
bool fExpErrors
use expected errors from the function and not from the data
bool fBinVolume
normalize data by the bin volume (it is used in the Poisson likelihood fits)
double operator()(const double *x, const double *p) const
void SetDerivComponent(unsigned int ipar)
unsigned int NDim() const
ParamDerivFunc(const GradFunc &f)