31 const MnStrategy &strategy,
unsigned int maxfcn,
double edmval)
const
41 print.
Debug(
"Convergence when edm <", edmval);
44 print.
Warn(
"No variable parameters are defined! - Return current function value ");
50 print.
Debug(
"initial edm is ", edm);
56 print.
Error(
"Initial matrix not positive defined, edm = ",edm,
"\nExit minimization ");
60 std::vector<MinimumState> result;
64 result.push_back(seed.
State());
75 int maxfcn_eff =
int(0.5 * maxfcn);
81 min =
Minimum(fcn, gc, seed, result, maxfcn_eff, edmval);
87 print.
Warn(
"FunctionMinimum is invalid");
94 edm = result.back().Edm();
96 print.
Debug(
"Approximate Edm", edm,
"npass", ipass);
101 print.
Debug(
"FumiliBuilder will verify convergence and Error matrix; "
112 result.push_back(st);
114 print.
Info(
"After Hessian");
121 print.
Debug(
"Edm", edm,
"State", st);
125 if (ipass > 0 && edm >= edmprev) {
126 print.
Warn(
"Stop iterations, no improvements after Hesse; current Edm", edm,
"previous value", edmprev);
130 print.
Debug(
"Tolerance not sufficient, continue minimization; Edm", edm,
"Requested", edmval);
151 }
while (edm > edmval);
154 min.
Add(result.back());
160 std::vector<MinimumState> &result,
unsigned int maxfcn,
double edmval)
const
197 double edm = initialState.
Edm();
199 MnPrint print(
"FumiliBuilder");
201 print.
Info(
"Iterating FumiliBuilder", maxfcn, edmval);
203 print.
Debug(
"Initial State:",
"\n Parameter", initialState.
Vec(),
"\n Gradient", initialState.
Gradient().
Vec(),
204 "\n Inv Hessian", initialState.
Error().
InvHessian(),
"\n edm", initialState.
Edm(),
"\n maxfcn",
205 maxfcn,
"\n tolerance", edmval);
208 edm *= (1. + 3. * initialState.
Error().Dcovar());
219 auto & x0 = initialState.
Vec();
221 double delta = 0.3 * std::max(1.0 , normX0);
222 const double eta = 0.1;
224 const double tr_factor_up = 3;
225 const double tr_factor_down = 0.5;
226 bool acceptNewPoint =
true;
229 print.
Info(
"Using Fumili with a line search algorithm");
230 else if (doTrustRegion && scaleTR)
231 print.
Info(
"Using Fumili with a scaled trust region algorithm with factors up/down",tr_factor_up,tr_factor_down);
233 print.
Info(
"Using Fumili with a trust region algorithm with factors up/down",tr_factor_up,tr_factor_down);
236 double lambda = (doLineSearch) ? 0.001 : 0;
245 step = -1. *
s0.Error().InvHessian() *
s0.Gradient().Vec();
247 print.
Debug(
"Iteration -", result.size(),
"\n Fval",
s0.Fval(),
"numOfCall", fcn.
NumOfCalls(),
248 "\n Internal Parameter values",
s0.Vec(),
"\n Newton step", step);
253 print.
Warn(
"Matrix not pos.def, gdel =", gdel,
" > 0");
257 step = -1. *
s0.Error().InvHessian() *
s0.Gradient().Vec();
260 print.
Warn(
"After correction, gdel =", gdel);
263 result.push_back(
s0);
271 double fval2 = (doLineSearch) ? fcnCaller(
s0.Vec() + step) : 0;
276 if (doLineSearch && p.
Fval() >=
s0.Fval()) {
279 auto pp = lsearch(fcn,
s0.Parameters(), step, gdel, prec);
281 if (std::fabs(pp.Y() -
s0.Fval()) < prec.
Eps()) {
286 print.
Debug(
"New point after Line Search :",
"\n FVAL ", p.
Fval(),
"\n Parameter", p.
Vec());
289 auto &
H =
s0.Error().Hessian();
290 unsigned int n = (scaleTR) ?
H.Nrow() : 0;
297 for (
unsigned int i = 0; i <
n; i++){
298 double d = std::sqrt(
H(i,i));
301 Dinv2(i,i) = 1./(
d*
d);
309 print.
Debug(
"scaling Trust region with diagonal matrix D ",D);
310 stepScaled = D * step;
327 print.
Debug(
"Accept full Newton step - it is inside TR ",delta);
332 double normGrad2 = 0;
335 auto gScaled = Dinv *
s0.Gradient().Grad();
339 for (
unsigned int i = 0; i <
n; i++) {
340 for (
unsigned int j = 0; j <=i; j++) {
341 Hscaled(i,j) =
H(i,j) * Dinv(i,i) * Dinv(j,j);
349 double normGrad = sqrt(normGrad2);
353 print.
Debug(
"computed gdel gHg and normGN",gdel,gHg,norm*norm, normGrad2);
356 step = - (delta/ normGrad) *
s0.Gradient().Grad();
357 if (scaleTR) step = Dinv2 * step;
358 print.
Debug(
"Use as new point the Cauchy point - along gradient with norm=delta ", delta);
361 double tau = std::min( (normGrad2*normGrad)/(gHg*delta), 1.0);
364 stepC = -tau * (delta / normGrad) *
s0.Gradient().Grad();
366 stepC = Dinv2 * stepC;
368 print.
Debug(
"Use as new point the Cauchy point - along gradient with tau ", tau,
"delta = ", delta);
374 auto diffP = step - stepC;
378 double c = (scaleTR) ?
inner_product(stepC, stepC) - delta * delta : delta * delta * (tau * tau - 1.);
380 print.
Debug(
" dogleg equation",
a,
b,
c);
386 print.
Warn(
"a is equal to zero! a = ",
a);
387 print.
Info(
" delta ", delta,
" tau ", tau,
" gHg ", gHg,
" normgrad2 ", normGrad2);
390 double t1 = (-
b + sqrt(
b *
b - 4. *
a *
c)) / (2.0 *
a);
391 double t2 = (-
b - sqrt(
b *
b - 4. *
a *
c)) / (2.0 *
a);
393 print.
Debug(
" solution dogleg equation",
t1, t2);
394 if (
t1 >= 0 &&
t1 <= 1.)
399 step = stepC + t * diffP;
401 print.
Debug(
"New dogleg point is t = ", t);
403 print.
Debug(
"New accepted step is ",step);
415 double svs = 0.5 *
similarity(step,
s0.Error().Hessian());
416 double rho = (p.
Fval()-
s0.Fval())/(gdel+svs);
418 delta = tr_factor_down * delta;
420 if (rho > 0.75 && norm == delta) {
421 delta = std::min(tr_factor_up * delta, 100.*norm);
424 print.
Debug(
"New point after Trust region :",
"norm tr ",norm,
" rho ", rho,
" delta ", delta,
425 " FVAL ", p.
Fval(),
"\n Parameter", p.
Vec());
428 acceptNewPoint = (rho > eta);
429 if (acceptNewPoint) {
432 print.
Debug(
"Trust region: accept new point p = x + step since rho is larger than eta");
436 print.
Debug(
"Trust region reject new point and repeat since rho is smaller than eta");
444 if (acceptNewPoint || result.size() == 1) {
448 g = gc(p,
s0.Gradient());
461 print.
Debug(
"Updated new point:",
"\n FVAL ", p.
Fval(),
"\n Parameter", p.
Vec(),
"\n Gradient",
g.Vec(),
462 "\n InvHessian",
e.InvHessian(),
"\n Hessian",
e.Hessian(),
"\n Edm", edm);
465 print.
Warn(
"Matrix not pos.def., Edm < 0");
471 result.push_back(
s0);
491 print.
Debug(
"finish iteration -", result.size(),
"lambda =", lambda,
"f1 =", p.
Fval(),
"f0 =",
s0.Fval(),
492 "num of calls =", fcn.
NumOfCalls(),
"edm =", edm);
500 edm *= (1. + 3. *
e.Dcovar());
505 }
while (edm > edmval && fcn.
NumOfCalls() < maxfcn);
514 if (edm < std::fabs(prec.
Eps2() * result.back().Fval())) {
515 print.
Warn(
"Machine accuracy limits further improvement");
518 }
else if (edm < 10 * edmval) {
522 print.
Warn(
"No convergence; Edm", edm,
"is above tolerance", 10 * edmval);
528 print.
Debug(
"Exiting successfully",
"Ncalls", fcn.
NumOfCalls(),
"FCN", result.back().Fval(),
"Edm", edm,
"Requested",
FunctionMinimum Minimum(const MnFcn &fMnFcn, const GradientCalculator &fGradientCalculator, const MinimumSeed &fMinimumSeed, const MnStrategy &fMnStrategy, unsigned int maxfcn, double edmval) const override
Member function that calculates the minimum and verifies the result depending on the strategy.
FumiliMethodType fMethodType
const FumiliErrorUpdator & ErrorUpdator() const
Accessor to the Error updator of the builder.
const VariableMetricEDMEstimator & Estimator() const
Accessor to the EDM (expected vertical distance to the Minimum) estimator.
virtual MinimumError Update(const MinimumState &fMinimumState, const MinimumParameters &fMinimumParameters, const GradientCalculator &fGradientCalculator, double lambda) const
Member function that calculates the Error matrix (or the Hessian matrix containing the (approximate) second derivatives of the function to be minimized).
const MnAlgebraicVector & Vec() const
class holding the full result of the minimization; both internal and external (MnUserParameterState) ...
const std::vector< MinimumState > & States() const
void Add(const MinimumState &state, Status status=MnValid)
add latest minimization state (for example add Hesse result after Migrad)
const MinimumError & Error() const
const MinimumState & State() const
bool IsAboveMaxEdm() const
const MinimumSeed & Seed() const
interface class for gradient calculators
unsigned int size() const
void TraceIteration(int iter, const MinimumState &state) const
MinimumError keeps the inverse 2nd derivative (inverse Hessian) used for calculating the parameter step size.
const MnAlgebraicSymMatrix & InvHessian() const
const MnAlgebraicVector & Vec() const
const FunctionGradient & Gradient() const
const MinimumError & Error() const
const MnUserTransformation & Trafo() const
const MinimumParameters & Parameters() const
const MnMachinePrecision & Precision() const
const MinimumState & State() const
MinimumState keeps the information (position, Gradient, 2nd deriv, etc) after one minimization step (...
const MinimumError & Error() const
const MnAlgebraicVector & Vec() const
const FunctionGradient & Gradient() const
Wrapper class to FCNBase interface used internally by Minuit.
unsigned int NumOfCalls() const
API class for calculating the numerical covariance matrix (== 2x Inverse Hessian == 2x Inverse 2nd derivative).
Implements a 1-dimensional minimization along a given direction (i.e. a line search).
Sets the relative floating point (double) arithmetic precision.
double Eps() const
eps returns the smallest possible number so that 1.+eps > 1.
double Eps2() const
eps2 returns 2*sqrt(eps)
Force the covariance matrix to be positive definite by adding extra terms in the diagonal.
void Debug(const Ts &... args)
void Error(const Ts &... args)
void Info(const Ts &... args)
void Warn(const Ts &... args)
API class for defining four levels of strategies: low (0), medium (1), high (2), very high (>=3); acting on Migrad, Minos, Hesse and numerical derivative computations.
double HessianRecomputeThreshold() const
double Estimate(const FunctionGradient &, const MinimumError &) const
LAVector MnAlgebraicVector
LASymMatrix MnAlgebraicSymMatrix
double similarity(const LAVector &, const LASymMatrix &)
double inner_product(const LAVector &, const LAVector &)