Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
VariableMetricBuilder.cxx
Go to the documentation of this file.
1// @(#)root/minuit2:$Id$
2// Authors: M. Winkler, F. James, L. Moneta, A. Zsenei 2003-2005
3
4/**********************************************************************
5 * *
6 * Copyright (c) 2005 LCG ROOT Math team, CERN/PH-SFT *
7 * *
8 **********************************************************************/
9
17#include "Minuit2/MinimumSeed.h"
18#include "Minuit2/MnFcn.h"
20#include "Minuit2/MnPosDef.h"
22#include "Minuit2/LaSum.h"
23#include "Minuit2/LaProd.h"
24#include "Minuit2/MnStrategy.h"
25#include "Minuit2/MnHesse.h"
26#include "Minuit2/MnPrint.h"
27
28#include <cmath>
29#include <cassert>
30
31namespace ROOT {
32
33namespace Minuit2 {
34
35double inner_product(const LAVector &, const LAVector &);
36
37void VariableMetricBuilder::AddResult(std::vector<MinimumState> &result, const MinimumState &state) const
38{
39 // // if (!store) store = StorageLevel();
40 // // store |= (result.size() == 0);
41 // if (store)
42 result.push_back(state);
43 // else {
44 // result.back() = state;
45 // }
46 if (TraceIter())
47 TraceIteration(result.size() - 1, result.back());
48 else {
49 MnPrint print("VariableMetricBuilder", PrintLevel());
50 print.Info(MnPrint::Oneline(result.back(), result.size() - 1));
51 }
52}
53
// Top-level MIGRAD driver: runs the iterative Minimum(...) overload below,
// optionally verifies the result with a full Hessian (MnHesse) according to
// the strategy, and retries once with a 30% larger function-call budget if
// the Hessian-recomputed EDM is still above tolerance.
// NOTE(review): the first line of this signature (the MnFcn, gradient
// calculator and MinimumSeed parameters) was lost in this extraction --
// confirm against the class declaration.
55 const MnStrategy &strategy, unsigned int maxfcn, double edmval) const
56{
57 MnPrint print("VariableMetricBuilder", PrintLevel());
58
59 // top level function to find minimum from a given initial seed
60 // iterate on a minimum search in case the first attempt is not successful
61
62 // to be consistent with F77 Minuit
63 // in Minuit2 edm is correct and is ~ a factor of 2 smaller than F77Minuit
64 // There is also a check for convergence (edm < 0.1 edmval) for exiting the loop
65 // LM: change factor to 2E-3 to be consistent with F77Minuit
66 edmval *= 0.002;
67
68 // set global printlevel to the local one so all calls to MN_INFO_MSG can be controlled in the same way
69 // at exit of this function the BuilderPrintLevelConf object is destructed and automatically the
70 // previous level will be restored
71
72 // double edm = Estimator().Estimate(seed.Gradient(), seed.Error());
73 double edm = seed.State().Edm();
74
75 FunctionMinimum min(seed, fcn.Up());
76
// guard: nothing to minimize without free parameters
77 if (seed.Parameters().Vec().size() == 0) {
78 print.Warn("No free parameters.");
79 return min;
80 }
81
82 if (!seed.IsValid()) {
83 print.Error("Minimum seed invalid.");
84 return min;
85 }
86
// a negative initial EDM signals a non-positive-definite error matrix
87 if (edm < 0.) {
88 print.Error("Initial matrix not pos.def.");
89
90 // assert(!seed.Error().IsPosDef());
91 return min;
92 }
93
94 std::vector<MinimumState> result;
// reserve according to how much iteration history is being kept
95 if (StorageLevel() > 0)
96 result.reserve(10);
97 else
98 result.reserve(2);
99
100 // do actual iterations
101 print.Info("Start iterating until Edm is <", edmval, "with call limit =", maxfcn);
102
103 AddResult(result, seed.State());
104
105 // effective call limit: full budget on the first pass; increased by 30% for a retry (see below)
106 int maxfcn_eff = maxfcn;
107 int ipass = 0;
108 bool iterate = false;
109 bool hessianComputed = false;
110
111 do {
112
113 iterate = false;
114 hessianComputed = false;
115
116 print.Debug(ipass > 0 ? "Continue" : "Start", "iterating...");
117
118 min = Minimum(fcn, gc, seed, result, maxfcn_eff, edmval);
119
120 // if max function call reached exits
121 if (min.HasReachedCallLimit()) {
122 print.Warn("FunctionMinimum is invalid, reached function call limit");
123 return min;
124 }
125
126 // second time check for validity of function Minimum
127 if (ipass > 0) {
128 if (!min.IsValid()) {
129 print.Warn("FunctionMinimum is invalid after second try");
130 return min;
131 }
132 }
133
134 // resulting edm of minimization
135 edm = result.back().Edm();
136 // need to correct again for Dcovar: edm *= (1. + 3. * e.Dcovar()) ???
137
// run a full Hessian check when the strategy is high (2), or medium (1) with a
// covariance that is still mostly approximated (Dcovar > 5%)
138 if ((strategy.Strategy() == 2) || (strategy.Strategy() == 1 && min.Error().Dcovar() > 0.05)) {
139
140 print.Debug("MnMigrad will verify convergence and Error matrix; dcov =", min.Error().Dcovar());
141
142 MinimumState st = MnHesse(strategy)(fcn, min.State(), min.Seed().Trafo(), maxfcn);
143
144 hessianComputed = true;
145
146 print.Info("After Hessian");
147
148 AddResult(result, st);
149
150 if (!st.IsValid()) {
151 print.Warn("Invalid Hessian - exit the minimization");
152 break;
153 }
154
155 // check new edm
156 edm = st.Edm();
157
158 print.Debug("New Edm", edm, "Requested", edmval);
159
160 if (edm > edmval) {
161 // be careful with machine precision and avoid too small edm
162 double machineLimit = std::fabs(seed.Precision().Eps2() * result.back().Fval());
163 if (edm >= machineLimit) {
164 iterate = true;
165
166 print.Info("Tolerance not sufficient, continue minimization; "
167 "Edm",
168 edm, "Required", edmval);
169 } else {
170 print.Warn("Reached machine accuracy limit; Edm", edm, "is smaller than machine limit", machineLimit,
171 "while", edmval, "was requested");
172 }
173 }
174 }
175
176 // end loop on iterations
177 // ? need a maximum here (or max of function calls is enough ? )
178 // continue iteration (re-calculate function Minimum if edm IS NOT sufficient)
179 // no need to check that hesse calculation is done (if it is not done edm is OK anyway)
180 // count the pass to exit second time when function Minimum is invalid
181 // increase maxfcn by 30% for doing some more tests
182 if (ipass == 0)
183 maxfcn_eff = int(maxfcn * 1.3);
184 ipass++;
185 } while (iterate);
186
187 // Add latest state (Hessian calculation)
188 // and check edm (add a factor of 10 in tolerance )
189 if (edm > 10 * edmval) {
190 min.Add(result.back(), FunctionMinimum::MnAboveMaxEdm());
191 print.Warn("No convergence; Edm", edm, "is above tolerance", 10 * edmval);
192 } else {
193 // check if minimum has edm above max before
194 if (min.IsAboveMaxEdm()) {
195 print.Info("Edm has been re-computed after Hesse; Edm", edm, "is now within tolerance");
196 }
197 if (hessianComputed) min.Add(result.back());
198 }
199
200 print.Debug("Minimum found", min);
201
202 return min;
203}
204
// Core MIGRAD iteration loop: starting from the last state in `result`,
// compute the Newton step -(inverse Hessian)*gradient, line-search along it,
// re-estimate the EDM and update the state via the error updator, until the
// EDM drops below `edmval` or the call budget `maxfcn` is exhausted.
// NOTE(review): the first line of this signature (MnFcn, gradient calculator
// and MinimumSeed parameters) was lost in this extraction -- confirm against
// the class declaration.
206 std::vector<MinimumState> &result, unsigned int maxfcn,
207 double edmval) const
208{
209 // function performing the minimum searches using the Variable Metric algorithm (MIGRAD)
210 // perform first a line search in the - Vg direction and then update using the Davidon formula (Davidon Error
211 // updator) stop when edm reached is less than required (edmval)
212
213 // after the modification when I iterate on this function, so it can be called many times,
214 // the seed is used here only to get precision and construct the returned FunctionMinimum object
215
216 MnPrint print("VariableMetricBuilder", PrintLevel());
217
218 const MnMachinePrecision &prec = seed.Precision();
219
220 // result.push_back(MinimumState(seed.Parameters(), seed.Error(), seed.Gradient(), edm, fcn.NumOfCalls()));
// resume from the last stored state (callers guarantee result is non-empty)
221 const MinimumState &initialState = result.back();
222
223 double edm = initialState.Edm();
224
225 print.Debug("Initial State:", "\n Parameter:", initialState.Vec(), "\n Gradient:", initialState.Gradient().Vec(),
226 "\n InvHessian:", initialState.Error().InvHessian(), "\n Edm:", initialState.Edm());
227
228 // iterate until edm is small enough or max # of iterations reached
// inflate the EDM according to how approximate the covariance still is (Dcovar)
229 edm *= (1. + 3. * initialState.Error().Dcovar());
230 MnLineSearch lsearch;
231 MnAlgebraicVector step(initialState.Gradient().Vec().size());
232 // keep also prevStep
233 MnAlgebraicVector prevStep(initialState.Gradient().Vec().size());
234
235 MinimumState s0 = result.back();
236
237 do {
238
239 // MinimumState s0 = result.back();
240
// Newton direction: -(inverse Hessian) * gradient
241 step = -1. * s0.Error().InvHessian() * s0.Gradient().Vec();
242
243 print.Debug("Iteration", result.size(), "Fval", s0.Fval(), "numOfCall", fcn.NumOfCalls(),
244 "\n Internal parameters", s0.Vec(), "\n Newton step", step);
245
246 // check if derivatives are not zero
247 if (inner_product(s0.Gradient().Vec(), s0.Gradient().Vec()) <= 0) {
248 print.Debug("all derivatives are zero - return current status");
249 break;
250 }
251
// gdel = step . gradient must be negative for a descent direction
252 double gdel = inner_product(step, s0.Gradient().Grad());
253
254 if (gdel > 0.) {
255 print.Warn("Matrix not pos.def, gdel =", gdel, "> 0");
256
// force the error matrix positive-definite and recompute the step
257 MnPosDef psdf;
258 s0 = psdf(s0, prec);
259 step = -1. * s0.Error().InvHessian() * s0.Gradient().Vec();
260 // #ifdef DEBUG
261 // std::cout << "After MnPosdef - Error " << s0.Error().InvHessian() << " Gradient " <<
262 // s0.Gradient().Vec() << " step " << step << std::endl;
263 // #endif
264 gdel = inner_product(step, s0.Gradient().Grad());
265
266 print.Warn("gdel =", gdel);
267
// still not a descent direction: give up and return the current trace
268 if (gdel > 0.) {
269 AddResult(result, s0);
270
271 return FunctionMinimum(seed, result, fcn.Up());
272 }
273 }
274
275 MnParabolaPoint pp = lsearch(fcn, s0.Parameters(), step, gdel, prec);
276
277 // <= needed for case 0 <= 0
278 if (std::fabs(pp.Y() - s0.Fval()) <= std::fabs(s0.Fval()) * prec.Eps()) {
279
280 print.Warn("No improvement in line search");
281
282 // no improvement exit (is it really needed LM ? in vers. 1.22 tried alternative )
283 // add new state when only fcn changes
284 if (result.size() <= 1)
285 AddResult(result, MinimumState(s0.Parameters(), s0.Error(), s0.Gradient(), s0.Edm(), fcn.NumOfCalls()));
286 else
287 // no need to re-store the state
288 AddResult(result, MinimumState(pp.Y(), s0.Edm(), fcn.NumOfCalls()));
289
290 break;
291 }
292
293 print.Debug("Result after line search :", "\n x =", pp.X(), "\n Old Fval =", s0.Fval(),
294 "\n New Fval =", pp.Y(), "\n NFcalls =", fcn.NumOfCalls());
295
// accept the line-search point and evaluate the gradient there
296 MinimumParameters p(s0.Vec() + pp.X() * step, pp.Y());
297
298 FunctionGradient g = gc(p, s0.Gradient());
299
300 edm = Estimator().Estimate(g, s0.Error());
301
302 if (std::isnan(edm)) {
303 print.Warn("Edm is NaN; stop iterations");
304 AddResult(result, s0);
305 return FunctionMinimum(seed, result, fcn.Up());
306 }
307
308 if (edm < 0.) {
309 print.Warn("Matrix not pos.def., try to make pos.def.");
310
311 MnPosDef psdf;
312 s0 = psdf(s0, prec);
313 edm = Estimator().Estimate(g, s0.Error());
314 if (edm < 0.) {
315 print.Warn("Matrix still not pos.def.; stop iterations");
316
317 AddResult(result, s0);
318
319 return FunctionMinimum(seed, result, fcn.Up());
320 }
321 }
// NOTE(review): the line computing the updated error matrix `e` (doc line 322,
// presumably e = ErrorUpdator().Update(s0, p, g), i.e. the Davidon update) was
// lost in this extraction; `e` used below depends on it -- confirm upstream.
323
324 // avoid print Hessian that will invert the matrix
325 print.Debug("Updated new point:", "\n Parameter:", p.Vec(), "\n Gradient:", g.Vec(),
326 "\n InvHessian:", e.Matrix(), "\n Edm:", edm);
327
328 // update the state
329 s0 = MinimumState(p, e, g, edm, fcn.NumOfCalls());
330 if (StorageLevel() || result.size() <= 1)
331 AddResult(result, s0);
332 else
333 // use a reduced state for not-final iterations
334 AddResult(result, MinimumState(p.Fval(), edm, fcn.NumOfCalls()));
335
336 // correct edm
337 edm *= (1. + 3. * e.Dcovar());
338
339 print.Debug("Dcovar =", e.Dcovar(), "\tCorrected edm =", edm);
340
341 } while (edm > edmval && fcn.NumOfCalls() < maxfcn); // end of iteration loop
342
343 // save last result in case of no complete final states
344 // when the result is filled above (reduced storage) the resulting state will not be valid
345 // since they will not have parameter values and error
346 // the line above will fill as last element a valid state
347 if (!result.back().IsValid())
348 result.back() = s0;
349
350 if (fcn.NumOfCalls() >= maxfcn) {
351 print.Warn("Call limit exceeded");
352
353 return FunctionMinimum(seed, result, fcn.Up(), FunctionMinimum::MnReachedCallLimit());
354 }
355
356 if (edm > edmval) {
// below machine accuracy no further improvement is possible; within a
// factor 10 of the tolerance the result is still accepted as converged
357 if (edm < std::fabs(prec.Eps2() * result.back().Fval())) {
358 print.Warn("Machine accuracy limits further improvement");
359
360 return FunctionMinimum(seed, result, fcn.Up());
361 } else if (edm < 10 * edmval) {
362 return FunctionMinimum(seed, result, fcn.Up());
363 } else {
364 print.Warn("Iterations finish without convergence; Edm", edm, "Requested", edmval);
365
366 return FunctionMinimum(seed, result, fcn.Up(), FunctionMinimum::MnAboveMaxEdm());
367 }
368 }
369 // std::cout<<"result.back().Error().Dcovar()= "<<result.back().Error().Dcovar()<<std::endl;
370
371 print.Debug("Exiting successfully;", "Ncalls", fcn.NumOfCalls(), "FCN", result.back().Fval(), "Edm", edm,
372 "Requested", edmval);
373
374 return FunctionMinimum(seed, result, fcn.Up());
375}
376
377} // namespace Minuit2
378
379} // namespace ROOT
#define g(i)
Definition RSha256.hxx:105
#define s0(x)
Definition RSha256.hxx:90
#define e(i)
Definition RSha256.hxx:103
const MnAlgebraicVector & Vec() const
class holding the full result of the minimization; both internal and external (MnUserParameterState) ...
void Add(const MinimumState &state)
const MinimumError & Error() const
const MinimumState & State() const
const MinimumSeed & Seed() const
interface class for gradient calculators
unsigned int size() const
Definition LAVector.h:227
void TraceIteration(int iter, const MinimumState &state) const
virtual MinimumError Update(const MinimumState &, const MinimumParameters &, const FunctionGradient &) const =0
MinimumError keeps the inverse of the second-derivative matrix (the inverse Hessian) resulting from the minimization step.
const MnAlgebraicSymMatrix & InvHessian() const
const MnAlgebraicVector & Vec() const
MinimumSeed contains the starting values for the minimization produced by the SeedGenerator.
Definition MinimumSeed.h:31
const MnUserTransformation & Trafo() const
Definition MinimumSeed.h:43
const MinimumParameters & Parameters() const
Definition MinimumSeed.h:40
const MnMachinePrecision & Precision() const
Definition MinimumSeed.h:44
const MinimumState & State() const
Definition MinimumSeed.h:39
MinimumState keeps the information (position, Gradient, 2nd deriv, etc) after one minimization step (...
const MinimumError & Error() const
const MnAlgebraicVector & Vec() const
const FunctionGradient & Gradient() const
Wrapper class to FCNBase interface used internally by Minuit.
Definition MnFcn.h:30
double Up() const
Definition MnFcn.cxx:39
unsigned int NumOfCalls() const
Definition MnFcn.h:39
API class for calculating the numerical covariance matrix (== 2x Inverse Hessian == 2x Inverse 2nd de...
Definition MnHesse.h:39
Implements a 1-dimensional minimization along a given direction (i.e.
Sets the relative floating point (double) arithmetic precision.
double Eps() const
eps returns the smallest possible number so that 1.+eps > 1.
double Eps2() const
eps2 returns 2*sqrt(eps)
double Y() const
Accessor to the y (second) coordinate.
double X() const
Accessor to the x (first) coordinate.
Force the covariance matrix to be positive defined by adding extra terms in the diagonal.
Definition MnPosDef.h:25
void Debug(const Ts &... args)
Definition MnPrint.h:138
void Error(const Ts &... args)
Definition MnPrint.h:120
void Info(const Ts &... args)
Definition MnPrint.h:132
void Warn(const Ts &... args)
Definition MnPrint.h:126
API class for defining three levels of strategies: low (0), medium (1), high (>=2); acts on: Migrad (...
Definition MnStrategy.h:27
unsigned int Strategy() const
Definition MnStrategy.h:38
void AddResult(std::vector< MinimumState > &result, const MinimumState &state) const
const VariableMetricEDMEstimator & Estimator() const
virtual FunctionMinimum Minimum(const MnFcn &, const GradientCalculator &, const MinimumSeed &, const MnStrategy &, unsigned int, double) const
const MinimumErrorUpdator & ErrorUpdator() const
double Estimate(const FunctionGradient &, const MinimumError &) const
int iterate(rng_state_t *X)
Definition mixmax.icc:34
double inner_product(const LAVector &, const LAVector &)
tbb::task_arena is an alias of tbb::interface7::task_arena, which doesn't allow to forward declare tb...