NegativeG2LineSearch.cxx
// @(#)root/minuit2:$Id$
// Authors: M. Winkler, F. James, L. Moneta, A. Zsenei  2003-2005

/**********************************************************************
 *                                                                    *
 * Copyright (c) 2005 LCG ROOT Math team,  CERN/PH-SFT                *
 *                                                                    *
 **********************************************************************/

#include "Minuit2/NegativeG2LineSearch.h"
#include "Minuit2/MnFcn.h"
#include "Minuit2/MinimumState.h"
#include "Minuit2/GradientCalculator.h"
#include "Minuit2/MnMachinePrecision.h"
#include "Minuit2/MnLineSearch.h"
#include "Minuit2/VariableMetricEDMEstimator.h"
#include "Minuit2/MnPrint.h"

#include "Math/Util.h"

#include <cmath>

namespace ROOT {

namespace Minuit2 {

MinimumState NegativeG2LineSearch::operator()(const MnFcn &fcn, const MinimumState &st, const GradientCalculator &gc,
                                              const MnMachinePrecision &prec) const
{

   // When the second derivatives are negative, perform a line search along each Parameter that has a
   // negative second derivative, with a step of magnitude equal to the Gradient step size.
   // Recalculate the gradients for all the Parameters after the correction and
   // continue iterating in case the second derivatives are still negative
   //
   MnPrint print("NegativeG2LineSearch");

   // Print the runtime on returning from the function
   ROOT::Math::Util::TimingScope timingScope([&print](std::string const &s) { print.Info(s); }, "Done after");

   bool negG2 = HasNegativeG2(st.Gradient(), prec);
   if (!negG2)
      return st;

   print.Info("Doing a NegativeG2LineSearch since one of the G2 components is negative");

   unsigned int n = st.Parameters().Vec().size();
   FunctionGradient dgrad = st.Gradient();
   MinimumParameters pa = st.Parameters();
   bool iterate = false;
   unsigned int iter = 0;
   // in case of analytical gradients we don't have step sizes
   bool hasGStep = !dgrad.IsAnalytical();
   // print.Trace("Gradient ", dgrad.Vec(), "G2", dgrad.G2());
   // the gradient present in the state must have G2, otherwise something is wrong
   if (!dgrad.HasG2()) {
      print.Error("Input gradient to NG2LS must have G2 already computed");
      return st;
   }

   do {
      iterate = false;
      for (unsigned int i = 0; i < n; i++) {

         if (dgrad.G2()(i) <= 0) {

            // check also the gradient: if it is zero I can skip the parameter

            if (std::fabs(dgrad.Vec()(i)) < prec.Eps() && std::fabs(dgrad.G2()(i)) < prec.Eps())
               continue;
            // if(dgrad.G2()(i) < prec.Eps()) {
            // do line search if second derivative negative
            MnAlgebraicVector step(n);
            MnLineSearch lsearch;

            // when using an analytical gradient use a dummy value of 1 as step size;
            // maybe could do better using the user-given parameter step sizes
            // (tested: using the inverse of G2() gives worse behaviour)
            if (dgrad.Vec()(i) < 0)
               step(i) = (hasGStep) ? dgrad.Gstep()(i) : 1;
            else
               step(i) = (hasGStep) ? -dgrad.Gstep()(i) : -1;

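            // expected first-order change of the function along the trial step (slope passed to the line search)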
            double gdel = step(i) * dgrad.Vec()(i);

            // if using second-derivative information
            // double g2del = step(i) * step(i) * dgrad.G2()(i);

            print.Debug("Iter", iter, "param", i, pa.Vec()(i), "grad2", dgrad.G2()(i), "grad", dgrad.Vec()(i),
                        "grad step", step(i), " gdel ", gdel);

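            // minimize along the chosen direction: the returned point gives the optimal step
            // fraction in X() and the function value at that point in Y()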
            auto pp = lsearch(fcn, pa, step, gdel, prec);

            print.Debug("Line search result", pp.X(), "f(0)", pa.Fval(), "f(1)", pp.Y());

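            // scale the trial step by the line-search minimum and move the parameters there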
            step *= pp.X();
            pa = MinimumParameters(pa.Vec() + step, pp.Y());

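            // recompute the gradient at the updated point (the previous gradient is passed as the starting estimate)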
            dgrad = gc(pa, dgrad);
            // re-compute also G2 if needed
            if (!dgrad.HasG2()) {
               // no need to compute the full Hessian here, only G2
               print.Debug("Compute G2 at the new point", pa.Vec());
               MnAlgebraicVector g2(n);
               bool ret = gc.G2(pa, g2);
               if (!ret) {
                  print.Error("Cannot compute G2");
                  assert(false);
                  return st;
               }

               dgrad = FunctionGradient(dgrad.Grad(), g2);
            }

            print.Debug("New result after Line search - iter", iter, "param", i, pa.Vec()(i), "step", step(i),
                        "new grad2", dgrad.G2()(i), "new grad", dgrad.Vec()(i));

            iterate = true;
            break;
         }
      }
   } while (iter++ < 2 * n && iterate);

   // even if we computed the Hessian, it is still better to use only the diagonal part, the G2
   print.Debug("Approximate new covariance after NegativeG2LS using only G2");
   MnAlgebraicSymMatrix mat(n);
   for (unsigned int i = 0; i < n; i++) {
      mat(i, i) = std::fabs(dgrad.G2()(i)) > prec.Eps() ? 1. / dgrad.G2()(i)
                                                        : 1; // use an arbitrary value (e.g. 1)
   }

   MinimumError err(mat, 1.);
   double edm = VariableMetricEDMEstimator().Estimate(dgrad, err);

   if (edm < 0) {
      err = MinimumError(mat, MinimumError::MnNotPosDef());
   }

   return MinimumState(pa, err, dgrad, edm, fcn.NumOfCalls());
}

bool NegativeG2LineSearch::HasNegativeG2(const FunctionGradient &grad, const MnMachinePrecision & /* prec */) const
{
   // check if the function gradient has any G2 component which is negative

   for (unsigned int i = 0; i < grad.Vec().size(); i++)
      if (grad.G2()(i) <= 0) {
         return true;
      }

   return false;
}

} // namespace Minuit2

} // namespace ROOT
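
As an orientation for readers of this file, the short sketch below shows how the class is typically driven from a seed generator or minimum builder: test the current gradient with HasNegativeG2 and, if any diagonal second derivative is non-positive, run the search to obtain a corrected MinimumState. The objects mfcn (MnFcn), st (a mutable MinimumState), gcalc (a GradientCalculator implementation) and prec (MnMachinePrecision) are assumed to be set up by the caller; the snippet is illustrative and not part of NegativeG2LineSearch.cxx.

// Illustrative usage sketch (not part of this file); mfcn, st, gcalc and prec
// are assumed to exist in the caller's scope.
NegativeG2LineSearch ng2ls;
if (ng2ls.HasNegativeG2(st.Gradient(), prec)) {
   // replace the state by one where all diagonal second derivatives are positive,
   // so that the seed covariance (built from 1/G2) is usable
   st = ng2ls(mfcn, st, gcalc, prec);
}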