solveLinear.C
/// \file
/// \ingroup tutorial_matrix
/// \notebook -nodraw
/// This macro shows several ways to perform a linear least-squares
/// analysis. To keep things simple we fit a straight line to 4
/// data points.
/// The first 4 methods use the linear algebra package to find
/// x such that \f$ (A x - b)^T (A x - b) \f$ is minimal, where A and b
/// are calculated from the data points and the functional expression:
///
/// 1. Normal equations:
/// Expanding the expression \f$ (A x - b)^T (A x - b) \f$ and taking the
/// derivative with respect to x leads to the "Normal Equations":
/// \f$ A^T A x = A^T b \f$, where \f$ A^T A \f$ is a positive definite matrix. Therefore,
/// a Cholesky decomposition scheme can be used to calculate its inverse.
/// This leads to the solution \f$ x = (A^T A)^{-1} A^T b \f$. All this is done in
/// the routine NormalEqn. We made it a bit more complicated by giving the
/// data weights.
/// Numerically this is not the best way to proceed because the condition
/// number of \f$ A^T A \f$ is the square of that of A, making the inversion
/// more difficult.
///
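/// As an aside, the Cholesky route can also be spelled out explicitly with
/// TDecompChol. A minimal sketch, not part of the macro below and assuming the
/// weighted design matrix Aw and right-hand side yw built further down:
/// ~~~{.cpp}
/// TMatrixDSym AtA(TMatrixDSym::kAtA, Aw);                    // A^T A, symmetric positive definite
/// TVectorD Atb = TMatrixD(TMatrixD::kTransposed, Aw) * yw;   // A^T b
/// TDecompChol chol(AtA);
/// Bool_t ok;
/// const TVectorD c_chol = chol.Solve(Atb, ok);               // solves A^T A c = A^T b
/// ~~~
///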
/// 2. SVD
/// One can show that solving \f$ A x = b \f$ for x with A of size \f$ m \times n \f$
/// and \f$ m > n \f$ through a Singular Value Decomposition is equivalent to minimizing
/// \f$ (A x - b)^T (A x - b) \f$. Numerically, this is the most stable method of all 5.
///
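/// The singular values also tell how well-conditioned the problem is. A minimal
/// sketch, not part of the macro below, using the weighted design matrix Aw
/// built further down:
/// ~~~{.cpp}
/// TDecompSVD svd(Aw);
/// const TVectorD sig = svd.GetSig();   // singular values, in decreasing order
/// cout << "condition number ~ " << sig(0) / sig(sig.GetNrows()-1) << endl;
/// ~~~
///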
/// 3. Pseudo Inverse
/// Here we calculate the generalized matrix inverse ("pseudo inverse") by
/// solving \f$ A X = Unit \f$ for matrix \f$ X \f$ through an SVD. The formal expression
/// for X is \f$ X = (A^T A)^{-1} A^T \f$. Then we multiply it by \f$ b \f$.
/// Numerically, this is not as good as 2 and not as fast. In general it is not a
/// good idea to solve a set of linear equations with a matrix inversion.
///
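/// A quick sanity check is to multiply the pseudo inverse back onto the design
/// matrix, which should give (nearly) the unit matrix. A minimal sketch, not
/// part of the macro below, with names as in the code:
/// ~~~{.cpp}
/// const TMatrixD pseudo1 = svd.Invert();   // pseudo inverse of Aw
/// TMatrixD check = pseudo1 * Aw;
/// check.Print();                           // should be close to the 2x2 unit matrix
/// ~~~
///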
/// 4. Pseudo Inverse, brute force
/// The pseudo inverse is calculated brute force through a series of matrix
/// manipulations. It shows nicely some operations in the matrix package,
/// but is otherwise a big "no no".
///
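/// The same chain can also be written with the two-operand matrix constructors.
/// A minimal sketch, not part of the macro below, with names as in the code:
/// ~~~{.cpp}
/// TMatrixD AtA(Aw, TMatrixD::kTransposeMult, Aw);   // A^T A
/// TMatrixD AtAinv(TMatrixD::kInverted, AtA);        // (A^T A)^-1
/// TMatrixD Awt(TMatrixD::kTransposed, Aw);          // A^T
/// const TMatrixD pseudo = AtAinv * Awt;             // (A^T A)^-1 A^T
/// ~~~
///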
/// 5. Least-squares analysis with Minuit
/// An objective function L is minimized by Minuit, where
/// \f$ L = \sum_i \left( \frac{y_i - c_0 - c_1 x_i}{e_i} \right)^2 \f$.
/// Minuit will calculate numerically the derivatives of L w.r.t. c_0 and c_1.
/// It has not been told that these derivatives are linear in the parameters
/// c_0 and c_1.
/// For ill-conditioned linear problems it is better to use the fact that it is
/// a linear fit, as in method 2.
///
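/// If the parameter uncertainties and the chi-squared are also of interest, the
/// fit result can be retrieved with the "S" option. A minimal sketch, not part
/// of the macro below, with gr and f1 as in the code:
/// ~~~{.cpp}
/// TFitResultPtr r = gr->Fit("f1","QS");   // "S": return the full fit result
/// cout << "c0 = " << r->Parameter(0) << " +- " << r->ParError(0)
///      << "  chi2/ndf = " << r->Chi2() / r->Ndf() << endl;
/// ~~~
///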
/// Another interesting thing is the way we assign data to the vectors and
/// matrices through "adoption".
/// This allows data assignment without physically moving bytes around.
///
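/// After adoption the vector simply points into the user-supplied array, so a
/// change to the array is immediately visible through the vector. A minimal
/// sketch, not part of the macro below:
/// ~~~{.cpp}
/// Double_t data[4] = {1.0, 2.0, 3.0, 4.0};
/// TVectorD v; v.Use(4, data);   // v adopts the memory of data, no copy
/// data[0] = 42.0;
/// cout << v(0) << endl;         // prints 42
/// ~~~
///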
/// #### USAGE
///
/// This macro can be executed via CLING or via ACLiC
/// - via the interpreter, do
/// ~~~{.cpp}
/// root > .x solveLinear.C
/// ~~~
/// - via ACLiC, do
/// ~~~{.cpp}
/// root > gSystem->Load("libMatrix");
/// root > gSystem->Load("libGpad");
/// root > .x solveLinear.C+
/// ~~~
///
/// \macro_output
/// \macro_code
///
/// \author Eddy Offermann

#include "Riostream.h"
#include "TMatrixD.h"
#include "TMatrixDSym.h"
#include "TVectorD.h"
#include "TGraphErrors.h"
#include "TDecompChol.h"
#include "TDecompSVD.h"
#include "TF1.h"

void solveLinear(Double_t eps = 1.e-12)
{
   cout << "Perform the fit y = c0 + c1 * x in five different ways" << endl;

   const Int_t nrVar  = 2;
   const Int_t nrPnts = 4;

   Double_t ax[] = {0.0,1.0,2.0,3.0};
   Double_t ay[] = {1.4,1.5,3.7,4.1};
   Double_t ae[] = {0.5,0.2,1.0,0.5};

   // Make the vectors "Use" the data: nothing is copied, the vector data
   // pointer is just set appropriately

   TVectorD x; x.Use(nrPnts,ax);
   TVectorD y; y.Use(nrPnts,ay);
   TVectorD e; e.Use(nrPnts,ae);

   TMatrixD A(nrPnts,nrVar);
   TMatrixDColumn(A,0) = 1.0;   // column 0: the constant term
   TMatrixDColumn(A,1) = x;     // column 1: the x values

   cout << " - 1. solve through Normal Equations" << endl;

   const TVectorD c_norm = NormalEqn(A,y,e);

   cout << " - 2. solve through SVD" << endl;
   // numerically preferred method

   // first bring the weights in place
   TMatrixD Aw = A;
   TVectorD yw = y;
   for (Int_t irow = 0; irow < A.GetNrows(); irow++) {
      TMatrixDRow(Aw,irow) *= 1/e(irow);
      yw(irow) /= e(irow);
   }

   TDecompSVD svd(Aw);
   Bool_t ok;
   const TVectorD c_svd = svd.Solve(yw,ok);

   cout << " - 3. solve with pseudo inverse" << endl;

   const TMatrixD pseudo1 = svd.Invert();
   TVectorD c_pseudo1 = yw;
   c_pseudo1 *= pseudo1;

   cout << " - 4. solve with pseudo inverse, calculated brute force" << endl;

   TMatrixDSym AtA(TMatrixDSym::kAtA,Aw);   // A^T A, built explicitly for the inversion below
   const TMatrixD pseudo2 = AtA.Invert() * Aw.T();
   TVectorD c_pseudo2 = yw;
   c_pseudo2 *= pseudo2;

   cout << " - 5. Minuit through TGraph" << endl;

   TGraphErrors *gr = new TGraphErrors(nrPnts,ax,ay,0,ae);
   TF1 *f1 = new TF1("f1","pol1",0,5);
   gr->Fit("f1","Q");
   TVectorD c_graph(nrVar);
   c_graph(0) = f1->GetParameter(0);
   c_graph(1) = f1->GetParameter(1);

   // Check that all solutions are identical within a certain
   // tolerance. The 1e-12 is somewhat arbitrary; it turns out that
   // the TGraph fit differs by a few times 1e-13.

   Bool_t same = kTRUE;
   same &= VerifyVectorIdentity(c_norm,c_svd,0,eps);
   same &= VerifyVectorIdentity(c_norm,c_pseudo1,0,eps);
   same &= VerifyVectorIdentity(c_norm,c_pseudo2,0,eps);
   same &= VerifyVectorIdentity(c_norm,c_graph,0,eps);
   if (same)
      cout << " All solutions are the same within tolerance of " << eps << endl;
   else
      cout << " Some solutions differ more than the allowed tolerance of " << eps << endl;
}