From $ROOTSYS/tutorials/matrix/solveLinear.C

//Author: Eddy Offermann
// This macro shows several ways to perform a linear least-squares
// analysis. To keep things simple we fit a straight line to 4
// data points.
// The first 4 methods use the linear algebra package to find
//  the vector x that minimizes (A x - b)^T (A x - b), where A and b
//  are constructed from the data points and the functional form:
//
//  1. Normal equations:
//   Expanding the expression (A x - b)^T (A x - b) and setting its
//   derivative wrt x to zero, 2 A^T (A x - b) = 0, leads to the
//   "Normal Equations":
//   A^T A x = A^T b, where A^T A is a positive definite matrix, so
//   a Cholesky decomposition scheme can be used to calculate its inverse.
//   This leads to the solution x = (A^T A)^-1 A^T b. All this is done in
//   the routine NormalEqn. We made it a bit more complicated by giving the
//   data points weights.
//   Numerically this is not the best way to proceed: the condition number
//   of A^T A is the square of the condition number of A, which makes the
//   inversion more delicate.
//
//  2. SVD
//   One can show that solving A x = b for x, with A of size (m x n) and
//   m > n, through a Singular Value Decomposition is equivalent to
//   minimizing (A x - b)^T (A x - b).
//   Numerically, this is the most stable of the five methods.
//
//  3. Pseudo Inverse
//   Here we calculate the generalized matrix inverse ("pseudo inverse") by
//   solving A X = Unit for the matrix X through an SVD. The formal
//   expression for it is X = (A^T A)^-1 A^T. Then we multiply it by b.
//   Numerically, this is not as accurate as 2 and not as fast. In general
//   it is not a good idea to solve a set of linear equations through a
//   matrix inversion.
//
//  4. Pseudo Inverse, brute force
//   The pseudo inverse is calculated brute force through a series of matrix
//   manipulations. It nicely shows some operations in the matrix package,
//   but is otherwise a big "no no".
//
//  5. Least-squares analysis with Minuit
//   An objective function L is minimized by Minuit, where
//    L = sum_i ( (y_i - c_0 - c_1 * x_i) / e_i )^2
//   Minuit calculates the derivatives of L wrt c_0 and c_1 numerically;
//   it has not been told that L is quadratic in the parameters
//   c_0 and c_1.
//   For ill-conditioned linear problems it is better to exploit the
//   linearity of the fit, as in method 2.
//
// Another interesting point is the way we assign data to the vectors and
// matrices through adoption: data is handed to the objects without
// physically moving any bytes around.
//
//   USAGE
//   -----
// This macro can be executed via CINT or via ACLiC
// - via CINT, do
//    root > .x solveLinear.C
// - via ACLIC
//    root > gSystem->Load("libMatrix");
//    root > gSystem->Load("libGpad");
//    root > .x solveLinear.C+
//
#ifndef __CINT__
#include "Riostream.h"
#include "TMatrixD.h"
#include "TVectorD.h"
#include "TGraphErrors.h"
#include "TDecompChol.h"
#include "TDecompSVD.h"
#include "TF1.h"
#endif


void solveLinear(Double_t eps = 1.e-12)
{
#ifdef __CINT__
  gSystem->Load("libMatrix");
#endif
  cout << "Perform the fit  y = c0 + c1 * x in four different ways" << endl;

  const Int_t nrVar  = 2;
  const Int_t nrPnts = 4;

  Double_t ax[] = {0.0,1.0,2.0,3.0};
  Double_t ay[] = {1.4,1.5,3.7,4.1};
  Double_t ae[] = {0.5,0.2,1.0,0.5};

  // Make the vectors 'Use' the data: they are not copied, the vector data
  // pointer is just set appropriately

  TVectorD x; x.Use(nrPnts,ax);
  TVectorD y; y.Use(nrPnts,ay);
  TVectorD e; e.Use(nrPnts,ae);
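
  // A quick (illustrative) demonstration that adoption made no copy:
  // the vector's data pointer is the original C array itself.
  if (x.GetMatrixArray() == ax)
    cout << "The vectors adopted the C arrays; no data was copied" << endl;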

  // Design matrix for y = c0 + c1 * x: column 0 is all ones, column 1
  // holds the x values
  TMatrixD A(nrPnts,nrVar);
  TMatrixDColumn(A,0) = 1.0;
  TMatrixDColumn(A,1) = x;

  cout << " - 1. solve through Normal Equations" << endl;

  const TVectorD c_norm = NormalEqn(A,y,e);
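
  // For illustration, the same solution computed by hand (a minimal
  // sketch, not necessarily NormalEqn's exact internals): fold in the
  // weights, form A^T A and A^T b, and solve with a Cholesky
  // decomposition. The names An, yn, AtAn, Atb and c_chol are local to
  // this sketch.
  TMatrixD An = A;
  TVectorD yn = y;
  for (Int_t ir = 0; ir < nrPnts; ir++) {
    TMatrixDRow(An,ir) *= 1/e(ir);                 // weight each row by 1/error
    yn(ir) /= e(ir);
  }
  TMatrixDSym AtAn(TMatrixDSym::kAtA,An);          // A^T A, positive definite
  const TVectorD Atb = TMatrixD(TMatrixD::kTransposed,An)*yn; // A^T b
  TDecompChol chol(AtAn);
  Bool_t okChol;
  const TVectorD c_chol = chol.Solve(Atb,okChol);
  VerifyVectorIdentity(c_norm,c_chol,0,eps);       // should match NormalEqn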

  cout << " - 2. solve through SVD" << endl;
  // numerically the preferred method

  // first fold the weights into A and y
  TMatrixD Aw = A;
  TVectorD yw = y;
  for (Int_t irow = 0; irow < A.GetNrows(); irow++) {
    TMatrixDRow(Aw,irow) *= 1/e(irow);
    yw(irow) /= e(irow);
  }

  TDecompSVD svd(Aw);
  Bool_t ok;
  const TVectorD c_svd = svd.Solve(yw,ok);
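
  // The decomposition also makes the conditioning explicit: the singular
  // values are sorted in decreasing order, and sig_max/sig_min is the
  // condition number of Aw (illustrative print-out only).
  const TVectorD sig = svd.GetSig();
  cout << "   condition number of Aw : " << sig(0)/sig(sig.GetNrows()-1) << endl;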

  cout << " - 3. solve with pseudo inverse" << endl;

  const TMatrixD pseudo1  = svd.Invert();
  TVectorD c_pseudo1 = yw;
  c_pseudo1 *= pseudo1;   // v *= M computes v = M * v, here of length nrVar
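
  // Sanity check (sketch): since X = (A^T A)^-1 A^T, the product X * Aw
  // should be the nrVar x nrVar unit matrix up to rounding.
  TMatrixD prod = pseudo1*Aw;            // (2 x 4) * (4 x 2) = (2 x 2)
  TMatrixD unit(TMatrixD::kUnit,prod);   // unit matrix of the same shape
  VerifyMatrixIdentity(prod,unit,0,eps);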

  cout << " - 4. solve with pseudo inverse, calculated brute force" << endl;

  TMatrixDSym AtA(TMatrixDSym::kAtA,Aw);   // AtA = Aw^T Aw
  // Note that Invert() and T() both operate in place: after the next line
  // AtA holds its own inverse and Aw is transposed. Neither is needed in
  // its original form below, so this is harmless here.
  const TMatrixD pseudo2 = AtA.Invert() * Aw.T();
  TVectorD c_pseudo2 = yw;
  c_pseudo2 *= pseudo2;
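
  // The brute-force pseudo inverse should agree with the SVD-based one of
  // method 3 up to rounding; VerifyMatrixIdentity is the matrix analogue
  // of the VerifyVectorIdentity checks used below (illustrative only).
  VerifyMatrixIdentity(pseudo1,pseudo2,0,eps);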

  cout << " - 5. Minuit through TGraph" << endl;

  TGraphErrors *gr = new TGraphErrors(nrPnts,ax,ay,0,ae);
  TF1 *f1 = new TF1("f1","pol1",0,5);
  gr->Fit("f1","Q");
  TVectorD c_graph(nrVar);
  c_graph(0) = f1->GetParameter(0);
  c_graph(1) = f1->GetParameter(1);
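
  // A bonus of the Minuit route: the fit also provides parameter
  // uncertainties; here we simply echo what the fitted TF1 stores.
  cout << "   c0 = " << f1->GetParameter(0) << " +- " << f1->GetParError(0)
       << ", c1 = " << f1->GetParameter(1) << " +- " << f1->GetParError(1)
       << endl;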

  // Check that all five solutions agree within a certain tolerance.
  // The default of 1e-12 is somewhat arbitrary; it turns out that the
  // TGraph fit differs from the others by a few times 1e-13.

  Bool_t same = kTRUE;
  same &= VerifyVectorIdentity(c_norm,c_svd,0,eps);
  same &= VerifyVectorIdentity(c_norm,c_pseudo1,0,eps);
  same &= VerifyVectorIdentity(c_norm,c_pseudo2,0,eps);
  same &= VerifyVectorIdentity(c_norm,c_graph,0,eps);
  if (same)
    cout << " All solutions are the same within tolerance of " << eps << endl;
  else
    cout << " Some solutions differ more than the allowed tolerance of " << eps << endl;
}