GSLNLSMinimizer.cxx
// @(#)root/mathmore:$Id$
// Author: L. Moneta Wed Dec 20 17:16:32 2006

/**********************************************************************
 *                                                                    *
 * Copyright (c) 2006 LCG ROOT Math Team, CERN/PH-SFT                 *
 *                                                                    *
 **********************************************************************/

// Implementation file for class GSLNLSMinimizer

#include "Math/GSLNLSMinimizer.h"

#include "Math/MinimTransformFunction.h"
#include "Math/MultiNumGradFunction.h"

#include "Math/Error.h"
#include "GSLMultiFit.h"
#include "gsl/gsl_errno.h"


#include "Math/FitMethodFunction.h"
//#include "Math/Derivator.h"

#include <iostream>
#include <iomanip>
#include <cassert>

namespace ROOT {

   namespace Math {


// Class implementing the transformation of the chi2 function;
// in general this could be templated on the fit method function type.

class FitTransformFunction : public FitMethodFunction {

public:

   FitTransformFunction(const FitMethodFunction & f, const std::vector<EMinimVariableType> & types, const std::vector<double> & values,
                        const std::map<unsigned int, std::pair<double, double> > & bounds) :
      FitMethodFunction( f.NDim(), f.NPoints() ),
      fOwnTransformation(true),
      fFunc(f),
      fTransform(new MinimTransformFunction( new MultiNumGradFunction(f), types, values, bounds) ),
      fGrad( std::vector<double>(f.NDim() ) )
   {
      // Constructor: MinimTransformFunction must be given a new pointer, which it then manages itself.
      // A gradient-capable wrapper is passed although the gradient is not used by that class.
   }

   FitTransformFunction(const FitMethodFunction & f, MinimTransformFunction *transFunc ) :
      FitMethodFunction( f.NDim(), f.NPoints() ),
      fOwnTransformation(false),
      fFunc(f),
      fTransform(transFunc),
      fGrad( std::vector<double>(f.NDim() ) )
   {
      // Constructor from an already existing transformation object.
      // The transformation object is not owned (and not deleted) by this class.
   }

   ~FitTransformFunction() {
      if (fOwnTransformation) {
         assert(fTransform);
         delete fTransform;
      }
   }

   // re-implement data element
   virtual double DataElement(const double * x, unsigned i, double * g = 0) const {
      // transform from x internal to x external
      const double * xExt = fTransform->Transformation(x);
      if ( g == 0) return fFunc.DataElement( xExt, i );
      // use gradient
      double val = fFunc.DataElement( xExt, i, &fGrad[0]);
      // transform the gradient
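      // (chain rule: the internal gradient is obtained from the external one by applying
      //  the Jacobian of the internal -> external variable mapping)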
      fTransform->GradientTransformation( x, &fGrad.front(), g);
      return val;
   }


   IMultiGenFunction * Clone() const {
      // not supported
      return 0;
   }

   // dimension (this is the number of free dimensions)
   unsigned int NDim() const {
      return fTransform->NDim();
   }

   unsigned int NTot() const {
      return fTransform->NTot();
   }

   // forward the transformation functions
   const double * Transformation( const double * x) const { return fTransform->Transformation(x); }


   /// inverse transformation (external -> internal)
   void InvTransformation(const double * xext, double * xint) const { fTransform->InvTransformation(xext,xint); }

   /// inverse transformation for steps (external -> internal) at external point x
   void InvStepTransformation(const double * x, const double * sext, double * sint) const { fTransform->InvStepTransformation(x,sext,sint); }

   /// transform gradient vector (external -> internal) at internal point x
   void GradientTransformation(const double * x, const double *gext, double * gint) const { fTransform->GradientTransformation(x,gext,gint); }

   void MatrixTransformation(const double * x, const double *cint, double * cext) const { fTransform->MatrixTransformation(x,cint,cext); }

private:

   // objects of this class are not meant to be copied or assigned
   FitTransformFunction(const FitTransformFunction& rhs);
   FitTransformFunction& operator=(const FitTransformFunction& rhs);

   double DoEval(const double * x) const {
      return fFunc( fTransform->Transformation(x) );
   }

   bool fOwnTransformation;
   const FitMethodFunction & fFunc;     // reference to the original fit method function
   MinimTransformFunction * fTransform; // pointer to the transformation function
   mutable std::vector<double> fGrad;   // cached vector of gradient values

};
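
// The wrapping performed in GSLNLSMinimizer::Minimize() below is, schematically:
//   chi2 (FitMethodFunction) -> MultiNumGradFunction (numerical gradient)
//     -> MinimTransformFunction (bounds / fixed parameters) -> FitTransformFunction,
// so that the GSL solver always works on an unbounded set of free parameters.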


// GSLNLSMinimizer implementation

GSLNLSMinimizer::GSLNLSMinimizer( int type ) :
   //fNFree(0),
   fSize(0),
   fChi2Func(0)
{
   // Constructor implementation: create the GSLMultiFit wrapper object
   const gsl_multifit_fdfsolver_type * gsl_type = 0; // use default type defined in GSLMultiFit
   if (type == 1) gsl_type = gsl_multifit_fdfsolver_lmsder; // scaled lmder version
   if (type == 2) gsl_type = gsl_multifit_fdfsolver_lmder;  // unscaled version

   fGSLMultiFit = new GSLMultiFit( gsl_type );

   fEdm = -1;

   // default tolerance and max iterations
   int niter = ROOT::Math::MinimizerOptions::DefaultMaxIterations();
   if (niter <= 0) niter = 100;
   SetMaxIterations(niter);

   fLSTolerance = ROOT::Math::MinimizerOptions::DefaultTolerance();
   if (fLSTolerance <= 0) fLSTolerance = 0.0001; // default internal value

   SetPrintLevel(ROOT::Math::MinimizerOptions::DefaultPrintLevel());
}

GSLNLSMinimizer::~GSLNLSMinimizer () {
   assert(fGSLMultiFit != 0);
   delete fGSLMultiFit;
}



void GSLNLSMinimizer::SetFunction(const ROOT::Math::IMultiGenFunction & func) {
   // Set the function to be minimized.
   // A vector of residual functions must be created and passed to the GSL multifit solver;
   // for now only the chi2 (FitMethodFunction) implementation is supported.

   // call the base class method: it clones the function and sets the number of dimensions
   BasicMinimizer::SetFunction(func);
   // need to check whether the function can be used
   const ROOT::Math::FitMethodFunction * chi2Func = dynamic_cast<const ROOT::Math::FitMethodFunction *>(ObjFunction());
   if (chi2Func == 0) {
      if (PrintLevel() > 0) std::cout << "GSLNLSMinimizer: Invalid function set - only Chi2Func supported" << std::endl;
      return;
   }
   fSize = chi2Func->NPoints();
   fNFree = NDim();

   // use a vector by value
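   // (one LSResidualFunc per data point: the GSL least-squares solver works on the vector of
   //  individual residuals provided by FitMethodFunction::DataElement, not on the summed chi2)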
   fResiduals.reserve(fSize);
   for (unsigned int i = 0; i < fSize; ++i) {
      fResiduals.push_back( LSResidualFunc(*chi2Func, i) );
   }
   // keep a pointer to the chi2 function
   fChi2Func = chi2Func;
}

void GSLNLSMinimizer::SetFunction(const ROOT::Math::IMultiGradFunction & func ) {
   // Set the function to be minimized using the gradient interface.
   // Not supported yet: fall back to the generic SetFunction overload.
   return SetFunction(static_cast<const ROOT::Math::IMultiGenFunction &>(func) );
}


bool GSLNLSMinimizer::Minimize() {
   // set the initial parameters of the minimizer
   int debugLevel = PrintLevel();


   assert (fGSLMultiFit != 0);
   if (fResiduals.size() != fSize || fChi2Func == 0) {
      MATH_ERROR_MSG("GSLNLSMinimizer::Minimize","Function has not been set");
      return false;
   }

   unsigned int npar = NPar();
   unsigned int ndim = NDim();
   if (npar == 0 || npar < ndim) {
      MATH_ERROR_MSGVAL("GSLNLSMinimizer::Minimize","Wrong number of parameters",npar);
      return false;
   }

   // set the residual functions and check whether a transformation is needed
   std::vector<double> startValues;

   // the transformation needs a gradient function, so delegate fChi2Func to a numerical-gradient adapter
   MultiNumGradFunction * gradFunction = new MultiNumGradFunction(*fChi2Func);
   MinimTransformFunction * trFuncRaw = CreateTransformation(startValues, gradFunction);
   // if a transformation is returned, wrap it in a FitTransformFunction which is then used by the residual functions
   std::unique_ptr<FitTransformFunction> trFunc;
   if (trFuncRaw) {
      trFunc.reset(new FitTransformFunction(*fChi2Func, trFuncRaw) );
      //FitTransformationFunction *trFunc = new FitTransformFunction(*fChi2Func, trFuncRaw);
      for (unsigned int ires = 0; ires < fResiduals.size(); ++ires) {
         fResiduals[ires] = LSResidualFunc(*trFunc, ires);
      }

      assert(npar == trFunc->NTot() );
   }
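
   // From here on the solver iterates in internal coordinates; when a transformation is
   // present, the best-fit point and the covariance matrix are mapped back to the external
   // (user) coordinates before being reported below.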

   if (debugLevel >= 1) std::cout << "Minimize using GSLNLSMinimizer " << std::endl;

//   // use a global step size = min (step vectors)
//   double stepSize = 1;
//   for (unsigned int i = 0; i < fSteps.size(); ++i)
//      //stepSize += fSteps[i];
//      if (fSteps[i] < stepSize) stepSize = fSteps[i];

   int iret = fGSLMultiFit->Set( fResiduals, &startValues.front() );
   if (iret) {
      MATH_ERROR_MSGVAL("GSLNLSMinimizer::Minimize","Error setting the residual functions ",iret);
      return false;
   }

   if (debugLevel >= 1) std::cout << "GSLNLSMinimizer: " << fGSLMultiFit->Name() << " - start iterating......... " << std::endl;

   // start the iteration
   unsigned int iter = 0;
   int status;
   bool minFound = false;
   do {
      status = fGSLMultiFit->Iterate();

      if (debugLevel >= 1) {
         std::cout << "----------> Iteration " << iter << " / " << MaxIterations() << " status " << gsl_strerror(status) << std::endl;
         const double * x = fGSLMultiFit->X();
         if (trFunc.get()) x = trFunc->Transformation(x);
         int pr = std::cout.precision(18);
         std::cout << " FVAL = " << (*fChi2Func)(x) << std::endl;
         std::cout.precision(pr);
         std::cout << " X Values : ";
         for (unsigned int i = 0; i < NDim(); ++i)
            std::cout << " " << VariableName(i) << " = " << X()[i];
         std::cout << std::endl;
      }

      if (status) break;

      // check also the delta in X()
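      // (GSL delta test: convergence when |dx_i| < epsabs + epsrel*|x_i| for every parameter;
      //  here both thresholds are set to the minimizer tolerance)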
      status = fGSLMultiFit->TestDelta( Tolerance(), Tolerance() );
      if (status == GSL_SUCCESS) {
         minFound = true;
      }

      // double-check with the gradient
      int status2 = fGSLMultiFit->TestGradient( Tolerance() );
      if ( minFound && status2 != GSL_SUCCESS) {
         // check now the edm
         fEdm = fGSLMultiFit->Edm();
         if (fEdm > Tolerance() ) {
            // continue the iteration
            status = status2;
            minFound = false;
         }
      }

      if (debugLevel >= 1) {
         std::cout << " after Gradient and Delta tests: " << gsl_strerror(status);
         if (fEdm > 0) std::cout << ", edm is: " << fEdm;
         std::cout << std::endl;
      }

      iter++;

   }
   while (status == GSL_CONTINUE && iter < MaxIterations() );

   // check the edm
   fEdm = fGSLMultiFit->Edm();
   if ( fEdm < Tolerance() ) {
      minFound = true;
   }

   // save the state with the parameter values and the function value
   const double * x = fGSLMultiFit->X();
   if (x == 0) return false;

   SetFinalValues(x);

   SetMinValue( (*fChi2Func)(x) );
   fStatus = status;

   fErrors.resize(NDim());

   // get the errors from the covariance matrix
   const double * cov = fGSLMultiFit->CovarMatrix();
   if (cov) {

      fCovMatrix.resize(ndim*ndim);

      if (trFunc.get() ) {
         trFunc->MatrixTransformation(x, fGSLMultiFit->CovarMatrix(), &fCovMatrix[0] );
      }
      else {
         std::copy(cov, cov + ndim*ndim, fCovMatrix.begin() );
      }

      for (unsigned int i = 0; i < ndim; ++i)
         fErrors[i] = std::sqrt(fCovMatrix[i*ndim + i]);
   }

   if (minFound) {

      if (debugLevel >= 1) {
         std::cout << "GSLNLSMinimizer: Minimum Found" << std::endl;
         int pr = std::cout.precision(18);
         std::cout << "FVAL = " << MinValue() << std::endl;
         std::cout << "Edm = " << fEdm << std::endl;
         std::cout.precision(pr);
         std::cout << "NIterations = " << iter << std::endl;
         std::cout << "NFuncCalls = " << fChi2Func->NCalls() << std::endl;
         for (unsigned int i = 0; i < NDim(); ++i)
            std::cout << std::setw(12) << VariableName(i) << " = " << std::setw(12) << X()[i] << " +/- " << std::setw(12) << fErrors[i] << std::endl;
      }

      return true;
   }
   else {
      if (debugLevel >= 1) {
         std::cout << "GSLNLSMinimizer: Minimization did not converge" << std::endl;
         std::cout << "FVAL = " << MinValue() << std::endl;
         std::cout << "Edm = " << fGSLMultiFit->Edm() << std::endl;
         std::cout << "Niterations = " << iter << std::endl;
      }
      return false;
   }
   return false;
}

const double * GSLNLSMinimizer::MinGradient() const {
   // return gradient (internal values)
   return fGSLMultiFit->Gradient();
}


double GSLNLSMinimizer::CovMatrix(unsigned int i , unsigned int j ) const {
   // return the covariance matrix element
   unsigned int ndim = NDim();
   if ( fCovMatrix.size() == 0) return 0;
   if (i >= ndim || j >= ndim) return 0;
   return fCovMatrix[i*ndim + j];
}

int GSLNLSMinimizer::CovMatrixStatus( ) const {
   // Return the covariance matrix status: 0 = not computed,
   // 1 = computed but approximate because the minimum is not valid, 3 = accurate
   if ( fCovMatrix.size() == 0) return 0;
   // case where the minimization did not finish correctly
   if (fStatus != GSL_SUCCESS) return 1;
   return 3;
}


   } // end namespace Math

} // end namespace ROOT
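
// ---------------------------------------------------------------------------
// Usage sketch (a minimal, illustrative example): the minimizer can be created
// directly as below, or through the ROOT::Math factory where it is normally
// registered under the name "GSLMultiFit". Only objective functions implementing
// FitMethodFunction are accepted (see SetFunction above); the parameter names,
// step sizes and tolerance used here are arbitrary choices.

#include <string>
#include <vector>

inline bool exampleGSLNLSFit(const ROOT::Math::FitMethodFunction & chi2,
                             const std::vector<double> & start)
{
   ROOT::Math::GSLNLSMinimizer minimizer(1);   // 1 = scaled Levenberg-Marquardt (lmsder)
   minimizer.SetMaxIterations(200);
   minimizer.SetTolerance(1.e-4);
   minimizer.SetPrintLevel(1);

   // declare the free parameters with a start value and a step size
   for (unsigned int i = 0; i < chi2.NDim(); ++i)
      minimizer.SetVariable(i, "p" + std::to_string(i), start[i], 0.01);

   minimizer.SetFunction(chi2);                // must implement FitMethodFunction
   return minimizer.Minimize();
}
// ---------------------------------------------------------------------------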