double gDefaultErrorDef = ROOT::Math::MinimizerOptions::DefaultErrorDef();
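// cached at load time so the likelihood fits below can detect whether the user
// kept the default ErrorDef and, if so, switch it to the log-likelihood
// convention (0.5)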
Fitter::Fitter(const std::shared_ptr<FitResult> & result) :
   fResult(result)
{
   if (result->fFitFunc) SetFunction(*fResult->fFitFunc);
   if (result->fObjFunc) fObjFunction = fResult->fObjFunc;
   if (result->fFitData) fData = fResult->fFitData;
}
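// Typical use of this constructor (a sketch; prevResult is a hypothetical
// std::shared_ptr<ROOT::Fit::FitResult> kept from an earlier fit):
//
//    ROOT::Fit::Fitter fitter(prevResult);
//    fitter.CalculateHessErrors();   // re-run the error analysis on the old fit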
Fitter::Fitter(const Fitter & rhs)
Fitter & Fitter::operator = (const Fitter &rhs)
{
   if (this == &rhs) return *this;
void Fitter::SetFunction(const IModelFunction & func, bool useGradient)
{
   fUseGradient = useGradient;
   if (fUseGradient) {
      // check whether the function provides a gradient; if so, use it
      const IGradModelFunction * gradFunc = dynamic_cast<const IGradModelFunction *>(&func);
      if (gradFunc) {
         SetFunction(*gradFunc, true);
         return;
      }
      MATH_WARN_MSG("Fitter::SetFunction",
                    "Requested function does not provide gradient - use it as non-gradient function ");
   }
   fUseGradient = false;

   // clone the given function and keep a copy as the fit model function
   fFunc = std::shared_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone()));

   // create the parameter settings from the function
   fConfig.CreateParamsSettings(*fFunc);
}
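// Usage sketch for the overload above (MyModel is a hypothetical class that
// implements ROOT::Math::IParamMultiFunction but provides no gradient):
//
//    MyModel model;
//    ROOT::Fit::Fitter fitter;
//    fitter.SetFunction(model, true);   // warns and falls back to
//                                       // fUseGradient = false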
void Fitter::SetFunction(const IModel1DFunction & func, bool useGradient)
{
   fUseGradient = useGradient;
   if (fUseGradient) {
      // check whether the function provides a gradient; if so, use it
      const IGradModel1DFunction * gradFunc = dynamic_cast<const IGradModel1DFunction *>(&func);
      if (gradFunc) {
         SetFunction(*gradFunc, true);
         return;
      }
      MATH_WARN_MSG("Fitter::SetFunction",
                    "Requested function does not provide gradient - use it as non-gradient function ");
   }
   fUseGradient = false;

   // wrap the 1D parametric function in a multi-dimensional adapter
   fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamFunctionAdapter(func));

   // create the parameter settings from the function
   fConfig.CreateParamsSettings(*fFunc);
}
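// MultiDimParamFunctionAdapter presents the 1D parametric function through the
// multi-dimensional IParamMultiFunction interface, so the rest of the fitting
// code can treat 1D and N-dimensional models uniformly.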
void Fitter::SetFunction(const IGradModelFunction & func, bool useGradient)
{
   fUseGradient = useGradient;

   fFunc = std::shared_ptr<IModelFunction>(dynamic_cast<IGradModelFunction *>(func.Clone()));

   fConfig.CreateParamsSettings(*fFunc);
}
void Fitter::SetFunction(const IGradModel1DFunction & func, bool useGradient)
{
   fUseGradient = useGradient;

   fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamGradFunctionAdapter(func));

   fConfig.CreateParamsSettings(*fFunc);
}
bool Fitter::SetFCN(const ROOT::Math::IMultiGenFunction & fcn, const double * params, unsigned int dataSize, bool chi2fit)
{
   // set the objective function for the fit
   fUseGradient = false;
   unsigned int npar = fcn.NDim();
   if (npar == 0) {
      MATH_ERROR_MSG("Fitter::SetFCN", "FCN function has zero parameters ");
      return false;
   }
   if (params != nullptr)
      fConfig.SetParamsSettings(npar, params);
   else if (fConfig.ParamsSettings().size() != npar) {
      MATH_ERROR_MSG("Fitter::SetFCN", "wrong fit parameter settings");
      return false;
   }

   fBinFit = chi2fit;
   fDataSize = dataSize;

   // keep a cloned copy of the FCN; it is managed together with the minimizer
   fObjFunction = std::unique_ptr<ROOT::Math::IMultiGenFunction>(fcn.Clone());

   // reset any model function and data surviving from a previous fit
   if (fResult && fResult->FittedFunction() == 0 && fFunc) fFunc.reset();
   if (fData) fData.reset();

   return true;
}
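// Rationale for the resets above: once a raw FCN is set, a model function or
// data set kept from a previous fit no longer corresponds to the objective
// function, so the shared pointers are released.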
bool Fitter::SetFCN(const ROOT::Math::IMultiGenFunction &fcn, const IModelFunction & func, const double *params, unsigned int dataSize, bool chi2fit)
{
   if (!SetFCN(fcn, params, dataSize, chi2fit)) return false;
   fFunc = std::shared_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone()));
   return (fFunc != nullptr);
}
bool Fitter::SetFCN(const ROOT::Math::IMultiGradFunction &fcn, const double *params, unsigned int dataSize, bool chi2fit)
{
   if (!SetFCN(static_cast<const ROOT::Math::IMultiGenFunction &>(fcn), params, dataSize, chi2fit))
      return false;
   fUseGradient = true;
   return true;
}
bool Fitter::SetFCN(const ROOT::Math::IMultiGradFunction &fcn, const IModelFunction &func, const double *params, unsigned int dataSize, bool chi2fit)
{
   if (!SetFCN(fcn, params, dataSize, chi2fit)) return false;
   fFunc = std::shared_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone()));
   return (fFunc != nullptr);
}
bool Fitter::SetFCN(const ROOT::Math::FitMethodFunction &fcn, const double *params)
{
   // a least-squares FitMethodFunction implies a chi2 fit
   bool chi2fit = (fcn.Type() == ROOT::Math::FitMethodFunction::kLeastSquare);
   if (!SetFCN(fcn, params, fcn.NPoints(), chi2fit))
      return false;
   fUseGradient = false;
   fFitType = fcn.Type();
   return true;
}

bool Fitter::SetFCN(const ROOT::Math::FitMethodGradFunction &fcn, const double *params)
{
   bool chi2fit = (fcn.Type() == ROOT::Math::FitMethodGradFunction::kLeastSquare);
   if (!SetFCN(fcn, params, fcn.NPoints(), chi2fit))
      return false;
   fFitType = fcn.Type();
   return true;
}
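// For FitMethod functions the information needed by SetFCN comes from the
// object itself: NPoints() provides the data size and a kLeastSquare type
// marks a chi2-style objective, enabling the chi2 conventions (such as error
// normalization) downstream.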
bool Fitter::FitFCN(const BaseFunc &fcn, const double *params, unsigned int dataSize, bool chi2fit)
{
   // set the objective function and fit in one call
   if (!SetFCN(fcn, params, dataSize, chi2fit))
      return false;
   return FitFCN();
}

bool Fitter::FitFCN(const BaseGradFunc &fcn, const double *params, unsigned int dataSize, bool chi2fit)
{
   if (!SetFCN(fcn, params, dataSize, chi2fit))
      return false;
   return FitFCN();
}

bool Fitter::FitFCN(const ROOT::Math::FitMethodFunction &fcn, const double *params)
{
   if (!SetFCN(fcn, params))
      return false;
   return FitFCN();
}

bool Fitter::FitFCN(const ROOT::Math::FitMethodGradFunction &fcn, const double *params)
{
   if (!SetFCN(fcn, params))
      return false;
   return FitFCN();
}
bool Fitter::SetFCN(MinuitFCN_t fcn, int npar, const double *params, unsigned int dataSize, bool chi2fit)
{
   // if npar is not given, take it from the existing parameter settings
   if (npar == 0) npar = fConfig.ParamsSettings().size();
   if (npar == 0) {
      MATH_ERROR_MSG("Fitter::SetFCN", "Fit Parameter settings have not been created ");
      return false;
   }

   ROOT::Fit::FcnAdapter newFcn(fcn, npar);
   return SetFCN(newFcn, params, dataSize, chi2fit);
}
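// MinuitFCN_t is the classic TMinuit-style FCN,
//    void fcn(int &npar, double *grad, double &fval, double *par, int flag);
// FcnAdapter wraps it into an IMultiGenFunction so the generic SetFCN overload
// can be reused unchanged.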
bool Fitter::FitFCN(MinuitFCN_t fcn, int npar, const double *params, unsigned int dataSize, bool chi2fit)
{
   if (!SetFCN(fcn, npar, params, dataSize, chi2fit))
      return false;
   fUseGradient = false;
   return FitFCN();
}
bool Fitter::FitFCN()
{
   // fit using the previously set FCN function
   if (!fObjFunction) {
      MATH_ERROR_MSG("Fitter::FitFCN", "Objective function has not been set");
      return false;
   }
   // try to recover the model function and data from a known FCN type
   if (!fFunc || !fData)
      ExamineFCN();
   // init the minimizer and run the minimization
   if (!DoInitMinimizer())
      return false;
   return DoMinimization();
}
bool Fitter::EvalFCN()
{
   // evaluate the objective function at the parameter values stored in the configuration
   if (fFunc && fResult->FittedFunction() == 0)
      fFunc.reset();
   if (!fObjFunction) {
      MATH_ERROR_MSG("Fitter::EvalFCN", "Objective function has not been set");
      return false;
   }
   // create a fit result from the fit configuration
   fResult = std::make_shared<ROOT::Fit::FitResult>(fConfig);
   // evaluate the FCN once and store the value
   double fcnval = (*fObjFunction)(fResult->GetParams());
   fResult->fVal = fcnval;
   return true;
}
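// EvalFCN() thus performs a single objective-function evaluation at the
// current parameter values and stores it in the FitResult, without running any
// minimization.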
bool Fitter::DoLeastSquareFit(const ROOT::Fit::ExecutionPolicy &executionPolicy)
{
   // perform a chi2 fit on a set of binned data
   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);

   // check that a model function is set
   if (!fFunc && !fFunc_v) {
      MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "model function is not set");
      return false;
   }

#ifdef DEBUG
   std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[3].IsBound() << " lower limit "
             << Config().ParamsSettings()[3].LowerLimit() << " upper limit "
             << Config().ParamsSettings()[3].UpperLimit() << std::endl;
#endif

   fDataSize = data->Size();

   if (!fUseGradient) {
      // minimize without using the gradient
      if (fFunc_v) {
         // vectorized model function
         Chi2FCN<BaseFunc, IModelFunction_v> chi2(data, fFunc_v, executionPolicy);
         fFitType = chi2.Type();
         return DoMinimization(chi2);
      }
      Chi2FCN<BaseFunc> chi2(data, fFunc, executionPolicy);
      fFitType = chi2.Type();
      return DoMinimization(chi2);
   }
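   // The chi2 objective minimized here is
   //    chi2(p) = sum_i ( (y_i - f(x_i; p)) / sigma_i )^2
   // summed over the bins of the BinData set, evaluated serially or in
   // parallel according to the execution policy.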
   // use the gradient provided by the model function
   if (fConfig.MinimizerOptions().PrintLevel() > 0)
      MATH_INFO_MSG("Fitter::DoLeastSquareFit", "use gradient from model function");

   if (fFunc_v) {
      std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
      if (gradFun) {
         Chi2FCN<BaseGradFunc, IModelFunction_v> chi2(data, gradFun);
         fFitType = chi2.Type();
         return DoMinimization(chi2);
      }
   } else {
      std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
      if (gradFun) {
         Chi2FCN<BaseGradFunc> chi2(data, gradFun);
         fFitType = chi2.Type();
         return DoMinimization(chi2);
      }
   }
   MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "wrong type of function - it does not provide gradient");
   return false;
}
bool Fitter::DoBinnedLikelihoodFit(bool extended, const ROOT::Fit::ExecutionPolicy &executionPolicy)
{
   // perform a likelihood fit on a set of binned data
   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);

   bool useWeight = fConfig.UseWeightCorrection();

   // check that a model function is set
   if (!fFunc && !fFunc_v) {
      MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "model function is not set");
      return false;
   }

   // if the default error definition is still in use, switch to the
   // log-likelihood convention
   if (fConfig.MinimizerOptions().ErrorDef() == gDefaultErrorDef) {
      fConfig.MinimizerOptions().SetErrorDef(0.5);
   }
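   // ErrorDef is the function increase that defines a one-standard-deviation
   // error: 1 for a chi2 and 0.5 for a negative log-likelihood, since
   // -log L ~ chi2 / 2 near the minimum.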
   if (useWeight && fConfig.MinosErrors()) {
      MATH_INFO_MSG("Fitter::DoBinnedLikelihoodFit", "MINOS errors cannot be computed in weighted likelihood fits");
      fConfig.SetMinosErrors(false);
   }

   fDataSize = data->Size();
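   // PoissonLikelihoodFCN implements a binned Poisson log-likelihood; up to
   // constant terms the extended objective is
   //    -log L(p) = sum_i ( f_i(p) - n_i * log f_i(p) )
   // with n_i the observed bin content and f_i(p) the model prediction in bin i.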
   if (!fUseGradient) {
      // minimize without using the gradient
      if (fFunc_v) {
         // the chi2 is passed along so an equivalent chi2 value can be stored in the result
         Chi2FCN<BaseFunc, IModelFunction_v> chi2(data, fFunc_v);
         PoissonLikelihoodFCN<BaseFunc, IModelFunction_v> logl(data, fFunc_v, useWeight, extended, executionPolicy);
         fFitType = logl.Type();
         if (!DoMinimization(logl, &chi2))
            return false;
         if (useWeight) {
            logl.UseSumOfWeightSquare();
            if (!ApplyWeightCorrection(logl))
               return false;
         }
         return true;
      }
      Chi2FCN<BaseFunc> chi2(data, fFunc);
      PoissonLikelihoodFCN<BaseFunc> logl(data, fFunc, useWeight, extended, executionPolicy);
      fFitType = logl.Type();
      if (!DoMinimization(logl, &chi2))
         return false;
      if (useWeight) {
         logl.UseSumOfWeightSquare();
         if (!ApplyWeightCorrection(logl))
            return false;
      }
      return true;
   }

   // use the gradient provided by the model function
   if (fFunc_v) {
      Chi2FCN<BaseFunc, IModelFunction_v> chi2(data, fFunc_v);
      std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
      if (!gradFun) {
         MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
         return false;
      }
      // the extended fit is forced in the gradient case
      PoissonLikelihoodFCN<BaseGradFunc, IModelFunction_v> logl(data, gradFun, useWeight, true, executionPolicy);
      fFitType = logl.Type();
      if (!DoMinimization(logl, &chi2))
         return false;
      if (useWeight) {
         logl.UseSumOfWeightSquare();
         if (!ApplyWeightCorrection(logl))
            return false;
      }
      return true;
   }

   Chi2FCN<BaseFunc> chi2(data, fFunc);
   if (fConfig.MinimizerOptions().PrintLevel() > 0)
      MATH_INFO_MSG("Fitter::DoBinnedLikelihoodFit", "use gradient from model function");
   std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
   if (!gradFun) {
      MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
      return false;
   }
   if (!extended)
      MATH_WARN_MSG("Fitter::DoBinnedLikelihoodFit",
                    "Not-extended binned fit with gradient not yet supported - do an extended fit");
   PoissonLikelihoodFCN<BaseGradFunc> logl(data, gradFun, useWeight, true, executionPolicy);
   fFitType = logl.Type();
   if (!DoMinimization(logl, &chi2))
      return false;
   if (useWeight) {
      logl.UseSumOfWeightSquare();
      if (!ApplyWeightCorrection(logl))
         return false;
   }
   return true;
}
bool Fitter::DoUnbinnedLikelihoodFit(bool extended, const ROOT::Fit::ExecutionPolicy &executionPolicy)
{
   // perform a likelihood fit on a set of unbinned data
   std::shared_ptr<UnBinData> data = std::dynamic_pointer_cast<UnBinData>(fData);

   bool useWeight = fConfig.UseWeightCorrection();

   // check that a model function is set
   if (!fFunc && !fFunc_v) {
      MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "model function is not set");
      return false;
   }

   if (useWeight && fConfig.MinosErrors()) {
      MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "MINOS errors cannot be computed in weighted likelihood fits");
      fConfig.SetMinosErrors(false);
   }

   fDataSize = data->Size();

#ifdef DEBUG
   unsigned int ipar = 0;
   std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[ipar].IsBound()
             << " lower limit " << Config().ParamsSettings()[ipar].LowerLimit()
             << " upper limit " << Config().ParamsSettings()[ipar].UpperLimit() << std::endl;
#endif

   // if the default error definition is still in use, switch to the
   // log-likelihood convention (see DoBinnedLikelihoodFit)
   if (fConfig.MinimizerOptions().ErrorDef() == gDefaultErrorDef) {
      fConfig.MinimizerOptions().SetErrorDef(0.5);
   }
   if (!fUseGradient) {
      // minimize without using the gradient
      if (fFunc_v) {
         LogLikelihoodFCN<BaseFunc, IModelFunction_v> logl(data, fFunc_v, useWeight, extended, executionPolicy);
         fFitType = logl.Type();
         if (!DoMinimization(logl)) return false;
         if (useWeight) {
            logl.UseSumOfWeightSquare();
            if (!ApplyWeightCorrection(logl)) return false;
         }
         return true;
      }
      LogLikelihoodFCN<BaseFunc> logl(data, fFunc, useWeight, extended, executionPolicy);
      fFitType = logl.Type();
      if (!DoMinimization(logl)) return false;
      if (useWeight) {
         logl.UseSumOfWeightSquare();
         if (!ApplyWeightCorrection(logl)) return false;
      }
      return true;
   }
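   // Weighted-fit error handling above: the fit itself is run with the event
   // weights w_i; the parameter errors are then corrected by re-evaluating the
   // same likelihood with w_i -> w_i^2 and applying the sandwich formula in
   // ApplyWeightCorrection().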
   // use the gradient provided by the model function
   if (fFunc_v) {
      if (fConfig.MinimizerOptions().PrintLevel() > 0)
         MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "use gradient from model function");
      std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
      if (gradFun) {
         if (extended)
            MATH_WARN_MSG("Fitter::DoUnbinnedLikelihoodFit",
                          "Extended unbinned fit with gradient not yet supported - do a not-extended fit");
         LogLikelihoodFCN<BaseGradFunc, IModelFunction_v> logl(data, gradFun, useWeight, extended);
         fFitType = logl.Type();
         if (!DoMinimization(logl)) return false;
         if (useWeight) {
            logl.UseSumOfWeightSquare();
            if (!ApplyWeightCorrection(logl)) return false;
         }
         return true;
      }
      MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
      return false;
   }

   if (fConfig.MinimizerOptions().PrintLevel() > 0)
      MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "use gradient from model function");
   std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
   if (gradFun) {
      if (extended)
         MATH_WARN_MSG("Fitter::DoUnbinnedLikelihoodFit",
                       "Extended unbinned fit with gradient not yet supported - do a not-extended fit");
      LogLikelihoodFCN<BaseGradFunc> logl(data, gradFun, useWeight, extended);
      fFitType = logl.Type();
      if (!DoMinimization(logl)) return false;
      if (useWeight) {
         logl.UseSumOfWeightSquare();
         if (!ApplyWeightCorrection(logl)) return false;
      }
      return true;
   }
   MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
   return false;
}
bool Fitter::DoLinearFit()
{
   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);

   // switch to the linear minimizer, fit, and restore the previous minimizer
   std::string prevminimizer = fConfig.MinimizerType();
   fConfig.SetMinimizer("Linear");

   bool ret = DoLeastSquareFit();
   fConfig.SetMinimizer(prevminimizer.c_str());
   return ret;
}
bool Fitter::CalculateHessErrors()
{
   // compute the parabolic errors from the Hessian of the objective function
   if (!fObjFunction) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors", "Objective function has not been set");
      return false;
   }

   // fFitType == 2 indicates a log-likelihood fit
   if (fFitType == 2 && fConfig.UseWeightCorrection()) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors",
                     "Re-computation of Hesse errors not implemented for weighted likelihood fits");
      MATH_INFO_MSG("Fitter::CalculateHessErrors",
                    "Do the Fit using configure option FitConfig::SetParabErrors()");
      return false;
   }

   if (fMinimizer && !fResult) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors", "FitResult has not been created");
      return false;
   }

   // update the minimizer (creating it if needed) with the current options
   if (!DoUpdateMinimizerOptions()) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors", "Error re-initializing the minimizer");
      return false;
   }

   if (!fMinimizer) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors", "Need to do a fit before calculating the errors");
      return false;
   }

   bool ret = fMinimizer->Hesse();
   if (!ret)
      MATH_WARN_MSG("Fitter::CalculateHessErrors", "Error when calculating Hessian");
   // update the result with the Hesse status
   if (fResult->IsEmpty())
      fResult = std::unique_ptr<ROOT::Fit::FitResult>(new ROOT::Fit::FitResult(fConfig));

   ret |= fResult->Update(fMinimizer, fConfig, ret);

   // update the number of function calls when the FCN type is known
   if (fFitType != ROOT::Math::FitMethodFunction::kUndefined) {
      fResult->fNCalls = GetNCallsFromFCN();
   }

   // update the fit configuration if requested
   if (fConfig.UpdateAfterFit() && ret) DoUpdateFitConfig();
   return ret;
}
bool Fitter::CalculateMinosErrors()
{
   // check that a minimizer from a previous fit exists
   if (!fMinimizer) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors", "Minimizer does not exist - cannot calculate Minos errors");
      return false;
   }

   if (!fResult || fResult->IsEmpty()) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors", "Invalid Fit Result - cannot calculate Minos errors");
      return false;
   }

   // fFitType == 2 indicates a log-likelihood fit
   if (fFitType == 2 && fConfig.UseWeightCorrection()) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors",
                     "Computation of MINOS errors not implemented for weighted likelihood fits");
      return false;
   }

   if (!DoUpdateMinimizerOptions(false)) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors", "Error re-initializing the minimizer");
      return false;
   }

   // reset the flag to avoid running Minos again when the result is updated
   fConfig.SetMinosErrors(false);

   // run Minos on the requested parameters (all of them if none was specified)
   const std::vector<unsigned int> & ipars = fConfig.MinosParams();
   unsigned int n = (ipars.size() > 0) ? ipars.size() : fResult->Parameters().size();
   bool ok = false;
   for (unsigned int i = 0; i < n; ++i) {
      double elow = 0, eup = 0;
      unsigned int index = (ipars.size() > 0) ? ipars[i] : i;
      bool ret = fMinimizer->GetMinosError(index, elow, eup);
      if (ret) fResult->SetMinosError(index, elow, eup);
      ok |= ret;
   }
   if (!ok) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors", "Minos error calculation failed for all parameters");
      return false;
   }

   // update the fit result
   ok &= fResult->Update(fMinimizer, fConfig, fResult->IsValid());
   return ok;
}
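// Unlike the symmetric Hesse errors, Minos errors are asymmetric: for each
// parameter the profile of the objective function is scanned until it rises by
// ErrorDef above the minimum, giving separate lower (elow) and upper (eup)
// uncertainties.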
// trait class to read the number of calls and the type from a generic
// objective function, specialized for the known FitMethod function types
template <class Func>
struct ObjFuncTrait {
   static unsigned int NCalls(const Func &) { return 0; }
   static int Type(const Func &) { return -1; }
   static bool IsGrad() { return false; }
};

template <>
struct ObjFuncTrait<ROOT::Math::FitMethodFunction> {
   static unsigned int NCalls(const ROOT::Math::FitMethodFunction & f) { return f.NCalls(); }
   static int Type(const ROOT::Math::FitMethodFunction & f) { return f.Type(); }
   static bool IsGrad() { return false; }
};

template <>
struct ObjFuncTrait<ROOT::Math::FitMethodGradFunction> {
   static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction & f) { return f.NCalls(); }
   static int Type(const ROOT::Math::FitMethodGradFunction & f) { return f.Type(); }
   static bool IsGrad() { return true; }
};
bool Fitter::DoInitMinimizer()
{
   // create the minimizer and set the objective function in it

   assert(fObjFunction.get());

   // check that the number of parameter settings matches the function dimension
   if (fConfig.ParamsSettings().size() != fObjFunction->NDim()) {
      MATH_ERROR_MSG("Fitter::DoInitMinimizer", "wrong function dimension or wrong size for FitConfig");
      return false;
   }

   // create the minimizer according to the configuration
   fMinimizer = std::shared_ptr<ROOT::Math::Minimizer>(fConfig.CreateMinimizer());
   if (fMinimizer.get() == nullptr) {
      MATH_ERROR_MSG("Fitter::DoInitMinimizer", "Minimizer cannot be created");
      return false;
   }

   // set the objective function, using the gradient interface when requested
   if (fUseGradient) {
      const ROOT::Math::IMultiGradFunction * gradfcn =
         dynamic_cast<const ROOT::Math::IMultiGradFunction *>(fObjFunction.get());
      if (!gradfcn) {
         MATH_ERROR_MSG("Fitter::DoInitMinimizer", "wrong type of function - it does not provide gradient");
         return false;
      }
      fMinimizer->SetFunction(*gradfcn);
   } else {
      fMinimizer->SetFunction(*fObjFunction);
   }

   // set the variables from the parameter settings
   fMinimizer->SetVariables(fConfig.ParamsSettings().begin(), fConfig.ParamsSettings().end());

   // if requested, compute accurate (Hesse) errors after the minimization
   if (fConfig.ParabErrors()) fMinimizer->SetValidError(true);

   return true;
}
bool Fitter::DoUpdateMinimizerOptions(bool canDifferentMinim)
{
   // update the minimizer options when re-using an existing minimizer

   // a different minimizer type may have been requested in the configuration
   std::string newMinimType = fConfig.MinimizerName();
   if (fMinimizer && fResult && newMinimType != fResult->MinimizerType()) {
      if (canDifferentMinim) {
         std::string msg = "Using now " + newMinimType;
         MATH_INFO_MSG("Fitter::DoUpdateMinimizerOptions", msg.c_str());
         if (!DoInitMinimizer())
            return false;
      } else {
         std::string msg = "Cannot change minimizer. Continue using " + fResult->MinimizerType();
         MATH_WARN_MSG("Fitter::DoUpdateMinimizerOptions", msg.c_str());
      }
   }

   // create the minimizer if it does not exist yet
   if (!fMinimizer) {
      if (!DoInitMinimizer())
         return false;
   }

   // set the new minimizer options (they may have been changed by the user)
   fMinimizer->SetOptions(fConfig.MinimizerOptions());
   return true;
}
bool Fitter::DoMinimization(const ROOT::Math::IMultiGenFunction * chi2func)
{
   // perform the minimization (the minimizer and the objective function must already be set)
   bool ret = fMinimizer->Minimize();

   // fill the fit result
   if (!fResult) fResult = std::make_shared<FitResult>();
   fResult->FillResult(fMinimizer, fConfig, fFunc, ret, fDataSize, fBinFit, chi2func);

   // if not provided by the minimizer, get the number of calls from the FCN
   if (fResult->fNCalls == 0 && fFitType != ROOT::Math::FitMethodFunction::kUndefined) {
      fResult->fNCalls = GetNCallsFromFCN();
   }

   // store the objective function and the data in the result
   fResult->fObjFunc = fObjFunction;
   fResult->fFitData = fData;

#ifdef DEBUG
   std::cout << "ROOT::Fit::Fitter::DoMinimization : ncalls = " << fResult->fNCalls
             << " type of objfunc " << fFitType << " typeid: " << typeid(*fObjFunction).name()
             << " use gradient " << fUseGradient << std::endl;
#endif

   if (fConfig.NormalizeErrors() && fFitType == ROOT::Math::FitMethodFunction::kLeastSquare)
      fResult->NormalizeErrors();
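   // NormalizeErrors() rescales the parameter errors using the reduced chi2 of
   // the fit (errors scaled by sqrt(chi2/ndf)), the usual convention when the
   // data point uncertainties are not absolutely known.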
   // update the fit configuration if requested
   if (fConfig.UpdateAfterFit() && ret) DoUpdateFitConfig();
   return ret;
}
bool Fitter::DoMinimization(const BaseFunc & objFunc, const ROOT::Math::IMultiGenFunction * chi2func)
{
   fObjFunction = std::unique_ptr<ROOT::Math::IMultiGenFunction>(objFunc.Clone());
   if (!DoInitMinimizer()) return false;
   return DoMinimization(chi2func);
}
void Fitter::DoUpdateFitConfig()
{
   if (fResult->IsEmpty() || !fResult->IsValid()) return;
   for (unsigned int i = 0; i < fConfig.NPar(); ++i) {
      ParameterSettings & par = fConfig.ParSettings(i);
      par.SetValue(fResult->Value(i));
      if (fResult->Error(i) > 0) par.SetStepSize(fResult->Error(i));
   }
}
int Fitter::GetNCallsFromFCN()
{
   // retrieve the number of function calls from the FCN object, when it is one
   // of the known FitMethod function types
   int ncalls = 0;
   if (!fUseGradient) {
      const ROOT::Math::FitMethodFunction * fcn =
         dynamic_cast<const ROOT::Math::FitMethodFunction *>(fObjFunction.get());
      if (fcn) ncalls = fcn->NCalls();
   } else {
      const ROOT::Math::FitMethodGradFunction * fcn =
         dynamic_cast<const ROOT::Math::FitMethodGradFunction *>(fObjFunction.get());
      if (fcn) ncalls = fcn->NCalls();
   }
   return ncalls;
}
bool Fitter::ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction & loglw2, bool minimizeW2L)
{
   // apply the correction to the parameter errors obtained in a weighted
   // likelihood fit, using the likelihood built with the squared weights

   if (fMinimizer.get() == nullptr) {
      MATH_ERROR_MSG("Fitter::ApplyWeightCorrection", "Must perform first a fit before applying the correction");
      return false;
   }

   unsigned int n = loglw2.NDim();
   // retrieve the covariance matrix of the performed (weighted) fit
   std::vector<double> cov(n * n);
   bool ret = fMinimizer->GetCovMatrix(&cov[0]);
   if (!ret) {
      MATH_ERROR_MSG("Fitter::ApplyWeightCorrection", "Previous fit has no valid Covariance matrix");
      return false;
   }

   // set the new objective function built with the squared weights
   fObjFunction = std::unique_ptr<ROOT::Math::IMultiGenFunction>(loglw2.Clone());

   // re-initialize the minimizer with the new objective function
   if (!DoInitMinimizer()) return false;

   // run a new minimization only if requested; otherwise Hesse is evaluated
   // at the minimum of the previous fit
   if (minimizeW2L) fMinimizer->Minimize();

   // run Hesse on the log-likelihood built with the squared weights
   ret = fMinimizer->Hesse();
   if (!ret) {
      MATH_ERROR_MSG("Fitter::ApplyWeightCorrection",
                     "Error running Hesse on weight2 likelihood - cannot compute errors");
      return false;
   }

   if (fMinimizer->CovMatrixStatus() != 3) {
      MATH_WARN_MSG("Fitter::ApplyWeightCorrection",
                    "Covariance matrix for weighted likelihood is not accurate, the errors may be not reliable");
      if (fMinimizer->CovMatrixStatus() == 2)
         MATH_WARN_MSG("Fitter::ApplyWeightCorrection",
                       "Covariance matrix for weighted likelihood was forced to be defined positive");
      if (fMinimizer->CovMatrixStatus() <= 0) {
         MATH_ERROR_MSG("Fitter::ApplyWeightCorrection",
                        "Covariance matrix for weighted likelihood is not valid !");
         return false;
      }
   }
   // retrieve the Hessian matrix of the likelihood built with the squared weights
   std::vector<double> hes(n * n);
   ret = fMinimizer->GetHessianMatrix(&hes[0]);
   if (!ret) {
      MATH_ERROR_MSG("Fitter::ApplyWeightCorrection",
                     "Error retrieving Hesse on weight2 likelihood - cannot compute errors");
      return false;
   }
   // compute the first product tmp = H_w2 * C
   std::vector<double> tmp(n * n);
   for (unsigned int i = 0; i < n; ++i) {
      for (unsigned int j = 0; j < n; ++j) {
         for (unsigned int k = 0; k < n; ++k)
            tmp[i * n + j] += hes[i * n + k] * cov[k * n + j];
      }
   }

   // compute the corrected covariance newCov = C * tmp
   std::vector<double> newCov(n * n);
   for (unsigned int i = 0; i < n; ++i) {
      for (unsigned int j = 0; j < n; ++j) {
         for (unsigned int k = 0; k < n; ++k)
            newCov[i * n + j] += cov[i * n + k] * tmp[k * n + j];
      }
   }

   // update the errors and the (lower-triangle packed) covariance in the result
   unsigned int k = 0;
   for (unsigned int i = 0; i < n; ++i) {
      fResult->fErrors[i] = std::sqrt(newCov[i * (n + 1)]);
      for (unsigned int j = 0; j <= i; ++j)
         fResult->fCovMatrix[k++] = newCov[i * n + j];
   }
   return true;
}
void Fitter::ExamineFCN()
{
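   // Check whether the stored FCN is one of the known BasicFCN types; if so,
   // recover the data (and possibly the model function) it holds, so the fit
   // can proceed as if they had been set directly.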
   if (GetDataFromFCN<BasicFCN<ROOT::Math::IMultiGenFunction, ROOT::Math::IParamMultiFunction, BinData>>()) return;
   if (GetDataFromFCN<BasicFCN<ROOT::Math::IMultiGenFunction, ROOT::Math::IParamMultiFunction, UnBinData>>()) return;

   if (GetDataFromFCN<BasicFCN<ROOT::Math::IMultiGradFunction, ROOT::Math::IParamMultiFunction, BinData>>()) return;
   if (GetDataFromFCN<BasicFCN<ROOT::Math::IMultiGradFunction, ROOT::Math::IParamMultiFunction, UnBinData>>()) return;
}