#ifndef ROOT_TMVA_MethodMLP
#define ROOT_TMVA_MethodMLP

//////////////////////////////////////////////////////////////////////////
//                                                                      //
// MethodMLP                                                            //
//                                                                      //
// Multilayer-perceptron neural network, built on MethodANNBase         //
//                                                                      //
//////////////////////////////////////////////////////////////////////////

// Minimal set of headers required by the declarations below; the full
// header in ROOT may pull in additional ROOT and TMVA includes.
#include <vector>
#include <utility>
#include <iosfwd>

#include "TString.h"
#include "TMatrixDfwd.h"

#include "TMVA/MethodANNBase.h"
#include "TMVA/IFitterTarget.h"
#include "TMVA/ConvergenceTest.h"
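
// Usage sketch: MethodMLP is normally booked through the TMVA Factory rather
// than constructed directly. The option values are illustrative only, and the
// DataLoader-based Factory interface of ROOT >= 6.08 is assumed here.
//
//    TMVA::Factory factory( "job", outputFile, "AnalysisType=Classification" );
//    factory.BookMethod( dataloader, TMVA::Types::kMLP, "MLP",
//                        "HiddenLayers=N+5:NCycles=600:TrainingMethod=BFGS" );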

// Defining and immediately undefining this macro keeps the Minuit-based
// minimizer compiled out; remove the #undef to enable it.
#define MethodMLP_UseMinuit__
#undef  MethodMLP_UseMinuit__

namespace TMVA {

   class MethodMLP : public MethodANNBase, public IFitterTarget, public ConvergenceTest {

   public:

      // standard constructor: configuration via the TMVA option string
      MethodMLP( const TString& jobName,
                 const TString& methodTitle,
                 DataSetInfo& theData,
                 const TString& theOption );

      // constructor used when reading a trained classifier back from a weight file
      MethodMLP( DataSetInfo& theData,
                 const TString& theWeightFile );

      virtual Bool_t HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets );

      Double_t ComputeEstimator ( std::vector<Double_t>& parameters );
      Double_t EstimatorFunction( std::vector<Double_t>& parameters );

      enum ETrainingMethod { kBP=0, kBFGS, kGA };
      enum EBPTrainingMode { kSequential=0, kBatch };
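
      // These correspond to the training-method ("BP", "BFGS", "GA") and
      // back-propagation-mode ("sequential", "batch") configuration options
      // and are stored in fTrainingMethod and fBPMode below.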

      // HasInverseHessian() signals that per-event MVA error estimates are available;
      // GetMvaValue() can then fill err/errUpper from the approximate inverse Hessian
      bool     HasInverseHessian() { return fCalculateErrors; }
      Double_t GetMvaValue( Double_t* err=0, Double_t* errUpper=0 );

   protected:

      // write the classifier-specific response code into a standalone C++ class
      void MakeClassSpecific( std::ostream&, const TString& ) const;

      // help message text shown by the TMVA help facility
      void GetHelpMessage() const;

   private:

      // option handling
      void DeclareOptions();
      void ProcessOptions();

      // general training helpers
      void     Train( Int_t nEpochs );
      void     InitializeLearningRates();   // learning rates used by back-propagation

      // estimator used as the measure of success by all minimization techniques
      Double_t CalculateEstimator( Types::ETreeType treeType = Types::kTraining, Int_t iEpoch = -1 );

      // BFGS minimization
      void     BFGSMinimize( Int_t nEpochs );
      void     SetGammaDelta( TMatrixD &Gamma, TMatrixD &Delta, std::vector<Double_t> &Buffer );
      void     SteepestDir( TMatrixD &Dir );
      Bool_t   GetHessian( TMatrixD &Hessian, TMatrixD &Gamma, TMatrixD &Delta );
      void     SetDir( TMatrixD &Hessian, TMatrixD &Dir );
      Double_t DerivDir( TMatrixD &Dir );
      Bool_t   LineSearch( TMatrixD &Dir, std::vector<Double_t> &Buffer, Double_t* dError=0 );
      void     SimulateEvent( const Event* ev );
      void     SetDirWeights( std::vector<Double_t> &Origin, TMatrixD &Dir, Double_t alpha );
      Double_t GetMSEErr( const Event* ev, UInt_t index = 0 );   // mean-squared-error contribution of one event
      Double_t GetCEErr ( const Event* ev, UInt_t index = 0 );   // cross-entropy contribution of one event
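
      // Outline of one BFGS step, inferred from the method names above (the
      // implementation lives in MethodMLP.cxx): SetGammaDelta() records the
      // change in gradients (Gamma) and weights (Delta) from the last step,
      // GetHessian() updates the approximate Hessian from them, SetDir()
      // turns it into a search direction (falling back to SteepestDir() when
      // the update is unusable), and LineSearch() moves the weights along
      // that direction until the error estimator decreases.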

      // back-propagation
      void     BackPropagationMinimize( Int_t nEpochs );
      void     TrainOneEpoch();
      void     Shuffle( Int_t* index, Int_t n );   // random permutation of the event indices
      void     DecaySynapseWeights( Bool_t lateEpoch );
      void     TrainOneEvent( Int_t ievt );
      Double_t GetDesiredOutput( const Event* ev );
      void     UpdateNetwork( Double_t desired, Double_t eventWeight=1.0 );
      void     UpdateNetwork( const std::vector<Float_t>& desired, Double_t eventWeight=1.0 );
      void     CalculateNeuronDeltas();
      void     UpdateSynapses();
      void     AdjustSynapseWeights();
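
      // Sketch of the per-event update, inferred from the declarations above
      // (see MethodMLP.cxx for the authoritative version): TrainOneEvent()
      // forces one event through the network, compares the output with
      // GetDesiredOutput(), and UpdateNetwork() propagates the error back via
      // CalculateNeuronDeltas(); in sequential mode the weights are adjusted
      // immediately, in batch mode the accumulated corrections are applied by
      // AdjustSynapseWeights() at the end of the batch.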

      // faster back-propagation variant
      void     TrainOneEventFast( Int_t ievt, Float_t*& branchVar, Int_t& type );

      // genetic-algorithm minimization
      void     GeneticMinimize();

#ifdef MethodMLP_UseMinuit__
      // interface to the Minuit minimizer (compiled out by default, see the
      // #undef at the top of this file)
      void MinuitMinimize();
      static MethodMLP* GetThisPtr();
      static void IFCN( Int_t& npars, Double_t* grad, Double_t &f, Double_t* fitPars, Int_t ifl );
      void FCN( Int_t& npars, Double_t* grad, Double_t &f, Double_t* fitPars, Int_t ifl );
#endif

      // error estimation and regularization
      bool                  fCalculateErrors;  // compute the approximate inverse Hessian at the end of training (enables MVA error estimates)
      std::vector<Double_t> fPriorDev;         // per-weight contributions to the regularization prior
      void                  GetApproxInvHessian( TMatrixD& InvHessian, bool regulate = true );
      void                  UpdateRegulators();

      ETrainingMethod fTrainingMethod;     // training method: back-propagation, BFGS or genetic algorithm
      TString         fTrainMethodS;       // training method as given in the option string

      Float_t         fSamplingFraction;   // fraction of events sampled for training
      Float_t         fSamplingEpoch;      // fraction of epochs during which sampling is applied
      Float_t         fSamplingWeight;     // weight-change factor applied to sampled events
      Bool_t          fSamplingTraining;   // sample the training set
      Bool_t          fSamplingTesting;    // sample the test set

      EBPTrainingMode fBPMode;             // back-propagation mode: sequential or batch

      Double_t        fGA_SC_factor;       // genetic-algorithm setting (SC_factor)

      std::vector<std::pair<Float_t,Float_t> >* fDeviationsFromTargets;   // (deviation from target, event weight) pairs, used for regression

      Float_t         fWeightRange;        // suppress outliers in the estimator calculation

#ifdef MethodMLP_UseMinuit__
      Int_t             fNumberOfWeights;  // Minuit: number of synapse weights
      static MethodMLP* fgThis;            // Minuit: static pointer back to the active instance (used by IFCN)
#endif

      // debug flags
      static const Int_t  fgPRINT_ESTIMATOR_INC = 10;
      static const Bool_t fgPRINT_SEQ           = kFALSE;
      static const Bool_t fgPRINT_BATCH         = kFALSE;

      ClassDef(MethodMLP,0);   // Multi-layer perceptron implemented for TMVA

   };

} // namespace TMVA

#endif // ROOT_TMVA_MethodMLP