DNeutralShower_FCALQualityMLP.h
// Class: ReadMLP
// Automatically generated by MethodBase::MakeClass
//

/* configuration options =====================================================

#GEN -*-*-*-*-*-*-*-*-*-*-*- general info -*-*-*-*-*-*-*-*-*-*-*-

Method : MLP::MLP
TMVA Release : 4.2.1 [262657]
ROOT Release : 6.08/06 [395270]
Creator : rebecca
Date : Sat Dec 23 19:09:24 2017
Host : Darwin macitois18.cern.ch 16.4.0 Darwin Kernel Version 16.4.0: Thu Dec 22 22:53:21 PST 2016; root:xnu-3789.41.3~3/RELEASE_X86_64 x86_64
Dir : /usr/local/root
Training events: 1751055
Analysis type : [Classification]


#OPT -*-*-*-*-*-*-*-*-*-*-*-*- options -*-*-*-*-*-*-*-*-*-*-*-*-

# Set by User:
NCycles: "200" [Number of training cycles]
HiddenLayers: "N+3" [Specification of hidden layer architecture]
NeuronType: "radial" [Neuron activation function type]
V: "False" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
VarTransform: "N" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
H: "True" [Print method-specific help message]
TestRate: "6" [Test for overtraining performed at each #th epoch]
UseRegulator: "False" [Use regulator to avoid over-training]
# Default:
RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
EstimatorType: "CE" [MSE (Mean Square Estimator) for Gaussian Likelihood or CE (Cross-Entropy) for Bernoulli Likelihood]
NeuronInputType: "sum" [Neuron input function type]
VerbosityLevel: "Default" [Verbosity level]
CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
TrainingMethod: "BP" [Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)]
LearningRate: "2.000000e-02" [ANN learning rate parameter]
DecayRate: "1.000000e-02" [Decay rate for learning parameter]
EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
Sampling: "1.000000e+00" [Only 'Sampling' (randomly selected) events are trained each epoch]
SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' epochs; afterwards, all events are taken for training]
SamplingImportance: "1.000000e+00" [The sampling weights of events in epochs which were successful (worse estimator than before) are multiplied with SamplingImportance; otherwise they are divided.]
SamplingTraining: "True" [The training sample is sampled]
SamplingTesting: "False" [The testing sample is sampled]
ResetStep: "50" [How often BFGS should reset history]
Tau: "3.000000e+00" [LineSearch "size step"]
BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
BatchSize: "-1" [Batch size: number of events per batch, only set in batch mode; -1 for BatchSize=number_of_events]
ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
UpdateLimit: "10000" [Maximum times of regulator update]
CalculateErrors: "False" [Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value]
WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
##


#VAR -*-*-*-*-*-*-*-*-*-*-*-* variables *-*-*-*-*-*-*-*-*-*-*-*-

NVar 8
nHits nHits nHits nHits 'F' [2,18]
e9e25Sh e9e25Sh e9e25Sh e9e25Sh 'F' [0.223209112883,1]
e1e9Sh e1e9Sh e1e9Sh e1e9Sh 'F' [0.159292444587,1]
sumUSh sumUSh sumUSh sumUSh 'F' [2.02385636028e-10,251.844528198]
sumVSh sumVSh sumVSh sumVSh 'F' [2.56061630233e-11,174.307571411]
asymUVSh asymUVSh asymUVSh asymUVSh 'F' [-1,1]
speedSh speedSh speedSh speedSh 'F' [-98087.1953125,479840.125]
dtTrSh dtTrSh dtTrSh dtTrSh 'F' [-999975.1875,164.864715576]
NSpec 0


============================================================================ */
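
/* Example usage (a minimal sketch, not part of the generated code; the
   variable names must be passed in exactly the order listed in the #VAR
   block above, and the eight values are placeholders for one shower):

      std::vector<std::string> names = { "nHits", "e9e25Sh", "e1e9Sh",
                                         "sumUSh", "sumVSh", "asymUVSh",
                                         "speedSh", "dtTrSh" };
      DNeutralShower_FCALQualityMLP reader( names );

      std::vector<double> values = { 10., 0.9, 0.5, 1.2, 1.1, 0.05, 30., 2. };
      double quality = -1.;
      if (reader.IsStatusClean())
         quality = reader.GetMvaValue( values ); // sigmoid output in (0,1)
*/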

#include <vector>
#include <cmath>
#include <string>
#include <iostream>

#ifndef IClassifierReader__def
#define IClassifierReader__def

class IClassifierReader {

 public:

   // constructor
   IClassifierReader() : fStatusIsClean( true ) {}
   virtual ~IClassifierReader() {}

   // return classifier response
   virtual double GetMvaValue( const std::vector<double>& inputValues ) const = 0;

   // returns classifier status
   bool IsStatusClean() const { return fStatusIsClean; }

 protected:

   bool fStatusIsClean; // set to false if the sanity checks fail
};

#endif

class DNeutralShower_FCALQualityMLP : public IClassifierReader {

 public:

   // constructor
   DNeutralShower_FCALQualityMLP( std::vector<std::string>& theInputVars )
      : IClassifierReader(),
        fClassName( "DNeutralShower_FCALQualityMLP" ),
        fNvars( 8 ),
        fIsNormalised( false )
   {
      // the training input variables
      const char* inputVars[] = { "nHits", "e9e25Sh", "e1e9Sh", "sumUSh", "sumVSh", "asymUVSh", "speedSh", "dtTrSh" };

      // sanity checks
      if (theInputVars.size() <= 0) {
         std::cout << "Problem in class \"" << fClassName << "\": empty input vector" << std::endl;
         fStatusIsClean = false;
      }

      if (theInputVars.size() != fNvars) {
         std::cout << "Problem in class \"" << fClassName << "\": mismatch in number of input values: "
                   << theInputVars.size() << " != " << fNvars << std::endl;
         fStatusIsClean = false;
      }

      // validate input variables
      for (size_t ivar = 0; ivar < theInputVars.size(); ivar++) {
         if (theInputVars[ivar] != inputVars[ivar]) {
            std::cout << "Problem in class \"" << fClassName << "\": mismatch in input variable names" << std::endl
                      << " for variable [" << ivar << "]: " << theInputVars[ivar].c_str() << " != " << inputVars[ivar] << std::endl;
            fStatusIsClean = false;
         }
      }

      // initialize min and max vectors (for normalisation)
      fVmin[0] = -1;
      fVmax[0] = 1;
      fVmin[1] = -1;
      fVmax[1] = 1;
      fVmin[2] = -1;
      fVmax[2] = 0.99999988079071;
      fVmin[3] = -1;
      fVmax[3] = 0.99999988079071;
      fVmin[4] = -1;
      fVmax[4] = 0.99999988079071;
      fVmin[5] = -1;
      fVmax[5] = 1;
      fVmin[6] = -1;
      fVmax[6] = 1;
      fVmin[7] = -1;
      fVmax[7] = 1;

      // initialize input variable types
      fType[0] = 'F';
      fType[1] = 'F';
      fType[2] = 'F';
      fType[3] = 'F';
      fType[4] = 'F';
      fType[5] = 'F';
      fType[6] = 'F';
      fType[7] = 'F';

      // initialize constants
      Initialize();

      // initialize transformation
      InitTransform();
   }

   // destructor
   virtual ~DNeutralShower_FCALQualityMLP() {
      Clear(); // method-specific
   }

   // the classifier response
   // "inputValues" is a vector of input values in the same order as the
   // variables given to the constructor
   double GetMvaValue( const std::vector<double>& inputValues ) const;

 private:

   // method-specific destructor
   void Clear();

   // input variable transformation

   double fMin_1[3][8];
   double fMax_1[3][8];
   void InitTransform_1();
   void Transform_1( std::vector<double> & iv, int sigOrBgd ) const;
   void InitTransform();
   void Transform( std::vector<double> & iv, int sigOrBgd ) const;

   // common member variables
   const char* fClassName;

   const size_t fNvars;
   size_t GetNvar() const { return fNvars; }
   char GetType( int ivar ) const { return fType[ivar]; }

   // normalisation of input variables
   const bool fIsNormalised;
   bool IsNormalised() const { return fIsNormalised; }
   double fVmin[8];
   double fVmax[8];
   double NormVariable( double x, double xmin, double xmax ) const {
      // normalise to output range: [-1, 1]
      return 2*(x - xmin)/(xmax - xmin) - 1.0;
   }
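   // NormVariable maps x = xmin to -1 and x = xmax to +1; it is used by
   // GetMvaValue only when fIsNormalised is true (false for this method)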

   // type of input variable: 'F' or 'I'
   char fType[8];

   // initialize internal variables
   void Initialize();
   double GetMvaValue__( const std::vector<double>& inputValues ) const;

   // private members (method specific)

   double ActivationFnc(double x) const;
   double OutputActivationFnc(double x) const;

   int fLayers;
   int fLayerSize[3];
   double fWeightMatrix0to1[12][9]; // weight matrix from layer 0 to 1
   double fWeightMatrix1to2[1][12]; // weight matrix from layer 1 to 2

   double * fWeights[3];
};
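
// Network geometry: HiddenLayers = "N+3" with N = 8 input variables gives 11
// hidden neurons, and each non-output layer carries one extra bias node, so
// fLayerSize = {9, 12, 1}. fWeightMatrix0to1 therefore uses 11 rows (one per
// hidden neuron) of 9 columns (8 inputs + bias), and fWeightMatrix1to2 is
// 1 x 12 (11 hidden neurons + bias).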

inline void DNeutralShower_FCALQualityMLP::Initialize()
{
   // build network structure
   fLayers = 3;
   fLayerSize[0] = 9; fWeights[0] = new double[9];
   fLayerSize[1] = 12; fWeights[1] = new double[12];
   fLayerSize[2] = 1; fWeights[2] = new double[1];
   // weight matrix from layer 0 to 1
   fWeightMatrix0to1[0][0] = -5.67548018894114;
   fWeightMatrix0to1[1][0] = 0.671209634882693;
   fWeightMatrix0to1[2][0] = 2.08345048146891;
   fWeightMatrix0to1[3][0] = 8.03348403391923;
   fWeightMatrix0to1[4][0] = -2.99437705684878;
   fWeightMatrix0to1[5][0] = -5.77002471289812;
   fWeightMatrix0to1[6][0] = 0.148954096917011;
   fWeightMatrix0to1[7][0] = 0.872486561760455;
   fWeightMatrix0to1[8][0] = 3.41922816394206;
   fWeightMatrix0to1[9][0] = -19.5590817002943;
   fWeightMatrix0to1[10][0] = -2.66195505436744;
   fWeightMatrix0to1[0][1] = -0.508924769224103;
   fWeightMatrix0to1[1][1] = -0.413826939497546;
   fWeightMatrix0to1[2][1] = 2.55208743545114;
   fWeightMatrix0to1[3][1] = 8.74978061120493;
   fWeightMatrix0to1[4][1] = 2.91640933526037;
   fWeightMatrix0to1[5][1] = 15.5992956753358;
   fWeightMatrix0to1[6][1] = 4.16454503643829;
   fWeightMatrix0to1[7][1] = 0.335951584076802;
   fWeightMatrix0to1[8][1] = 0.443761406492043;
   fWeightMatrix0to1[9][1] = -1.27608307823435;
   fWeightMatrix0to1[10][1] = 0.723088796764191;
   fWeightMatrix0to1[0][2] = 0.00236735007849696;
   fWeightMatrix0to1[1][2] = -0.32748149144678;
   fWeightMatrix0to1[2][2] = 0.0378203764594197;
   fWeightMatrix0to1[3][2] = 0.864139835325729;
   fWeightMatrix0to1[4][2] = 1.1741041486026;
   fWeightMatrix0to1[5][2] = 0.202813866259565;
   fWeightMatrix0to1[6][2] = -2.99801331683627;
   fWeightMatrix0to1[7][2] = -0.753917826643766;
   fWeightMatrix0to1[8][2] = 1.42114606981556;
   fWeightMatrix0to1[9][2] = -1.66405308842105;
   fWeightMatrix0to1[10][2] = 0.602137259462235;
   fWeightMatrix0to1[0][3] = 0.531704015403295;
   fWeightMatrix0to1[1][3] = 11.2465707337611;
   fWeightMatrix0to1[2][3] = 12.3039731123197;
   fWeightMatrix0to1[3][3] = 0.843360503501562;
   fWeightMatrix0to1[4][3] = 68.7904388998873;
   fWeightMatrix0to1[5][3] = 20.7060822974567;
   fWeightMatrix0to1[6][3] = -3.47388885611392;
   fWeightMatrix0to1[7][3] = 14.191555888897;
   fWeightMatrix0to1[8][3] = -20.8337091131509;
   fWeightMatrix0to1[9][3] = -0.403970269048419;
   fWeightMatrix0to1[10][3] = 32.0709599268463;
   fWeightMatrix0to1[0][4] = 2.84373100343368;
   fWeightMatrix0to1[1][4] = 3.44508123789802;
   fWeightMatrix0to1[2][4] = 18.8527523704618;
   fWeightMatrix0to1[3][4] = 11.8130280910594;
   fWeightMatrix0to1[4][4] = 42.7072286627262;
   fWeightMatrix0to1[5][4] = 24.6370840569919;
   fWeightMatrix0to1[6][4] = -2.86684710766166;
   fWeightMatrix0to1[7][4] = 19.8622638845141;
   fWeightMatrix0to1[8][4] = -31.583590821179;
   fWeightMatrix0to1[9][4] = 7.27382508545517;
   fWeightMatrix0to1[10][4] = 20.3083673421654;
   fWeightMatrix0to1[0][5] = 0.500208715016816;
   fWeightMatrix0to1[1][5] = -0.966403723015704;
   fWeightMatrix0to1[2][5] = 1.27221558852442;
   fWeightMatrix0to1[3][5] = 2.26006771546241;
   fWeightMatrix0to1[4][5] = 0.0785642731700331;
   fWeightMatrix0to1[5][5] = 0.617801329166234;
   fWeightMatrix0to1[6][5] = 0.398921931295215;
   fWeightMatrix0to1[7][5] = 1.30742304878111;
   fWeightMatrix0to1[8][5] = -1.5854303524047;
   fWeightMatrix0to1[9][5] = 0.141055620539128;
   fWeightMatrix0to1[10][5] = 0.00899660912966566;
   fWeightMatrix0to1[0][6] = -0.0508883863396253;
   fWeightMatrix0to1[1][6] = -4.87942115767901;
   fWeightMatrix0to1[2][6] = -6.43897971268509;
   fWeightMatrix0to1[3][6] = -3.1501589923107;
   fWeightMatrix0to1[4][6] = -30.7770129585863;
   fWeightMatrix0to1[5][6] = -8.09296254959834;
   fWeightMatrix0to1[6][6] = 5.05250669726875;
   fWeightMatrix0to1[7][6] = -7.05308033490298;
   fWeightMatrix0to1[8][6] = 14.0953692508894;
   fWeightMatrix0to1[9][6] = 2.64029964307745;
   fWeightMatrix0to1[10][6] = -17.9294940033197;
   fWeightMatrix0to1[0][7] = -0.597870262314557;
   fWeightMatrix0to1[1][7] = 5.8670108587539;
   fWeightMatrix0to1[2][7] = 11.1416376728042;
   fWeightMatrix0to1[3][7] = 2.80651871091009;
   fWeightMatrix0to1[4][7] = 41.2273170837517;
   fWeightMatrix0to1[5][7] = 10.0532138457348;
   fWeightMatrix0to1[6][7] = -1.88967602119454;
   fWeightMatrix0to1[7][7] = 12.1243481622399;
   fWeightMatrix0to1[8][7] = -18.1601793890722;
   fWeightMatrix0to1[9][7] = -2.47560187111571;
   fWeightMatrix0to1[10][7] = 19.0890322060697;
   fWeightMatrix0to1[0][8] = -1.30446854774301;
   fWeightMatrix0to1[1][8] = 3.22895639429101;
   fWeightMatrix0to1[2][8] = 10.4491920342795;
   fWeightMatrix0to1[3][8] = 3.9033323280269;
   fWeightMatrix0to1[4][8] = 42.4730956576089;
   fWeightMatrix0to1[5][8] = 7.77940842702126;
   fWeightMatrix0to1[6][8] = -7.44115551912371;
   fWeightMatrix0to1[7][8] = 13.8025369842737;
   fWeightMatrix0to1[8][8] = -18.9472518819945;
   fWeightMatrix0to1[9][8] = -7.14534776675644;
   fWeightMatrix0to1[10][8] = 20.0808465165044;
   // weight matrix from layer 1 to 2
   fWeightMatrix1to2[0][0] = -1.36904487724954;
   fWeightMatrix1to2[0][1] = -3.88451495296165;
   fWeightMatrix1to2[0][2] = -3.21831596336124;
   fWeightMatrix1to2[0][3] = 0.515375727140815;
   fWeightMatrix1to2[0][4] = 2.75074777877701;
   fWeightMatrix1to2[0][5] = -1.26289162166287;
   fWeightMatrix1to2[0][6] = 1.52012250393066;
   fWeightMatrix1to2[0][7] = -2.62164354202067;
   fWeightMatrix1to2[0][8] = -1.59667983619438;
   fWeightMatrix1to2[0][9] = -2.22405141009694;
   fWeightMatrix1to2[0][10] = 8.38563962629394;
   fWeightMatrix1to2[0][11] = 0.68727802568716;
}

inline double DNeutralShower_FCALQualityMLP::GetMvaValue__( const std::vector<double>& inputValues ) const
{
   if (inputValues.size() != (unsigned int)fLayerSize[0]-1) {
      std::cout << "Input vector needs to be of size " << fLayerSize[0]-1 << std::endl;
      return 0;
   }

   for (int l=0; l<fLayers; l++)
      for (int i=0; i<fLayerSize[l]; i++) fWeights[l][i]=0;

   for (int l=0; l<fLayers-1; l++)
      fWeights[l][fLayerSize[l]-1]=1;

   for (int i=0; i<fLayerSize[0]-1; i++)
      fWeights[0][i]=inputValues[i];

   // layer 0 to 1
   for (int o=0; o<fLayerSize[1]-1; o++) {
      for (int i=0; i<fLayerSize[0]; i++) {
         double inputVal = fWeightMatrix0to1[o][i] * fWeights[0][i];
         fWeights[1][o] += inputVal;
      }
      fWeights[1][o] = ActivationFnc(fWeights[1][o]);
   }
   // layer 1 to 2
   for (int o=0; o<fLayerSize[2]; o++) {
      for (int i=0; i<fLayerSize[1]; i++) {
         double inputVal = fWeightMatrix1to2[o][i] * fWeights[1][i];
         fWeights[2][o] += inputVal;
      }
      fWeights[2][o] = OutputActivationFnc(fWeights[2][o]);
   }

   return fWeights[2][0];
}

inline double DNeutralShower_FCALQualityMLP::ActivationFnc(double x) const {
   // radial
   return exp(-x*x/2.0);
}

inline double DNeutralShower_FCALQualityMLP::OutputActivationFnc(double x) const {
   // sigmoid
   return 1.0/(1.0+exp(-x));
}
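
// Per the NeuronType and EstimatorType options above, the hidden layer uses
// the radial activation exp(-x*x/2) and the output node a sigmoid, so
// GetMvaValue returns a value in (0,1) suited to the cross-entropy (CE)
// estimator.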

// Clean up
inline void DNeutralShower_FCALQualityMLP::Clear()
{
   // clean up the arrays
   for (int lIdx = 0; lIdx < 3; lIdx++) {
      delete[] fWeights[lIdx];
   }
}
inline double DNeutralShower_FCALQualityMLP::GetMvaValue( const std::vector<double>& inputValues ) const
{
   // classifier response value
   double retval = 0;

   // classifier response, sanity check first
   if (!IsStatusClean()) {
      std::cout << "Problem in class \"" << fClassName << "\": cannot return classifier response"
                << " because status is dirty" << std::endl;
      retval = 0;
   }
   else {
      if (IsNormalised()) {
         // normalise variables
         std::vector<double> iV;
         iV.reserve(inputValues.size());
         int ivar = 0;
         for (std::vector<double>::const_iterator varIt = inputValues.begin();
              varIt != inputValues.end(); varIt++, ivar++) {
            iV.push_back(NormVariable( *varIt, fVmin[ivar], fVmax[ivar] ));
         }
         Transform( iV, -1 );
         retval = GetMvaValue__( iV );
      }
      else {
         std::vector<double> iV;
         int ivar = 0;
         for (std::vector<double>::const_iterator varIt = inputValues.begin();
              varIt != inputValues.end(); varIt++, ivar++) {
            iV.push_back(*varIt);
         }
         Transform( iV, -1 );
         retval = GetMvaValue__( iV );
      }
   }

   return retval;
}

//_______________________________________________________________________
inline void DNeutralShower_FCALQualityMLP::InitTransform_1()
{
   // Normalization transformation, initialisation
   fMin_1[0][0] = 2;
   fMax_1[0][0] = 16;
   fMin_1[1][0] = 2;
   fMax_1[1][0] = 18;
   fMin_1[2][0] = 2;
   fMax_1[2][0] = 18;
   fMin_1[0][1] = 0.323900818825;
   fMax_1[0][1] = 1;
   fMin_1[1][1] = 0.223209112883;
   fMax_1[1][1] = 1;
   fMin_1[2][1] = 0.223209112883;
   fMax_1[2][1] = 1;
   fMin_1[0][2] = 0.196359068155;
   fMax_1[0][2] = 1;
   fMin_1[1][2] = 0.159292444587;
   fMax_1[1][2] = 1;
   fMin_1[2][2] = 0.159292444587;
   fMax_1[2][2] = 1;
   fMin_1[0][3] = 3.04391463146e-08;
   fMax_1[0][3] = 123.788467407;
   fMin_1[1][3] = 2.02385636028e-10;
   fMax_1[1][3] = 251.844528198;
   fMin_1[2][3] = 2.02385636028e-10;
   fMax_1[2][3] = 251.844528198;
   fMin_1[0][4] = 1.07598518984e-09;
   fMax_1[0][4] = 32.411315918;
   fMin_1[1][4] = 2.56061630233e-11;
   fMax_1[1][4] = 174.307571411;
   fMin_1[2][4] = 2.56061630233e-11;
   fMax_1[2][4] = 174.307571411;
   fMin_1[0][5] = -1;
   fMax_1[0][5] = 1;
   fMin_1[1][5] = -1;
   fMax_1[1][5] = 1;
   fMin_1[2][5] = -1;
   fMax_1[2][5] = 1;
   fMin_1[0][6] = -1266.28295898;
   fMax_1[0][6] = 202.097763062;
   fMin_1[1][6] = -98087.1953125;
   fMax_1[1][6] = 479840.125;
   fMin_1[2][6] = -98087.1953125;
   fMax_1[2][6] = 479840.125;
   fMin_1[0][7] = -999975.1875;
   fMax_1[0][7] = 127.996780396;
   fMin_1[1][7] = -999973.375;
   fMax_1[1][7] = 164.864715576;
   fMin_1[2][7] = -999975.1875;
   fMax_1[2][7] = 164.864715576;
}

//_______________________________________________________________________
inline void DNeutralShower_FCALQualityMLP::Transform_1( std::vector<double>& iv, int cls) const
{
   // Normalization transformation
   if (cls < 0 || cls > 2) {
      cls = 2; // out-of-range class index: use the "all events" row
   }
   const int nVar = 8;

   // get indices of used variables

   // define the indices of the variables which are transformed by this transformation
   static std::vector<int> indicesGet;
   static std::vector<int> indicesPut;

   if ( indicesGet.empty() ) {
      indicesGet.reserve(fNvars);
      indicesGet.push_back( 0);
      indicesGet.push_back( 1);
      indicesGet.push_back( 2);
      indicesGet.push_back( 3);
      indicesGet.push_back( 4);
      indicesGet.push_back( 5);
      indicesGet.push_back( 6);
      indicesGet.push_back( 7);
   }
   if ( indicesPut.empty() ) {
      indicesPut.reserve(fNvars);
      indicesPut.push_back( 0);
      indicesPut.push_back( 1);
      indicesPut.push_back( 2);
      indicesPut.push_back( 3);
      indicesPut.push_back( 4);
      indicesPut.push_back( 5);
      indicesPut.push_back( 6);
      indicesPut.push_back( 7);
   }

   static std::vector<double> dv;
   dv.resize(nVar);
   for (int ivar=0; ivar<nVar; ivar++) dv[ivar] = iv[indicesGet.at(ivar)];
   for (int ivar=0; ivar<8; ivar++) {
      double offset = fMin_1[cls][ivar];
      double scale = 1.0/(fMax_1[cls][ivar]-fMin_1[cls][ivar]);
      iv[indicesPut.at(ivar)] = (dv[ivar]-offset)*scale * 2 - 1;
   }
}
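
// Transform_1 rescales each variable linearly to [-1, 1] using row cls of
// the min/max tables; GetMvaValue calls Transform( iv, -1 ), which falls
// through to the cls = 2 row, whose limits match the training ranges in the
// #VAR block above.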

//_______________________________________________________________________
inline void DNeutralShower_FCALQualityMLP::InitTransform()
{
   InitTransform_1();
}

//_______________________________________________________________________
inline void DNeutralShower_FCALQualityMLP::Transform( std::vector<double>& iv, int sigOrBgd ) const
{
   Transform_1( iv, sigOrBgd );
}