double gaussDouble (double mean, double sigma)
{
    static std::default_random_engine generator;
    std::normal_distribution<double> distribution (mean, sigma);
    return distribution (generator);
}
double uniformDouble (double minValue, double maxValue)
{
    static std::default_random_engine generator;
    std::uniform_real_distribution<double> distribution (minValue, maxValue);
    return distribution (generator);
}
int randomInt (int maxValue)
{
    static std::default_random_engine generator;
    std::uniform_int_distribution<int> distribution (0, maxValue-1);
    return distribution (generator);
}
double studenttDouble (double distributionParameter)
{
    static std::default_random_engine generator;
    std::student_t_distribution<double> distribution (distributionParameter);
    return distribution (generator);
}
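All four helpers share the same pattern: a function-local static std::default_random_engine (default-seeded, so every program run reproduces the same sequence) feeding a freshly constructed distribution object. A usage sketch; the initWeights helper below is hypothetical and only illustrates how gaussDouble would typically seed a weight vector:

#include <vector>

// hypothetical helper: draw numWeights values from N(0, sigma)
std::vector<double> initWeights (size_t numWeights, double sigma)
{
    std::vector<double> weights;
    weights.reserve (numWeights);
    for (size_t i = 0; i < numWeights; ++i)
        weights.push_back (gaussDouble (0.0, sigma));
    return weights;
}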
LayerData::LayerData (const_iterator_type itInputBegin, const_iterator_type itInputEnd,
                      ModeOutputValues eModeOutput)
    : m_hasDropOut (false)
    , m_isInputLayer (true)
    , m_hasWeights (false)
    , m_hasGradients (false)
    // ...
{
    m_size = std::distance (itInputBegin, itInputEnd);
}
// trailing parameters of a further LayerData constructor overload:
std::shared_ptr<std::function<double(double)>> _activationFunction,
std::shared_ptr<std::function<double(double)>> _inverseActivationFunction,
// the same activation-function parameter in a second overload:
std::shared_ptr<std::function<double(double)>> _activationFunction,
// SIGMOID output: map each node value through the sigmoid function
std::transform (begin (m_values), end (m_values),
                std::back_inserter (probabilitiesContainer), (*Sigmoid.get ()));
// ...
// SOFTMAX output: exponentiate each value and accumulate the sum ...
std::for_each (begin (probabilitiesContainer), end (probabilitiesContainer),
               [&sum](double& p){ p = std::exp (p); sum += p; });
// ... then normalize so the outputs sum to one
std::for_each (begin (probabilitiesContainer), end (probabilitiesContainer),
               [sum](double& p){ p /= sum; });
// ...
return probabilitiesContainer;
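The SOFTMAX branch exponentiates the raw node values directly, which can overflow std::exp for large activations. A self-contained sketch of the standard guard (not part of the original code): subtracting the maximum first leaves the result mathematically unchanged:

#include <algorithm>
#include <cmath>
#include <vector>

std::vector<double> stableSoftmax (std::vector<double> values)
{
    double maxVal = *std::max_element (begin (values), end (values));
    double sum = 0;
    for (double& p : values) { p = std::exp (p - maxVal); sum += p; } // exp of shifted values
    for (double& p : values) { p /= sum; }                            // normalize to unit sum
    return values;
}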
Layer::Layer (size_t _numNodes, EnumFunction _activationFunction, ModeOutputValues eModeOutputValues)
    : m_numNodes (_numNodes)
    , m_eModeOutputValues (eModeOutputValues)
    , m_activationFunctionType (_activationFunction)
{
    for (size_t iNode = 0; iNode < _numNodes; ++iNode)
    {
        // ...
    }
    switch (_activationFunction)
    {
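    // The cases of this switch are not part of this listing; they presumably
    // bind the static activation/inverse pairs documented in the member index
    // below. A sketch of one such case (the enum value name TANH is an
    // assumption; Tanh and InvTanh are the documented static members):
    case EnumFunction::TANH:
        m_activationFunction = Tanh;
        m_inverseActivationFunction = InvTanh;
        break;
    // further cases: ReLU/InvReLU, SymmReLU/InvSymmReLU, SoftSign/InvSoftSign,
    // Gauss/InvGauss, GaussComplement/InvGaussComplement, Linear/InvLinear, ...
    }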
Settings::Settings (TString name,
                    size_t _convergenceSteps, size_t _batchSize, size_t _testRepetitions,
                    double _factorWeightDecay, EnumRegularization eRegularization,
                    MinimizerType _eMinimizerType, double _learningRate,
                    double _momentum, int _repetitions, bool _useMultithreading)
    : m_timer (100, name)
    // ...
    , m_maxProgress (100)
    , m_convergenceSteps (_convergenceSteps)
    , m_batchSize (_batchSize)
    , m_testRepetitions (_testRepetitions)
    , m_factorWeightDecay (_factorWeightDecay)
    // ...
    , m_regularization (eRegularization)
    , fLearningRate (_learningRate)
    , fMomentum (_momentum)
    , fRepetitions (_repetitions)
    , fMinimizerType (_eMinimizerType)
    , m_convergenceCount (0)
    , m_maxConvergenceCount (0)
    // ...
    , m_useMultithreading (_useMultithreading)
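Given the defaulted declaration shown in the member index below, most arguments can be omitted when constructing Settings; a usage sketch (the name is arbitrary):

// all defaults: 15 convergence steps, batch size 10, learning rate 1e-5, ...
Settings settings ("training");
// or overriding a few of the leading parameters:
Settings custom ("training", /*_convergenceSteps=*/ 30, /*_batchSize=*/ 64);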
// book the monitoring histograms
create ("ROC", 100, 0, 1, 100, 0, 1);
create ("Significance", 100, 0, 1, 100, 0, 3);
create ("OutputSig", 100, 0, 1);
create ("OutputBkg", 100, 0, 1);
// record one test-sample result for later evaluation
m_output.push_back (output);
m_targets.push_back (target);
m_weights.push_back (weight);
if (m_output.empty ())
    return;

double minVal = *std::min_element (begin (m_output), end (m_output));
double maxVal = *std::max_element (begin (m_output), end (m_output));
const size_t numBinsROC = 1000;
const size_t numBinsData = 100;

std::vector<double> truePositives  (numBinsROC+1, 0);
std::vector<double> falsePositives (numBinsROC+1, 0);
std::vector<double> trueNegatives  (numBinsROC+1, 0);
std::vector<double> falseNegatives (numBinsROC+1, 0);

std::vector<double> x (numBinsData, 0);
std::vector<double> datSig (numBinsData+1, 0);
std::vector<double> datBkg (numBinsData+1, 0);

double binSizeROC  = (maxVal - minVal)/(double)numBinsROC;
double binSizeData = (maxVal - minVal)/(double)numBinsData;

double sumWeightsSig = 0.0;
double sumWeightsBkg = 0.0;
for (size_t b = 0; b < numBinsData; ++b)
{
    double binData = minVal + b*binSizeData;
    // ...
}

// guard against a degenerate output range before dividing by binSizeROC
if (fabs (binSizeROC) < 0.0001)
    return;
for (size_t i = 0, iEnd = m_output.size (); i < iEnd; ++i)
{
    double val    = m_output.at (i);
    double truth  = m_targets.at (i);
    double weight = m_weights.at (i);

    bool isSignal = (truth > 0.5);

    // rescale to the configured weight sums, if they are set
    if (m_sumOfSigWeights != 0 && m_sumOfBkgWeights != 0)
    {
        if (isSignal)
            weight *= m_sumOfSigWeights;
        else
            weight *= m_sumOfBkgWeights;
    }

    size_t binROC  = (val-minVal)/binSizeROC;
    size_t binData = (val-minVal)/binSizeData;

    if (isSignal)
    {
        // a signal event is a true positive for every cut at or below its value ...
        for (size_t n = 0; n <= binROC; ++n)
            truePositives.at (n) += weight;
        // ... and a false negative for every cut above it
        for (size_t n = binROC+1; n < numBinsROC; ++n)
            falseNegatives.at (n) += weight;
        datSig.at (binData) += weight;
        sumWeightsSig += weight;
    }
    else
    {
        for (size_t n = 0; n <= binROC; ++n)
            falsePositives.at (n) += weight;
        for (size_t n = binROC+1; n < numBinsROC; ++n)
            trueNegatives.at (n) += weight;
        datBkg.at (binData) += weight;
        sumWeightsBkg += weight;
    }
}
std::vector<double> sigEff;
std::vector<double> backRej;

double bestSignificance = 0;
double bestCutSignificance = 0;

double numEventsScaleFactor = 1.0;
if (m_scaleToNumEvents > 0)
{
    size_t numEvents = m_output.size ();
    numEventsScaleFactor = double (m_scaleToNumEvents)/double (numEvents);
}
clear ("Significance");

for (size_t i = 0; i < numBinsROC; ++i)
{
    double tp = truePositives.at (i) * numEventsScaleFactor;
    double fp = falsePositives.at (i) * numEventsScaleFactor;
    double tn = trueNegatives.at (i) * numEventsScaleFactor;
    double fn = falseNegatives.at (i) * numEventsScaleFactor;

    double seff = (tp+fn == 0.0 ? 1.0 : (tp / (tp+fn)));  // signal efficiency
    double brej = (tn+fp == 0.0 ? 0.0 : (tn / (tn+fp)));  // background rejection

    sigEff.push_back (seff);
    backRej.push_back (brej);
    // ...
    double currentCut = (i * binSizeROC) + minVal;
    // ... (sig and bkg are derived from the scaled counts above)
    double significance = sig / sqrt (sig + bkg);
    if (significance > bestSignificance)
    {
        bestSignificance = significance;
        bestCutSignificance = currentCut;
    }

    addPoint ("Significance", currentCut, significance);
}
m_significances.push_back (bestSignificance);
static size_t testCycle = 0;
// ...

for (size_t i = 0; i < numBinsData; ++i)
{
    addPoint ("OutputSig", x.at (i), datSig.at (i)/sumWeightsSig);
    addPoint ("OutputBkg", x.at (i), datBkg.at (i)/sumWeightsBkg);
}
// ...
m_cutValue = bestCutSignificance;
m_sumOfSigWeights = sumOfSigWeights;
m_sumOfBkgWeights = sumOfBkgWeights;
void setResultComputation (std::string _fileNameNetConfig,
                           std::string _fileNameResult,
                           std::vector<Pattern>* _resultPatternContainer)
{
    m_pResultPatternContainer = _resultPatternContainer;
    m_fileNameResult = _fileNameResult;
    m_fileNameNetConfig = _fileNameNetConfig;
}
size_t num = 0;
size_t index = 0;
size_t prevNodes (inputSize ());
for (auto& layer : m_layers)
{
    if (index >= trainingStartLayer)
        num += layer.numWeights (prevNodes);
    prevNodes = layer.numNodes ();
    ++index;
}
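Assuming Layer::numWeights (prevNodes) counts a fully connected weight matrix of prevNodes * numNodes entries (the implementation is not shown in this listing), a net with 10 inputs and layers of 5 and 1 nodes would report 10*5 + 5*1 = 55 weights.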
for (auto& layer : m_layers)
{
    if (index >= trainingStartLayer)
        num += layer.numNodes ();
    ++index;
}
size_t numDrops = dropFraction * _numNodes;
if (numDrops >= _numNodes) // keep at least one active node per layer
    numDrops = _numNodes - 1;
// append "true" for kept nodes and "false" for dropped nodes, ...
dropContainer.insert (end (dropContainer), _numNodes-numDrops, true);
dropContainer.insert (end (dropContainer), numDrops, false);
// ... then shuffle within this layer's block of the container
std::random_shuffle (end (dropContainer)-_numNodes, end (dropContainer));
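std::random_shuffle, used above, was deprecated in C++14 and removed in C++17. The same step expressed with std::shuffle, as a sketch (the engine name is illustrative; any uniform random bit generator works):

#include <algorithm>
#include <random>

static std::default_random_engine dropEngine;
std::shuffle (end (dropContainer)-_numNodes, end (dropContainer), dropEngine);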
void addPoint(std::string histoName, double x)
for monitoring
void setWeightSums(double sumOfSigWeights, double sumOfBkgWeights)
set the weight sums to be scaled to (preparations for monitoring output)
static std::shared_ptr< std::function< double(double)> > InvGauss
static long int sum(long int i)
MinimizerType
list all the minimizer types
static std::shared_ptr< std::function< double(double)> > Tanh
std::vector< char > DropContainer
static std::shared_ptr< std::function< double(double)> > InvReLU
std::shared_ptr< std::function< double(double)> > m_inverseActivationFunction
stores the inverse activation function
bool isFlagSet(T flag, T value)
tests whether a flag bit is set in a value (see the sketch after this index)
static std::shared_ptr< std::function< double(double)> > InvTanh
size_t convergenceSteps() const
how many steps until training is deemed to have converged
std::shared_ptr< Monitoring > fMonitoring
void plot(std::string histoName, std::string options, int pad, EColor color)
for monitoring
iterator_type m_itGradientBegin
iterator to the first gradient of this layer in the gradient vector
bool m_hasGradients
does this layer have gradients (only if in training mode)
ModeOutputValues m_eModeOutput
stores the output mode (DIRECT, SIGMOID, SOFTMAX)
static std::shared_ptr< std::function< double(double)> > InvSoftSign
bool m_isInputLayer
is this layer an input layer
static std::shared_ptr< std::function< double(double)> > TanhShift
Settings(TString name, size_t _convergenceSteps=15, size_t _batchSize=10, size_t _testRepetitions=7, double _factorWeightDecay=1e-5, TMVA::DNN::EnumRegularization _regularization=TMVA::DNN::EnumRegularization::NONE, MinimizerType _eMinimizerType=MinimizerType::fSteepest, double _learningRate=1e-5, double _momentum=0.3, int _repetitions=3, bool _multithreading=true)
c'tor
std::vector< double > m_valueGradients
stores the gradients of the values (nodes)
static std::shared_ptr< std::function< double(double)> > Sigmoid
container_type::const_iterator const_iterator_type
const_iterator_type m_itConstWeightBegin
const iterator to the first weight of this layer in the weight vector
const_iterator_type m_itInputBegin
iterator to the first of the nodes in the input node vector
bool m_hasDropOut
is dropOut turned on for this layer?
static std::shared_ptr< std::function< double(double)> > SymmReLU
void startTrainCycle()
action to be done when the training cycle is started
void create(std::string histoName, int bins, double min, double max)
for monitoring
double studenttDouble(double distributionParameter)
LayerData(const_iterator_type itInputBegin, const_iterator_type itInputEnd, ModeOutputValues eModeOutput=ModeOutputValues::DIRECT)
c'tor of LayerData
virtual ~Settings()
d'tor
std::vector< double > m_deltas
stores the deltas for the DNN training
Layer(size_t numNodes, EnumFunction activationFunction, ModeOutputValues eModeOutputValues=ModeOutputValues::DIRECT)
c'tor for defining a Layer
container_type::iterator iterator_type
container_type computeProbabilities() const
compute the probabilities from the node values
size_t m_convergenceCount
std::vector< double > container_type
void endTrainCycle(double)
action to be done when the training cycle is ended
void fillDropContainer(DropContainer &dropContainer, double dropFraction, size_t numNodes) const
prepare the drop-out-container (select the nodes which are to be dropped out)
virtual void endTestCycle()
action to be done when the test cycle is ended
static std::shared_ptr< std::function< double(double)> > SoftSign
static std::shared_ptr< std::function< double(double)> > ReLU
static std::shared_ptr< std::function< double(double)> > InvSigmoid
size_t numWeights(size_t trainingStartLayer=0) const
returns the number of weights in this net
bool m_hasWeights
does this layer have weights (it does not if it is the input layer)
static std::shared_ptr< std::function< double(double)> > GaussComplement
double gaussDouble(double mean, double sigma)
static std::shared_ptr< std::function< double(double)> > Gauss
size_t numNodes(size_t trainingStartLayer=0) const
returns the number of nodes in this net
double uniformDouble(double minValue, double maxValue)
std::vector< double > m_values
stores the values of the nodes in this layer
void setResultComputation(std::string _fileNameNetConfig, std::string _fileNameResult, std::vector< Pattern > *_resultPatternContainer)
preparation for monitoring output
size_t m_maxConvergenceCount
static std::shared_ptr< std::function< double(double)> > Linear
static std::shared_ptr< std::function< double(double)> > InvGaussComplement
const_iterator_type m_itInputEnd
iterator to the end of the nodes in the input node vector
std::shared_ptr< std::function< double(double)> > m_activationFunction
activation function for this layer
static std::shared_ptr< std::function< double(double)> > InvLinear
std::shared_ptr< std::function< double(double)> > m_inverseActivationFunction
inverse activation function for this layer
void testSample(double error, double output, double target, double weight)
action to be done after the computation of a test sample
virtual void startTestCycle()
action to be done when the test cycle is started
static std::shared_ptr< std::function< double(double)> > InvTanhShift
void clear(std::string histoName)
for monitoring
static std::shared_ptr< std::function< double(double)> > ZeroFnc
virtual bool hasConverged(double testError)
has this training converged already? (see the sketch after this index)
int randomInt(int maxValue)
std::shared_ptr< std::function< double(double)> > m_activationFunction
stores the activation function
static std::shared_ptr< std::function< double(double)> > InvSymmReLU
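isFlagSet is declared in the index above, but its body is not part of this listing. A minimal sketch of such a bit-flag test, assuming the output-mode enum is used as a bit mask (the cast details are an assumption):

template <typename T>
bool isFlagSet (T flag, T value)
{
    // assumed implementation: test whether the flag bit is present in value
    return (static_cast<int> (flag) & static_cast<int> (value)) != 0;
}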
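hasConverged is likewise only declared above. A plausible sketch of the usual scheme, counting test cycles without improvement until convergenceSteps() is reached; m_minError is an assumed member, while m_convergenceCount and m_maxConvergenceCount appear in the index:

bool Settings::hasConverged (double testError)
{
    if (testError < m_minError)
    {
        m_convergenceCount = 0;   // improvement: restart the count
        m_minError = testError;   // assumed member tracking the best error so far
    }
    else
    {
        ++m_convergenceCount;
        m_maxConvergenceCount = std::max (m_convergenceCount, m_maxConvergenceCount);
    }
    return m_convergenceCount >= convergenceSteps ();
}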