/// Gauss activation function: f(x) = exp(-(x*s)^2) with fixed width s = 6.0.
/// Peaks at 1.0 for x == 0 and decays rapidly to 0 on both sides.
std::shared_ptr<std::function<double(double)>> Gauss =
   std::make_shared<std::function<double(double)>>(
      [](double value) {
         const double s = 6.0;     // width (steepness) of the Gaussian
         const double z = value * s;
         // z*z instead of std::pow(z, 2.0) (much cheaper), std::exp over
         // unqualified exp for portability.
         return std::exp(-z * z);
      });
/// Complement of the Gauss activation: f(x) = 1 - exp(-(x*s)^2), s = 6.0.
/// Zero at x == 0, approaching 1 away from the origin.
std::shared_ptr<std::function<double(double)>> GaussComplement =
   std::make_shared<std::function<double(double)>>(
      [](double value) {
         const double s = 6.0;     // same width as the Gauss activation
         const double z = value * s;
         // z*z instead of std::pow(z, 2.0); std::exp over unqualified exp.
         return 1.0 - std::exp(-z * z);
      });
/// Derivative of the linear (identity) activation: constant 1 everywhere.
/// The argument is intentionally unnamed — the slope does not depend on it.
std::shared_ptr<std::function<double(double)>> InvLinear =
   std::make_shared<std::function<double(double)>>(
      [](double /*value*/) -> double {
         return 1.0;
      });
/// Derivative of ReLU: 1 where the unit is active (value above the margin),
/// 0 otherwise. The margin is fixed at 0.0, matching the ReLU activation.
std::shared_ptr<std::function<double(double)>> InvReLU =
   std::make_shared<std::function<double(double)>>(
      [](double value) -> double {
         const double margin = 0.0;
         if (value > margin)
            return 1.0;
         return 0.0;
      });
/// Derivative of the SoftSign activation, expressed in terms of the
/// activation output: f'(y) = (1 - |y|)^2.
std::shared_ptr<std::function<double(double)>> InvSoftSign =
   std::make_shared<std::function<double(double)>>(
      [](double value) {
         // std::fabs over unqualified fabs for portability; t*t instead of
         // std::pow(t, 2.0), which is far cheaper for an integer exponent.
         const double t = 1.0 - std::fabs(value);
         return t * t;
      });
/// Derivative of the symmetric ReLU: 1 outside the dead zone
/// (-margin, margin], 0 inside it. Margin is fixed at 0.3.
std::shared_ptr<std::function<double(double)>> InvSymmReLU =
   std::make_shared<std::function<double(double)>>(
      [](double value) -> double {
         const double margin = 0.3;
         // Equivalent to the chained ternary: slope 1 on either side of the
         // margin, 0 in between (including exactly at +/- margin).
         if (value > margin || value < -margin)
            return 1.0;
         return 0.0;
      });
/// Derivative of tanh, expressed in terms of the activation output:
/// f'(y) = 1 - y^2 (since d/dx tanh(x) = 1 - tanh(x)^2).
std::shared_ptr<std::function<double(double)>> InvTanh =
   std::make_shared<std::function<double(double)>>(
      [](double value) {
         // value*value instead of std::pow(value, 2.0): same result,
         // avoids the general-purpose transcendental call.
         return 1.0 - value * value;
      });
/// ReLU activation: f(x) = x - margin for x above the margin, else 0.
/// Margin is fixed at 0.0, i.e. the standard max(0, x).
std::shared_ptr<std::function<double(double)>> ReLU =
   std::make_shared<std::function<double(double)>>(
      [](double value) -> double {
         const double margin = 0.0;
         if (value > margin)
            return value - margin;
         return 0.0;
      });
/// Logistic sigmoid: f(x) = 1 / (1 + e^-x).
/// The input is clamped to [-100, 100] first so std::exp cannot overflow;
/// outside that range the sigmoid is already saturated to ~0 or ~1.
std::shared_ptr<std::function<double(double)>> Sigmoid =
   std::make_shared<std::function<double(double)>>(
      [](double value) {
         const double clamped = std::min(100.0, std::max(-100.0, value));
         return 1.0 / (1.0 + std::exp(-clamped));
      });
/// SoftPlus activation: f(x) = log(1 + e^x), a smooth approximation of ReLU.
/// Uses the numerically stable split form: the naive log(1 + exp(x))
/// overflows to +inf for x > ~709 and loses precision for small |x|;
/// log1p keeps full accuracy and x + log1p(e^-x) never overflows.
std::shared_ptr<std::function<double(double)>> SoftPlus =
   std::make_shared<std::function<double(double)>>(
      [](double value) {
         if (value > 0.0)
            return value + std::log1p(std::exp(-value));  // large x: ~ x
         return std::log1p(std::exp(value));              // x <= 0: safe
      });
/// Zero function: returns 0 regardless of the input (used where a layer
/// needs a no-op/disabled activation derivative).
std::shared_ptr<std::function<double(double)>> ZeroFnc =
   std::make_shared<std::function<double(double)>>(
      [](double /*value*/) -> double {
         return 0.0;
      });
/// Hyperbolic-tangent activation: f(x) = tanh(x), mapping the real line
/// onto (-1, 1).
std::shared_ptr<std::function<double(double)>> Tanh =
   std::make_shared<std::function<double(double)>>(
      [](double value) {
         // std::tanh over unqualified tanh: guarantees the <cmath>
         // double overload rather than relying on the C global namespace.
         return std::tanh(value);
      });
37 static std::default_random_engine
generator;
45 static std::default_random_engine
generator;
54 static std::default_random_engine
generator;
62 static std::default_random_engine
generator;
69 : m_hasDropOut (
false)
70 , m_isInputLayer (
true)
71 , m_hasWeights (
false)
72 , m_hasGradients (
false)
82 : m_hasDropOut (
false)
83 , m_isInputLayer (
true)
84 , m_hasWeights (
false)
85 , m_hasGradients (
false)
104 , m_hasDropOut (
false)
109 , m_isInputLayer (
false)
110 , m_hasWeights (
true)
111 , m_hasGradients (
true)
126 , m_hasDropOut (
false)
129 , m_inverseActivationFunction ()
130 , m_isInputLayer (
false)
131 , m_hasWeights (
true)
132 , m_hasGradients (
false)
171 for (
size_t iNode = 0; iNode <
_numNodes; ++iNode)
237 : m_timer (100,
name)
239 , m_maxProgress (100)
253 , m_convergenceCount (0)
254 , m_maxConvergenceCount (0)
285 create (
"ROC", 100, 0, 1, 100, 0, 1);
286 create (
"Significance", 100, 0, 1, 100, 0, 3);
287 create (
"OutputSig", 100, 0, 1);
288 create (
"OutputBkg", 100, 0, 1);
408 std::vector<double>
sigEff;
422 clear (
"Significance");
432 double brej = (
tn+fp == 0.0 ? 0.0 : (
tn / (
tn+fp)));
563 num +=
layer.numNodes ();
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows the typed iteration through a TCollection.
winID h TVirtualViewer3D TVirtualGLPainter p
winID h TVirtualViewer3D TVirtualGLPainter char TVirtualGLPainter plot
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h Atom_t Int_t ULong_t ULong_t unsigned char prop_list Atom_t Atom_t target
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t index
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void value
void startTrainCycle()
action to be done when the training cycle is started (e.g.
void endTrainCycle(double)
action to be done when the training cycle is ended (e.g.
virtual void endTestCycle()
action to be done when the training cycle is ended (e.g.
std::vector< Pattern > * m_pResultPatternContainer
void setResultComputation(std::string _fileNameNetConfig, std::string _fileNameResult, std::vector< Pattern > *_resultPatternContainer)
preparation for monitoring output
std::string m_fileNameResult
std::vector< double > m_significances
std::vector< double > m_weights
std::string m_fileNameNetConfig
std::vector< double > m_targets
void testSample(double error, double output, double target, double weight)
action to be done after the computation of a test sample (e.g.
size_t m_scaleToNumEvents
virtual void startTestCycle()
action to be done when the test cycle is started (e.g.
void setWeightSums(double sumOfSigWeights, double sumOfBkgWeights)
set the weight sums to be scaled to (preparations for monitoring output)
std::vector< double > m_output
const_iterator_type m_itInputBegin
iterator to the first of the nodes in the input node vector
std::vector< double > m_deltas
stores the deltas for the DNN training
container_type::iterator iterator_type
LayerData(const_iterator_type itInputBegin, const_iterator_type itInputEnd, ModeOutputValues eModeOutput=ModeOutputValues::DIRECT)
c'tor of LayerData
std::vector< double > container_type
std::vector< double > m_values
stores the values of the nodes in this layer
const_iterator_type m_itInputEnd
iterator to the end of the nodes in the input node vector
container_type::const_iterator const_iterator_type
std::vector< double > m_valueGradients
stores the gradients of the values (nodes)
container_type computeProbabilities() const
compute the probabilities from the node values
ModeOutputValues m_eModeOutput
stores the output mode (DIRECT, SIGMOID, SOFTMAX)
std::shared_ptr< std::function< double(double)> > m_activationFunction
stores the activation function
std::shared_ptr< std::function< double(double)> > m_inverseActivationFunction
stores the inverse activation function
Layer(size_t numNodes, EnumFunction activationFunction, ModeOutputValues eModeOutputValues=ModeOutputValues::DIRECT)
c'tor for defining a Layer
std::vector< Layer > m_layers
layer-structure-data
size_t inputSize() const
input size of the DNN
size_t numNodes(size_t trainingStartLayer=0) const
returns the number of nodes in this net
void fillDropContainer(DropContainer &dropContainer, double dropFraction, size_t numNodes) const
prepare the drop-out-container (select the nodes which are to be dropped out)
size_t numWeights(size_t trainingStartLayer=0) const
returns the number of weights in this net
void clear(std::string histoName)
for monitoring
virtual bool hasConverged(double testError)
has this training converged already?
Settings(TString name, size_t _convergenceSteps=15, size_t _batchSize=10, size_t _testRepetitions=7, double _factorWeightDecay=1e-5, TMVA::DNN::EnumRegularization _regularization=TMVA::DNN::EnumRegularization::NONE, MinimizerType _eMinimizerType=MinimizerType::fSteepest, double _learningRate=1e-5, double _momentum=0.3, int _repetitions=3, bool _multithreading=true)
c'tor
void addPoint(std::string histoName, double x)
for monitoring
size_t m_convergenceCount
size_t convergenceSteps() const
how many steps until training is deemed to have converged
std::shared_ptr< Monitoring > fMonitoring
void create(std::string histoName, int bins, double min, double max)
for monitoring
size_t m_maxConvergenceCount
std::shared_ptr< std::function< double(double)> > InvGauss
double uniformDouble(double minValue, double maxValue)
std::shared_ptr< std::function< double(double)> > SymmReLU
std::shared_ptr< std::function< double(double)> > TanhShift
std::shared_ptr< std::function< double(double)> > Tanh
std::shared_ptr< std::function< double(double)> > InvSigmoid
std::shared_ptr< std::function< double(double)> > SoftPlus
std::shared_ptr< std::function< double(double)> > ZeroFnc
std::shared_ptr< std::function< double(double)> > InvSoftSign
double studenttDouble(double distributionParameter)
std::shared_ptr< std::function< double(double)> > InvGaussComplement
std::shared_ptr< std::function< double(double)> > InvTanh
std::shared_ptr< std::function< double(double)> > Linear
std::shared_ptr< std::function< double(double)> > InvReLU
std::shared_ptr< std::function< double(double)> > GaussComplement
std::shared_ptr< std::function< double(double)> > Gauss
MinimizerType
< list all the minimizer types
std::shared_ptr< std::function< double(double)> > Sigmoid
double gaussDouble(double mean, double sigma)
std::shared_ptr< std::function< double(double)> > SoftSign
std::shared_ptr< std::function< double(double)> > InvSoftPlus
std::shared_ptr< std::function< double(double)> > ReLU
bool isFlagSet(T flag, T value)
int randomInt(int maxValue)
std::shared_ptr< std::function< double(double)> > InvTanhShift
std::vector< char > DropContainer
std::shared_ptr< std::function< double(double)> > InvSymmReLU
std::shared_ptr< std::function< double(double)> > InvLinear
create variable transformations
static uint64_t sum(uint64_t i)