   : MethodBase(jobName, Types::kDNN, methodTitle, theData, theOption), fWeightInitialization(), fOutputFunction(),
     fLayoutString(), fErrorStrategy(), fTrainingStrategyString(), fWeightInitializationString(),
     fArchitectureString(), fTrainingSettings(), fResume(false), fSettings()
   : MethodBase(Types::kDNN, theData, theWeightFile),
     fWeightInitialization(), fOutputFunction(), fLayoutString(), fErrorStrategy(),
     fTrainingStrategyString(), fWeightInitializationString(), fArchitectureString(),
     fTrainingSettings(), fResume(false), fSettings()
   fWeightInitialization = DNN::EInitialization::kGauss;
   fOutputFunction = DNN::EOutputFunction::kSigmoid;
   << "MethodDNN is deprecated and will be removed in a future ROOT version. "
      "Please use MethodDL (TMVA::kDL)."
   DeclareOptionRef(fLayoutString="SOFTSIGN|(N+100)*2,LINEAR",
                    "Layout of the network.");
   DeclareOptionRef(fValidationSize = "20%", "ValidationSize",
                    "Part of the training data to use for "
                    "validation. Specify as 0.2 or 20% to use a "
                    "fifth of the data set as validation set. "
                    "Specify as 100 to use exactly 100 events. "
   DeclareOptionRef(fErrorStrategy="CROSSENTROPY",
                    "Loss function: Mean squared error (regression)"
                    " or cross entropy (binary classification).");
   AddPreDefVal(TString("CROSSENTROPY"));
   AddPreDefVal(TString("SUMOFSQUARES"));
   AddPreDefVal(TString("MUTUALEXCLUSIVE"));
   DeclareOptionRef(fWeightInitializationString="XAVIER",
                    "WeightInitialization",
                    "Weight initialization strategy");
   AddPreDefVal(TString("XAVIER"));
   AddPreDefVal(TString("XAVIERUNIFORM"));
   DeclareOptionRef(fArchitectureString = "CPU", "Architecture",
                    "Which architecture to perform the training on.");
   AddPreDefVal(TString("STANDARD"));
   AddPreDefVal(TString("OPENCL"));
   fTrainingStrategyString = "LearningRate=1e-1,"
                             "ConvergenceSteps=50,"
                             "DropRepetitions=5|LearningRate=1e-4,"
                             "ConvergenceSteps=50,"
                             "DropConfig=0.0+0.5+0.5,"
                             "Multithreading=True",
                             "Defines the training strategies.");
   LayoutVector_t layout;
   const TString layerDelimiter(",");
   const TString subDelimiter("|");
   const size_t inputSize = GetNvar();

   TObjArray* layerStrings = layoutString.Tokenize(layerDelimiter);
   TIter nextLayer(layerStrings);
   for (; layerString != nullptr; layerString = (TObjString*) nextLayer()) {
      TIter nextToken(subStrings);
      for (; token != nullptr; token = (TObjString*) nextToken()) {
         if (strActFnc == "RELU") {
         } else if (strActFnc == "TANH") {
         } else if (strActFnc == "SYMMRELU") {
         } else if (strActFnc == "SOFTSIGN") {
         } else if (strActFnc == "SIGMOID") {
         } else if (strActFnc == "LINEAR") {
         } else if (strActFnc == "GAUSS") {
         strNumNodes.ReplaceAll("N", strN);
         strNumNodes.ReplaceAll("n", strN);
         numNodes = fml.Eval(inputSize);
      layout.push_back(std::make_pair(numNodes, activationFunction));
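      // Example (illustrative): with 10 input variables (N = 10), the default layout string
      // "SOFTSIGN|(N+100)*2,LINEAR" parses into two entries: a SOFTSIGN layer with
      // (10+100)*2 = 220 nodes, followed by a LINEAR entry whose actual width is fixed
      // later from the number of outputs.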
   KeyValueVector_t blockKeyValues;
   const TString keyValueDelim("=");

   TObjArray* blockStrings = parseString.Tokenize(blockDelim);
   TIter nextBlock(blockStrings);

   for (; blockString != nullptr; blockString = (TObjString*) nextBlock())
      blockKeyValues.push_back(std::map<TString,TString>());
      std::map<TString,TString>& currentBlock = blockKeyValues.back();
      TIter nextToken(subStrings);
      for (; token != nullptr; token = (TObjString*) nextToken())
         int delimPos = strKeyValue.First(keyValueDelim.Data());
         TString strValue = TString(strKeyValue(delimPos+1, strKeyValue.Length()));
         currentBlock.insert(std::make_pair(strKey, strValue));

   return blockKeyValues;
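// Example (illustrative): ParseKeyValueString("LearningRate=1e-1,BatchSize=30|LearningRate=1e-4,BatchSize=20",
// "|", ",") yields two maps, {"LearningRate":"1e-1","BatchSize":"30"} and
// {"LearningRate":"1e-4","BatchSize":"20"}, one per training stage.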
   std::map<TString, TString>::const_iterator it = keyValueMap.find(key);
   if (it == keyValueMap.end()) {
   return value.Atoi();
double fetchValue(const std::map<TString,TString>& keyValueMap,
                  TString key, double defaultValue)
   return value.Atof();
bool fetchValue(const std::map<TString,TString>& keyValueMap,
                TString key, bool defaultValue)
std::vector<double> fetchValue(const std::map<TString, TString>& keyValueMap,
                               std::vector<double> defaultValue)
   if (parseString == "") {
   std::vector<double> values;

   const TString tokenDelim("+");
   TIter nextToken(tokenStrings);
   for (; tokenString != NULL; tokenString = (TObjString*) nextToken()) {
      std::stringstream sstr;
      sstr >> currentValue;
      values.push_back(currentValue);
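// Example (illustrative): for the key "DropConfig" with value "0.0+0.5+0.5", the
// '+'-delimited tokens are parsed into the vector {0.0, 0.5, 0.5}, one dropout fraction
// per layer.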
   if (IgnoreEventsWithNegWeightsInTraining()) {
         << "Will ignore negative events in training!"
   if (fArchitectureString == "STANDARD") {
      Log() << kERROR << "The STANDARD architecture has been deprecated. "
                         "Please use Architecture=CPU or Architecture=GPU. "
                         "See the TMVA Users' Guide for instructions if you "
                         "encounter problems."
      Log() << kFATAL << "The STANDARD architecture has been deprecated. "
                         "Please use Architecture=CPU or Architecture=GPU. "
                         "See the TMVA Users' Guide for instructions if you "
                         "encounter problems."
   if (fArchitectureString == "OPENCL") {
      Log() << kERROR << "The OPENCL architecture has not been implemented yet. "
                         "Please use Architecture=CPU or Architecture=GPU for the "
                         "time being. See the TMVA Users' Guide for instructions "
                         "if you encounter problems."
      Log() << kFATAL << "The OPENCL architecture has not been implemented yet. "
                         "Please use Architecture=CPU or Architecture=GPU for the "
                         "time being. See the TMVA Users' Guide for instructions "
                         "if you encounter problems."
   if (fArchitectureString == "GPU") {
      Log() << kERROR << "CUDA backend not enabled. Please make sure "
                         "you have CUDA installed and it was successfully "
      Log() << kFATAL << "CUDA backend not enabled. Please make sure "
                         "you have CUDA installed and it was successfully "
   if (fArchitectureString == "CPU") {
      Log() << kERROR << "Multi-core CPU backend not enabled. Please make sure "
                         "you have a BLAS implementation and it was successfully "
                         "detected by CMake, and that the imt CMake flag is set."
      Log() << kFATAL << "Multi-core CPU backend not enabled. Please make sure "
                         "you have a BLAS implementation and it was successfully "
                         "detected by CMake, and that the imt CMake flag is set."
   size_t inputSize = GetNVariables();
   size_t outputSize = 1;
      outputSize = GetNTargets();
      outputSize = DataInfo().GetNClasses();

   fNet.SetBatchSize(1);
   fNet.SetInputWidth(inputSize);

   auto itLayout    = std::begin(fLayout);
   auto itLayoutEnd = std::end(fLayout) - 1;
   for (; itLayout != itLayoutEnd; ++itLayout) {
      fNet.AddLayer((*itLayout).first, (*itLayout).second);
   fNet.AddLayer(outputSize, EActivationFunction::kIdentity);

   fOutputFunction = EOutputFunction::kSigmoid;
      if (fErrorStrategy == "SUMOFSQUARES") {
         fNet.SetLossFunction(ELossFunction::kMeanSquaredError);
      if (fErrorStrategy == "CROSSENTROPY") {
         fNet.SetLossFunction(ELossFunction::kCrossEntropy);
      fOutputFunction = EOutputFunction::kSigmoid;
      if (fErrorStrategy != "SUMOFSQUARES") {
         Log() << kWARNING << "For regression only SUMOFSQUARES is a valid "
               << " neural net error function. Setting error function to "
               << " SUMOFSQUARES now." << Endl;
      fNet.SetLossFunction(ELossFunction::kMeanSquaredError);
      fOutputFunction = EOutputFunction::kIdentity;
      if (fErrorStrategy == "SUMOFSQUARES") {
         fNet.SetLossFunction(ELossFunction::kMeanSquaredError);
      if (fErrorStrategy == "CROSSENTROPY") {
         fNet.SetLossFunction(ELossFunction::kCrossEntropy);
      if (fErrorStrategy == "MUTUALEXCLUSIVE") {
         fNet.SetLossFunction(ELossFunction::kSoftmaxCrossEntropy);
      fOutputFunction = EOutputFunction::kSoftmax;
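      // Selection summary (as coded above): for binary classification, CROSSENTROPY pairs
      // the cross-entropy loss with a sigmoid output; regression always falls back to
      // SUMOFSQUARES (mean squared error) with an identity output; for multiclass,
      // MUTUALEXCLUSIVE selects the softmax cross-entropy loss with a softmax output.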
   if (fWeightInitializationString == "XAVIER") {
   else if (fWeightInitializationString == "XAVIERUNIFORM") {
   GetNumValidationSamples();

   KeyValueVector_t strategyKeyValues = ParseKeyValueString(fTrainingStrategyString,
   std::cout << "Parsed Training DNN string " << fTrainingStrategyString << std::endl;
   std::cout << "String has size " << strategyKeyValues.size() << std::endl;
   for (auto& block : strategyKeyValues) {
      std::vector<Double_t>());
      fTrainingSettings.push_back(settings);
   Int_t nValidationSamples = 0;

   if (fValidationSize.EndsWith("%")) {
      Double_t valSizeAsDouble = fValidationSize.Atof() / 100.0;
      nValidationSamples = GetEventCollection(Types::kTraining).size() * valSizeAsDouble;
      Log() << kFATAL << "Cannot parse number \"" << fValidationSize
            << "\". Expected string like \"20%\" or \"20.0%\"." << Endl;
   } else if (fValidationSize.IsFloat()) {
      Double_t valSizeAsDouble = fValidationSize.Atof();
      if (valSizeAsDouble < 1.0) {
         nValidationSamples = GetEventCollection(Types::kTraining).size() * valSizeAsDouble;
         nValidationSamples = valSizeAsDouble;
      Log() << kFATAL << "Cannot parse number \"" << fValidationSize
            << "\". Expected string like \"0.2\" or \"100\"."

   if (nValidationSamples < 0) {
      Log() << kFATAL << "Validation size \"" << fValidationSize << "\" is negative." << Endl;
   if (nValidationSamples == 0) {
      Log() << kFATAL << "Validation size \"" << fValidationSize << "\" is zero." << Endl;
   if (nValidationSamples >= (Int_t) trainingSetSize) {
      Log() << kFATAL << "Validation size \"" << fValidationSize
            << "\" is larger than or equal in size to training set (size=\"" << trainingSetSize << "\")." << Endl;

   return nValidationSamples;
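// Example (illustrative): with 10000 training events, ValidationSize="20%" or "0.2"
// yields 2000 validation samples, while ValidationSize="100" reserves exactly 100 events.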
   if (fInteractive && fInteractive->NotInitialized()) {
      std::vector<TString> titles = {"Error on training set", "Error on test set"};
      fInteractive->Init(titles);
   size_t nValidationSamples = GetNumValidationSamples();
   size_t nTrainingSamples = GetEventCollection(Types::kTraining).size() - nValidationSamples;
   size_t nTestSamples = nValidationSamples;

   if (nTrainingSamples < settings.batchSize ||
       nValidationSamples < settings.batchSize ||
       nTestSamples < settings.batchSize) {
      Log() << kFATAL << "Number of samples in the datasets are train: "
            << nTrainingSamples << " valid: " << nValidationSamples
            << " test: " << nTestSamples << ". "
            << "One of these is smaller than the batch size of "
            << settings.batchSize << ". Please increase the batch"
            << " size to be at least the same size as the smallest"
            << " of these values." << Endl;
   if (fArchitectureString == "GPU") {
      if (!fExitFromTraining) fIPyMaxIter = fIPyCurrentIter;
   } else if (fArchitectureString == "OPENCL") {
      Log() << kFATAL << "OpenCL backend not yet supported." << Endl;
   } else if (fArchitectureString == "CPU") {
      if (!fExitFromTraining) fIPyMaxIter = fIPyCurrentIter;

   Log() << kINFO << "Using Standard Implementation.";
   std::vector<Pattern> trainPattern;
   std::vector<Pattern> testPattern;

   size_t nValidationSamples = GetNumValidationSamples();
   size_t nTrainingSamples = GetEventCollection(Types::kTraining).size() - nValidationSamples;

   const std::vector<TMVA::Event *> &allData = GetEventCollection(Types::kTraining);
   const std::vector<TMVA::Event *> eventCollectionTraining{allData.begin(), allData.begin() + nTrainingSamples};
   const std::vector<TMVA::Event *> eventCollectionTesting{allData.begin() + nTrainingSamples, allData.end()};
   for (auto & event : eventCollectionTraining) {
      const std::vector<Float_t>& values = event->GetValues();
      double outputValue = event->GetClass() == 0 ? 0.9 : 0.1;
      trainPattern.push_back(Pattern(values.begin(),
                                     event->GetWeight()));
      trainPattern.back().addInput(1.0);
      std::vector<Float_t> oneHot(DataInfo().GetNClasses(), 0.0);
      oneHot[event->GetClass()] = 1.0;
      trainPattern.push_back(Pattern(values.begin(), values.end(),
                                     oneHot.cbegin(), oneHot.cend(),
                                     event->GetWeight()));
      trainPattern.back().addInput(1.0);
      const std::vector<Float_t>& targets = event->GetTargets();
      trainPattern.push_back(Pattern(values.begin(),
                                     event->GetWeight()));
      trainPattern.back().addInput(1.0);
   for (auto & event : eventCollectionTesting) {
      const std::vector<Float_t>& values = event->GetValues();
      double outputValue = event->GetClass() == 0 ? 0.9 : 0.1;
      testPattern.push_back(Pattern(values.begin(),
                                    event->GetWeight()));
      testPattern.back().addInput(1.0);
      std::vector<Float_t> oneHot(DataInfo().GetNClasses(), 0.0);
      oneHot[event->GetClass()] = 1.0;
      testPattern.push_back(Pattern(values.begin(), values.end(),
                                    oneHot.cbegin(), oneHot.cend(),
                                    event->GetWeight()));
      testPattern.back().addInput(1.0);
      const std::vector<Float_t>& targets = event->GetTargets();
      testPattern.push_back(Pattern(values.begin(),
                                    event->GetWeight()));
      testPattern.back().addInput(1.0);
   std::vector<double> weights;

   net.SetIpythonInteractive(fInteractive, &fExitFromTraining, &fIPyMaxIter, &fIPyCurrentIter);

   net.setInputSize(fNet.GetInputWidth() + 1);
   net.setOutputSize(fNet.GetOutputWidth() + 1);
   for (size_t i = 0; i < fNet.GetDepth(); i++) {
         case EActivationFunction::kIdentity: g = EnumFunction::LINEAR;   break;
         case EActivationFunction::kRelu:     g = EnumFunction::RELU;     break;
         case EActivationFunction::kSigmoid:  g = EnumFunction::SIGMOID;  break;
         case EActivationFunction::kTanh:     g = EnumFunction::TANH;     break;
         case EActivationFunction::kFastTanh: g = EnumFunction::TANH;     break;
         case EActivationFunction::kSymmRelu: g = EnumFunction::SYMMRELU; break;
         case EActivationFunction::kSoftSign: g = EnumFunction::SOFTSIGN; break;
         case EActivationFunction::kGauss:    g = EnumFunction::GAUSS;    break;
      if (i < fNet.GetDepth() - 1) {
         net.addLayer(Layer(fNet.GetLayer(i).GetWidth(), g));
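      // Note: the new-style EActivationFunction values are mapped onto the legacy
      // EnumFunction codes above; kFastTanh has no legacy counterpart and is mapped to TANH.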
         switch (fOutputFunction) {
            case EOutputFunction::kIdentity: h = ModeOutputValues::DIRECT;  break;
            case EOutputFunction::kSigmoid:  h = ModeOutputValues::SIGMOID; break;
            case EOutputFunction::kSoftmax:  h = ModeOutputValues::SOFTMAX; break;
         net.addLayer(Layer(fNet.GetLayer(i).GetWidth(), g, h));
   switch (fNet.GetLossFunction()) {
      case ELossFunction::kMeanSquaredError:
         net.setErrorFunction(ModeErrorFunction::SUMOFSQUARES);
      case ELossFunction::kCrossEntropy:
         net.setErrorFunction(ModeErrorFunction::CROSSENTROPY);
      case ELossFunction::kSoftmaxCrossEntropy:
         net.setErrorFunction(ModeErrorFunction::CROSSENTROPY_MUTUALEXCLUSIVE);
   switch (fWeightInitialization) {
      case EInitialization::kGauss:
         net.initializeWeights(WeightInitializationStrategy::XAVIER,
                               std::back_inserter(weights));
      case EInitialization::kUniform:
         net.initializeWeights(WeightInitializationStrategy::XAVIERUNIFORM,
                               std::back_inserter(weights));
         net.initializeWeights(WeightInitializationStrategy::XAVIER,
                               std::back_inserter(weights));
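   // Note: kGauss maps to the XAVIER strategy and kUniform to XAVIERUNIFORM in the legacy
   // net; any other initialization (the default branch, not shown here) also falls back
   // to XAVIER.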
   for (auto s : fTrainingSettings) {
      switch (s.regularization) {
         case ERegularization::kNone: r = EnumRegularization::NONE; break;
         case ERegularization::kL1:   r = EnumRegularization::L1;   break;
         case ERegularization::kL2:   r = EnumRegularization::L2;   break;
                              s.testInterval, s.weightDecay, r,
                              MinimizerType::fSteepest, s.learningRate,
                              s.momentum, 1, s.multithreading);
      std::shared_ptr<Settings> ptrSettings(settings);
      ptrSettings->setMonitoring(0);
            << "Training with learning rate = " << ptrSettings->learningRate()
            << ", momentum = " << ptrSettings->momentum()
            << ", repetitions = " << ptrSettings->repetitions()
      ptrSettings->setProgressLimits((idxSetting)   * 100.0 / (fSettings.size()),
                                     (idxSetting+1) * 100.0 / (fSettings.size()));
      const std::vector<double>& dropConfig = ptrSettings->dropFractions();
      if (!dropConfig.empty()) {
         Log() << kINFO << "Drop configuration" << Endl
               << " drop repetitions = " << ptrSettings->dropRepetitions()
      for (auto f : dropConfig) {
         Log() << kINFO << " Layer " << idx << " = " << f << Endl;
      Log() << kINFO << Endl;
                                   ptrSettings->momentum(),
                                   ptrSettings->repetitions());
      net.train(weights, trainPattern, testPattern, minimizer, *ptrSettings.get());
      Log() << kINFO << Endl;
   size_t weightIndex = 0;
   for (size_t l = 0; l < fNet.GetDepth(); l++) {
      auto & layerWeights = fNet.GetLayer(l).GetWeights();
      for (Int_t j = 0; j < layerWeights.GetNcols(); j++) {
         for (Int_t i = 0; i < layerWeights.GetNrows(); i++) {
            layerWeights(i,j) = weights[weightIndex];
      auto & layerBiases = fNet.GetLayer(l).GetBiases();
      for (Int_t i = 0; i < layerBiases.GetNrows(); i++) {
         layerBiases(i,0) = weights[weightIndex];
      for (Int_t i = 0; i < layerBiases.GetNrows(); i++) {
         layerBiases(i,0) = 0.0;
   if (!fExitFromTraining) fIPyMaxIter = fIPyCurrentIter;
   Log() << kINFO << "Start of neural network training on GPU." << Endl << Endl;

   size_t nValidationSamples = GetNumValidationSamples();
   size_t nTrainingSamples = GetEventCollection(Types::kTraining).size() - nValidationSamples;
   size_t nTestSamples = nValidationSamples;

   Log() << kDEBUG << "Using " << nValidationSamples << " validation samples." << Endl;
   Log() << kDEBUG << "Using " << nTestSamples << " test samples." << Endl;
   size_t trainingPhase = 1;
   fNet.Initialize(fWeightInitialization);
         fInteractive->ClearGraphs();

      net.SetWeightDecay(settings.weightDecay);
      net.SetRegularization(settings.regularization);

      std::vector<Double_t> dropoutVector(settings.dropoutProbabilities);
      for (auto & p : dropoutVector) {
      net.SetDropoutProbabilities(dropoutVector);

      net.InitializeGradients();
      auto testNet = net.CreateClone(settings.batchSize);

      Log() << kINFO << "Training phase " << trainingPhase << " of "
            << fTrainingSettings.size() << ":" << Endl;
      const std::vector<Event *> trainingInputData =
         std::vector<Event *>(allData.begin(), allData.begin() + nTrainingSamples);
      const std::vector<Event *> testInputData =
         std::vector<Event *>(allData.begin() + nTrainingSamples, allData.end());

      if (trainingInputData.size() != nTrainingSamples) {
         Log() << kFATAL << "Inconsistent training sample size" << Endl;
      if (testInputData.size() != nTestSamples) {
         Log() << kFATAL << "Inconsistent test sample size" << Endl;

      TMVAInput_t trainingTuple = std::tie(trainingInputData, DataInfo());
      TMVAInput_t testTuple = std::tie(testInputData, DataInfo());
      DataLoader_t trainingData(trainingTuple, nTrainingSamples,
                                net.GetBatchSize(), net.GetInputWidth(),
                                net.GetOutputWidth(), nThreads);
      DataLoader_t testData(testTuple, nTestSamples, testNet.GetBatchSize(),
                            net.GetInputWidth(), net.GetOutputWidth(),
                               settings.convergenceSteps,
                               settings.testInterval);
      std::vector<TNet<TCuda<>>> nets{};
      std::vector<TBatch<TCuda<>>> batches{};
      nets.reserve(nThreads);
      for (size_t i = 0; i < nThreads; i++) {
         for (size_t j = 0; j < net.GetDepth(); j++)
            auto &masterLayer = net.GetLayer(j);
            auto &layer = nets.back().GetLayer(j);
                                         masterLayer.GetWeights());
                                         masterLayer.GetBiases());
      bool converged = false;
      size_t stepCount = 0;
      size_t batchesInEpoch = nTrainingSamples / net.GetBatchSize();

      std::chrono::time_point<std::chrono::system_clock> start, end;
      start = std::chrono::system_clock::now();

      if (!fInteractive) {
         Log() << std::setw(10) << "Epoch" << " | "
               << std::setw(12) << "Train Err."
               << std::setw(12) << "Test Err."
               << std::setw(12) << "GFLOP/s"
               << std::setw(12) << "Conv. Steps" << Endl;
         std::string separator(62, '-');
         Log() << separator << Endl;
         trainingData.Shuffle();
         for (size_t i = 0; i < batchesInEpoch; i += nThreads) {
            for (size_t j = 0; j < nThreads; j++) {
               batches.reserve(nThreads);
               batches.push_back(trainingData.GetBatch());
            if (settings.momentum > 0.0) {
               minimizer.StepMomentum(net, nets, batches, settings.momentum);
               minimizer.Step(net, nets, batches);
            for (auto batch : testData) {
               auto inputMatrix  = batch.GetInput();
               auto outputMatrix = batch.GetOutput();
               testError += testNet.Loss(inputMatrix, outputMatrix);
            testError /= (Double_t) (nTestSamples / settings.batchSize);

            fTrainHistory.AddValue("testError", stepCount, testError);

            end = std::chrono::system_clock::now();

            for (auto batch : trainingData) {
               auto inputMatrix  = batch.GetInput();
               auto outputMatrix = batch.GetOutput();
               trainingError += net.Loss(inputMatrix, outputMatrix);
            trainingError /= (Double_t) (nTrainingSamples / settings.batchSize);

            fTrainHistory.AddValue("trainingError", stepCount, trainingError);
            std::chrono::duration<double> elapsed_seconds = end - start;
            double seconds = elapsed_seconds.count();
            double nFlops = (double) (settings.testInterval * batchesInEpoch);
            nFlops *= net.GetNFlops() * 1e-9;

            start = std::chrono::system_clock::now();

               fInteractive->AddPoint(stepCount, trainingError, testError);

            if (fExitFromTraining) break;

            Log() << std::setw(10) << stepCount << " | "
                  << std::setw(12) << trainingError
                  << std::setw(12) << testError
                  << std::setw(12) << nFlops / seconds
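            // The GFLOP/s column above: nFlops approximates the giga-FLOP executed since
            // the last measurement (testInterval epochs times batchesInEpoch batches,
            // scaled by net.GetNFlops() and converted with the 1e-9 factor), so dividing
            // by the elapsed seconds gives the reported throughput.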
      for (size_t l = 0; l < net.GetDepth(); l++) {
   Log() << kFATAL << "CUDA backend not enabled. Please make sure "
                      "you have CUDA installed and it was successfully "
                      "detected by CMAKE." << Endl;
   Log() << kINFO << "Start of neural network training on CPU." << Endl << Endl;

   size_t nValidationSamples = GetNumValidationSamples();
   size_t nTrainingSamples = GetEventCollection(Types::kTraining).size() - nValidationSamples;
   size_t nTestSamples = nValidationSamples;

   Log() << kDEBUG << "Using " << nValidationSamples << " validation samples." << Endl;
   Log() << kDEBUG << "Using " << nTestSamples << " test samples." << Endl;
   fNet.Initialize(fWeightInitialization);

   size_t trainingPhase = 1;
         fInteractive->ClearGraphs();

      Log() << "Training phase " << trainingPhase << " of "
            << fTrainingSettings.size() << ":" << Endl;

      net.SetWeightDecay(settings.weightDecay);
      net.SetRegularization(settings.regularization);
      std::vector<Double_t> dropoutVector(settings.dropoutProbabilities);
      for (auto & p : dropoutVector) {
      net.SetDropoutProbabilities(dropoutVector);
      net.InitializeGradients();
      auto testNet = net.CreateClone(settings.batchSize);
      const std::vector<Event *> &allData = GetEventCollection(Types::kTraining);
      const std::vector<Event *> trainingInputData =
         std::vector<Event *>(allData.begin(), allData.begin() + nTrainingSamples);
      const std::vector<Event *> testInputData =
         std::vector<Event *>(allData.begin() + nTrainingSamples, allData.end());

      if (trainingInputData.size() != nTrainingSamples) {
         Log() << kFATAL << "Inconsistent training sample size" << Endl;
      if (testInputData.size() != nTestSamples) {
         Log() << kFATAL << "Inconsistent test sample size" << Endl;

      size_t nThreads = 1;
      TMVAInput_t trainingTuple = std::tie(trainingInputData, DataInfo());
      TMVAInput_t testTuple = std::tie(testInputData, DataInfo());
      DataLoader_t trainingData(trainingTuple, nTrainingSamples,
                                net.GetBatchSize(), net.GetInputWidth(),
                                net.GetOutputWidth(), nThreads);
      DataLoader_t testData(testTuple, nTestSamples, testNet.GetBatchSize(),
                            net.GetInputWidth(), net.GetOutputWidth(),
                               settings.convergenceSteps,
                               settings.testInterval);
      std::vector<TNet<TCpu<>>> nets{};
      std::vector<TBatch<TCpu<>>> batches{};
      nets.reserve(nThreads);
      for (size_t i = 0; i < nThreads; i++) {
         nets.push_back(net);
         for (size_t j = 0; j < net.GetDepth(); j++)
            auto &masterLayer = net.GetLayer(j);
            auto &layer = nets.back().GetLayer(j);
                                         masterLayer.GetWeights());
                                         masterLayer.GetBiases());
      bool converged = false;
      size_t stepCount = 0;
      size_t batchesInEpoch = nTrainingSamples / net.GetBatchSize();

      std::chrono::time_point<std::chrono::system_clock> start, end;
      start = std::chrono::system_clock::now();

      if (!fInteractive) {
         Log() << std::setw(10) << "Epoch" << " | "
               << std::setw(12) << "Train Err."
               << std::setw(12) << "Test Err."
               << std::setw(12) << "GFLOP/s"
               << std::setw(12) << "Conv. Steps" << Endl;
         std::string separator(62, '-');
         Log() << separator << Endl;
         trainingData.Shuffle();
         for (size_t i = 0; i < batchesInEpoch; i += nThreads) {
            for (size_t j = 0; j < nThreads; j++) {
               batches.reserve(nThreads);
               batches.push_back(trainingData.GetBatch());
            if (settings.momentum > 0.0) {
               minimizer.StepMomentum(net, nets, batches, settings.momentum);
               minimizer.Step(net, nets, batches);
            for (auto batch : testData) {
               auto inputMatrix  = batch.GetInput();
               auto outputMatrix = batch.GetOutput();
               auto weightMatrix = batch.GetWeights();
               testError += testNet.Loss(inputMatrix, outputMatrix, weightMatrix);
            testError /= (Double_t) (nTestSamples / settings.batchSize);

            fTrainHistory.AddValue("testError", stepCount, testError);

            end = std::chrono::system_clock::now();

            for (auto batch : trainingData) {
               auto inputMatrix  = batch.GetInput();
               auto outputMatrix = batch.GetOutput();
               auto weightMatrix = batch.GetWeights();
               trainingError += net.Loss(inputMatrix, outputMatrix, weightMatrix);
            trainingError /= (Double_t) (nTrainingSamples / settings.batchSize);

            fTrainHistory.AddValue("trainingError", stepCount, trainingError);
               fInteractive->AddPoint(stepCount, trainingError, testError);

               if (fExitFromTraining) break;
            std::chrono::duration<double> elapsed_seconds = end - start;
            double seconds = elapsed_seconds.count();
            double nFlops = (double) (settings.testInterval * batchesInEpoch);
            nFlops *= net.GetNFlops() * 1e-9;

            start = std::chrono::system_clock::now();

               fInteractive->AddPoint(stepCount, trainingError, testError);

            if (fExitFromTraining) break;

            Log() << std::setw(10) << stepCount << " | "
                  << std::setw(12) << trainingError
                  << std::setw(12) << testError
                  << std::setw(12) << nFlops / seconds

      for (size_t l = 0; l < net.GetDepth(); l++) {
         auto & layer = fNet.GetLayer(l);
   Log() << kFATAL << "Multi-core CPU backend not enabled. Please make sure "
                      "you have a BLAS implementation and it was successfully "
                      "detected by CMake, and that the imt CMake flag is set." << Endl;
   size_t nVariables = GetEvent()->GetNVariables();

   const std::vector<Float_t>& inputValues = GetEvent()->GetValues();
   for (size_t i = 0; i < nVariables; i++) {
      X(0,i) = inputValues[i];

   fNet.Prediction(YHat, X, fOutputFunction);
   size_t nVariables = GetEvent()->GetNVariables();

   const Event *ev = GetEvent();
   const std::vector<Float_t>& inputValues = ev->GetValues();
   for (size_t i = 0; i < nVariables; i++) {
      X(0,i) = inputValues[i];

   size_t nTargets = std::max(1u, ev->GetNTargets());

   std::vector<Float_t> output(nTargets);
   auto net = fNet.CreateClone(1);
   net.Prediction(YHat, X, fOutputFunction);

   for (size_t i = 0; i < nTargets; i++)

   if (fRegressionReturnVal == NULL) {
      fRegressionReturnVal = new std::vector<Float_t>();
   fRegressionReturnVal->clear();

   for (size_t i = 0; i < nTargets; ++i) {

   const Event* evT2 = GetTransformationHandler().InverseTransform(evT);
   for (size_t i = 0; i < nTargets; ++i) {
      fRegressionReturnVal->push_back(evT2->GetTarget(i));

   return *fRegressionReturnVal;
   Matrix_t YHat(1, DataInfo().GetNClasses());
   if (fMulticlassReturnVal == NULL) {
      fMulticlassReturnVal = new std::vector<Float_t>(DataInfo().GetNClasses());

   const std::vector<Float_t>& inputValues = GetEvent()->GetValues();
   for (size_t i = 0; i < nVariables; i++) {
      X(0,i) = inputValues[i];

   fNet.Prediction(YHat, X, fOutputFunction);
   for (size_t i = 0; i < (size_t) YHat.GetNcols(); i++) {
      (*fMulticlassReturnVal)[i] = YHat(0, i);
   return *fMulticlassReturnVal;
   Int_t inputWidth = fNet.GetInputWidth();
   Int_t depth      = fNet.GetDepth();
   char  lossFunction = static_cast<char>(fNet.GetLossFunction());
                                gTools().StringFromInt(inputWidth));
                                TString(static_cast<char>(fOutputFunction)));

   for (Int_t i = 0; i < depth; i++) {
      const auto& layer = fNet.GetLayer(i);
      int activationFunction = static_cast<int>(layer.GetActivationFunction());
      WriteMatrixXML(layerxml, "Weights", layer.GetWeights());
      WriteMatrixXML(layerxml, "Biases",  layer.GetBiases());
   fNet.SetBatchSize(1);

   size_t inputWidth, depth;
   char lossFunctionChar;
   char outputFunctionChar;

   fNet.SetInputWidth(inputWidth);
   fNet.SetLossFunction(static_cast<ELossFunction>(lossFunctionChar));
   size_t previousWidth = inputWidth;

   for (size_t i = 0; i < depth; i++) {
      ReadMatrixXML(layerXML, "Weights", weights);
      ReadMatrixXML(layerXML, "Biases",  biases);
      fNet.GetLayer(i).GetWeights() = weights;
      fNet.GetLayer(i).GetBiases()  = biases;
      previousWidth = width;
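   // Note: this reader restores, layer by layer, the "Weights" and "Biases" matrices
   // written by AddWeightsXMLTo above, so the two methods must stay in sync.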
   fRanking = new Ranking(GetName(), "Importance");
   for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) {
      fRanking->AddRank(Rank(GetInputLabel(ivar), 1.0));
   Log() << col << "--- Short description:" << colres << Endl;
   Log() << "The DNN neural network is a feedforward" << Endl;
   Log() << "multilayer perceptron implementation. The DNN has a user-" << Endl;
   Log() << "defined hidden layer architecture, where the number of input (output)" << Endl;
   Log() << "nodes is determined by the input variables (output classes, i.e., " << Endl;
   Log() << "signal and one background, regression or multiclass). " << Endl;

   Log() << col << "--- Performance optimisation:" << colres << Endl;
   const char* txt = "The DNN supports various options to improve performance in terms of training speed and \n \
reduction of overfitting: \n \
      - different training settings can be stacked. Such that the initial training \n \
        is done with a large learning rate and a large drop out fraction whilst \n \
        in a later stage learning rate and drop out can be reduced. \n \
          initial training stage: 0.0 for the first layer, 0.5 for later layers. \n \
          later training stage: 0.1 or 0.0 for all layers \n \
          final training stage: 0.0] \n \
        Drop out is a technique where at each training cycle a fraction of arbitrary \n \
        nodes is disabled. This reduces co-adaptation of weights and thus reduces overfitting. \n \
      - L1 and L2 regularization are available \n \
        [recommended 10 - 150] \n \
        Arbitrary mini-batch sizes can be chosen. \n \
      - Multithreading \n \
        [recommended: True] \n \
        Multithreading can be turned on. The minibatches are distributed to the available \n \
        cores. The algorithm is lock-free (\"Hogwild!\"-style) for each cycle. \n \
      - example: \"TANH|(N+30)*2,TANH|(N+30),LINEAR\" \n \
        . two hidden layers (separated by \",\") \n \
        . the activation function is TANH (other options: RELU, SOFTSIGN, LINEAR) \n \
        . the activation function for the output layer is LINEAR \n \
        . the first hidden layer has (N+30)*2 nodes where N is the number of input neurons \n \
        . the second hidden layer has N+30 nodes, where N is the number of input neurons \n \
        . the number of nodes in the output layer is determined by the number of output nodes \n \
          and can therefore not be chosen freely. \n \
       \"ErrorStrategy\": \n \
           The error of the neural net is determined by a sum-of-squares error function \n \
           For regression, this is the only possible choice. \n \
           The error of the neural net is determined by a cross entropy function. The \n \
           output values are automatically (internally) transformed into probabilities \n \
           using a sigmoid function. \n \
           For signal/background classification this is the default choice. \n \
           For multiclass using cross entropy more than one or no output classes \n \
           can be equally true or false (e.g. Event 0: A and B are true, Event 1: \n \
           A and C is true, Event 2: C is true, ...) \n \
        - MUTUALEXCLUSIVE \n \
           In multiclass settings, exactly one of the output classes can be true (e.g. either A or B or C) \n \
       \"WeightInitialization\" \n \
           \"Xavier Glorot & Yoshua Bengio\"-style of initializing the weights. The weights are chosen randomly \n \
           such that the variance of the values of the nodes is preserved for each layer. \n \
        - XAVIERUNIFORM \n \
           The same as XAVIER, but with uniformly distributed weights instead of Gaussian weights \n \
           Random values scaled by the layer size \n \
       \"TrainingStrategy\" \n \
       - example: \"LearningRate=1e-1,Momentum=0.3,ConvergenceSteps=50,BatchSize=30,TestRepetitions=7,WeightDecay=0.0,Renormalize=L2,DropConfig=0.0,DropRepetitions=5|LearningRate=1e-4,Momentum=0.3,ConvergenceSteps=50,BatchSize=20,TestRepetitions=7,WeightDecay=0.001,Renormalize=L2,DropConfig=0.0,DropRepetitions=5\" \n \
       - explanation: two stacked training settings separated by \"|\" \n \
         . first training setting: \"LearningRate=1e-1,Momentum=0.3,ConvergenceSteps=50,BatchSize=30,TestRepetitions=7,WeightDecay=0.0,Renormalize=L2,DropConfig=0.0,DropRepetitions=5\" \n \
         . second training setting: \"LearningRate=1e-4,Momentum=0.3,ConvergenceSteps=50,BatchSize=20,TestRepetitions=7,WeightDecay=0.001,Renormalize=L2,DropConfig=0.0,DropRepetitions=5\" \n \
         . LearningRate: \n \
           - recommended for classification: 0.1 initially, 1e-4 later \n \
           - recommended for regression: 1e-4 and less \n \
           preserve a fraction of the momentum for the next training batch [fraction = 0.0 - 1.0] \n \
         . Repetitions: \n \
           train \"Repetitions\" repetitions with the same minibatch before switching to the next one \n \
         . ConvergenceSteps: \n \
           Assume that convergence is reached after \"ConvergenceSteps\" cycles where no improvement \n \
           of the error on the test samples has been found. (Mind that only at each \"TestRepetitions\" \n \
           cycle the test samples are evaluated and thus the convergence is checked) \n \
           Size of the mini-batches. \n \
         . TestRepetitions \n \
           Test the neural net on the test samples every \"TestRepetitions\" cycles \n \
           If \"Renormalize\" is set to L1 or L2, \"WeightDecay\" provides the renormalization factor \n \
           NONE, L1 (|w|) or L2 (w^2) \n \
           Drop a fraction of arbitrary nodes of each of the layers according to the values given \n \
           in the DropConfig. \n \
           [example: DropConfig=0.0+0.5+0.3 \n \
            meaning: drop no nodes in layer 0 (input layer), half of the nodes in layer 1 and 30% of the nodes \n \
            recommended: leave all the nodes turned on for the input layer (layer 0) \n \
            turn off half of the nodes in later layers for the initial training; leave all nodes \n \
            turned on (0.0) in later training stages] \n \
         . DropRepetitions \n \
           Each \"DropRepetitions\" cycle the configuration of which nodes are dropped is changed \n \
           [recommended : 1] \n \
         . Multithreading \n \
           turn on multithreading [recommended: True]";
   Log() << txt << Endl;
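// A minimal booking sketch (illustrative only; the factory and dataloader objects and the
// exact option values are assumptions, but the option names follow the help text above):
//
//   factory->BookMethod(dataloader, TMVA::Types::kDNN, "DNN",
//      "Layout=TANH|(N+30)*2,TANH|(N+30),LINEAR:"
//      "ErrorStrategy=CROSSENTROPY:WeightInitialization=XAVIER:"
//      "TrainingStrategy=LearningRate=1e-1,Momentum=0.3,ConvergenceSteps=50,"
//      "BatchSize=30,TestRepetitions=7,WeightDecay=0.0,DropConfig=0.0+0.5+0.5");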