ROOT 6.12/07 Reference Guide |
Definition at line 54 of file Minimizers.h.
Public Types | |
using | Matrix_t = typename Architecture_t::Matrix_t |
using | Scalar_t = typename Architecture_t::Scalar_t |
Public Member Functions | |
TGradientDescent () | |
TGradientDescent (Scalar_t learningRate, size_t convergenceSteps, size_t testInterval) | |
size_t | GetConvergenceCount () const |
size_t | GetConvergenceSteps () const |
Scalar_t | GetTestError () const |
size_t | GetTestInterval () const |
Scalar_t | GetTrainingError () const |
bool | HasConverged () |
Increases the minimization step counter by the test error evaluation period and uses the current internal value of the test error to determine if the minimization has converged. More... | |
bool | HasConverged (Scalar_t testError) |
Increases the minimization step counter by the test error evaluation period and uses the provided test error value to determine if the minimization has converged. More... | |
void | Reset () |
Reset minimizer object to default state. More... | |
void | SetBatchSize (Scalar_t rate) |
void | SetConvergenceSteps (size_t steps) |
void | SetLearningRate (Scalar_t rate) |
void | SetTestInterval (size_t interval) |
template<typename Net_t > | |
void | Step (Net_t &net, Matrix_t &input, const Matrix_t &output, const Matrix_t &weights) |
Perform a single optimization step on a given batch. More... | |
template<typename Net_t > | |
void | Step (Net_t &master, std::vector< Net_t > &nets, std::vector< TBatch< Architecture_t >> &batches) |
Perform multiple optimization steps simultaneously. More... | |
template<typename Net_t > | |
Scalar_t | StepLoss (Net_t &net, Matrix_t &input, const Matrix_t &output, const Matrix_t &weights) |
Same as Step(...) but also evaluate the loss on the given training data. More... | |
template<typename Net_t > | |
auto | StepLoss (Net_t &net, Matrix_t &input, const Matrix_t &output, const Matrix_t &weights) -> Scalar_t |
template<typename Net_t > | |
void | StepMomentum (Net_t &master, std::vector< Net_t > &nets, std::vector< TBatch< Architecture_t >> &batches, Scalar_t momentum) |
Same as the Step(...) method for multiple batches but uses momentum. More... | |
template<typename Net_t > | |
void | StepNesterov (Net_t &master, std::vector< Net_t > &nets, std::vector< TBatch< Architecture_t >> &batches, Scalar_t momentum) |
Same as the Step(...) method for multiple batches but uses Nesterov momentum. More... | |
template<typename Net_t > | |
void | StepReducedWeights (Net_t &net, Matrix_t &input, const Matrix_t &output) |
Does not evaluate the loss and therefore does not trigger a possible synchronization with the device. More... | |
template<typename Net_t > | |
Scalar_t | StepReducedWeightsLoss (Net_t &net, Matrix_t &input, const Matrix_t &output, const Matrix_t &weights) |
Similar to StepReducedWeights(...) but also evaluates the loss. More... | |
template<typename Net_t > | |
auto | StepReducedWeightsLoss (Net_t &net, Matrix_t &input, const Matrix_t &output, const Matrix_t &weights) -> Scalar_t |
template<typename Data_t , typename Net_t > | |
Scalar_t | Train (const Data_t &TrainingDataIn, size_t nTrainingSamples, const Data_t &TestDataIn, size_t nTestSamples, Net_t &net, size_t nThreads=1) |
Train the given net using the given training input data (events), training output data (labels), test input data (events), test output data (labels). More... | |
template<typename Data_t , typename Net_t > | |
auto | Train (const Data_t &trainingData, size_t nTrainingSamples, const Data_t &testData, size_t nTestSamples, Net_t &net, size_t nThreads) -> Scalar_t |
template<typename Data_t , typename Net_t > | |
Scalar_t | TrainMomentum (const Data_t &TrainingDataIn, size_t nTrainingSamples, const Data_t &TestDataIn, size_t nTestSamples, Net_t &net, Scalar_t momentum, size_t nThreads=1) |
Same as Train(...) but uses the given momentum. More... | |
template<typename Data_t , typename Net_t > | |
auto | TrainMomentum (const Data_t &trainingData, size_t nTrainingSamples, const Data_t &testData, size_t nTestSamples, Net_t &net, Scalar_t momentum, size_t nThreads) -> Scalar_t |
Private Attributes | |
size_t | fBatchSize |
Batch size to use for the training. More... | |
size_t | fConvergenceCount |
Current number of training epochs without considerable decrease in the test error. More... | |
size_t | fConvergenceSteps |
Number of training epochs without considerable decrease in the test error required for convergence. More... | |
Scalar_t | fLearningRate |
Learning rate \(\alpha\). More... | |
Scalar_t | fMinimumError |
The minimum loss achieved on the training set. More... | |
size_t | fStepCount |
Number of steps performed in the current training session. More... | |
Scalar_t | fTestError |
Holds the most recently computed test loss. More... | |
size_t | fTestInterval |
Interval for the computation of the test error. More... | |
Scalar_t | fTrainingError |
Holds the most recently computed training loss. More... | |
#include <TMVA/DNN/Minimizers.h>
using TMVA::DNN::TGradientDescent< Architecture_t >::Matrix_t = typename Architecture_t::Matrix_t |
Definition at line 58 of file Minimizers.h.
using TMVA::DNN::TGradientDescent< Architecture_t >::Scalar_t = typename Architecture_t::Scalar_t |
Definition at line 57 of file Minimizers.h.
TMVA::DNN::TGradientDescent< Architecture_t >::TGradientDescent | ( | ) |
Definition at line 175 of file Minimizers.h.
TMVA::DNN::TGradientDescent< Architecture_t >::TGradientDescent | ( | Scalar_t | learningRate, |
size_t | convergenceSteps, | ||
size_t | testInterval | ||
) |
Definition at line 184 of file Minimizers.h.
|
inline |
Definition at line 159 of file Minimizers.h.
|
inline |
Definition at line 160 of file Minimizers.h.
|
inline |
Definition at line 162 of file Minimizers.h.
|
inline |
Definition at line 163 of file Minimizers.h.
|
inline |
Definition at line 161 of file Minimizers.h.
|
inline |
Increases the minimization step counter by the test error evaluation period and uses the current internal value of the test error to determine if the minimization has converged.
Definition at line 665 of file Minimizers.h.
|
inline |
Increases the minimization step counter by the test error evaluation period and uses the provided test error value to determine if the minimization has converged.
Definition at line 679 of file Minimizers.h.
|
inline |
Reset minimizer object to default state.
Definition at line 81 of file Minimizers.h.
|
inline |
Definition at line 168 of file Minimizers.h.
|
inline |
Definition at line 165 of file Minimizers.h.
|
inline |
Definition at line 167 of file Minimizers.h.
|
inline |
Definition at line 166 of file Minimizers.h.
|
inline |
Perform a single optimization step on a given batch.
Propagates the input matrix forward through the net, evaluates the loss and propagates the gradients backward through the net. The computed gradients are scaled by the learning rate \(\alpha\) and subtracted from the weights and bias values of each layer.
Definition at line 329 of file Minimizers.h.
|
inline |
Perform multiple optimization steps simultaneously.
Performs the backprop algorithm on the input batches given in `batches` on the neural networks given in `nets`. The forward and backward propagation steps are executed in an interleaving manner in order to exploit potential batch-level parallelism for asynchronous device calls.
Definition at line 372 of file Minimizers.h.
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::StepLoss | ( | Net_t & | net, |
Matrix_t & | input, | ||
const Matrix_t & | output, | ||
const Matrix_t & | weights | ||
) |
Same as Step(...) but also evaluate the loss on the given training data.
Note that this requires synchronization between host and device.
|
inline |
Definition at line 350 of file Minimizers.h.
|
inline |
Same as the Step(...) method for multiple batches but uses momentum.
Definition at line 436 of file Minimizers.h.
|
inline |
Same as the Step(...) method for multiple batches but uses Nesterov momentum.
Definition at line 526 of file Minimizers.h.
|
inline |
Does not evaluate the loss and therefore does not trigger a possible synchronization with the device.
Trains the weights of each layer, but only the bias terms of the first layer for compatibility with the previous implementation.
Definition at line 615 of file Minimizers.h.
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::StepReducedWeightsLoss | ( | Net_t & | net, |
Matrix_t & | input, | ||
const Matrix_t & | output, | ||
const Matrix_t & | weights | ||
) |
Similar to StepReducedWeights(...) but also evaluates the loss.
May trigger synchronization with the device.
|
inline |
Definition at line 640 of file Minimizers.h.
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::Train | ( | const Data_t & | TrainingDataIn, |
size_t | nTrainingSamples, | ||
const Data_t & | TestDataIn, | ||
size_t | nTestSamples, | ||
Net_t & | net, | ||
size_t | nThreads = 1 |
||
) |
Train the given net using the given training input data (events), training output data (labels), test input data (events), test output data (labels).
auto TMVA::DNN::TGradientDescent< Architecture_t >::Train | ( | const Data_t & | trainingData, |
size_t | nTrainingSamples, | ||
const Data_t & | testData, | ||
size_t | nTestSamples, | ||
Net_t & | net, | ||
size_t | nThreads | ||
) | -> Scalar_t |
Definition at line 194 of file Minimizers.h.
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::TrainMomentum | ( | const Data_t & | TrainingDataIn, |
size_t | nTrainingSamples, | ||
const Data_t & | TestDataIn, | ||
size_t | nTestSamples, | ||
Net_t & | net, | ||
Scalar_t | momentum, | ||
size_t | nThreads = 1 |
||
) |
Same as Train(...) but uses the given momentum.
auto TMVA::DNN::TGradientDescent< Architecture_t >::TrainMomentum | ( | const Data_t & | trainingData, |
size_t | nTrainingSamples, | ||
const Data_t & | testData, | ||
size_t | nTestSamples, | ||
Net_t & | net, | ||
Scalar_t | momentum, | ||
size_t | nThreads | ||
) | -> Scalar_t |
Definition at line 257 of file Minimizers.h.
|
private |
Batch size to use for the training.
Definition at line 61 of file Minimizers.h.
|
private |
Current number of training epochs without considerable decrease in the test error.
Definition at line 65 of file Minimizers.h.
|
private |
Number of training epochs without considerable decrease in the test error required for convergence.
Definition at line 63 of file Minimizers.h.
|
private |
Learning rate \(\alpha\).
Definition at line 70 of file Minimizers.h.
|
private |
The minimum loss achieved on the training set during the current training session.
Definition at line 71 of file Minimizers.h.
|
private |
Number of steps performed in the current training session.
Definition at line 62 of file Minimizers.h.
|
private |
Holds the most recently computed test loss.
Definition at line 69 of file Minimizers.h.
|
private |
Interval for the computation of the test error.
Definition at line 67 of file Minimizers.h.
|
private |
Holds the most recently computed training loss.
Definition at line 68 of file Minimizers.h.