#ifndef TMVA_TEST_DNN_UTILITY
#define TMVA_TEST_DNN_UTILITY

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <ctime>
#include <limits>
#include <sstream>
#include <string>
#include <vector>

#include "TRandom.h"
#include "TMVA/DNN/Functions.h"
#include "TMVA/DNN/Net.h"

namespace TMVA {
namespace DNN {

/** Construct a random linear neural network with up to five layers. */
template <typename AArchitecture>
void constructRandomLinearNet(TNet<AArchitecture> &net)
{
   int nlayers = rand() % 5 + 1;
   std::vector<EActivationFunction> ActivationFunctions = {EActivationFunction::kIdentity};

   for (int i = 0; i < nlayers; i++) {
      int width = rand() % 20 + 1;
      EActivationFunction f = ActivationFunctions[rand() % ActivationFunctions.size()];
      net.AddLayer(width, f);
   }
}
/** Set matrix to the identity matrix. */
template <typename AMatrix>
void identityMatrix(AMatrix &X)
{
   size_t m = X.GetNrows();
   size_t n = X.GetNcols();

   for (size_t i = 0; i < m; i++) {
      for (size_t j = 0; j < n; j++) {
         X(i,j) = 0.0;
      }
      if (i < n) {
         X(i,i) = 1.0;
      }
   }
}
/** Fill matrix with random, Gaussian-distributed values. */
template <typename AMatrix>
void randomMatrix(AMatrix &X)
{
   size_t m = X.GetNrows();
   size_t n = X.GetNcols();

   TRandom rand(clock());
   Double_t sigma = sqrt(10.0);

   for (size_t i = 0; i < m; i++) {
      for (size_t j = 0; j < n; j++) {
         X(i,j) = rand.Gaus(0.0, sigma);
      }
   }
}
/** Generate a random batch as input for a neural net. */
template <typename AMatrix>
void randomBatch(AMatrix &X) { randomMatrix(X); }
/** Copy the matrix Y element-wise into the matrix X. */
template <typename AMatrix>
void copyMatrix(AMatrix &X, const AMatrix &Y)
{
   size_t m = X.GetNrows();
   size_t n = X.GetNcols();

   for (size_t i = 0; i < m; i++) {
      for (size_t j = 0; j < n; j++) {
         X(i,j) = Y(i,j);
      }
   }
}
/** Apply functional to each element in the matrix. */
template <typename AMatrix, typename F>
void applyMatrix(AMatrix &X, F f)
{
   size_t m = X.GetNrows();
   size_t n = X.GetNcols();

   for (size_t i = 0; i < m; i++) {
      for (size_t j = 0; j < n; j++) {
         X(i,j) = f(X(i,j));
      }
   }
}
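// Example (illustrative): square every element of X in place,
//
//    applyMatrix(X, [](double x) { return x * x; });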
/** Combine elements of the two given matrices into a single matrix using
 *  the given function f. */
template <typename AMatrix, typename F>
void zipWithMatrix(AMatrix &Z, F f, const AMatrix &X, const AMatrix &Y)
{
   size_t m = X.GetNrows();
   size_t n = X.GetNcols();

   for (size_t i = 0; i < m; i++) {
      for (size_t j = 0; j < n; j++) {
         Z(i,j) = f(X(i,j), Y(i,j));
      }
   }
}
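// Example (illustrative): element-wise difference of two matrices,
//
//    zipWithMatrix(Z, [](double x, double y) { return x - y; }, X, Y);
//
// where Z, X and Y have identical dimensions.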
/** Fold the function f over the elements of X, starting from the value
 *  start, and return the accumulated result. */
template <typename AMatrix, typename AFloat, typename F>
AFloat reduce(F f, AFloat start, const AMatrix &X)
{
   size_t m = X.GetNrows();
   size_t n = X.GetNcols();

   AFloat result = start;
   for (size_t i = 0; i < m; i++) {
      for (size_t j = 0; j < n; j++) {
         result = f(result, X(i,j));
      }
   }
   return result;
}
/** Fold the function f over the elements of X as in reduce, but divide the
 *  accumulated result by the number of elements to obtain a mean. */
template <typename AMatrix, typename AFloat, typename F>
AFloat reduceMean(F f, AFloat start, const AMatrix &X)
{
   size_t m = X.GetNrows();
   size_t n = X.GetNcols();

   AFloat result = start;
   for (size_t i = 0; i < m; i++) {
      for (size_t j = 0; j < n; j++) {
         result = f(result, X(i,j));
      }
   }
   return result / (AFloat) (m * n);
}
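// Example (illustrative): accumulate the squared elements of a matrix and
// compute their mean. Any callable taking (accumulator, element) and
// returning the new accumulator can be passed as f:
//
//    auto sumSquares = [](double acc, double x) { return acc + x * x; };
//    double total = reduce(sumSquares, 0.0, X);      // sum of squares
//    double mean  = reduceMean(sumSquares, 0.0, X);  // mean of squares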
/** Compute the relative error of x and y, protected against division by zero. */
template <typename T>
inline T relativeError(const T &x, const T &y)
{
   using std::abs;

   if (x == y) return T(0.0);

   T diff = abs(x - y);
   if (x * y == T(0.0) || diff < std::numeric_limits<T>::min()) return diff;

   return diff / (abs(x) + abs(y));
}
/** Compute the maximum, element-wise relative error of the matrices X and Y. */
template <typename Matrix1, typename Matrix2>
auto maximumRelativeError(const Matrix1 &X, const Matrix2 &Y) -> decltype(X(0,0))
{
   decltype(X(0,0)) curError, maxError = 0.0;

   Int_t m = X.GetNrows();
   Int_t n = X.GetNcols();

   assert(m == Y.GetNrows());
   assert(n == Y.GetNcols());

   for (Int_t i = 0; i < m; i++) {
      for (Int_t j = 0; j < n; j++) {
         curError = relativeError(X(i,j), Y(i,j));
         maxError = std::max(curError, maxError);
      }
   }
   return maxError;
}
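// Example (illustrative): an output computed with a tested architecture is
// typically validated against a reference result via the maximum element-wise
// relative error,
//
//    auto error = maximumRelativeError(AOutput, ARefOutput);
//    if (error > 1e-10) { /* report failure */ }
//
// The matrix names and the tolerance above are placeholders.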
/** Numerically compute the derivative of the functional f using finite
 *  differences. */
template <typename F, typename AFloat>
inline AFloat finiteDifference(F f, AFloat dx)
{
   return f(dx) - f(0.0 - dx);
}
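// Note that finiteDifference returns only the difference f(dx) - f(-dx); to
// approximate the derivative of f at zero the caller divides by the step
// width, for example (illustrative):
//
//    auto f = [](double x) { return x * x + 3.0 * x; };
//    double derivative = finiteDifference(f, 1e-4) / (2.0 * 1e-4);  // ~ 3.0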
/** Color code error. */
template <typename AFloat>
std::string print_error(AFloat &e)
{
   std::ostringstream out{};
   // red for large, yellow for moderate, green for small errors
   out << "\e[" << (e > 1e-5 ? "31m" : e > 1e-9 ? "33m" : "32m") << e << "\e[39m";
   return out.str();
}

} // namespace DNN
} // namespace TMVA

#endif // TMVA_TEST_DNN_UTILITY
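A minimal sketch of how these helpers are typically combined in a DNN architecture test, assuming this header is available as "Utility.h" and using the double-precision TReference backend from TMVA/DNN/Architectures/Reference.h; the matrix sizes, the doubling functional, and the expected error are illustrative only:

#include <iostream>

#include "TMVA/DNN/Architectures/Reference.h"
#include "Utility.h"   // this header; the file name is assumed

using namespace TMVA::DNN;

int main()
{
   using Matrix_t = TReference<double>::Matrix_t;

   Matrix_t A(10, 10), B(10, 10);
   randomMatrix(A);                                    // Gaussian-distributed entries
   copyMatrix(B, A);                                   // B now equals A element-wise
   applyMatrix(B, [](double x) { return 2.0 * x; });   // B = 2 * A

   double error = maximumRelativeError(B, A);          // ~ 1/3 for B = 2 * A
   std::cout << "max. relative error: " << print_error(error) << std::endl;
   return 0;
}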