Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
RecurrentPropagation.cu
Go to the documentation of this file.
1// @(#)root/tmva/tmva/dnn:$Id$
2// Author: Lorenzo Moneta 2020
3
4/*************************************************************************
5 * Copyright (C) 2017, Saurav Shekhar *
6 * All rights reserved. *
7 * *
8 * For the licensing terms see $ROOTSYS/LICENSE. *
9 * For the list of contributors see $ROOTSYS/README/CREDITS. *
10 *************************************************************************/
11
12 ////////////////////////////////////////////////////////////////////////////
13 // Implementation of the functions required for the forward and           //
14 // backward propagation of activations through a recurrent neural network //
15 // for CUDA architectures.                                                //
16 ////////////////////////////////////////////////////////////////////////////
17
19
20namespace TMVA
21{
22namespace DNN
23{
/// Re-initialize the tensors of a recurrent layer so that they keep sharing the
/// layer's existing device buffers but carry the row-major layout returned by
/// GetTensorLayout(). Output and activation-gradient tensors get shape
/// {batchSize, timeSteps, stateSize} (timeSteps collapses to 1 when the layer
/// does not return the full sequence); weight and bias tensors are re-wrapped
/// with their matrix shapes. Finally the cuDNN work tensors X, Y, DX, DY are
/// allocated with time as the leading dimension.
/// \param layer the RNN/LSTM/GRU layer whose tensors are re-initialized in place
24template <typename AFloat>
25template <typename RNNLayer>
26void TCudnn<AFloat>::InitializeRecurrentTensors(RNNLayer *layer)
27{
28 // initialization of the RNN tensors for setting the right layout (ROW major)
29 size_t timeSteps = (layer->DoesReturnSequence()) ? layer->GetTimeSteps() : 1;
30 layer->GetOutput() =
31 Tensor_t(layer->GetOutput().GetDeviceBuffer(),
32 {layer->GetBatchSize(), timeSteps, layer->GetStateSize()}, GetTensorLayout());
33 layer->GetActivationGradients() =
34 Tensor_t(layer->GetActivationGradients().GetDeviceBuffer(), {layer->GetBatchSize(), timeSteps, layer->GetStateSize()},
35 GetTensorLayout());
36
37 // make the weight tensors in the right layout (Row-major)
38 for (size_t i = 0; i < layer->GetWeights().size(); ++i) {
39 auto &w = layer->GetWeightsAt(i);
40
41 w = Tensor_t(layer->GetWeightsAt(i).GetDeviceBuffer(), {layer->GetWeightsAt(i).GetNrows(), layer->GetWeightsAt(i).GetNcols()},
42 GetTensorLayout());
43 }
44 // now the biases
45 for (size_t i = 0; i < layer->GetBiases().size(); ++i) {
46
47 // reshape tensors
48 auto &b = layer->GetBiasesAt(i);
49 b = Tensor_t(layer->GetBiasesAt(i).GetDeviceBuffer(), {layer->GetStateSize(), 1}, GetTensorLayout(), 0, 0);
50
51 }
52
53 // layer->GetWeightsState() = Tensor_t(layer->GetWeightsState().GetDeviceBuffer(),
54 // {layer->GetStateSize(), layer->GetStateSize()}, GetTensorLayout());
55 // layer->GetWeightsInput() = Tensor_t(layer->GetWeightsInput().GetDeviceBuffer(),
56 // {layer->GetStateSize(), layer->GetInputSize()}, GetTensorLayout());
57 // layer->GetBiasesState() = Tensor_t(layer->GetBiasesState().GetDeviceBuffer(),
58 // {layer->GetStateSize(), 1 }, GetTensorLayout());
59
 // X/Y (and their gradients DX/DY) are the work tensors handed to cuDNN;
 // note the time-step dimension comes first here, unlike the layer output above.
60 layer->GetX() = Tensor_t({layer->GetTimeSteps(), layer->GetBatchSize(), layer->GetInputSize() }, GetTensorLayout());
61 layer->GetY() = Tensor_t({layer->GetTimeSteps(), layer->GetBatchSize(), layer->GetStateSize() }, GetTensorLayout());
62
63 layer->GetDX() = Tensor_t({layer->GetTimeSteps(), layer->GetBatchSize(), layer->GetInputSize() }, GetTensorLayout());
64 layer->GetDY() = Tensor_t({layer->GetTimeSteps(), layer->GetBatchSize(), layer->GetStateSize() }, GetTensorLayout());
65}
66//____________________________________________________________________________
/// Create and configure all the cuDNN descriptors needed by a recurrent layer:
/// the RNN descriptor, the dropout (helper) descriptor, and — depending on the
/// cuDNN version — either RNN data descriptors (>= 8) or per-time-step tensor
/// descriptors plus filter descriptors (< 8). The layer's individual weight and
/// bias matrices are then copied into cuDNN's single weight-space buffer, and
/// the layer's weight/bias tensors are re-pointed into that buffer so that
/// later updates stay in sync with what cuDNN sees.
/// \param descriptors (output) set to a newly allocated RNNDescriptors_t; ownership passes to the caller
/// \param layer the RNN/LSTM/GRU layer to configure (type selects CUDNN_RNN_TANH / CUDNN_LSTM / CUDNN_GRU)
67template <typename AFloat>
68template <typename RNNLayer>
69void TCudnn<AFloat>::InitializeRecurrentDescriptors(TDescriptors *&descriptors, RNNLayer *layer)
70{
71
72 auto rnnDescriptors = new RNNDescriptors_t ();
73 CUDNNCHECK(cudnnCreateRNNDescriptor(&rnnDescriptors->LayerDescriptor));
74
75 CUDNNCHECK(cudnnCreateDropoutDescriptor(&rnnDescriptors->HelperDescriptor));
76
 // The layer class (deduced at compile time) determines the cuDNN cell type.
77 enum RNNType {kRNN, kLSTM, kGRU};
78 RNNType rnn_type = kRNN;
79 if ( std::is_same<RNNLayer, LSTMLayer_t>::value ) rnn_type = kLSTM;
80 if ( std::is_same<RNNLayer, GRULayer_t>::value ) rnn_type = kGRU;
81
82 cudnnHandle_t handle = layer->GetOutput().GetCudnnHandle();
83 float dropoutProb = 0.0; // layer->GetDroputProbability();
84
85 void *dropoutStates = nullptr; // random generator states ??
86 size_t dropoutStateSize = 0;
87
88 // get size of dropout states
89 CUDNNCHECK(cudnnDropoutGetStatesSize(handle, &dropoutStateSize));
90
91 //unsigned long long seed = GetRandomGenerator().Integer(INT_MAX);
92 // use GetSeed to avoid generating other numbers which will break sequence
93 unsigned long long seed = GetRandomGenerator().GetSeed();
94
95 CUDNNCHECK(cudnnSetDropoutDescriptor(rnnDescriptors->HelperDescriptor, handle, dropoutProb, dropoutStates,
96 dropoutStateSize, seed));
97 // cudnnDropoutDescriptor_t dropoutDesc,
98 // cudnnHandle_t handle,
99 // float dropout,
100 // void *states,
101 // size_t stateSizeInBytes,
102 // unsigned long long seed)
103
104 int inputSize = layer->GetInputSize();
105 int hiddenSize = layer->GetStateSize();
106 int numLayers = 1; // this is not time steps is for stacked layers // layer->GetTimeSteps();
107 //cudnnRNNInputMode_t inputMode = CUDNN_SKIP_INPUT; // the leading dimension of x must be equal to hiddenSize
108 cudnnRNNInputMode_t inputMode = CUDNN_LINEAR_INPUT; // this a vanilla rnn
109
110 cudnnDirectionMode_t direction = CUDNN_UNIDIRECTIONAL; // can be CUDNN_BIDIRECTIONAL
111 bool bidirectional = (direction == CUDNN_BIDIRECTIONAL);
112
113 cudnnRNNMode_t mode = CUDNN_RNN_TANH; // can be CUDNN_RNN_RELU, CUDNN_LSTM, CUDNN_GRU
114 if (rnn_type == kLSTM) mode = CUDNN_LSTM; // lstm case
115 if (rnn_type == kGRU) mode = CUDNN_GRU;
116
117 cudnnRNNAlgo_t algo = CUDNN_RNN_ALGO_STANDARD; // can be also CUDNN_RNN_ALGO_PERSIST_STATIC or CUDNN_RNN_ALGO_PERSIST_DYNAMIC
118
 // Number of linear (matrix) layers per cell: 2 for plain RNN, 6 for GRU, 8 for LSTM.
119 // this identifies the weights matrices
120 int numLinearLayers = 0;
121 if (mode == CUDNN_RNN_RELU || mode == CUDNN_RNN_TANH) {
122 numLinearLayers = 2;
123 }
124 if (mode == CUDNN_GRU ) {
125 numLinearLayers = 6;
126 }
127 if (mode == CUDNN_LSTM) {
128 numLinearLayers = 8;
129 }
130 // this should be the size of the weights vector
131 assert(numLinearLayers == layer->GetWeights().size());
132
133 cudnnDataType_t mathPrec = CUDNN_DATA_FLOAT;
134 if (std::is_same<AFloat, double>::value) { mathPrec = CUDNN_DATA_DOUBLE;}
135
136 // set bias mode
137 cudnnRNNBiasMode_t biasMode = CUDNN_RNN_NO_BIAS;
138 if (layer->GetBiases().size() > 0)
139 biasMode = CUDNN_RNN_SINGLE_INP_BIAS;
140 //biasMode = CUDNN_RNN_REC_BIAS; // difference is only for GRU
141
142 // needed for cudnn 8
143 cudnnDataType_t dataType = mathPrec; // use same (needed from cuDnn 8)
144 int projSize = hiddenSize;
145 // note dropoutDescriptor is HelperDescriptor
146
147 int seqLength = layer->GetTimeSteps();
148
149
150#if (CUDNN_VERSION >= 8000)
151 unsigned int auxFlags = CUDNN_RNN_PADDED_IO_ENABLED; // not sure what to pass here
152 cudnnMathType_t mathType = CUDNN_DEFAULT_MATH;
153 // CUDNNCHECK(cudnnSetRNNDescriptor_v6(handle, rnnDescriptors->LayerDescriptor, hiddenSize, numLayers, rnnDescriptors->HelperDescriptor, inputMode, direction, mode, algo, mathPrec) );
154 CUDNNCHECK(cudnnSetRNNDescriptor_v8(rnnDescriptors->LayerDescriptor, algo, mode, biasMode, direction,
155 inputMode, dataType, mathPrec, mathType, inputSize, hiddenSize, projSize, numLayers,
156 rnnDescriptors->HelperDescriptor, auxFlags));
157 // in cudnn 8 we need to create the data descriptors
158 CUDNNCHECK(cudnnCreateRNNDataDescriptor(&rnnDescriptors->xDataDesc));
159 CUDNNCHECK(cudnnCreateRNNDataDescriptor(&rnnDescriptors->yDataDesc));
160 // fill the data descriptors (do not support padding)
 // all sequences in the batch have the same length (seqLength)
161 std::vector<int> seqLengthArray(layer->GetBatchSize(), seqLength);
162 int vectorSize = inputSize; // for input
163 //cudnnRNNDataLayout_t layout = CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_PACKED; // should be this one if not using padding
164 cudnnRNNDataLayout_t layout = CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_UNPACKED;
165 AFloat paddingFill = 0;
166 CUDNNCHECK(cudnnSetRNNDataDescriptor(rnnDescriptors->xDataDesc, dataType, layout, seqLength,
167 layer->GetBatchSize(), vectorSize, seqLengthArray.data(), &paddingFill));
168 // for output RNN data
169 vectorSize = bidirectional ? hiddenSize * 2 : hiddenSize;
170 CUDNNCHECK(cudnnSetRNNDataDescriptor(rnnDescriptors->yDataDesc, dataType, layout, seqLength,
171 layer->GetBatchSize(), vectorSize, seqLengthArray.data(), &paddingFill));
172
173#else
174 CUDNNCHECK(cudnnSetRNNDescriptor(handle, rnnDescriptors->LayerDescriptor, hiddenSize, numLayers, rnnDescriptors->HelperDescriptor, inputMode, direction, mode, algo, mathPrec) );
175
176 CUDNNCHECK(cudnnSetRNNBiasMode(rnnDescriptors->LayerDescriptor, biasMode));
177
178
179 // define tensor descriptors for RNN
 // (one x/y/dx/dy descriptor per time step, as required by the pre-8 API)
180
181 int dimA[3];
182 int strideA[3];
183
184
185 rnnDescriptors->xDesc.resize(seqLength);
186 rnnDescriptors->yDesc.resize(seqLength);
187 rnnDescriptors->dxDesc.resize(seqLength);
188 rnnDescriptors->dyDesc.resize(seqLength);
189 TensorDescriptor_t *xDesc = rnnDescriptors->xDesc.data();
190 TensorDescriptor_t *yDesc = rnnDescriptors->yDesc.data();
191 TensorDescriptor_t *dxDesc = rnnDescriptors->dxDesc.data();
192 TensorDescriptor_t *dyDesc = rnnDescriptors->dyDesc.data();
193
194 for (int i = 0; i < seqLength; i++) {
195 CUDNNCHECK(cudnnCreateTensorDescriptor(&xDesc[i]));
196 CUDNNCHECK(cudnnCreateTensorDescriptor(&yDesc[i]));
197 CUDNNCHECK(cudnnCreateTensorDescriptor(&dxDesc[i]));
198 CUDNNCHECK(cudnnCreateTensorDescriptor(&dyDesc[i]));
199
 // input descriptors: {batch, inputSize, 1} with fully-packed strides
200 dimA[0] = layer->GetBatchSize();
201 dimA[1] = layer->GetInputSize();
202 dimA[2] = 1;
203
204 strideA[0] = dimA[2] * dimA[1];
205 strideA[1] = dimA[2];
206 strideA[2] = 1;
207
208 CUDNNCHECK(cudnnSetTensorNdDescriptor(xDesc[i], mathPrec, 3, dimA, strideA));
209 CUDNNCHECK(cudnnSetTensorNdDescriptor(dxDesc[i], mathPrec, 3, dimA, strideA));
210
 // output descriptors: {batch, hiddenSize (x2 if bidirectional), 1}
211 dimA[0] = layer->GetBatchSize();
212 dimA[1] = bidirectional ? hiddenSize * 2 : hiddenSize;
213 dimA[2] = 1;
214
215 strideA[0] = dimA[2] * dimA[1];
216 strideA[1] = dimA[2];
217 strideA[2] = 1;
218
219 CUDNNCHECK(cudnnSetTensorNdDescriptor(yDesc[i], mathPrec, 3, dimA, strideA));
220 CUDNNCHECK(cudnnSetTensorNdDescriptor(dyDesc[i], mathPrec, 3, dimA, strideA));
221 }
222#endif
223
224
225
226
227 // Set the filter parameters
228
229 size_t weightsSize = 0;
230#if (CUDNN_VERSION >= 8000)
231 size_t weightSpaceSize = 0;
232 CUDNNCHECK(cudnnGetRNNWeightSpaceSize(handle, rnnDescriptors->LayerDescriptor, &weightSpaceSize));
233 // we allocate the weight and weight-gradient buffer using Tensor_t (see below)
234 weightsSize = weightSpaceSize;
235#else
236
237 // weight descriptors
238 CUDNNCHECK(cudnnCreateFilterDescriptor(&rnnDescriptors->WeightsDescriptor));
239 CUDNNCHECK(cudnnCreateFilterDescriptor(&rnnDescriptors->WeightsGradDescriptor));
240
241 CUDNNCHECK(cudnnGetRNNParamsSize(handle, rnnDescriptors->LayerDescriptor, xDesc[0], &weightsSize, mathPrec));
242#endif
243
 // weightsSize is in bytes; convert to number of elements for the tensor shape
244 int dimW[3];
245 dimW[0] = (mathPrec == CUDNN_DATA_DOUBLE) ? weightsSize / sizeof(double) : weightsSize / sizeof(float);
246 dimW[1] = 1;
247 dimW[2] = 1;
248 // resize now weights tensor
249 auto &weightTensor = layer->GetWeightsTensor();
250 auto &weightGradTensor = layer->GetWeightGradientsTensor();
251
252#if (CUDNN_VERSION >= 8000)
253 // allocate weight space using a Tensor
254 // use tensor of dim=1 to avoid creating a tensor descriptor in TCudaTensor
255 weightTensor = Tensor_t( { (size_t) dimW[0]}, GetTensorLayout(), 0, 0);
256 weightGradTensor = Tensor_t({(size_t) dimW[0]}, GetTensorLayout(), 0, 0);
257
258 //std::cout << "allocate weight space tensor and grad weight space of size" << dimW[0] << std::endl;
259
260#else
261 weightTensor = Tensor_t( { (size_t) dimW[0], 1, 1}, GetTensorLayout(), 0, 0);
262 weightGradTensor = Tensor_t({(size_t) dimW[0], 1, 1}, GetTensorLayout(), 0, 0);
263
264 CUDNNCHECK(cudnnSetFilterNdDescriptor(rnnDescriptors->WeightsDescriptor, mathPrec, CUDNN_TENSOR_NCHW, 3, dimW));
265 CUDNNCHECK(cudnnSetFilterNdDescriptor(rnnDescriptors->WeightsGradDescriptor, mathPrec, CUDNN_TENSOR_NCHW, 3, dimW));
266
267
268#endif
269
270 // initialize now RNN weights from RNNLayer:WeightInput, RNNLayer::WeightState and RNNLayer::BiasesState
271
272 // support now only one single layer and not bidirectional
273 int nL = (!bidirectional) ? numLayers : 2 * numLayers; // for bidirectional nL = 2 * numLayers;
274 for (int ilayer = 0; ilayer < nL; ilayer++) {
275 for (int linLayerID = 0; linLayerID < numLinearLayers; linLayerID++) {
276
 // linLayerMat/linLayerBias are filled by cuDNN with the addresses of this
 // linear layer's weight matrix / bias inside the weight-space buffer.
277 AFloat *linLayerMat = nullptr;
278 AFloat *linLayerBias = nullptr;
279
280 // from version 8 we can use the same function
281#if (CUDNN_VERSION >= 8000)
282 // create descriptors for weight matrices
283 cudnnTensorDescriptor_t linLayerMatDesc;
284 CUDNNCHECK(cudnnCreateTensorDescriptor(&linLayerMatDesc));
285 cudnnTensorDescriptor_t linLayerBiasDesc;
286 CUDNNCHECK(cudnnCreateTensorDescriptor(&linLayerBiasDesc));
287 CUDNNCHECK(cudnnGetRNNWeightParams(handle, rnnDescriptors->LayerDescriptor, ilayer, weightSpaceSize, weightTensor.GetDataPointer(),
288 linLayerID, linLayerMatDesc, (void **)&linLayerMat, linLayerBiasDesc, (void **)&linLayerBias));
289
290 //std::cout << "RNN offsets" << linLayerID << " offset " << linLayerMat-weightTensor.GetDataPointer() << " " << linLayerMat << std::endl;
291#else
292 // create descriptors for weight matrices
293 cudnnFilterDescriptor_t linLayerMatDesc;
294 CUDNNCHECK(cudnnCreateFilterDescriptor(&linLayerMatDesc));
295 cudnnFilterDescriptor_t linLayerBiasDesc;
296 CUDNNCHECK(cudnnCreateFilterDescriptor(&linLayerBiasDesc));
297
298 CUDNNCHECK(cudnnGetRNNLinLayerMatrixParams(handle, rnnDescriptors->LayerDescriptor, ilayer, rnnDescriptors->xDesc.data()[0],
299 rnnDescriptors->WeightsDescriptor, weightTensor.GetDataPointer(),
300 linLayerID, linLayerMatDesc, (void **)&linLayerMat));
301 // for the bias
302 CUDNNCHECK(cudnnGetRNNLinLayerBiasParams(handle, rnnDescriptors->LayerDescriptor, ilayer,
303 rnnDescriptors->xDesc.data()[0], rnnDescriptors->WeightsDescriptor,
304 weightTensor.GetDataPointer(), linLayerID, linLayerBiasDesc,
305 (void **)&linLayerBias));
306#endif
307
308 // copy now weights from GPU to GPU (from layer->GetWeights() -> pointers needed by Cudnn)
309
310 cudnnDataType_t dataType;
311 int nbDims;
312 int filterDimA[3] = {0,0,0};
313 if (linLayerMat) {
314#if (CUDNN_VERSION >= 8000)
315 int strideA[3];
316 CUDNNCHECK(cudnnGetTensorNdDescriptor(linLayerMatDesc, 3, &dataType, &nbDims, filterDimA, strideA));
317#else
318 cudnnTensorFormat_t format;
319 CUDNNCHECK(cudnnGetFilterNdDescriptor(linLayerMatDesc, 3, &dataType, &format, &nbDims, filterDimA));
320#endif
321 /// RNN: linLayerID = 0 : input weight
322 // = 1 : input state
323 //
324 // LSTM = 0,4 : input gate ( weight input + weight state)
325 // = 1,5 : forget gate weight
326 // = 2, 6 : new memory gate weight
327 // = 3, 7 : output gate
328 //
329 // fortunately same convention is used in the RNNLayers::GetWeights()[ID]
330
331 // copy layer weights in linLayerMat
332 // if (linLayerID == 0)
333 // {
334 // copy from GetStateWeights (tensor is state x state)
335 int wsize = layer->GetWeightsAt(linLayerID).GetSize();
336
337
338 //std::cout << "lin layer ID " << linLayerID << " " << linLayerMat << " " << linLayerBias << std::endl;
339 //std::cout << "input weight size = " << wsize << " { " << layer->GetWeightsAt(linLayerID).GetNrows() << " "
340 // << layer->GetWeightsAt(linLayerID).GetNcols() << "} should be " << filterDimA[1] << " x "
341 // << filterDimA[2] << std::endl;
342
343
344 // here we copy initial weight values for Layer::GetWeightsAt(...) in CuDNN weight space
345 //assert(wsize == filterDimA[1] * filterDimA[2]);
346 cudaMemcpyAsync(linLayerMat, layer->GetWeightsAt(linLayerID).GetDataPointer(), wsize * sizeof(AFloat),
347 cudaMemcpyDeviceToDevice, layer->GetWeightsAt(linLayerID).GetComputeStream());
348 //std::cout << "copy weights size " << wsize << " at offset " << linLayerMat-weightTensor.GetDataPointer() << std::endl;
349
350 }
351
352 // Here for the bias : standard is input bias mode
353
 // Map the cuDNN linear-layer index to the index used by layer->GetBiasesAt().
354 // linLayerID = 0 (RNN) 0,1,2,3 LSTM 0,1,2 GRU if CUDNN_RNN_SINGLE_INP_BIAS mode
355 int biasID = linLayerID;
356 if (biasMode == CUDNN_RNN_SINGLE_REC_BIAS) {
357 // case of state bias
358 //linLayerID = 1 (RNN), (4,5,6,7) LSTM , (3,4,5) GRU
359 biasID = linLayerID - 1;
360 if (mode == CUDNN_LSTM) biasID = linLayerID - 4;
361 if (mode == CUDNN_GRU) biasID = linLayerID - 3;
362 }
363 if (linLayerBias) {
364
365#if (CUDNN_VERSION >= 8000)
366 int strideA[3];
367 CUDNNCHECK(cudnnGetTensorNdDescriptor(linLayerBiasDesc, 3, &dataType, &nbDims, filterDimA, strideA));
368#else
369 CUDNNCHECK(cudnnGetFilterNdDescriptor(linLayerBiasDesc, 3, &dataType, &format, &nbDims, filterDimA));
370#endif
371
372
 // a zero first dimension means this linear layer has no bias to copy
373 if (filterDimA[0] > 0) {
374
375 // check if above definitions are valid
376 assert(biasID >= 0);
377
378 // copy from GetStateWeights (tensor is state x state)
379 int wsize = layer->GetBiasesAt(biasID).GetSize();
380
381 //std::cout << "state bias " << wsize << " bias ID " << biasID << " { " <<
382 //layer->GetBiasesAt(biasID).GetNrows() << " "
383 // << layer->GetBiasesAt(biasID).GetNcols() << "} should be " << filterDimA[1] << " x " <<
384 // filterDimA[2]
385 // << std::endl;
386
387 //PrintTensor(layer->GetBiasesAt(biasID), "Bias state");
388
389 // same as above but for biases
390 assert(wsize == filterDimA[1]);
391 cudaMemcpyAsync(linLayerBias, layer->GetBiasesAt(biasID).GetDataPointer(), wsize * sizeof(AFloat),
392 cudaMemcpyDeviceToDevice, layer->GetBiasesAt(biasID).GetComputeStream());
393
394 //std::cout << "copy bias size " << wsize << " at offset " << linLayerBias-weightTensor.GetDataPointer() << std::endl;
395
396
397 }
398 }
399
400
401#if (CUDNN_VERSION >= 8000)
402 // After copying we need to synchronize back the matrices in GetWeightsAt (we do later for versions < 8)
403 // obtain address for gradient of weights too
404
405 AFloat *bGradOffset = nullptr;
406 AFloat *wGradOffset = nullptr;
407 CUDNNCHECK(cudnnGetRNNWeightParams(handle, rnnDescriptors->LayerDescriptor, ilayer, weightSpaceSize, weightGradTensor.GetDataPointer(),
408 linLayerID, linLayerMatDesc, (void **)&wGradOffset, linLayerBiasDesc, (void **)&bGradOffset));
409
410
411 // std::cout << "RNN GRAD offsets" << linLayerID << " offset " << wGradOffset-weightGradTensor.GetDataPointer() << " ptr " << wGradOffset << std::endl;
412 // make tensor w using Cudnn buffer - so it is synchronized
413 if (linLayerMat && wGradOffset) {
414 auto &w = layer->GetWeightsAt(linLayerID);
415 auto & dw = layer->GetWeightGradientsAt(linLayerID);
416 w = Tensor_t( TCudaDeviceBuffer<AFloat>(linLayerMat, w.GetSize(), w.GetComputeStream()), w.GetShape(), GetTensorLayout(), 0, 0);
417 dw = Tensor_t(TCudaDeviceBuffer<AFloat>(wGradOffset, dw.GetSize(), dw.GetComputeStream()), dw.GetShape(), GetTensorLayout(), 0, 0);
418 }
419 if (linLayerBias && bGradOffset) {
420 auto &b = layer->GetBiasesAt(biasID);
421 auto &db = layer->GetBiasGradientsAt(biasID);
422 b = Tensor_t(TCudaDeviceBuffer<AFloat>(linLayerBias, b.GetSize(), b.GetComputeStream()), b.GetShape(), GetTensorLayout(), 0, 0);
423 db = Tensor_t(TCudaDeviceBuffer<AFloat>(bGradOffset, db.GetSize(), db.GetComputeStream()), db.GetShape(), GetTensorLayout(), 0, 0);
424 }
425#endif
426
427 //CUDNNCHECK(cudnnGetFilterNdDescriptor(linLayerBiasDesc, 3, &dataType, &format, &nbDims, filterDimA));
428
429 // initGPUData(linLayerBias, filterDimA[0] * filterDimA[1] * filterDimA[2], 1.f);
430
431 // is needed?
 // NOTE(review): for cuDNN >= 8 the per-iteration tensor descriptors
 // (linLayerMatDesc/linLayerBiasDesc) are created but never destroyed here - confirm.
432#if (CUDNN_VERSION >= 8000)
433 //no op
434#else
435 CUDNNCHECK(cudnnDestroyFilterDescriptor(linLayerMatDesc));
436 CUDNNCHECK(cudnnDestroyFilterDescriptor(linLayerBiasDesc));
437#endif
438 // end layer loop
439 }
440 }
441
442 //PrintTensor(weightTensor, "Full WeightTensor");
443
444 // the weight tensor in Cudnn is stored as
445 // weights input + weights state + bias state
446 // This here is quite confusing. It is enough to do for the first weight, where we store everything.
447 // can not we use just Layer::GetWeightTensor in RNNLayer when passing the weights to the forward function?
448
449 // here we need to synchronize GPU buffers in Layer::GetWeightsAt() with Cudnn weight buffer
450 // otherwise weight updates will not be reflected
451#if (CUDNN_VERSION < 8000)
452 size_t offset = 0;
453 for (size_t i = 0; i < layer->GetWeights().size(); ++i) {
454 auto &w = layer->GetWeightsAt(i);
455 auto & dw = layer->GetWeightGradientsAt(i);
456 if (weightTensor(offset, 0, 0) != w(0, 0))
457 std::cerr << "Error - different offset for weight " << i << std::endl;
458
459 // reshape tensors
460 w = Tensor_t(weightTensor.GetDeviceBuffer().GetSubBuffer(offset, w.GetSize()), w.GetShape(),
461 GetTensorLayout(), 0, 0);
462 dw = Tensor_t(weightGradTensor.GetDeviceBuffer().GetSubBuffer(offset, w.GetSize()), w.GetShape(), GetTensorLayout(), 0, 0);
463
464 offset += w.GetSize();
465 }
466 // now the biases
467 for (size_t i = 0; i < layer->GetBiases().size(); ++i) {
468 auto &b = layer->GetBiasesAt(i);
469 auto &db = layer->GetBiasGradientsAt(i);
470 if (weightTensor(offset, 0, 0) != b(0, 0))
471 std::cerr << "Error - different offset for bias " << i << std::endl;
472
473 // reshape tensors
474 b = Tensor_t(weightTensor.GetDeviceBuffer().GetSubBuffer(offset, b.GetSize()), b.GetShape(), GetTensorLayout(), 0, 0);
475 db = Tensor_t(weightGradTensor.GetDeviceBuffer().GetSubBuffer(offset, b.GetSize()), b.GetShape(), GetTensorLayout(), 0,
476 0);
477
478 offset += b.GetSize();
479 }
480#endif
481
482
483 descriptors = rnnDescriptors;
484}
485
486//____________________________________________________________________________
/// Destroy the cuDNN descriptors created by InitializeRecurrentDescriptors:
/// the RNN descriptor, the dropout (helper) descriptor and, depending on the
/// cuDNN version, either the RNN data descriptors (>= 8) or the filter
/// descriptors plus the per-time-step tensor descriptors (< 8).
/// \param descriptors the descriptor bundle to release (must be an RNNDescriptors_t)
487template<typename AFloat>
488void TCudnn<AFloat>::ReleaseRNNDescriptors(TDescriptors * descriptors)
489{
490 auto & rnnDescriptors = static_cast<RNNDescriptors_t &>(*descriptors);
491 CUDNNCHECK(cudnnDestroyRNNDescriptor(rnnDescriptors.LayerDescriptor));
492
493 ReleaseDescriptor(rnnDescriptors.HelperDescriptor);
494#if (CUDNN_VERSION >= 8000)
495 CUDNNCHECK(cudnnDestroyRNNDataDescriptor(rnnDescriptors.xDataDesc));
496 CUDNNCHECK(cudnnDestroyRNNDataDescriptor(rnnDescriptors.yDataDesc));
497#else
498 ReleaseDescriptor(rnnDescriptors.WeightsDescriptor);
499 ReleaseDescriptor(rnnDescriptors.WeightsGradDescriptor);
500
501 // need to delete the vectors of tensor descriptors
502 for (size_t i = 0; i < rnnDescriptors.xDesc.size(); i++) {
503 cudnnDestroyTensorDescriptor(rnnDescriptors.xDesc.data()[i]);
504 cudnnDestroyTensorDescriptor(rnnDescriptors.yDesc.data()[i]);
505
506 cudnnDestroyTensorDescriptor(rnnDescriptors.dxDesc.data()[i]);
507 cudnnDestroyTensorDescriptor(rnnDescriptors.dyDesc.data()[i]);
508 }
509#endif
510}
511
512
513//____________________________________________________________________________
/// Allocate the GPU workspaces needed for the recurrent forward/backward passes.
/// Reshapes the layer's hidden-state (and, for LSTM, cell-state) tensor to
/// {numLayers, batchSize, stateSize}, queries cuDNN for the required workspace
/// sizes and cudaMalloc's the forward, inference (cuDNN >= 8 only) and helper
/// (training reserve) buffers. Aborts via R__ASSERT when an allocation fails.
/// \param workspace (output) set to a newly allocated RNNWorkspace_t; ownership passes to the caller
/// \param descriptors descriptor bundle previously filled by InitializeRecurrentDescriptors
/// \param layer the RNN/LSTM/GRU layer the workspace is sized for
514template <typename AFloat>
515template <typename RNNLayer>
516void TCudnn<AFloat>::InitializeRecurrentWorkspace(TWorkspace *&workspace, TDescriptors *&descriptors, RNNLayer *layer)
517{
518 auto rnnWorkspace = new RNNWorkspace_t ();
519 auto rnnDescriptors = static_cast<RNNDescriptors_t *>(descriptors);
520
521 cudnnHandle_t handle = layer->GetOutput().GetCudnnHandle();
522
523 bool bidirectional = false;
524
525 //std::cout << "initialize RNN workspaces..." << std::endl;
526
527 size_t numLayers = 1; // support now only one single layer
528 if (bidirectional) numLayers *= 2; // bidirectional RNN is like having two layers
529
530 // redefine shape of layer->GetShape
531 Tensor_t &stateTensor = layer->GetState();
532 stateTensor = Tensor_t(stateTensor.GetDeviceBuffer(), { numLayers, layer->GetBatchSize(), layer->GetStateSize()},
533 GetTensorLayout(), 0, 0 );
534
 // a non-empty cell tensor identifies an LSTM layer; reshape it the same way
535 if (layer->GetCell().GetSize() > 0) { // in case of LSTM
536 Tensor_t & cellStateTensor = layer->GetCell();
537 cellStateTensor = Tensor_t(cellStateTensor.GetDeviceBuffer(), {numLayers, layer->GetBatchSize(), layer->GetStateSize()}, GetTensorLayout(), 0, 0 );
538 }
539
540
541 // get workspace size
542#if (CUDNN_VERSION >= 8000)
543
544 // input descriptors (xDesc) should specify maxSeqLength and batchSize
545 CUDNNCHECK(cudnnGetRNNTempSpaceSizes(handle, rnnDescriptors->LayerDescriptor, CUDNN_FWD_MODE_TRAINING,
546 rnnDescriptors->xDataDesc, &rnnWorkspace->ForwardWorkspaceSize,
547 &rnnWorkspace->HelperWorkspaceSize));
548 size_t tmp = 0; // not needed for inference
549 CUDNNCHECK(cudnnGetRNNTempSpaceSizes(handle, rnnDescriptors->LayerDescriptor, CUDNN_FWD_MODE_INFERENCE,
550 rnnDescriptors->xDataDesc, &rnnWorkspace->InferenceWorkspaceSize,
551 &tmp));
552#else
553 // need to fill xDesc with input tensor descriptors for each layer
554 CUDNNCHECK(cudnnGetRNNWorkspaceSize(handle, rnnDescriptors->LayerDescriptor, layer->GetTimeSteps(),
555 rnnDescriptors->xDesc.data(), &rnnWorkspace->ForwardWorkspaceSize));
556
557 CUDNNCHECK(cudnnGetRNNTrainingReserveSize(handle, rnnDescriptors->LayerDescriptor, layer->GetTimeSteps(),
558 rnnDescriptors->xDesc.data(), &rnnWorkspace->HelperWorkspaceSize));
559#endif
560
 // NOTE(review): the sizes returned by cuDNN are already in bytes, yet the
 // allocations below multiply by sizeof(AFloat) - over-allocates; confirm intent.
561 if (rnnWorkspace->ForwardWorkspaceSize > 0) cudaMalloc(&rnnWorkspace->ForwardWorkspace, rnnWorkspace->ForwardWorkspaceSize*sizeof(AFloat));
562 if (rnnWorkspace->ForwardWorkspaceSize > 0 && rnnWorkspace->ForwardWorkspace == nullptr ) {
563 std::cerr << "Error allocating RNN workspace of size " << rnnWorkspace->ForwardWorkspaceSize << " - probably running out of memory on the GPU"
564 << std::endl;
565 std::cout << " layer input shape is { " << layer->GetBatchSize() << " , " << layer->GetTimeSteps() << " , "
566 <<layer->GetStateSize() << " } " << std::endl;
567
568 R__ASSERT(false);
569 }
570
571 if (rnnWorkspace->InferenceWorkspaceSize > 0) //needed only for cudnn >=8
572 cudaMalloc(&rnnWorkspace->InferenceWorkspace, rnnWorkspace->InferenceWorkspaceSize*sizeof(AFloat));
573
574 if (rnnWorkspace->HelperWorkspaceSize > 0) cudaMalloc(&rnnWorkspace->HelperWorkspace, rnnWorkspace->HelperWorkspaceSize*sizeof(AFloat));
575 if (rnnWorkspace->HelperWorkspaceSize > 0 && rnnWorkspace->HelperWorkspace == nullptr ) {
576 std::cerr << "Error allocating RNN reserved workspace of size " << rnnWorkspace->HelperWorkspaceSize << " - probably running out of memory on the GPU"
577 << std::endl;
578 std::cout << " layer input shape is { " << layer->GetBatchSize() << " , " << layer->GetTimeSteps() << " , "
579 <<layer->GetStateSize() << " } " << std::endl;
580
581 R__ASSERT(false);
582 }
583
584 workspace = rnnWorkspace;
585 //std::cout << "Done initialization of RNN workspaces..." << std::endl;
586}
587
588//____________________________________________________________________________
589template <typename AFloat>
590void TCudnn<AFloat>::FreeRNNWorkspace(TWorkspace * workspace) {
591 if (!workspace) return;
592 auto rnnWorkspace = static_cast<RNNWorkspace_t *>(workspace);
593
594 if(rnnWorkspace->ForwardWorkspace) cudaFree(rnnWorkspace->ForwardWorkspace);
595 if(rnnWorkspace->InferenceWorkspace) cudaFree(rnnWorkspace->InferenceWorkspace);
596 if(rnnWorkspace->HelperWorkspace) cudaFree(rnnWorkspace->HelperWorkspace);
597
598
599}
600
601//____________________________________________________________________________
/// Execute the cuDNN forward pass of a recurrent layer.
/// \param x input tensor; shape[0] is the number of time steps
/// \param hx initial hidden state; its data pointer is only passed to cuDNN when
///        rememberState is true (hard-coded false below, so cuDNN sees a null
///        pointer and starts from a zero state)
/// \param cx initial cell state (LSTM); with rememberState == false the isLSTM
///        flag below is always false, so cell-state pointers are null as well
/// \param weights cuDNN weight-space buffer filled in InitializeRecurrentDescriptors
/// \param y output tensor
/// \param hy final hidden-state output
/// \param cy final cell-state output (LSTM)
/// \param desc cuDNN descriptor bundle for this layer
/// \param workspace device workspaces allocated in InitializeRecurrentWorkspace
/// \param isTraining selects the training path (with reserve space) vs inference
602template <typename AFloat>
603void TCudnn<AFloat>::RNNForward(const Tensor_t &x, const Tensor_t &hx, const Tensor_t &cx, const Tensor_t & weights, Tensor_t &y,
604 Tensor_t &hy, Tensor_t &cy, const RNNDescriptors_t & desc, RNNWorkspace_t &workspace, bool isTraining)
605
606{
607
608 //std::cout << "doing forward...";
609 //std::string msg = (isTraining) ? " in training" : " in inference";
610 //std::cout << msg << std::endl;
611 bool rememberState = false; // pass initial input state and save output state
612 cudnnHandle_t cudnnHandle = x.GetCudnnHandle();
613
614 int seqLength = x.GetShape()[0]; // time steps
615 cudnnRNNDescriptor_t rnnDesc = desc.LayerDescriptor;
616
617 // initial state and cell state will be set to zero
 // with rememberState == false this is always false, disabling the LSTM cell-state pointers
618 bool isLSTM = (cx.GetSize() > 0) && rememberState;
619
620#if (CUDNN_VERSION >= 8000)
621 // forward pass (use same function for training and inference in version > 8)
622 cudnnForwardMode_t fwdMode = (isTraining) ? CUDNN_FWD_MODE_TRAINING : CUDNN_FWD_MODE_INFERENCE;
623 const int * devSeqLength = nullptr; // should be null for versions >= 8.9
624 size_t weightSpaceSize = (std::is_same<AFloat, double>::value) ? weights.GetSize()* sizeof(double) :
625 weights.GetSize()* sizeof(float);
 // inference uses its own (smaller) workspace allocated in InitializeRecurrentWorkspace
626 size_t workspaceSize = (isTraining) ? workspace.ForwardWorkspaceSize : workspace.InferenceWorkspaceSize;
627 void * workspacePtr = (isTraining) ? workspace.ForwardWorkspace : workspace.InferenceWorkspace;
628 cudnnStatus_t status = cudnnRNNForward(
629 cudnnHandle, rnnDesc, fwdMode, devSeqLength,
630 // for x and y should be DataDescriptors
631 desc.xDataDesc, x.GetDataPointer(), desc.yDataDesc, y.GetDataPointer(),
632 hx.GetTensorDescriptor(), (rememberState) ? hx.GetDataPointer(): nullptr,
633 (rememberState) ? hy.GetDataPointer() : nullptr, // hdesc, hx, hy
634 (isLSTM) ? cx.GetTensorDescriptor() : hx.GetTensorDescriptor(), (isLSTM) ? cx.GetDataPointer() : nullptr,
635 (isLSTM) ? cy.GetDataPointer() : nullptr,
636 weightSpaceSize, weights.GetDataPointer(), workspaceSize, workspacePtr,
637 workspace.HelperWorkspaceSize, workspace.HelperWorkspace);
638
639 assert(status == CUDNN_STATUS_SUCCESS);
640 CUDNNCHECK(status);
641
642#else
643 // Perform forward training
644 if (isTraining) {
645 cudnnStatus_t status = cudnnRNNForwardTraining(
646 cudnnHandle, rnnDesc, seqLength, desc.xDesc.data(), x.GetDataPointer(), hx.GetTensorDescriptor(), (rememberState) ?
647 hx.GetDataPointer() : nullptr, (isLSTM) ? cx.GetTensorDescriptor() : hx.GetTensorDescriptor(), (isLSTM) ? cx.GetDataPointer() : nullptr, desc.WeightsDescriptor,
648 weights.GetDataPointer(), desc.yDesc.data(), y.GetDataPointer(), hy.GetTensorDescriptor(), hy.GetDataPointer(),
649 (isLSTM) ? cy.GetTensorDescriptor() : hy.GetTensorDescriptor(), (isLSTM) ? cy.GetDataPointer() : nullptr, workspace.ForwardWorkspace, workspace.ForwardWorkspaceSize,
650 workspace.HelperWorkspace, workspace.HelperWorkspaceSize);
651
652 assert(status == CUDNN_STATUS_SUCCESS);
653 CUDNNCHECK(status);
654
655 }
656 else {
657 // perform inference
 // same call without the reserve (helper) space, which is only needed for backward
658 cudnnStatus_t status = cudnnRNNForwardInference(
659 cudnnHandle, rnnDesc, seqLength, desc.xDesc.data(), x.GetDataPointer(), hx.GetTensorDescriptor(),
660 (rememberState) ? hx.GetDataPointer() : nullptr,
661 (isLSTM) ? cx.GetTensorDescriptor() : hx.GetTensorDescriptor(), (isLSTM) ? cx.GetDataPointer() : nullptr,
662 desc.WeightsDescriptor, weights.GetDataPointer(), desc.yDesc.data(), y.GetDataPointer(),
663 hy.GetTensorDescriptor(), hy.GetDataPointer(), (isLSTM) ? cy.GetTensorDescriptor() : hy.GetTensorDescriptor(),
664 (isLSTM) ? cy.GetDataPointer() : nullptr, workspace.ForwardWorkspace, workspace.ForwardWorkspaceSize);
665
666 assert(status == CUDNN_STATUS_SUCCESS);
667 CUDNNCHECK(status);
668 }
669#endif
670// std::cout << "\n\n*************\nforward is done" << std::endl;
671// PrintTensor(x, "\nx");
672// PrintTensor(y, "\ny");
673// PrintTensor(weights,"\nweights");
674
675}
676
//____________________________________________________________________________
/// Backward propagation of a recurrent layer (RNN/LSTM/GRU) using cuDNN.
///
/// Computes the gradient with respect to the layer input (dx) and the
/// gradient with respect to the weights (dw) from the forward-pass
/// activations and the incoming activation gradients, via
/// cudnnRNNBackwardData(_v8) followed by cudnnRNNBackwardWeights(_v8).
///
/// \param x   forward-pass input; its first dimension is the sequence length
///            and its second the batch size (read below as seqLength/batchSize)
/// \param hx  initial hidden state used in the forward pass
/// \param cx  initial cell state (LSTM only; empty tensor otherwise)
/// \param y   forward-pass output
/// \param dy  gradient of the loss w.r.t. the output y
/// \param dhy gradient w.r.t. the final hidden state
/// \param dcy gradient w.r.t. the final cell state (LSTM only)
/// \param weights flat buffer holding all the layer weights
/// \param dx  (out) gradient w.r.t. the input x
/// \param dhx (out) gradient w.r.t. the initial hidden state
/// \param dcx (out) gradient w.r.t. the initial cell state (LSTM only)
/// \param dw  (out) gradient w.r.t. the weights; in the cuDNN >= 8 path the
///            buffer is accumulated into (CUDNN_WGRAD_MODE_ADD), so the
///            caller must zero it beforehand
/// \param desc      cuDNN descriptors previously created for this layer
/// \param workspace pre-allocated forward (workspace) and helper (reserve) buffers
template <typename AFloat>
void TCudnn<AFloat>::RNNBackward(const Tensor_t &x, const Tensor_t &hx, const Tensor_t &cx, const Tensor_t &y,
                                 const Tensor_t &dy, const Tensor_t &dhy, const Tensor_t &dcy, const Tensor_t &weights,
                                 Tensor_t &dx, Tensor_t &dhx, Tensor_t &dcx, Tensor_t &dw, const RNNDescriptors_t &desc,
                                 RNNWorkspace_t &workspace)

{
   // The initial hidden/cell states are not carried over between batches,
   // so the corresponding data pointers passed to cuDNN below are null.
   bool rememberState = false;
   bool rememberStateGrad = false;
   // NOTE(review): with rememberState hard-coded to false, isLSTM is always
   // false here, so the cell-state tensors (cx/dcy/dcx) are never handed to
   // cuDNN -- confirm this is the intended behaviour for LSTM layers.
   bool isLSTM = (cx.GetSize() > 0) && rememberState;
   int seqLength = x.GetShape()[0];   // first input dimension is the time axis
   int batchSize = x.GetShape()[1];   // second input dimension is the batch (used only in the pre-v8 path)
   cudnnRNNDescriptor_t rnnDesc = desc.LayerDescriptor;
   cudnnHandle_t cudnnHandle = x.GetCudnnHandle();

   // first data gradients (if dx is a dummy tensor this is the first layer and we could skip the data gradients)
   //if (dx.GetSize() > 0) {
   // cudnn needs to call backward data first to make backward weights work !!!
   //cudnnStatus_t status;
#if (CUDNN_VERSION >= 8000)


//#if (CUDNN_VERSION < 8900)
//   std::vector<int> devSeqLengths(batchSize,seqLength);
//   // need to copy to GPU memory
//   int * gpu_seqLengths = nullptr;
//   cudaMalloc(&gpu_seqLengths, batchSize * sizeof(int));
//   cudaMemcpy(gpu_seqLengths, devSeqLengths.data(), batchSize * sizeof(int), cudaMemcpyHostToDevice);
//#endif
   // size in bytes of the flat weight buffer, in the precision of AFloat
   size_t weightSpaceSize = (std::is_same<AFloat, double>::value) ? weights.GetSize()* sizeof(double) :
                                                                    weights.GetSize()* sizeof(float);
   // data gradients: fills dx (and dhx/dcx only when the state gradients are remembered)
   cudnnStatus_t status = cudnnRNNBackwardData_v8(
      cudnnHandle, rnnDesc, NULL,
      desc.yDataDesc, y.GetDataPointer(), dy.GetDataPointer(), // for x and y must be data descriptors
      desc.xDataDesc, dx.GetDataPointer(),
      hx.GetTensorDescriptor(), (rememberState) ? hx.GetDataPointer() : nullptr,
      (rememberStateGrad) ? dhy.GetDataPointer() : nullptr, (rememberStateGrad) ? dhx.GetDataPointer() : nullptr,
      (isLSTM) ? cx.GetTensorDescriptor() : hx.GetTensorDescriptor(),
      (isLSTM) ? cx.GetDataPointer() : nullptr, (isLSTM) ? dcy.GetDataPointer() : nullptr, (isLSTM) ? dcx.GetDataPointer() : nullptr,
      weightSpaceSize, weights.GetDataPointer(),
      workspace.ForwardWorkspaceSize, workspace.ForwardWorkspace, workspace.HelperWorkspaceSize, workspace.HelperWorkspace);


   assert(status == CUDNN_STATUS_SUCCESS);
   CUDNNCHECK(status);

   //std::cout << "\n\n**********\nbackward data is done" << std::endl;
   // std::cout << "RNN Backward weights !!! -remember state" << rememberState << std::endl;
   //PrintTensor(y, "y");
   //PrintTensor(dx, "dx");
   //PrintTensor(weights, "weights");
   //assert(weights.GetSize() == dw.GetSize());

   // now backward gradient of weights
   // the dweight space buffer should be zeroed before, since
   // CUDNN_WGRAD_MODE_ADD accumulates the gradients into it
   // NOTE(review): the returned status of this call is never checked
   // (no assert/CUDNNCHECK follows in this code path) -- consider adding one.
   status = cudnnRNNBackwardWeights_v8(cudnnHandle, rnnDesc,CUDNN_WGRAD_MODE_ADD, NULL,
                                       desc.xDataDesc, x.GetDataPointer(), // should be data descriptors
                                       hx.GetTensorDescriptor(), (rememberState) ? hx.GetDataPointer() : nullptr,
                                       desc.yDataDesc, y.GetDataPointer(), // data descriptor
                                       weightSpaceSize, dw.GetDataPointer(),
                                       workspace.ForwardWorkspaceSize, workspace.ForwardWorkspace, workspace.HelperWorkspaceSize, workspace.HelperWorkspace);


   //std::cout << "RNN Backward weights !!! " << std::endl;
   //PrintTensor(x, "x");
   //PrintTensor(weights, "weights");
   //PrintTensor(dw, "dw");
#else
   // pre-cuDNN-8 API: per-time-step descriptor arrays and explicit seqLength
   cudnnStatus_t status = cudnnRNNBackwardData(
      cudnnHandle, rnnDesc, seqLength, desc.yDesc.data(), y.GetDataPointer(), desc.dyDesc.data(), dy.GetDataPointer(),
      dhy.GetTensorDescriptor(), (rememberStateGrad) ? dhy.GetDataPointer() : nullptr,
      (isLSTM) ? dcy.GetTensorDescriptor() : dhy.GetTensorDescriptor(), (isLSTM) ? dcy.GetDataPointer() : nullptr, // dcy
      desc.WeightsDescriptor, weights.GetDataPointer(), hx.GetTensorDescriptor(),
      (rememberState) ? hx.GetDataPointer() : nullptr, (isLSTM) ? cx.GetTensorDescriptor() : hx.GetTensorDescriptor(),
      (isLSTM) ? cx.GetDataPointer() : nullptr, // cx
      desc.dxDesc.data(), dx.GetDataPointer(), dhx.GetTensorDescriptor(),
      (rememberState) ? dhx.GetDataPointer() : nullptr,
      (isLSTM) ? dcx.GetTensorDescriptor() : dhx.GetTensorDescriptor(),
      (isLSTM) ? dcx.GetDataPointer() : nullptr, // dcx
      workspace.ForwardWorkspace, workspace.ForwardWorkspaceSize, workspace.HelperWorkspace,
      workspace.HelperWorkspaceSize);

   assert(status == CUDNN_STATUS_SUCCESS);
   CUDNNCHECK(status);


   status = cudnnRNNBackwardWeights(cudnnHandle, rnnDesc, seqLength, desc.xDesc.data(), x.GetDataPointer(),
                                    hx.GetTensorDescriptor(), (rememberState) ? hx.GetDataPointer() : nullptr,
                                    desc.yDesc.data(), y.GetDataPointer(), workspace.ForwardWorkspace,
                                    workspace.ForwardWorkspaceSize, desc.WeightsGradDescriptor, dw.GetDataPointer(),
                                    workspace.HelperWorkspace, workspace.HelperWorkspaceSize);

   assert(status == CUDNN_STATUS_SUCCESS);
   CUDNNCHECK(status);
#endif

}
775
776
777template<typename AFloat>
778void TCudnn<AFloat>::Rearrange(Tensor_t & y, const Tensor_t & x) {
779
780 AFloat alpha = 1;
781 AFloat beta = 0;
782 cudnnHandle_t cudnnHandle = x.GetCudnnHandle();
783 // x can be a tensor of dimension 3 or dimension 4
784 Tensor_t tmp = x;
785 TensorDescriptor_t d = tmp.GetTensorDescriptor();
786 int n = 0;
787 int dims[4];
788 int strides[4];
789 cudnnDataType_t dataType;
790 cudnnGetTensorNdDescriptor(d,tmp.GetNDim() , &dataType, &n, dims, strides);
791 assert(n >=3);
792
793 // assume x shape is B x T x S or B x T x 1 x S and y shape is T x B x S
794 const int xNdim = 3;
795 auto outputShape = y.GetShape();
796 assert(xNdim == y.GetNDim());
797 // swap from x to y first 2 dimension
798 assert(outputShape[0] = dims[1]); // T
799 assert(outputShape[1] == dims[0]); // B
800 assert(outputShape[2] == (n ==4) ? dims[3] : dims[2]); // S
801 if (n==4) assert(dims[2] == 1);
802
803
804 // input stride of T is S and of B is TxS
805 int xStrides[xNdim] = { (int) outputShape[2], (int)(outputShape[2] * outputShape[0]), 1 };
806 int xDims[xNdim];
807 for (int i = 0; i < xNdim; ++i)
808 xDims[i] = outputShape[i];
809
810 cudnnStatus_t status = cudnnSetTensorNdDescriptor(d, dataType, xNdim, xDims, xStrides);
811 assert(status == CUDNN_STATUS_SUCCESS);
812 CUDNNCHECK(status);
813 status = cudnnTransformTensor(cudnnHandle, &alpha, d, x.GetDataPointer() , &beta,
814 y.GetTensorDescriptor(), y.GetDataPointer());
815 assert(status == CUDNN_STATUS_SUCCESS);
816 CUDNNCHECK(status);
817
818 // reset original descriptor in tensor x
819 status = cudnnSetTensorNdDescriptor(d, dataType, n, dims, strides);
820 assert(status == CUDNN_STATUS_SUCCESS);
821
822 //PrintTensor(x, "x as B x T x S");
823 //PrintTensor(y, "y as T x B x S");
824}
825
826} // namespace DNN
827} // namespace TMVA
#define d(i)
Definition RSha256.hxx:102
#define b(i)
Definition RSha256.hxx:100
size_t size(const MatrixT &matrix)
retrieve the size of a square matrix
#define R__ASSERT(e)
Checks condition e and reports a fatal error if it's false.
Definition TError.h:125
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void data
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h offset
Option_t Option_t TPoint TPoint const char mode
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h Atom_t Int_t ULong_t ULong_t unsigned char prop_list Atom_t Atom_t Atom_t Time_t format
double beta(double x, double y)
Calculates the beta function.
Double_t y[n]
Definition legend1.C:17
Double_t x[n]
Definition legend1.C:17
const Int_t n
Definition legend1.C:16
create variable transformations