#ifndef TMVA_DNN_TENSORDATALOADER
#define TMVA_DNN_TENSORDATALOADER

using TensorInput =
   std::tuple<const std::vector<TMatrixT<Double_t>> &, const TMatrixT<Double_t> &, const TMatrixT<Double_t> &>;
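TensorInput only stores references to matrices that already exist elsewhere. A minimal sketch of assembling such a tuple, assuming the surrounding code can see the typedef; the matrix names and shapes below are illustrative, not part of the header:

   #include "TMatrixT.h"
   #include <tuple>
   #include <vector>

   // Minimal sketch (shapes and names are assumptions): bundle existing ROOT
   // matrices into a TensorInput tuple. The tuple holds const references only,
   // so the matrices must outlive it.
   void MakeTensorInputExample()
   {
      std::vector<TMatrixT<Double_t>> inputMatrices(50, TMatrixT<Double_t>(1, 20)); // 50 samples, 1 x 20 each
      TMatrixT<Double_t> outputMatrix(50, 2); // ground truth, one row per sample
      TMatrixT<Double_t> weightMatrix(50, 1); // per-sample event weights

      TensorInput tensorInput(inputMatrices, outputMatrix, weightMatrix);
      (void)tensorInput; // would typically be handed to a TTensorDataLoader<TensorInput, Architecture_t>
   }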
template <typename Architecture_t>
class TTensorBatch {
   using Matrix_t = typename Architecture_t::Matrix_t;
   // ... (members listed in the reference below)
};
template <typename Data_t, typename Architecture_t>
class TTensorDataLoader;

template <typename Data_t, typename Architecture_t>
class TTensorBatchIterator {
   TTensorBatchIterator(TTensorDataLoader<Data_t, Architecture_t> &tensorDataLoader, size_t index = 0)
      : fTensorDataLoader(tensorDataLoader), fBatchIndex(index)
   {
      // Nothing to do here.
   }
   // ... (operator*, operator++ and operator!= listed in the reference below)
};
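operator*(), operator++() and operator!=() together give just enough behaviour for a plain batch loop. A minimal sketch of driving the iterator directly; the helper name RunOneEpoch, the loader instance and nBatches (typically nSamples / batchSize) are assumptions of this example:

   // Minimal sketch; Data_t, Architecture_t, the loader instance and nBatches
   // are assumptions of this example, not part of the header.
   template <typename Data_t, typename Architecture_t>
   void RunOneEpoch(TTensorDataLoader<Data_t, Architecture_t> &loader, size_t nBatches)
   {
      TTensorBatchIterator<Data_t, Architecture_t> it(loader);            // batch index 0
      TTensorBatchIterator<Data_t, Architecture_t> end(loader, nBatches); // one past the last batch
      for (; it != end; ++it) {
         TTensorBatch<Architecture_t> batch = *it; // pulls the next batch from the loader
         // ... run the forward/backward pass on batch ...
      }
   }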
template <typename Data_t, typename Architecture_t>
class TTensorDataLoader {
   using Matrix_t = typename Architecture_t::Matrix_t;
   // ...

   /// Constructor.
   TTensorDataLoader(const Data_t &data, size_t nSamples, size_t batchSize, size_t batchDepth, size_t batchHeight,
                     size_t batchWidth, size_t nOutputFeatures, size_t nStreams = 1);
   // ...

   /// Shuffle the order of the samples in the batch. Only the sample indices
   /// are shuffled; no input data is moved.
   template <typename RNG>
   void Shuffle(RNG &rng);
   // ...
};
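A sketch of constructing a loader; the helper name MakeLoader, the architecture type and the chosen batch geometry (how depth, height and width map onto the network input) are assumptions and depend on the backend:

   // Minimal sketch; Architecture_t, the TensorInput 'data' object and the
   // batch geometry below are assumptions of this example, not part of the header.
   template <typename Architecture_t>
   TTensorDataLoader<TensorInput, Architecture_t> MakeLoader(const TensorInput &data)
   {
      return TTensorDataLoader<TensorInput, Architecture_t>(
         data,
         1000, // nSamples: total samples in the dataset
         100,  // batchSize: samples per batch
         100,  // batchDepth: matrices per batch tensor
         1,    // batchHeight: rows per matrix
         20,   // batchWidth: columns per matrix
         2,    // nOutputFeatures: outputs of the classifier/regressor
         2);   // nStreams: host/device buffer pairs (defaults to 1)
   }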
template <typename Architecture_t>
TTensorBatch<Architecture_t>::TTensorBatch(std::vector<Matrix_t> &inputTensor, Matrix_t &outputMatrix,
                                           Matrix_t &weightMatrix)
   : fInputTensor(inputTensor), fOutputMatrix(outputMatrix), fWeightMatrix(weightMatrix)
{
   // Nothing to do here.
}
template <typename Data_t, typename Architecture_t>
TTensorDataLoader<Data_t, Architecture_t>::TTensorDataLoader(const Data_t &data, size_t nSamples, size_t batchSize,
                                                              size_t batchDepth, size_t batchHeight, size_t batchWidth,
                                                              size_t nOutputFeatures, size_t nStreams)
   : fData(data), fNSamples(nSamples), fBatchSize(batchSize), fBatchDepth(batchDepth), fBatchHeight(batchHeight),
     fBatchWidth(batchWidth), fNOutputFeatures(nOutputFeatures), fBatchIndex(0), fNStreams(nStreams), fDeviceBuffers(),
     fHostBuffers(), fSampleIndices()
{
   // ...
}
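The elided body is where the fNStreams host/device buffer pairs and the fSampleIndices vector are presumably prepared. A rough worked example of the per-stream buffer size, consistent with the sub-buffer offsets used in GetTensorBatch() below; the concrete numbers and the per-sample weight size are assumptions:

   // Assumed batch geometry: batchDepth = 100, batchHeight = 1, batchWidth = 20,
   // batchSize = 100, nOutputFeatures = 2.
   //   input tensor size : 100 * 1 * 20 = 2000 elements
   //   output matrix size: 100 * 2     =  200 elements
   //   weight size       : ~100 elements (roughly one weight per sample, assumed)
   // Each of the nStreams buffer pairs must hold all three regions back to back,
   // i.e. about 2300 elements per host buffer and per device buffer.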
template <typename Data_t, typename Architecture_t>
TTensorBatch<Architecture_t> TTensorDataLoader<Data_t, Architecture_t>::GetTensorBatch()
{
   // ... (select the host/device buffer pair for this batch and compute
   //      inputTensorSize, outputMatrixSize and weightMatrixSize)

   HostBuffer_t inputHostBuffer = hostBuffer.GetSubBuffer(0, inputTensorSize);
   HostBuffer_t outputHostBuffer = hostBuffer.GetSubBuffer(inputTensorSize, outputMatrixSize);
   HostBuffer_t weightHostBuffer = hostBuffer.GetSubBuffer(inputTensorSize + outputMatrixSize, weightMatrixSize);

   DeviceBuffer_t inputDeviceBuffer = deviceBuffer.GetSubBuffer(0, inputTensorSize);
   DeviceBuffer_t outputDeviceBuffer = deviceBuffer.GetSubBuffer(inputTensorSize, outputMatrixSize);
   DeviceBuffer_t weightDeviceBuffer = deviceBuffer.GetSubBuffer(inputTensorSize + outputMatrixSize, weightMatrixSize);

   // ... (copy the selected samples into the host buffer)

   deviceBuffer.CopyFrom(hostBuffer);

   std::vector<Matrix_t> inputTensor;
   size_t jump = fBatchHeight * fBatchWidth;
   for (size_t i = 0; i < fBatchDepth; i++) {
      DeviceBuffer_t subInputDeviceBuffer = inputDeviceBuffer.GetSubBuffer(i * jump, jump);
      inputTensor.emplace_back(subInputDeviceBuffer, fBatchHeight, fBatchWidth);
   }
   Matrix_t outputMatrix(outputDeviceBuffer, fBatchSize, fNOutputFeatures);
   Matrix_t weightMatrix(weightDeviceBuffer, fBatchSize, fNOutputFeatures);

   // ...
   return TTensorBatch<Architecture_t>(inputTensor, outputMatrix, weightMatrix);
}
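A sketch of consuming the returned batch; the helper name ConsumeOneBatch and the fully constructed loader are assumptions of this example:

   template <typename Data_t, typename Architecture_t>
   void ConsumeOneBatch(TTensorDataLoader<Data_t, Architecture_t> &loader)
   {
      using Matrix_t = typename Architecture_t::Matrix_t;

      TTensorBatch<Architecture_t> batch = loader.GetTensorBatch();
      std::vector<Matrix_t> &input = batch.GetInput(); // batchDepth matrices of batchHeight x batchWidth
      Matrix_t &output  = batch.GetOutput();           // batchSize x nOutputFeatures ground truth
      Matrix_t &weights = batch.GetWeights();          // per-event weights
      // ... feed input, output and weights to the network and loss ...
   }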
template <typename Data_t, typename Architecture_t>
template <typename RNG>
void TTensorDataLoader<Data_t, Architecture_t>::Shuffle(RNG &rng)
{
   std::shuffle(fSampleIndices.begin(), fSampleIndices.end(), rng);
}
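Because Shuffle() is templated on the RNG type, any generator usable with std::shuffle can be passed in. A minimal sketch; the helper name ReshuffleEpoch is an assumption:

   #include <random>

   // Assumed helper (not part of the header): reshuffle the sample order once per epoch.
   template <typename Data_t, typename Architecture_t>
   void ReshuffleEpoch(TTensorDataLoader<Data_t, Architecture_t> &loader, std::mt19937 &rng)
   {
      loader.Shuffle(rng); // reorders only the sample indices, not the data itself
   }

A single engine, for example std::mt19937 seeded from std::random_device, can be constructed once and reused across epochs.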
Member reference for the types and classes above:

Namespace-level typedefs:

std::tuple<const std::vector<TMatrixT<Double_t>> &, const TMatrixT<Double_t> &, const TMatrixT<Double_t> &> TensorInput
std::tuple<const std::vector<Event *> &, const DataSetInfo &> TMVAInput_t
   (DataSetInfo is the class that contains all the data information.)
typename std::vector<size_t>::iterator IndexIterator_t

TTensorBatch<Architecture_t>:

typename Architecture_t::Matrix_t Matrix_t
TTensorBatch(std::vector<Matrix_t> &, Matrix_t &, Matrix_t &)
TTensorBatch &operator=(const TTensorBatch &) = default
std::vector<Matrix_t> &GetInput()
   Return the tensor representing the input data.
Matrix_t &GetOutput()
   Return the matrix representing the output data.
Matrix_t &GetWeights()
   Return the matrix holding the event weights.
std::vector<Matrix_t> fInputTensor
   The input tensor batch, one matrix per input.
Matrix_t fOutputMatrix
   The output matrix representing the ground truth.

TTensorBatchIterator<Data_t, Architecture_t>:

TTensorBatchIterator(TTensorDataLoader<Data_t, Architecture_t> &tensorDataLoader, size_t index = 0)
TTensorBatch<Architecture_t> operator*()
TTensorBatchIterator operator++()
bool operator!=(const TTensorBatchIterator &other)
TTensorDataLoader<Data_t, Architecture_t> &fTensorDataLoader

TTensorDataLoader<Data_t, Architecture_t>:

typename Architecture_t::Matrix_t Matrix_t
typename Architecture_t::HostBuffer_t HostBuffer_t
typename Architecture_t::DeviceBuffer_t DeviceBuffer_t
TTensorDataLoader(const Data_t &data, size_t nSamples, size_t batchSize, size_t batchDepth, size_t batchHeight, size_t batchWidth, size_t nOutputFeatures, size_t nStreams = 1)
   Constructor.
void CopyTensorInput(HostBuffer_t &buffer, IndexIterator_t begin)
   Copy input tensor into the given host buffer.
void CopyTensorOutput(HostBuffer_t &buffer, IndexIterator_t begin)
   Copy output matrix into the given host buffer.
void CopyTensorWeights(HostBuffer_t &buffer, IndexIterator_t begin)
   Copy weight matrix into the given host buffer.
void Shuffle(RNG &rng)
   Shuffle the order of the samples in the batch; only the sample indices are shuffled, no input data is moved.
TTensorBatch<Architecture_t> GetTensorBatch()
   Return the next batch from the training set.
const Data_t &fData
   The data that should be loaded in the batches.
size_t fNSamples
   The total number of samples in the dataset.
size_t fBatchSize
   The size of a batch.
size_t fBatchDepth
   The number of matrices in the tensor.
size_t fBatchHeight
   The number of rows in each matrix.
size_t fBatchWidth
   The number of columns in each matrix.
size_t fNOutputFeatures
   The number of outputs from the classifier/regressor.
size_t fBatchIndex
   The index of the batch when there are multiple batches in parallel.
size_t fNStreams
   Number of buffer pairs.
std::vector<DeviceBuffer_t> fDeviceBuffers
   The device buffers used to keep the input, output and weight data.
std::vector<HostBuffer_t> fHostBuffers
   The host buffers used to load the input, output and weight data.
std::vector<size_t> fSampleIndices
   Ordering of the samples in the epoch.
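Putting the pieces together, a full training epoch might be sketched as follows. This assumes the loader exposes begin()/end() returning TTensorBatchIterator, which is not shown in the fragments above; the helper name TrainOneEpoch and the trainOnBatch callback are placeholders:

   #include <random>

   // Hypothetical epoch driver; Data_t, Architecture_t and trainOnBatch are placeholders.
   template <typename Data_t, typename Architecture_t, typename TrainStep_t>
   void TrainOneEpoch(TTensorDataLoader<Data_t, Architecture_t> &loader, TrainStep_t trainOnBatch, std::mt19937 &rng)
   {
      loader.Shuffle(rng); // new random sample order for this epoch

      for (auto batch : loader) { // assumes begin()/end() yield TTensorBatchIterator
         trainOnBatch(batch.GetInput(), batch.GetOutput(), batch.GetWeights());
      }
   }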