// Templated member definitions of TCpuBuffer<AReal>: the TDestructor deleter
// operator()(AReal **), the buffer constructor, GetSubBuffer, CopyTo and CopyFrom
// (bodies omitted).
for (size_t i = 0; i < batchSize; i++) {
   size_t sampleIndex = *sampleIterator;
   for (size_t j = 0; j < n; j++) {
      size_t bufferIndex = j * batchSize + i;
      buffer[bufferIndex] = static_cast<Float_t>(inputMatrix(sampleIndex, j));
   }
   sampleIterator++;
}
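// The j * batchSize + i indexing writes the batch in column-major order, so the
// buffer can be viewed as a (batchSize x n) matrix with one contiguous run of
// batchSize values per input feature. Illustrative sketch (not from this file):
// with batchSize = 2 and n = 3 features the buffer ends up as
//    { x(s0,0), x(s1,0), x(s0,1), x(s1,1), x(s0,2), x(s1,2) }
// where s0 and s1 are the two sampled row indices.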
for (size_t i = 0; i < batchSize; i++) {
   size_t sampleIndex = *sampleIterator;
   for (size_t j = 0; j < n; j++) {
      size_t bufferIndex = j * batchSize + i;
      buffer[bufferIndex] = static_cast<Float_t>(outputMatrix(sampleIndex, j));
   }
   sampleIterator++;
}
for (size_t i = 0; i < batchSize; i++) {
   size_t sampleIndex = *sampleIterator;
   buffer[i] = static_cast<Float_t>(outputMatrix(sampleIndex, 0));
   sampleIterator++;
}
for (size_t i = 0; i < batchSize; i++) {
   size_t sampleIndex = *sampleIterator;
   for (size_t j = 0; j < n; j++) {
      size_t bufferIndex = j * batchSize + i;
      buffer[bufferIndex] = inputMatrix(sampleIndex, j);
   }
   sampleIterator++;
}
for (size_t i = 0; i < batchSize; i++) {
   size_t sampleIndex = *sampleIterator;
   for (size_t j = 0; j < n; j++) {
      size_t bufferIndex = j * batchSize + i;
      buffer[bufferIndex] = outputMatrix(sampleIndex, j);
   }
   sampleIterator++;
}
for (size_t i = 0; i < batchSize; i++) {
   size_t sampleIndex = *sampleIterator;
   buffer[i] = static_cast<Double_t>(outputMatrix(sampleIndex, 0));
   sampleIterator++;
}
Event *event = std::get<0>(fData)[0];
size_t n = event->GetNVariables();

for (size_t i = 0; i < batchSize; i++) {
   size_t sampleIndex = *sampleIterator++;
   event = std::get<0>(fData)[sampleIndex];
   for (size_t j = 0; j < n; j++) {
      size_t bufferIndex = j * batchSize + i;
      buffer[bufferIndex] = event->GetValue(j);
   }
}
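// For TMVAInput_t the batch is filled from TMVA::Event objects rather than a
// matrix: the sample iterator selects the event and event->GetValue(j) supplies
// the j-th input variable, written with the same column-major indexing as above.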
size_t n = buffer.GetSize() / batchSize;

for (size_t i = 0; i < batchSize; i++) {
   size_t sampleIndex = *sampleIterator++;
   Event *event = std::get<0>(fData)[sampleIndex];
   for (size_t j = 0; j < n; j++) {
      size_t bufferIndex = j * batchSize + i;
      if (event->GetNTargets() == 0) {
         // Classification: one-hot encode the event class.
         buffer[bufferIndex] = 0.0;
         if (j == event->GetClass()) {
            buffer[bufferIndex] = 1.0;
         }
      } else {
         // Regression: copy the j-th target value.
         buffer[bufferIndex] = static_cast<Float_t>(event->GetTarget(j));
      }
   }
}
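// An event without regression targets is treated as a classification sample:
// its n output columns form a one-hot row with a 1 in the column given by
// event->GetClass() and 0 elsewhere. Illustrative sketch (not from this file):
// with n = 4 outputs, an event of class 2 contributes the row {0, 0, 1, 0},
// while a regression event contributes {GetTarget(0), ..., GetTarget(3)}.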
for (size_t i = 0; i < batchSize; i++) {
   size_t sampleIndex = *sampleIterator++;
   Event *event = std::get<0>(fData)[sampleIndex];
   buffer[i] = event->GetWeight();
}
Event *event = std::get<0>(fData)[0];
size_t n = event->GetNVariables();

for (size_t i = 0; i < batchSize; i++) {
   size_t sampleIndex = *sampleIterator++;
   event = std::get<0>(fData)[sampleIndex];
   for (size_t j = 0; j < n; j++) {
      size_t bufferIndex = j * batchSize + i;
      buffer[bufferIndex] = static_cast<Float_t>(event->GetValue(j));
   }
}
size_t n = buffer.GetSize() / batchSize;

for (size_t i = 0; i < batchSize; i++) {
   size_t sampleIndex = *sampleIterator++;
   Event *event = std::get<0>(fData)[sampleIndex];
   for (size_t j = 0; j < n; j++) {
      size_t bufferIndex = j * batchSize + i;
      if (event->GetNTargets() == 0) {
         // Classification: one-hot encode the event class.
         buffer[bufferIndex] = 0.0;
         if (j == event->GetClass()) {
            buffer[bufferIndex] = 1.0;
         }
      } else {
         // Regression: copy the j-th target value.
         buffer[bufferIndex] = static_cast<Float_t>(event->GetTarget(j));
      }
   }
}
for (size_t i = 0; i < batchSize; i++) {
   size_t sampleIndex = *sampleIterator++;
   Event *event = std::get<0>(fData)[sampleIndex];
   buffer[i] = static_cast<Float_t>(event->GetWeight());
}
const std::vector<TMatrixT<Double_t>> &inputTensor = std::get<0>(fData);

if (fBatchDepth == 1) {
   for (size_t i = 0; i < fBatchHeight; i++) {
      size_t sampleIndex = *sampleIterator;
      for (size_t j = 0; j < fBatchWidth; j++) {
         size_t bufferIndex = j * fBatchHeight + i;
         buffer[bufferIndex] = static_cast<Float_t>(inputTensor[0](sampleIndex, j));
      }
      sampleIterator++;
   }
} else {
   for (size_t i = 0; i < fBatchDepth; i++) {
      size_t sampleIndex = *sampleIterator;
      for (size_t j = 0; j < fBatchHeight; j++) {
         for (size_t k = 0; k < fBatchWidth; k++) {
            size_t bufferIndex = i * fBatchHeight * fBatchWidth + k * fBatchHeight + j;
            buffer[bufferIndex] = static_cast<Float_t>(inputTensor[sampleIndex](j, k));
         }
      }
      sampleIterator++;
   }
}
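// When fBatchDepth == 1 the batch is a single fBatchHeight x fBatchWidth matrix
// filled with one sampled row per iteration, in column-major order. Otherwise
// each sample occupies one depth slice, and the index
// i * H * W + k * H + j (H = fBatchHeight, W = fBatchWidth) again stores each
// H x W slice column by column.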
for (size_t i = 0; i < fBatchSize; i++) {
   size_t sampleIndex = *sampleIterator;
   for (size_t j = 0; j < n; j++) {
      size_t bufferIndex = j * fBatchSize + i;
      buffer[bufferIndex] = static_cast<Float_t>(outputMatrix(sampleIndex, j));
   }
   sampleIterator++;
}
for (size_t i = 0; i < fBatchSize; i++) {
   size_t sampleIndex = *sampleIterator;
   buffer[i] = static_cast<Float_t>(outputMatrix(sampleIndex, 0));
   sampleIterator++;
}
DeviceBufferTuple DeviceBuffers = CopyTensorBatches();

Tensor_t inputTensor(std::get<0>(DeviceBuffers), {fBatchHeight, fBatchWidth, fBatchSize});
Matrix_t outputMatrix(std::get<1>(DeviceBuffers), fBatchSize, fNOutputFeatures);
Matrix_t weightMatrix(std::get<2>(DeviceBuffers), fBatchSize, fNOutputFeatures);
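// The three sub-buffers returned by CopyTensorBatches() are turned into the
// batch views used downstream: an input tensor of shape
// {fBatchHeight, fBatchWidth, fBatchSize} plus fBatchSize x fNOutputFeatures
// output and weight matrices (only the first weight column is filled).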
const std::vector<TMatrixT<Double_t>> &inputTensor = std::get<0>(fData);

if (fBatchDepth == 1) {
   for (size_t i = 0; i < fBatchHeight; i++) {
      size_t sampleIndex = *sampleIterator;
      for (size_t j = 0; j < fBatchWidth; j++) {
         size_t bufferIndex = j * fBatchHeight + i;
         buffer[bufferIndex] = inputTensor[0](sampleIndex, j);
      }
      sampleIterator++;
   }
} else {
   for (size_t i = 0; i < fBatchDepth; i++) {
      size_t sampleIndex = *sampleIterator;
      for (size_t j = 0; j < fBatchHeight; j++) {
         for (size_t k = 0; k < fBatchWidth; k++) {
            size_t bufferIndex = i * fBatchHeight * fBatchWidth + k * fBatchHeight + j;
            buffer[bufferIndex] = inputTensor[sampleIndex](j, k);
         }
      }
      sampleIterator++;
   }
}
for (size_t i = 0; i < fBatchSize; i++) {
   size_t sampleIndex = *sampleIterator;
   for (size_t j = 0; j < n; j++) {
      size_t bufferIndex = j * fBatchSize + i;
      buffer[bufferIndex] = outputMatrix(sampleIndex, j);
   }
   sampleIterator++;
}
for (size_t i = 0; i < fBatchSize; i++) {
   size_t sampleIndex = *sampleIterator;
   buffer[i] = static_cast<Double_t>(outputMatrix(sampleIndex, 0));
   sampleIterator++;
}
DeviceBufferTuple DeviceBuffers = CopyTensorBatches();

Tensor_t inputTensor(std::get<0>(DeviceBuffers), {fBatchHeight, fBatchWidth, fBatchSize});
Matrix_t outputMatrix(std::get<1>(DeviceBuffers), fBatchSize, fNOutputFeatures);
Matrix_t weightMatrix(std::get<2>(DeviceBuffers), fBatchSize, fNOutputFeatures);
if (fBatchDepth == 1 && fBatchHeight == fBatchSize) {
   for (size_t i = 0; i < fBatchHeight; i++) {
      size_t sampleIndex = *sampleIterator;
      Event *event = std::get<0>(fData)[sampleIndex];
      for (size_t j = 0; j < fBatchWidth; j++) {
         size_t bufferIndex = j * fBatchHeight + i;
         buffer[bufferIndex] = event->GetValue(j);
      }
      sampleIterator++;
   }
} else if (fBatchDepth == fBatchSize) {
   for (size_t i = 0; i < fBatchDepth; i++) {
      size_t sampleIndex = *sampleIterator;
      Event *event = std::get<0>(fData)[sampleIndex];
      for (size_t j = 0; j < fBatchHeight; j++) {
         for (size_t k = 0; k < fBatchWidth; k++) {
            size_t bufferIndex = i * fBatchHeight * fBatchWidth + k * fBatchHeight + j;
            buffer[bufferIndex] = event->GetValue(j * fBatchWidth + k);
         }
      }
      sampleIterator++;
   }
} else {
   Error("TTensorDataLoader", "Inconsistency between batch depth and batch size");
}
size_t n = buffer.GetSize() / fBatchSize;

for (size_t i = 0; i < fBatchSize; i++) {
   size_t sampleIndex = *sampleIterator++;
   Event *event = std::get<0>(fData)[sampleIndex];
   for (size_t j = 0; j < n; j++) {
      size_t bufferIndex = j * fBatchSize + i;
      if (event->GetNTargets() == 0) {
         // Classification: one-hot encode the event class.
         buffer[bufferIndex] = 0.0;
         if (j == event->GetClass()) {
            buffer[bufferIndex] = 1.0;
         }
      } else {
         // Regression: copy the j-th target value.
         buffer[bufferIndex] = static_cast<Float_t>(event->GetTarget(j));
      }
   }
}
for (size_t i = 0; i < fBatchSize; i++) {
   size_t sampleIndex = *sampleIterator++;
   Event *event = std::get<0>(fData)[sampleIndex];
   buffer[i] = event->GetWeight();
}
DeviceBufferTuple DeviceBuffers = CopyTensorBatches();

Tensor_t inputTensor(std::get<0>(DeviceBuffers), {fBatchHeight, fBatchWidth, fBatchSize});
Matrix_t outputMatrix(std::get<1>(DeviceBuffers), fBatchSize, fNOutputFeatures);
Matrix_t weightMatrix(std::get<2>(DeviceBuffers), fBatchSize, fNOutputFeatures);
if (fBatchDepth == 1 && fBatchHeight == fBatchSize) {
   for (size_t i = 0; i < fBatchHeight; i++) {
      size_t sampleIndex = *sampleIterator;
      Event *event = std::get<0>(fData)[sampleIndex];
      for (size_t j = 0; j < fBatchWidth; j++) {
         size_t bufferIndex = j * fBatchHeight + i;
         buffer[bufferIndex] = event->GetValue(j);
      }
      sampleIterator++;
   }
} else if (fBatchDepth == fBatchSize) {
   for (size_t i = 0; i < fBatchDepth; i++) {
      size_t sampleIndex = *sampleIterator;
      Event *event = std::get<0>(fData)[sampleIndex];
      for (size_t j = 0; j < fBatchHeight; j++) {
         for (size_t k = 0; k < fBatchWidth; k++) {
            size_t bufferIndex = i * fBatchHeight * fBatchWidth + k * fBatchHeight + j;
            buffer[bufferIndex] = event->GetValue(j * fBatchWidth + k);
         }
      }
      sampleIterator++;
   }
} else {
   Error("TTensorDataLoader", "Inconsistency between batch depth and batch size");
}
size_t n = buffer.GetSize() / fBatchSize;

for (size_t i = 0; i < fBatchSize; i++) {
   size_t sampleIndex = *sampleIterator++;
   Event *event = std::get<0>(fData)[sampleIndex];
   for (size_t j = 0; j < n; j++) {
      size_t bufferIndex = j * fBatchSize + i;
      if (event->GetNTargets() == 0) {
         // Classification: one-hot encode the event class.
         buffer[bufferIndex] = 0.0;
         if (j == event->GetClass()) {
            buffer[bufferIndex] = 1.0;
         }
      } else {
         // Regression: copy the j-th target value.
         buffer[bufferIndex] = static_cast<Float_t>(event->GetTarget(j));
      }
   }
}
for (size_t i = 0; i < fBatchSize; i++) {
   size_t sampleIndex = *sampleIterator++;
   Event *event = std::get<0>(fData)[sampleIndex];
   buffer[i] = event->GetWeight();
}
DeviceBufferTuple DeviceBuffers = CopyTensorBatches();

Tensor_t inputTensor(std::get<0>(DeviceBuffers), {fBatchHeight, fBatchWidth, fBatchSize});
Matrix_t outputMatrix(std::get<1>(DeviceBuffers), fBatchSize, fNOutputFeatures);
Matrix_t weightMatrix(std::get<2>(DeviceBuffers), fBatchSize, fNOutputFeatures);
template class TCpuBuffer<Double_t>;
template class TCpuBuffer<Float_t>;

template class TTensorDataLoader<TensorInput, TCpu<Float_t>>;
template class TTensorDataLoader<TMVAInput_t, TCpu<Float_t>>;
template class TTensorDataLoader<TensorInput, TCpu<Double_t>>;
template class TTensorDataLoader<TMVAInput_t, TCpu<Double_t>>;
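// These explicit instantiations make the CPU buffer and the tensor data loader
// available for single- and double-precision backends and for both matrix/tensor
// (TensorInput) and TMVA Event (TMVAInput_t) data sources. A minimal usage
// sketch, assuming the batch-iteration interface declared in
// TMVA/DNN/TensorDataLoader.h (begin()/end() yielding batches with GetInput(),
// GetOutput() and GetWeights()); constructor arguments are omitted here because
// they depend on the ROOT version:
//
//    TMVA::DNN::TTensorDataLoader<TMVAInput_t, TMVA::DNN::TCpu<Float_t>> loader(/* data and batch geometry */);
//    for (auto batch : loader) {
//       auto &input   = batch.GetInput();    // Tensor_t of shape {fBatchHeight, fBatchWidth, fBatchSize}
//       auto &output  = batch.GetOutput();   // Matrix_t, fBatchSize x fNOutputFeatures
//       auto &weights = batch.GetWeights();  // Matrix_t, event weights in the first column
//       // ... feed the batch to the network ...
//    }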