RPageStorageFile.cxx
/// \file RPageStorageFile.cxx
/// \ingroup NTuple
/// \author Jakob Blomer <jblomer@cern.ch>
/// \date 2019-11-25

/*************************************************************************
 * Copyright (C) 1995-2019, Rene Brun and Fons Rademakers.               *
 * All rights reserved.                                                  *
 *                                                                       *
 * For the licensing terms see $ROOTSYS/LICENSE.                         *
 * For the list of contributors see $ROOTSYS/README/CREDITS.             *
 *************************************************************************/
#include <ROOT/RCluster.hxx>
#include <ROOT/RClusterPool.hxx>
#include <ROOT/RLogger.hxx>
#include <ROOT/RMiniFile.hxx>
#include <ROOT/RNTupleDescriptor.hxx>
#include <ROOT/RNTupleModel.hxx>
#include <ROOT/RNTupleSerialize.hxx>
#include <ROOT/RNTupleZip.hxx>
#include <ROOT/RPage.hxx>
#include <ROOT/RPageAllocator.hxx>
#include <ROOT/RPagePool.hxx>
#include <ROOT/RPageStorageFile.hxx>
#include <ROOT/RRawFile.hxx>
#include <ROOT/RRawFileTFile.hxx>
#include <ROOT/RNTupleTypes.hxx>
#include <ROOT/RNTupleUtils.hxx>

#include <RVersion.h>
#include <TDirectory.h>
#include <TError.h>

#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iterator>
#include <limits>
#include <utility>

#include <functional>
#include <mutex>

// (The RPageSinkFile constructors and the destructor are not shown in this listing.)

void ROOT::Internal::RPageSinkFile::InitImpl(unsigned char *serializedHeader, std::uint32_t length)
{
   auto zipBuffer = MakeUninitArray<unsigned char>(length);
   auto szZipHeader =
      RNTupleCompressor::Zip(serializedHeader, length, GetWriteOptions().GetCompression(), zipBuffer.get());
   fWriter->WriteNTupleHeader(zipBuffer.get(), szZipHeader, length);
}

ROOT::RNTupleLocator ROOT::Internal::RPageSinkFile::WriteSealedPage(const RPageStorage::RSealedPage &sealedPage,
                                                                    std::size_t bytesPacked)
{
   std::uint64_t offsetData;
   {
      RNTupleAtomicTimer timer(fCounters->fTimeWallWrite, fCounters->fTimeCpuWrite);
      offsetData = fWriter->WriteBlob(sealedPage.GetBuffer(), sealedPage.GetBufferSize(), bytesPacked);
   }

   RNTupleLocator result;
   result.SetPosition(offsetData);
   result.SetNBytesOnStorage(sealedPage.GetDataSize());
   fCounters->fNPageCommitted.Inc();
   fCounters->fSzWritePayload.Add(sealedPage.GetBufferSize());
   fNBytesCurrentCluster += sealedPage.GetBufferSize();
   return result;
}

ROOT::RNTupleLocator ROOT::Internal::RPageSinkFile::CommitPageImpl(ColumnHandle_t columnHandle, const RPage &page)
{
   auto element = columnHandle.fColumn->GetElement();
   RPageStorage::RSealedPage sealedPage;
   {
      RNTupleAtomicTimer timer(fCounters->fTimeWallZip, fCounters->fTimeCpuZip);
      sealedPage = SealPage(page, *element);
   }

   fCounters->fSzZip.Add(page.GetNBytes());
   return WriteSealedPage(sealedPage, element->GetPackedSize(page.GetNElements()));
}

ROOT::RNTupleLocator ROOT::Internal::RPageSinkFile::CommitSealedPageImpl(ROOT::DescriptorId_t physicalColumnId,
                                                                         const RPageStorage::RSealedPage &sealedPage)
{
   const auto nBits = fDescriptorBuilder.GetDescriptor().GetColumnDescriptor(physicalColumnId).GetBitsOnStorage();
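   // The packed size is the element count times the column's bits-on-storage, rounded up to full bytes:
   // for example, 1000 elements stored at 1 bit each pack into (1 * 1000 + 7) / 8 = 125 bytes.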
   const auto bytesPacked = (nBits * sealedPage.GetNElements() + 7) / 8;
   return WriteSealedPage(sealedPage, bytesPacked);
}

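// Subroutine of CommitSealedPageVImpl: writes the pages collected in `batch` into the single reserved blob
// (i.e. one TFile key), appends one locator per page to `locators`, and resets the batch.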
void ROOT::Internal::RPageSinkFile::CommitBatchOfPages(CommitBatch &batch, std::vector<RNTupleLocator> &locators)
{
   RNTupleAtomicTimer timer(fCounters->fTimeWallWrite, fCounters->fTimeCpuWrite);

   std::uint64_t offset = fWriter->ReserveBlob(batch.fSize, batch.fBytesPacked);

   locators.reserve(locators.size() + batch.fSealedPages.size());

   for (const auto *pagePtr : batch.fSealedPages) {
      fWriter->WriteIntoReservedBlob(pagePtr->GetBuffer(), pagePtr->GetBufferSize(), offset);
      RNTupleLocator locator;
      locator.SetPosition(offset);
      locator.SetNBytesOnStorage(pagePtr->GetDataSize());
      locators.push_back(locator);
      offset += pagePtr->GetBufferSize();
   }

   fCounters->fNPageCommitted.Add(batch.fSealedPages.size());
   fCounters->fSzWritePayload.Add(batch.fSize);
   fNBytesCurrentCluster += batch.fSize;

   batch.fSize = 0;
   batch.fBytesPacked = 0;
   batch.fSealedPages.clear();
}

std::vector<ROOT::RNTupleLocator>
ROOT::Internal::RPageSinkFile::CommitSealedPageVImpl(std::span<RPageStorage::RSealedPageGroup> ranges,
                                                     const std::vector<bool> &mask)
{
   const std::uint64_t maxKeySize = fOptions->GetMaxKeySize();
   CommitBatch batch{};
   std::vector<RNTupleLocator> locators;

   std::size_t iPage = 0;
   for (auto rangeIt = ranges.begin(); rangeIt != ranges.end(); ++rangeIt) {
      auto &range = *rangeIt;
      if (range.fFirst == range.fLast) {
         // Skip empty ranges, they might not have a physical column ID!
         continue;
      }

      const auto bitsOnStorage =
         fDescriptorBuilder.GetDescriptor().GetColumnDescriptor(range.fPhysicalColumnId).GetBitsOnStorage();

      for (auto sealedPageIt = range.fFirst; sealedPageIt != range.fLast; ++sealedPageIt, ++iPage) {
         if (!mask[iPage])
            continue;

         const auto bytesPacked = (bitsOnStorage * sealedPageIt->GetNElements() + 7) / 8;

         if (batch.fSize > 0 && batch.fSize + sealedPageIt->GetBufferSize() > maxKeySize) {
            /**
             * Adding this page would exceed maxKeySize. Since we always want to write into a single key
             * with vectorized writes, we commit the current set of pages before proceeding.
             * NOTE: we do this *before* checking if sealedPageIt->GetBufferSize() > maxKeySize to guarantee that
             * we always flush the current batch before doing an individual WriteBlob. This way we
             * preserve the assumption that a CommitBatch always contains a sequential set of pages.
             */
            CommitBatchOfPages(batch, locators);
         }

         if (sealedPageIt->GetBufferSize() > maxKeySize) {
            // This page alone is bigger than maxKeySize: save it by itself, since it will need to be
            // split into multiple keys.

            // Since this check implies the previous check on batchSize + newSize > maxSize, we should
            // already have committed the current batch before writing this page.
            assert(batch.fSize == 0);

            std::uint64_t offset =
               fWriter->WriteBlob(sealedPageIt->GetBuffer(), sealedPageIt->GetBufferSize(), bytesPacked);
            RNTupleLocator locator;
            locator.SetPosition(offset);
            locator.SetNBytesOnStorage(sealedPageIt->GetDataSize());
            locators.push_back(locator);

            fCounters->fNPageCommitted.Inc();
            fCounters->fSzWritePayload.Add(sealedPageIt->GetBufferSize());
            fNBytesCurrentCluster += sealedPageIt->GetBufferSize();

         } else {
            batch.fSealedPages.emplace_back(&(*sealedPageIt));
            batch.fSize += sealedPageIt->GetBufferSize();
            batch.fBytesPacked += bytesPacked;
         }
      }
   }

   if (batch.fSize > 0) {
      CommitBatchOfPages(batch, locators);
   }

   return locators;
}

std::uint64_t ROOT::Internal::RPageSinkFile::StageClusterImpl()
{
   auto result = fNBytesCurrentCluster;
   fNBytesCurrentCluster = 0;
   return result;
}

ROOT::RNTupleLocator ROOT::Internal::RPageSinkFile::CommitClusterGroupImpl(unsigned char *serializedPageList,
                                                                           std::uint32_t length)
{
   auto bufPageListZip = MakeUninitArray<unsigned char>(length);
   auto szPageListZip =
      RNTupleCompressor::Zip(serializedPageList, length, GetWriteOptions().GetCompression(), bufPageListZip.get());

   RNTupleLocator result;
   result.SetNBytesOnStorage(szPageListZip);
   result.SetPosition(fWriter->WriteBlob(bufPageListZip.get(), szPageListZip, length));
   return result;
}

void ROOT::Internal::RPageSinkFile::CommitDatasetImpl(unsigned char *serializedFooter, std::uint32_t length)
{
   fWriter->UpdateStreamerInfos(fDescriptorBuilder.BuildStreamerInfos());
   auto bufFooterZip = MakeUninitArray<unsigned char>(length);
   auto szFooterZip =
      RNTupleCompressor::Zip(serializedFooter, length, GetWriteOptions().GetCompression(), bufFooterZip.get());
   fWriter->WriteNTupleFooter(bufFooterZip.get(), szFooterZip, length);
   fWriter->Commit(GetWriteOptions().GetCompression());
}

////////////////////////////////////////////////////////////////////////////////

ROOT::Internal::RPageSourceFile::RPageSourceFile(std::string_view ntupleName, const ROOT::RNTupleReadOptions &opts)
   : RPageSource(ntupleName, opts),
     fClusterPool(
        std::make_unique<RClusterPool>(*this, ROOT::Internal::RNTupleReadOptionsManip::GetClusterBunchSize(opts)))
{
   EnableDefaultMetrics("RPageSourceFile");
}

ROOT::Internal::RPageSourceFile::RPageSourceFile(std::string_view ntupleName,
                                                 std::unique_ptr<ROOT::Internal::RRawFile> file,
                                                 const ROOT::RNTupleReadOptions &options)
   : RPageSourceFile(ntupleName, options)
{
   fFile = std::move(file);
   R__ASSERT(fFile);
   fReader = RMiniFileReader(fFile.get());
}

ROOT::Internal::RPageSourceFile::RPageSourceFile(std::string_view ntupleName, std::string_view path,
                                                 const ROOT::RNTupleReadOptions &options)
   : RPageSourceFile(ntupleName, ROOT::Internal::RRawFile::Create(path), options)
{
}

std::unique_ptr<ROOT::Internal::RPageSourceFile>
ROOT::Internal::RPageSourceFile::CreateFromAnchor(const RNTuple &anchor, const ROOT::RNTupleReadOptions &options)
{
   if (!anchor.fFile)
      throw RException(R__FAIL("This RNTuple object was not streamed from a ROOT file (TFile or descendant)"));

   std::unique_ptr<ROOT::Internal::RRawFile> rawFile;
   // For local TFiles, TDavixFile, and TNetXNGFile, we want to open a new RRawFile to take advantage of the faster
   // reading. We check the exact class name to avoid derived classes, either in ROOT (for example TMemFile) or in
   // experiment frameworks.
   std::string className = anchor.fFile->IsA()->GetName();
   auto url = anchor.fFile->GetEndpointUrl();
   auto protocol = std::string(url->GetProtocol());
   if (className == "TFile") {
      rawFile = ROOT::Internal::RRawFile::Create(url->GetFile());
   } else if (className == "TDavixFile" || className == "TNetXNGFile") {
      rawFile = ROOT::Internal::RRawFile::Create(url->GetUrl());
   } else {
      rawFile = std::make_unique<ROOT::Internal::RRawFileTFile>(anchor.fFile);
   }

   auto pageSource = std::make_unique<RPageSourceFile>("", std::move(rawFile), options);
   pageSource->fAnchor = anchor;
   pageSource->fNTupleName = pageSource->fDescriptorBuilder.GetDescriptor().GetName();
   return pageSource;
}
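// Typical use (illustrative sketch, not part of this source file): the anchor object is read from a ROOT file
// and then handed to CreateFromAnchor(), e.g.
//    auto f = std::unique_ptr<TFile>(TFile::Open("data.root"));   // assumed file name
//    auto anchor = f->Get<ROOT::RNTuple>("ntpl");                 // assumed RNTuple name
//    auto source = ROOT::Internal::RPageSourceFile::CreateFromAnchor(*anchor);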

ROOT::Internal::RPageSourceFile::~RPageSourceFile() = default;

void ROOT::Internal::RPageSourceFile::LoadStructureImpl()
{
   // If we constructed the page source with (ntuple name, path), we need to find the anchor first.
   // Otherwise, the page source was created by CreateFromAnchor().
   if (!fAnchor) {
      fAnchor = fReader.GetNTuple(fNTupleName).Unwrap();
   }
   fReader.SetMaxKeySize(fAnchor->GetMaxKeySize());

   fDescriptorBuilder.SetVersion(fAnchor->GetVersionEpoch(), fAnchor->GetVersionMajor(), fAnchor->GetVersionMinor(),
                                 fAnchor->GetVersionPatch());
   fDescriptorBuilder.SetOnDiskHeaderSize(fAnchor->GetNBytesHeader());
   fDescriptorBuilder.AddToOnDiskFooterSize(fAnchor->GetNBytesFooter());

   // Reserve enough space for the compressed and the uncompressed header/footer (see AttachImpl)
   const auto bufSize = fAnchor->GetNBytesHeader() + fAnchor->GetNBytesFooter() +
                        std::max(fAnchor->GetLenHeader(), fAnchor->GetLenFooter());
   fStructureBuffer.fBuffer = MakeUninitArray<unsigned char>(bufSize);
   fStructureBuffer.fPtrHeader = fStructureBuffer.fBuffer.get();
   fStructureBuffer.fPtrFooter = fStructureBuffer.fBuffer.get() + fAnchor->GetNBytesHeader();

   auto readvLimits = fFile->GetReadVLimits();
   // Never try to vectorize reads to a split key
   readvLimits.fMaxSingleSize = std::min<size_t>(readvLimits.fMaxSingleSize, fAnchor->GetMaxKeySize());

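   // Read the header and footer either with two individual reads or, if the limits of the raw file's
   // vector-read interface allow it, with a single vector read covering both byte ranges.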
   if ((readvLimits.fMaxReqs < 2) ||
       (std::max(fAnchor->GetNBytesHeader(), fAnchor->GetNBytesFooter()) > readvLimits.fMaxSingleSize) ||
       (fAnchor->GetNBytesHeader() + fAnchor->GetNBytesFooter() > readvLimits.fMaxTotalSize)) {
      RNTupleAtomicTimer timer(fCounters->fTimeWallRead, fCounters->fTimeCpuRead);
      fReader.ReadBuffer(fStructureBuffer.fPtrHeader, fAnchor->GetNBytesHeader(), fAnchor->GetSeekHeader());
      fReader.ReadBuffer(fStructureBuffer.fPtrFooter, fAnchor->GetNBytesFooter(), fAnchor->GetSeekFooter());
      fCounters->fNRead.Add(2);
   } else {
      RNTupleAtomicTimer timer(fCounters->fTimeWallRead, fCounters->fTimeCpuRead);
      R__ASSERT(fAnchor->GetNBytesHeader() < std::numeric_limits<std::size_t>::max());
      R__ASSERT(fAnchor->GetNBytesFooter() < std::numeric_limits<std::size_t>::max());
      ROOT::Internal::RRawFile::RIOVec readRequests[2] = {{fStructureBuffer.fPtrHeader, fAnchor->GetSeekHeader(),
                                                           static_cast<std::size_t>(fAnchor->GetNBytesHeader()), 0},
                                                          {fStructureBuffer.fPtrFooter, fAnchor->GetSeekFooter(),
                                                           static_cast<std::size_t>(fAnchor->GetNBytesFooter()), 0}};
      fFile->ReadV(readRequests, 2);
      fCounters->fNReadV.Inc();
   }
}

ROOT::RNTupleDescriptor ROOT::Internal::RPageSourceFile::AttachImpl(RNTupleSerializer::EDescriptorDeserializeMode mode)
{
   auto unzipBuf = reinterpret_cast<unsigned char *>(fStructureBuffer.fPtrFooter) + fAnchor->GetNBytesFooter();

   RNTupleDecompressor::Unzip(fStructureBuffer.fPtrHeader, fAnchor->GetNBytesHeader(), fAnchor->GetLenHeader(),
                              unzipBuf);
   RNTupleSerializer::DeserializeHeader(unzipBuf, fAnchor->GetLenHeader(), fDescriptorBuilder);

   RNTupleDecompressor::Unzip(fStructureBuffer.fPtrFooter, fAnchor->GetNBytesFooter(), fAnchor->GetLenFooter(),
                              unzipBuf);
   RNTupleSerializer::DeserializeFooter(unzipBuf, fAnchor->GetLenFooter(), fDescriptorBuilder);

   auto desc = fDescriptorBuilder.MoveDescriptor();

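   // For each cluster group, the scratch buffer holds both representations of the page list: the compressed
   // bytes are read into the tail of the buffer (after the first GetPageListLength() bytes) and decompressed
   // into the front, which is then deserialized.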
   std::vector<unsigned char> buffer;
   for (const auto &cgDesc : desc.GetClusterGroupIterable()) {
      buffer.resize(std::max<size_t>(buffer.size(),
                                     cgDesc.GetPageListLength() + cgDesc.GetPageListLocator().GetNBytesOnStorage()));
      auto *zipBuffer = buffer.data() + cgDesc.GetPageListLength();
      fReader.ReadBuffer(zipBuffer, cgDesc.GetPageListLocator().GetNBytesOnStorage(),
                         cgDesc.GetPageListLocator().GetPosition<std::uint64_t>());
      RNTupleDecompressor::Unzip(zipBuffer, cgDesc.GetPageListLocator().GetNBytesOnStorage(),
                                 cgDesc.GetPageListLength(), buffer.data());

      RNTupleSerializer::DeserializePageList(buffer.data(), cgDesc.GetPageListLength(), cgDesc.GetId(), desc, mode);
   }

   // For the page reads, we rely on the I/O scheduler to define the read requests
   fFile->SetBuffering(false);

   return desc;
}

void ROOT::Internal::RPageSourceFile::LoadSealedPage(ROOT::DescriptorId_t physicalColumnId,
                                                     RNTupleLocalIndex localIndex, RSealedPage &sealedPage)
{
   const auto clusterId = localIndex.GetClusterId();

   RClusterDescriptor::RPageInfoExtended pageInfo;
   {
      auto descriptorGuard = GetSharedDescriptorGuard();
      const auto &clusterDescriptor = descriptorGuard->GetClusterDescriptor(clusterId);
      pageInfo = clusterDescriptor.GetPageRange(physicalColumnId).Find(localIndex.GetIndexInCluster());
   }

   sealedPage.SetBufferSize(pageInfo.GetLocator().GetNBytesOnStorage() + pageInfo.HasChecksum() * kNBytesPageChecksum);
   sealedPage.SetNElements(pageInfo.GetNElements());
   sealedPage.SetHasChecksum(pageInfo.HasChecksum());
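   // When called without a pre-allocated buffer, only the size, element count, and checksum information are
   // filled in, so the caller can allocate a buffer of the right size and call this method again to read the data.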
   if (!sealedPage.GetBuffer())
      return;
   if (pageInfo.GetLocator().GetType() != RNTupleLocator::kTypePageZero) {
      fReader.ReadBuffer(const_cast<void *>(sealedPage.GetBuffer()), sealedPage.GetBufferSize(),
                         pageInfo.GetLocator().GetPosition<std::uint64_t>());
   } else {
      assert(!pageInfo.HasChecksum());
      memcpy(const_cast<void *>(sealedPage.GetBuffer()), ROOT::Internal::RPage::GetPageZeroBuffer(),
             sealedPage.GetBufferSize());
   }

   sealedPage.VerifyChecksumIfEnabled().ThrowOnError();
}

ROOT::Internal::RPageRef ROOT::Internal::RPageSourceFile::LoadPageImpl(ColumnHandle_t columnHandle,
                                                                       const RClusterInfo &clusterInfo,
                                                                       ROOT::NTupleSize_t idxInCluster)
{
   const auto columnId = columnHandle.fPhysicalId;
   const auto clusterId = clusterInfo.fClusterId;
   const auto pageInfo = clusterInfo.fPageInfo;

   const auto element = columnHandle.fColumn->GetElement();
   const auto elementSize = element->GetSize();
   const auto elementInMemoryType = element->GetIdentifier().fInMemoryType;

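   // Deferred columns have no on-disk data for the clusters written before they were added; such pages are
   // materialized below as zero-initialized in-memory pages instead of being read from the file.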
   if (pageInfo.GetLocator().GetType() == RNTupleLocator::kTypePageZero) {
      auto pageZero = fPageAllocator->NewPage(elementSize, pageInfo.GetNElements());
      pageZero.GrowUnchecked(pageInfo.GetNElements());
      memset(pageZero.GetBuffer(), 0, pageZero.GetNBytes());
      pageZero.SetWindow(clusterInfo.fColumnOffset + pageInfo.GetFirstElementIndex(),
                         RPage::RClusterInfo(clusterId, clusterInfo.fColumnOffset));
      return fPagePool.RegisterPage(std::move(pageZero), RPagePool::RKey{columnId, elementInMemoryType});
   }

   RSealedPage sealedPage;
   sealedPage.SetNElements(pageInfo.GetNElements());
   sealedPage.SetHasChecksum(pageInfo.HasChecksum());
   sealedPage.SetBufferSize(pageInfo.GetLocator().GetNBytesOnStorage() + pageInfo.HasChecksum() * kNBytesPageChecksum);
   std::unique_ptr<unsigned char[]> directReadBuffer; // only used if cluster pool is turned off

   if (fOptions.GetClusterCache() == ROOT::RNTupleReadOptions::EClusterCache::kOff) {
      directReadBuffer = MakeUninitArray<unsigned char>(sealedPage.GetBufferSize());
      {
         RNTupleAtomicTimer timer(fCounters->fTimeWallRead, fCounters->fTimeCpuRead);
         fReader.ReadBuffer(directReadBuffer.get(), sealedPage.GetBufferSize(),
                            pageInfo.GetLocator().GetPosition<std::uint64_t>());
      }
      fCounters->fNPageRead.Inc();
      fCounters->fNRead.Inc();
      fCounters->fSzReadPayload.Add(sealedPage.GetBufferSize());
      sealedPage.SetBuffer(directReadBuffer.get());
   } else {
      if (!fCurrentCluster || (fCurrentCluster->GetId() != clusterId) || !fCurrentCluster->ContainsColumn(columnId))
         fCurrentCluster = fClusterPool->GetCluster(clusterId, fActivePhysicalColumns.ToColumnSet());
      R__ASSERT(fCurrentCluster->ContainsColumn(columnId));

      auto cachedPageRef =
         fPagePool.GetPage(RPagePool::RKey{columnId, elementInMemoryType}, RNTupleLocalIndex(clusterId, idxInCluster));
      if (!cachedPageRef.Get().IsNull())
         return cachedPageRef;

      ROnDiskPage::Key key(columnId, pageInfo.GetPageNumber());
      auto onDiskPage = fCurrentCluster->GetOnDiskPage(key);
      R__ASSERT(onDiskPage && (sealedPage.GetBufferSize() == onDiskPage->GetSize()));
      sealedPage.SetBuffer(onDiskPage->GetAddress());
   }

   RPage newPage;
   {
      RNTupleAtomicTimer timer(fCounters->fTimeWallUnzip, fCounters->fTimeCpuUnzip);
      newPage = UnsealPage(sealedPage, *element).Unwrap();
      fCounters->fSzUnzip.Add(elementSize * pageInfo.GetNElements());
   }

   newPage.SetWindow(clusterInfo.fColumnOffset + pageInfo.GetFirstElementIndex(),
                     RPage::RClusterInfo(clusterId, clusterInfo.fColumnOffset));
   fCounters->fNPageUnsealed.Inc();
   return fPagePool.RegisterPage(std::move(newPage), RPagePool::RKey{columnId, elementInMemoryType});
}

std::unique_ptr<ROOT::Internal::RPageSource> ROOT::Internal::RPageSourceFile::CloneImpl() const
{
   auto clone = new RPageSourceFile(fNTupleName, fOptions);
   clone->fFile = fFile->Clone();
   clone->fReader = ROOT::Internal::RMiniFileReader(clone->fFile.get());
   return std::unique_ptr<RPageSourceFile>(clone);
}

std::unique_ptr<ROOT::Internal::RCluster>
ROOT::Internal::RPageSourceFile::PrepareSingleCluster(const ROOT::Internal::RCluster::RKey &clusterKey,
                                                      std::vector<ROOT::Internal::RRawFile::RIOVec> &readRequests)
{
   struct ROnDiskPageLocator {
      ROOT::DescriptorId_t fColumnId = 0;
      ROOT::NTupleSize_t fPageNo = 0;
      std::uint64_t fOffset = 0;
      std::uint64_t fSize = 0;
      std::size_t fBufPos = 0;
   };

   std::vector<ROnDiskPageLocator> onDiskPages;
   auto activeSize = 0;
   auto pageZeroMap = std::make_unique<ROnDiskPageMap>();
   PrepareLoadCluster(clusterKey, *pageZeroMap,
                      [&](ROOT::DescriptorId_t physicalColumnId, ROOT::NTupleSize_t pageNo,
                          const RClusterDescriptor::RPageInfo &pageInfo) {
                         const auto &pageLocator = pageInfo.GetLocator();
                         if (pageLocator.GetType() == RNTupleLocator::kTypeUnknown)
                            throw RException(R__FAIL("tried to read a page with an unknown locator"));
                         const auto nBytes =
                            pageLocator.GetNBytesOnStorage() + pageInfo.HasChecksum() * kNBytesPageChecksum;
                         activeSize += nBytes;
                         onDiskPages.push_back(
                            {physicalColumnId, pageNo, pageLocator.GetPosition<std::uint64_t>(), nBytes, 0});
                      });

   // Linearize the page requests by file offset
   std::sort(onDiskPages.begin(), onDiskPages.end(),
             [](const ROnDiskPageLocator &a, const ROnDiskPageLocator &b) { return a.fOffset < b.fOffset; });

   // In order to coalesce close-by pages, we collect the sizes of the gaps between pages on disk. We then order
   // the gaps by size, sum them up and find a cutoff for the largest gap that we tolerate when coalescing pages.
   // The size of the cutoff is given by the fraction of extra bytes we are willing to read in order to reduce
   // the number of read requests. We thus schedule the lowest number of requests given a tolerable fraction
   // of extra bytes.
   // TODO(jblomer): Eventually we may want to select the parameter at runtime according to link latency and speed,
   // memory consumption, device block size.
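   // Illustrative example (numbers not from the original sources): for sorted gaps {0, 0, 100, 100, 5000} and
   // maxOverhead = 300, the loop below accepts the two 100-byte gaps (szExtra = 200) and stops at the 5000-byte
   // gap, leaving gapCut = 100; pages separated by at most 100 bytes are then coalesced into one read request
   // (subject to the maxKeySize check further down).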
   float maxOverhead = 0.25 * float(activeSize);
   std::vector<std::size_t> gaps;
   if (onDiskPages.size())
      gaps.reserve(onDiskPages.size() - 1);
   for (unsigned i = 1; i < onDiskPages.size(); ++i) {
      std::int64_t gap =
         static_cast<int64_t>(onDiskPages[i].fOffset) - (onDiskPages[i - 1].fSize + onDiskPages[i - 1].fOffset);
      gaps.emplace_back(std::max(gap, std::int64_t(0)));
      // If the pages overlap, subtract the overlapped bytes from `activeSize`
      activeSize += std::min(gap, std::int64_t(0));
   }
   std::sort(gaps.begin(), gaps.end());
   std::size_t gapCut = 0;
   std::size_t currentGap = 0;
   float szExtra = 0.0;
   for (auto g : gaps) {
      if (g != currentGap) {
         gapCut = currentGap;
         currentGap = g;
      }
      szExtra += g;
      if (szExtra > maxOverhead)
         break;
   }

   // In a first step, we coalesce the read requests and calculate the cluster buffer size.
   // In a second step, we'll fix up the memory destinations for the read calls given the
   // address of the allocated buffer. We must not touch, however, the read requests from previous
   // calls to PrepareSingleCluster()
   const auto currentReadRequestIdx = readRequests.size();

   ROOT::Internal::RRawFile::RIOVec req;
   // To simplify the first loop iteration, pretend an empty request starting at the first page's fOffset.
   if (!onDiskPages.empty())
      req.fOffset = onDiskPages[0].fOffset;
   std::size_t szPayload = 0;
   std::size_t szOverhead = 0;
   const std::uint64_t maxKeySize = fReader.GetMaxKeySize();
   for (auto &s : onDiskPages) {
      R__ASSERT(s.fSize > 0);
      const std::int64_t readUpTo = req.fOffset + req.fSize;
      // Note: byte ranges of pages may overlap
      const std::uint64_t overhead = std::max(static_cast<std::int64_t>(s.fOffset) - readUpTo, std::int64_t(0));
      const std::uint64_t extent = std::max(static_cast<std::int64_t>(s.fOffset + s.fSize) - readUpTo, std::int64_t(0));
      if (req.fSize + extent < maxKeySize && overhead <= gapCut) {
         szPayload += s.fSize;
         szOverhead += overhead;
         s.fBufPos = reinterpret_cast<intptr_t>(req.fBuffer) + s.fOffset - req.fOffset;
         req.fSize += extent;
         continue;
      }

      // close the current request and open a new one
      if (req.fSize > 0)
         readRequests.emplace_back(req);

      req.fBuffer = reinterpret_cast<unsigned char *>(req.fBuffer) + req.fSize;
      s.fBufPos = reinterpret_cast<intptr_t>(req.fBuffer);

      szPayload += s.fSize;
      req.fOffset = s.fOffset;
      req.fSize = s.fSize;
   }
   readRequests.emplace_back(req);
   fCounters->fSzReadPayload.Add(szPayload);
   fCounters->fSzReadOverhead.Add(szOverhead);

   // Register the on disk pages in a page map
   auto buffer = new unsigned char[reinterpret_cast<intptr_t>(req.fBuffer) + req.fSize];
   auto pageMap = std::make_unique<ROOT::Internal::ROnDiskPageMapHeap>(std::unique_ptr<unsigned char[]>(buffer));
   for (const auto &s : onDiskPages) {
      ROnDiskPage::Key key(s.fColumnId, s.fPageNo);
      pageMap->Register(key, ROnDiskPage(buffer + s.fBufPos, s.fSize));
   }
   fCounters->fNPageRead.Add(onDiskPages.size());
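   // Until now, RIOVec::fBuffer held offsets relative to the start of the cluster buffer; now that the buffer is
   // allocated, turn them into absolute addresses (only for the requests added by this call).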
   for (auto i = currentReadRequestIdx; i < readRequests.size(); ++i) {
      readRequests[i].fBuffer = buffer + reinterpret_cast<intptr_t>(readRequests[i].fBuffer);
   }

   auto cluster = std::make_unique<RCluster>(clusterKey.fClusterId);
   cluster->Adopt(std::move(pageMap));
   cluster->Adopt(std::move(pageZeroMap));
   for (auto colId : clusterKey.fPhysicalColumnSet)
      cluster->SetColumnAvailable(colId);
   return cluster;
}

std::vector<std::unique_ptr<ROOT::Internal::RCluster>>
ROOT::Internal::RPageSourceFile::LoadClusters(std::span<ROOT::Internal::RCluster::RKey> clusterKeys)
{
   fCounters->fNClusterLoaded.Add(clusterKeys.size());

   std::vector<std::unique_ptr<ROOT::Internal::RCluster>> clusters;
   std::vector<ROOT::Internal::RRawFile::RIOVec> readRequests;

   clusters.reserve(clusterKeys.size());
   for (auto key : clusterKeys) {
      clusters.emplace_back(PrepareSingleCluster(key, readRequests));
   }

   auto nReqs = readRequests.size();
   auto readvLimits = fFile->GetReadVLimits();
   // We never want to do vectorized reads of split blobs, so we limit our single size to maxKeySize.
   readvLimits.fMaxSingleSize = std::min<size_t>(readvLimits.fMaxSingleSize, fReader.GetMaxKeySize());

   int iReq = 0;
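   // Issue the read requests in batches that respect the raw file's ReadV limits: at most fMaxReqs requests,
   // each at most fMaxSingleSize bytes, and at most fMaxTotalSize bytes in total. A batch of one falls back to a
   // plain ReadBuffer call.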
   while (nReqs > 0) {
      auto nBatch = std::min(nReqs, readvLimits.fMaxReqs);

      if (readvLimits.HasSizeLimit()) {
         std::uint64_t totalSize = 0;
         for (std::size_t i = 0; i < nBatch; ++i) {
            if (readRequests[iReq + i].fSize > readvLimits.fMaxSingleSize) {
               nBatch = i;
               break;
            }

            totalSize += readRequests[iReq + i].fSize;
            if (totalSize > readvLimits.fMaxTotalSize) {
               nBatch = i;
               break;
            }
         }
      }

      if (nBatch <= 1) {
         nBatch = 1;
         RNTupleAtomicTimer timer(fCounters->fTimeWallRead, fCounters->fTimeCpuRead);
         fReader.ReadBuffer(readRequests[iReq].fBuffer, readRequests[iReq].fSize, readRequests[iReq].fOffset);
      } else {
         RNTupleAtomicTimer timer(fCounters->fTimeWallRead, fCounters->fTimeCpuRead);
         fFile->ReadV(&readRequests[iReq], nBatch);
      }
      fCounters->fNReadV.Inc();
      fCounters->fNRead.Add(nBatch);

      iReq += nBatch;
      nReqs -= nBatch;
   }

   return clusters;
}