Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
RPageStorageFile.cxx
Go to the documentation of this file.
1/// \file RPageStorageFile.cxx
2/// \ingroup NTuple
3/// \author Jakob Blomer <jblomer@cern.ch>
4/// \date 2019-11-25
5
6/*************************************************************************
7 * Copyright (C) 1995-2019, Rene Brun and Fons Rademakers. *
8 * All rights reserved. *
9 * *
10 * For the licensing terms see $ROOTSYS/LICENSE. *
11 * For the list of contributors see $ROOTSYS/README/CREDITS. *
12 *************************************************************************/
13
14#include <ROOT/RCluster.hxx>
15#include <ROOT/RLogger.hxx>
17#include <ROOT/RNTupleModel.hxx>
19#include <ROOT/RNTupleZip.hxx>
20#include <ROOT/RPage.hxx>
22#include <ROOT/RPagePool.hxx>
24#include <ROOT/RRawFile.hxx>
26#include <ROOT/RNTupleTypes.hxx>
27#include <ROOT/RNTupleUtils.hxx>
28
29#include <RVersion.h>
30#include <TDirectory.h>
31#include <TError.h>
33
34#include <algorithm>
35#include <cstdio>
36#include <cstdlib>
37#include <cstring>
38#include <iterator>
39#include <limits>
40#include <utility>
41
42#include <functional>
43#include <mutex>
44
46
53
60
67
74
/// Constructs a file-based page sink that adopts an already-created RNTupleFileWriter.
/// Delegates to the (ntupleName, options) constructor using the writer's own ntuple name,
/// then takes ownership of the writer.
/// NOTE(review): `writer` is dereferenced (GetNTupleName()) before it is moved from below —
/// the delegation runs first, so the order is safe and intentional.
75ROOT::Internal::RPageSinkFile::RPageSinkFile(std::unique_ptr<ROOT::Internal::RNTupleFileWriter> writer,
76 const ROOT::RNTupleWriteOptions &options)
77 : RPageSinkFile(writer->GetNTupleName(), options)
78{
   // Adopt the externally created writer for all subsequent blob/header/footer writes.
79 fWriter = std::move(writer);
80}
81
83
85{
87 auto szZipHeader =
88 RNTupleCompressor::Zip(serializedHeader, length, GetWriteOptions().GetCompression(), zipBuffer.get());
89 fWriter->WriteNTupleHeader(zipBuffer.get(), szZipHeader, length);
90}
91
94{
96
97 auto fnAddStreamerInfo = [this](const ROOT::RFieldBase *field) {
98 const TClass *cl = nullptr;
99 if (auto classField = dynamic_cast<const RClassField *>(field)) {
100 cl = classField->GetClass();
101 } else if (auto streamerField = dynamic_cast<const RStreamerField *>(field)) {
102 cl = streamerField->GetClass();
103 } else if (auto soaField = dynamic_cast<const ROOT::Experimental::RSoAField *>(field)) {
104 cl = soaField->GetSoAClass();
105 }
106 if (!cl)
107 return;
108
109 auto streamerInfo = cl->GetStreamerInfo(field->GetTypeVersion());
110 if (!streamerInfo) {
111 throw RException(R__FAIL(std::string("cannot get streamerInfo for ") + cl->GetName() + " [" +
112 std::to_string(field->GetTypeVersion()) + "]"));
113 }
114 fInfosOfClassFields[streamerInfo->GetNumber()] = streamerInfo;
115 };
116
117 for (const auto field : changeset.fAddedFields) {
119 for (const auto &subField : *field) {
121 }
122 }
123}
124
127{
128 std::uint64_t offsetData;
129 {
130 RNTupleAtomicTimer timer(fCounters->fTimeWallWrite, fCounters->fTimeCpuWrite);
131 offsetData = fWriter->WriteBlob(sealedPage.GetBuffer(), sealedPage.GetBufferSize(), bytesPacked);
132 }
133
135 result.SetPosition(offsetData);
136 result.SetNBytesOnStorage(sealedPage.GetDataSize());
137 fCounters->fNPageCommitted.Inc();
138 fCounters->fSzWritePayload.Add(sealedPage.GetBufferSize());
139 fNBytesCurrentCluster += sealedPage.GetBufferSize();
140 return result;
141}
142
145{
146 auto element = columnHandle.fColumn->GetElement();
148 {
149 RNTupleAtomicTimer timer(fCounters->fTimeWallZip, fCounters->fTimeCpuZip);
150 sealedPage = SealPage(page, *element);
151 }
152
153 fCounters->fSzZip.Add(page.GetNBytes());
154 return WriteSealedPage(sealedPage, element->GetPackedSize(page.GetNElements()));
155}
156
/// Commits a single pre-sealed page to storage.
/// Per the member list at the bottom of this page the signature is
/// CommitSealedPageImpl(ROOT::DescriptorId_t physicalColumnId, const RPageStorage::RSealedPage &sealedPage);
/// NOTE(review): the signature line itself was lost in the HTML extraction — confirm against the
/// original RPageStorageFile.cxx.
159{
   // Packed on-disk size in bytes = ceil(nBits * nElements / 8); "+ 7" implements the
   // round-up in integer arithmetic.
160 const auto nBits = fDescriptorBuilder.GetDescriptor().GetColumnDescriptor(physicalColumnId).GetBitsOnStorage();
161 const auto bytesPacked = (nBits * sealedPage.GetNElements() + 7) / 8;
162 return WriteSealedPage(sealedPage, bytesPacked);
163}
164
/// Subroutine of CommitSealedPageVImpl (see member list below): performs a vector write of the
/// batched sealed pages into a single reserved blob, appends one locator per page to `locators`,
/// updates the write counters, and finally resets `batch` so it can be reused by the caller.
/// NOTE(review): the signature line and the declaration of `locator` (original line 175) were lost
/// in the HTML extraction — confirm against the original RPageStorageFile.cxx.
166{
   // Time the whole vector write under the wall/CPU write counters.
167 RNTupleAtomicTimer timer(fCounters->fTimeWallWrite, fCounters->fTimeCpuWrite);
168
   // Reserve one contiguous blob large enough for every page in the batch.
169 std::uint64_t offset = fWriter->ReserveBlob(batch.fSize, batch.fBytesPacked);
170
171 locators.reserve(locators.size() + batch.fSealedPages.size());
172
   // Write pages back-to-back into the reserved blob; each page's locator records its
   // own offset and its payload size (buffer size may additionally include a checksum).
173 for (const auto *pagePtr : batch.fSealedPages) {
174 fWriter->WriteIntoReservedBlob(pagePtr->GetBuffer(), pagePtr->GetBufferSize(), offset);
176 locator.SetPosition(offset);
177 locator.SetNBytesOnStorage(pagePtr->GetDataSize());
178 locators.push_back(locator);
179 offset += pagePtr->GetBufferSize();
180 }
181
182 fCounters->fNPageCommitted.Add(batch.fSealedPages.size());
183 fCounters->fSzWritePayload.Add(batch.fSize);
184 fNBytesCurrentCluster += batch.fSize;
185
   // Reset the batch in place so the caller can keep accumulating pages into it.
186 batch.fSize = 0;
187 batch.fBytesPacked = 0;
188 batch.fSealedPages.clear();
189}
190
/// Vector commit of preprocessed (sealed) pages: walks all page ranges, skipping pages that are
/// masked out, and groups consecutive pages into batches that fit into a single key of at most
/// maxKeySize bytes. Oversized pages are written individually via WriteBlob. Returns one locator
/// per committed page, in traversal order.
/// NOTE(review): the declarations of `batch` (original line 197) and `locator` (original line 238)
/// were lost in the HTML extraction — confirm against the original RPageStorageFile.cxx.
191std::vector<ROOT::RNTupleLocator>
192ROOT::Internal::RPageSinkFile::CommitSealedPageVImpl(std::span<RPageStorage::RSealedPageGroup> ranges,
193 const std::vector<bool> &mask)
194{
195 const std::uint64_t maxKeySize = fOptions->GetMaxKeySize();
196
198 std::vector<RNTupleLocator> locators;
199
   // iPage indexes into `mask` and runs over ALL pages of ALL ranges, masked or not.
200 std::size_t iPage = 0;
201 for (auto rangeIt = ranges.begin(); rangeIt != ranges.end(); ++rangeIt) {
202 auto &range = *rangeIt;
203 if (range.fFirst == range.fLast) {
204 // Skip empty ranges, they might not have a physical column ID!
205 continue;
206 }
207
   // Bits-on-storage are per physical column; used to compute the packed byte count below.
208 const auto bitsOnStorage =
209 fDescriptorBuilder.GetDescriptor().GetColumnDescriptor(range.fPhysicalColumnId).GetBitsOnStorage();
210
211 for (auto sealedPageIt = range.fFirst; sealedPageIt != range.fLast; ++sealedPageIt, ++iPage) {
212 if (!mask[iPage])
213 continue;
214
   // Packed size = ceil(bitsOnStorage * nElements / 8).
215 const auto bytesPacked = (bitsOnStorage * sealedPageIt->GetNElements() + 7) / 8;
216
217 if (batch.fSize > 0 && batch.fSize + sealedPageIt->GetBufferSize() > maxKeySize) {
218 /**
219 * Adding this page would exceed maxKeySize. Since we always want to write into a single key
220 * with vectorized writes, we commit the current set of pages before proceeding.
221 * NOTE: we do this *before* checking if sealedPageIt->GetBufferSize() > maxKeySize to guarantee that
222 * we always flush the current batch before doing an individual WriteBlob. This way we
223 * preserve the assumption that a CommitBatch always contains a sequential set of pages.
224 */
225 CommitBatchOfPages(batch, locators);
226 }
227
228 if (sealedPageIt->GetBufferSize() > maxKeySize) {
229 // This page alone is bigger than maxKeySize: save it by itself, since it will need to be
230 // split into multiple keys.
231
232 // Since this check implies the previous check on batchSize + newSize > maxSize, we should
233 // already have committed the current batch before writing this page.
234 assert(batch.fSize == 0);
235
236 std::uint64_t offset =
237 fWriter->WriteBlob(sealedPageIt->GetBuffer(), sealedPageIt->GetBufferSize(), bytesPacked);
239 locator.SetPosition(offset);
240 locator.SetNBytesOnStorage(sealedPageIt->GetDataSize());
241 locators.push_back(locator);
242
243 fCounters->fNPageCommitted.Inc();
244 fCounters->fSzWritePayload.Add(sealedPageIt->GetBufferSize());
245 fNBytesCurrentCluster += sealedPageIt->GetBufferSize();
246
247 } else {
   // Page fits: defer it into the current batch for a later vector write.
248 batch.fSealedPages.emplace_back(&(*sealedPageIt));
249 batch.fSize += sealedPageIt->GetBufferSize();
250 batch.fBytesPacked += bytesPacked;
251 }
252 }
253 }
254
   // Flush whatever is left in the final (partial) batch.
255 if (batch.fSize > 0) {
256 CommitBatchOfPages(batch, locators);
257 }
258
259 return locators;
260}
261
/// Returns the number of payload bytes written to storage for the current cluster (excluding
/// metadata, per the member list below) and resets the per-cluster byte counter for the next one.
/// NOTE(review): the signature line (StageClusterImpl) was lost in the HTML extraction — confirm
/// against the original RPageStorageFile.cxx.
263{
264 auto result = fNBytesCurrentCluster;
265 fNBytesCurrentCluster = 0;
266 return result;
267}
268
271{
273 auto szPageListZip =
274 RNTupleCompressor::Zip(serializedPageList, length, GetWriteOptions().GetCompression(), bufPageListZip.get());
275
277 result.SetNBytesOnStorage(szPageListZip);
278 result.SetPosition(fWriter->WriteBlob(bufPageListZip.get(), szPageListZip, length));
279 return result;
280}
281
284{
285 // Add the streamer info records from streamer fields: because of runtime polymorphism we may need to add additional
286 // types not covered by the type names of the class fields
287 for (const auto &extraTypeInfo : fDescriptorBuilder.GetDescriptor().GetExtraTypeInfoIterable()) {
289 continue;
290 // Ideally, we would avoid deserializing the streamer info records of the streamer fields that we just serialized.
291 // However, this happens only once at the end of writing and only when streamer fields are used, so the
292 // preference here is for code simplicity.
293 fInfosOfClassFields.merge(RNTupleSerializer::DeserializeStreamerInfos(extraTypeInfo.GetContent()).Unwrap());
294 }
295 fWriter->UpdateStreamerInfos(fInfosOfClassFields);
296
298 auto szFooterZip =
299 RNTupleCompressor::Zip(serializedFooter, length, GetWriteOptions().GetCompression(), bufFooterZip.get());
300 fWriter->WriteNTupleFooter(bufFooterZip.get(), szFooterZip, length);
301 return fWriter->Commit(GetWriteOptions().GetCompression());
302}
303
304std::unique_ptr<ROOT::Internal::RPageSink>
306{
307 auto writer = fWriter->CloneAsHidden(name);
308 auto cloned = std::unique_ptr<RPageSinkFile>(new RPageSinkFile(std::move(writer), opts));
309 return cloned;
310}
311
312////////////////////////////////////////////////////////////////////////////////
313
319
321 std::unique_ptr<ROOT::Internal::RRawFile> file,
322 const ROOT::RNTupleReadOptions &options)
323 : RPageSourceFile(ntupleName, options)
324{
325 fFile = std::move(file);
328}
329
/// Constructs a page source from a file system path: opens the path through
/// RRawFile::Create and delegates to the (ntupleName, file, options) constructor.
330ROOT::Internal::RPageSourceFile::RPageSourceFile(std::string_view ntupleName, std::string_view path,
331 const ROOT::RNTupleReadOptions &options)
332 : RPageSourceFile(ntupleName, ROOT::Internal::RRawFile::Create(path), options)
333{
334}
335
336std::unique_ptr<ROOT::Internal::RPageSourceFile>
338{
339 if (!anchor.fFile)
340 throw RException(R__FAIL("This RNTuple object was not streamed from a ROOT file (TFile or descendant)"));
341
342 std::unique_ptr<ROOT::Internal::RRawFile> rawFile;
343 // For local TFiles, TDavixFile, TCurlFile, and TNetXNGFile, we want to open a new RRawFile to take advantage of the
344 // faster reading. We check the exact class name to avoid classes inheriting in ROOT (for example TMemFile) or in
345 // experiment frameworks.
346 std::string className = anchor.fFile->IsA()->GetName();
347 auto url = anchor.fFile->GetEndpointUrl();
348 auto protocol = std::string(url->GetProtocol());
349 if (className == "TFile") {
351 } else if (className == "TDavixFile" || className == "TCurlFile" || className == "TNetXNGFile") {
353 } else {
355 }
356
357 auto pageSource = std::make_unique<RPageSourceFile>("", std::move(rawFile), options);
358 pageSource->fAnchor = anchor;
359 // NOTE: fNTupleName gets set only upon Attach().
360 return pageSource;
361}
362
/// Destructor body: stops the cluster pool's background thread before the members are
/// destroyed, so no worker can touch this object during teardown.
/// NOTE(review): the destructor's signature line was lost in the HTML extraction — confirm
/// against the original RPageStorageFile.cxx.
364{
365 fClusterPool.StopBackgroundThread();
366}
367
368std::unique_ptr<ROOT::Internal::RPageSource>
370 const ROOT::RNTupleReadOptions &options)
371{
372 assert(anchorLink.fLocator.GetType() == RNTupleLocator::kTypeFile);
373
374 const auto anchorPos = anchorLink.fLocator.GetPosition<std::uint64_t>();
375 auto anchor =
376 fReader.GetNTupleProperAtOffset(anchorPos, anchorLink.fLocator.GetNBytesOnStorage(), anchorLink.fLength).Unwrap();
377 auto pageSource = std::make_unique<RPageSourceFile>("", fFile->Clone(), options);
378 pageSource->fAnchor = anchor;
379 // NOTE: fNTupleName gets set only upon Attach().
380 return pageSource;
381}
382
/// Reads the on-disk RNTuple header and footer blobs into fStructureBuffer, resolving the anchor
/// first if this source was constructed from (name, path). Uses a single vectorized ReadV when the
/// file's readv limits allow both blobs in one request, otherwise two plain reads.
/// NOTE(review): the signature line (original 383) was lost in the HTML extraction; from the body
/// and the reference to "see AttachImpl" this is presumably LoadStructureImpl — confirm against
/// the original RPageStorageFile.cxx.
384{
385 // If we constructed the page source with (ntuple name, path), we need to find the anchor first.
386 // Otherwise, the page source was created by OpenFromAnchor()
387 if (!fAnchor) {
388 fAnchor = fReader.GetNTuple(fNTupleName).Unwrap();
389 }
390 fReader.SetMaxKeySize(fAnchor->GetMaxKeySize());
391
   // Seed the descriptor builder with version and on-disk header/footer sizes from the anchor.
392 fDescriptorBuilder.SetVersion(fAnchor->GetVersionEpoch(), fAnchor->GetVersionMajor(), fAnchor->GetVersionMinor(),
393 fAnchor->GetVersionPatch());
394 fDescriptorBuilder.SetOnDiskHeaderSize(fAnchor->GetNBytesHeader());
395 fDescriptorBuilder.AddToOnDiskFooterSize(fAnchor->GetNBytesFooter());
396
397 // Reserve enough space for the compressed and the uncompressed header/footer (see AttachImpl)
398 const auto bufSize = fAnchor->GetNBytesHeader() + fAnchor->GetNBytesFooter() +
399 std::max(fAnchor->GetLenHeader(), fAnchor->GetLenFooter());
400 fStructureBuffer.fBuffer = MakeUninitArray<unsigned char>(bufSize);
401 fStructureBuffer.fPtrHeader = fStructureBuffer.fBuffer.get();
402 fStructureBuffer.fPtrFooter = fStructureBuffer.fBuffer.get() + fAnchor->GetNBytesHeader();
403
404 auto readvLimits = fFile->GetReadVLimits();
405 // Never try to vectorize reads to a split key
406 readvLimits.fMaxSingleSize = std::min<size_t>(readvLimits.fMaxSingleSize, fAnchor->GetMaxKeySize());
407
   // Fall back to two sequential reads if the readv limits cannot accommodate both blobs.
408 if ((readvLimits.fMaxReqs < 2) ||
409 (std::max(fAnchor->GetNBytesHeader(), fAnchor->GetNBytesFooter()) > readvLimits.fMaxSingleSize) ||
410 (fAnchor->GetNBytesHeader() + fAnchor->GetNBytesFooter() > readvLimits.fMaxTotalSize)) {
411 RNTupleAtomicTimer timer(fCounters->fTimeWallRead, fCounters->fTimeCpuRead);
412 fReader.ReadBuffer(fStructureBuffer.fPtrHeader, fAnchor->GetNBytesHeader(), fAnchor->GetSeekHeader());
413 fReader.ReadBuffer(fStructureBuffer.fPtrFooter, fAnchor->GetNBytesFooter(), fAnchor->GetSeekFooter());
414 fCounters->fNRead.Add(2);
415 } else {
416 RNTupleAtomicTimer timer(fCounters->fTimeWallRead, fCounters->fTimeCpuRead);
   // Guard the uint64 -> size_t narrowing in the RIOVec initializers below.
417 R__ASSERT(fAnchor->GetNBytesHeader() < std::numeric_limits<std::size_t>::max());
418 R__ASSERT(fAnchor->GetNBytesFooter() < std::numeric_limits<std::size_t>::max());
419 ROOT::Internal::RRawFile::RIOVec readRequests[2] = {{fStructureBuffer.fPtrHeader, fAnchor->GetSeekHeader(),
420 static_cast<std::size_t>(fAnchor->GetNBytesHeader()), 0},
421 {fStructureBuffer.fPtrFooter, fAnchor->GetSeekFooter(),
422 static_cast<std::size_t>(fAnchor->GetNBytesFooter()), 0}};
423 fFile->ReadV(readRequests, 2);
424 fCounters->fNReadV.Inc();
425 }
426}
427
/// Decompresses and deserializes the header, footer, and all cluster-group page lists that were
/// previously read into fStructureBuffer, and returns the fully built descriptor.
/// NOTE(review): the signature line (original 428) was lost in the HTML extraction; the body uses a
/// `mode` parameter and returns the moved descriptor, so this is presumably AttachImpl — confirm
/// against the original RPageStorageFile.cxx.
429{
   // The scratch area for decompression sits directly behind the compressed footer in the
   // structure buffer (sized for max(lenHeader, lenFooter) when the buffer was allocated).
430 auto unzipBuf = reinterpret_cast<unsigned char *>(fStructureBuffer.fPtrFooter) + fAnchor->GetNBytesFooter();
431
432 RNTupleDecompressor::Unzip(fStructureBuffer.fPtrHeader, fAnchor->GetNBytesHeader(), fAnchor->GetLenHeader(),
433 unzipBuf);
434 RNTupleSerializer::DeserializeHeader(unzipBuf, fAnchor->GetLenHeader(), fDescriptorBuilder);
435
   // The footer reuses the same scratch area; the header contents are no longer needed.
436 RNTupleDecompressor::Unzip(fStructureBuffer.fPtrFooter, fAnchor->GetNBytesFooter(), fAnchor->GetLenFooter(),
437 unzipBuf);
438 RNTupleSerializer::DeserializeFooter(unzipBuf, fAnchor->GetLenFooter(), fDescriptorBuilder);
439
440 auto desc = fDescriptorBuilder.MoveDescriptor();
441
442 // fNTupleName is empty if and only if we created this source via CreateFromAnchor. If that's the case, this is the
443 // earliest we can set the name.
444 if (fNTupleName.empty())
445 fNTupleName = desc.GetName();
446
   // One shared buffer for all page lists: the uncompressed page list goes at the front,
   // the compressed bytes are read into the tail starting at GetPageListLength().
447 std::vector<unsigned char> buffer;
448 for (const auto &cgDesc : desc.GetClusterGroupIterable()) {
449 buffer.resize(std::max<size_t>(buffer.size(),
450 cgDesc.GetPageListLength() + cgDesc.GetPageListLocator().GetNBytesOnStorage()));
451 auto *zipBuffer = buffer.data() + cgDesc.GetPageListLength();
452 fReader.ReadBuffer(zipBuffer, cgDesc.GetPageListLocator().GetNBytesOnStorage(),
453 cgDesc.GetPageListLocator().GetPosition<std::uint64_t>());
454 RNTupleDecompressor::Unzip(zipBuffer, cgDesc.GetPageListLocator().GetNBytesOnStorage(),
455 cgDesc.GetPageListLength(), buffer.data());
456
457 RNTupleSerializer::DeserializePageList(buffer.data(), cgDesc.GetPageListLength(), cgDesc.GetId(), desc, mode);
458 }
459
460 // For the page reads, we rely on the I/O scheduler to define the read requests
461 fFile->SetBuffering(false);
462
463 return desc;
464}
465
468{
469 const auto clusterId = localIndex.GetClusterId();
470
472 {
473 auto descriptorGuard = GetSharedDescriptorGuard();
474 const auto &clusterDescriptor = descriptorGuard->GetClusterDescriptor(clusterId);
475 pageInfo = clusterDescriptor.GetPageRange(physicalColumnId).Find(localIndex.GetIndexInCluster());
476 }
477
478 sealedPage.SetBufferSize(pageInfo.GetLocator().GetNBytesOnStorage() + pageInfo.HasChecksum() * kNBytesPageChecksum);
479 sealedPage.SetNElements(pageInfo.GetNElements());
480 sealedPage.SetHasChecksum(pageInfo.HasChecksum());
481 if (!sealedPage.GetBuffer())
482 return;
483 if (pageInfo.GetLocator().GetType() != RNTupleLocator::kTypePageZero) {
484 fReader.ReadBuffer(const_cast<void *>(sealedPage.GetBuffer()), sealedPage.GetBufferSize(),
485 pageInfo.GetLocator().GetPosition<std::uint64_t>());
486 } else {
487 assert(!pageInfo.HasChecksum());
488 memcpy(const_cast<void *>(sealedPage.GetBuffer()), ROOT::Internal::RPage::GetPageZeroBuffer(),
489 sealedPage.GetBufferSize());
490 }
491
492 sealedPage.VerifyChecksumIfEnabled().ThrowOnError();
493}
494
498{
499 const auto columnId = columnHandle.fPhysicalId;
500 const auto clusterId = clusterInfo.fClusterId;
501 const auto pageInfo = clusterInfo.fPageInfo;
502
503 const auto element = columnHandle.fColumn->GetElement();
504 const auto elementSize = element->GetSize();
505 const auto elementInMemoryType = element->GetIdentifier().fInMemoryType;
506
507 if (pageInfo.GetLocator().GetType() == RNTupleLocator::kTypePageZero) {
508 auto pageZero = fPageAllocator->NewPage(elementSize, pageInfo.GetNElements());
509 pageZero.GrowUnchecked(pageInfo.GetNElements());
510 memset(pageZero.GetBuffer(), 0, pageZero.GetNBytes());
511 pageZero.SetWindow(clusterInfo.fColumnOffset + pageInfo.GetFirstElementIndex(),
513 return fPagePool.RegisterPage(std::move(pageZero), RPagePool::RKey{columnId, elementInMemoryType});
514 }
515
517 sealedPage.SetNElements(pageInfo.GetNElements());
518 sealedPage.SetHasChecksum(pageInfo.HasChecksum());
519 sealedPage.SetBufferSize(pageInfo.GetLocator().GetNBytesOnStorage() + pageInfo.HasChecksum() * kNBytesPageChecksum);
520 std::unique_ptr<unsigned char[]> directReadBuffer; // only used if cluster pool is turned off
521
522 if (fOptions.GetClusterCache() == ROOT::RNTupleReadOptions::EClusterCache::kOff) {
524 {
525 RNTupleAtomicTimer timer(fCounters->fTimeWallRead, fCounters->fTimeCpuRead);
526 fReader.ReadBuffer(directReadBuffer.get(), sealedPage.GetBufferSize(),
527 pageInfo.GetLocator().GetPosition<std::uint64_t>());
528 }
529 fCounters->fNPageRead.Inc();
530 fCounters->fNRead.Inc();
531 fCounters->fSzReadPayload.Add(sealedPage.GetBufferSize());
532 sealedPage.SetBuffer(directReadBuffer.get());
533 } else {
534 if (!fCurrentCluster || (fCurrentCluster->GetId() != clusterId) || !fCurrentCluster->ContainsColumn(columnId))
535 fCurrentCluster = fClusterPool.GetCluster(clusterId, fActivePhysicalColumns.ToColumnSet());
536 R__ASSERT(fCurrentCluster->ContainsColumn(columnId));
537
538 auto cachedPageRef =
540 if (!cachedPageRef.Get().IsNull())
541 return cachedPageRef;
542
543 ROnDiskPage::Key key(columnId, pageInfo.GetPageNumber());
544 auto onDiskPage = fCurrentCluster->GetOnDiskPage(key);
545 R__ASSERT(onDiskPage && (sealedPage.GetBufferSize() == onDiskPage->GetSize()));
546 sealedPage.SetBuffer(onDiskPage->GetAddress());
547 }
548
550 {
551 RNTupleAtomicTimer timer(fCounters->fTimeWallUnzip, fCounters->fTimeCpuUnzip);
552 newPage = UnsealPage(sealedPage, *element).Unwrap();
553 fCounters->fSzUnzip.Add(elementSize * pageInfo.GetNElements());
554 }
555
556 newPage.SetWindow(clusterInfo.fColumnOffset + pageInfo.GetFirstElementIndex(),
558 fCounters->fNPageUnsealed.Inc();
559 return fPagePool.RegisterPage(std::move(newPage), RPagePool::RKey{columnId, elementInMemoryType});
560}
561
562std::unique_ptr<ROOT::Internal::RPageSource> ROOT::Internal::RPageSourceFile::CloneImpl() const
563{
564 auto clone = new RPageSourceFile(fNTupleName, fOptions);
565 clone->fFile = fFile->Clone();
566 clone->fReader = ROOT::Internal::RMiniFileReader(clone->fFile.get());
567 return std::unique_ptr<RPageSourceFile>(clone);
568}
569
570std::unique_ptr<ROOT::Internal::RCluster>
572 std::vector<ROOT::Internal::RRawFile::RIOVec> &readRequests)
573{
574 struct ROnDiskPageLocator {
575 ROOT::DescriptorId_t fColumnId = 0;
576 ROOT::NTupleSize_t fPageNo = 0;
577 std::uint64_t fOffset = 0;
578 std::uint64_t fSize = 0;
579 std::size_t fBufPos = 0;
580 };
581
582 std::vector<ROnDiskPageLocator> onDiskPages;
583 auto activeSize = 0;
584 auto pageZeroMap = std::make_unique<ROnDiskPageMap>();
585 PrepareLoadCluster(
589 const auto &pageLocator = pageInfo.GetLocator();
591 throw RException(R__FAIL("tried to read a page with an unknown locator"));
592 const auto nBytes = pageLocator.GetNBytesOnStorage() + pageInfo.HasChecksum() * kNBytesPageChecksum;
594 onDiskPages.push_back({physicalColumnId, pageNo, pageLocator.GetPosition<std::uint64_t>(), nBytes, 0});
595 });
596
597 // Linearize the page requests by file offset
598 std::sort(onDiskPages.begin(), onDiskPages.end(),
599 [](const ROnDiskPageLocator &a, const ROnDiskPageLocator &b) { return a.fOffset < b.fOffset; });
600
601 // In order to coalesce close-by pages, we collect the sizes of the gaps between pages on disk. We then order
602 // the gaps by size, sum them up and find a cutoff for the largest gap that we tolerate when coalescing pages.
603 // The size of the cutoff is given by the fraction of extra bytes we are willing to read in order to reduce
604 // the number of read requests. We thus schedule the lowest number of requests given a tolerable fraction
605 // of extra bytes.
606 // TODO(jblomer): Eventually we may want to select the parameter at runtime according to link latency and speed,
607 // memory consumption, device block size.
608 float maxOverhead = 0.25 * float(activeSize);
609 std::vector<std::size_t> gaps;
610 if (onDiskPages.size())
611 gaps.reserve(onDiskPages.size() - 1);
612 for (unsigned i = 1; i < onDiskPages.size(); ++i) {
613 std::int64_t gap =
614 static_cast<int64_t>(onDiskPages[i].fOffset) - (onDiskPages[i - 1].fSize + onDiskPages[i - 1].fOffset);
615 gaps.emplace_back(std::max(gap, std::int64_t(0)));
616 // If the pages overlap, substract the overlapped bytes from `activeSize`
617 activeSize += std::min(gap, std::int64_t(0));
618 }
619 std::sort(gaps.begin(), gaps.end());
620 std::size_t gapCut = 0;
621 std::size_t currentGap = 0;
622 float szExtra = 0.0;
623 for (auto g : gaps) {
624 if (g != currentGap) {
626 currentGap = g;
627 }
628 szExtra += g;
629 if (szExtra > maxOverhead)
630 break;
631 }
632
633 // In a first step, we coalesce the read requests and calculate the cluster buffer size.
634 // In a second step, we'll fix-up the memory destinations for the read calls given the
635 // address of the allocated buffer. We must not touch, however, the read requests from previous
636 // calls to PrepareSingleCluster()
637 const auto currentReadRequestIdx = readRequests.size();
638
640 // To simplify the first loop iteration, pretend an empty request starting at the first page's fOffset.
641 if (!onDiskPages.empty())
642 req.fOffset = onDiskPages[0].fOffset;
643 std::size_t szPayload = 0;
644 std::size_t szOverhead = 0;
645 const std::uint64_t maxKeySize = fReader.GetMaxKeySize();
646 for (auto &s : onDiskPages) {
647 R__ASSERT(s.fSize > 0);
648 const std::int64_t readUpTo = req.fOffset + req.fSize;
649 // Note: byte ranges of pages may overlap
650 const std::uint64_t overhead = std::max(static_cast<std::int64_t>(s.fOffset) - readUpTo, std::int64_t(0));
651 const std::uint64_t extent = std::max(static_cast<std::int64_t>(s.fOffset + s.fSize) - readUpTo, std::int64_t(0));
652 if (req.fSize + extent < maxKeySize && overhead <= gapCut) {
655 s.fBufPos = reinterpret_cast<intptr_t>(req.fBuffer) + s.fOffset - req.fOffset;
656 req.fSize += extent;
657 continue;
658 }
659
660 // close the current request and open new one
661 if (req.fSize > 0)
662 readRequests.emplace_back(req);
663
664 req.fBuffer = reinterpret_cast<unsigned char *>(req.fBuffer) + req.fSize;
665 s.fBufPos = reinterpret_cast<intptr_t>(req.fBuffer);
666
667 szPayload += s.fSize;
668 req.fOffset = s.fOffset;
669 req.fSize = s.fSize;
670 }
671 readRequests.emplace_back(req);
672 fCounters->fSzReadPayload.Add(szPayload);
673 fCounters->fSzReadOverhead.Add(szOverhead);
674
675 // Register the on disk pages in a page map
676 auto buffer = new unsigned char[reinterpret_cast<intptr_t>(req.fBuffer) + req.fSize];
677 auto pageMap = std::make_unique<ROOT::Internal::ROnDiskPageMapHeap>(std::unique_ptr<unsigned char[]>(buffer));
678 for (const auto &s : onDiskPages) {
679 ROnDiskPage::Key key(s.fColumnId, s.fPageNo);
680 pageMap->Register(key, ROnDiskPage(buffer + s.fBufPos, s.fSize));
681 }
682 fCounters->fNPageRead.Add(onDiskPages.size());
683 for (auto i = currentReadRequestIdx; i < readRequests.size(); ++i) {
684 readRequests[i].fBuffer = buffer + reinterpret_cast<intptr_t>(readRequests[i].fBuffer);
685 }
686
687 auto cluster = std::make_unique<RCluster>(clusterKey.fClusterId);
688 cluster->Adopt(std::move(pageMap));
689 cluster->Adopt(std::move(pageZeroMap));
690 for (auto colId : clusterKey.fPhysicalColumnSet)
691 cluster->SetColumnAvailable(colId);
692 return cluster;
693}
694
/// Loads a set of clusters: first prepares per-cluster page maps and coalesced read requests via
/// PrepareSingleCluster, then issues the reads in batches that respect the file's readv limits,
/// falling back to single ReadBuffer calls when a batch degenerates to one request.
/// NOTE(review): the signature line (original 696) was lost in the HTML extraction — per the
/// surrounding code it takes the span of cluster keys iterated below; confirm against the
/// original RPageStorageFile.cxx.
695std::vector<std::unique_ptr<ROOT::Internal::RCluster>>
697{
698 fCounters->fNClusterLoaded.Add(clusterKeys.size());
699
700 std::vector<std::unique_ptr<ROOT::Internal::RCluster>> clusters;
701 std::vector<ROOT::Internal::RRawFile::RIOVec> readRequests;
702
   // Phase 1: build the cluster objects and accumulate all their read requests.
703 clusters.reserve(clusterKeys.size());
704 for (auto key : clusterKeys) {
705 clusters.emplace_back(PrepareSingleCluster(key, readRequests));
706 }
707
708 auto nReqs = readRequests.size();
709 auto readvLimits = fFile->GetReadVLimits();
710 // We never want to do vectorized reads of split blobs, so we limit our single size to maxKeySize.
711 readvLimits.fMaxSingleSize = std::min<size_t>(readvLimits.fMaxSingleSize, fReader.GetMaxKeySize());
712
   // Phase 2: issue the requests in batches bounded by fMaxReqs / fMaxSingleSize / fMaxTotalSize.
713 int iReq = 0;
714 while (nReqs > 0) {
715 auto nBatch = std::min(nReqs, readvLimits.fMaxReqs);
716
717 if (readvLimits.HasSizeLimit()) {
   // Shrink the batch at the first request that violates a size limit; that request
   // then starts the next batch (or is issued alone via ReadBuffer below).
718 std::uint64_t totalSize = 0;
719 for (std::size_t i = 0; i < nBatch; ++i) {
720 if (readRequests[iReq + i].fSize > readvLimits.fMaxSingleSize) {
721 nBatch = i;
722 break;
723 }
724
725 totalSize += readRequests[iReq + i].fSize;
726 if (totalSize > readvLimits.fMaxTotalSize) {
727 nBatch = i;
728 break;
729 }
730 }
731 }
732
733 if (nBatch <= 1) {
   // A single (possibly oversized) request: plain read, still make progress (nBatch = 1).
734 nBatch = 1;
735 RNTupleAtomicTimer timer(fCounters->fTimeWallRead, fCounters->fTimeCpuRead);
736 fReader.ReadBuffer(readRequests[iReq].fBuffer, readRequests[iReq].fSize, readRequests[iReq].fOffset);
737 } else {
738 RNTupleAtomicTimer timer(fCounters->fTimeWallRead, fCounters->fTimeCpuRead);
739 fFile->ReadV(&readRequests[iReq], nBatch);
740 }
741 fCounters->fNReadV.Inc();
742 fCounters->fNRead.Add(nBatch);
743
744 iReq += nBatch;
745 nReqs -= nBatch;
746 }
747
748 return clusters;
749}
750
752{
753 fReader.LoadStreamerInfo();
754}
fBuffer
dim_t fSize
#define R__FAIL(msg)
Short-hand to return an RResult<T> in an error state; the RError is implicitly converted into RResult...
Definition RError.hxx:300
#define b(i)
Definition RSha256.hxx:100
#define g(i)
Definition RSha256.hxx:105
#define a(i)
Definition RSha256.hxx:99
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows the typed iteration through a TCollection.
#define R__ASSERT(e)
Checks condition e and reports a fatal error if it's false.
Definition TError.h:125
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t mask
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h offset
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t result
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h length
Option_t Option_t TPoint TPoint const char mode
char name[80]
Definition TGX11.cxx:145
An interface to read from, or write to, a ROOT file, as well as performing other common operations.
Definition RFile.hxx:253
The SoA field provides I/O for an in-memory SoA layout linked to an on-disk collection of the underly...
Definition RFieldSoA.hxx:55
Read RNTuple data blocks from a TFile container, provided by a RRawFile.
Definition RMiniFile.hxx:61
static std::size_t Zip(const void *from, std::size_t nbytes, int compression, void *to)
Returns the size of the compressed data, written into the provided output buffer.
static void Unzip(const void *from, size_t nbytes, size_t dataLen, void *to)
The nbytes parameter provides the size of the from buffer.
static std::unique_ptr< RNTupleFileWriter > Append(std::string_view ntupleName, TDirectory &fileOrDirectory, std::uint64_t maxKeySize, bool isHidden)
The directory parameter can also be a TFile object (TFile inherits from TDirectory).
static std::unique_ptr< RNTupleFileWriter > Recreate(std::string_view ntupleName, std::string_view path, EContainerFormat containerFormat, const ROOT::RNTupleWriteOptions &options)
Create or truncate the local file given by path with the new empty RNTuple identified by ntupleName.
static RResult< void > DeserializePageList(const void *buffer, std::uint64_t bufSize, ROOT::DescriptorId_t clusterGroupId, RNTupleDescriptor &desc, EDescriptorDeserializeMode mode)
static RResult< void > DeserializeFooter(const void *buffer, std::uint64_t bufSize, ROOT::Internal::RNTupleDescriptorBuilder &descBuilder)
static RResult< StreamerInfoMap_t > DeserializeStreamerInfos(const std::string &extraTypeInfoContent)
static RResult< void > DeserializeHeader(const void *buffer, std::uint64_t bufSize, ROOT::Internal::RNTupleDescriptorBuilder &descBuilder)
A page as being stored on disk, that is packed and compressed.
Definition RCluster.hxx:41
Base class for a sink with a physical storage backend.
void UpdateSchema(const ROOT::Internal::RNTupleModelChangeset &changeset, ROOT::NTupleSize_t firstEntry) override
Incorporate incremental changes to the model into the ntuple descriptor.
void EnableDefaultMetrics(const std::string &prefix)
Enables the default set of metrics provided by RPageSink.
Reference to a page stored in the page pool.
Storage provider that write ntuple pages into a file.
void CommitBatchOfPages(CommitBatch &batch, std::vector< RNTupleLocator > &locators)
Subroutine of CommitSealedPageVImpl, used to perform a vector write of the (multi-)range of pages con...
RPageSinkFile(std::string_view ntupleName, const ROOT::RNTupleWriteOptions &options)
std::unique_ptr< RPageSink > CloneAsHidden(std::string_view name, const ROOT::RNTupleWriteOptions &opts) const override
Creates a new sink with the same underlying storage as this but writing to a different RNTuple named ...
std::uint64_t StageClusterImpl() final
Returns the number of bytes written to storage (excluding metadata)
void InitImpl(unsigned char *serializedHeader, std::uint32_t length) final
RNTupleLocator CommitPageImpl(ColumnHandle_t columnHandle, const RPage &page) override
RNTupleLocator WriteSealedPage(const RPageStorage::RSealedPage &sealedPage, std::size_t bytesPacked)
We pass bytesPacked so that TFile::ls() reports a reasonable value for the compression ratio of the c...
RNTupleLocator CommitClusterGroupImpl(unsigned char *serializedPageList, std::uint32_t length) final
Returns the locator of the page list envelope of the given buffer that contains the serialized page l...
RNTupleLocator CommitSealedPageImpl(ROOT::DescriptorId_t physicalColumnId, const RPageStorage::RSealedPage &sealedPage) final
RNTupleLink CommitDatasetImpl() final
std::unique_ptr< ROOT::Internal::RNTupleFileWriter > fWriter
void UpdateSchema(const ROOT::Internal::RNTupleModelChangeset &changeset, ROOT::NTupleSize_t firstEntry) final
Incorporate incremental changes to the model into the ntuple descriptor.
std::vector< RNTupleLocator > CommitSealedPageVImpl(std::span< RPageStorage::RSealedPageGroup > ranges, const std::vector< bool > &mask) final
Vector commit of preprocessed pages.
Storage provider that reads ntuple pages from a file.
std::unique_ptr< ROOT::Internal::RCluster > PrepareSingleCluster(const ROOT::Internal::RCluster::RKey &clusterKey, std::vector< RRawFile::RIOVec > &readRequests)
Helper function for LoadClusters: it prepares the memory buffer (page map) and the read requests for ...
std::unique_ptr< RPageSource > OpenWithDifferentAnchor(const ROOT::Internal::RNTupleLink &anchorLink, const ROOT::RNTupleReadOptions &options={}) final
Creates a new PageSource using the same underlying file as this but referring to a different RNTuple,...
RPageRef LoadPageImpl(ColumnHandle_t columnHandle, const RClusterInfo &clusterInfo, ROOT::NTupleSize_t idxInCluster) final
static std::unique_ptr< RPageSourceFile > CreateFromAnchor(const RNTuple &anchor, const ROOT::RNTupleReadOptions &options=ROOT::RNTupleReadOptions())
Used from the RNTuple class to build a datasource if the anchor is already available.
ROOT::RNTupleDescriptor AttachImpl(RNTupleSerializer::EDescriptorDeserializeMode mode) final
LoadStructureImpl() has been called before AttachImpl() is called
std::vector< std::unique_ptr< ROOT::Internal::RCluster > > LoadClusters(std::span< ROOT::Internal::RCluster::RKey > clusterKeys) final
Populates all the pages of the given cluster ids and columns; it is possible that some columns do not...
RPageSourceFile(std::string_view ntupleName, const ROOT::RNTupleReadOptions &options)
std::unique_ptr< RPageSource > CloneImpl() const final
The cloned page source creates a new raw file and reader and opens its own file descriptor to the dat...
void LoadStreamerInfo() final
Forces the loading of ROOT StreamerInfo from the underlying file.
std::unique_ptr< RRawFile > fFile
An RRawFile is used to request the necessary byte ranges from a local or a remote file.
ROOT::Internal::RMiniFileReader fReader
Takes the fFile to read ntuple blobs from it.
void LoadSealedPage(ROOT::DescriptorId_t physicalColumnId, RNTupleLocalIndex localIndex, RSealedPage &sealedPage) final
Read the packed and compressed bytes of a page into the memory buffer provided by sealedPage.
Abstract interface to read data from an ntuple.
void EnableDefaultMetrics(const std::string &prefix)
Enables the default set of metrics provided by RPageSource.
Stores information about the cluster in which this page resides.
Definition RPage.hxx:53
A page is a slice of a column that is mapped into memory.
Definition RPage.hxx:44
static const void * GetPageZeroBuffer()
Return a pointer to the page zero buffer used if there is no on-disk data for a particular deferred c...
Definition RPage.cxx:23
The RRawFileTFile wraps an open TFile, but does not take ownership.
The RRawFile provides read-only access to local and remote files.
Definition RRawFile.hxx:43
static std::unique_ptr< RRawFile > Create(std::string_view url, ROptions options=ROptions())
Factory method that returns a suitable concrete implementation according to the transport in the url.
Definition RRawFile.cxx:64
The field for a class with dictionary.
Definition RField.hxx:138
Base class for all ROOT issued exceptions.
Definition RError.hxx:79
A field translates read and write calls from/to underlying columns to/from tree values.
The on-storage metadata of an RNTuple.
Addresses a column element or field item relative to a particular cluster, instead of a global NTuple...
Generic information about the physical location of data.
Common user-tunable settings for reading RNTuples.
Common user-tunable settings for storing RNTuples.
std::uint64_t GetMaxKeySize() const
Representation of an RNTuple data set in a ROOT file.
Definition RNTuple.hxx:68
const_iterator begin() const
const_iterator end() const
The field for a class using ROOT standard streaming.
Definition RField.hxx:238
TClass instances represent classes, structs and namespaces in the ROOT type system.
Definition TClass.h:84
TVirtualStreamerInfo * GetStreamerInfo(Int_t version=0, Bool_t isTransient=kFALSE) const
returns a pointer to the TVirtualStreamerInfo object for version If the object does not exist,...
Definition TClass.cxx:4657
Describe directory structure in memory.
Definition TDirectory.h:45
const char * GetName() const override
Returns name of object.
Definition TNamed.h:49
std::uint64_t DescriptorId_t
Distinguishes elements of the same type within a descriptor, e.g. different fields.
std::uint64_t NTupleSize_t
Integer type long enough to hold the maximum number of entries in a column.
The identifiers that specifies the content of a (partial) cluster.
Definition RCluster.hxx:152
The incremental changes to a RNTupleModel
On-disk pages within a page source are identified by the column and page number.
Definition RCluster.hxx:51
Summarizes cluster-level information that is necessary to load a certain page.
A sealed page contains the bytes of a page as written to storage (packed & compressed).
Used for vector reads from multiple offsets into multiple buffers.
Definition RRawFile.hxx:61
Information about a single page in the context of a cluster's page range.