ROOT 6.30.04 Reference Guide
TBasket.cxx
1 // @(#)root/tree:$Id: 4e77188fbf1e7fd026a984989de66663c49b12fc $
2 // Author: Rene Brun 19/01/96
3 /*************************************************************************
4  * Copyright (C) 1995-2000, Rene Brun and Fons Rademakers. *
5  * All rights reserved. *
6  * *
7  * For the licensing terms see $ROOTSYS/LICENSE. *
8  * For the list of contributors see $ROOTSYS/README/CREDITS. *
9  *************************************************************************/
10 
11 #include <chrono>
12 
13 #include "TBasket.h"
14 #include "TBuffer.h"
15 #include "TBufferFile.h"
16 #include "TTree.h"
17 #include "TBranch.h"
18 #include "TFile.h"
19 #include "TLeaf.h"
20 #include "TBufferFile.h"
21 #include "TMath.h"
22 #include "TROOT.h"
23 #include "TTreeCache.h"
24 #include "TVirtualMutex.h"
25 #include "TVirtualPerfStats.h"
26 #include "TTimeStamp.h"
27 #include "ROOT/TIOFeatures.hxx"
28 #include "RZip.h"
29 
30 #include <bitset>
31 
32 const UInt_t kDisplacementMask = 0xFF000000; // In the streamer the two highest bytes of
33  // the fEntryOffset are used to store the displacement.
34 
35 ClassImp(TBasket);
36 
37 /** \class TBasket
38 \ingroup tree
39 
40 Manages buffers for branches of a Tree.
41 
42 See picture in TTree.
43 */
44 
45 ////////////////////////////////////////////////////////////////////////////////
46 /// Default constructor.
47 
48 TBasket::TBasket()
49 {
50 }
51 
52 ////////////////////////////////////////////////////////////////////////////////
53 /// Constructor used during reading.
54 
55 TBasket::TBasket(TDirectory *motherDir) : TKey(motherDir)
56 {
57 }
58 
59 ////////////////////////////////////////////////////////////////////////////////
60 /// Basket normal constructor, used during writing.
61 
62 TBasket::TBasket(const char *name, const char *title, TBranch *branch)
63  : TKey(branch->GetDirectory()), fBufferSize(branch->GetBasketSize()), fNevBufSize(branch->GetEntryOffsetLen()),
64  fHeaderOnly(kTRUE), fIOBits(branch->GetIOFeatures().GetFeatures())
65 {
66  SetName(name);
67  SetTitle(title);
68  fClassName = "TBasket";
69  fBuffer = nullptr;
70  fBufferRef = new TBufferFile(TBuffer::kWrite, fBufferSize);
71  fVersion += 1000;
72  if (branch->GetDirectory()) {
73  TFile *file = branch->GetFile();
74  fBufferRef->SetParent(file);
75  }
76  if (branch->GetTree()) {
77 #ifdef R__USE_IMT
78  fCompressedBufferRef = branch->GetTransientBuffer(fBufferSize);
79 #else
80  fCompressedBufferRef = branch->GetTree()->GetTransientBuffer(fBufferSize);
81 #endif
82  fOwnsCompressedBuffer = kFALSE;
83  if (!fCompressedBufferRef) {
84  fCompressedBufferRef = new TBufferFile(TBuffer::kRead, fBufferSize);
85  fOwnsCompressedBuffer = kTRUE;
86  }
87  }
88  fBranch = branch;
89  Streamer(*fBufferRef);
90  fKeylen = fBufferRef->Length();
91  fObjlen = fBufferSize - fKeylen;
92  fLast = fKeylen;
93  fBuffer = 0;
94  fHeaderOnly = kFALSE;
95  if (fNevBufSize) {
96  fEntryOffset = new Int_t[fNevBufSize];
97  for (Int_t i=0;i<fNevBufSize;i++) fEntryOffset[i] = 0;
98  }
99  branch->GetTree()->IncrementTotalBuffers(fBufferSize);
100 }
101 
102 ////////////////////////////////////////////////////////////////////////////////
103 /// Basket destructor.
104 
105 TBasket::~TBasket()
106 {
107  if (fDisplacement) delete [] fDisplacement;
108  ResetEntryOffset();
109  if (fBufferRef) delete fBufferRef;
110  fBufferRef = 0;
111  fBuffer = 0;
112  fDisplacement= 0;
113  // Note we only delete the compressed buffer if we own it
114  if (fCompressedBufferRef && fOwnsCompressedBuffer) {
115  delete fCompressedBufferRef;
116  fCompressedBufferRef = 0;
117  }
118 }
119 
120 ////////////////////////////////////////////////////////////////////////////////
121 /// Increase the size of the current fBuffer up to newsize.
122 
123 void TBasket::AdjustSize(Int_t newsize)
124 {
125  if (fBuffer == fBufferRef->Buffer()) {
126  fBufferRef->Expand(newsize);
127  fBuffer = fBufferRef->Buffer();
128  } else {
129  fBufferRef->Expand(newsize);
130  }
131  fBranch->GetTree()->IncrementTotalBuffers(newsize-fBufferSize);
132  fBufferSize = newsize;
133  fLastWriteBufferSize[0] = newsize;
134  fLastWriteBufferSize[1] = 0;
135  fLastWriteBufferSize[2] = 0;
136  fNextBufferSizeRecord = 1;
137 }
138 
139 ////////////////////////////////////////////////////////////////////////////////
140 /// Copy the basket of this branch onto the file `to`.
141 
142 Long64_t TBasket::CopyTo(TFile *to)
143 {
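 // Re-create the key header for the destination file and write the basket payload
 // unchanged (typically still compressed, as loaded by LoadBasketBuffers).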
144  fBufferRef->SetWriteMode();
145  Int_t nout = fNbytes - fKeylen;
146  fBuffer = fBufferRef->Buffer();
147  Create(nout, to);
148  fBufferRef->SetBufferOffset(0);
149  fHeaderOnly = kTRUE;
150  Streamer(*fBufferRef);
151  fHeaderOnly = kFALSE;
152  Int_t nBytes = WriteFileKeepBuffer(to);
153 
154  return nBytes>0 ? nBytes : -1;
155 }
156 
157 ////////////////////////////////////////////////////////////////////////////////
158 /// Delete fEntryOffset array.
159 
160 void TBasket::DeleteEntryOffset()
161 {
162  ResetEntryOffset();
163  fNevBufSize = 0;
164 }
165 
166 ////////////////////////////////////////////////////////////////////////////////
167 /// Drop buffers of this basket if it is not the current basket.
168 
169 Int_t TBasket::DropBuffers()
170 {
171  if (!fBuffer && !fBufferRef) return 0;
172 
173  if (fDisplacement) delete [] fDisplacement;
174  ResetEntryOffset();
175  if (fBufferRef) delete fBufferRef;
176  if (fCompressedBufferRef && fOwnsCompressedBuffer) delete fCompressedBufferRef;
177  fBufferRef = 0;
178  fCompressedBufferRef = 0;
179  fBuffer = 0;
180  fDisplacement= 0;
181  fEntryOffset = 0;
182  fBranch->GetTree()->IncrementTotalBuffers(-fBufferSize);
183  return fBufferSize;
184 }
185 
186 ////////////////////////////////////////////////////////////////////////////////
187 /// Calculates the entry offset array, if possible.
188 ///
189 /// Result is cached, meaning that this should only be invoked once per basket.
190 
191 Int_t *TBasket::GetCalculatedEntryOffset()
192 {
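 // A value of (Int_t*)-1 is a sentinel set by Streamer(), meaning the offsets were not
 // stored on disk and must be generated from the single leaf of the owning branch.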
193  if (fEntryOffset != reinterpret_cast<Int_t *>(-1)) {
194  return fEntryOffset;
195  }
196 
197  if (R__unlikely(!fBranch)) {
198  Error("GetCalculatedEntryOffset", "Basket entry offset calculation requested, but no associated TBranch!");
199  return nullptr;
200  }
201  if (R__unlikely(fBranch->GetNleaves() != 1)) {
202  Error("GetCalculatedEntryOffset", "Branch contains multiple leaves - unable to calculated entry offsets!");
203  return nullptr;
204  }
205  TLeaf *leaf = static_cast<TLeaf *>((*fBranch->GetListOfLeaves())[0]);
206  fEntryOffset = leaf->GenerateOffsetArray(fKeylen, fNevBuf);
207  return fEntryOffset;
208 }
209 
210 ////////////////////////////////////////////////////////////////////////////////
211 /// Determine whether we can generate the offset array when this branch is read.
212 ///
213 
214 Bool_t TBasket::CanGenerateOffsetArray()
215 {
216  if (fBranch->GetNleaves() != 1) {
217  return kFALSE;
218  }
219  TLeaf *leaf = static_cast<TLeaf *>((*fBranch->GetListOfLeaves())[0]);
220  return leaf->CanGenerateOffsetArray();
221 }
222 
223 ////////////////////////////////////////////////////////////////////////////////
224 /// Get pointer to buffer for internal entry.
225 
226 Int_t TBasket::GetEntryPointer(Int_t entry)
227 {
228  Int_t offset;
229  Int_t *entryOffset = GetEntryOffset();
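 // Without an entry-offset array every entry has the same size (fNevBufSize),
 // so the position of the requested entry can be computed directly.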
230  if (entryOffset) offset = entryOffset[entry];
231  else offset = fKeylen + entry*fNevBufSize;
232  fBufferRef->SetBufferOffset(offset);
233  return offset;
234 }
235 
236 ////////////////////////////////////////////////////////////////////////////////
237 /// Load basket buffers in memory without unzipping.
238 /// This function is called by TTreeCloner.
239 /// The function returns 0 in case of success, 1 in case of error.
240 
241 Int_t TBasket::LoadBasketBuffers(Long64_t pos, Int_t len, TFile *file, TTree *tree)
242 {
243  if (fBufferRef) {
244  // Reuse the buffer if it exists.
245  fBufferRef->Reset();
246 
247  // We use this buffer both for reading and writing, we need to
248  // make sure it is properly sized for writing.
249  fBufferRef->SetWriteMode();
250  if (fBufferRef->BufferSize() < len) {
251  fBufferRef->Expand(len);
252  }
253  fBufferRef->SetReadMode();
254  } else {
255  fBufferRef = new TBufferFile(TBuffer::kRead, len);
256  }
257  fBufferRef->SetParent(file);
258  char *buffer = fBufferRef->Buffer();
259  file->Seek(pos);
260  TFileCacheRead *pf = tree->GetReadCache(file);
261  if (pf) {
262  TVirtualPerfStats* temp = gPerfStats;
263  if (tree->GetPerfStats()) gPerfStats = tree->GetPerfStats();
264  Int_t st = pf->ReadBuffer(buffer,pos,len);
265  if (st < 0) {
266  return 1;
267  } else if (st == 0) {
268  // fOffset might have been changed via TFileCacheRead::ReadBuffer(), reset it
269  file->Seek(pos);
270  // If we are using a TTreeCache, disable reading from the default cache
271  // temporarily, to force reading directly from file
272  TTreeCache *fc = dynamic_cast<TTreeCache*>(file->GetCacheRead());
273  if (fc) fc->Disable();
274  Int_t ret = file->ReadBuffer(buffer,len);
275  if (fc) fc->Enable();
276  pf->AddNoCacheBytesRead(len);
277  pf->AddNoCacheReadCalls(1);
278  if (ret) {
279  return 1;
280  }
281  }
282  gPerfStats = temp;
283  // fOffset might have been changed via TFileCacheRead::ReadBuffer(), reset it
284  file->SetOffset(pos + len);
285  } else {
286  TVirtualPerfStats* temp = gPerfStats;
287  if (tree->GetPerfStats() != 0) gPerfStats = tree->GetPerfStats();
288  if (file->ReadBuffer(buffer,len)) {
289  gPerfStats = temp;
290  return 1; //error while reading
291  }
292  else gPerfStats = temp;
293  }
294 
295  fBufferRef->SetReadMode();
296  fBufferRef->SetBufferOffset(0);
297  Streamer(*fBufferRef);
298 
299  return 0;
300 }
301 
302 ////////////////////////////////////////////////////////////////////////////////
303 /// Remove the first `dentries` entries of this basket, moving the
304 /// remaining entries to the start of the buffer.
305 
306 void TBasket::MoveEntries(Int_t dentries)
307 {
308  Int_t i;
309 
310  if (dentries >= fNevBuf) return;
311  Int_t bufbegin;
312  Int_t moved;
313 
314  Int_t *entryOffset = GetEntryOffset();
315  if (entryOffset) {
316  bufbegin = entryOffset[dentries];
317  moved = bufbegin-GetKeylen();
318 
319  // First store the original location in the fDisplacement array
320  // and record the new start offset
321 
322  if (!fDisplacement) {
323  fDisplacement = new Int_t[fNevBufSize];
324  }
325  for (i = 0; i<(fNevBufSize-dentries); ++i) {
326  fDisplacement[i] = entryOffset[i + dentries];
327  entryOffset[i] = entryOffset[i + dentries] - moved;
328  }
329  for (i = fNevBufSize-dentries; i<fNevBufSize; ++i) {
330  fDisplacement[i] = 0;
331  entryOffset[i] = 0;
332  }
333 
334  } else {
335  // If there is no EntryOffset array, this means
336  // that each entry has the same size and that
337  // it does not point to other objects (hence there
338  // is no need for a displacement array).
339  bufbegin = GetKeylen() + dentries*fNevBufSize;
340  moved = bufbegin-GetKeylen();
341  }
342  TBuffer *buf = GetBufferRef();
343  char *buffer = buf->Buffer();
344  memmove(buffer+GetKeylen(),buffer+bufbegin,buf->Length()-bufbegin);
345  buf->SetBufferOffset(buf->Length()-moved);
346  fNevBuf -= dentries;
347 }
348 
349 #define OLD_CASE_EXPRESSION fObjlen==fNbytes-fKeylen && GetBranch()->GetCompressionLevel()!=0 && file->GetVersion()<=30401
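// The expression above identifies baskets in old files (file version <= 30401) whose on-disk
// payload size equals the object size even though the branch claims to be compressed.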
350 ////////////////////////////////////////////////////////////////////////////////
351 /// Bypassing buffer unzipping has been requested and is
352 /// possible (only 1 entry in this basket).
353 
354 Int_t TBasket::ReadBasketBuffersUncompressedCase()
355 {
356  fBuffer = fBufferRef->Buffer();
357 
358  // Make sure that the buffer is set at the END of the data
359  fBufferRef->SetBufferOffset(fNbytes);
360 
361  // Indicate that this buffer is weird.
362  fBufferRef->SetBit(TBufferFile::kNotDecompressed);
363 
364  // Usage of this mode assumes the existence of only ONE
365  // entry in this basket.
366  ResetEntryOffset();
367  delete [] fDisplacement; fDisplacement = 0;
368 
369  fBranch->GetTree()->IncrementTotalBuffers(fBufferSize);
370  return 0;
371 }
372 
373 ////////////////////////////////////////////////////////////////////////////////
374 /// We always create the TBuffer for the basket, but it holds the buffer from the cache.
375 
376 Int_t TBasket::ReadBasketBuffersUnzip(char* buffer, Int_t size, Bool_t mustFree, TFile* file)
377 {
378  if (fBufferRef) {
379  fBufferRef->SetBuffer(buffer, size, mustFree);
380  fBufferRef->SetReadMode();
381  fBufferRef->Reset();
382  } else {
383  fBufferRef = new TBufferFile(TBuffer::kRead, size, buffer, mustFree);
384  }
385  fBufferRef->SetParent(file);
386 
387  Streamer(*fBufferRef);
388 
389  if (IsZombie()) {
390  return -1;
391  }
392 
393  Bool_t oldCase = OLD_CASE_EXPRESSION;
394 
395  if ((fObjlen > fNbytes-fKeylen || oldCase) && TestBit(TBufferFile::kNotDecompressed) && (fNevBuf==1)) {
396  return TBasket::ReadBasketBuffersUncompressedCase();
397  }
398 
399  fBuffer = fBufferRef->Buffer();
400  return fObjlen+fKeylen;
401 }
402 
403 ////////////////////////////////////////////////////////////////////////////////
404 /// Initialize a buffer for reading if it is not already initialized
405 
406 static inline TBuffer* R__InitializeReadBasketBuffer(TBuffer* bufferRef, Int_t len, TFile* file)
407 {
408  TBuffer* result;
409  if (R__likely(bufferRef)) {
410  bufferRef->SetReadMode();
411  Int_t curBufferSize = bufferRef->BufferSize();
412  if (curBufferSize < len) {
413  // Experience shows that giving 5% "wiggle-room" decreases churn.
414  bufferRef->Expand(Int_t(len*1.05));
415  }
416  bufferRef->Reset();
417  result = bufferRef;
418  } else {
419  result = new TBufferFile(TBuffer::kRead, len);
420  }
421  result->SetParent(file);
422  return result;
423 }
424 
425 ////////////////////////////////////////////////////////////////////////////////
426 /// Initialize the compressed buffer; either from the TTree or create a local one.
427 
428 void inline TBasket::InitializeCompressedBuffer(Int_t len, TFile* file)
429 {
430  Bool_t compressedBufferExists = fCompressedBufferRef != NULL;
431  fCompressedBufferRef = R__InitializeReadBasketBuffer(fCompressedBufferRef, len, file);
432  if (R__unlikely(!compressedBufferExists)) {
433  fOwnsCompressedBuffer = kTRUE;
434  }
435 }
436 
437 void TBasket::ResetEntryOffset()
438 {
439  if (fEntryOffset != reinterpret_cast<Int_t *>(-1)) {
440  delete[] fEntryOffset;
441  }
442  fEntryOffset = nullptr;
443 }
444 
445 ////////////////////////////////////////////////////////////////////////////////
446 /// Read basket buffers in memory and cleanup.
447 ///
448 /// Read a basket buffer. Check if buffers of previous ReadBasket
449 /// should not be dropped. Remember, we keep buffers in memory up to
450 /// fMaxVirtualSize.
451 /// The function returns 0 in case of success, 1 in case of error.
452 /// This function was modified with the addition of parallel
453 /// unzipping: it will try to get the unzipped buffer from the cache,
454 /// receiving only a pointer to that buffer (so we shall not
455 /// delete that pointer), although we get a new buffer in case
456 /// it is not found in the cache.
457 /// There is a lot of code duplication, but it was necessary to ensure
458 /// the expected behavior when there is no cache.
459 
460 Int_t TBasket::ReadBasketBuffers(Long64_t pos, Int_t len, TFile *file)
461 {
462  if(!fBranch->GetDirectory()) {
463  return -1;
464  }
465 
466  Bool_t oldCase;
467  char *rawUncompressedBuffer, *rawCompressedBuffer;
468  Int_t uncompressedBufferLen;
469 
470  // See if the cache has already unzipped the buffer for us.
471  TFileCacheRead *pf = nullptr;
472  {
473  R__LOCKGUARD_IMT(gROOTMutex); // Lock for parallel TTree I/O
474  pf = fBranch->GetTree()->GetReadCache(file);
475  }
476  if (pf) {
477  Int_t res = -1;
478  Bool_t free = kTRUE;
479  char *buffer = nullptr;
480  res = pf->GetUnzipBuffer(&buffer, pos, len, &free);
481  if (R__unlikely(res >= 0)) {
482  len = ReadBasketBuffersUnzip(buffer, res, free, file);
483  // Note that in the kNotDecompressed case, the above function will return 0;
484  // In such a case, we should stop processing
485  if (len <= 0) return -len;
486  goto AfterBuffer;
487  }
488  }
489 
490  // Determine which buffer to use, so that we can avoid a memcpy in case
491  // the basket was not compressed.
492  TBuffer* readBufferRef;
493  if (R__unlikely(fBranch->GetCompressionLevel()==0)) {
494  // Initialize the buffer to hold the uncompressed data.
495  fBufferRef = R__InitializeReadBasketBuffer(fBufferRef, len, file);
496  readBufferRef = fBufferRef;
497  } else {
498  // Initialize the buffer to hold the compressed data.
499  fCompressedBufferRef = R__InitializeReadBasketBuffer(fCompressedBufferRef, len, file);
500  readBufferRef = fCompressedBufferRef;
501  }
502 
503  // fBufferSize is likely to be changed in the Streamer call (below)
504  // and we will re-add the new size later on.
505  fBranch->GetTree()->IncrementTotalBuffers(-fBufferSize);
506 
507  if (!readBufferRef) {
508  Error("ReadBasketBuffers", "Unable to allocate buffer.");
509  return 1;
510  }
511 
512  if (pf) {
513  TVirtualPerfStats* temp = gPerfStats;
514  if (fBranch->GetTree()->GetPerfStats() != 0) gPerfStats = fBranch->GetTree()->GetPerfStats();
515  Int_t st = 0;
516  {
517  R__LOCKGUARD_IMT(gROOTMutex); // Lock for parallel TTree I/O
518  st = pf->ReadBuffer(readBufferRef->Buffer(),pos,len);
519  }
520  if (st < 0) {
521  return 1;
522  } else if (st == 0) {
523  // Read directly from file, not from the cache
524  // If we are using a TTreeCache, disable reading from the default cache
525  // temporarily, to force reading directly from file
526  R__LOCKGUARD_IMT(gROOTMutex); // Lock for parallel TTree I/O
527  TTreeCache *fc = dynamic_cast<TTreeCache*>(file->GetCacheRead());
528  if (fc) fc->Disable();
529  Int_t ret = file->ReadBuffer(readBufferRef->Buffer(),pos,len);
530  if (fc) fc->Enable();
531  pf->AddNoCacheBytesRead(len);
532  pf->AddNoCacheReadCalls(1);
533  if (ret) {
534  return 1;
535  }
536  }
537  gPerfStats = temp;
538  } else {
539  // Read from the file and unstream the header information.
540  TVirtualPerfStats* temp = gPerfStats;
541  if (fBranch->GetTree()->GetPerfStats() != 0) gPerfStats = fBranch->GetTree()->GetPerfStats();
542  R__LOCKGUARD_IMT(gROOTMutex); // Lock for parallel TTree I/O
543  if (file->ReadBuffer(readBufferRef->Buffer(),pos,len)) {
544  gPerfStats = temp;
545  return 1;
546  }
547  else gPerfStats = temp;
548  }
549  Streamer(*readBufferRef);
550  if (IsZombie()) {
551  return 1;
552  }
553 
554  rawCompressedBuffer = readBufferRef->Buffer();
555 
556  // Are we done?
557  if (R__unlikely(readBufferRef == fBufferRef)) // We expect most baskets to be compressed.
558  {
559  if (R__likely(fObjlen+fKeylen == fNbytes)) {
560  // The basket was really not compressed as expected.
561  goto AfterBuffer;
562  } else {
563  // Well, somehow the buffer was compressed anyway; we have the compressed data in the uncompressed buffer.
564  // Make sure the compressed buffer is initialized, and memcpy.
565  InitializeCompressedBuffer(len, file);
566  if (!fCompressedBufferRef) {
567  Error("ReadBasketBuffers", "Unable to allocate buffer.");
568  return 1;
569  }
570  fBufferRef->Reset();
571  rawCompressedBuffer = fCompressedBufferRef->Buffer();
572  memcpy(rawCompressedBuffer, fBufferRef->Buffer(), len);
573  }
574  }
575 
576  // Initialize buffer to hold the uncompressed data
577  // Note that in previous versions we didn't allocate buffers until we verified
578  // the zip headers; this is no longer done beforehand, as the buffer lifetime is scoped
579  // to the TBranch.
580  uncompressedBufferLen = len > fObjlen+fKeylen ? len : fObjlen+fKeylen;
581  fBufferRef = R__InitializeReadBasketBuffer(fBufferRef, uncompressedBufferLen, file);
582  rawUncompressedBuffer = fBufferRef->Buffer();
583  fBuffer = rawUncompressedBuffer;
584 
585  oldCase = OLD_CASE_EXPRESSION;
586  // Case where ROOT thinks the buffer is compressed. Copy over the key and uncompress the object
587  if (fObjlen > fNbytes-fKeylen || oldCase) {
588  if (R__unlikely(TestBit(TBufferFile::kNotDecompressed) && (fNevBuf==1))) {
589  return ReadBasketBuffersUncompressedCase();
590  }
591 
592  // Optional monitor for zip time profiling.
593  Double_t start = 0;
594  if (R__unlikely(gPerfStats)) {
595  start = TTimeStamp();
596  }
597 
598  memcpy(rawUncompressedBuffer, rawCompressedBuffer, fKeylen);
599  char *rawUncompressedObjectBuffer = rawUncompressedBuffer+fKeylen;
600  UChar_t *rawCompressedObjectBuffer = (UChar_t*)rawCompressedBuffer+fKeylen;
601  Int_t nin, nbuf;
602  Int_t nout = 0, noutot = 0, nintot = 0;
603 
604  // Unzip all the compressed objects in the compressed object buffer.
605  while (1) {
606  // Check the header for errors.
607  if (R__unlikely(R__unzip_header(&nin, rawCompressedObjectBuffer, &nbuf) != 0)) {
608  Error("ReadBasketBuffers", "Inconsistency found in header (nin=%d, nbuf=%d)", nin, nbuf);
609  break;
610  }
611  if (R__unlikely(oldCase && (nin > fObjlen || nbuf > fObjlen))) {
612  //buffer was very likely not compressed in an old version
613  memcpy(rawUncompressedBuffer+fKeylen, rawCompressedObjectBuffer+fKeylen, fObjlen);
614  goto AfterBuffer;
615  }
616 
617  R__unzip(&nin, rawCompressedObjectBuffer, &nbuf, (unsigned char*) rawUncompressedObjectBuffer, &nout);
618  if (!nout) break;
619  noutot += nout;
620  nintot += nin;
621  if (noutot >= fObjlen) break;
622  rawCompressedObjectBuffer += nin;
623  rawUncompressedObjectBuffer += nout;
624  }
625 
626  // Make sure the uncompressed numbers are consistent with header.
627  if (R__unlikely(noutot != fObjlen)) {
628  Error("ReadBasketBuffers", "fNbytes = %d, fKeylen = %d, fObjlen = %d, noutot = %d, nout=%d, nin=%d, nbuf=%d", fNbytes,fKeylen,fObjlen, noutot,nout,nin,nbuf);
629  fBranch->GetTree()->IncrementTotalBuffers(fBufferSize);
630  return 1;
631  }
632  len = fObjlen+fKeylen;
633  TVirtualPerfStats* temp = gPerfStats;
634  if (fBranch->GetTree()->GetPerfStats() != 0) gPerfStats = fBranch->GetTree()->GetPerfStats();
635  if (R__unlikely(gPerfStats)) {
636  gPerfStats->UnzipEvent(fBranch->GetTree(),pos,start,nintot,fObjlen);
637  }
638  gPerfStats = temp;
639  } else {
640  // Nothing is compressed - copy over wholesale.
641  memcpy(rawUncompressedBuffer, rawCompressedBuffer, len);
642  }
643 
644 AfterBuffer:
645 
646  fBranch->GetTree()->IncrementTotalBuffers(fBufferSize);
647 
648  // Read offsets table if needed.
649  // If there's no EntryOffsetLen in the branch -- or the fEntryOffset is marked to be calculated-on-demand --
650  // then we skip reading out.
651  if (!fBranch->GetEntryOffsetLen() || (fEntryOffset == reinterpret_cast<Int_t *>(-1))) {
652  return 0;
653  }
654  // At this point, we're required to read out an offset array.
655  ResetEntryOffset(); // TODO: every basket, we reset the offset array. Is this necessary?
656  // Could we instead switch to std::vector?
657  fBufferRef->SetBufferOffset(fLast);
658  fBufferRef->ReadArray(fEntryOffset);
659  if (R__unlikely(!fEntryOffset)) {
660  fEntryOffset = new Int_t[fNevBuf+1];
661  fEntryOffset[0] = fKeylen;
662  Warning("ReadBasketBuffers","basket:%s has fNevBuf=%d but fEntryOffset=0, pos=%lld, len=%d, fNbytes=%d, fObjlen=%d, trying to repair",GetName(),fNevBuf,pos,len,fNbytes,fObjlen);
663  return 0;
664  }
665  if (fIOBits & static_cast<UChar_t>(TBasket::EIOBits::kGenerateOffsetMap)) {
666  // In this case, we cannot regenerate the offset array at runtime -- but we wrote out an array of
667  // sizes instead of offsets (as sizes compress much better).
668  fEntryOffset[0] = fKeylen;
669  for (Int_t idx = 1; idx < fNevBuf + 1; idx++) {
670  fEntryOffset[idx] += fEntryOffset[idx - 1];
671  }
672  }
673  fReadEntryOffset = kTRUE;
674  // Read the displacement array, if any.
675  delete [] fDisplacement;
676  fDisplacement = 0;
677  if (fBufferRef->Length() != len) {
678  // There is more data in the buffer! It is the displacement
679  // array. If len is less than TBuffer::kMinimalSize the actual
680  // size of the buffer is too large, so we cannot use
681  // fBufferRef->BufferSize().
682  fBufferRef->ReadArray(fDisplacement);
683  }
684 
685  return 0;
686 }
687 
688 ////////////////////////////////////////////////////////////////////////////////
689 /// Read the basket key (header) from the file.
690 ///
691 /// Read the first bytes of a logical record starting at position pos;
692 /// return the record length (first 4 bytes of the record).
693 
694 Int_t TBasket::ReadBasketBytes(Long64_t pos, TFile *file)
695 {
696  const Int_t len = 128;
697  char buffer[len];
698  Int_t keylen;
699  file->GetRecordHeader(buffer, pos,len, fNbytes, fObjlen, keylen);
700  fKeylen = keylen;
701  return fNbytes;
702 }
703 
704 ////////////////////////////////////////////////////////////////////////////////
705 /// Disown all references to the internal buffer - some other object likely now
706 /// owns it.
707 ///
708 /// This TBasket is now useless and invalid until it is told to adopt a buffer.
709 void TBasket::DisownBuffer()
710 {
711  fBufferRef = NULL;
712 }
713 
714 
715 ////////////////////////////////////////////////////////////////////////////////
716 /// Adopt a buffer from an external entity
717 void TBasket::AdoptBuffer(TBuffer *user_buffer)
718 {
719  delete fBufferRef;
720  fBufferRef = user_buffer;
721 }
722 
723 
724 ////////////////////////////////////////////////////////////////////////////////
725 /// Reset the basket to the starting state, i.e. as it was after calling
726 /// the constructor (and potentially attaching a TBuffer).
727 /// Reduce memory used by fEntryOffset and the TBuffer if needed.
728 
729 void TBasket::Reset()
730 {
731  // By default, we don't reallocate.
732  fResetAllocation = false;
733 #ifdef R__TRACK_BASKET_ALLOC_TIME
734  fResetAllocationTime = 0;
735 #endif
736 
737  // Name, Title, fClassName, fBranch
738  // stay the same.
739 
740  // Downsize the buffer if needed.
741  // See if our current buffer size is significantly larger (>2x) than the historical average.
742  // If so, try decreasing it at this flush boundary to closer to the size from OptimizeBaskets
743  // (or this historical average).
744  Int_t curSize = fBufferRef->BufferSize();
745  // fBufferLen at this point is already reset, so use indirect measurements
746  Int_t curLen = (GetObjlen() + GetKeylen());
747  Long_t newSize = -1;
748  if (curSize > 2*curLen)
749  {
750  Long_t curBsize = fBranch->GetBasketSize();
751  if (curSize > 2*curBsize ) {
752  Long_t avgSize = (Long_t)(fBranch->GetTotBytes() / (1+fBranch->GetWriteBasket())); // Average number of bytes per basket so far
753  if (curSize > 2*avgSize) {
754  newSize = curBsize;
755  if (curLen > newSize) {
756  newSize = curLen;
757  }
758  if (avgSize > newSize) {
759  newSize = avgSize;
760  }
761  newSize = newSize + 512 - newSize%512; // Wiggle room and alignment (512 is same as in OptimizeBaskets)
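 // e.g. a newSize of 1000 becomes 1000 + 512 - 488 = 1024.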
762  }
763  }
764  }
765  // If fBufferRef grew since we last saw it, shrink it to "target memory ratio" of the occupied size
766  // This discourages us from having poorly-occupied buffers on branches with little variability.
767  //
768  // Does not help protect against a burst in event sizes, but does help in the cases where the basket
769  // size jumps from 4MB to 8MB while filling the basket, but we only end up utilizing 4.1MB.
770  //
771  // The above code block is meant to protect against extremely large events.
772 
773  Float_t target_mem_ratio = fBranch->GetTree()->GetTargetMemoryRatio();
774  Int_t max_size = TMath::Max(fLastWriteBufferSize[0], std::max(fLastWriteBufferSize[1], fLastWriteBufferSize[2]));
775  Int_t target_size = static_cast<Int_t>(target_mem_ratio * Float_t(max_size));
776  if (max_size && (curSize > target_size) && (newSize == -1)) {
777  newSize = target_size;
778  newSize = newSize + 512 - newSize % 512; // Wiggle room and alignment, as above.
779  // We only bother with a resize if it saves 8KB (two normal memory pages).
780  if ((newSize > curSize - 8 * 1024) ||
781  (static_cast<Float_t>(curSize) / static_cast<Float_t>(newSize) < target_mem_ratio)) {
782  newSize = -1;
783  } else if (gDebug > 0) {
784  Info("Reset", "Resizing to %ld bytes (was %d); last three sizes were [%d, %d, %d].", newSize, curSize,
785  fLastWriteBufferSize[0], fLastWriteBufferSize[1], fLastWriteBufferSize[2]);
786  }
787  }
788 
789  if (newSize != -1) {
790  fResetAllocation = true;
791 #ifdef R__TRACK_BASKET_ALLOC_TIME
792  std::chrono::time_point<std::chrono::system_clock> start, end;
793  start = std::chrono::high_resolution_clock::now();
794 #endif
795  fBufferRef->Expand(newSize,kFALSE); // Expand without copying the existing data.
796 #ifdef R__TRACK_BASKET_ALLOC_TIME
797  end = std::chrono::high_resolution_clock::now();
798  auto us = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
799  fResetAllocationTime = us.count();
800 #endif
801  }
802 
803  // Record the actual occupied size of the buffer.
804  fLastWriteBufferSize[fNextBufferSizeRecord] = curLen;
805  fNextBufferSizeRecord = (fNextBufferSizeRecord + 1) % 3;
806 
807  TKey::Reset();
808 
809  Int_t newNevBufSize = fBranch->GetEntryOffsetLen();
810  if (newNevBufSize==0) {
811  ResetEntryOffset();
812  } else if ((newNevBufSize != fNevBufSize) || (fEntryOffset <= reinterpret_cast<Int_t *>(-1))) {
813  ResetEntryOffset();
814  fEntryOffset = new Int_t[newNevBufSize];
815  }
816  fNevBufSize = newNevBufSize;
817 
818  fNevBuf = 0;
819  Int_t *storeEntryOffset = fEntryOffset;
820  fEntryOffset = 0;
821  Int_t *storeDisplacement = fDisplacement;
822  fDisplacement= 0;
823  fBuffer = 0;
824 
825  fBufferRef->Reset();
826  fBufferRef->SetWriteMode();
827 
828  fHeaderOnly = kTRUE;
829  fLast = 0; //Must initialize before calling Streamer()
830 
831  Streamer(*fBufferRef);
832 
833  fKeylen = fBufferRef->Length();
834  fObjlen = fBufferSize - fKeylen;
835  fLast = fKeylen;
836  fBuffer = 0;
837  fHeaderOnly = kFALSE;
838  fDisplacement= storeDisplacement;
839  fEntryOffset = storeEntryOffset;
840  if (fNevBufSize) {
841  for (Int_t i=0;i<fNevBufSize;i++) fEntryOffset[i] = 0;
842  }
843 }
844 
845 ////////////////////////////////////////////////////////////////////////////////
846 /// Set read mode of basket.
847 
848 void TBasket::SetReadMode()
849 {
850  fLast = fBufferRef->Length();
851  fBufferRef->SetReadMode();
852 }
853 
854 ////////////////////////////////////////////////////////////////////////////////
855 /// Set write mode of basket.
856 
857 void TBasket::SetWriteMode()
858 {
859  fBufferRef->SetWriteMode();
860  fBufferRef->SetBufferOffset(fLast);
861 }
862 
863 ////////////////////////////////////////////////////////////////////////////////
864 /// Stream a class object.
865 
866 void TBasket::Streamer(TBuffer &b)
867 {
868  // As in TBranch::GetBasket, this is used as a half-hearted measure to suppress
869  // the error reporting when many failures occur.
870  static std::atomic<Int_t> nerrors(0);
871 
872  char flag;
873  if (b.IsReading()) {
874  TKey::Streamer(b); //this must be first
875  Version_t v = b.ReadVersion();
876  b >> fBufferSize;
877  // NOTE: we now use the upper-bit of the fNevBufSize to see if we have serialized any of the
878  // optional IOBits. If that bit is set, we immediately read out the IOBits; to replace this
879  // (minimal) safeguard against corruption, we will set aside the upper-bit of fIOBits to do
880  // the same thing (the fact this bit is reserved is tested in the unit tests). If there is
881  // someday a need for more than 7 IOBits, we'll widen the field using the same trick.
882  //
883  // We like to keep this safeguard because we immediately will allocate a buffer based on
884  // the value of fNevBufSize -- and would like to avoid wildly inappropriate allocations.
885  b >> fNevBufSize;
886  if (fNevBufSize < 0) {
887  fNevBufSize = -fNevBufSize;
888  b >> fIOBits;
889  if (!fIOBits || (fIOBits & (1 << 7))) {
890  Error("TBasket::Streamer",
891  "The value of fNevBufSize (%d) or fIOBits (%d) is incorrect ; setting the buffer to a zombie.",
892  -fNevBufSize, fIOBits);
893  MakeZombie();
894  fNevBufSize = 0;
895  } else if (fIOBits && (fIOBits & ~static_cast<Int_t>(EIOBits::kSupported))) {
896  nerrors++;
897  if (nerrors < 10) {
898  Error("Streamer", "The value of fIOBits (%s) contains unknown flags (supported flags "
899  "are %s), indicating this was written with a newer version of ROOT "
900  "utilizing critical IO features this version of ROOT does not support."
901  " Refusing to deserialize.",
902  std::bitset<32>(static_cast<Int_t>(fIOBits)).to_string().c_str(),
903  std::bitset<32>(static_cast<Int_t>(EIOBits::kSupported)).to_string().c_str());
904  } else if (nerrors == 10) {
905  Error("Streamer", "Maximum number of errors has been reported; disabling further messages"
906  "from this location until the process exits.");
907  }
908  fNevBufSize = 0;
909  MakeZombie();
910  }
911  }
912  b >> fNevBuf;
913  b >> fLast;
914  b >> flag;
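 // The flag byte encodes the layout of what follows (see the write path below):
 // base 1 = an entry-offset array was written, base 2 = it was not,
 // +10 = the basket data buffer follows, +40 = a displacement array follows,
 // +80 = the entry offsets were omitted and must be regenerated when reading.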
915  if (fLast > fBufferSize) fBufferSize = fLast;
916  Bool_t mustGenerateOffsets = false;
917  if (flag >= 80) {
918  mustGenerateOffsets = true;
919  flag -= 80;
920  }
921  if (!mustGenerateOffsets && flag && (flag % 10 != 2)) {
922  ResetEntryOffset();
923  fEntryOffset = new Int_t[fNevBufSize];
924  if (fNevBuf) b.ReadArray(fEntryOffset);
925  if (20<flag && flag<40) {
926  for(int i=0; i<fNevBuf; i++){
927  fEntryOffset[i] &= ~kDisplacementMask;
928  }
929  }
930  if (flag>40) {
931  fDisplacement = new Int_t[fNevBufSize];
932  b.ReadArray(fDisplacement);
933  }
934  } else if (mustGenerateOffsets) {
935  // We currently believe that in all cases when offsets can be generated, then the
936  // displacement array must be zero.
937  assert(flag <= 40);
938  fEntryOffset = reinterpret_cast<Int_t *>(-1);
939  }
940  if (flag == 1 || flag > 10) {
941  fBufferRef = new TBufferFile(TBuffer::kRead,fBufferSize);
942  fBufferRef->SetParent(b.GetParent());
943  char *buf = fBufferRef->Buffer();
944  if (v > 1) b.ReadFastArray(buf,fLast);
945  else b.ReadArray(buf);
946  fBufferRef->SetBufferOffset(fLast);
947  // This is now done in the TBranch streamer since fBranch might not
948  // yet be set correctly.
949  // fBranch->GetTree()->IncrementTotalBuffers(fBufferSize);
950  }
951  } else {
952 
953  TKey::Streamer(b); //this must be first
954  b.WriteVersion(TBasket::IsA());
955  if (fBufferRef) {
956  Int_t curLast = fBufferRef->Length();
957  if (!fHeaderOnly && !fSeekKey && curLast > fLast) fLast = curLast;
958  }
959  if (fLast > fBufferSize) fBufferSize = fLast;
960 
961  b << fBufferSize;
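 // A negative fNevBufSize on disk signals that the optional fIOBits byte follows
 // (see the corresponding check in the read path above).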
962  if (fIOBits) {
963  b << -fNevBufSize;
964  b << fIOBits;
965  } else {
966  b << fNevBufSize;
967  }
968  b << fNevBuf;
969  b << fLast;
970  Bool_t mustGenerateOffsets = fEntryOffset && fNevBuf &&
971  (fIOBits & static_cast<UChar_t>(TBasket::EIOBits::kGenerateOffsetMap)) &&
972  CanGenerateOffsetArray();
973  // We currently believe that in all cases when offsets can be generated, then the
974  // displacement array must be zero.
975  assert(!mustGenerateOffsets || fDisplacement == nullptr);
976  if (fHeaderOnly) {
977  flag = mustGenerateOffsets ? 80 : 0;
978  b << flag;
979  } else {
980  // On return from this function, we are guaranteed that fEntryOffset
981  // is either a valid pointer or nullptr.
982  if (fNevBuf) {
983  GetEntryOffset();
984  }
985  flag = 1;
986  if (!fNevBuf || !fEntryOffset)
987  flag = 2;
988  if (fBufferRef) flag += 10;
989  if (fDisplacement) flag += 40;
990  // Test if we can skip writing out the offset map.
991  if (mustGenerateOffsets) {
992  flag += 80;
993  }
994  b << flag;
995 
996  if (!mustGenerateOffsets && fEntryOffset && fNevBuf) {
997  b.WriteArray(fEntryOffset, fNevBuf);
998  if (fDisplacement) b.WriteArray(fDisplacement, fNevBuf);
999  }
1000  if (fBufferRef) {
1001  char *buf = fBufferRef->Buffer();
1002  b.WriteFastArray(buf, fLast);
1003  }
1004  }
1005  }
1006 }
1007 
1008 ////////////////////////////////////////////////////////////////////////////////
1009 /// Update basket header and EntryOffset table.
1010 
1011 void TBasket::Update(Int_t offset, Int_t skipped)
1012 {
1013  Int_t *entryOffset = GetEntryOffset();
1014  if (entryOffset) {
1015  if (fNevBuf+1 >= fNevBufSize) {
1016  Int_t newsize = TMath::Max(10,2*fNevBufSize);
1017  Int_t *newoff = TStorage::ReAllocInt(fEntryOffset, newsize,
1018  fNevBufSize);
1019  if (fDisplacement) {
1020  Int_t *newdisp = TStorage::ReAllocInt(fDisplacement, newsize,
1021  fNevBufSize);
1022  fDisplacement = newdisp;
1023  }
1024  fEntryOffset = newoff;
1025  fNevBufSize = newsize;
1026 
1027  //Update branch only for the first 10 baskets
1028  if (fBranch->GetWriteBasket() < 10) {
1029  fBranch->SetEntryOffsetLen(newsize);
1030  }
1031  }
1032  fEntryOffset[fNevBuf] = offset;
1033 
1034  if (skipped!=offset && !fDisplacement){
1035  fDisplacement = new Int_t[fNevBufSize];
1036  for (Int_t i = 0; i<fNevBufSize; i++) fDisplacement[i] = fEntryOffset[i];
1037  }
1038  if (fDisplacement) {
1039  fDisplacement[fNevBuf] = skipped;
1040  fBufferRef->SetBufferDisplacement(skipped);
1041  }
1042  }
1043 
1044  fNevBuf++;
1045 }
1046 
1047 ////////////////////////////////////////////////////////////////////////////////
1048 /// Write buffer of this basket on the current file.
1049 ///
1050 /// The function returns the number of bytes committed to memory.
1051 /// If a write error occurs, the number of bytes returned is -1.
1052 /// If no data are written, the number of bytes returned is 0.
1053 
1054 Int_t TBasket::WriteBuffer()
1055 {
1056  const Int_t kWrite = 1;
1057 
1058  TFile *file = fBranch->GetFile(kWrite);
1059  if (!file) return 0;
1060  if (!file->IsWritable()) {
1061  return -1;
1062  }
1063  fMotherDir = file; // fBranch->GetDirectory();
1064 
1065  // This mutex prevents multiple TBasket::WriteBuffer invocations from interacting
1066  // with the underlying TFile at once - TFile is assumed to *not* be thread-safe.
1067  //
1068  // The only parallelism we'd like to exploit (right now!) is the compression
1069  // step - everything else should be serialized at the TFile level.
1070 #ifdef R__USE_IMT
1071  std::unique_lock<std::mutex> sentry(file->fWriteMutex);
1072 #endif // R__USE_IMT
1073 
1074  if (R__unlikely(fBufferRef->TestBit(TBufferFile::kNotDecompressed))) {
1075  // Read the basket information that was saved inside the buffer.
1076  Bool_t writing = fBufferRef->IsWriting();
1077  fBufferRef->SetReadMode();
1078  fBufferRef->SetBufferOffset(0);
1079 
1080  Streamer(*fBufferRef);
1081  if (writing) fBufferRef->SetWriteMode();
1082  Int_t nout = fNbytes - fKeylen;
1083 
1084  fBuffer = fBufferRef->Buffer();
1085 
1086  Create(nout,file);
1087  fBufferRef->SetBufferOffset(0);
1088  fHeaderOnly = kTRUE;
1089 
1090  Streamer(*fBufferRef); //write key itself again
1091  int nBytes = WriteFileKeepBuffer();
1092  fHeaderOnly = kFALSE;
1093  return nBytes>0 ? fKeylen+nout : -1;
1094  }
1095 
1096  // Transfer fEntryOffset table at the end of fBuffer.
1097  fLast = fBufferRef->Length();
1098  Int_t *entryOffset = GetEntryOffset();
1099  if (entryOffset) {
1100  Bool_t hasOffsetBit = fIOBits & static_cast<UChar_t>(TBasket::EIOBits::kGenerateOffsetMap);
1101  if (!CanGenerateOffsetArray()) {
1102  // If we have set the offset map flag, but cannot dynamically generate the map, then
1103  // we should at least convert the offset array to a size array. Note that we always
1104  // write out (fNevBuf+1) entries to match the original case.
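 // e.g. (hypothetical values) offsets {fKeylen, fKeylen+12, fKeylen+20} are written as
 // sizes {0, 12, 8}; the loop further below converts them back to offsets afterwards.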
1105  if (hasOffsetBit) {
1106  for (Int_t idx = fNevBuf; idx > 0; idx--) {
1107  entryOffset[idx] -= entryOffset[idx - 1];
1108  }
1109  entryOffset[0] = 0;
1110  }
1111  fBufferRef->WriteArray(entryOffset, fNevBuf + 1);
1112  // Convert back to offset format: keeping both sizes and offsets in-memory were considered,
1113  // but it seems better to use CPU than memory.
1114  if (hasOffsetBit) {
1115  entryOffset[0] = fKeylen;
1116  for (Int_t idx = 1; idx < fNevBuf + 1; idx++) {
1117  entryOffset[idx] += entryOffset[idx - 1];
1118  }
1119  }
1120  } else if (!hasOffsetBit) { // In this case, write out as normal
1121  fBufferRef->WriteArray(entryOffset, fNevBuf + 1);
1122  }
1123  if (fDisplacement) {
1124  fBufferRef->WriteArray(fDisplacement, fNevBuf + 1);
1125  delete[] fDisplacement;
1126  fDisplacement = 0;
1127  }
1128  }
1129 
1130  Int_t lbuf, nout, noutot, bufmax, nzip;
1131  lbuf = fBufferRef->Length();
1132  fObjlen = lbuf - fKeylen;
1133 
1134  fHeaderOnly = kTRUE;
1135  fCycle = fBranch->GetWriteBasket();
1136  Int_t cxlevel = fBranch->GetCompressionLevel();
1137  ROOT::RCompressionSetting::EAlgorithm::EValues cxAlgorithm = static_cast<ROOT::RCompressionSetting::EAlgorithm::EValues>(fBranch->GetCompressionAlgorithm());
1138  if (cxlevel > 0) {
1139  Int_t nbuffers = 1 + (fObjlen - 1) / kMAXZIPBUF;
1140  Int_t buflen = fKeylen + fObjlen + 9 * nbuffers + 28; //add 28 bytes in case object is placed in a deleted gap
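 // Each compressed chunk is preceded by a 9-byte ROOT zip header, hence the 9 * nbuffers term.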
1141  InitializeCompressedBuffer(buflen, file);
1142  if (!fCompressedBufferRef) {
1143  Warning("WriteBuffer", "Unable to allocate the compressed buffer");
1144  return -1;
1145  }
1146  fCompressedBufferRef->SetWriteMode();
1147  fBuffer = fCompressedBufferRef->Buffer();
1148  char *objbuf = fBufferRef->Buffer() + fKeylen;
1149  char *bufcur = &fBuffer[fKeylen];
1150  noutot = 0;
1151  nzip = 0;
1152  for (Int_t i = 0; i < nbuffers; ++i) {
1153  if (i == nbuffers - 1) bufmax = fObjlen - nzip;
1154  else bufmax = kMAXZIPBUF;
1155  // Compress the buffer. Note that we allow multiple TBasket compressions to occur at once
1156  // for a given TFile: that's because the compression buffer when we use IMT is no longer
1157  // shared amongst several threads.
1158 #ifdef R__USE_IMT
1159  sentry.unlock();
1160 #endif // R__USE_IMT
1161  // NOTE this is declared with C linkage, so it shouldn't throw. Also, when
1162  // USE_IMT is defined, we are guaranteed that the compression buffer is unique per-branch.
1163  // (see fCompressedBufferRef in constructor).
1164  R__zipMultipleAlgorithm(cxlevel, &bufmax, objbuf, &bufmax, bufcur, &nout, cxAlgorithm);
1165 #ifdef R__USE_IMT
1166  sentry.lock();
1167 #endif // R__USE_IMT
1168 
1169  // test if buffer has really been compressed. In case of small buffers
1170  // when the buffer contains random data, it may happen that the compressed
1171  // buffer is larger than the input. In this case, we write the original uncompressed buffer
1172  if (nout == 0 || nout >= fObjlen) {
1173  nout = fObjlen;
1174  // We used to delete fBuffer here, we no longer want to since
1175  // the buffer (held by fCompressedBufferRef) might be re-used later.
1176  fBuffer = fBufferRef->Buffer();
1177  Create(fObjlen,file);
1178  fBufferRef->SetBufferOffset(0);
1179 
1180  Streamer(*fBufferRef); //write key itself again
1181  if ((nout+fKeylen)>buflen) {
1182  Warning("WriteBuffer","Possible memory corruption due to compression algorithm, wrote %d bytes past the end of a block of %d bytes. fNbytes=%d, fObjLen=%d, fKeylen=%d",
1183  (nout+fKeylen-buflen),buflen,fNbytes,fObjlen,fKeylen);
1184  }
1185  goto WriteFile;
1186  }
1187  bufcur += nout;
1188  noutot += nout;
1189  objbuf += kMAXZIPBUF;
1190  nzip += kMAXZIPBUF;
1191  }
1192  nout = noutot;
1193  Create(noutot,file);
1194  fBufferRef->SetBufferOffset(0);
1195 
1196  Streamer(*fBufferRef); //write key itself again
1197  memcpy(fBuffer,fBufferRef->Buffer(),fKeylen);
1198  } else {
1199  fBuffer = fBufferRef->Buffer();
1200  Create(fObjlen,file);
1201  fBufferRef->SetBufferOffset(0);
1202 
1203  Streamer(*fBufferRef); //write key itself again
1204  nout = fObjlen;
1205  }
1206 
1207 WriteFile:
1208  Int_t nBytes = WriteFileKeepBuffer();
1209  fHeaderOnly = kFALSE;
1210  return nBytes>0 ? fKeylen+nout : -1;
1211 }