Start line:  
End line:  

Snippet Preview

Snippet HTML Code

Stack Overflow Questions
   package com.fasterxml.storemate.store.impl;
   
import java.io.*;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
  
/**
 * Full store front-end implementation.
 */
  
  public class StorableStoreImpl extends AdminStorableStore
  {
    
No real seed used for Murmur3/32.
  
      private final static int HASH_SEED = .;

    
    /**
     * We will partition key space in 64 slices for locking purposes;
     * needs to be high enough to make lock contention unlikely, but
     * shouldn't be too high to waste resources on locks themselves.
     */
  
      private final static int LOCK_PARTITIONS = 64;
  
  
      
      private final Logger LOG = LoggerFactory.getLogger(getClass());
  
      /*
      /**********************************************************************
      /* Simple config, compression/inline settings
      /**********************************************************************
       */
  
      protected final boolean _compressionEnabled;
      protected final int _maxInlinedStorageSize;
  
      protected final int _minCompressibleSize;
      protected final int _maxGZIPCompressibleSize;
  
      protected final int _minBytesToStream;
      
      protected final boolean _requireChecksumForPreCompressed;
      
      /*
      /**********************************************************************
      /* External helper objects
      /**********************************************************************
       */
  
      protected final TimeMaster _timeMaster;
  
      protected final FileManager _fileManager;
    
    
    /**
     * Backend store implementation that abstracts out differences between
     * underlying physical storage libraries.
     */
  
      protected final StoreBackend _backend;
  
      /*
      /**********************************************************************
      /* Internal helper objects
      /**********************************************************************
       */
    
    
    /**
     * Helper object that knows how to encode and decode little bit of
     * metadata that we use.
     */
  
      protected final StorableConverter _storableConverter;

    
    /**
     * We will also need a simple form of locking to make 'read+write'
     * combinations atomic without requiring backend store to have real
     * transactions. This is sufficient only because we know the specific
     * usage pattern, and the problem to resolve: it is not a general
     * replacement for real transactions.
     */
 
     protected final StorePartitions _partitions;
    
    
We can reuse read buffers as they are somewhat costly to allocate, reallocate all the time. Buffer used needs to be big enough to contain all conceivably inlineable cases (considering possible compression). Currently we'll use 64k as the cut-off point.
 
 
     /*
     /**********************************************************************
     /* Store status
     /**********************************************************************
      */
 
     protected final AtomicBoolean _closed = new AtomicBoolean(false);
 
     /*
     /**********************************************************************
     /* Life-cycle
     /**********************************************************************
      */
 
     public StorableStoreImpl(StoreConfig configStoreBackend physicalStore,
             TimeMaster timeMasterFileManager fileManager)
     {
          = config.compressionEnabled;
          = config.minUncompressedSizeForCompression;
          = config.maxUncompressedSizeForGZIP;
          = config.maxInlinedStorageSize;
          = config.minPayloadForStreaming;
         
          = config.requireChecksumForPreCompressed;
 
          = physicalStore;
          = fileManager;
          = timeMaster;
          = physicalStore.getStorableConverter();
 
         // May want to make this configurable in future...
         // 'true' means "fair", minor overhead, prevents potential starvation
          = new StorePartitions(true);
     }
 
     @Override
     public void start()
     {
         .start();
     }
     
     @Override
     public void stop()
     {
         if (!.getAndSet(true)) {
             .stop();
         }
     }
     
     /*
     /**********************************************************************
     /* API, simple accessors for state, helper objects
     /**********************************************************************
      */
 
     @Override
     public boolean isClosed() {
         return .get();
     }
 
     @Override
     public FileManager getFileManager() {
         return ;
     }
 
     @Override
     public TimeMaster getTimeMaster() {
         return ;
     }
     
     /*
     /**********************************************************************
     /* API, metadata access
     /**********************************************************************
      */
 
     @Override
     public long getEntryCount()
     {
         _checkClosed();
         return .getEntryCount();
     }
 
     @Override
     public long getIndexedCount()
     {
         _checkClosed();
         return .getIndexedCount();
     }
 
     @Override
     public long getOldestInFlightTimestamp() {
         return .getOldestInFlightTimestamp();
     }
     
     /*
     /**********************************************************************
     /* API, data reads
     /**********************************************************************
      */
 
     @Override
     public boolean hasEntry(StorableKey key)
     {
         _checkClosed();
         return .hasEntry(key);
     }
 
     @Override
     public Storable findEntry(StorableKey keythrows StoreException
     {
         _checkClosed();
         return .findEntry(key);
     }
 
     /*
     /**********************************************************************
     /* API, entry creation
     /**********************************************************************
      */
     
     @Override
     public StorableCreationResult insert(StorableKey keyInputStream input,
             StorableCreationMetadata stdMetadataByteContainer customMetadata)
         throws IOExceptionStoreException
     {
         _checkClosed();
         return _putEntry(keyinputstdMetadatacustomMetadata);
     }
 
     @Override
     public StorableCreationResult insert(StorableKey keyByteContainer input,
             StorableCreationMetadata stdMetadataByteContainer customMetadata)
         throws IOExceptionStoreException
     {
         _checkClosed();
         return _putEntry(keyinputstdMetadatacustomMetadata);
     }
     
     @Override
     public StorableCreationResult upsert(StorableKey keyInputStream input,
             StorableCreationMetadata stdMetadataByteContainer customMetadata,
             boolean removeOldDataFile)
         throws IOExceptionStoreException
     {
         _checkClosed();
         StorableCreationResult result = _putEntry(keyinputstdMetadatacustomMetadata);
         if (removeOldDataFile) {
             Storable old = result.getPreviousEntry();
             if (old != null) {
                 _deleteBackingFile(keyold.getExternalFile());
             }
         }
         return result;
     }
 
     @Override
     public StorableCreationResult upsert(StorableKey keyByteContainer input,
             StorableCreationMetadata stdMetadataByteContainer customMetadata,
             boolean removeOldDataFile)
         throws IOExceptionStoreException
     {
         _checkClosed();
         StorableCreationResult result = _putEntry(keyinputstdMetadatacustomMetadata);
         if (removeOldDataFile) {
             Storable old = result.getPreviousEntry();
             if (old != null) {
                 _deleteBackingFile(keyold.getExternalFile());
             }
         }
         return result;
     }
 
     @Override
             StorableCreationMetadata stdMetadataByteContainer customMetadata,
             boolean removeOldDataFileOverwriteChecker checker)
         throws IOExceptionStoreException
     {
         _checkClosed();
         StorableCreationResult result = _putEntry(keyinputstdMetadatacustomMetadatachecker);
         if (removeOldDataFile) {
             Storable old = result.getPreviousEntry();
             if (old != null) {
                 _deleteBackingFile(keyold.getExternalFile());
             }
         }
         return result;
     }
 
     @Override
             StorableCreationMetadata stdMetadataByteContainer customMetadata,
             boolean removeOldDataFileOverwriteChecker checker)
         throws IOExceptionStoreException
     {
         _checkClosed();
         StorableCreationResult result = _putEntry(keyinputstdMetadatacustomMetadatachecker);
         if (removeOldDataFile) {
             Storable old = result.getPreviousEntry();
             if (old != null) {
                 _deleteBackingFile(keyold.getExternalFile());
             }
         }
         return result;
     }
     
     /*
     /**********************************************************************
     /* Internal methods for entry creation, first level
     /**********************************************************************
      */
    
    
Method for putting an entry in the database; depending on arguments, either overwriting existing entry (if overwrites allowed), or failing insertion.

Parameters:
stdMetadata Standard metadata, which may be modified by this method, to "fill in" optional or missing data.
input Input stream used for reading the content. NOTE: method never closes this stream
 
     protected StorableCreationResult _putEntry(StorableKey keyInputStream input,
             StorableCreationMetadata stdMetadataByteContainer customMetadata,
             OverwriteChecker allowOverwrites)
         throws IOExceptionStoreException
     {
         /* NOTE: we do NOT want to clone passed-in metadata, because we want
          * to fill in some of optional values, and override others (compression)
          */
         BufferRecycler.Holder bufferHolder = .getHolder();        
         final byte[] readBuffer = bufferHolder.borrowBuffer();
         int len = 0;
 
         try {
             try {
                 len = IOUtil.readFully(inputreadBuffer);
             } catch (IOException e) {
                 throw new StoreException.IO(key"Failed to read payload for key "+key+": "+e.getMessage(), e);
             }
     
             // First things first: verify that compression is what it claims to be:
             final Compression originalCompression = stdMetadata.compression;
             String error = IOUtil.verifyCompression(originalCompressionreadBufferlen);
             if (error != null) {
                 throw new StoreException.Input(key..error);
             }
             if (len < readBuffer.length) { // read it all: we are done with input stream
                 if (originalCompression == null) { // client did not compress, we may try to
                     return _compressAndPutSmallEntry(keystdMetadatacustomMetadata,
                             allowOverwrites, ByteContainer.simple(readBuffer, 0, len));
                 }
                 return _putSmallPreCompressedEntry(keystdMetadatacustomMetadata,
                         allowOverwrites, ByteContainer.simple(readBuffer, 0, len));
             }
             // partial read in buffer, rest from input stream:
             return _putLargeEntry(keystdMetadatacustomMetadata,
                     allowOverwritesreadBufferleninput);
         } finally {
             bufferHolder.returnBuffer(readBuffer);
         }
     }
 
     protected StorableCreationResult _putEntry(StorableKey keyByteContainer input,
             StorableCreationMetadata stdMetadataByteContainer customMetadata,
             OverwriteChecker allowOverwrites)
         throws IOExceptionStoreException
     {
         // First things first: verify that compression is what it claims to be:
         final Compression originalCompression = stdMetadata.compression;
         String error = IOUtil.verifyCompression(originalCompressioninput);
         if (error != null) {
             throw new StoreException.Input(key..error);
         }
         if (originalCompression == null) { // client did not compress, we may try to
             return _compressAndPutSmallEntry(keystdMetadatacustomMetadata,
                     allowOverwritesinput);
         }
         return _putSmallPreCompressedEntry(keystdMetadatacustomMetadata,
                 allowOverwritesinput);
     }
     
     /*
     /**********************************************************************
     /* Internal methods for entry creation, second level
     /**********************************************************************
      */
     
             StorableCreationMetadata metadataByteContainer customMetadata,
             OverwriteChecker allowOverwritesByteContainer data)
         throws IOExceptionStoreException
     {
         final int origLength = data.byteLength();
         // must verify checksum unless we got compressed payload
         // do we insist on checksum? Not if client has not yet compressed it:
         int actualChecksum = _calcChecksum(data);
         final int origChecksum = metadata.contentHash;
         if (origChecksum == .) {
             metadata.contentHash = actualChecksum;
         } else {
             if (origChecksum != actualChecksum) {
                 throw new StoreException.Input(key..,
                         "Incorrect checksum (0x"+Integer.toHexString(origChecksum)
                         +"), calculated to be 0x"+Integer.toHexString(actualChecksum));
             }
         }
         if (_shouldTryToCompress(metadatadata)) {
             byte[] compBytes;
             Compression compression = null;
             try {
                 if (origLength <= ) {
                     compression = .;
                     compBytes = Compressors.gzipCompress(data);
                 } else {
                     compression = .;
                     compBytes = Compressors.lzfCompress(data);
                 }
             } catch (IOException e) {
                 throw new StoreException.IO(key,
                         "Problem when compressing content as "+compression+": "+e.getMessage(), e);
             }
             // if compression would not, like, compress, don't bother:
             if (compBytes.length >= origLength) {
                 compression = null;
             } else {
                 data = ByteContainer.simple(compBytes);
                 metadata.compression = compression;
                 metadata.uncompressedSize = origLength;
                 metadata.storageSize = compBytes.length;
                 metadata.compressedContentHash = _calcChecksum(data);
             }
         }
         metadata.storageSize = data.byteLength();
         return _putSmallEntry(keymetadatacustomMetadataallowOverwritesdata);
     }
 
             StorableCreationMetadata metadataByteContainer customMetadata,
             OverwriteChecker allowOverwritesByteContainer data)
         throws IOExceptionStoreException
     {
         /* !!! TODO: what to do with checksum? Should we require checksum
          *   of raw or compressed entity? (or both); whether to store both;
          *   verify etc...
          */
         final int origChecksum = metadata.contentHash;
         if (origChecksum == .) {
             if () {
                 throw new StoreException.Input(key..,
                         "No checksum for non-compressed data provided for pre-compressed entry");
             }
         }
 
         // 30-Mar-2012, tsaloranta: Alas, we don't really know the length from gzip (and even
         //   from lzf would need to decode to some degree); not worth doing it
 //        metadata.size = -1;
 //        metadata.storageSize = dataLength;
 
         // may get checksum for compressed data, or might not; if not, calculate:
         if (metadata.compression != .) {
             if (metadata.compressedContentHash == .) {
                 metadata.compressedContentHash = _calcChecksum(data);
             }
         }
         return _putSmallEntry(keymetadatacustomMetadataallowOverwritesdata);
     }
 
             StorableCreationMetadata stdMetadataByteContainer customMetadata,
             OverwriteChecker allowOverwritesByteContainer data)
         throws IOExceptionStoreException
     {
         Storable storable;
         final long creationTime;
         
         // inline? Yes if small enough
         if (data.byteLength() <= ) {
             creationTime = .currentTimeMillis();
             storable = .encodeInlined(keycreationTime,
                     stdMetadatacustomMetadatadata);
         } else {
             // otherwise, need to create file and all that fun...
             long fileCreationTime = .currentTimeMillis();
             FileReference fileRef = .createStorageFile(key,
                     stdMetadata.compressionfileCreationTime);
             try {
                 IOUtil.writeFile(fileRef.getFile(), data);
             } catch (IOException e) {
                 // better remove the file, if one exists...
                 fileRef.getFile().delete();
                 throw new StoreException.IO(key,
                         "Failed to write storage file of "+data.byteLength()+" bytes: "+e.getMessage(), e);
             }
             // but modtime better be taken only now, as above may have taken some time (I/O bound)
             creationTime = .currentTimeMillis();
             storable = .encodeOfflined(keycreationTime,
                     stdMetadatacustomMetadatafileRef);
         }
         return _putPartitionedEntry(keycreationTimestdMetadatastorableallowOverwrites);
     }
 
     @SuppressWarnings("resource")
             StorableCreationMetadata stdMetadataByteContainer customMetadata,
             OverwriteChecker allowOverwrites,
             byte[] readBufferint readByteCount,
             InputStream input)
         throws IOExceptionStoreException
     {
         boolean skipCompression;
         Compression comp = stdMetadata.compression;
         
         if (comp != null) { // pre-compressed, or blocked
             skipCompression = true;
             comp = stdMetadata.compression;
         } else {
             if (! || Compressors.isCompressed(readBuffer, 0, readByteCount)) {
                 skipCompression = true;
                 comp = .;
             } else {
                 skipCompression = false;
                 comp = .;
             }
             stdMetadata.compression = comp;
         }
         
         // So: start by creating the result file
         long fileCreationTime = .currentTimeMillis();
         final FileReference fileRef = .createStorageFile(keycompfileCreationTime);
         File storedFile = fileRef.getFile();
         
         OutputStream out = null;
         CountingOutputStream compressedOut;
 
         try {
             if (skipCompression) {
                 compressedOut = null;
                 out = new FileOutputStream(storedFile);
             } else {
                 compressedOut = new CountingOutputStream(new FileOutputStream(storedFile),
                         new IncrementalMurmur3Hasher());
                 out = Compressors.compressingStream(compressedOutcomp);
             }
             out.write(readBuffer, 0, readByteCount);
         } catch (IOException e) {
             try {
                 if (out != null) {
                     out.close();
                 }
             } catch (IOException e2) { }
             throw new StoreException.IO(key"Failed to write initial "+readByteCount+" bytes of file '"+storedFile.getAbsolutePath()+"'"e);
         }
         IncrementalMurmur3Hasher hasher = new IncrementalMurmur3Hasher();        
         hasher.update(readBuffer, 0, readByteCount);
         long copiedBytes = readByteCount;
         
         // and then need to proceed with copying the rest, compressing along the way
         try {
             while (true) {
                 int count;
                 try {
                     count = input.read(readBuffer);
                 } catch (IOException e) { // probably will fail to write response too but...
                     throw new StoreException.IO(key"Failed to read content to store (after "+copiedBytes+" bytes)"e);
                 }
                 if (count < 0) {
                     break;
                 }
                 copiedBytes += count;
                 try {
                     out.write(readBuffer, 0, count);
                 } catch (IOException e) {
                     throw new StoreException.IO(key"Failed to write "+count+" bytes (after "+copiedBytes
                             +") to file '"+storedFile.getAbsolutePath()+"'"e);
                 }
                 hasher.update(readBuffer, 0, count);
             }
         } finally {
             try {
                 out.close();
             } catch (IOException e) { }
         }
         
         // Checksum calculation and storage details differ depending on whether compression is used
         if (skipCompression) {
             // Storage sizes must match, first of all, if provided
             if (stdMetadata.storageSize != copiedBytes && stdMetadata.storageSize >= 0) {
                 throw new StoreException.Input(key..,
                         "Incorrect length for entry; storageSize="+stdMetadata.storageSize
                         +", bytes read: "+copiedBytes);
             }
 
             final int actualHash = _cleanChecksum(hasher.calculateHash());
             stdMetadata.storageSize = copiedBytes;
             if (stdMetadata.compression == .) {
                 if (stdMetadata.contentHash == .) {
                     stdMetadata.contentHash = actualHash;
                 } else if (stdMetadata.contentHash != actualHash) {
                     throw new StoreException.Input(key..,
                             "Incorrect checksum for not-compressed entry ("+copiedBytes+" bytes): got 0x"
                                     +Integer.toHexString(stdMetadata.contentHash)+", calculated to be 0x"
                                     +Integer.toHexString(actualHash));
                 }
             } else { // already compressed
 //                stdMetadata.compressedContentHash = _cleanChecksum(hasher.calculateHash());
                 if (stdMetadata.compressedContentHash == .) {
                     stdMetadata.compressedContentHash = actualHash;
                 } else {
                     if (stdMetadata.compressedContentHash != actualHash) {
                         throw new StoreException.Input(key..,
                                 "Incorrect checksum for "+stdMetadata.compression+" pre-compressed entry ("+copiedBytes
                                 +" bytes): got 0x"
                                 +Integer.toHexString(stdMetadata.compressedContentHash)+", calculated to be 0x"
                                 +Integer.toHexString(actualHash));
                     }
                 }
             }
             // we don't really know the original size, either way:
             stdMetadata.uncompressedSize = 0L;
         } else {
             final int contentHash = _cleanChecksum(hasher.calculateHash());
             final int compressedHash = _cleanChecksum(compressedOut.calculateHash());
             
             stdMetadata.uncompressedSize = copiedBytes;
             stdMetadata.storageSize = compressedOut.count();
             // must verify checksum, if one was offered...
             if (stdMetadata.contentHash == .) {
                 stdMetadata.contentHash = contentHash;
             } else {
                 if (stdMetadata.contentHash != contentHash) {
                     throw new StoreException.Input(key..,
                             "Incorrect checksum for entry ("+copiedBytes+" bytes, compression: "
                             		+stdMetadata.compression+"; comp checksum 0x"+stdMetadata.compressedContentHash
                             		+"): got 0x"+Integer.toHexString(stdMetadata.contentHash)
                             		+", calculated to be 0x"+Integer.toHexString(contentHash));
                 }
             }
             if (stdMetadata.compressedContentHash == .) {
                 stdMetadata.compressedContentHash = compressedHash;
             } else {
                 if (stdMetadata.compressedContentHash != compressedHash) {
                     throw new StoreException.Input(key..,
                             "Incorrect checksum for "+stdMetadata.compression+" compressed entry ("
                             		+stdMetadata.storageSize+"/"+copiedBytes+" bytes): got 0x"
                                 +Integer.toHexString(stdMetadata.compressedContentHash)+", calculated to be 0x"
                                 +Integer.toHexString(compressedHash));
                 }
             }
         }
         long creationTime = .currentTimeMillis();
         Storable storable = .encodeOfflined(keycreationTime,
                 stdMetadatacustomMetadatafileRef);
 
         return _putPartitionedEntry(keycreationTimestdMetadatastorableallowOverwrites);
     }

    

Parameters:
operationTime Timestamp used as the "last-modified" timestamp in metadata; important as it determines last-modified traversal order for synchronization
 
             final long operationTime,
             final StorableCreationMetadata stdMetadataStorable storable,
             final OverwriteChecker allowOverwrites)
         throws IOExceptionStoreException
     {
         StorableCreationResult result = .withLockedPartition(keyoperationTime,
                 new StoreOperationCallback<Storable,StorableCreationResult>() {
                     @Override
                     public StorableCreationResult perform(StorableKey k0,
                             StoreBackend backendStorable s0)
                         throws IOExceptionStoreException
                     {
                         // blind update, insert-only are easy
                         Boolean defaultOk = allowOverwrites.mayOverwrite(k0);
                         if (defaultOk != null) { // depends on entry in question...
                             if (defaultOk.booleanValue()) { // always ok, fine ("upsert")
                                 Storable old = backend.putEntry(k0s0);
                                 return new StorableCreationResult(k0trues0old);
                             }
                             // strict "insert"
                             Storable old = backend.createEntry(k0s0);
                             if (old == null) { // ok, succeeded
                                 return new StorableCreationResult(k0trues0null);
                             }
                             // fail: caller may need to clean up the underlying file
                             return new StorableCreationResult(k0falses0old);
                         }
                         // But if things depend on existence of old entry, or entries, trickier:
                         AtomicReference<StorableoldEntryRef = new AtomicReference<Storable>();                       
                         if (!backend.upsertEntry(k0s0allowOverwritesoldEntryRef)) {
                             // fail due to existing entry
                             return new StorableCreationResult(k0falses0oldEntryRef.get());
                         }
                         return new StorableCreationResult(k0trues0oldEntryRef.get());
                     }
                 },
                 storable);
 
         //_partitions.put(key, stdMetadata, storable, allowOverwrite);
         if (!result.succeeded()) {
             // One piece of clean up: for failed insert, delete backing file, if any
 //            if (!allowOverwrite) {
             // otherwise, may need to delete file that was created
             FileReference ref = stdMetadata.dataFile;
             if (ref != null) {
                 _deleteBackingFile(keyref.getFile());
             }
         }
         return result;
     }
 
     /*
     /**********************************************************************
     /* API, entry deletion
     /**********************************************************************
      */
 
     @Override
             final boolean removeInlinedDatafinal boolean removeExternalData)
         throws IOExceptionStoreException
     {
         _checkClosed();
         final long currentTime = .currentTimeMillis();
         Storable entry = .withLockedPartition(keycurrentTime,
             new ReadModifyOperationCallback<Object,Storable>() {
                 @Override
                 protected Storable perform(StorableKey k0,
                         StoreBackend backendObject argStorable e0)
                     throws IOExceptionStoreException
                 {
                     // First things first: if no entry, nothing to do
                     if (e0 == null) {
                         return null;
                     }
                     return _softDelete(k0e0currentTimeremoveInlinedDataremoveExternalData);
                 }
         }, null);
         return new StorableDeletionResult(keyentry);
     }
     
     @Override
             final boolean removeExternalData)
         throws IOExceptionStoreException
     {
         _checkClosed();
         final long currentTime = .currentTimeMillis();
         Storable entry = .withLockedPartition(keycurrentTime,
             new ReadModifyOperationCallback<Object,Storable>() {
 
                 @Override
                 protected Storable perform(StorableKey k0,
                         StoreBackend backendObject argStorable e0)
                     throws IOExceptionStoreException
                 {                
                     // First things first: if no entry, nothing to do
                     if (e0 == null) {
                         return null;
                     }
                     return _hardDelete(k0e0removeExternalData);
                 }
         }, null);
         return new StorableDeletionResult(keyentry);
     }
 
     /*
     /**********************************************************************
     /* API, public entry iteration methods
     /**********************************************************************
      */
     
     @Override
             StorableKey firstKey)
         throws StoreException {
         return .iterateEntriesByKey(cbfirstKey);
     }
 
     @Override
             StorableKey lastSeen)
         throws StoreException {
         // if we didn't get "lastSeen", same as regular method
         if (lastSeen == null) {
             return .iterateEntriesByKey(cbnull);
         }
         return .iterateEntriesAfterKey(cblastSeen);
     }
     
     @Override
             long firstTimestamp)
         throws StoreException
     {
         return .iterateEntriesByModifiedTime(cbfirstTimestamp);
     }
     
     /*
     /**********************************************************************
     /* API, admin methods (from AdminStorableStore) 
     /**********************************************************************
      */
 
     @Override
     public int getInFlightWritesCount()
     {
         return .getInFlightCount();
     }
     
     @Override
     public long getTombstoneCount(long maxRuntimeMsecs)
         throws StoreException
     {
         final long startTime = .currentTimeMillis();
         final long maxMax = . - startTime;
         final long maxEndTime = startTime + Math.min(maxMaxmaxRuntimeMsecs);
 
         TombstoneCounter counter = new TombstoneCounter(maxEndTime);
         if (.scanEntries(counter) != .) {
             throw new IllegalStateException("getTombstoneCount() run too long (max "+maxRuntimeMsecs
                     +"); failed after "+counter.tombstones+"/"+counter.total+" records");
         }
         return counter.tombstones;
     }
 
     @Override
     public List<StorabledumpEntries(final int maxCountfinal boolean includeDeleted)
         throws StoreException
     {
         final ArrayList<Storableresult = new ArrayList<Storable>();
         if (maxCount > 0) {
             .iterateEntriesByKey(new StorableIterationCallback() {
                 // all keys are fine
                 @Override public IterationAction verifyKey(StorableKey key) { return .; }
                 @Override
                 public IterationAction processEntry(Storable entry) {
                     if (includeDeleted || !entry.isDeleted()) {
                         result.add(entry);
                         if (result.size() >= maxCount) {
                             return .;
                         }
                     }
                     return .;
                 }
             });
         }
         return result;
     }

    
Method for iterating over entries in creation-time order, from the oldest to newest entries.
 
     @Override
     public List<StorabledumpOldestEntries(final int maxCount,
             final long fromTimefinal boolean includeDeleted)
         throws StoreException
     {
         final ArrayList<Storableresult = new ArrayList<Storable>();
         if (maxCount > 0) {
                 // we are fine with all timestamps
                 @Override
                 public IterationAction verifyTimestamp(long timestamp) {
                     return .;
                 }
                 // all keys are fine
                 @Override public IterationAction verifyKey(StorableKey key) {
                     return .;
                 }
                 @Override
                 public IterationAction processEntry(Storable entry) {
                     if (includeDeleted || !entry.isDeleted()) {
                         result.add(entry);
                         if (result.size() >= maxCount) {
                             return .;
                         }
                     }
                     return .;
                 }
             }, fromTime);
         }
         return result;
     }

    
Method for physically deleting specified number of entries, in whatever order entries are stored in the database (not necessarily insertion order)

Returns:
Number of entries deleted
 
     @Override
     public int removeEntries(final int maxToRemove)
         throws IOExceptionStoreException
     {
         int removed = 0;
         if (maxToRemove > 0) {
             StorableCollector collector = new StorableCollector(maxToRemove) {
                 @Override
                 public boolean includeEntry(Storable entry) { // any and all entries
                     return true;
                 }
             };
             for (StorableKey key : collector.getCollected()) {
                 hardDelete(keytrue);
                 ++removed;
             }
         }
         return removed;
     }

    
Helper method only to be called by tests; normal operation should rely on background tombstone cleaning process.

Parameters:
maxToRemove Max number of tombstones to delete
Returns:
Number of tombstones actually deleted
 
     @Override
     public int removeTombstones(final int maxToRemove)
         throws IOExceptionStoreException
     {
         int removed = 0;
         if (maxToRemove > 0) {
             StorableCollector collector = new StorableCollector(maxToRemove) {
                 @Override
                 public boolean includeEntry(Storable entry) {
                     return entry.isDeleted();
                 }
             };
             /* no time limit on tombstone removal. But should we scan (unordered)
              * or iterate?
              */
             .iterateEntriesByKey(collector);
             for (StorableKey key : collector.getCollected()) {
                 hardDelete(keytrue);
                 ++removed;
             }
         }
         return removed;
     }
     
     /*
     /**********************************************************************
     /* Internal methods for entry deletion
     /**********************************************************************
      */
     
     protected Storable _softDelete(StorableKey keyStorable entryfinal long currentTime,
             final boolean removeInlinedDatafinal boolean removeExternalData)
         throws IOExceptionStoreException
     {
         // Ok now... need to delete some data?
         boolean hasExternalToDelete = removeExternalData && entry.hasExternalData();
         if (!entry.isDeleted() || hasExternalToDelete
                 || (removeInlinedData && entry.hasInlineData())) {
             File extFile = hasExternalToDelete ? entry.getExternalFile() : null;
             Storable modEntry = .softDeletedCopy(keyentrycurrentTime,
                     removeInlinedDataremoveExternalData);
             .ovewriteEntry(keymodEntry);
             if (extFile != null) {
                 _deleteBackingFile(keyextFile);
             }
             return modEntry;
         }
         return entry;
     }
 
     protected Storable _hardDelete(StorableKey keyStorable entry,
             final boolean removeExternalData)
         throws IOExceptionStoreException
     {
         // Hard deletion is not hard at all (pun attack!)...
         if (removeExternalData && entry.hasExternalData()) {
             _deleteBackingFile(keyentry.getExternalFile());
         }
         .deleteEntry(key);
        return entry;
    }
    
    /*
    /**********************************************************************
    /* Internal methods, other
    /**********************************************************************
     */
    protected void _deleteBackingFile(StorableKey keyFile extFile)
    {
        if (extFile != null) {
            try {
                boolean ok = extFile.delete();
                if (!ok) {
                    .warn("Failed to delete backing data file of key {}, path: {}",
                            keyextFile.getAbsolutePath());
                }
            } catch (Exception e) {
                .warn("Failed to delete backing data file of key "+key+", path: "+extFile.getAbsolutePath(), e);
            }
        }
    }    
            
    protected static int _calcChecksum(ByteContainer data) {
        // important: mask zero value, which occurs with empty content
        return _cleanChecksum(data.hash(.));
    }
    protected static int _cleanChecksum(int checksum) {
        return (checksum == .) ? . : checksum;
    }
    
    
Helper method called to check whether given (partial) piece of content might benefit from compression, as per currently defined rules. To be eligible, all of below needs to be true:
  • compression is enabled for store
  • caller indicated data isn't pre-compressed it (or indicate it does not want compression)
  • data is big enough that it might help (i.e. it's not "too small to compress")
  • data does not look like it has been compressed (regardless of what caller said) using one of algorithms we know of
    protected boolean _shouldTryToCompress(StorableCreationMetadata metadata,
            ByteContainer data)
    {
        return 
            && (metadata.compression == null)
            && (data.byteLength() >= )
            && !Compressors.isCompressed(data);
    }
    
    protected void _checkClosed()
    {
        if (<