/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.io.DataInput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.IdLock;
import org.apache.hadoop.io.WritableUtils;
// Tracing imports omitted here: the htrace package name (org.cloudera.htrace /
// org.htrace / org.apache.htrace) varies across HBase 0.98-era releases.

/**
 * HFile reader for version 2.
 */
public class HFileReaderV2 extends AbstractHFileReader {

  private static final Log LOG = LogFactory.getLog(HFileReaderV2.class);

  
  /** Minor versions in HFile V2 starting with this number have hbase checksums */
  public static final int MINOR_VERSION_WITH_CHECKSUM = 1;

  /** The HFile V2 minor version that does not support checksums */
  public static final int MINOR_VERSION_NO_CHECKSUM = 0;

  
  /** HFile minor version that introduced the protobuf-based file trailer */
  public static final int PBUF_TRAILER_MINOR_VERSION = 2;

  /**
   * The size of a (key length, value length) tuple that prefixes each entry in
   * a data block.
   */
  public final static int KEY_VALUE_LEN_SIZE = 2 * Bytes.SIZEOF_INT;
  
  protected boolean includesMemstoreTS = false;
  protected boolean decodeMemstoreTS = false;

  protected boolean shouldIncludeMemstoreTS() {
    return includesMemstoreTS;
  }
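
  // Illustrative note (not part of the original source): each cell in a V2
  // data block is laid out as
  //
  //   [int keyLen][int valueLen][key bytes][value bytes][optional vlong memstoreTS]
  //
  // which is why KEY_VALUE_LEN_SIZE is 2 * Bytes.SIZEOF_INT. A minimal sketch
  // of stepping over one entry, assuming `buf` is a ByteBuffer positioned at
  // the start of an entry and the file carries no memstore timestamps:
  //
  //   int keyLen = buf.getInt();
  //   int valueLen = buf.getInt();
  //   buf.position(buf.position() + keyLen + valueLen); // now at the next entry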

  
  /** Filesystem-level block reader. */
  protected HFileBlock.FSReader fsBlockReader;

  
  /**
   * A "sparse lock" implementation allowing to lock on a particular block
   * identified by offset. The purpose of this is to avoid two clients loading
   * the same block, and have all but one client wait to get the block from the
   * cache.
   */
  private IdLock offsetLock = new IdLock();
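
  // Conceptual sketch (not HBase's actual IdLock implementation): a "sparse
  // lock" keyed by a long id can be approximated with a ConcurrentHashMap,
  // where the first caller for an id installs a latch and loads the block,
  // and later callers for the same id wait on it and then re-check the cache:
  //
  //   ConcurrentHashMap<Long, CountDownLatch> locks = new ConcurrentHashMap<Long, CountDownLatch>();
  //   CountDownLatch latch = new CountDownLatch(1);
  //   CountDownLatch existing = locks.putIfAbsent(offset, latch);
  //   if (existing == null) {
  //     try { /* load block, populate cache */ }
  //     finally { locks.remove(offset); latch.countDown(); }
  //   } else {
  //     existing.await(); // then re-check the cache
  //   }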

  
  /**
   * Blocks read from the load-on-open section, excluding data root index, meta
   * index, and file info.
   */
  private List<HFileBlock> loadOnOpenBlocks = new ArrayList<HFileBlock>();

  
  /** Minimum minor version supported by this HFile format */
  static final int MIN_MINOR_VERSION = 0;

  
  /** Maximum minor version supported by this HFile format */
  // We went to version 2 when we moved to pb'ing fileinfo and the trailer on
  // the file. This version can read Writables version 1.
  static final int MAX_MINOR_VERSION = 3;

  
  /** Minor versions starting with this number have a faked index key */
  static final int MINOR_VERSION_WITH_FAKED_KEY = 3;

  protected HFileContext hfileContext;

  
  /**
   * Opens a HFile. You must load the index before you can use it by calling
   * {@link AbstractHFileReader#loadFileInfo()}.
   *
   * @param path Path to HFile.
   * @param trailer File trailer.
   * @param fsdis input stream.
   * @param size Length of the stream.
   * @param cacheConf Cache configuration.
   * @param hfs
   * @param conf
   */
  public HFileReaderV2(final Path path, final FixedFileTrailer trailer,
      final FSDataInputStreamWrapper fsdis, final long size, final CacheConfig cacheConf,
      final HFileSystem hfs, final Configuration conf) throws IOException {
    super(path, trailer, size, cacheConf, hfs, conf);
    this.conf = conf;
    validateMinorVersion(path, trailer.getMinorVersion());
    this.hfileContext = createHFileContext(fsdis, fileSize, hfs, path, trailer);
    HFileBlock.FSReaderV2 fsBlockReaderV2 = new HFileBlock.FSReaderV2(fsdis, fileSize,
        hfs, path, hfileContext);
    this.fsBlockReader = fsBlockReaderV2; // upcast

    // Comparator class name is stored in the trailer in version 2.
    comparator = trailer.createComparator();
    dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator,
        trailer.getNumDataIndexLevels(), this);
    metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader(
        KeyValue.RAW_COMPARATOR, 1);
 
    // Parse load-on-open data.

    HFileBlock.BlockIterator blockIter = fsBlockReaderV2.blockRange(
        trailer.getLoadOnOpenDataOffset(),
        fileSize - trailer.getTrailerSize());

    // Data index. We also read statistics about the block index written after
    // the root level.
    dataBlockIndexReader.readMultiLevelIndexRoot(
        blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
        trailer.getDataIndexCount());

    // Meta index.
    metaBlockIndexReader.readRootIndex(
        blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
        trailer.getMetaIndexCount());

    // File info
    fileInfo = new FileInfo();
    fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
    lastKey = fileInfo.get(FileInfo.LASTKEY);
    avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
    avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));
    byte [] keyValueFormatVersion =
        fileInfo.get(HFileWriterV2.KEY_VALUE_VERSION);
    includesMemstoreTS = keyValueFormatVersion != null &&
        Bytes.toInt(keyValueFormatVersion) ==
            HFileWriterV2.KEY_VALUE_VER_WITH_MEMSTORE;
    fsBlockReaderV2.setIncludesMemstoreTS(includesMemstoreTS);
    if (includesMemstoreTS) {
      decodeMemstoreTS = Bytes.toLong(fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY)) > 0;
    }

    // Read data block encoding algorithm name from file info.
    dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo);
    fsBlockReaderV2.setDataBlockEncoder(dataBlockEncoder);

    // Store all other load-on-open blocks for further consumption.
    HFileBlock b;
    while ((b = blockIter.nextBlock()) != null) {
      loadOnOpenBlocks.add(b);
    }
 
    // Prefetch file blocks upon open if requested
    if (cacheConf.shouldPrefetchOnOpen()) {
      PrefetchExecutor.request(path, new Runnable() {
        public void run() {
          try {
            long offset = 0;
            long end = fileSize - getTrailer().getTrailerSize();
            HFileBlock prevBlock = null;
            while (offset < end) {
              if (Thread.interrupted()) {
                break;
              }
              long onDiskSize = -1;
              if (prevBlock != null) {
                onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
              }
              HFileBlock block = readBlock(offset, onDiskSize, true, false, false, false, null);
              prevBlock = block;
              offset += block.getOnDiskSizeWithHeader();
            }
          } catch (IOException e) {
            // IOExceptions are probably due to region closes (relocation, etc.)
            if (LOG.isTraceEnabled()) {
              LOG.trace("Exception encountered while prefetching " + path + ":", e);
            }
          } catch (Exception e) {
            // Other exceptions are interesting
            LOG.warn("Exception encountered while prefetching " + path + ":", e);
          } finally {
            PrefetchExecutor.complete(path);
          }
        }
      });
    }
  }
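
  // Illustrative usage sketch (not part of the original source): callers do
  // not normally construct HFileReaderV2 directly; HFile.createReader() reads
  // the trailer and picks the right version. A minimal open/close flow,
  // assuming a hypothetical path "/hbase/myfile":
  //
  //   Configuration conf = HBaseConfiguration.create();
  //   FileSystem fs = FileSystem.get(conf);
  //   Path path = new Path("/hbase/myfile");
  //   HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  //   reader.loadFileInfo(); // load the index before use, as documented above
  //   // ... read via scanners or meta blocks ...
  //   reader.close();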
 
  protected HFileContext createHFileContext(FSDataInputStreamWrapper fsdis, long fileSize,
      HFileSystem hfs, Path path, FixedFileTrailer trailer) throws IOException {
    return new HFileContextBuilder()
      .withIncludesMvcc(this.includesMemstoreTS)
      .withCompression(this.compressAlgo)
      .withHBaseCheckSum(trailer.getMinorVersion() >= MINOR_VERSION_WITH_CHECKSUM)
      .build();
  }

  
  /**
   * Create a Scanner on this file. No seeks or reads are done on creation. Call
   * {@link HFileScanner#seekTo(byte[])} to position and start the read. There
   * is nothing to clean up in a Scanner. Letting go of your references to the
   * scanner is sufficient.
   *
   * @param cacheBlocks True if we should cache blocks read in by this scanner.
   * @param pread Use positional read rather than seek+read if true (pread is
   *          better for random reads, seek+read is better scanning).
   * @param isCompaction is scanner being used for a compaction?
   * @return Scanner on this file.
   */
  @Override
  public HFileScanner getScanner(boolean cacheBlocks, final boolean pread,
      final boolean isCompaction) {
    if (dataBlockEncoder.useEncodedScanner()) {
      return new EncodedScannerV2(this, cacheBlocks, pread, isCompaction,
          hfileContext);
    }

    return new ScannerV2(this, cacheBlocks, pread, isCompaction);
  }
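
  // Illustrative usage sketch (not part of the original source): a full scan
  // with a reader obtained as above; cacheBlocks=true, pread=false (sequential
  // scan), isCompaction=false:
  //
  //   HFileScanner scanner = reader.getScanner(true, false, false);
  //   if (scanner.seekTo()) { // position at the first cell
  //     do {
  //       KeyValue kv = scanner.getKeyValue();
  //       // process kv ...
  //     } while (scanner.next());
  //   }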

  

  /**
   * @param metaBlockName
   * @param cacheBlock Add block to cache, if found
   * @return block wrapped in a ByteBuffer, with header skipped
   * @throws IOException
   */
  public ByteBuffer getMetaBlock(String metaBlockName, boolean cacheBlock)
      throws IOException {
    if (trailer.getMetaIndexCount() == 0) {
      return null; // there are no meta blocks
    }
    if (metaBlockIndexReader == null) {
      throw new IOException("Meta index not loaded");
    }

    byte[] mbname = Bytes.toBytes(metaBlockName);
    int block = metaBlockIndexReader.rootBlockContainingKey(mbname, 0,
        mbname.length);
    if (block == -1)
      return null;
    long blockSize = metaBlockIndexReader.getRootBlockDataSize(block);
 
    // Per meta key from any given file, synchronize reads for said block. This
    // is OK to do for meta blocks because the meta block index is always
    // single-level.
    synchronized (metaBlockIndexReader.getRootBlockKey(block)) {
      // Check cache for block. If found return.
      long metaBlockOffset = metaBlockIndexReader.getRootBlockOffset(block);
      BlockCacheKey cacheKey = new BlockCacheKey(name, metaBlockOffset,
          DataBlockEncoding.NONE, BlockType.META);

      cacheBlock &= cacheConf.shouldCacheDataOnRead();
      if (cacheConf.isBlockCacheEnabled()) {
        HFileBlock cachedBlock =
          (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock, false, true);
        if (cachedBlock != null) {
          assert cachedBlock.isUnpacked() : "Packed block leak.";
          // Return a distinct 'shallow copy' of the block,
          // so pos does not get messed by the scanner
          return cachedBlock.getBufferWithoutHeader();
        }
        // Cache Miss, please load.
      }

      HFileBlock metaBlock = fsBlockReader.readBlockData(metaBlockOffset,
          blockSize, -1, true).unpack(hfileContext, fsBlockReader);

      // Cache the block
      if (cacheBlock) {
        cacheConf.getBlockCache().cacheBlock(cacheKey, metaBlock,
            cacheConf.isInMemory());
      }

      return metaBlock.getBufferWithoutHeader();
    }
  }
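
  // Illustrative usage sketch (not part of the original source; the block
  // name "myMetaBlock" is hypothetical and must match a name the writer
  // registered via appendMetaBlock()):
  //
  //   ByteBuffer meta = reader.getMetaBlock("myMetaBlock", false);
  //   if (meta != null) {
  //     // header already skipped; read the payload from the buffer
  //   }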

  
  /**
   * Read in a file block.
   *
   * @param dataBlockOffset offset to read.
   * @param onDiskBlockSize size of the block
   * @param cacheBlock
   * @param pread Use positional read instead of seek+read (positional is
   *          better for random reads, seek+read is better scanning).
   * @param isCompaction is this block being read as part of a compaction
   * @param expectedBlockType the block type we are expecting to read with this
   *          read operation, or null to read whatever block type is available
   *          and avoid checking (that might reduce caching efficiency of
   *          encoded data blocks)
   * @return Block wrapped in a ByteBuffer.
   * @throws IOException
   */
  public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize,
      final boolean cacheBlock, boolean pread, final boolean isCompaction,
      final boolean updateCacheMetrics, BlockType expectedBlockType)
      throws IOException {
    if (dataBlockIndexReader == null) {
      throw new IOException("Block index not loaded");
    }
    if (dataBlockOffset < 0
        || dataBlockOffset >= trailer.getLoadOnOpenDataOffset()) {
      throw new IOException("Requested block is out of range: "
          + dataBlockOffset + ", lastDataBlockOffset: "
          + trailer.getLastDataBlockOffset());
    }
    // For any given block from any given file, synchronize reads for said
    // block.
    // Without a cache, this synchronizing is needless overhead, but really
    // the other choice is to duplicate work (which the cache would prevent you
    // from doing).

    BlockCacheKey cacheKey =
        new BlockCacheKey(name, dataBlockOffset,
            dataBlockEncoder.getDataBlockEncoding(),
            expectedBlockType);
 
    boolean useLock = false;
    IdLock.Entry lockEntry = null;
    TraceScope traceScope = Trace.startSpan("HFileReaderV2.readBlock");
    try {
      while (true) {
        if (useLock) {
          lockEntry = offsetLock.getLockEntry(dataBlockOffset);
        }

        // Check cache for block. If found return.
        if (cacheConf.isBlockCacheEnabled()) {
          // Try and get the block from the block cache. If the useLock variable is true then this
          // is the second time through the loop and it should not be counted as a block cache miss.
          HFileBlock cachedBlock = (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey,
            cacheBlock, useLock, updateCacheMetrics);
          if (cachedBlock != null) {
            if (cacheConf.shouldCacheCompressed(cachedBlock.getBlockType().getCategory())) {
              cachedBlock = cachedBlock.unpack(hfileContext, fsBlockReader);
            }
            if (Trace.isTracing()) {
              traceScope.getSpan().addTimelineAnnotation("blockCacheHit");
            }
            assert cachedBlock.isUnpacked() : "Packed block leak.";
            if (cachedBlock.getBlockType().isData()) {
              HFile.dataBlockReadCnt.incrementAndGet();

              // Validate encoding type for data blocks. We include encoding
              // type in the cache key, and we expect it to match on a cache hit.
              if (cachedBlock.getDataBlockEncoding() != dataBlockEncoder.getDataBlockEncoding()) {
                throw new IOException("Cached block under key " + cacheKey + " "
                  + "has wrong encoding: " + cachedBlock.getDataBlockEncoding() + " (expected: "
                  + dataBlockEncoder.getDataBlockEncoding() + ")");
              }
            }
            return cachedBlock;
          }
          // Carry on, please load.
        }
        if (!useLock) {
          // check cache again with lock
          useLock = true;
          continue;
        }
        if (Trace.isTracing()) {
          traceScope.getSpan().addTimelineAnnotation("blockCacheMiss");
        }
        // Load block from filesystem.
        HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, -1,
            pread);
        validateBlockType(hfileBlock, expectedBlockType);
        HFileBlock unpacked = hfileBlock.unpack(hfileContext, fsBlockReader);
        BlockType.BlockCategory category = hfileBlock.getBlockType().getCategory();

        // Cache the block if necessary
        if (cacheBlock && cacheConf.shouldCacheBlockOnRead(category)) {
          cacheConf.getBlockCache().cacheBlock(cacheKey,
            cacheConf.shouldCacheCompressed(category) ? hfileBlock : unpacked,
            cacheConf.isInMemory());
        }

        if (updateCacheMetrics && hfileBlock.getBlockType().isData()) {
          HFile.dataBlockReadCnt.incrementAndGet();
        }

        return unpacked;
      }
    } finally {
      traceScope.close();
      if (lockEntry != null) {
        offsetLock.releaseLockEntry(lockEntry);
      }
    }
  }
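
  // The loop above is a double-checked read path: pass 1 probes the cache
  // without a lock; on a miss it takes the per-offset IdLock and probes again,
  // so only one thread performs the filesystem read. A generic sketch of the
  // same pattern (illustrative, outside HBase's types):
  //
  //   V v = cache.get(k);                 // optimistic, no lock
  //   if (v == null) {
  //     lock(k);                          // per-key lock, not a global one
  //     try {
  //       v = cache.get(k);               // re-check under the lock
  //       if (v == null) {
  //         v = load(k);
  //         cache.put(k, v);
  //       }
  //     } finally { unlock(k); }
  //   }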
 
  public boolean hasMVCCInfo() {
    return includesMemstoreTS && decodeMemstoreTS;
  }

  
  /**
   * Compares the actual type of a block retrieved from cache or disk with its
   * expected type and throws an exception in case of a mismatch. Expected
   * block type of {@link BlockType#DATA} is considered to match the actual
   * block type {@link BlockType#ENCODED_DATA} as well.
   *
   * @param block a block retrieved from cache or disk
   * @param expectedBlockType the expected block type, or null to skip the check
   */
  private void validateBlockType(HFileBlock block,
      BlockType expectedBlockType) throws IOException {
    if (expectedBlockType == null) {
      return;
    }
    BlockType actualBlockType = block.getBlockType();
    if (actualBlockType == BlockType.ENCODED_DATA &&
        expectedBlockType == BlockType.DATA) {
      // We consider DATA to match ENCODED_DATA for the purpose of this
      // verification.
      return;
    }
    if (actualBlockType != expectedBlockType) {
      throw new IOException("Expected block type " + expectedBlockType + ", " +
          "but got " + actualBlockType + ": " + block);
    }
  }

  

  /**
   * @return Last key in the file. May be null if file has no entries. Note
   *         that this is not the last row key, but rather the byte form of
   *         the last KeyValue.
   */
  public byte[] getLastKey() {
    return dataBlockIndexReader.isEmpty() ? null : lastKey;
  }

  

  /**
   * @return Midkey for this file. We work with block boundaries only so
   *         returned midkey is an approximation only.
   * @throws IOException
   */
  public byte[] midkey() throws IOException {
    return dataBlockIndexReader.midkey();
  }
 
  public void close() throws IOException {
    close(cacheConf.shouldEvictOnClose());
  }

  public void close(boolean evictOnClose) throws IOException {
    PrefetchExecutor.cancel(path);
    if (evictOnClose && cacheConf.isBlockCacheEnabled()) {
      int numEvicted = cacheConf.getBlockCache().evictBlocksByHfileName(name);
      if (LOG.isTraceEnabled()) {
        LOG.trace("On close, file=" + name + " evicted=" + numEvicted
          + " block(s)");
      }
    }
  }

  
  /** For testing */
  protected HFileBlock.FSReader getUncachedBlockReader() {
    return fsBlockReader;
  }
 
 
  protected abstract static class AbstractScannerV2
      extends AbstractHFileReader.Scanner {
    protected HFileBlock block;

    /**
     * The next indexed key is to keep track of the indexed key of the next
     * data block. If the nextIndexedKey is HConstants.NO_NEXT_INDEXED_KEY, it
     * means that the current data block is the last data block. If the
     * nextIndexedKey is null, it means the nextIndexedKey has not been loaded
     * yet.
     */
    protected byte[] nextIndexedKey;

    public AbstractScannerV2(HFileReaderV2 r, boolean cacheBlocks,
        final boolean pread, final boolean isCompaction) {
      super(r, cacheBlocks, pread, isCompaction);
    }

    
    /**
     * An internal API function. Seek to the given key, optionally rewinding to
     * the first key of the block before doing the seek.
     *
     * @param key key byte array
     * @param offset key offset in the key byte array
     * @param length key length
     * @param rewind whether to rewind to the first key of the block before
     *        doing the seek. If this is false, we are assuming we never go
     *        back, otherwise the result is undefined.
     * @return -1 if the key is earlier than the first key of the file,
     *         0 if we are at the given key, 1 if we are past the given key,
     *         -2 if the key is earlier than the first key of the file while
     *         using a faked index key
     * @throws IOException
     */
    protected int seekTo(byte[] key, int offset, int length, boolean rewind)
        throws IOException {
      HFileBlockIndex.BlockIndexReader indexReader =
          reader.getDataBlockIndexReader();
      BlockWithScanInfo blockWithScanInfo =
        indexReader.loadDataBlockWithScanInfo(key, offset, length, block,
            cacheBlocks, pread, isCompaction);
      if (blockWithScanInfo == null || blockWithScanInfo.getHFileBlock() == null) {
        // This happens if the key e.g. falls before the beginning of the file.
        return -1;
      }
      return loadBlockAndSeekToKey(blockWithScanInfo.getHFileBlock(),
          blockWithScanInfo.getNextIndexedKey(), rewind, key, offset, length, false);
    }
 
    protected abstract ByteBuffer getFirstKeyInBlock(HFileBlock curBlock);

    protected abstract int loadBlockAndSeekToKey(HFileBlock seekToBlock, byte[] nextIndexedKey,
        boolean rewind, byte[] key, int offset, int length, boolean seekBefore)
        throws IOException;

    @Override
    public int seekTo(byte[] key, int offset, int length) throws IOException {
      // Always rewind to the first key of the block, because the given key
      // might be before or after the current key.
      return seekTo(key, offset, length, true);
    }
 
    @Override
    public int reseekTo(byte[] key, int offset, int length) throws IOException {
      int compared;
      if (isSeeked()) {
        compared = compareKey(reader.getComparator(), key, offset, length);
        if (compared < 1) {
          // If the required key is less than or equal to current key, then
          // don't do anything.
          return compared;
        } else {
          if (this.nextIndexedKey != null &&
              (this.nextIndexedKey == HConstants.NO_NEXT_INDEXED_KEY ||
               reader.getComparator().compareFlatKey(key, offset, length,
                   nextIndexedKey, 0, nextIndexedKey.length) < 0)) {
            // The reader shall continue to scan the current data block instead of querying the
            // block index as long as it knows the target key is strictly smaller than
            // the next indexed key or the current data block is the last data block.
            return loadBlockAndSeekToKey(this.block, this.nextIndexedKey,
                false, key, offset, length, false);
          }
        }
      }
      // Don't rewind on a reseek operation, because reseek implies that we are
      // always going forward in the file.
      return seekTo(key, offset, length, false);
    }
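
    // Illustrative note (not part of the original source): reseekTo() is the
    // forward-only variant, useful when probing keys in increasing order.
    // Return codes follow seekTo(): -1 => key precedes the first key of the
    // file, 0 => exact match, 1 => positioned at the last cell <= the key:
    //
    //   int res = scanner.reseekTo(key, 0, key.length);
    //   if (res == 0) { /* exact cell found */ }
    //   else if (res == 1) { /* positioned just before where key would sort */ }
    //   else { /* res == -1: key precedes the file */ }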
 
    @Override
    public boolean seekBefore(byte[] key, int offset, int length)
        throws IOException {
      HFileBlock seekToBlock =
          reader.getDataBlockIndexReader().seekToDataBlock(key, offset, length,
              block, cacheBlocks, pread, isCompaction);
      if (seekToBlock == null) {
        return false;
      }
      ByteBuffer firstKey = getFirstKeyInBlock(seekToBlock);

      if (reader.getComparator().compareFlatKey(firstKey.array(),
          firstKey.arrayOffset(), firstKey.limit(), key, offset, length) >= 0)
      {
        long previousBlockOffset = seekToBlock.getPrevBlockOffset();
        // The key we are interested in
        if (previousBlockOffset == -1) {
          // we have a 'problem', the key we want is the first of the file.
          return false;
        }

        // It is important that we compute and pass onDiskSize to the block
        // reader so that it does not have to read the header separately to
        // figure out the size.
        seekToBlock = reader.readBlock(previousBlockOffset,
            seekToBlock.getOffset() - previousBlockOffset, cacheBlocks,
            pread, isCompaction, true, BlockType.DATA);
        // TODO shortcut: seek forward in this block to the last key of the
        // block.
      }
      byte[] firstKeyInCurrentBlock = Bytes.getBytes(firstKey);
      loadBlockAndSeekToKey(seekToBlock, firstKeyInCurrentBlock, true, key, offset, length, true);
      return true;
    }
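
    // Illustrative usage sketch (not part of the original source): position on
    // the last cell strictly before `key`, e.g. to find the closest cell at or
    // before a point:
    //
    //   if (scanner.seekBefore(key, 0, key.length)) {
    //     KeyValue prev = scanner.getKeyValue(); // last cell before `key`
    //   } else {
    //     // `key` sorts at or before the first cell of the file
    //   }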


    
    /**
     * Scans blocks in the "scanned" section of the HFile until the next data
     * block is found.
     *
     * @return the next block, or null if there are no more data blocks
     * @throws IOException
     */
    protected HFileBlock readNextDataBlock() throws IOException {
      long lastDataBlockOffset = reader.getTrailer().getLastDataBlockOffset();
      if (block == null)
        return null;

      HFileBlock curBlock = block;

      do {
        if (curBlock.getOffset() >= lastDataBlockOffset)
          return null;

        if (curBlock.getOffset() < 0) {
          throw new IOException("Invalid block file offset: " + block);
        }

        // We are reading the next block without block type validation, because
        // it might turn out to be a non-data block.
        curBlock = reader.readBlock(curBlock.getOffset()
            + curBlock.getOnDiskSizeWithHeader(),
            curBlock.getNextBlockOnDiskSizeWithHeader(), cacheBlocks, pread,
            isCompaction, true, null);
      } while (!curBlock.getBlockType().isData());

      return curBlock;
    }
    
    /**
     * Compare the given key against the current key
     * @param comparator
     * @param key
     * @param offset
     * @param length
     * @return -1 if the passed key is smaller than the current key, 0 if
     *         equal and 1 if greater
     */
    public abstract int compareKey(KVComparator comparator, byte[] key, int offset,
        int length);
  }

  
  /**
   * Implementation of {@link HFileScanner} interface.
   */
  protected static class ScannerV2 extends AbstractScannerV2 {
    private HFileReaderV2 reader;

    public ScannerV2(HFileReaderV2 r, boolean cacheBlocks,
        final boolean pread, final boolean isCompaction) {
      super(r, cacheBlocks, pread, isCompaction);
      this.reader = r;
    }
 
    @Override
    public KeyValue getKeyValue() {
      if (!isSeeked())
        return null;

      KeyValue ret = new KeyValue(blockBuffer.array(), blockBuffer.arrayOffset()
          + blockBuffer.position(), getCellBufSize());
      if (this.reader.shouldIncludeMemstoreTS()) {
        ret.setMvccVersion(currMemstoreTS);
      }
      return ret;
    }

    protected int getCellBufSize() {
      return KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen;
    }
 
    @Override
    public ByteBuffer getKey() {
      assertSeeked();
      return ByteBuffer.wrap(
          blockBuffer.array(),
          blockBuffer.arrayOffset() + blockBuffer.position()
              + KEY_VALUE_LEN_SIZE, currKeyLen).slice();
    }

    @Override
    public int compareKey(KVComparator comparator, byte[] key, int offset, int length) {
      return comparator.compareFlatKey(key, offset, length, blockBuffer.array(),
          blockBuffer.arrayOffset() + blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen);
    }

    @Override
    public ByteBuffer getValue() {
      assertSeeked();
      return ByteBuffer.wrap(
          blockBuffer.array(),
          blockBuffer.arrayOffset() + blockBuffer.position()
              + KEY_VALUE_LEN_SIZE + currKeyLen, currValueLen).slice();
    }

    protected void setNonSeekedState() {
      block = null;
      blockBuffer = null;
      currKeyLen = 0;
      currValueLen = 0;
      currMemstoreTS = 0;
      currMemstoreTSLen = 0;
    }

    
    /**
     * Go to the next key/value in the block section. Loads the next block if
     * necessary. If successful, {@link #getKey()} and {@link #getValue()} can
     * be called.
     *
     * @return true if successfully navigated to the next key/value
     */
    @Override
    public boolean next() throws IOException {
      assertSeeked();

      try {
        blockBuffer.position(getNextCellStartPosition());
      } catch (IllegalArgumentException e) {
        LOG.error("Current pos = " + blockBuffer.position()
            + "; currKeyLen = " + currKeyLen + "; currValLen = "
            + currValueLen + "; block limit = " + blockBuffer.limit()
            + "; HFile name = " + reader.getName()
            + "; currBlock currBlockOffset = " + block.getOffset());
        throw e;
      }

      if (blockBuffer.remaining() <= 0) {
        long lastDataBlockOffset =
            reader.getTrailer().getLastDataBlockOffset();

        if (block.getOffset() >= lastDataBlockOffset) {
          setNonSeekedState();
          return false;
        }

        // read the next block
        HFileBlock nextBlock = readNextDataBlock();
        if (nextBlock == null) {
          setNonSeekedState();
          return false;
        }

        updateCurrBlock(nextBlock);
        return true;
      }

      // We are still in the same block.
      readKeyValueLen();
      return true;
    }

    protected int getNextCellStartPosition() {
      return blockBuffer.position() + KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen
          + currMemstoreTSLen;
    }

    
    /**
     * Positions this scanner at the start of the file.
     *
     * @return false if empty file; i.e. a call to next would return false and
     *         the current key and value are undefined.
     * @throws IOException
     */
    @Override
    public boolean seekTo() throws IOException {
      if (reader == null) {
        return false;
      }

      if (reader.getTrailer().getEntryCount() == 0) {
        // No data blocks.
        return false;
      }

      long firstDataBlockOffset =
          reader.getTrailer().getFirstDataBlockOffset();
      if (block != null && block.getOffset() == firstDataBlockOffset) {
        blockBuffer.rewind();
        readKeyValueLen();
        return true;
      }

      block = reader.readBlock(firstDataBlockOffset, -1, cacheBlocks, pread,
          isCompaction, true, BlockType.DATA);
      if (block.getOffset() < 0) {
        throw new IOException("Invalid block offset: " + block.getOffset());
      }
      updateCurrBlock(block);
      return true;
    }
 
    @Override
    protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, byte[] nextIndexedKey,
        boolean rewind, byte[] key, int offset, int length, boolean seekBefore)
        throws IOException {
      if (block == null || block.getOffset() != seekToBlock.getOffset()) {
        updateCurrBlock(seekToBlock);
      } else if (rewind) {
        blockBuffer.rewind();
      }

      // Update the nextIndexedKey
      this.nextIndexedKey = nextIndexedKey;
      return blockSeek(key, offset, length, seekBefore);
    }

    
    /**
     * Updates the current block to be the given {@link HFileBlock}. Seeks to
     * the first key/value pair.
     *
     * @param newBlock the block to make current
     */
    protected void updateCurrBlock(HFileBlock newBlock) {
      block = newBlock;

      // sanity check
      if (block.getBlockType() != BlockType.DATA) {
        throw new IllegalStateException("ScannerV2 works only on data " +
            "blocks, got " + block.getBlockType() + "; " +
            "fileName=" + reader.name + ", " +
            "dataBlockEncoder=" + reader.dataBlockEncoder + ", " +
            "isCompaction=" + isCompaction);
      }

      blockBuffer = block.getBufferWithoutHeader();
      readKeyValueLen();
      blockFetches++;

      // Reset the next indexed key
      this.nextIndexedKey = null;
    }
 
    protected void readKeyValueLen() {
      blockBuffer.mark();
      currKeyLen = blockBuffer.getInt();
      currValueLen = blockBuffer.getInt();
      ByteBufferUtils.skip(blockBuffer, currKeyLen + currValueLen);
      readMvccVersion();
      if (currKeyLen < 0 || currValueLen < 0
          || currKeyLen > blockBuffer.limit()
          || currValueLen > blockBuffer.limit()) {
        throw new IllegalStateException("Invalid currKeyLen " + currKeyLen
            + " or currValueLen " + currValueLen + ". Block offset: "
            + block.getOffset() + ", block length: " + blockBuffer.limit()
            + ", position: " + blockBuffer.position() + " (without header).");
      }
      blockBuffer.reset();
    }

    protected void readMvccVersion() {
      if (this.reader.shouldIncludeMemstoreTS()) {
        if (this.reader.decodeMemstoreTS) {
          try {
            currMemstoreTS = Bytes.readVLong(blockBuffer.array(), blockBuffer.arrayOffset()
                + blockBuffer.position());
            currMemstoreTSLen = WritableUtils.getVIntSize(currMemstoreTS);
          } catch (Exception e) {
            throw new RuntimeException("Error reading memstore timestamp", e);
          }
        } else {
          currMemstoreTS = 0;
          currMemstoreTSLen = 1;
        }
      }
    }
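
    // Illustrative sketch (not part of the original source): the memstore
    // timestamp is a Hadoop variable-length long, so its on-disk size must be
    // derived from the decoded value. A self-contained roundtrip with the
    // same APIs used above:
    //
    //   ByteArrayOutputStream bos = new ByteArrayOutputStream();
    //   WritableUtils.writeVLong(new DataOutputStream(bos), 42L);
    //   byte[] raw = bos.toByteArray();
    //   long ts = Bytes.readVLong(raw, 0);          // 42
    //   int len = WritableUtils.getVIntSize(ts);    // bytes consumed == raw.length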

    
    /**
     * Within a loaded block, seek looking for the last key that is smaller
     * than (or equal to?) the key we are interested in.
     *
     * A note on the seekBefore: if you have seekBefore = true, AND the first
     * key in the block = key, then you'll get thrown exceptions. The caller
     * has to check for that case and load the previous block as appropriate.
     *
     * @param key the key to find
     * @param seekBefore find the key before the given key in case of exact
     *        match.
     * @return 0 in case of an exact key match, 1 in case of an inexact match,
     *         -2 in case of an inexact match and furthermore, the input key
     *         less than the first key of current block (e.g. using a faked
     *         index key)
     */
    protected int blockSeek(byte[] key, int offset, int length,
        boolean seekBefore) {
      int klen, vlen;
      long memstoreTS = 0;
      int memstoreTSLen = 0;
      int lastKeyValueSize = -1;
      do {
        blockBuffer.mark();
        klen = blockBuffer.getInt();
        vlen = blockBuffer.getInt();
        blockBuffer.reset();
        if (this.reader.shouldIncludeMemstoreTS()) {
          if (this.reader.decodeMemstoreTS) {
            try {
              int memstoreTSOffset = blockBuffer.arrayOffset()
                  + blockBuffer.position() + KEY_VALUE_LEN_SIZE + klen + vlen;
              memstoreTS = Bytes.readVLong(blockBuffer.array(),
                  memstoreTSOffset);
              memstoreTSLen = WritableUtils.getVIntSize(memstoreTS);
            } catch (Exception e) {
              throw new RuntimeException("Error reading memstore timestamp", e);
            }
          } else {
            memstoreTS = 0;
            memstoreTSLen = 1;
          }
        }

        int keyOffset = blockBuffer.arrayOffset() + blockBuffer.position()
            + KEY_VALUE_LEN_SIZE;
        int comp = reader.getComparator().compareFlatKey(key, offset, length,
            blockBuffer.array(), keyOffset, klen);

        if (comp == 0) {
          if (seekBefore) {
            if (lastKeyValueSize < 0) {
              throw new IllegalStateException("blockSeek with seekBefore "
                  + "at the first key of the block: key="
                  + Bytes.toStringBinary(key) + ", blockOffset="
                  + block.getOffset() + ", onDiskSize="
                  + block.getOnDiskSizeWithHeader());
            }
            blockBuffer.position(blockBuffer.position() - lastKeyValueSize);
            readKeyValueLen();
            return 1; // non exact match.
          }
          currKeyLen = klen;
          currValueLen = vlen;
          if (this.reader.shouldIncludeMemstoreTS()) {
            currMemstoreTS = memstoreTS;
            currMemstoreTSLen = memstoreTSLen;
          }
          return 0; // indicate exact match
        } else if (comp < 0) {
          if (lastKeyValueSize > 0)
            blockBuffer.position(blockBuffer.position() - lastKeyValueSize);
          readKeyValueLen();
          if (lastKeyValueSize == -1 && blockBuffer.position() == 0
              && this.reader.trailer.getMinorVersion() >= MINOR_VERSION_WITH_FAKED_KEY) {
            return HConstants.INDEX_KEY_MAGIC;
          }
          return 1;
        }

        // The size of this key/value tuple, including key/value length fields.
        lastKeyValueSize = klen + vlen + memstoreTSLen + KEY_VALUE_LEN_SIZE;
        blockBuffer.position(blockBuffer.position() + lastKeyValueSize);
      } while (blockBuffer.remaining() > 0);

      // Seek to the last key we successfully read. This will happen if this is
      // the last key/value pair in the file, in which case the following call
      // to next() has to return false.
      blockBuffer.position(blockBuffer.position() - lastKeyValueSize);
      readKeyValueLen();
      return 1; // didn't exactly find it.
    }
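
    // Illustrative note (not part of the original source): callers such as
    // loadBlockAndSeekToKey() interpret the result as
    //
    //   0   exact match, scanner positioned on the requested key
    //   1   inexact match, scanner positioned on the last cell <= key
    //   HConstants.INDEX_KEY_MAGIC (-2)   the key precedes the first real key
    //       of the block (reachable only via a faked index key)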
 
    @Override
    protected ByteBuffer getFirstKeyInBlock(HFileBlock curBlock) {
      ByteBuffer buffer = curBlock.getBufferWithoutHeader();
      // It is safe to manipulate this buffer because we own the buffer object.
      buffer.rewind();
      int klen = buffer.getInt();
      buffer.getInt();
      ByteBuffer keyBuff = buffer.slice();
      keyBuff.limit(klen);
      keyBuff.rewind();
      return keyBuff;
    }

    @Override
    public String getKeyString() {
      return Bytes.toStringBinary(blockBuffer.array(),
          blockBuffer.arrayOffset() + blockBuffer.position()
              + KEY_VALUE_LEN_SIZE, currKeyLen);
    }

    @Override
    public String getValueString() {
      return Bytes.toString(blockBuffer.array(), blockBuffer.arrayOffset()
          + blockBuffer.position() + KEY_VALUE_LEN_SIZE + currKeyLen,
          currValueLen);
    }
  }

  
  /**
   * ScannerV2 that operates on encoded data blocks.
   */
  protected static class EncodedScannerV2 extends AbstractScannerV2 {
    private final HFileBlockDecodingContext decodingCtx;
    private final DataBlockEncoder.EncodedSeeker seeker;
    private final DataBlockEncoder dataBlockEncoder;
    protected final HFileContext meta;

    public EncodedScannerV2(HFileReaderV2 reader, boolean cacheBlocks,
        boolean pread, boolean isCompaction, HFileContext meta) {
      super(reader, cacheBlocks, pread, isCompaction);
      DataBlockEncoding encoding = reader.dataBlockEncoder.getDataBlockEncoding();
      dataBlockEncoder = encoding.getEncoder();
      decodingCtx = dataBlockEncoder.newDataBlockDecodingContext(meta);
      seeker = dataBlockEncoder.createSeeker(
        reader.getComparator(), decodingCtx);
      this.meta = meta;
    }

    @Override
    public boolean isSeeked(){
      return this.block != null;
    }

    
    /**
     * Updates the current block to be the given {@link HFileBlock}. Seeks to
     * the first key/value pair.
     *
     * @param newBlock the block to make current
     * @throws CorruptHFileException
     */
    private void updateCurrentBlock(HFileBlock newBlock) throws CorruptHFileException {
      block = newBlock;

      // sanity checks
      if (block.getBlockType() != BlockType.ENCODED_DATA) {
        throw new IllegalStateException(
            "EncodedScanner works only on encoded data blocks");
      }
      short dataBlockEncoderId = block.getDataBlockEncodingId();
      if (!DataBlockEncoding.isCorrectEncoder(dataBlockEncoder, dataBlockEncoderId)) {
        String encoderCls = dataBlockEncoder.getClass().getName();
        throw new CorruptHFileException("Encoder " + encoderCls
          + " doesn't support data block encoding "
          + DataBlockEncoding.getNameFromId(dataBlockEncoderId));
      }

      seeker.setCurrentBuffer(getEncodedBuffer(newBlock));
      blockFetches++;

      // Reset the next indexed key
      this.nextIndexedKey = null;
    }

    private ByteBuffer getEncodedBuffer(HFileBlock newBlock) {
      ByteBuffer origBlock = newBlock.getBufferReadOnly();
      ByteBuffer encodedBlock = ByteBuffer.wrap(origBlock.array(),
          origBlock.arrayOffset() + newBlock.headerSize() +
          DataBlockEncoding.ID_SIZE,
          newBlock.getUncompressedSizeWithoutHeader() -
          DataBlockEncoding.ID_SIZE).slice();
      return encodedBlock;
    }
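
    // Illustrative note (not part of the original source): an ENCODED_DATA
    // block is laid out as
    //
    //   [block header][short encodingId][encoded key/values...]
    //
    // so the seeker is handed a slice that skips headerSize() plus the 2-byte
    // encoding id (DataBlockEncoding.ID_SIZE), e.g.:
    //
    //   ByteBuffer encoded = getEncodedBuffer(block);
    //   seeker.setCurrentBuffer(encoded); // as in updateCurrentBlock() above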
    @Override
    public boolean seekTo() throws IOException {
      if (reader == null) {
        return false;
      }

      if (reader.getTrailer().getEntryCount() == 0) {
        // No data blocks.
        return false;
      }

      long firstDataBlockOffset =
          reader.getTrailer().getFirstDataBlockOffset();
      if (block != null && block.getOffset() == firstDataBlockOffset) {
        seeker.rewind();
        return true;
      }

      block = reader.readBlock(firstDataBlockOffset, -1, cacheBlocks, pread,
          isCompaction, true, BlockType.DATA);
      if (block.getOffset() < 0) {
        throw new IOException("Invalid block offset: " + block.getOffset());
      }
      updateCurrentBlock(block);
      return true;
    }
    @Override
    public boolean next() throws IOException {
      boolean isValid = seeker.next();
      if (!isValid) {
        block = readNextDataBlock();
        isValid = block != null;
        if (isValid) {
          updateCurrentBlock(block);
        }
      }
      return isValid;
    }

    @Override
    public ByteBuffer getKey() {
      assertValidSeek();
      return seeker.getKeyDeepCopy();
    }

    @Override
    public int compareKey(KVComparator comparator, byte[] key, int offset, int length) {
      return seeker.compareKey(comparator, key, offset, length);
    }