package com.ning.compress.lzf;

import java.io.IOException;
import java.io.InputStream;

import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.util.ChunkEncoderFactory;

/**
 * Decorator {@link java.io.InputStream} implementation used for reading
 * uncompressed data and compressing it on the fly, such that reads return
 * compressed data. It is the reverse of {@link LZFInputStream} (which
 * instead uncompresses data).
 *
 * @author Tatu Saloranta
 * @since 0.9.5
 *
 * @see LZFInputStream
 */
 
public class LZFCompressingInputStream extends InputStream
{
    private final BufferRecycler _recycler;

    private ChunkEncoder _encoder;

    
    /**
     * Stream used for reading data to be compressed.
     */
    protected final InputStream _inputStream;

    
    /**
     * Flag that indicates if we have already called 'inputStream.close()'
     * (to avoid calling it multiple times).
     */
    protected boolean _inputStreamClosed;

    
    /**
     * Flag that indicates whether we force full reads (reading of as many
     * bytes as requested), or 'optimal' reads (up to as many as available,
     * but at least one). Default is false, meaning that 'optimal' read is
     * used.
     */
    protected boolean _cfgFullReads = false;
    
    
    /**
     * Buffer in which uncompressed input is first read, before getting
     * encoded in {@link #_encodedBytes}.
     */
    protected byte[] _inputBuffer;

    
    /**
     * Buffer that contains compressed data that is returned to readers.
     */
    protected byte[] _encodedBytes;
    
    
    /**
     * The current position (next byte to output) in the encoded bytes
     * buffer {@link #_encodedBytes}.
     */
    protected int _bufferPosition = 0;
    
    
    /**
     * Length of the currently available encoded content in
     * {@link #_encodedBytes}.
     */
    protected int _bufferLength = 0;

    
    /**
     * Number of bytes read from the underlying {@link #_inputStream}.
     */
    protected int _readCount = 0;
     
    /*
    ///////////////////////////////////////////////////////////////////////
    // Construction, configuration
    ///////////////////////////////////////////////////////////////////////
     */
     
    public LZFCompressingInputStream(InputStream in)
    {
        this(null, in);
    }

    

    /**
     * @since 0.9.8
     */
    public LZFCompressingInputStream(final ChunkEncoder encoder, InputStream in)
    {
        // may be passed by caller, or could be null
        _encoder = encoder;
        _inputStream = in;
        _recycler = BufferRecycler.instance();
        // input buffer must be allocated up front, since readyBuffer() reads into it
        _inputBuffer = _recycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
        // let's not yet allocate encoding buffer; don't know optimal size
    }
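
    // Illustrative note (not part of the original source): instead of null,
    // a caller may supply a specific ChunkEncoder, e.g. the pure-Java "safe"
    // encoder obtained from ChunkEncoderFactory:
    //
    //   ChunkEncoder enc = ChunkEncoderFactory.safeInstance();
    //   InputStream comp = new LZFCompressingInputStream(enc, rawIn);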

    
    /**
     * Method that can be used to define whether reads should be "full" or
     * "optimal": the former means that full compressed blocks are read right
     * away as needed, the latter that only smaller chunks are read at a time,
     * more being read as needed.
     */
    public void setUseFullReads(boolean b) {
        _cfgFullReads = b;
    }
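
    // Illustrative note (not part of the original source): with full reads
    // enabled, bulk reads return fewer than the requested bytes only at
    // end-of-input:
    //
    //   LZFCompressingInputStream in = new LZFCompressingInputStream(rawIn);
    //   in.setUseFullReads(true);
    //   int got = in.read(buf, 0, buf.length); // < buf.length only near EOF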
    
    /*
    ///////////////////////////////////////////////////////////////////////
    // InputStream implementation
    ///////////////////////////////////////////////////////////////////////
     */
    
    @Override
    public int available()
    {
        if (_inputStreamClosed) { // javadocs suggest 0 for closed as well (not -1)
            return 0;
        }
        int left = (_bufferLength - _bufferPosition);
        return (left <= 0) ? 0 : left;
    }
    
    @Override
    public int read() throws IOException
    {
        if (!readyBuffer()) {
            return -1;
        }
        return _encodedBytes[_bufferPosition++] & 255;
    }
        
    @Override
    public int read(final byte[] buffer) throws IOException
    {
        return read(buffer, 0, buffer.length);
    }
    @Override
    public int read(final byte[] buffer, int offset, int length) throws IOException
    {
        if (length < 1) {
            return 0;
        }
        if (!readyBuffer()) {
            return -1;
        }
        // First let's read however much data we happen to have...
        int chunkLength = Math.min(_bufferLength - _bufferPosition, length);
        System.arraycopy(_encodedBytes, _bufferPosition, buffer, offset, chunkLength);
        _bufferPosition += chunkLength;
        if (chunkLength == length || !_cfgFullReads) {
            return chunkLength;
        }
        // Need more data, then
        int totalRead = chunkLength;
        do {
            offset += chunkLength;
            if (!readyBuffer()) {
                break;
            }
            chunkLength = Math.min(_bufferLength - _bufferPosition, (length - totalRead));
            System.arraycopy(_encodedBytes, _bufferPosition, buffer, offset, chunkLength);
            _bufferPosition += chunkLength;
            totalRead += chunkLength;
        } while (totalRead < length);
        return totalRead;
    }
    
    @Override
    public void close() throws IOException
    {
        _bufferPosition = _bufferLength = 0;
        byte[] buf = _encodedBytes;
        if (buf != null) {
            _encodedBytes = null;
            _recycler.releaseEncodeBuffer(buf);
        }
        if (_encoder != null) {
            _encoder.close();
        }
        _closeInput();
    }
    
    private void _closeInput() throws IOException
    {
        byte[] buf = _inputBuffer;
        if (buf != null) {
            _inputBuffer = null;
            _recycler.releaseInputBuffer(buf);
        }
        if (!_inputStreamClosed) {
            _inputStreamClosed = true;
            _inputStream.close();
        }
    }

    
    /**
     * Overridden to just skip at most a single chunk at a time.
     */
    @Override
    public long skip(long n) throws IOException
    {
        if (_inputStreamClosed) {
            return -1;
        }
        int left = (_bufferLength - _bufferPosition);
        // if none left, must read more:
        if (left <= 0) {
            // otherwise must read more to skip...
            int b = read();
            if (b < 0) { // EOF
                return -1;
            }
            // push it back to get accurate skip count
            --_bufferPosition;
            left = (_bufferLength - _bufferPosition);
        }
        // either way, just skip whatever we have decoded
        if (left > n) {
            left = (int) n;
        }
        _bufferPosition += left;
        return left;
    }
    /*
    ///////////////////////////////////////////////////////////////////////
    // Internal methods
    ///////////////////////////////////////////////////////////////////////
     */

    
    /**
     * Fill the compressed-bytes buffer by reading uncompressed content from
     * the underlying input stream and encoding it.
     *
     * @return True if there is now content to read; false if not (end-of-input)
     */
    protected boolean readyBuffer() throws IOException
    {
        if (_bufferPosition < _bufferLength) { // still have encoded data to return
            return true;
        }
        if (_inputStreamClosed) {
            return false;
        }
        // Ok: read as much as we can from input source first
        int count = _inputStream.read(_inputBuffer, 0, _inputBuffer.length);
        if (count < 0) { // if no input read, it's EOF
            _closeInput(); // and we can close input source as well
            return false;
        }
        int chunkLength = count;
        int left = _inputBuffer.length - count;

        while ((count = _inputStream.read(_inputBuffer, chunkLength, left)) > 0) {
            chunkLength += count;
            left -= count;
            if (left < 1) {
                break;
            }
        }
        _bufferPosition = 0;
        // Ok: if we don't yet have an encoder (and buffer for it), let's get one
        if (_encoder == null) {
            // need 7 byte header, plus regular max buffer size
            // ((chunkLength + 31) >> 5 is chunkLength/32, rounded up: the worst-case
            // LZF expansion, one control byte per 32 literal bytes)
            int bufferLen = chunkLength + ((chunkLength + 31) >> 5) + 7;
            _encoder = ChunkEncoderFactory.optimalNonAllocatingInstance(bufferLen);
        }
        if (_encodedBytes == null) {
            int bufferLen = chunkLength + ((chunkLength + 31) >> 5) + 7;
            _encodedBytes = _recycler.allocEncodingBuffer(bufferLen);
        }
        // offset of 7 so we can prepend header as necessary
        int encodeEnd = _encoder.tryCompress(_inputBuffer, 0, chunkLength, _encodedBytes, 7);
        // but did it compress?
        if (encodeEnd < (chunkLength + 5)) { // yes! (compared to 5 byte uncomp prefix, data)
            // prepend header in situ
            LZFChunk.appendCompressedHeader(chunkLength, encodeEnd - 7, _encodedBytes, 0);
            _bufferLength = encodeEnd;
        } else { // no -- so sad...
            int ptr = LZFChunk.appendNonCompressedHeader(chunkLength, _encodedBytes, 0);
            // TODO: figure out a way to avoid this copy; need a header
            System.arraycopy(_inputBuffer, 0, _encodedBytes, ptr, chunkLength);
            _bufferLength = ptr + chunkLength;
        }
        if (count < 0) { // did we get end-of-input?
            _closeInput();
        }
        return true;
    }
}
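
A minimal round-trip sketch (illustrative only; the class name RoundTripExample, the sample data and the buffer size are assumptions, not part of the original source): bytes read from LZFCompressingInputStream are LZF-framed chunks that the counterpart LZFInputStream can decompress back to the original content.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    import com.ning.compress.lzf.LZFCompressingInputStream;
    import com.ning.compress.lzf.LZFInputStream;

    public class RoundTripExample
    {
        public static void main(String[] args) throws Exception
        {
            byte[] original = "sample data, sample data, sample data"
                    .getBytes(StandardCharsets.UTF_8);

            // reads return LZF-compressed data, produced on the fly
            InputStream compressing = new LZFCompressingInputStream(
                    new ByteArrayInputStream(original));
            ByteArrayOutputStream compressed = new ByteArrayOutputStream();
            byte[] buffer = new byte[4096];
            int count;
            while ((count = compressing.read(buffer)) != -1) {
                compressed.write(buffer, 0, count);
            }
            compressing.close();

            // decompress with the counterpart stream to verify the round trip
            InputStream decompressing = new LZFInputStream(
                    new ByteArrayInputStream(compressed.toByteArray()));
            ByteArrayOutputStream restored = new ByteArrayOutputStream();
            while ((count = decompressing.read(buffer)) != -1) {
                restored.write(buffer, 0, count);
            }
            decompressing.close();

            System.out.println(Arrays.equals(original, restored.toByteArray())); // prints: true
        }
    }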