/*
 * Source reconstructed from a code-browser snippet view; page navigation
 * chrome ("Start line / End line / Snippet Preview") removed.
 */
package com.ning.compress.lzf.util;

import java.io.File;
import java.io.FileDescriptor;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.WritableByteChannel;

import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.ChunkEncoder;
import com.ning.compress.lzf.LZFChunk;
 
/**
 * Helper class that allows use of LZF compression even if a library requires
 * use of {@link java.io.FileOutputStream}.
 *<p>
 * Note that use of this class is not recommended unless you absolutely must
 * use a {@link java.io.FileOutputStream} instance; otherwise basic
 * {@link com.ning.compress.lzf.LZFOutputStream} (which uses aggregation for
 * underlying streams) is more appropriate.
 *<p>
 * Implementation note: much of the code is just copied from
 * {@link com.ning.compress.lzf.LZFOutputStream}, so care must be taken to
 * keep implementations in sync if there are fixes.
 */
 
 public class LZFFileOutputStream extends FileOutputStream implements WritableByteChannel
 {
     private static final int OUTPUT_BUFFER_SIZE = .;
 
     private final ChunkEncoder _encoder;
     private final BufferRecycler _recycler;
 
     protected byte[] _outputBuffer;
     protected int _position = 0;

    
Configuration setting that governs whether basic 'flush()' should first complete a block or not.

Default value is 'true'.

 
     protected boolean _cfgFinishBlockOnFlush = true;

    
Flag that indicates if we have already called '_outputStream.close()' (to avoid calling it multiple times)
 
     protected boolean _outputStreamClosed;

    
Wrapper object we use to allow decoder to write directly to the stream, without ending in infinite loop...
 
     private final Wrapper _wrapper;
 
     /*
     ///////////////////////////////////////////////////////////////////////
     // Construction, configuration
     ///////////////////////////////////////////////////////////////////////
      */
 
     public LZFFileOutputStream(File filethrows FileNotFoundException {
         this(ChunkEncoderFactory.optimalInstance(), file);
     }
 
     public LZFFileOutputStream(File fileboolean appendthrows FileNotFoundException {
         this(ChunkEncoderFactory.optimalInstance(), fileappend);
     }
 
     public LZFFileOutputStream(FileDescriptor fdObj) {
         this(ChunkEncoderFactory.optimalInstance(), fdObj);
     }
 
     public LZFFileOutputStream(String namethrows FileNotFoundException {
         this(ChunkEncoderFactory.optimalInstance(), name);
     }
 
     public LZFFileOutputStream(String nameboolean appendthrows FileNotFoundException {
         this(ChunkEncoderFactory.optimalInstance(), nameappend);
     }
 
     public LZFFileOutputStream(ChunkEncoder encoderFile filethrows FileNotFoundException {
         super(file);
          = encoder;
          = BufferRecycler.instance();
          = new Wrapper();
     }
 
     public LZFFileOutputStream(ChunkEncoder encoderFile fileboolean appendthrows FileNotFoundException {
         super(fileappend);
          = encoder;
          = BufferRecycler.instance();
         = new Wrapper();
    }
    public LZFFileOutputStream(ChunkEncoder encoderFileDescriptor fdObj) {
        super(fdObj);
         = encoder;
         = BufferRecycler.instance();
         = new Wrapper();
    }
    public LZFFileOutputStream(ChunkEncoder encoderString namethrows FileNotFoundException {
        super(name);
         = encoder;
         = BufferRecycler.instance();
         = new Wrapper();
    }
    public LZFFileOutputStream(ChunkEncoder encoderString nameboolean appendthrows FileNotFoundException {
        super(nameappend);
         = encoder;
         = BufferRecycler.instance();
         = new Wrapper();
    }

    
Method for defining whether call to flush() will also complete current block (similar to calling finishBlock()) or not.
    public LZFFileOutputStream setFinishBlockOnFlush(boolean b) {
         = b;
        return this;
    }
    /*
    ///////////////////////////////////////////////////////////////////////
    // FileOutputStream overrides
    ///////////////////////////////////////////////////////////////////////
     */
    @Override
    public boolean isOpen() {
        return ! ;
    }
    @Override
    public void close() throws IOException
    {
        if (!) {
            if ( > 0) {
                writeCompressedBlock();
            }
            super.flush();
            super.close();
             = true;
            .close();
            byte[] buf = ;
            if (buf != null) {
                 = null;
                .releaseOutputBuffer(buf);
            }
        }
    }
    @Override
    public void flush() throws IOException
    {
        checkNotClosed();
        if ( &&  > 0) {
            writeCompressedBlock();
        }
        super.flush();
    }
    // fine as is: don't override
    // public FileChannel getChannel();
    // final, can't override:
    // public FileDescriptor getFD();
    @Override
    public void write(byte[] bthrows IOException
    {
        write(b, 0, b.length);
    }
    @Override
    public void write(byte[] bufferint offsetint length)  throws IOException
    {
        checkNotClosed();
        final int BUFFER_LEN = .;
        // simple case first: empty _outputBuffer and "big" input buffer: write first full blocks, if any, without copying
        while ( == 0 && length >= BUFFER_LEN) {
            .encodeAndWriteChunk(bufferoffsetBUFFER_LEN);
            offset += BUFFER_LEN;
            length -= BUFFER_LEN;
        }
        // simple case first: buffering only (for trivially short writes)
        int free = BUFFER_LEN - ;
        if (free > length) {
            System.arraycopy(bufferoffsetlength);
             += length;
            return;
        }
        // otherwise, copy whatever we can, flush
        System.arraycopy(bufferoffsetfree);
        offset += free;
        length -= free;
         += free;
        writeCompressedBlock();
        // then write intermediate full blocks, if any, without copying:
        while (length >= BUFFER_LEN) {
            .encodeAndWriteChunk(bufferoffsetBUFFER_LEN);
            offset += BUFFER_LEN;
            length -= BUFFER_LEN;
        }
        // and finally, copy leftovers in buffer, if any
        if (length > 0) {
            System.arraycopy(bufferoffset, 0, length);
        }
         = length;
    }
    @Override
    public void write(int bthrows IOException
    {
        checkNotClosed();
        if ( >= .) {
            writeCompressedBlock();
        }
        [++] = (byteb;
    }
    public void write(final InputStream inthrows IOException {
        writeCompressedBlock(); // will flush _outputBuffer
        int read;
        while ((read = in.read()) >= 0) {
             = read;
            writeCompressedBlock();
        }
    }
    /*
    ///////////////////////////////////////////////////////////////////////
    // WritableByteChannel implementation
    ///////////////////////////////////////////////////////////////////////
     */
    /* 26-Nov-2013, tatu: Why is this synchronized? Pretty much nothing else is,
     *   so why this method?
     */
    @Override
    public synchronized int write(final ByteBuffer srcthrows IOException {
        int r = src.remaining();
        if (r <= 0) {
            return r;
        }
        writeCompressedBlock(); // will flush _outputBuffer
        if (src.hasArray()) {
            // direct compression from backing array
            write(src.array(), src.arrayOffset(), src.limit() - src.arrayOffset());
        } else {
            // need to copy to heap array first
            while (src.hasRemaining()) {
                int toRead = Math.min(src.remaining(), .);
                src.get(, 0, toRead);
                 = toRead;
                writeCompressedBlock();
            }
        }
        return r;
    }
    
    public void write(final FileChannel inthrows IOException {
        MappedByteBuffer src = in.map(., 0, in.size());
        write(src);
    }
    /*
    ///////////////////////////////////////////////////////////////////////
    // Additional public methods
    ///////////////////////////////////////////////////////////////////////
     */

    
Accessor for checking whether call to "flush()" will first finish the current block or not
    public boolean getFinishBlockOnFlush() {
        return ;
    }

    
Method that can be used to force completion of the current block, which means that all buffered data will be compressed into an LZF block. This typically results in lower compression ratio as larger blocks compress better; but may be necessary for network connections to ensure timely sending of data.
    public LZFFileOutputStream finishBlock() throws IOException
    {
        checkNotClosed();
        if ( > 0) {
            writeCompressedBlock();
        }
        return this;
    }
    /*
    ///////////////////////////////////////////////////////////////////////
    // Internal methods
    ///////////////////////////////////////////////////////////////////////
     */

    
Compress and write the current block to the OutputStream
    protected void writeCompressedBlock() throws IOException
    {
        int left = ;
         = 0;
        int offset = 0;
        while (left > 0) {
            int chunkLen = Math.min(.left);
            .encodeAndWriteChunk(offsetchunkLen);
            offset += chunkLen;
            left -= chunkLen;
        }
    }
    protected void rawWrite(byte[] bufferint offsetint length)  throws IOException
    {
        super.write(bufferoffsetlength);
    }
    protected void checkNotClosed() throws IOException
    {
        if () {
            throw new IOException(getClass().getName()+" already closed");
        }
    }
    /*
    ///////////////////////////////////////////////////////////////////////
    // Helper class(es)
    ///////////////////////////////////////////////////////////////////////
     */

    
This simple wrapper is needed to re-route read calls so that they will use "raw" writes
    private final class Wrapper extends OutputStream
    {
        @Override
        public void write(int arg0throws IOException {
            throw new UnsupportedOperationException();
        }
        @Override
        public void write(byte[] bufferint offsetint length)  throws IOException
        {
            rawWrite(bufferoffsetlength);
        }
    }
// (end of reconstructed source; page footer removed)