Start line:  
End line:  

Snippet Preview

Snippet HTML Code

Stack Overflow Questions
  /*
   * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
   *
   * Licensed under the Apache License, Version 2.0 (the "License").
   * You may not use this file except in compliance with the License.
   * A copy of the License is located at
   *
   *  http://aws.amazon.com/apache2.0
   *
  * or in the "license" file accompanying this file. This file is distributed
  * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
  * express or implied. See the License for the specific language governing
  * permissions and limitations under the License.
  */
 package com.amazonaws.services.s3.internal;
 
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.UUID;
import java.util.concurrent.Semaphore;

import org.apache.commons.logging.LogFactory;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;

import com.amazonaws.AbortedException;
import com.amazonaws.services.s3.OnFileDelete;
import com.amazonaws.services.s3.UploadObjectObserver;
 
 
Used to split an output stream into multiple files for purposes such as parallel uploads.
 
 public class MultiFileOutputStream extends OutputStream implements OnFileDelete {
     static final int DEFAULT_PART_SIZE = 5 << 20; // 5MB
     private final File root;
     private final String namePrefix;
     private int filesCreated;
     private long partSize = ;
     private long diskLimit = .;
     private UploadObjectObserver observer;
    
Number of bytes that have been written to the current file.
 
     private int currFileBytesWritten;
    
Total number of bytes written to all files so far.
 
     private long totalBytesWritten;
     private FileOutputStream os;
     private boolean closed;

    
null means no blocking necessary.
 
     private Semaphore diskPermits;

    
Construct an instance to use the default temporary directory and temp file naming convention. The init(com.amazonaws.services.s3.UploadObjectObserver,long,long) must be called before this stream is considered fully initialized.
 
     public MultiFileOutputStream() {
          = new File(System.getProperty("java.io.tmpdir"));
          = yyMMdd_hhmmss() + "." + UUID.randomUUID();
     }

    
Construct an instance to use the specified directory for temp file creations, and the specified prefix for temp file naming. The init(com.amazonaws.services.s3.UploadObjectObserver,long,long) must be called before this stream is considered fully initialized.
 
     public MultiFileOutputStream(File rootString namePrefix) {
         if (root == null || !root.isDirectory() || !root.canWrite())
             throw new IllegalArgumentException(root
                     + " must be a writable directory");
         if (namePrefix == null || namePrefix.trim().length() == 0)
             throw new IllegalArgumentException(
                     "Please specify a non-empty name prefix");
         this. = root;
         this. = namePrefix;
     }

    
Used to initialized this stream. This method is an SPI (service provider interface) that is called from AmazonS3EncryptionClient.

Implementation of this method should never block.

Parameters:
observer the upload object observer
partSize part size for multi-part upload
diskLimit the maximum disk space to be used for this multi-part upload
Returns:
this object
            long partSizelong diskLimit) {
        if (observer == null)
            throw new IllegalArgumentException("Observer must be specified");
        this. = observer;
        if (diskLimit < partSize << 1) {
            throw new IllegalArgumentException(
                "Maximum temporary disk space must be at least twice as large as the part size: partSize="
                + partSize + ", diskSize=" + diskLimit);
        }
        this. = partSize;
        this. = diskLimit;
        final int max = (int)(diskLimit/partSize);
        this. = max < 0 ? null : new Semaphore(max);
        return this;
    }

    
This method would block as necessary if running out of disk space.
    @Override
    public void write(int bthrows IOException {
        fos().write(b);
        ++;
        ++;
    }

    
This method would block as necessary if running out of disk space.
    @Override
    public void write(byte[] bthrows IOException {
        if (b.length == 0)
            return;
        fos().write(b);
         += b.length;
         += b.length;
    }

    
This method would block as necessary if running out of disk space.
    @Override
    public void write(byte[] bint offint lenthrows IOException {
        if (b.length == 0)
            return;
        fos().write(bofflen);
         += len;
         += len;
    }

    
Returns the file output stream to be used for writing, blocking if necessary if running out of disk space.

Throws:
java.lang.InterruptedException if the running thread was interrupted
    private FileOutputStream fos() throws IOException {
        if ()
            throw new IOException("Output stream is already closed");
        if ( == null ||  >= ) {
            if ( != null) {
                .close();
                // notify about the new file ready for processing
                .onPartCreate(new PartCreationEvent(
                        getFile(), falsethis));
            }
             = 0;
            ++;
            blockIfNecessary();
            final File file = getFile();
            file.deleteOnExit();
             = new FileOutputStream(file);
        }
        return ;
    }
    @Override
    public void onFileDelete(FileDeletionEvent event) {
        if ( != null)
            .release();
    }

    
Blocks the running thread if running out of disk space.

Throws:
com.amazonaws.AbortedException if the running thread is interrupted while acquiring a semaphore
    private void blockIfNecessary() {
        if ( == null ||  == .)
            return;
        try {
            .acquire();
        } catch (InterruptedException e) {
            // don't want to re-interrupt so it won't cause SDK stream to be
            // closed in case the thread is reused for a different request
            throw new AbortedException(e);
        }
    }
    @Override
    public void flush() throws IOException {
        if ( != null)
            .flush();
    }
    @Override
    public void close() throws IOException {
        if ()
            return;
         = true;
        if ( != null) {
            .close();
            File lastPart = getFile();
            if (lastPart.length() == 0) {
                if (!lastPart.delete()) {
                    LogFactory.getLog(getClass()).debug(
                            "Ignoring failure to delete empty file " + lastPart);
                }
            } else {
                // notify about the new file ready for processing
                .onPartCreate(new PartCreationEvent(
                        getFile(), truethis));
            }
        }
    }
    public void cleanup() {
        for (int i=0; i < getNumFilesWritten(); i++) {
            File f = getFile(i);
            if (f.exists()) {
                if (!f.delete()) {
                    LogFactory.getLog(getClass()).debug(
                            "Ignoring failure to delete file " + f);
                }
            }
        }
    }

    

Returns:
the number of files written with the specified prefix with the part number as the file extension.
    public int getNumFilesWritten() {
        return ;
    }
    public File getFile(int partNumber) {
        return new File( + "." + partNumber);
    }
    public long getPartSize() {
        return ;
    }
    public File getRoot() {
        return ;
    }
    public String getNamePrefix() {
        return ;
    }
    public long getTotalBytesWritten() {
        return ;
    }
    static String yyMMdd_hhmmss() {
        return DateTimeFormat.forPattern("yyMMdd-hhmmss").print(new DateTime());
    }
    public boolean isClosed() {
        return ;
    }
    public long getDiskLimit() {
        return ;
    }
New to GrepCode? Check out our FAQ X