Written by Gil Tene of Azul Systems, and released to the public domain, as explained at http://creativecommons.org/publicdomain/zero/1.0/

Author(s):
Gil Tene
  
  
  package org.HdrHistogram;

  import java.io.IOException;
  import java.io.ObjectInputStream;
  import java.nio.ByteBuffer;
  import java.nio.LongBuffer;
  import java.util.concurrent.atomic.AtomicLongArray;
  import java.util.concurrent.atomic.AtomicLongFieldUpdater;
  import java.util.zip.DataFormatException;

An integer values High Dynamic Range (HDR) Histogram that supports safe concurrent recording operations.

A ConcurrentHistogram guarantees lossless recording of values into the histogram even when the histogram is updated by multiple threads, and supports auto-resize and shift operations that may result from or occur concurrently with other recording operations.

It is important to note that concurrent recording, auto-sizing, and value shifting are the only thread-safe behaviors provided by ConcurrentHistogram, and that it is not otherwise synchronized. Specifically, ConcurrentHistogram provides no implicit synchronization that would prevent the contents of the histogram from changing during queries, iterations, copies, or addition operations on the histogram. Callers wishing to make potentially concurrent, multi-threaded updates that would safely work in the presence of queries, copies, or additions of histogram objects should either take care to externally synchronize and/or order their access, use the SynchronizedHistogram variant, or (recommended) use Recorder or SingleWriterRecorder which are intended for this purpose.

Auto-resizing: When constructed with no specified value range (or when auto-resize is turned on with AbstractHistogram.setAutoResize(boolean)) a Histogram will auto-resize its dynamic range to include recorded values as they are encountered. Note that recording calls that cause auto-resizing may take longer to execute, as resizing incurs allocation and copying of internal data structures.

See package description for org.HdrHistogram for details.
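As a quick illustration of the guarantees above (a minimal sketch, not part of the original source), the following example records into one ConcurrentHistogram from two threads and only reads results after the writers have finished, providing the external ordering the class documentation asks for. It assumes the standard AbstractHistogram API (recordValue, getTotalCount, getValueAtPercentile); the recorded latency values are invented:

    import org.HdrHistogram.ConcurrentHistogram;

    public class ConcurrentRecordingExample {
        public static void main(String[] args) throws InterruptedException {
            // Auto-resizing histogram tracking values to 2 significant decimal digits:
            final ConcurrentHistogram histogram = new ConcurrentHistogram(2);

            // Multiple threads may record concurrently without losing counts:
            Runnable recorder = () -> {
                for (int i = 0; i < 100_000; i++) {
                    histogram.recordValue(50_000 + (i % 500));   // invented latency samples, in ns
                }
            };
            Thread t1 = new Thread(recorder);
            Thread t2 = new Thread(recorder);
            t1.start();
            t2.start();

            // Queries are not synchronized with recording, so wait for the writers first:
            t1.join();
            t2.join();

            System.out.println("total count: " + histogram.getTotalCount());
            System.out.println("99th percentile: " + histogram.getValueAtPercentile(99.0));
        }
    }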

 
 
 public class ConcurrentHistogram extends Histogram {
 
    static final AtomicLongFieldUpdater<ConcurrentHistogram> totalCountUpdater =
            AtomicLongFieldUpdater.newUpdater(ConcurrentHistogram.class, "totalCount");
    volatile long totalCount;

    // Active and inactive count arrays, swapped under the WriterReaderPhaser so that
    // resize and normalizing-offset shifts never lose concurrently recorded counts:
    volatile AtomicLongArrayWithNormalizingOffset activeCounts;
    volatile AtomicLongArrayWithNormalizingOffset inactiveCounts;

    transient WriterReaderPhaser wrp = new WriterReaderPhaser();
 
 
     @Override
     long getCountAtIndex(final int index) {
         try {
            wrp.readerLock();
            long activeCount = activeCounts.get(
                    normalizeIndex(index, activeCounts.getNormalizingIndexOffset()));
            long inactiveCount = inactiveCounts.get(
                    normalizeIndex(index, inactiveCounts.getNormalizingIndexOffset()));
            return activeCount + inactiveCount;
        } finally {
            wrp.readerUnlock();
         }
     }
 
     @Override
     long getCountAtNormalizedIndex(final int index) {
         try {
            wrp.readerLock();
            long activeCount = activeCounts.get(index);
            long inactiveCount = inactiveCounts.get(index);
            return activeCount + inactiveCount;
        } finally {
            wrp.readerUnlock();
         }
     }
 
     @Override
     void incrementCountAtIndex(final int index) {
        long criticalValue = wrp.writerCriticalSectionEnter();
        try {
            activeCounts.incrementAndGet(normalizeIndex(index, activeCounts.getNormalizingIndexOffset()));
        } finally {
            wrp.writerCriticalSectionExit(criticalValue);
         }
     }
 
     @Override
    void addToCountAtIndex(final int index, final long value) {
        long criticalValue = wrp.writerCriticalSectionEnter();
        try {
            activeCounts.addAndGet(normalizeIndex(index, activeCounts.getNormalizingIndexOffset()), value);
        } finally {
            wrp.writerCriticalSectionExit(criticalValue);
         }
     }
 
     @Override
    void setCountAtIndex(int index, long value) {
        try {
            wrp.readerLock();
            activeCounts.lazySet(normalizeIndex(index, activeCounts.getNormalizingIndexOffset()), value);
            inactiveCounts.lazySet(normalizeIndex(index, inactiveCounts.getNormalizingIndexOffset()), 0);
        } finally {
            wrp.readerUnlock();
        }
    }
    @Override
    void setCountAtNormalizedIndex(int index, long value) {
        try {
            wrp.readerLock();
            inactiveCounts.lazySet(index, value);
            activeCounts.lazySet(index, 0);
        } finally {
            wrp.readerUnlock();
        }
    }
    @Override
    int getNormalizingIndexOffset() {
        return activeCounts.getNormalizingIndexOffset();
    }
    @Override
    void setNormalizingIndexOffset(int normalizingIndexOffset) {
        setNormalizingIndexOffset(normalizingIndexOffset, 0, false);
    }
    private void setNormalizingIndexOffset(
            int normalizingIndexOffset,
            int shiftedAmount,
            boolean lowestHalfBucketPopulated) {
        try {
            wrp.readerLock();
            if (normalizingIndexOffset == activeCounts.getNormalizingIndexOffset()) {
                return; // Nothing to do.
            }
            // Save and clear the inactive 0 value count:
            long inactiveZeroValueCount =
                    inactiveCounts.get(normalizeIndex(0, inactiveCounts.getNormalizingIndexOffset()));
            inactiveCounts.lazySet(normalizeIndex(0, inactiveCounts.getNormalizingIndexOffset()), 0);
            // Change the normalizingIndexOffset on the current inactiveCounts:
            inactiveCounts.setNormalizingIndexOffset(normalizingIndexOffset);
            // Handle the inactive lowest half bucket:
            if ((shiftedAmount > 0) && lowestHalfBucketPopulated) {
                shiftLowestInactiveHalfBucketContentsLeft(shiftedAmount);
            }
            // Restore the inactive 0 value count:
            inactiveCounts.lazySet(
                    normalizeIndex(0, inactiveCounts.getNormalizingIndexOffset()),
                    inactiveZeroValueCount
            );
            // switch active and inactive:
            AtomicLongArrayWithNormalizingOffset tmp = activeCounts;
            activeCounts = inactiveCounts;
            inactiveCounts = tmp;
            wrp.flipPhase();
            // Save and clear the newly inactive 0 value count:
            inactiveZeroValueCount =
                    inactiveCounts.get(normalizeIndex(0, inactiveCounts.getNormalizingIndexOffset()));
            inactiveCounts.lazySet(normalizeIndex(0, inactiveCounts.getNormalizingIndexOffset()), 0);
            // Change the normalizingIndexOffset on the newly inactiveCounts:
            inactiveCounts.setNormalizingIndexOffset(normalizingIndexOffset);
            // Handle the newly inactive lowest half bucket:
            if ((shiftedAmount > 0) && lowestHalfBucketPopulated) {
                shiftLowestInactiveHalfBucketContentsLeft(shiftedAmount);
            }
            // Restore the newly inactive 0 value count:
            inactiveCounts.lazySet(
                    normalizeIndex(0, inactiveCounts.getNormalizingIndexOffset()),
                    inactiveZeroValueCount
            );
            // switch active and inactive again:
            tmp = activeCounts;
            activeCounts = inactiveCounts;
            inactiveCounts = tmp;
            wrp.flipPhase();
            // At this point, both active and inactive have normalizingIndexOffset safely set,
            // and the switch in each was done without any writers using the wrong value in flight.
        } finally {
            wrp.readerUnlock();
        }
    }
    private void shiftLowestInactiveHalfBucketContentsLeft(int shiftAmount) {
        final int numberOfBinaryOrdersOfMagnitude = shiftAmount >> subBucketHalfCountMagnitude;
        // The lowest inactive half-bucket (not including the 0 value) is special: unlike all other half
        // buckets, the lowest half bucket values cannot be scaled by simply changing the
        // normalizing offset. Instead, they must be individually re-recorded at the new
        // scale, and cleared from the current one.
        //
        // We know that all half buckets "below" the current lowest one are full of 0s, because
        // we would have overflowed otherwise. So we need to shift the values in the current
        // lowest half bucket into that range (including the current lowest half bucket itself).
        // Iterating up from the lowermost non-zero "from slot" and copying values to the newly
        // scaled "to slot" (and then zeroing the "from slot"), will work in a single pass,
        // because the scale "to slot" index will always be a lower index than its or any
        // preceding non-scaled "from slot" index:
        //
        // (Note that we specifically avoid slot 0, as it is directly handled in the outer case)
        for (int fromIndex = 1; fromIndex < subBucketHalfCount; fromIndex++) {
            long toValue = valueFromIndex(fromIndex) << numberOfBinaryOrdersOfMagnitude;
            int toIndex = countsArrayIndex(toValue);
            int normalizedToIndex = normalizeIndex(toIndex, inactiveCounts.getNormalizingIndexOffset());
            long countAtFromIndex = inactiveCounts.get(fromIndex);
            inactiveCounts.lazySet(normalizedToIndex, countAtFromIndex);
            inactiveCounts.lazySet(fromIndex, 0);
        }
        // Note that the above loop only creates O(N) work for histograms that have values in
        // the lowest half-bucket (excluding the 0 value). Histograms that never have values
        // there (e.g. all integer value histograms used as internal storage in DoubleHistograms)
        // will never loop, and their shifts will remain O(1).
    }
    @Override
    void shiftNormalizingIndexByOffset(int offsetToAdd, boolean lowestHalfBucketPopulated) {
        try {
            wrp.readerLock();
            int newNormalizingIndexOffset = activeCounts.getNormalizingIndexOffset() + offsetToAdd;
            setNormalizingIndexOffset(newNormalizingIndexOffset, offsetToAdd, lowestHalfBucketPopulated);
        } finally {
            wrp.readerUnlock();
        }
    }
    @Override
    void resize(long newHighestTrackableValue) {
        try {
            wrp.readerLock();
            int oldNormalizedZeroIndex = normalizeIndex(0, inactiveCounts.getNormalizingIndexOffset());
            establishSize(newHighestTrackableValue);
            int countsDelta = countsArrayLength - inactiveCounts.length();
            // Resize the current inactiveCounts:
            AtomicLongArray oldInactiveCounts = inactiveCounts;
            inactiveCounts =
                    new AtomicLongArrayWithNormalizingOffset(
                            countsArrayLength,
                            inactiveCounts.getNormalizingIndexOffset()
                    );
            // Copy inactive contents to newly sized inactiveCounts:
            for (int i = 0; i < oldInactiveCounts.length(); i++) {
                inactiveCounts.lazySet(i, oldInactiveCounts.get(i));
            }
            if (oldNormalizedZeroIndex != 0) {
                // We need to shift the stuff from the zero index and up to the end of the array:
                int newNormalizedZeroIndex = oldNormalizedZeroIndex + countsDelta;
                int lengthToCopy = (countsArrayLength - countsDelta) - oldNormalizedZeroIndex;
                int src, dst;
                for (src = oldNormalizedZeroIndex, dst = newNormalizedZeroIndex;
                     src < oldNormalizedZeroIndex + lengthToCopy;
                     src++, dst++) {
                    inactiveCounts.lazySet(dst, oldInactiveCounts.get(src));
                }
            }
            // switch active and inactive:
            AtomicLongArrayWithNormalizingOffset tmp = activeCounts;
            activeCounts = inactiveCounts;
            inactiveCounts = tmp;
            wrp.flipPhase();
            // Resize the newly inactiveCounts:
            oldInactiveCounts = inactiveCounts;
            inactiveCounts =
                    new AtomicLongArrayWithNormalizingOffset(
                            countsArrayLength,
                            inactiveCounts.getNormalizingIndexOffset()
                    );
            // Copy inactive contents to newly sized inactiveCounts:
            for (int i = 0; i < oldInactiveCounts.length(); i++) {
                inactiveCounts.lazySet(i, oldInactiveCounts.get(i));
            }
            if (oldNormalizedZeroIndex != 0) {
                // We need to shift the stuff from the zero index and up to the end of the array:
                int newNormalizedZeroIndex = oldNormalizedZeroIndex + countsDelta;
                int lengthToCopy = (countsArrayLength - countsDelta) - oldNormalizedZeroIndex;
                int src, dst;
                for (src = oldNormalizedZeroIndex, dst = newNormalizedZeroIndex;
                     src < oldNormalizedZeroIndex + lengthToCopy;
                     src++, dst++) {
                    inactiveCounts.lazySet(dst, oldInactiveCounts.get(src));
                }
            }
            // switch active and inactive again:
            tmp = activeCounts;
            activeCounts = inactiveCounts;
            inactiveCounts = tmp;
            wrp.flipPhase();
            // At this point, both active and inactive have been safely resized,
            // and the switch in each was done without any writers modifying it in flight.
        } finally {
            wrp.readerUnlock();
        }
    }
    @Override
    public void setAutoResize(boolean autoResize) {
        // Note: the supplied value is ignored; auto-resize is always enabled here.
        this.autoResize = true;
    }
    @Override
    void clearCounts() {
        try {
            wrp.readerLock();
            for (int i = 0; i < activeCounts.length(); i++) {
                activeCounts.lazySet(i, 0);
                inactiveCounts.lazySet(i, 0);
            }
            totalCountUpdater.set(this, 0);
        } finally {
            wrp.readerUnlock();
        }
    }
    @Override
    public ConcurrentHistogram copy() {
        ConcurrentHistogram copy = new ConcurrentHistogram(this);
        copy.add(this);
        return copy;
    }
    @Override
    public ConcurrentHistogram copyCorrectedForCoordinatedOmission(final long expectedIntervalBetweenValueSamples) {
        ConcurrentHistogram toHistogram = new ConcurrentHistogram(this);
        toHistogram.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples);
        return toHistogram;
    }
    @Override
    public long getTotalCount() {
        return totalCountUpdater.get(this);
    }
    @Override
    void setTotalCount(final long totalCount) {
        totalCountUpdater.set(this, totalCount);
    }
    @Override
    void incrementTotalCount() {
        totalCountUpdater.incrementAndGet(this);
    }
    @Override
    void addToTotalCount(final long value) {
        totalCountUpdater.addAndGet(this, value);
    }
    @Override
    int _getEstimatedFootprintInBytes() {
        return (512 + (2 * 8 * activeCounts.length()));
    }

    
Construct an auto-resizing ConcurrentHistogram with a lowest discernible value of 1 and an auto-adjusting highestTrackableValue. Can auto-resize up to track values up to (Long.MAX_VALUE / 2).

Parameters:
numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant decimal digits to which the histogram will maintain value resolution and separation. Must be a non-negative integer between 0 and 5.
    public ConcurrentHistogram(final int numberOfSignificantValueDigits) {
        this(1, 2, numberOfSignificantValueDigits);
        setAutoResize(true);
    }

    
Construct a ConcurrentHistogram given the Highest value to be tracked and a number of significant decimal digits. The histogram will be constructed to implicitly track (distinguish from 0) values as low as 1.

Parameters:
highestTrackableValue The highest value to be tracked by the histogram. Must be a positive integer that is >= 2.
numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant decimal digits to which the histogram will maintain value resolution and separation. Must be a non-negative integer between 0 and 5.
    public ConcurrentHistogram(final long highestTrackableValue, final int numberOfSignificantValueDigits) {
        this(1, highestTrackableValue, numberOfSignificantValueDigits);
    }

    
Construct a ConcurrentHistogram given the Lowest and Highest values to be tracked and a number of significant decimal digits. Providing a lowestDiscernibleValue is useful in situations where the units used for the histogram's values are much smaller than the minimal accuracy required. E.g. when tracking time values stated in nanosecond units, where the minimal accuracy required is a microsecond, the proper value for lowestDiscernibleValue would be 1000.

Parameters:
lowestDiscernibleValue The lowest value that can be tracked (distinguished from 0) by the histogram. Must be a positive integer that is >= 1. May be internally rounded down to nearest power of 2.
highestTrackableValue The highest value to be tracked by the histogram. Must be a positive integer that is >= (2 * lowestDiscernibleValue).
numberOfSignificantValueDigits Specifies the precision to use. This is the number of significant decimal digits to which the histogram will maintain value resolution and separation. Must be a non-negative integer between 0 and 5.
    public ConcurrentHistogram(final long lowestDiscernibleValue, final long highestTrackableValue,
                               final int numberOfSignificantValueDigits) {
        super(lowestDiscernibleValue, highestTrackableValue, numberOfSignificantValueDigits, false);
        activeCounts = new AtomicLongArrayWithNormalizingOffset(countsArrayLength, 0);
        inactiveCounts = new AtomicLongArrayWithNormalizingOffset(countsArrayLength, 0);
        wordSizeInBytes = 8;
    }
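For example (a sketch, not from the original source), recording nanosecond values with microsecond accuracy as described above would use a lowestDiscernibleValue of 1000; the tracked range and precision below are arbitrary choices:

    import org.HdrHistogram.ConcurrentHistogram;

    public class NanosecondLatencyExample {
        public static void main(String[] args) {
            // Values are in nanoseconds, but only microsecond accuracy is needed,
            // so the lowest discernible value is 1000 ns. Track up to 1 hour of
            // nanoseconds, with 3 significant decimal digits:
            ConcurrentHistogram latencies =
                    new ConcurrentHistogram(1000L, 3600L * 1000 * 1000 * 1000, 3);
            latencies.recordValue(1_250_000);   // a 1.25 ms sample, in nanoseconds
            System.out.println(latencies.getMaxValue());
        }
    }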

    
Construct a histogram with the same range settings as a given source histogram, duplicating the source's start/end timestamps (but NOT its contents).

Parameters:
source The source histogram to duplicate
    public ConcurrentHistogram(final AbstractHistogram source) {
        super(source, false);
        activeCounts = new AtomicLongArrayWithNormalizingOffset(countsArrayLength, 0);
        inactiveCounts = new AtomicLongArrayWithNormalizingOffset(countsArrayLength, 0);
        wordSizeInBytes = 8;
    }

    
Construct a new histogram by decoding it from a ByteBuffer.

Parameters:
buffer The buffer to decode from
minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high
Returns:
The newly constructed histogram
    public static ConcurrentHistogram decodeFromByteBuffer(final ByteBuffer buffer,
                                                           final long minBarForHighestTrackableValue) {
        return (ConcurrentHistogram) decodeFromByteBuffer(buffer, ConcurrentHistogram.class,
                minBarForHighestTrackableValue);
    }

    
Construct a new histogram by decoding it from a compressed form in a ByteBuffer.

Parameters:
buffer The buffer to decode from
minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high
Returns:
The newly constructed histogram
Throws:
java.util.zip.DataFormatException on error parsing/decompressing the buffer
    public static ConcurrentHistogram decodeFromCompressedByteBuffer(final ByteBuffer buffer,
                                                                     final long minBarForHighestTrackableValue) throws DataFormatException {
        return (ConcurrentHistogram) decodeFromCompressedByteBuffer(buffer, ConcurrentHistogram.class,
                minBarForHighestTrackableValue);
    }
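To show how the decode methods pair with encoding (a hedged sketch, not part of this listing), the round trip below assumes the encodeIntoCompressedByteBuffer and getNeededByteBufferCapacity methods inherited from AbstractHistogram; passing 0 as minBarForHighestTrackableValue imposes no minimum:

    import org.HdrHistogram.ConcurrentHistogram;

    import java.nio.ByteBuffer;
    import java.util.zip.DataFormatException;

    public class HistogramCodecExample {
        public static void main(String[] args) throws DataFormatException {
            ConcurrentHistogram original = new ConcurrentHistogram(3600L * 1000 * 1000, 3);
            original.recordValue(42_000);

            // Encode into a compressed form...
            ByteBuffer buffer = ByteBuffer.allocate(original.getNeededByteBufferCapacity());
            original.encodeIntoCompressedByteBuffer(buffer);
            buffer.rewind();

            // ...and decode it back into an equivalent histogram:
            ConcurrentHistogram decoded =
                    ConcurrentHistogram.decodeFromCompressedByteBuffer(buffer, 0);
            System.out.println(decoded.getTotalCount());   // prints 1
        }
    }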
    private void readObject(final ObjectInputStream o)
            throws IOException, ClassNotFoundException {
        o.defaultReadObject();
    }
    @Override
    synchronized void fillCountsArrayFromBuffer(final ByteBuffer buffer, final int length) {
        LongBuffer logbuffer = buffer.asLongBuffer();
        for (int i = 0; i < length; i++) {
            inactiveCounts.lazySet(i, logbuffer.get());
            activeCounts.lazySet(i, 0);
        }
    }
    // We try to cache the LongBuffer used in output cases, as repeated
    // output from the same histogram using the same buffer is likely:
    private LongBuffer cachedDstLongBuffer = null;
    private ByteBuffer cachedDstByteBuffer = null;
    private int cachedDstByteBufferPosition = 0;
    @Override
    synchronized void fillBufferFromCountsArray(final ByteBuffer buffer, final int length) {
        if ((cachedDstLongBuffer == null) ||
                (buffer != cachedDstByteBuffer) ||
                (buffer.position() != cachedDstByteBufferPosition)) {
            cachedDstByteBuffer = buffer;
            cachedDstByteBufferPosition = buffer.position();
            cachedDstLongBuffer = buffer.asLongBuffer();
        }
        cachedDstLongBuffer.rewind();
        try {
            wrp.readerLock();
            for (int i = 0; i < length; i++) {
                cachedDstLongBuffer.put(activeCounts.get(i) + inactiveCounts.get(i));
            }
        } finally {
            wrp.readerUnlock();
        }
    }
    static class AtomicLongArrayWithNormalizingOffset extends AtomicLongArray {
        private int normalizingIndexOffset;
        AtomicLongArrayWithNormalizingOffset(int length, int normalizingIndexOffset) {
            super(length);
            this.normalizingIndexOffset = normalizingIndexOffset;
        }
        public int getNormalizingIndexOffset() {
            return normalizingIndexOffset;
        }
        public void setNormalizingIndexOffset(int normalizingIndexOffset) {
            this.normalizingIndexOffset = normalizingIndexOffset;
        }
    }