Start line:  
End line:  

Snippet Preview

Snippet HTML Code

Stack Overflow Questions
   /*
    * Copyright (C) 2009 The Guava Authors
    *
    * Licensed under the Apache License, Version 2.0 (the "License");
    * you may not use this file except in compliance with the License.
    * You may obtain a copy of the License at
    *
    * http://www.apache.org/licenses/LICENSE-2.0
    *
   * Unless required by applicable law or agreed to in writing, software
   * distributed under the License is distributed on an "AS IS" BASIS,
   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   * See the License for the specific language governing permissions and
   * limitations under the License.
   */
  
  package com.google.common.cache;
  
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.cache.CacheBuilder.NULL_TICKER;
import static com.google.common.cache.CacheBuilder.UNSET_INT;
import static com.google.common.util.concurrent.Uninterruptibles.getUninterruptibly;
import static java.util.concurrent.TimeUnit.NANOSECONDS;

import com.google.common.base.Equivalence;
import com.google.common.base.Ticker;
import com.google.common.collect.Iterators;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.lang.ref.ReferenceQueue;
import java.util.AbstractMap;
import java.util.AbstractQueue;
import java.util.Iterator;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.logging.Logger;

import javax.annotation.Nullable;
import javax.annotation.concurrent.GuardedBy;

/**
 * The concurrent hash map implementation built by {@link CacheBuilder}.
 *
 * <p>This implementation is heavily derived from revision 1.96 of ConcurrentHashMap.java.
 *
 * @author Charles Fry
 * @author Bob Lee ({@code com.google.common.collect.MapMaker})
 * @author Doug Lea ({@code ConcurrentHashMap})
 */
  
  class LocalCache<K, V> extends AbstractMap<K, V> implements ConcurrentMap<K, V> {
  
    /*
     * The basic strategy is to subdivide the table among Segments, each of which itself is a
     * concurrently readable hash table. The map supports non-blocking reads and concurrent writes
     * across different segments.
     *
     * If a maximum size is specified, a best-effort bounding is performed per segment, using a
    * page-replacement algorithm to determine which entries to evict when the capacity has been
    * exceeded.
    *
    * The page replacement algorithm's data structures are kept casually consistent with the map. The
    * ordering of writes to a segment is sequentially consistent. An update to the map and recording
    * of reads may not be immediately reflected on the algorithm's data structures. These structures
    * are guarded by a lock and operations are applied in batches to avoid lock contention. The
    * penalty of applying the batches is spread across threads so that the amortized cost is slightly
    * higher than performing just the operation without enforcing the capacity constraint.
    *
    * This implementation uses a per-segment queue to record a memento of the additions, removals,
    * and accesses that were performed on the map. The queue is drained on writes and when it exceeds
    * its capacity threshold.
    *
    * The Least Recently Used page replacement algorithm was chosen due to its simplicity, high hit
    * rate, and ability to be implemented with O(1) time complexity. The initial LRU implementation
    * operates per-segment rather than globally for increased implementation simplicity. We expect
    * the cache hit rate to be similar to that of a global LRU algorithm.
    */
 
   // Constants
 
  
The maximum capacity, used if a higher value is implicitly specified by either of the constructors with arguments. MUST be a power of two <= 1<<30 to ensure that entries are indexable using ints.
 
   static final int MAXIMUM_CAPACITY = 1 << 30;

  
The maximum number of segments to allow; used to bound constructor arguments.
 
   static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
 
  
Number of (unsynchronized) retries in the containsValue method.
 
   static final int CONTAINS_VALUE_RETRIES = 3;

  
Number of cache access operations that can be buffered per segment before the cache's recency ordering information is updated. This is used to avoid lock contention by recording a memento of reads and delaying a lock acquisition until the threshold is crossed or a mutation occurs.

This must be a (2^n)-1 as it is used as a mask.

 
   static final int DRAIN_THRESHOLD = 0x3F;

  
Maximum number of entries to be drained in a single cleanup run. This applies independently to the cleanup queue and both reference queues.
 
   // TODO(fry): empirically optimize this
   static final int DRAIN_MAX = 16;
 
   // Fields
 
   static final Logger logger = Logger.getLogger(LocalCache.class.getName());
 
   static final ListeningExecutorService sameThreadExecutor = MoreExecutors.sameThreadExecutor();

  
Mask value for indexing into segments. The upper bits of a key's hash code are used to choose the segment.
 
   final int segmentMask;

  
Shift value for indexing within segments. Helps prevent entries that end up in the same segment from also ending up in the same bucket.
 
   final int segmentShift;

  
The segments, each of which is a specialized hash table.
 
   final Segment<K, V>[] segments;

  
The concurrency level.
 
   final int concurrencyLevel;

  
Strategy for comparing keys.
 
Strategy for comparing values.
 
Strategy for referencing keys.
 
   final Strength keyStrength;

  
Strategy for referencing values.
 
   final Strength valueStrength;

  
The maximum weight of this map. UNSET_INT if there is no maximum.
 
   final long maxWeight;

  
Weigher to weigh cache entries.
 
   final Weigher<K, V> weigher;

  
How long after the last access to an entry the map will retain that entry.
 
   final long expireAfterAccessNanos;

  
How long after the last write to an entry the map will retain that entry.
 
   final long expireAfterWriteNanos;

  
How long after the last write an entry becomes a candidate for refresh.
 
   final long refreshNanos;

  
Entries waiting to be consumed by the removal listener.
 
   // TODO(fry): define a new type which creates event objects and automates the clear logic
A listener that is invoked when an entry is removed due to expiration or garbage collection of soft/weak entries.
 
   final RemovalListener<K, V> removalListener;

  
Measures time in a testable way.
 
   final Ticker ticker;

  
Factory used to create new entries.
 
   final EntryFactory entryFactory;

  
Accumulates global cache statistics. Note that there are also per-segments stats counters which must be aggregated to obtain a global stats view.
 
The default cache loader to use on loading operations.
 
   @Nullable
   final CacheLoader<? super K, V> defaultLoader;

  
Creates a new, empty map with the specified strategy, initial capacity and concurrency level.
 
       CacheBuilder<? super K, ? super V> builder, @Nullable CacheLoader<? super K, V> loader) {
      = Math.min(builder.getConcurrencyLevel(), );
 
      = builder.getKeyStrength();
      = builder.getValueStrength();
 
      = builder.getKeyEquivalence();
      = builder.getValueEquivalence();
 
      = builder.getMaximumWeight();
      = builder.getWeigher();
      = builder.getRefreshNanos();
 
      = builder.getRemovalListener();
         ? LocalCache.<RemovalNotification<K, V>>discardingQueue()
         : new ConcurrentLinkedQueue<RemovalNotification<K, V>>();
 
      = builder.getTicker(recordsTime());
      = loader;
 
     int initialCapacity = Math.min(builder.getInitialCapacity(), );
     if (evictsBySize() && !customWeigher()) {
       initialCapacity = Math.min(initialCapacity, (int);
     }
 
     // Find the lowest power-of-two segmentCount that exceeds concurrencyLevel, unless
     // maximumSize/Weight is specified in which case ensure that each segment gets at least 10
     // entries. The special casing for size-based eviction is only necessary because that eviction
     // happens per segment instead of globally, so too many segments compared to the maximum size
     // will result in random eviction behavior.
     int segmentShift = 0;
     int segmentCount = 1;
     while (segmentCount < 
            && (!evictsBySize() || segmentCount * 20 <= )) {
       ++segmentShift;
       segmentCount <<= 1;
     }
     this. = 32 - segmentShift;
      = segmentCount - 1;
 
     this. = newSegmentArray(segmentCount);
 
     int segmentCapacity = initialCapacity / segmentCount;
     if (segmentCapacity * segmentCount < initialCapacity) {
       ++segmentCapacity;
     }
 
     int segmentSize = 1;
     while (segmentSize < segmentCapacity) {
       segmentSize <<= 1;
     }
 
     if (evictsBySize()) {
       // Ensure sum of segment max weights = overall max weights
       long maxSegmentWeight =  / segmentCount + 1;
       long remainder =  % segmentCount;
       for (int i = 0; i < this..length; ++i) {
         if (i == remainder) {
           maxSegmentWeight--;
         }
         this.[i] =
             createSegment(segmentSizemaxSegmentWeightbuilder.getStatsCounterSupplier().get());
       }
     } else {
       for (int i = 0; i < this..length; ++i) {
         this.[i] =
             createSegment(segmentSizebuilder.getStatsCounterSupplier().get());
       }
     }
   }
 
   boolean evictsBySize() {
     return  >= 0;
   }
 
   boolean customWeigher() {
     return  != .;
   }
 
   boolean expires() {
     return expiresAfterWrite() || expiresAfterAccess();
   }
 
   boolean expiresAfterWrite() {
     return  > 0;
   }
 
   boolean expiresAfterAccess() {
     return  > 0;
   }
 
   boolean refreshes() {
     return  > 0;
   }
 
   boolean usesAccessQueue() {
     return expiresAfterAccess() || evictsBySize();
   }
 
   boolean usesWriteQueue() {
     return expiresAfterWrite();
   }
 
   boolean recordsWrite() {
     return expiresAfterWrite() || refreshes();
   }
 
   boolean recordsAccess() {
     return expiresAfterAccess();
   }
 
   boolean recordsTime() {
     return recordsWrite() || recordsAccess();
   }
 
   boolean usesWriteEntries() {
     return usesWriteQueue() || recordsWrite();
   }
 
   boolean usesAccessEntries() {
     return usesAccessQueue() || recordsAccess();
   }
 
   boolean usesKeyReferences() {
     return  != .;
   }
 
   boolean usesValueReferences() {
     return  != .;
   }
 
   enum Strength {
     /*
      * TODO(kevinb): If we strongly reference the value and aren't loading, we needn't wrap the
      * value. This could save ~8 bytes per entry.
      */
 
     STRONG {
       @Override
       <K, V> ValueReference<K, V> referenceValue(
           Segment<K, V> segmentReferenceEntry<K, V> entry, V valueint weight) {
         return (weight == 1)
             ? new StrongValueReference<K, V>(value)
             : new WeightedStrongValueReference<K, V>(valueweight);
       }
 
       @Override
         return Equivalence.equals();
       }
     },
 
     SOFT {
       @Override
       <K, V> ValueReference<K, V> referenceValue(
           Segment<K, V> segmentReferenceEntry<K, V> entry, V valueint weight) {
         return (weight == 1)
             ? new SoftValueReference<K, V>(segment.valueReferenceQueuevalueentry)
             : new WeightedSoftValueReference<K, V>(
                 segment.valueReferenceQueuevalueentryweight);
       }
 
       @Override
         return Equivalence.identity();
       }
     },
 
     WEAK {
       @Override
       <K, V> ValueReference<K, V> referenceValue(
           Segment<K, V> segmentReferenceEntry<K, V> entry, V valueint weight) {
         return (weight == 1)
             ? new WeakValueReference<K, V>(segment.valueReferenceQueuevalueentry)
             : new WeightedWeakValueReference<K, V>(
                 segment.valueReferenceQueuevalueentryweight);
       }
 
       @Override
         return Equivalence.identity();
       }
     };

    
Creates a reference for the given value according to this value strength.
 
     abstract <K, V> ValueReference<K, V> referenceValue(
         Segment<K, V> segmentReferenceEntry<K, V> entry, V valueint weight);

    
Returns the default equivalence strategy used to compare and hash keys or values referenced at this strength. This strategy will be used unless the user explicitly specifies an alternate strategy.
 
     abstract Equivalence<ObjectdefaultEquivalence();
   }

  
Creates new entries.
 
   enum EntryFactory {
     STRONG {
       @Override
       <K, V> ReferenceEntry<K, V> newEntry(
           Segment<K, V> segment, K keyint hash, @Nullable ReferenceEntry<K, V> next) {
         return new StrongEntry<K, V>(keyhashnext);
       }
     },
     STRONG_ACCESS {
       @Override
       <K, V> ReferenceEntry<K, V> newEntry(
           Segment<K, V> segment, K keyint hash, @Nullable ReferenceEntry<K, V> next) {
         return new StrongAccessEntry<K, V>(keyhashnext);
       }
 
       @Override
       <K, V> ReferenceEntry<K, V> copyEntry(
           Segment<K, V> segmentReferenceEntry<K, V> originalReferenceEntry<K, V> newNext) {
         ReferenceEntry<K, V> newEntry = super.copyEntry(segmentoriginalnewNext);
         copyAccessEntry(originalnewEntry);
         return newEntry;
       }
     },
     STRONG_WRITE {
       @Override
       <K, V> ReferenceEntry<K, V> newEntry(
           Segment<K, V> segment, K keyint hash, @Nullable ReferenceEntry<K, V> next) {
         return new StrongWriteEntry<K, V>(keyhashnext);
       }
 
       @Override
       <K, V> ReferenceEntry<K, V> copyEntry(
           Segment<K, V> segmentReferenceEntry<K, V> originalReferenceEntry<K, V> newNext) {
         ReferenceEntry<K, V> newEntry = super.copyEntry(segmentoriginalnewNext);
         copyWriteEntry(originalnewEntry);
         return newEntry;
       }
     },
     STRONG_ACCESS_WRITE {
       @Override
       <K, V> ReferenceEntry<K, V> newEntry(
           Segment<K, V> segment, K keyint hash, @Nullable ReferenceEntry<K, V> next) {
         return new StrongAccessWriteEntry<K, V>(keyhashnext);
       }
 
       @Override
       <K, V> ReferenceEntry<K, V> copyEntry(
           Segment<K, V> segmentReferenceEntry<K, V> originalReferenceEntry<K, V> newNext) {
         ReferenceEntry<K, V> newEntry = super.copyEntry(segmentoriginalnewNext);
         copyAccessEntry(originalnewEntry);
         copyWriteEntry(originalnewEntry);
         return newEntry;
       }
     },
 
     WEAK {
       @Override
       <K, V> ReferenceEntry<K, V> newEntry(
           Segment<K, V> segment, K keyint hash, @Nullable ReferenceEntry<K, V> next) {
         return new WeakEntry<K, V>(segment.keyReferenceQueuekeyhashnext);
       }
     },
     WEAK_ACCESS {
       @Override
       <K, V> ReferenceEntry<K, V> newEntry(
           Segment<K, V> segment, K keyint hash, @Nullable ReferenceEntry<K, V> next) {
         return new WeakAccessEntry<K, V>(segment.keyReferenceQueuekeyhashnext);
       }
 
       @Override
       <K, V> ReferenceEntry<K, V> copyEntry(
           Segment<K, V> segmentReferenceEntry<K, V> originalReferenceEntry<K, V> newNext) {
         ReferenceEntry<K, V> newEntry = super.copyEntry(segmentoriginalnewNext);
         copyAccessEntry(originalnewEntry);
         return newEntry;
       }
     },
     WEAK_WRITE {
       @Override
       <K, V> ReferenceEntry<K, V> newEntry(
           Segment<K, V> segment, K keyint hash, @Nullable ReferenceEntry<K, V> next) {
         return new WeakWriteEntry<K, V>(segment.keyReferenceQueuekeyhashnext);
       }
 
       @Override
       <K, V> ReferenceEntry<K, V> copyEntry(
           Segment<K, V> segmentReferenceEntry<K, V> originalReferenceEntry<K, V> newNext) {
         ReferenceEntry<K, V> newEntry = super.copyEntry(segmentoriginalnewNext);
         copyWriteEntry(originalnewEntry);
         return newEntry;
       }
     },
     WEAK_ACCESS_WRITE {
       @Override
       <K, V> ReferenceEntry<K, V> newEntry(
           Segment<K, V> segment, K keyint hash, @Nullable ReferenceEntry<K, V> next) {
         return new WeakAccessWriteEntry<K, V>(segment.keyReferenceQueuekeyhashnext);
       }
 
       @Override
       <K, V> ReferenceEntry<K, V> copyEntry(
           Segment<K, V> segmentReferenceEntry<K, V> originalReferenceEntry<K, V> newNext) {
         ReferenceEntry<K, V> newEntry = super.copyEntry(segmentoriginalnewNext);
         copyAccessEntry(originalnewEntry);
         copyWriteEntry(originalnewEntry);
         return newEntry;
       }
     };

    
Masks used to compute indices in the following table.
 
     static final int ACCESS_MASK = 1;
     static final int WRITE_MASK = 2;
     static final int WEAK_MASK = 4;

    
Look-up table for factories.
 
     static final EntryFactory[] factories = {
     };
 
     static EntryFactory getFactory(Strength keyStrengthboolean usesAccessQueue,
         boolean usesWriteQueue) {
       int flags = ((keyStrength == .) ?  : 0)
           | (usesAccessQueue ?  : 0)
           | (usesWriteQueue ?  : 0);
       return [flags];
     }

    
Creates a new entry.

Parameters:
segment to create the entry for
key of the entry
hash of the key
next entry in the same bucket
 
     abstract <K, V> ReferenceEntry<K, V> newEntry(
         Segment<K, V> segment, K keyint hash, @Nullable ReferenceEntry<K, V> next);

    
Copies an entry, assigning it a new next entry.

Parameters:
original the entry to copy
newNext entry in the same bucket
 
     @GuardedBy("Segment.this")
     <K, V> ReferenceEntry<K, V> copyEntry(
         Segment<K, V> segmentReferenceEntry<K, V> originalReferenceEntry<K, V> newNext) {
       return newEntry(segmentoriginal.getKey(), original.getHash(), newNext);
     }
 
     @GuardedBy("Segment.this")
     <K, V> void copyAccessEntry(ReferenceEntry<K, V> originalReferenceEntry<K, V> newEntry) {
       // TODO(fry): when we link values instead of entries this method can go
       // away, as can connectAccessOrder, nullifyAccessOrder.
       newEntry.setAccessTime(original.getAccessTime());
 
       connectAccessOrder(original.getPreviousInAccessQueue(), newEntry);
       connectAccessOrder(newEntryoriginal.getNextInAccessQueue());
 
       nullifyAccessOrder(original);
     }
 
     @GuardedBy("Segment.this")
     <K, V> void copyWriteEntry(ReferenceEntry<K, V> originalReferenceEntry<K, V> newEntry) {
       // TODO(fry): when we link values instead of entries this method can go
       // away, as can connectWriteOrder, nullifyWriteOrder.
       newEntry.setWriteTime(original.getWriteTime());
 
       connectWriteOrder(original.getPreviousInWriteQueue(), newEntry);
       connectWriteOrder(newEntryoriginal.getNextInWriteQueue());
 
       nullifyWriteOrder(original);
     }
   }

  
A reference to a value.
 
   interface ValueReference<K, V> {
    
Returns the value. Does not block or throw exceptions.
 
     @Nullable
     V get();

    
Waits for a value that may still be loading. Unlike get(), this method can block (in the case of FutureValueReference).

Throws:
ExecutionException if the loading thread throws an exception
ExecutionError if the loading thread throws an error
 
     V waitForValue() throws ExecutionException;

    
Returns the weight of this entry. This is assumed to be static between calls to setValue.
 
     int getWeight();

    
Returns the entry associated with this value reference, or null if this value reference is independent of any entry.
 
     @Nullable
     ReferenceEntry<K, V> getEntry();

    
Creates a copy of this reference for the given entry.

value may be null only for a loading reference.

 
     ValueReference<K, V> copyFor(
         ReferenceQueue<V> queue, @Nullable V valueReferenceEntry<K, V> entry);

    
Notifify pending loads that a new value was set. This is only relevant to loading value references.
 
     void notifyNewValue(@Nullable V newValue);

    
Returns true if a new value is currently loading, regardless of whether or not there is an existing value. It is assumed that the return value of this method is constant for any given ValueReference instance.
 
     boolean isLoading();

    
Returns true if this reference contains an active value, meaning one that is still considered present in the cache. Active values consist of live values, which are returned by cache lookups, and dead values, which have been evicted but awaiting removal. Non-active values consist strictly of loading values, though during refresh a value may be both active and loading.
 
     boolean isActive();
   }

  
Placeholder. Indicates that the value hasn't been set yet.
 
   static final ValueReference<ObjectObjectUNSET = new ValueReference<ObjectObject>() {
     @Override
     public Object get() {
       return null;
     }
 
     @Override
     public int getWeight() {
       return 0;
     }
 
     @Override
     public ReferenceEntry<ObjectObjectgetEntry() {
       return null;
     }
 
     @Override
         @Nullable Object valueReferenceEntry<ObjectObjectentry) {
       return this;
     }
 
     @Override
     public boolean isLoading() {
       return false;
     }
 
     @Override
     public boolean isActive() {
       return false;
     }
 
     @Override
     public Object waitForValue() {
       return null;
     }
 
     @Override
     public void notifyNewValue(Object newValue) {}
   };

  
Singleton placeholder that indicates a value is being loaded.
 
   @SuppressWarnings("unchecked"// impl never uses a parameter or returns any non-null value
   static <K, V> ValueReference<K, V> unset() {
     return (ValueReference<K, V>) ;
   }

  
An entry in a reference map. Entries in the map can be in the following states: Valid: - Live: valid key/value are set - Loading: loading is pending Invalid: - Expired: time expired (key/value may still be set) - Collected: key/value was partially collected, but not yet cleaned up - Unset: marked as unset, awaiting cleanup or reuse
 
   interface ReferenceEntry<K, V> {
    
Returns the value reference from this entry.
 
     ValueReference<K, V> getValueReference();

    
Sets the value reference for this entry.
 
     void setValueReference(ValueReference<K, V> valueReference);

    
Returns the next entry in the chain.
 
     @Nullable
     ReferenceEntry<K, V> getNext();

    
Returns the entry's hash.
 
     int getHash();

    
Returns the key for this entry.
 
     @Nullable
     K getKey();
 
     /*
      * Used by entries that use access order. Access entries are maintained in a doubly-linked list.
      * New entries are added at the tail of the list at write time; stale entries are expired from
      * the head of the list.
      */

    
Returns the time that this entry was last accessed, in ns.
 
     long getAccessTime();

    
Sets the entry access time in ns.
 
     void setAccessTime(long time);

    
Returns the next entry in the access queue.
 
     ReferenceEntry<K, V> getNextInAccessQueue();

    
Sets the next entry in the access queue.
 
     void setNextInAccessQueue(ReferenceEntry<K, V> next);

    
Returns the previous entry in the access queue.
 
     ReferenceEntry<K, V> getPreviousInAccessQueue();

    
Sets the previous entry in the access queue.
 
     void setPreviousInAccessQueue(ReferenceEntry<K, V> previous);
 
     /*
      * Implemented by entries that use write order. Write entries are maintained in a
      * doubly-linked list. New entries are added at the tail of the list at write time and stale
      * entries are expired from the head of the list.
      */

    
Returns the time that this entry was last written, in ns.
 
     long getWriteTime();

    
Sets the entry write time in ns.
 
     void setWriteTime(long time);

    
Returns the next entry in the write queue.
 
     ReferenceEntry<K, V> getNextInWriteQueue();

    
Sets the next entry in the write queue.
 
     void setNextInWriteQueue(ReferenceEntry<K, V> next);

    
Returns the previous entry in the write queue.
 
     ReferenceEntry<K, V> getPreviousInWriteQueue();

    
Sets the previous entry in the write queue.
 
     void setPreviousInWriteQueue(ReferenceEntry<K, V> previous);
   }
 
   private enum NullEntry implements ReferenceEntry<ObjectObject> {
     INSTANCE;
 
     @Override
       return null;
     }
 
     @Override
     public void setValueReference(ValueReference<ObjectObjectvalueReference) {}
 
     @Override
     public ReferenceEntry<ObjectObjectgetNext() {
       return null;
     }
 
     @Override
     public int getHash() {
       return 0;
     }
 
     @Override
     public Object getKey() {
       return null;
     }
 
     @Override
     public long getAccessTime() {
       return 0;
     }
 
     @Override
     public void setAccessTime(long time) {}
 
     @Override
       return this;
     }
 
     @Override
     public void setNextInAccessQueue(ReferenceEntry<ObjectObjectnext) {}
 
     @Override
       return this;
     }
 
     @Override
     public void setPreviousInAccessQueue(ReferenceEntry<ObjectObjectprevious) {}
 
     @Override
     public long getWriteTime() {
       return 0;
     }
 
     @Override
     public void setWriteTime(long time) {}
 
     @Override
       return this;
     }
 
     @Override
     public void setNextInWriteQueue(ReferenceEntry<ObjectObjectnext) {}
 
     @Override
       return this;
     }
 
     @Override
     public void setPreviousInWriteQueue(ReferenceEntry<ObjectObjectprevious) {}
   }
 
   static abstract class AbstractReferenceEntry<K, V> implements ReferenceEntry<K, V> {
     @Override
     public ValueReference<K, V> getValueReference() {
       throw new UnsupportedOperationException();
     }
 
     @Override
     public void setValueReference(ValueReference<K, V> valueReference) {
       throw new UnsupportedOperationException();
     }
 
     @Override
     public ReferenceEntry<K, V> getNext() {
       throw new UnsupportedOperationException();
     }
 
     @Override
     public int getHash() {
       throw new UnsupportedOperationException();
     }
 
     @Override
     public K getKey() {
       throw new UnsupportedOperationException();
     }
 
     @Override
     public long getAccessTime() {
       throw new UnsupportedOperationException();
     }
 
     @Override
     public void setAccessTime(long time) {
       throw new UnsupportedOperationException();
     }
 
     @Override
     public ReferenceEntry<K, V> getNextInAccessQueue() {
       throw new UnsupportedOperationException();
     }
 
     @Override
     public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
       throw new UnsupportedOperationException();
     }
 
     @Override
     public ReferenceEntry<K, V> getPreviousInAccessQueue() {
       throw new UnsupportedOperationException();
     }
 
     @Override
     public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
       throw new UnsupportedOperationException();
     }
 
     @Override
     public long getWriteTime() {
       throw new UnsupportedOperationException();
     }
 
     @Override
     public void setWriteTime(long time) {
       throw new UnsupportedOperationException();
     }
 
     @Override
     public ReferenceEntry<K, V> getNextInWriteQueue() {
       throw new UnsupportedOperationException();
     }
 
     @Override
     public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
       throw new UnsupportedOperationException();
     }
 
    @Override
    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
      throw new UnsupportedOperationException();
    }
    @Override
    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
      throw new UnsupportedOperationException();
    }
  }
  @SuppressWarnings("unchecked"// impl never uses a parameter or returns any non-null value
  static <K, V> ReferenceEntry<K, V> nullEntry() {
    return (ReferenceEntry<K, V>) .;
  }
  static final Queue<? extends ObjectDISCARDING_QUEUE = new AbstractQueue<Object>() {
    @Override
    public boolean offer(Object o) {
      return true;
    }
    @Override
    public Object peek() {
      return null;
    }
    @Override
    public Object poll() {
      return null;
    }
    @Override
    public int size() {
      return 0;
    }
    @Override
    public Iterator<Objectiterator() {
      return Iterators.emptyIterator();
    }
  };

  
Queue that discards all elements.
  @SuppressWarnings("unchecked"// impl never uses a parameter or returns any non-null value
  static <E> Queue<E> discardingQueue() {
    return (Queue;
  }
  /*
   * Note: All of this duplicate code sucks, but it saves a lot of memory. If only Java had mixins!
   * To maintain this code, make a change for the strong reference type. Then, cut and paste, and
   * replace "Strong" with "Soft" or "Weak" within the pasted text. The primary difference is that
   * strong entries store the key reference directly while soft and weak entries delegate to their
   * respective superclasses.
   */

  
Used for strongly-referenced keys.
  static class StrongEntry<K, V> implements ReferenceEntry<K, V> {
    final K key;
    StrongEntry(K keyint hash, @Nullable ReferenceEntry<K, V> next) {
      this. = key;
      this. = hash;
      this. = next;
    }
    @Override
    public K getKey() {
      return this.;
    }
    // null access
    @Override
    public long getAccessTime() {
      throw new UnsupportedOperationException();
    }
    @Override
    public void setAccessTime(long time) {
      throw new UnsupportedOperationException();
    }
    @Override
    public ReferenceEntry<K, V> getNextInAccessQueue() {
      throw new UnsupportedOperationException();
    }
    @Override
    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
      throw new UnsupportedOperationException();
    }
    @Override
    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
      throw new UnsupportedOperationException();
    }
    @Override
    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
      throw new UnsupportedOperationException();
    }
    // null write
    @Override
    public long getWriteTime() {
      throw new UnsupportedOperationException();
    }
    @Override
    public void setWriteTime(long time) {
      throw new UnsupportedOperationException();
    }
    @Override
    public ReferenceEntry<K, V> getNextInWriteQueue() {
      throw new UnsupportedOperationException();
    }
    @Override
    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
      throw new UnsupportedOperationException();
    }
    @Override
    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
      throw new UnsupportedOperationException();
    }
    @Override
    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
      throw new UnsupportedOperationException();
    }
    // The code below is exactly the same for each entry type.
    final int hash;
    final ReferenceEntry<K, V> next;
    volatile ValueReference<K, V> valueReference = unset();
    @Override
    public ValueReference<K, V> getValueReference() {
      return ;
    }
    @Override
    public void setValueReference(ValueReference<K, V> valueReference) {
      this. = valueReference;
    }
    @Override
    public int getHash() {
      return ;
    }
    @Override
    public ReferenceEntry<K, V> getNext() {
      return ;
    }
  }
  static final class StrongAccessEntry<K, V> extends StrongEntry<K, V>
      implements ReferenceEntry<K, V> {
    StrongAccessEntry(K keyint hash, @Nullable ReferenceEntry<K, V> next) {
      super(keyhashnext);
    }
    // The code below is exactly the same for each access entry type.
    volatile long accessTime = .;
    @Override
    public long getAccessTime() {
      return ;
    }
    @Override
    public void setAccessTime(long time) {
      this. = time;
    }
    @GuardedBy("Segment.this")
    ReferenceEntry<K, V> nextAccess = nullEntry();
    @Override
    public ReferenceEntry<K, V> getNextInAccessQueue() {
      return ;
    }
    @Override
    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
      this. = next;
    }
    @GuardedBy("Segment.this")
    @Override
    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
      return ;
    }
    @Override
    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
      this. = previous;
    }
  }
  static final class StrongWriteEntry<K, V>
      extends StrongEntry<K, V> implements ReferenceEntry<K, V> {
    StrongWriteEntry(K keyint hash, @Nullable ReferenceEntry<K, V> next) {
      super(keyhashnext);
    }
    // The code below is exactly the same for each write entry type.
    volatile long writeTime = .;
    @Override
    public long getWriteTime() {
      return ;
    }
    @Override
    public void setWriteTime(long time) {
      this. = time;
    }
    @GuardedBy("Segment.this")
    ReferenceEntry<K, V> nextWrite = nullEntry();
    @Override
    public ReferenceEntry<K, V> getNextInWriteQueue() {
      return ;
    }