Tabnine Logo
LightWeightGSet.put
Code Index — Add Tabnine to your IDE (free)

How to use
put
method
in
org.apache.hadoop.util.LightWeightGSet

Best Java code snippets using org.apache.hadoop.util.LightWeightGSet.put (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-common

/**
 * Inserts {@code element}, then grows the backing table if the load
 * factor warrants it.
 *
 * @return the previously stored element displaced by this put, or null.
 */
@Override
public E put(final E element) {
 final E displaced = super.put(element);
 expandIfNecessary();
 return displaced;
}
origin: org.apache.hadoop/hadoop-common

public void addCacheEntryWithPayload(byte[] clientId, int callId,
  Object payload) {
 // Replayed from the editlog, so the operation is assumed to have succeeded.
 final long expiresAt = System.nanoTime() + expirationTime;
 final CacheEntry replayed =
   new CacheEntryWithPayload(clientId, callId, payload, expiresAt, true);
 lock.lock();
 try {
  set.put(replayed);
 } finally {
  lock.unlock();
 }
 retryCacheMetrics.incrCacheUpdated();
}
origin: org.apache.hadoop/hadoop-common

/**
 * Records a retry-cache entry rebuilt from the editlog, keyed by the
 * (clientId, callId) pair extracted there.
 */
public void addCacheEntry(byte[] clientId, int callId) {
 final long expiresAt = System.nanoTime() + expirationTime;
 final CacheEntry replayed = new CacheEntry(clientId, callId, expiresAt, true);
 lock.lock();
 try {
  set.put(replayed);
 } finally {
  lock.unlock();
 }
 retryCacheMetrics.incrCacheUpdated();
}

origin: org.apache.hadoop/hadoop-common

/**
 * Inserts {@code entry}, stamping it with the creation expiration period
 * and tracking it in the eviction queue. Expired entries are purged first,
 * and size-based eviction runs after the insert.
 *
 * @throws HadoopIllegalArgumentException if entry is not an Entry.
 * @return the element this put displaced, or null if none.
 */
@Override
public E put(final E entry) {
 if (!(entry instanceof Entry)) {
  throw new HadoopIllegalArgumentException(
    "!(entry instanceof Entry), entry.getClass()=" + entry.getClass());
 }
 evictExpiredEntries();
 // Drop a displaced element's queue slot so it is not tracked twice.
 final E displaced = super.put(entry);
 if (displaced != null) {
  queue.remove(displaced);
 }
 final Entry tracked = (Entry)entry;
 setExpirationTime(tracked, creationExpirationPeriod);
 queue.offer(tracked);
 
 evictEntries();
 return displaced;
}
origin: org.apache.hadoop/hadoop-common

   + newEntry.callId + " to retryCache");
set.put(newEntry);
retryCacheMetrics.incrCacheUpdated();
return newEntry;
origin: io.hops/hadoop-common

/**
 * Stores {@code element} and then expands the underlying table when the
 * fill ratio requires it.
 *
 * @return whatever element was previously mapped, or null.
 */
@Override
public E put(final E element) {
 final E previous = super.put(element);
 expandIfNecessary();
 return previous;
}
origin: io.hops/hadoop-common

public void addCacheEntryWithPayload(byte[] clientId, int callId,
  Object payload) {
 // Loaded from the editlog, so mark the entry as already successful.
 final CacheEntry fromEditlog = new CacheEntryWithPayload(clientId, callId,
   payload, System.nanoTime() + expirationTime, true);
 lock.lock();
 try {
  set.put(fromEditlog);
 } finally {
  lock.unlock();
 }
 retryCacheMetrics.incrCacheUpdated();
}
origin: ch.cern.hadoop/hadoop-common

public void addCacheEntryWithPayload(byte[] clientId, int callId,
  Object payload) {
 // The entry comes from the editlog; treat the operation as succeeded.
 final long deadline = System.nanoTime() + expirationTime;
 final CacheEntry cached =
   new CacheEntryWithPayload(clientId, callId, payload, deadline, true);
 lock.lock();
 try {
  set.put(cached);
 } finally {
  lock.unlock();
 }
 retryCacheMetrics.incrCacheUpdated();
}
origin: com.github.jiayuhan-it/hadoop-common

public void addCacheEntryWithPayload(byte[] clientId, int callId,
  Object payload) {
 // Editlog replay: the recorded call already succeeded, so flag it true.
 final CacheEntry replayEntry = new CacheEntryWithPayload(clientId, callId,
   payload, System.nanoTime() + expirationTime, true);
 lock.lock();
 try {
  set.put(replayEntry);
 } finally {
  lock.unlock();
 }
 retryCacheMetrics.incrCacheUpdated();
}
origin: io.prestosql.hadoop/hadoop-apache

public void addCacheEntryWithPayload(byte[] clientId, int callId,
  Object payload) {
 // Entries restored from the editlog are assumed to have completed
 // successfully, hence the trailing true flag.
 final long expiryNanos = System.nanoTime() + expirationTime;
 final CacheEntry restored =
   new CacheEntryWithPayload(clientId, callId, payload, expiryNanos, true);
 lock.lock();
 try {
  set.put(restored);
 } finally {
  lock.unlock();
 }
 retryCacheMetrics.incrCacheUpdated();
}
origin: io.hops/hadoop-common

public void addCacheEntryWithPayload(byte[] clientId, int callId,
  byte[] payload) {
 // Recovered from the editlog; the original call is assumed successful.
 final long expiresAt = System.nanoTime() + expirationTime;
 final CacheEntry recovered =
   new CacheEntryWithPayload(clientId, callId, payload, expiresAt, true);
 lock.lock();
 try {
  set.put(recovered);
 } finally {
  lock.unlock();
 }
 retryCacheMetrics.incrCacheUpdated();
}
origin: ch.cern.hadoop/hadoop-common

/**
 * Adds a retry-cache entry reconstructed from an editlog record; the
 * entry is identified by its clientId and callId.
 */
public void addCacheEntry(byte[] clientId, int callId) {
 final CacheEntry fromEditlog = new CacheEntry(clientId, callId,
   System.nanoTime() + expirationTime, true);
 lock.lock();
 try {
  set.put(fromEditlog);
 } finally {
  lock.unlock();
 }
 retryCacheMetrics.incrCacheUpdated();
}

origin: com.github.jiayuhan-it/hadoop-common

/**
 * Inserts a retry-cache entry for a (clientId, callId) pair that was
 * extracted from the editlog during replay.
 */
public void addCacheEntry(byte[] clientId, int callId) {
 final long deadline = System.nanoTime() + expirationTime;
 final CacheEntry cached = new CacheEntry(clientId, callId, deadline, true);
 lock.lock();
 try {
  set.put(cached);
 } finally {
  lock.unlock();
 }
 retryCacheMetrics.incrCacheUpdated();
}

origin: io.prestosql.hadoop/hadoop-apache

/**
 * Registers a cache entry replayed from the editlog. Its key is the
 * clientId/callId pair stored in the log record.
 */
public void addCacheEntry(byte[] clientId, int callId) {
 final long expiryNanos = System.nanoTime() + expirationTime;
 final CacheEntry replayed =
   new CacheEntry(clientId, callId, expiryNanos, true);
 lock.lock();
 try {
  set.put(replayed);
 } finally {
  lock.unlock();
 }
 retryCacheMetrics.incrCacheUpdated();
}

origin: io.hops/hadoop-common

/**
 * Puts a new entry, keyed on clientId and callId taken from the editlog,
 * into the retry cache.
 */
public void addCacheEntry(byte[] clientId, int callId) {
 final CacheEntry restored = new CacheEntry(clientId, callId,
   System.nanoTime() + expirationTime, true);
 lock.lock();
 try {
  set.put(restored);
 } finally {
  lock.unlock();
 }
 retryCacheMetrics.incrCacheUpdated();
}

origin: io.hops/hadoop-common

/**
 * Caches a (clientId, callId) entry recovered from the editlog so the
 * corresponding retried request can be recognized.
 */
public void addCacheEntry(byte[] clientId, int callId) {
 final long expiresAt = System.nanoTime() + expirationTime;
 final CacheEntry recovered =
   new CacheEntry(clientId, callId, expiresAt, true);
 lock.lock();
 try {
  set.put(recovered);
 } finally {
  lock.unlock();
 }
 retryCacheMetrics.incrCacheUpdated();
}

origin: io.hops/hadoop-common

/**
 * Inserts {@code entry} with a caller-supplied expiration {@code period},
 * evicting expired elements first and running size-based eviction after.
 *
 * @throws HadoopIllegalArgumentException if entry is not an Entry.
 * @return the element displaced by this insert, or null.
 */
public E put(final E entry, long period) {
 if (!(entry instanceof Entry)) {
  throw new HadoopIllegalArgumentException(
    "!(entry instanceof Entry), entry.getClass()=" + entry.getClass());
 }
 evictExpiredEntries();
 // Remove a displaced element from the queue so it is not tracked twice.
 final E displaced = super.put(entry);
 if (displaced != null) {
  queue.remove(displaced);
 }
 final Entry tracked = (Entry)entry;
 setExpirationTime(tracked, period);
 queue.offer(tracked);
 
 evictEntries();
 return displaced;
}

origin: com.github.jiayuhan-it/hadoop-common

/**
 * Adds {@code entry} to the cache with the default creation expiration
 * period. Expired elements are purged before the insert and capacity-based
 * eviction runs afterwards.
 *
 * @throws HadoopIllegalArgumentException if entry is not an Entry.
 * @return the previously stored element, or null if none existed.
 */
@Override
public E put(final E entry) {
 if (!(entry instanceof Entry)) {
  throw new HadoopIllegalArgumentException(
    "!(entry instanceof Entry), entry.getClass()=" + entry.getClass());
 }
 evictExpiredEntries();
 // A replaced element must also leave the eviction queue.
 final E replaced = super.put(entry);
 if (replaced != null) {
  queue.remove(replaced);
 }
 final Entry inserted = (Entry)entry;
 setExpirationTime(inserted, creationExpirationPeriod);
 queue.offer(inserted);
 
 evictEntries();
 return replaced;
}
origin: ch.cern.hadoop/hadoop-common

@Test(timeout=60000)
public void testRemoveAllViaIterator() {
 // Fill the set with 100 random elements, then drain it entirely through
 // Iterator.remove and verify it ends up empty.
 ArrayList<Integer> list = getRandomList(100, 123);
 LightWeightGSet<TestElement, TestElement> set =
   new LightWeightGSet<TestElement, TestElement>(16);
 for (Integer value : list) {
  set.put(new TestElement(value));
 }
 Iterator<TestElement> it = set.iterator();
 while (it.hasNext()) {
  it.next();
  it.remove();
 }
 Assert.assertEquals(0, set.size());
}
origin: com.github.jiayuhan-it/hadoop-common

@Test(timeout=60000)
public void testRemoveAllViaIterator() {
 // Insert 100 random elements and remove every one via the iterator;
 // the set must be empty afterwards.
 ArrayList<Integer> values = getRandomList(100, 123);
 LightWeightGSet<TestElement, TestElement> set =
   new LightWeightGSet<TestElement, TestElement>(16);
 for (Integer v : values) {
  set.put(new TestElement(v));
 }
 Iterator<TestElement> cursor = set.iterator();
 while (cursor.hasNext()) {
  cursor.next();
  cursor.remove();
 }
 Assert.assertEquals(0, set.size());
}
org.apache.hadoop.util.LightWeightGSet.put

Popular methods of LightWeightGSet

  • computeCapacity
  • get
  • iterator
  • <init>
  • actualArrayLength
  • clear
  • convert
  • getIndex
  • remove
  • contains
  • size
  • size

Popular in Java

  • Reading from database using SQL prepared statement
  • getSystemService (Context)
  • scheduleAtFixedRate (ScheduledExecutorService)
  • runOnUiThread (Activity)
  • BufferedImage (java.awt.image)
    The BufferedImage subclass describes an java.awt.Image with an accessible buffer of image data. All
  • BufferedInputStream (java.io)
    A BufferedInputStream adds functionality to another input stream-namely, the ability to buffer the i
  • InputStreamReader (java.io)
    A class for turning a byte stream into a character stream. Data read from the source input stream is
  • Locale (java.util)
    Locale represents a language/country/variant combination. Locales are used to alter the presentatio
  • CountDownLatch (java.util.concurrent)
    A synchronization aid that allows one or more threads to wait until a set of operations being perfor
  • ZipFile (java.util.zip)
    This class provides random read access to a zip file. You pay more to read the zip file's central di
  • Top plugins for WebStorm
Tabnine Logo
  • Products

    Search for Java code · Search for JavaScript code
  • IDE Plugins

    IntelliJ IDEA · WebStorm · Visual Studio · Android Studio · Eclipse · Visual Studio Code · PyCharm · Sublime Text · PhpStorm · Vim · GoLand · RubyMine · Emacs · Jupyter Notebook · Jupyter Lab · Rider · DataGrip · AppCode
  • Company

    About Us · Contact Us · Careers
  • Resources

    FAQ · Blog · Tabnine Academy · Terms of use · Privacy policy · Java Code Index · Javascript Code Index
Get Tabnine for your IDE now