/**
 * Event-pump loop: wakes once a second, drains all pending events into the
 * full event set under {@code mLock}, and triggers a refresh of the filtered
 * view when at least one drained event matches the current filter.
 * The loop has no exit condition; it runs until the owning thread dies.
 */
public void run() {
    while (true) {
        try {
            Thread.sleep(1000); // poll interval: 1 second
        } catch (InterruptedException e) {
            // ignore
            // NOTE(review): the interrupt status is swallowed here, so this
            // thread cannot be stopped via Thread.interrupt() — confirm that
            // is intentional.
        }
        synchronized (mLock) {
            if (mPaused) {
                // While paused, leave events queued in mPendingEvents.
                continue;
            }
            boolean toHead = true; // were events added to head
            boolean needUpdate = false;
            final Iterator it = mPendingEvents.iterator();
            while (it.hasNext()) {
                final EventDetails event = (EventDetails) it.next();
                mAllEvents.add(event);
                // Stays true only while every drained event sorts to the
                // front of mAllEvents (identity check against first()).
                toHead = toHead && (event == mAllEvents.first());
                // One matching event is enough to require a refresh.
                needUpdate = needUpdate || matchFilter(event);
            }
            mPendingEvents.clear();
            if (needUpdate) {
                updateFilteredEvents(toHead);
            }
        }
    }
}
}
/**
 * Find the requested number of available ports for this {@code SocketType},
 * each randomly selected from the range [{@code minPort}, {@code maxPort}].
 *
 * @param numRequested the number of available ports to find
 * @param minPort the minimum port number
 * @param maxPort the maximum port number
 * @return a sorted set of available port numbers for this socket type
 * @throws IllegalStateException if the requested number of available ports could not be found
 */
SortedSet<Integer> findAvailablePorts(int numRequested, int minPort, int maxPort) {
    Assert.isTrue(minPort > 0, "'minPort' must be greater than 0");
    Assert.isTrue(maxPort > minPort, "'maxPort' must be greater than 'minPort'");
    Assert.isTrue(maxPort <= PORT_RANGE_MAX, "'maxPort' must be less than or equal to " + PORT_RANGE_MAX);
    Assert.isTrue(numRequested > 0, "'numRequested' must be greater than 0");
    Assert.isTrue((maxPort - minPort) >= numRequested,
            "'numRequested' must not be greater than 'maxPort' - 'minPort'");

    SortedSet<Integer> found = new TreeSet<>();
    // Bounded random probing: give up after numRequested + 100 attempts so a
    // mostly-occupied range cannot spin forever.
    for (int attempt = 1; attempt <= numRequested + 100 && found.size() < numRequested; attempt++) {
        found.add(findAvailablePort(minPort, maxPort));
    }

    if (found.size() != numRequested) {
        throw new IllegalStateException(String.format(
                "Could not find %d available %s ports in the range [%d, %d]",
                numRequested, name(), minPort, maxPort));
    }

    return found;
}
}
@Override public boolean failed(KinesisMessageId messageId) { LOG.debug("Handling failed message {}", messageId); // if maxRetries is 0, dont retry and return false as per interface contract if (maxRetries == 0) { LOG.warn("maxRetries set to 0. Hence not queueing " + messageId); return false; } // if first failure add it to the count map if (!failCounts.containsKey(messageId)) { failCounts.put(messageId, 0L); } // increment the fail count as we started with 0 Long failCount = failCounts.get(messageId); failCounts.put(messageId, ++failCount); // if fail count is greater than maxRetries, discard or ack. for e.g. for maxRetries 3, 4 failures are allowed at maximum if (failCount > maxRetries) { LOG.warn("maxRetries reached so dropping " + messageId); failCounts.remove(messageId); return false; } // if reached so far, add it to the set of messages waiting to be retried with next retry time based on how many times it failed retryTimes.put(messageId, getRetryTime(failCount)); retryMessageSet.add(messageId); LOG.debug("Scheduled {} for retry at {} and retry attempt {}", messageId, retryTimes.get(messageId), failCount); return true; }
/**
 * Scans every entry in the file and returns the ids of the {@code number}
 * objects with the largest retained sizes.  Unfilled trailing slots of the
 * returned array stay 0 when fewer than {@code number} objects exist.
 *
 * @param number how many ids to return
 * @return array of length {@code number} holding the instance ids of the
 *         biggest objects
 */
long[] getBiggestObjectsByRetainedSize(int number) {
    // Top-N selection: the set holds at most `number` candidates and `min`
    // caches the retained size of the weakest one.
    // NOTE(review): assumes RetainedSizeEntry's ordering places the entry
    // with the smallest retained size at last() — confirm its comparator.
    SortedSet<RetainedSizeEntry> bigObjects = new TreeSet<RetainedSizeEntry>();
    long[] bigIds = new long[number];
    long min = 0;
    for (long index = 0; index < fileSize; index += ENTRY_SIZE) {
        long id = getID(index);
        if (id == 0) {
            continue; // empty slot
        }
        long retainedSize = createEntry(index).getRetainedSize();
        if (bigObjects.size() < number) {
            // Still filling up: accept unconditionally.
            bigObjects.add(new RetainedSizeEntry(id, retainedSize));
            min = bigObjects.last().retainedSize;
        } else if (retainedSize > min) {
            // Beats the current weakest candidate: swap it out.
            bigObjects.remove(bigObjects.last());
            bigObjects.add(new RetainedSizeEntry(id, retainedSize));
            min = bigObjects.last().retainedSize;
        }
    }
    // Copy out in the set's iteration order (no redundant casts needed on a
    // generic TreeSet).
    int i = 0;
    for (RetainedSizeEntry entry : bigObjects) {
        bigIds[i++] = entry.instanceId;
    }
    return bigIds;
}
private void put(Date date, String revision, String author, String comment, String path) { long time = date.getTime(); date.setTime(time - (time % 3600000l)); Map<String, Map<List<String>, SortedSet<String>>> ac = hash.get(date); if (ac == null) { ac = new HashMap<>(); hash.put(date, ac); } Map<List<String>, SortedSet<String>> cf = ac.get(author); if (cf == null) { cf = new HashMap<>(); ac.put(author, cf); } // We are not going to modify the list so this is safe to do. List<String> cr = new ArrayList<>(); cr.add(comment); cr.add(revision); SortedSet<String> fls = cf.get(cr); if (fls == null) { fls = new TreeSet<>(); cf.put(cr, fls); } fls.add(path); }
while (it.hasNext()) { FileObject child = it.next(); if (childrenByName.put(child.getNameExt(), child) != null) { throw new IllegalArgumentException("Duplicate in children list: " + child.getPath() + "\nChildren: " + children); // NOI18N childrenByPosition.add(new ChildAndPosition(child, (Number) pos)); } else if (logWarnings && pos != null) { LOG.log(Level.WARNING, "Encountered nonnumeric position attribute {0} of {1} for {2}\nChildren: {3}", new Object[]{pos, pos.getClass(), child.getPath(), children}); LOG.log(Level.FINEST, " no more attribs {0}", parent); // NOI18N edges.put(previousChild, edge = new HashSet<FileObject>()); if (logWarnings && /* #201893*/ !parent.getPath().matches("Projects/.+/Lookup") && !childrenByPosition.isEmpty() && childrenByPosition.size() < children.size()) { List<FileObject> missingPositions = new ArrayList<FileObject>(children); for (ChildAndPosition cap : childrenByPosition) { break IGNORE_ERGO; missingNames.add(n); List<String> presentNames = new ArrayList<String>(childrenByPosition.size()); for (ChildAndPosition cap : childrenByPosition) { final String n = cap.child.getNameExt(); break IGNORE_ERGO; presentNames.add(n);
/**
 * Collapses a sorted set of intervals by merging every maximal run of
 * consecutive abutting intervals into a single umbrella interval.
 *
 * @param intervals intervals ordered by start then end
 * @return compacted intervals, sorted by start then end
 */
public static SortedSet<Interval> computeCompactIntervals(SortedSet<Interval> intervals) {
    final SortedSet<Interval> result = new TreeSet<>(Comparators.intervalsByStartThenEnd());

    List<Interval> run = new ArrayList<>();
    for (Interval current : intervals) {
        boolean continuesRun = !run.isEmpty() && run.get(run.size() - 1).abuts(current);
        if (!run.isEmpty() && !continuesRun) {
            // The current interval does not extend the run: flush the run
            // as one umbrella interval before starting a new one.
            result.add(JodaUtils.umbrellaInterval(run));
            run.clear();
        }
        run.add(current);
    }
    // Flush the final run, if any.
    if (!run.isEmpty()) {
        result.add(JodaUtils.umbrellaInterval(run));
    }

    return result;
}
}
public void setDefaults() { SortedSet set = new TreeSet(); set.add("foo"); set.add("bar"); set.add("baz"); setStringSet(set); Map map = new TreeMap(); map.put( "now", new Date() ); map.put( "never", null ); map.put( "big bang", new Date(0) ); setStringDateMap(map); List list = new ArrayList(); setStringArray( (String[]) list.toArray( new String[0] ) ); customs = new ArrayList(); customs.add( new String[] { "foo", "bar" } ); customs.add( new String[] { "A", "B" } ); customs.add( new String[] { "1", "2" } );
strings.add(info.db); if(!isPartOfDynamicPartitionInsert && info.type == LockType.SHARED_WRITE) { writeSet.add(info); SortedSet<LockInfo> lockSet = new TreeSet<LockInfo>(new LockInfoComparator()); while (rs.next()) { lockSet.add(new LockInfo(rs)); LockInfo[] locks = lockSet.toArray(new LockInfo[lockSet.size()]); if(LOG.isTraceEnabled()) { LOG.trace("Locks to check(full): "); LockAction lockAction = jumpTable.get(info.type).get(locks[i].type).get(locks[i].state); LOG.debug("desired Lock: " + info + " checked Lock: " + locks[i] + " action: " + lockAction); switch (lockAction) {
final Settings ads = authzDyn.get(ad); final boolean enabled = ads.getAsBoolean("enabled", true); final boolean httpEnabled = enabled && ads.getAsBoolean("http_enabled", true); destroyableComponents0.add((Destroyable) authorizationBackend); final Settings ads = dyn.get(ad); final boolean enabled = ads.getAsBoolean("enabled", true); final boolean httpEnabled = enabled && ads.getAsBoolean("http_enabled", true); restAuthDomains0.add(_ad); transportAuthDomains0.add(_ad); destroyableComponents0.add((Destroyable) httpAuthenticator); destroyableComponents0.add((Destroyable) authenticationBackend);
clusterModel.brokers().forEach(broker -> numExcludedReplicasByPositionInBroker.put(broker.id(), new HashMap<>())); for (String excludedTopic : excludedTopics) { for (Partition partition : _partitionsByTopic.get(excludedTopic)) { numExcludedReplicasByPositionInBroker.get(partition.leader().broker().id()).merge(position, 1, Integer::sum); numExcludedReplicasByPositionInBroker.get(followerBroker.id()).merge(position, 1, Integer::sum); int numExcludedReplicasInPosition = numExcludedReplicasByPositionInBroker.get(broker.id()).getOrDefault(i, 0); BrokerReplicaCount brokerReplicaCount = new BrokerReplicaCount(broker, numExcludedReplicasInPosition); aliveBrokersByReplicaCount.add(brokerReplicaCount); _aliveBrokerReplicaCountByPosition.put(i, aliveBrokersByReplicaCount);
/**
 * Registers {@code inputFile}: records its language (when known) and indexes
 * the file by relative path, by file name, and by extension.
 */
@Override
protected void doAdd(InputFile inputFile) {
    String language = inputFile.language();
    if (language != null) {
        languages.add(language);
    }
    fileMap.put(inputFile.relativePath(), inputFile);
    filesByNameCache.put(inputFile.filename(), inputFile);
    filesByExtensionCache.put(FileExtensionPredicate.getExtension(inputFile), inputFile);
}
DeleteVersionsNode node = e.getValue(); long deleteMvcc = Long.MAX_VALUE; SortedSet<Long> deleteVersionMvccs = node.deletesMap.get(cell.getTimestamp()); if (deleteVersionMvccs != null) { SortedSet<Long> tail = deleteVersionMvccs.tailSet(cell.getSequenceId()); .subMap(cell.getSequenceId(), true, Math.min(duplicateMvcc, deleteMvcc), true); for (Map.Entry<Long, SortedSet<Long>> seg : subMap.entrySet()) { if (seg.getValue().size() >= maxVersions) { return DeleteResult.VERSION_MASKED; seg.getValue().add(cell.getSequenceId());
/**
 * Create a broker under this cluster/rack and get the created broker.
 * Add the broker id and info to {@link #_capacityEstimationInfoByBrokerId} if the broker capacity has been estimated.
 *
 * @param rackId Id of the rack that the broker will be created in.
 * @param host The host of this broker
 * @param brokerId Id of the broker to be created.
 * @param brokerCapacityInfo Capacity information of the created broker.
 * @return Created broker.
 */
public Broker createBroker(String rackId, String host, int brokerId, BrokerCapacityInfo brokerCapacityInfo) {
    // Seed the potential-leadership load entry without clobbering an existing one.
    _potentialLeadershipLoadByBrokerId.putIfAbsent(brokerId, new Load());
    Rack rack = rack(rackId);
    // Maintain the broker-id -> rack index used for later lookups.
    _brokerIdToRack.put(brokerId, rack);
    // Record estimation info only when the capacity was estimated rather than
    // directly reported.
    if (brokerCapacityInfo.isEstimated()) {
        _capacityEstimationInfoByBrokerId.put(brokerId, brokerCapacityInfo.estimationInfo());
    }
    // The rack constructs the broker; register it as both alive and known.
    Broker broker = rack.createBroker(brokerId, host, brokerCapacityInfo.capacity());
    _aliveBrokers.add(broker);
    _brokers.add(broker);
    // Cluster-level capacity totals must be recomputed after adding a broker.
    refreshCapacity();
    return broker;
}
/**
 * Asserts that {@code unmod} rejects every direct mutation: add, remove,
 * addAll, and element removal through its iterator.
 */
void ensureNotDirectlyModifiable(SortedSet<Integer> unmod) {
    expectUnsupported(() -> unmod.add(4));
    expectUnsupported(() -> unmod.remove(4));
    expectUnsupported(() -> unmod.addAll(Collections.singleton(4)));
    expectUnsupported(() -> {
        Iterator<Integer> iterator = unmod.iterator();
        iterator.next();
        iterator.remove();
    });
}

/** Runs {@code mutation}; fails unless it throws UnsupportedOperationException. */
private void expectUnsupported(Runnable mutation) {
    try {
        mutation.run();
        fail("UnsupportedOperationException expected");
    } catch (UnsupportedOperationException expected) {
    }
}
for (Iterator it = map.entrySet().iterator(); it.hasNext();) { entry = (Map.Entry) it.next(); bind_interface = (InetAddress) entry.getKey(); System.out.println(bind_interface.getHostAddress() + ":\t " + entry.getValue()); for (Iterator it = map.values().iterator(); it.hasNext();) { l = (List) it.next(); for (Iterator it2 = l.iterator(); it2.hasNext();) { tmp_addr = (InetSocketAddress) it2.next(); if (!all_mbrs.contains(tmp_addr)) all_mbrs.add(tmp_addr); entry = (Map.Entry) it.next(); r = new Result((InetAddress) entry.getKey(), ((List) entry.getValue()).size()); s.add(r); for (Iterator it = s.iterator(); it.hasNext();) { st.push(it.next());