Refine search
/**
 * Resets every cached label and removes the ones that end up empty.
 *
 * This should be called when the assumptions behind label cache computation
 * change, but we also call this periodically to self-heal any data
 * out-of-sync issue.
 */
/*package*/ void trimLabels() {
    Iterator<Label> labelIterator = labels.values().iterator();
    while (labelIterator.hasNext()) {
        Label label = labelIterator.next();
        resetLabel(label);
        // An empty label after reset carries no information; drop it from the cache.
        if (label.isEmpty()) {
            labelIterator.remove();
        }
    }
}
/**
 * Logs one INFO line for every registered {@link Instantiator} and for every
 * lazily-registered instantiator attributes holder, for diagnostics.
 */
public static void logInstantiators() {
    // Entries registered as live Instantiator instances.
    // SLF4J autoboxes primitive arguments, so no explicit Integer.valueOf is needed.
    for (Object value : dsMap.values()) {
        Instantiator instantiator = (Instantiator) value;
        logger.info("Instantiator registered with id {} class {}",
            instantiator.getId(),
            instantiator.getInstantiatedClass().getName());
    }
    // Entries registered only by attributes (class name held, not yet loaded).
    for (Object value : idsToHolders.values()) {
        InstantiatorAttributesHolder holder = (InstantiatorAttributesHolder) value;
        logger.info("Instantiator registered with holder id {} class {}",
            holder.getId(),
            holder.getInstantiatedClassName());
    }
}
}
/**
 * Applies a newly accepted cluster view: rebuilds the node maps so they contain
 * exactly the members of {@code view}, updates {@code master}, and notifies the
 * view listener with the resulting node addresses.
 */
private void doViewAccepted(final View view) {
    // Build replacement snapshots keyed by both address types, reusing the
    // existing NodeInfo where the member was already known.
    final ConcurrentHashMap<Address, NodeInfo> newNodes = new ConcurrentHashMap<>(view.size());
    final ConcurrentHashMap<NodeAddress, NodeInfo> newNodes2 = new ConcurrentHashMap<>(view.size());
    for (final Address a : view) {
        NodeInfo info = nodeMap.get(a);
        if (info == null) {
            info = new NodeInfo(a);
        }
        newNodes.put(a, info);
        newNodes2.put(info.nodeAddress, info);
    }
    // NOTE(review): this picks an arbitrary member as master — ConcurrentHashMap
    // iteration order is not the view order — and throws NoSuchElementException
    // on an empty view; confirm both are intended.
    final NodeInfo newMaster = newNodes.values().iterator().next();
    // putAll followed by retainAll mutates the live maps in place: add the new
    // members first, then drop departed ones, so concurrent readers never see
    // an empty map mid-update.
    nodeMap.putAll(newNodes);
    nodeMap.values().retainAll(newNodes.values());
    nodeMap2.putAll(newNodes2);
    nodeMap2.values().retainAll(newNodes2.values());
    master = newMaster;
    viewListener.onViewChange(nodeMap2.keySet());
}
/**
 * Reclaims chunks that are no longer referenced by the current store state.
 * Referenced chunks are left alone; unreferenced chunks are either freed
 * immediately (when overwriting them is already safe) or stamped with the time
 * they became unused so a later pass can reclaim them.
 */
private synchronized void freeUnusedChunks() {
    // Nothing to do before the first chunk exists, or when space reuse is disabled.
    if (lastChunk == null || !reuseSpace) {
        return;
    }
    Set<Integer> referenced = collectReferencedChunks();
    long time = getTimeSinceCreation();
    for (Iterator<Chunk> it = chunks.values().iterator(); it.hasNext(); ) {
        Chunk c = it.next();
        if (!referenced.contains(c.id)) {
            if (canOverwriteChunk(c, time)) {
                // Safe to reclaim now: remove it from the chunk map and the
                // metadata, then release its file blocks.
                it.remove();
                markMetaChanged();
                meta.remove(Chunk.getMetaKey(c.id));
                long start = c.block * BLOCK_SIZE;
                int length = c.len * BLOCK_SIZE;
                fileStore.free(start, length);
            } else {
                // Not yet overwritable: record when it became unused, but only
                // once (c.unused == 0 means "not stamped yet").
                if (c.unused == 0) {
                    c.unused = time;
                    meta.put(Chunk.getMetaKey(c.id), c.asString());
                    markMetaChanged();
                }
            }
        }
    }
}
/**
 * Removes from the cache, and returns, every query plan whose last use is
 * older than the given epoch time.
 */
public List<CQueryPlan> trimQueryPlans(long unusedSince) {
    List<CQueryPlan> trimmed = new ArrayList<>();
    for (Iterator<CQueryPlan> iterator = queryPlanCache.values().iterator(); iterator.hasNext(); ) {
        CQueryPlan plan = iterator.next();
        if (plan.getLastQueryTime() < unusedSince) {
            iterator.remove();
            trimmed.add(plan);
        }
    }
    return trimmed;
}
/**
 * Prunes stale address mappings: outgoing entries past their expiration time,
 * and active entries whose underlying socket has been closed.
 */
private void expireOutbound() {
    // Outgoing table: drop entries whose expiry is in the past.
    Iterator<LocalAddress> outgoing = _outgoing.values().iterator();
    while (outgoing.hasNext()) {
        LocalAddress address = outgoing.next();
        if (address.expire < getTunnel().getContext().clock().now())
            outgoing.remove();
    }
    // Active table: drop entries whose socket has closed.
    Iterator<LocalAddress> active = _active.values().iterator();
    while (active.hasNext()) {
        LocalAddress address = active.next();
        I2PSocket socket = address.socket;
        if (socket != null && socket.isClosed())
            active.remove();
    }
}
/** * Something b0rked hard, so kill all of our connections without mercy. * Don't bother sending close packets. * This will not close the ServerSocket. * This will not kill the timer threads. * * CAN continue to use the manager. */ public void disconnectAllHard() { //if (_log.shouldLog(Log.INFO)) // _log.info("ConnMan hard disconnect", new Exception("I did it")); for (Iterator<Connection> iter = _connectionByInboundId.values().iterator(); iter.hasNext(); ) { Connection con = iter.next(); con.disconnect(false, false); iter.remove(); } synchronized(_recentlyClosed) { _recentlyClosed.clear(); } _pendingPings.clear(); // FIXME // Ideally we would like to stop all TCBShare and all the timer threads here, // but leave them ready to restart when things resume. // However that's quite difficult. // So the timer threads will continue to run. }
/**
 * Failsafe cleanup: drops establishment state that has lived far past the
 * allowed window from each of the three tracking maps.
 *
 * @since 0.9.2
 */
private void doFailsafe() {
    // The three maps share identical expiry logic; only the log tag differs.
    failsafeExpire(_liveIntroductions.values(), "LI");
    failsafeExpire(_outboundByClaimedAddress.values(), "OBBCA");
    failsafeExpire(_outboundByHash.values(), "OBBH");
}

/**
 * Removes every state older than 3 * MAX_OB_ESTABLISH_TIME from the given
 * live collection (a map values view, so removal updates the map), logging
 * each removal with the given tag.
 */
private void failsafeExpire(java.util.Collection<OutboundEstablishState> states, String tag) {
    for (Iterator<OutboundEstablishState> iter = states.iterator(); iter.hasNext(); ) {
        OutboundEstablishState state = iter.next();
        if (state.getLifetime() > 3*MAX_OB_ESTABLISH_TIME) {
            iter.remove();
            if (_log.shouldLog(Log.WARN))
                _log.warn("Failsafe remove " + tag + " " + state);
        }
    }
}
}