/**
 * Expands the given feature set in place with every feature that is directly or
 * transitively implied by any of its members.
 *
 * @param features the mutable set of features to expand
 * @return the same set instance, now containing all implied features
 */
public static Set<Feature<?>> addImpliedFeatures(Set<Feature<?>> features) {
  // Breadth-first walk over the implication graph; the set itself doubles as
  // the "already visited" record, so each feature is enqueued at most once.
  Queue<Feature<?>> pending = new ArrayDeque<>(features);
  while (!pending.isEmpty()) {
    Feature<?> current = pending.remove();
    for (Feature<?> implied : current.getImpliedFeatures()) {
      // add() returns true only for features not seen before.
      if (features.add(implied)) {
        pending.add(implied);
      }
    }
  }
  return features;
}
/**
 * Recursively deletes {@code directory} and everything beneath it.
 *
 * <p>The tree is first collected breadth-first, then deleted in reverse
 * collection order so that a directory's contents are always removed before
 * the directory itself. Failures of individual {@link File#delete()} calls
 * are silently ignored (best effort).
 *
 * @param directory path of the directory to delete
 */
public static void deleteAllFile(String directory) {
    List<File> collected = new ArrayList<File>();
    Queue<File> pending = new ConcurrentLinkedQueue<File>();
    pending.add(new File(directory));
    while (!pending.isEmpty()) {
        File current = pending.poll();
        if (current.isDirectory()) {
            File[] children = current.listFiles();
            if (children != null) {
                pending.addAll(Arrays.asList(children));
            }
        }
        collected.add(current);
    }
    // Deepest entries were collected last; delete them first.
    for (int index = collected.size(); index-- > 0; ) {
        collected.get(index).delete();
    }
}
@Override public List<SourceRecord> poll() throws InterruptedException { if (records.isEmpty()) { // block forever, as this thread will be interrupted if/when the task is stopped ... new CountDownLatch(1).await(); } if (running.get()) { // Still running, so process whatever is in the queue ... List<SourceRecord> results = new ArrayList<>(); int record = 0; while (record < recordsPerBatch && !records.isEmpty()) { results.add(records.poll()); } return results; } // No longer running ... return null; }
/**
 * Walks the filter tree breadth-first and builds a map from column name to the
 * list of leaf predicates applied to that column.
 *
 * @param rootFilterNode root of the filter tree to process
 * @return map of column name to the predicates on that column
 */
private Map<String, List<Predicate>> getPredicatesMap(@Nonnull FilterQueryTree rootFilterNode) {
  Map<String, List<Predicate>> columnToPredicates = new HashMap<>();
  Queue<FilterQueryTree> pending = new LinkedList<>();
  pending.add(rootFilterNode);
  while (!pending.isEmpty()) {
    FilterQueryTree node = pending.remove();
    List<FilterQueryTree> children = node.getChildren();
    if (children != null) {
      // Interior node: descend into its children.
      pending.addAll(children);
    } else {
      // Leaf node: record its predicate under the column it filters.
      columnToPredicates
          .computeIfAbsent(node.getColumn(), key -> new ArrayList<>())
          .add(Predicate.newPredicate(node));
    }
  }
  return columnToPredicates;
}
// NOTE(review): fragment of a larger test method — the enclosing loop and the
// close of this if-block lie outside the visible range.
writeOrder.add(entry);
readOrder.add(entry);
// presumably no read has happened yet, so the recency queue starts empty — TODO confirm
assertTrue(segment.recencyQueue.isEmpty());
if (random.nextBoolean()) {
    // A read should register the entry on the segment's recency queue.
    map.get(entry.getKey());
    reads.add(entry);
    i.remove();
    // NOTE(review): looks like the queue is drained once it reaches
    // DRAIN_THRESHOLD, so its size never exceeds that bound — confirm against
    // the segment implementation.
    assertTrue(segment.recencyQueue.size() <= DRAIN_THRESHOLD);
    // Entries read since the last drain are still sitting in the queue.
    int undrainedIndex = reads.size() - segment.recencyQueue.size();
    checkAndDrainRecencyQueue(map, segment, reads.subList(undrainedIndex, reads.size()));
    readOrder.addAll(reads);
/**
 * Drains the priority queue into a list, recording each event's paging offset
 * (keyed by segment id) along the way. Events come out in queue order.
 *
 * @return the drained events
 */
@Override protected List<EventHolder> getEventHolders() {
    final List<EventHolder> result = Lists.newArrayListWithCapacity(pQueue.size());
    while (!pQueue.isEmpty()) {
        final EventHolder holder = pQueue.poll();
        pagingIdentifiers.put(holder.getSegmentId(), holder.getOffset());
        result.add(holder);
    }
    return result;
}
}
/**
 * Breadth-first search of {@code content}'s view hierarchy for the first view
 * accepted by {@code isContentView}.
 *
 * @param content  root of the hierarchy to search
 * @param selfable whether {@code content} itself may be returned as the match
 * @return the first matching view, or {@code content} when none is found
 */
protected View findScrollableViewInternal(View content, boolean selfable) {
    View found = null;
    Queue<View> pending = new LinkedList<>(Collections.singletonList(content));
    while (found == null && !pending.isEmpty()) {
        View candidate = pending.poll();
        if (candidate == null) {
            continue;
        }
        if ((selfable || candidate != content) && isContentView(candidate)) {
            found = candidate;
        } else if (candidate instanceof ViewGroup) {
            // Not a match itself — enqueue its children for inspection.
            ViewGroup group = (ViewGroup) candidate;
            int childCount = group.getChildCount();
            for (int index = 0; index < childCount; index++) {
                pending.add(group.getChildAt(index));
            }
        }
    }
    return found != null ? found : content;
}
// Exercises every Queue/Collection operation on a freshly created queue;
// create() presumably returns a lock-guarded queue whose lock acquisition is
// verified elsewhere by the harness — TODO confirm.
public void testHoldsLockOnAllOperations() {
    // Queue-specific operations
    create().element();
    create().offer("foo");
    create().peek();
    create().poll();
    create().remove();
    // Collection operations
    create().add("foo");
    create().addAll(ImmutableList.of("foo"));
    create().clear();
    create().contains("foo");
    create().containsAll(ImmutableList.of("foo"));
    create().equals(new ArrayDeque<>(ImmutableList.of("foo")));
    create().hashCode();
    create().isEmpty();
    create().iterator();
    create().remove("foo");
    create().removeAll(ImmutableList.of("foo"));
    create().retainAll(ImmutableList.of("foo"));
    create().size();
    create().toArray();
    create().toArray(new String[] {"foo"});
}
}
// Drains the message cache, writing one SockJS message frame per cached
// message, until the cache is empty or the streamed-bytes limit is reached;
// in the latter case the current request is recycled and the byte counter
// reset. Always (re)schedules the heartbeat afterwards.
@Override protected void flushCache() throws SockJsTransportFailureException {
    while (!getMessageCache().isEmpty()) {
        String message = getMessageCache().poll();
        SockJsMessageCodec messageCodec = getSockJsServiceConfig().getMessageCodec();
        SockJsFrame frame = SockJsFrame.messageFrame(messageCodec, message);
        writeFrame(frame);
        // +1 presumably accounts for a one-byte frame delimiter — TODO confirm
        this.byteCount += (frame.getContentBytes().length + 1);
        if (logger.isTraceEnabled()) {
            logger.trace(this.byteCount + " bytes written so far, " +
                    getMessageCache().size() + " more messages not flushed");
        }
        if (this.byteCount >= getSockJsServiceConfig().getStreamBytesLimit()) {
            logger.trace("Streamed bytes limit reached, recycling current request");
            resetRequest();
            this.byteCount = 0;
            break;
        }
    }
    scheduleHeartbeat();
}
/**
 * Gets a list of the unfinished {@link Allocation}s in the order in which those
 * {@link Allocation}s were encountered. This can be used to display, for
 * example, currently executing tasks: the order keeps displayed tasks
 * deterministic (new subtasks appear below older ones) rather than jumbled.
 *
 * @return a list of unfinished {@link Allocation}s
 */
ImmutableList<Allocation> getUnfinishedAllocations() {
  // Collect every allocation that still has units outstanding; the priority
  // queue presumably orders InsertionOrderUnits by encounter order via its
  // natural ordering.
  Queue<InsertionOrderUnits> pending = new PriorityQueue<>();
  for (InsertionOrderUnits units : completionMap.values()) {
    if (units.units.get() < units.allocation.getAllocationUnits()) {
      pending.add(units);
    }
  }
  ImmutableList.Builder<Allocation> result =
      ImmutableList.builderWithExpectedSize(pending.size());
  while (!pending.isEmpty()) {
    result.add(pending.remove().allocation);
  }
  return result.build();
}
// NOTE(review): fragment of a larger expiration test — setup and the enclosing
// method signature are outside the visible range.
assertTrue(map.isLive(entry, ticker.read()));
segment.writeQueue.add(entry);
assertSame(value, map.get(key));
assertSame(entry, segment.writeQueue.peek());
assertEquals(1, segment.writeQueue.size());
// A read must not disturb the write queue: same head, same size.
assertSame(entry, segment.writeQueue.peek());
assertEquals(1, segment.writeQueue.size());
// Expiring while the entry is still live must keep it in place ...
segment.expireEntries(ticker.read());
assertSame(value, map.get(key));
assertSame(entry, segment.writeQueue.peek());
assertEquals(1, segment.writeQueue.size());
// ... but once expired (the ticker has presumably advanced past the entry's
// deadline by now — TODO confirm), the entry is gone from both the map and
// the write queue.
segment.expireEntries(ticker.read());
assertNull(map.get(key));
assertTrue(segment.writeQueue.isEmpty());
// Buffers the message and then attempts to flush the buffer, looping until it
// is empty or sending becomes disallowed. If another flush is already in
// progress (tryFlushMessageBuffer returns false), the message stays buffered
// and only the session limits are checked.
@Override public void sendMessage(WebSocketMessage<?> message) throws IOException {
    if (shouldNotSend()) {
        return;
    }
    // Queue the message and account for its payload size before flushing.
    this.buffer.add(message);
    this.bufferSize.addAndGet(message.getPayloadLength());
    do {
        if (!tryFlushMessageBuffer()) {
            if (logger.isTraceEnabled()) {
                logger.trace(String.format("Another send already in progress: " +
                        "session id '%s':, \"in-progress\" send time %d (ms), buffer size %d bytes",
                        getId(), getTimeSinceSendStarted(), getBufferSize()));
            }
            checkSessionLimits();
            break;
        }
    } while (!this.buffer.isEmpty() && !shouldNotSend());
}
// Introspection hook: exposes this operator's internal state for the requested
// attribute key, deferring to InnerOperator's default for anything else.
@Override @Nullable public Object scanUnsafe(Attr key) {
    if (key == Attr.PARENT) return s;
    if (key == Attr.ERROR) return error;
    // Terminated only when the done flag is set AND no inner subscribers remain.
    if (key == Attr.TERMINATED) return done && subscribers.isEmpty();
    if (key == Attr.DELAY_ERROR) return errorMode != ErrorMode.IMMEDIATE;
    if (key == Attr.PREFETCH) return maxConcurrency;
    if (key == Attr.REQUESTED_FROM_DOWNSTREAM) return requested;
    // "Buffered" is reported as the number of active inner subscribers.
    if (key == Attr.BUFFERED) return subscribers.size();
    return InnerOperator.super.scanUnsafe(key);
}
/**
 * A queue that is serialized and then deserialized must contain the same
 * elements in the same order as the original.
 */
public void testSerialization() throws Exception {
    Queue original = populatedQueue(SIZE);
    Queue copy = serialClone(original);
    assertNotSame(original, copy);
    assertEquals(original.size(), copy.size());
    assertEquals(original.toString(), copy.toString());
    assertTrue(Arrays.equals(original.toArray(), copy.toArray()));
    // Draining both queues in lock-step checks element equality and ordering.
    while (!original.isEmpty()) {
        assertFalse(copy.isEmpty());
        assertEquals(original.remove(), copy.remove());
    }
    assertTrue(copy.isEmpty());
}
/**
 * Hands out a page id, preferring to recycle a released id whose releasing
 * generation has become stable; otherwise allocates a brand-new id.
 *
 * @param stableGeneration   current stable generation
 * @param unstableGeneration current unstable generation (unused here)
 * @return the acquired page id
 */
@Override
public long acquireNewId( long stableGeneration, long unstableGeneration )
{
    if ( !releasedIds.isEmpty() )
    {
        Pair<Long,Long> candidate = releasedIds.peek();
        // Only reuse an id once the generation that released it is stable.
        if ( candidate.getLeft() <= stableGeneration )
        {
            releasedIds.poll();
            long reusedId = candidate.getRight();
            zapPage( reusedId );
            return reusedId;
        }
    }
    return ++lastId;
}