// Test-double factory: ignores the supplied elements and always returns the
// JDK's immutable empty SortedSet.
// NOTE(review): fragment — the trailing "} })" closes an anonymous class and a
// method call that begin outside this excerpt.
@Override public SortedSet<String> create(String[] elements) { return Collections.emptySortedSet(); } })
/**
 * Returns the cached set of languages for the given module, or the immutable
 * empty SortedSet when the module has no cache entry.
 */
public SortedSet<String> languages(String moduleKey) {
    // Equivalent to getOrDefault: fall back only when the key is truly absent.
    return languagesCache.containsKey(moduleKey)
            ? languagesCache.get(moduleKey)
            : Collections.emptySortedSet();
}
/**
 * Returns a new {@link GapAwareTrackingToken} instance based on the given {@code index} and collection of {@code
 * gaps}.
 *
 * @param index the highest global sequence number of events up until (and including) this tracking token
 * @param gaps  global sequence numbers of events that have not been seen yet even though these sequence numbers are
 *              smaller than the current index. These missing sequence numbers may be filled in later when those
 *              events get committed to the store or may never be filled in if those events never get committed.
 *              May be {@code null} (e.g. when the property is absent from serialized JSON), which is treated the
 *              same as an empty collection.
 * @return a new tracking token from given index and gaps
 */
@JsonCreator
public static GapAwareTrackingToken newInstance(@JsonProperty("index") long index,
                                                @JsonProperty("gaps") Collection<Long> gaps) {
    // Jackson invokes this @JsonCreator with null when "gaps" is missing from the
    // payload; previously that caused a NullPointerException on gaps.isEmpty().
    if (gaps == null || gaps.isEmpty()) {
        return new GapAwareTrackingToken(index, Collections.emptySortedSet());
    }
    SortedSet<Long> gapSet = new ConcurrentSkipListSet<>(gaps);
    // Every gap must lie strictly below the head index, otherwise the token is inconsistent.
    Assert.isTrue(gapSet.last() < index,
                  () -> String.format("Gap indices [%s] should all be smaller than head index [%d]", gaps, index));
    return new GapAwareTrackingToken(index, gapSet);
}
/**
 * Writes the "qualityProfiles" JSON array for the given component, derived from
 * the QUALITY_PROFILES_KEY live measure of the component's project. Profiles
 * whose key no longer resolves to a stored QProfileDto are flagged as deleted.
 */
private void writeProfiles(JsonWriter json, DbSession dbSession, ComponentDto component) {
    // Deserialize the live-measure payload into profiles; absent measure means none.
    Set<QualityProfile> profiles = dbClient.liveMeasureDao()
        .selectMeasure(dbSession, component.projectUuid(), QUALITY_PROFILES_KEY)
        .map(LiveMeasureDto::getDataAsString)
        .map(data -> QPMeasureData.fromJson(data).getProfiles())
        .orElse(emptySortedSet());
    // Index the persisted profile DTOs by key to detect profiles deleted since the measure was taken.
    Map<String, QProfileDto> dtosByKey = dbClient.qualityProfileDao()
        .selectByUuids(dbSession, profiles.stream().map(QualityProfile::getQpKey).collect(Collectors.toList()))
        .stream()
        .collect(uniqueIndex(QProfileDto::getKee));
    json.name("qualityProfiles").beginArray();
    for (QualityProfile profile : profiles) {
        writeToJson(json, profile, !dtosByKey.containsKey(profile.getQpKey()));
    }
    json.endArray();
}
// Register the runtime classes of the JDK's wrapper views over empty
// collections (checked / synchronized / unmodifiable variants of Set,
// SortedMap and SortedSet).
// NOTE(review): fragment — the declaration of `classes` and the enclosing
// method are outside this excerpt.
classes.add(Collections.checkedSet(Collections.emptySet(), Void.class).getClass());
classes.add(Collections.checkedSortedMap(Collections.emptySortedMap(), Void.class, Void.class).getClass());
classes.add(Collections.checkedSortedSet(Collections.emptySortedSet(), Void.class).getClass());
classes.add(Collections.synchronizedSet(Collections.emptySet()).getClass());
classes.add(Collections.synchronizedSortedMap(Collections.emptySortedMap()).getClass());
classes.add(Collections.synchronizedSortedSet(Collections.emptySortedSet()).getClass());
classes.add(Collections.unmodifiableSet(Collections.emptySet()).getClass());
classes.add(Collections.unmodifiableSortedMap(Collections.emptySortedMap()).getClass());
classes.add(Collections.unmodifiableSortedSet(Collections.emptySortedSet()).getClass());
/** * Get brokers that the rebalance process will go over to apply balancing actions to rep licas they contain. * * @param clusterModel The state of the cluster. * @return A collection of brokers that the rebalance process will go over to apply balancing actions to replicas * they contain. */ @Override protected SortedSet<Broker> brokersToBalance(ClusterModel clusterModel) { if (!clusterModel.deadBrokers().isEmpty()) { return clusterModel.deadBrokers(); } if (_currentRebalanceTopic == null) { return Collections.emptySortedSet(); } // Brokers having over minimum number of replicas per broker for the current rebalance topic are eligible for balancing. SortedSet<Broker> brokersToBalance = new TreeSet<>(); int minNumReplicasPerBroker = _replicaDistributionTargetByTopic.get(_currentRebalanceTopic).minNumReplicasPerBroker(); brokersToBalance.addAll(clusterModel.brokers().stream() .filter(broker -> broker.replicasOfTopicInBroker(_currentRebalanceTopic).size() > minNumReplicasPerBroker) .collect(Collectors.toList())); return brokersToBalance; }
// Fragment of an Optional pipeline: extract the measure's JSON payload,
// deserialize it into quality profiles, and default to an empty sorted set
// when the measure is absent.
// NOTE(review): the receiver of this chain is outside this excerpt.
.map(LiveMeasureDto::getDataAsString)
.map(data -> QPMeasureData.fromJson(data).getProfiles())
.orElse(emptySortedSet());
// Fragment of a larger try block: the old (now empty) queue node is removed
// from ZooKeeper and a pair of (new queue id, empty offset set) is returned.
// NOTE(review): the return statement appears twice; the second occurrence is
// unreachable as written (javac rejects statements after return) — likely an
// artifact of how this excerpt was extracted. Confirm against the full file.
ZKUtil.deleteNodeFailSilent(zookeeper, oldQueueNode);
LOG.info("Removed empty {}/{}", sourceServerName, queueId);
return new Pair<>(newQueueId, Collections.emptySortedSet());
return new Pair<>(newQueueId, Collections.emptySortedSet());
} catch (KeeperException | InterruptedException e) {
throw new ReplicationException("Claim queue queueId=" + queueId + " from " +
// Fragment of a conditional expression: box the primitive stream into a
// TreeSet when the condition (outside this excerpt) holds, otherwise use the
// immutable empty SortedSet.
.boxed()
.collect(Collectors.toCollection(TreeSet::new))
: Collections.emptySortedSet()
);
} else {
/**
 * Pre-populates the stub stream with one tracked event per given sequence
 * number, each carrying a GapAwareTrackingToken advanced to that sequence.
 */
public StubTrackingEventStream(long... tokens) {
    // Start from index -1 with no gaps so the first advanceTo lands on the first sequence.
    GapAwareTrackingToken token = GapAwareTrackingToken.newInstance(-1, emptySortedSet());
    eventMessages = new LinkedList<>();
    // Long (boxed) loop variable kept deliberately to preserve the original
    // autoboxing/overload behavior of createEvent(seq).
    for (Long seq : tokens) {
        token = token.advanceTo(seq, 1000, true);
        eventMessages.add(new GenericTrackedEventMessage<>(token, createEvent(seq)));
    }
}
// Fragment of a conditional expression (duplicate of an earlier excerpt): box
// the primitive stream into a TreeSet when the condition (outside this
// excerpt) holds, otherwise use the immutable empty SortedSet.
.boxed()
.collect(Collectors.toCollection(TreeSet::new))
: Collections.emptySortedSet()
);
} else {
// NOTE(review): duplicated return — the second statement is unreachable as
// written (javac rejects statements after return); likely an artifact of how
// this excerpt was extracted. Both yield the immutable empty SortedSet.
return Collections.emptySortedSet();
return Collections.emptySortedSet();
// Fragment of a test method: positions 0 and 5 of the replay are expected to
// carry ReplayToken instances, while position 6 carries a plain
// GapAwareTrackingToken at index 6 with no gaps.
assertTrue(replayRun.get(0) instanceof ReplayToken);
assertTrue(replayRun.get(5) instanceof ReplayToken);
assertEquals(GapAwareTrackingToken.newInstance(6, emptySortedSet()), replayRun.get(6));
// Fragment of a test method: the JDK's shared empty collections and common
// value-type instances are expected to satisfy the immutability predicate.
assertTrue(immutability.test(Collections.emptySet()));
assertTrue(immutability.test(Collections.emptySortedMap()));
assertTrue(immutability.test(Collections.emptySortedSet()));
assertTrue(immutability.test(Boolean.TRUE));
assertTrue(immutability.test(Character.valueOf('a')));
/**
 * Builds a PluginConfigurationBuilder preconfigured with test defaults for the
 * given Checkstyle version: all-sources scan scope, boolean toggles off, empty
 * sorted-set and list arguments, and a fixed "aVersion" plugin version.
 */
public static PluginConfigurationBuilder testInstance(final String checkstyleVersion) {
    return new PluginConfigurationBuilder(
            checkstyleVersion,
            ScanScope.AllSources,
            false,
            false,
            Collections.emptySortedSet(),
            Collections.emptyList(),
            null,
            false,
            "aVersion");
}
// Test-double factory (duplicate of an earlier excerpt): ignores the supplied
// elements and always returns the JDK's immutable empty SortedSet.
// NOTE(review): fragment — the trailing "} })" closes an anonymous class and a
// method call that begin outside this excerpt.
@Override public SortedSet<String> create(String[] elements) { return Collections.emptySortedSet(); } })
// Anonymous-subclass stub: reports that there are no buckets.
// NOTE(review): fragment — the trailing "};" closes an anonymous class whose
// declaration begins outside this excerpt.
@Override public SortedSet<Bucket> getBuckets() { return Collections.emptySortedSet(); } };
/**
 * Builds a RevTree from the given components, treating null collections as
 * empty. Leaf trees (no buckets) are created from their node lists; bucketed
 * trees from their bucket set and child-tree count.
 */
static RevTree build(final long size, final int childTreeCount, @Nullable List<Node> trees,
        @Nullable List<Node> features, @Nullable SortedSet<Bucket> buckets) {
    // Normalize nulls into empty collections instead of reassigning parameters.
    final List<Node> treeNodes = trees == null ? Collections.emptyList() : trees;
    final List<Node> featureNodes = features == null ? Collections.emptyList() : features;
    final SortedSet<Bucket> bucketSet = buckets == null ? Collections.emptySortedSet() : buckets;
    final ObjectId id = HashObjectFunnels.hashTree(treeNodes, featureNodes, bucketSet);
    return bucketSet.isEmpty()
            ? RevObjectFactory.defaultInstance().createTree(id, size, treeNodes, featureNodes)
            : RevObjectFactory.defaultInstance().createTree(id, size, childTreeCount, bucketSet);
}
/**
 * Loads the edge blacklist for the given network snapshot from storage,
 * normalizing a missing (null) blacklist into the immutable empty SortedSet so
 * callers never see null.
 */
@Override
@Nonnull
public SortedSet<Edge> getEdgeBlacklist(@Nonnull NetworkSnapshot networkSnapshot) {
    SortedSet<Edge> blacklist =
        _storage.loadEdgeBlacklist(networkSnapshot.getNetwork(), networkSnapshot.getSnapshot());
    return blacklist == null ? Collections.emptySortedSet() : blacklist;
}
/**
 * Collects the documents referring to the handle of the given node, sorted by
 * name. A document node is first resolved to its parent handle; anything that
 * is not a handle yields the immutable empty SortedSet.
 */
static SortedSet<Node> getReferrersSortedByName(Node handle, final boolean retrieveUnpublished)
        throws RepositoryException {
    // When given a document, step up to its enclosing handle.
    final Node resolved = handle.isNodeType(HippoNodeType.NT_DOCUMENT) ? handle.getParent() : handle;
    if (!resolved.isNodeType(HippoNodeType.NT_HANDLE)) {
        return Collections.emptySortedSet();
    }
    final Map<String, Node> referrers = WorkflowUtils.getReferringDocuments(resolved, retrieveUnpublished);
    return getSortedReferrers(referrers.values());
}