/**
 * Add the given {@link PropertySource} to the start of the chain.
 * @param propertySource the PropertySource to add
 * @since 4.1
 */
public void addFirstPropertySource(PropertySource<?> propertySource) {
	// Insert at index 0 directly instead of copy/clear/re-add: same resulting
	// order, but avoids the transient window in which the chain is empty and
	// skips the defensive copy of the existing sources.
	this.propertySources.add(0, propertySource);
}
/**
 * Merges the per-local value sets of two incoming flows into {@code o}.
 * For each tracked local: if either input contains {@code UNKNOWN}, the merge
 * collapses to {@code UNKNOWN} alone; otherwise it is the union of both sets.
 * The sets held in {@code o} are mutated in place.
 */
protected void merge(HashMap<Local, Set<NewExpr>> in1, HashMap<Local, Set<NewExpr>> in2, HashMap<Local, Set<NewExpr>> o) {
    for (Local local : locals) {
        Set<NewExpr> first = in1.get(local);
        Set<NewExpr> second = in2.get(local);
        Set<NewExpr> merged = o.get(local);

        merged.clear();
        boolean anyUnknown = first.contains(UNKNOWN) || second.contains(UNKNOWN);
        if (anyUnknown) {
            merged.add(UNKNOWN);
        } else {
            merged.addAll(first);
            merged.addAll(second);
        }
    }
}
/**
 * Checks {@code exceptionClass} against a cache of already-validated classes and,
 * on a cache miss, validates it via {@code checkExceptionClassValidity} (which
 * presumably throws for unsuitable classes -- TODO confirm) before caching it.
 */
@Override
public void validateClass(Class<? extends Exception> exceptionClass) {
    // Fast path: compare against the referent of each cached weak reference.
    // A cleared reference yields null from get(), which never equals the class.
    for (WeakReference<Class<? extends Exception>> knownGood : validClasses) {
        if (exceptionClass.equals(knownGood.get())) {
            return;
        }
        // TODO(cpovirk): if reference has been cleared, remove it?
    }
    checkExceptionClassValidity(exceptionClass);

    /*
     * It's very unlikely that any loaded Futures class will see getChecked called with more
     * than a handful of exceptions. But it seems prudent to set a cap on how many we'll cache.
     * This avoids out-of-control memory consumption, and it keeps the cache from growing so
     * large that doing the lookup is noticeably slower than redoing the work would be.
     *
     * Ideally we'd have a real eviction policy, but until we see a problem in practice, I hope
     * that this will suffice. I have not even benchmarked with different size limits.
     */
    if (validClasses.size() > 1000) {
        // Crude eviction: wipe everything rather than track usage.
        validClasses.clear();
    }
    validClasses.add(new WeakReference<Class<? extends Exception>>(exceptionClass));
}
}
/**
 * Advances to the next distinct match alternative, first exhausting the branch
 * alternatives of the current match before searching from the next start position.
 * Alternatives whose group signature was already reported are skipped, so each
 * signature is returned at most once per match.
 *
 * @return true if a new (not previously seen) match alternative was found
 */
private boolean findNextAll() {
    // Drain remaining branch alternatives of the current match, if any.
    if (curMatchIter != null && curMatchIter.hasNext()) {
        while (curMatchIter.hasNext()) {
            int next = curMatchIter.next();
            curMatchStates.setMatchedGroups(next);
            String sig = getMatchedSignature();
            // Only report signatures not seen for this match yet.
            if (!prevMatchedSignatures.contains(sig)) {
                prevMatchedSignatures.add(sig);
                return true;
            }
        }
    }
    // Negative start presumably means no further search position -- TODO confirm
    // against find()'s contract.
    if (nextMatchStart < 0) {
        return false;
    }
    // Starting a fresh match: signatures from the previous match no longer apply.
    prevMatchedSignatures.clear();
    boolean matched = find(nextMatchStart, false);
    if (matched) {
        // Select the first alternative of the new match and remember its
        // signature so later calls do not report it again.
        Collection<Integer> matchedBranches = curMatchStates.getMatchIndices();
        curMatchIter = matchedBranches.iterator();
        int next = curMatchIter.next();
        curMatchStates.setMatchedGroups(next);
        prevMatchedSignatures.add(getMatchedSignature());
    }
    return matched;
}
/**
 * Set the 'autowired' annotation type, to be used on constructors, fields,
 * setter methods and arbitrary config methods.
 * <p>By default, the Spring-provided {@link Autowired} and {@link Value}
 * annotations are recognized.
 * <p>This setter exists so that developers can supply their own
 * (non-Spring-specific) annotation type to mark a member for autowiring.
 * Calling it replaces all previously registered annotation types with the
 * single given one.
 */
public void setAutowiredAnnotationType(Class<? extends Annotation> annotationType) {
	Assert.notNull(annotationType, "'autowiredAnnotationType' must not be null");
	this.autowiredAnnotationTypes.clear();
	this.autowiredAnnotationTypes.add(annotationType);
}
/** * Keep attribute of this model and remove other attributes. * @param attr the attribute name of the model * @return this model */ public M keep(String attr) { if (attrs.containsKey(attr)) { // prevent put null value to the newColumns Object keepIt = attrs.get(attr); boolean keepFlag = _getModifyFlag().contains(attr); attrs.clear(); _getModifyFlag().clear(); attrs.put(attr, keepIt); if (keepFlag) _getModifyFlag().add(attr); } else { attrs.clear(); _getModifyFlag().clear(); } return (M)this; }
/**
 * Copies all hint values from {@code source} into this instance.
 * The unique-fields set is deep-copied into this instance's own set; if the
 * source has no unique fields, this instance's existing set is left untouched.
 */
protected void copyFrom(CompilerHints source) {
	this.outputSize = source.outputSize;
	this.outputCardinality = source.outputCardinality;
	this.avgOutputRecordSize = source.avgOutputRecordSize;
	this.filterFactor = source.filterFactor;

	if (source.uniqueFields != null && !source.uniqueFields.isEmpty()) {
		// Lazily create our own set on first use, otherwise reuse it after clearing.
		if (this.uniqueFields == null) {
			this.uniqueFields = new HashSet<FieldSet>();
		}
		else {
			this.uniqueFields.clear();
		}
		this.uniqueFields.addAll(source.uniqueFields);
	}
}
}
int want = targetWorkerCount - (currValidWorkers + currentlyProvisioning.size()); while (want > 0) { final AutoScalingData provisioned = workerConfig.getAutoScaler().provision(); break; } else { currentlyProvisioning.addAll(newNodes); lastProvisionTime = DateTimes.nowUtc(); scalingStats.addProvisionEvent(provisioned); if (!currentlyProvisioning.isEmpty()) { Duration durSinceLastProvision = new Duration(lastProvisionTime, DateTimes.nowUtc()); log.info("%s provisioning. Current wait time: %s", currentlyProvisioning, durSinceLastProvision); log.makeAlert("Worker node provisioning taking too long!") .addData("millisSinceLastProvision", durSinceLastProvision.getMillis()) .addData("provisioningCount", currentlyProvisioning.size()) .emit(); currentlyProvisioning.clear();
/** * Removes the plugins that are not compatible with current environment. */ private void unloadIncompatiblePlugins() { // loop as long as the previous loop ignored some plugins. That allows to support dependencies // on many levels, for example D extends C, which extends B, which requires A. If A is not installed, // then B, C and D must be ignored. That's not possible to achieve this algorithm with a single // iteration over plugins. Set<String> removedKeys = new HashSet<>(); do { removedKeys.clear(); for (PluginInfo plugin : pluginInfosByKeys.values()) { if (!isCompatible(plugin, runtime, pluginInfosByKeys)) { removedKeys.add(plugin.getKey()); } } for (String removedKey : removedKeys) { pluginInfosByKeys.remove(removedKey); } } while (!removedKeys.isEmpty()); }
/**
 * Clears the parameter-flow rules registered under the given namespace and the
 * flow-id bookkeeping tied to them; when the namespace has no flow ids, only
 * the namespace-to-flow-id mapping is reset.
 */
private static void clearAndResetRulesFor(/*@Valid*/ String namespace) {
    Set<Long> flowIds = NAMESPACE_FLOW_ID_MAP.get(namespace);
    if (flowIds == null || flowIds.isEmpty()) {
        resetNamespaceFlowIdMapFor(namespace);
        return;
    }
    for (Long flowId : flowIds) {
        PARAM_RULES.remove(flowId);
        FLOW_NAMESPACE_MAP.remove(flowId);
    }
    flowIds.clear();
}
private List<EurekaEndpoint> getHostCandidates() { List<EurekaEndpoint> candidateHosts = clusterResolver.getClusterEndpoints(); quarantineSet.retainAll(candidateHosts); // If enough hosts are bad, we have no choice but start over again int threshold = (int) (candidateHosts.size() * transportConfig.getRetryableClientQuarantineRefreshPercentage()); //Prevent threshold is too large if (threshold > candidateHosts.size()) { threshold = candidateHosts.size(); } if (quarantineSet.isEmpty()) { // no-op } else if (quarantineSet.size() >= threshold) { logger.debug("Clearing quarantined list of size {}", quarantineSet.size()); quarantineSet.clear(); } else { List<EurekaEndpoint> remainingHosts = new ArrayList<>(candidateHosts.size()); for (EurekaEndpoint endpoint : candidateHosts) { if (!quarantineSet.contains(endpoint)) { remainingHosts.add(endpoint); } } candidateHosts = remainingHosts; } return candidateHosts; }
public void addPermission(final Type... list) { // Admin is all encompassing permission. No need to add other types if (!this.permissions.contains(Type.ADMIN)) { for (final Type perm : list) { this.permissions.add(perm); } // We add everything, and if there's Admin left, we make sure that only // Admin is remaining. if (this.permissions.contains(Type.ADMIN)) { this.permissions.clear(); this.permissions.add(Type.ADMIN); } } }
@Override public void validateClass(Class<? extends Exception> exceptionClass) { for (WeakReference<Class<? extends Exception>> knownGood : validClasses) { if (exceptionClass.equals(knownGood.get())) { return; } // TODO(cpovirk): if reference has been cleared, remove it? } checkExceptionClassValidity(exceptionClass); /* * It's very unlikely that any loaded Futures class will see getChecked called with more * than a handful of exceptions. But it seems prudent to set a cap on how many we'll cache. * This avoids out-of-control memory consumption, and it keeps the cache from growing so * large that doing the lookup is noticeably slower than redoing the work would be. * * Ideally we'd have a real eviction policy, but until we see a problem in practice, I hope * that this will suffice. I have not even benchmarked with different size limits. */ if (validClasses.size() > 1000) { validClasses.clear(); } validClasses.add(new WeakReference<Class<? extends Exception>>(exceptionClass)); } }
continue; children.add(c); stack1.push(iterateParent(c)); boolean canPop = true; while (e.it.hasNext()) { TypeIdItem tid = e.it.next(); if (tid == null) { continue; if (children.contains(superDef)) { System.err.println("WARN: dep-loop " + e.owner.clazz.descriptor.stringData.string + " -> " + superDef.clazz.descriptor.stringData.string); } else { canPop = false; children.add(superDef); stack1.push(iterateParent(superDef)); break; children.clear();