static <E> Attribute<E, ?>[] toArray(Collection<Attribute<E, ?>> attributes, Predicate<Attribute<E, ?>> filter) {
    // Collect the attributes that pass the filter, preserving their encounter order.
    LinkedHashSet<Attribute> filtered = new LinkedHashSet<>();
    for (Attribute<E, ?> attribute : attributes) {
        if (filter == null || filter.test(attribute)) {
            filtered.add(attribute);
        }
    }
    Attribute<E, ?>[] array = new Attribute[filtered.size()];
    return filtered.toArray(array);
}
private boolean addRecordAndEvictIfNecessary(GlobalMetadata recordToAdd) {
    // First remove the element from the HashSet if it's already in there to reset
    // the 'LRU' piece; then add it back in
    boolean isNew = !metadataRecords.remove(recordToAdd);
    metadataRecords.add(recordToAdd);

    // Now remove the first element (which should be the oldest) from the list
    // if we've exceeded the cache size
    if (cacheSize != -1 && metadataRecords.size() > cacheSize) {
        Iterator<GlobalMetadata> recordIt = metadataRecords.iterator();
        recordIt.next(); // Remove the oldest element - don't care what it is
        recordIt.remove();
    }
    return isNew;
}
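The remove-then-add pair is the load-bearing trick here: a LinkedHashSet iterates in insertion order, and its contract says that re-inserting an already-present element does not change that order, so a plain add() would never refresh a record's position. A minimal standalone demonstration (the class name LruOrderDemo is ours, not from this codebase):

import java.util.LinkedHashSet;

public class LruOrderDemo {
    public static void main(String[] args) {
        LinkedHashSet<String> set = new LinkedHashSet<>();
        set.add("a");
        set.add("b");

        set.add("a");              // re-adding does NOT move "a"
        System.out.println(set);   // [a, b]

        set.remove("a");           // remove-then-add is what refreshes the position
        set.add("a");
        System.out.println(set);   // [b, a]
    }
}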
final Class<?>[] array = ancestors.toArray(new Class<?>[ancestors.size()]);
Arrays.sort(array, SPECIFICITY_CLASS_COMPARATOR);
final LinkedHashSet<Class<?>> lineage = new LinkedHashSet<Class<?>>(array.length + 1);
lineage.add(clazz);
Collections.addAll(lineage, array);
final LineageInfo result = new LineageInfo(lineage, specificity);
public static Locale[] localesFromStrings(String[] localeNames) {
    // We need to remove duplicates caused by the conversion of "he" to "iw", et cetera.
    // Java needs the obsolete code, ICU needs the modern code, but we let ICU know about
    // both so that we never need to convert back when talking to it.
    LinkedHashSet<Locale> set = new LinkedHashSet<Locale>();
    for (String localeName : localeNames) {
        set.add(localeFromString(localeName));
    }
    return set.toArray(new Locale[set.size()]);
}
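The duplicates the comment warns about are real JDK behavior: for historical compatibility, java.util.Locale still normalizes a handful of language codes to their obsolete ISO 639 forms, so two distinct input strings can produce equal Locale objects. A small illustration:

import java.util.Locale;

public class ObsoleteCodeDemo {
    public static void main(String[] args) {
        // Locale maps "he" back to the obsolete code "iw" (likewise yi -> ji, id -> in),
        // so "he" and "iw" collide and the LinkedHashSet above de-duplicates them.
        System.out.println(new Locale("he").getLanguage()); // iw
        System.out.println(new Locale("iw").getLanguage()); // iw
    }
}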
/**
 * Extracts the subclasses of the base POJO class registered in the execution config.
 */
private static LinkedHashSet<Class<?>> getRegisteredSubclassesFromExecutionConfig(
        Class<?> basePojoClass, ExecutionConfig executionConfig) {
    LinkedHashSet<Class<?>> subclassesInRegistrationOrder =
            new LinkedHashSet<>(executionConfig.getRegisteredPojoTypes().size());
    for (Class<?> registeredClass : executionConfig.getRegisteredPojoTypes()) {
        if (registeredClass.equals(basePojoClass)) {
            continue;
        }
        if (!basePojoClass.isAssignableFrom(registeredClass)) {
            continue;
        }
        subclassesInRegistrationOrder.add(registeredClass);
    }
    return subclassesInRegistrationOrder;
}
LinkedHashSet<ReduceSinkOperator> correlatedReduceSinkOperators =
        findCorrelatedReduceSinkOperators(op, keyCols, partitionCols, sortOrder, parent, correlation);
if (correlatedReduceSinkOperators.size() == 0) {
    newReduceSinkOperators.add(op);
} else {
    for (ReduceSinkOperator rsop : correlatedReduceSinkOperators) {
        LinkedHashSet<ReduceSinkOperator> exploited =
                exploitJobFlowCorrelation(rsop, correlationCtx, correlation);
        if (exploited.size() == 0) {
            newReduceSinkOperators.add(rsop);
        } else {
            newReduceSinkOperators.addAll(exploited);
        }
    }
}
/**
 * Returns the lineage of the specified class ordered by specificity (the class itself is at
 * position 0 since it is most specific in its lineage).
 */
public static Class<?>[] getLineage(final Class<?> clazz) {
    final LineageInfo lineageInfo = getLineageInfo(clazz);
    return lineageInfo == null
            ? EMPTY_CLASS_ARRAY
            : lineageInfo.lineage.toArray(new Class<?>[lineageInfo.lineage.size()]);
}
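Given that contract, callers can take position 0 without searching. A hedged usage sketch (it assumes, as the fragment above suggests, that the rest of the lineage consists of the class's supertypes):

// Position 0 is the queried class itself, per the javadoc.
Class<?>[] lineage = getLineage(java.util.ArrayList.class);
assert lineage[0] == java.util.ArrayList.class;

// Assuming the remaining entries are supertypes sorted by decreasing
// specificity, each one accepts an ArrayList:
for (Class<?> type : lineage) {
    assert type.isAssignableFrom(java.util.ArrayList.class);
}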
for (String libraryName : map.keySet()) {
    LinkedHashSet<Class> classSet = map.get(libraryName);
    Class[] classArray = classSet.toArray(new Class[classSet.size()]);
    File[] files = generateAndCompile(classArray, libraryName, count == 0, count == map.size() - 1);
    if (files != null && files.length > 0) {
public MaterializedField withPathAndType(String name, final MajorType type) {
    // Deep-copy the children so the renamed/retyped field shares no mutable state.
    final LinkedHashSet<MaterializedField> newChildren = new LinkedHashSet<>(children.size());
    for (final MaterializedField child : children) {
        newChildren.add(child.clone());
    }
    return new MaterializedField(name, type, newChildren);
}
public String[] getMetaAttributeDomains() {
    // Merge the backend's domains with the built-in namespaces, dropping duplicates
    // while keeping the backend's order.
    final LinkedHashSet<String> namespaces = new LinkedHashSet<String>();
    namespaces.addAll( Arrays.asList( backend.getMetaAttributeDomains() ) );
    namespaces.add( MetaAttributeNames.Core.NAMESPACE );
    namespaces.add( MetaAttributeNames.Formatting.NAMESPACE );
    return namespaces.toArray( new String[namespaces.size()] );
}
@Override
public String[] getMechanismNames(final Map<String, ?> props) {
    // Union of the mechanism names reported by every delegate factory, in first-seen order.
    final LinkedHashSet<String> names = new LinkedHashSet<String>();
    for (SaslServerFactory factory : factories) {
        if (factory != null) {
            Collections.addAll(names, factory.getMechanismNames(props));
        }
    }
    return names.toArray(new String[names.size()]);
}
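The last two snippets share the same shape: gather arrays from several sources into a LinkedHashSet so duplicates vanish but first-seen order survives, then convert back to an array. As a generic utility it might look like the following sketch (ArrayMerge and mergeDistinct are hypothetical names, not an API of either project):

import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.function.Function;

final class ArrayMerge {
    // Merge the arrays produced by each source, keeping first-seen order
    // and dropping duplicates.
    static <S> String[] mergeDistinct(Iterable<S> sources, Function<S, String[]> extractor) {
        LinkedHashSet<String> merged = new LinkedHashSet<>();
        for (S source : sources) {
            Collections.addAll(merged, extractor.apply(source));
        }
        return merged.toArray(new String[0]);
    }

    public static void main(String[] args) {
        String[] out = mergeDistinct(
                Arrays.asList(new String[]{"PLAIN", "DIGEST-MD5"}, new String[]{"PLAIN", "GSSAPI"}),
                arr -> arr);
        System.out.println(Arrays.toString(out)); // [PLAIN, DIGEST-MD5, GSSAPI]
    }
}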
public String[] getNamesByAnnotation(Class<? extends Annotation> klass, IocContext context) {
    List<String> names = new ArrayList<String>(loader.getNamesByAnnotation(createLoading(), klass));
    IocContext cntx;
    if (null == context || context == this.context)
        cntx = this.context;
    else
        cntx = new ComboContext(context, this.context);
    for (String name : cntx.names()) {
        ObjectProxy op = cntx.fetch(name);
        // Check the bean's own class for the annotation (the original
        // klass.getAnnotation(klass) asked the annotation type about itself).
        if (op.getObj() != null && op.getObj().getClass().getAnnotation(klass) != null)
            names.add(name);
    }
    LinkedHashSet<String> re = new LinkedHashSet<String>();
    for (String name : names) {
        if (Strings.isBlank(name) || "null".equals(name))
            continue;
        re.add(name);
    }
    return re.toArray(new String[re.size()]);
}
private void flattenNode(PlanNode node, int limit) {
    PlanNode resolved = lookup.resolve(node);

    // (limit - 2) because you need to account for adding left and right side
    if (!(resolved instanceof JoinNode) || (sources.size() > (limit - 2))) {
        sources.add(node);
        return;
    }

    JoinNode joinNode = (JoinNode) resolved;
    if (joinNode.getType() != INNER
            || !isDeterministic(joinNode.getFilter().orElse(TRUE_LITERAL))
            || joinNode.getDistributionType().isPresent()) {
        sources.add(node);
        return;
    }

    // we set the left limit to limit - 1 to account for the node on the right
    flattenNode(joinNode.getLeft(), limit - 1);
    flattenNode(joinNode.getRight(), limit);

    joinNode.getCriteria().stream()
            .map(EquiJoinClause::toExpression)
            .forEach(filters::add);
    joinNode.getFilter().ifPresent(filters::add);
}
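To see the limit accounting work out, trace limit = 3 on ((A JOIN B) JOIN C): the outer call recurses into its left child (A JOIN B) with limit 2; there sources.size() is 0, which is not greater than 2 - 2 = 0, so that join is flattened and A and B become sources; back in the outer call the right side C is added, for exactly three sources. Had C itself been an inner join, sources.size() would already be 2 > 3 - 2 = 1, so C would be kept as a single source rather than flattened further.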