/** Returns the widest line length across the main body lines and the main header lines (0 when both are empty). */
private int computeTotalMainWidth() {
    int widest = 0;
    for (final AttributedString bodyLine : getMainLines()) {
        widest = Math.max(widest, bodyLine.length());
    }
    for (final AttributedString headerLine : getMainHeaderLines()) {
        widest = Math.max(widest, headerLine.length());
    }
    return widest;
}
/**
 * Returns the largest value found across all of the given arrays.
 * An empty array contributes 0 to the comparison.
 *
 * @param vals the arrays to scan; must not be empty
 * @return the maximum value found
 * @throws NoSuchElementException if {@code vals} itself is empty
 */
int getMaxValue(final List<int[]> vals) {
    if (vals.isEmpty()) {
        throw new NoSuchElementException();
    }
    int overallMax = Integer.MIN_VALUE;
    for (final int[] array : vals) {
        // An empty array counts as 0, matching the original orElse(0) default.
        int arrayMax = array.length == 0 ? 0 : array[0];
        for (int i = 1; i < array.length; i++) {
            arrayMax = Math.max(arrayMax, array[i]);
        }
        overallMax = Math.max(overallMax, arrayMax);
    }
    return overallMax;
}
/**
 * Returns the length of the longest line in a (possibly multi-line) string.
 *
 * @param multiLineString text whose lines are separated by {@code \n}
 * @return the maximum line length; 0 only for input with no characters before a newline
 */
private int getMultiLineStringLength(final String multiLineString) {
    int longest = 0;
    for (final String line : multiLineString.split("\n")) {
        if (line.length() > longest) {
            longest = line.length();
        }
    }
    return longest;
}
private int extractMaxIndex(String key, String suffixPattern) { // extract index and property keys final String escapedKey = Pattern.quote(key); final Pattern pattern = Pattern.compile(escapedKey + "\\.(\\d+)" + suffixPattern); final IntStream indexes = properties.keySet().stream() .flatMapToInt(k -> { final Matcher matcher = pattern.matcher(k); if (matcher.find()) { return IntStream.of(Integer.valueOf(matcher.group(1))); } return IntStream.empty(); }); // determine max index return indexes.max().orElse(-1); }
/**
 * @return The priority that this region should have in the compaction queue
 */
public int getCompactPriority() {
    // The region is as urgent as its most urgent (lowest-priority-value) store.
    boolean hasStore = false;
    int lowest = Integer.MAX_VALUE;
    for (HStore store : stores.values()) {
        hasStore = true;
        lowest = Math.min(lowest, store.getCompactPriority());
    }
    return hasStore ? lowest : Store.NO_PRIORITY;
}
/**
 * Returns the display width of column {@code i}: the longer of the column
 * header and the widest (multi-line) cell value in that column.
 */
private int getColumnLength(final List<String> columnHeaders, final List<List<String>> rowValues, final int i) {
    int width = columnHeaders.get(i).length();
    for (final List<String> row : rowValues) {
        width = Math.max(width, getMultiLineStringLength(row.get(i)));
    }
    return width;
}
/** Returns the smallest of the given values, or {@link Integer#MAX_VALUE} when none are supplied. */
private static int min(int... numbers) {
    int smallest = Integer.MAX_VALUE;
    for (final int number : numbers) {
        if (number < smallest) {
            smallest = number;
        }
    }
    return smallest;
}
/**
 * Adds the profile, or replaces the existing profile whose "name" entry
 * matches, then persists the full profile list.
 */
@Override
public void saveProfile(Map<String, Object> profile) {
    // assumes the profile carries a non-null "name" entry when the list is non-empty
    final Object name = profile.get("name");
    int existingIndex = -1;
    for (int i = 0; i < profiles.size(); i++) {
        if (name.equals(profiles.get(i).get("name"))) {
            existingIndex = i;
            break;
        }
    }
    if (existingIndex == -1) {
        profiles.add(profile);
    } else {
        profiles.set(existingIndex, profile);
    }
    saveSparkConf(profiles);
}
/**
 * Derives the ordered list of output channel types from a symbol-to-channel layout.
 * The layout must assign exactly one symbol to each channel in the contiguous
 * range [0, channelCount); the result is ordered by channel index.
 *
 * @param layout  mapping from output symbol to its channel index
 * @param context supplies the symbol-to-type mapping
 * @return one Type per channel, in channel order
 * @throws IllegalArgumentException if the layout does not cover every channel exactly once
 */
private static List<Type> toTypes(Map<Symbol, Integer> layout, LocalExecutionPlanContext context) {
    // verify layout covers all values
    // max().orElse(-1) + 1 yields 0 for an empty layout, so zero channels is valid
    int channelCount = layout.values().stream().mapToInt(Integer::intValue).max().orElse(-1) + 1;
    checkArgument(
            layout.size() == channelCount && ImmutableSet.copyOf(layout.values()).containsAll(ContiguousSet.create(closedOpen(0, channelCount), integers())),
            "Layout does not have a symbol for every output channel: %s",
            layout);
    // invert to channel -> symbol; bijective because the check above passed
    Map<Integer, Symbol> channelLayout = ImmutableBiMap.copyOf(layout).inverse();
    return range(0, channelCount)
            .mapToObj(channelLayout::get)
            .map(context.getTypes()::get)
            .collect(toImmutableList());
}
/**
 * Writes the (possibly truncated) list data plus row-count metadata fields to
 * the JSON generator.
 *
 * @throws IOException if writing to the generator fails
 */
private void serializeListData(JsonGenerator jgen, List<List<Number>> listData) throws IOException {
    final OptionalInt totalPoints = histogramReducer.totalPoints(listData);
    jgen.writeObjectField(GRAPHICS_LIST, histogramReducer.limitListData(listData));
    jgen.writeObjectField(TOTAL_NUMBER_OF_POINTS, totalPoints.orElse(0));
    final boolean tooManyRows = totalPoints.isPresent() && Histogram.ROWS_LIMIT <= totalPoints.getAsInt();
    jgen.writeBooleanField(TOO_MANY_ROWS, tooManyRows);
    jgen.writeObjectField(ROWS_LIMIT_ITEMS, Histogram.ROWS_LIMIT);
    jgen.writeObjectField(NUMBER_OF_POINTS_TO_DISPLAY, Histogram.NUMBER_OF_POINTS_TO_DISPLAY + ITEMS);
}
/**
 * Decodes the configured byte range [start, actualEnd) of the raw value.
 * An absent {@code end} means "up to the end of the value".
 *
 * @param value the raw bytes to decode; must not be null
 * @return a provider over the selected byte range
 * @throws PrestoException if the configured range extends past the value length
 */
public FieldValueProvider decodeField(byte[] value) {
    requireNonNull(value, "value is null");
    int actualEnd = end.orElse(value.length);
    if (start > value.length) {
        // fixed message typo: "less that" -> "less than"
        throw new PrestoException(DECODER_CONVERSION_NOT_SUPPORTED, format(
                "start offset %s for column '%s' must be less than or equal to value length %s",
                start, columnName, value.length));
    }
    if (actualEnd > value.length) {
        throw new PrestoException(DECODER_CONVERSION_NOT_SUPPORTED, format(
                "end offset %s for column '%s' must be less than or equal to value length %s",
                actualEnd, columnName, value.length));
    }
    return new RawValueProvider(ByteBuffer.wrap(value, start, actualEnd - start), fieldType, columnName, columnType);
}
/**
 * Picks the next split to run, rotating the producing task to the back of the
 * task list so splits are handed out round-robin across tasks.
 *
 * @return the chosen split, or {@code null} if no task currently has a runnable split
 */
private synchronized PrioritizedSplitRunner pollNextSplitWorker() {
    // todo find a better algorithm for this
    // find the first task that produces a split, then move that task to the
    // end of the task list, so we get round robin
    for (Iterator<TaskHandle> iterator = tasks.iterator(); iterator.hasNext(); ) {
        TaskHandle task = iterator.next();
        // skip tasks that are already running the configured max number of drivers
        // (an absent per-task max falls back to the global maximumNumberOfDriversPerTask)
        if (task.getRunningLeafSplits() >= task.getMaxDriversPerTask().orElse(maximumNumberOfDriversPerTask)) {
            continue;
        }
        PrioritizedSplitRunner split = task.pollNextSplit();
        if (split != null) {
            // move task to end of list
            iterator.remove();
            // CAUTION: we are modifying the list in the loop which would normally
            // cause a ConcurrentModificationException but we exit immediately
            tasks.add(task);
            return split;
        }
    }
    // every task was at its limit or had no splits queued
    return null;
}
private void scheduleDriversForDriverGroupLifeCycle(Lifespan lifespan) { // This method is called when a split that belongs to a previously unseen driver group is scheduled. // It schedules drivers for all the pipelines that have driver group life cycle. if (lifespan.isTaskWide()) { checkArgument(driverRunnerFactoriesWithDriverGroupLifeCycle.isEmpty(), "Instantiating pipeline of driver group lifecycle at task level is not allowed"); return; } List<DriverSplitRunner> runners = new ArrayList<>(); for (DriverSplitRunnerFactory driverSplitRunnerFactory : driverRunnerFactoriesWithDriverGroupLifeCycle) { for (int i = 0; i < driverSplitRunnerFactory.getDriverInstances().orElse(1); i++) { runners.add(driverSplitRunnerFactory.createDriverRunner(null, lifespan)); } } enqueueDriverSplitRunner(true, runners); for (DriverSplitRunnerFactory driverRunnerFactory : driverRunnerFactoriesWithDriverGroupLifeCycle) { driverRunnerFactory.noMoreDriverRunner(ImmutableList.of(lifespan)); } }
/** Estimated number of dictionary entries: unique fraction of non-null values, clamped to the configured maximum if one is set. */
@Override
public int getDictionaryEntries() {
    final int estimated = (int) (getNonNullValueCount() * uniquePercentage);
    // With no configured maximum, the estimate caps itself (min is a no-op).
    final int cap = maxDictionaryEntries.orElse(estimated);
    return min(estimated, cap);
}
/**
 * Builds a processor from the given filter and projections. Single-channel
 * deterministic filters/projections are wrapped in dictionary-aware variants.
 *
 * @param filter             optional row filter
 * @param projections        output column projections
 * @param initialBatchSize   starting batch size (defaults to 1 when absent)
 * @param expressionProfiler profiler for expression evaluation
 */
@VisibleForTesting
public PageProcessor(Optional<PageFilter> filter, List<? extends PageProjection> projections, OptionalInt initialBatchSize, ExpressionProfiler expressionProfiler) {
    this.filter = requireNonNull(filter, "filter is null")
            .map(f -> f.getInputChannels().size() == 1 && f.isDeterministic()
                    ? new DictionaryAwarePageFilter(f)
                    : f);
    this.projections = requireNonNull(projections, "projections is null").stream()
            .map(p -> p.getInputChannels().size() == 1 && p.isDeterministic()
                    ? new DictionaryAwarePageProjection(p, dictionarySourceIdFunction)
                    : p)
            .collect(toImmutableList());
    this.projectBatchSize = initialBatchSize.orElse(1);
    this.expressionProfiler = requireNonNull(expressionProfiler, "expressionProfiler is null");
}
private void scheduleDriversForTaskLifeCycle() { // This method is called at the beginning of the task. // It schedules drivers for all the pipelines that have task life cycle. List<DriverSplitRunner> runners = new ArrayList<>(); for (DriverSplitRunnerFactory driverRunnerFactory : driverRunnerFactoriesWithTaskLifeCycle) { for (int i = 0; i < driverRunnerFactory.getDriverInstances().orElse(1); i++) { runners.add(driverRunnerFactory.createDriverRunner(null, Lifespan.taskWide())); } } enqueueDriverSplitRunner(true, runners); for (DriverSplitRunnerFactory driverRunnerFactory : driverRunnerFactoriesWithTaskLifeCycle) { driverRunnerFactory.noMoreDriverRunner(ImmutableList.of(Lifespan.taskWide())); verify(driverRunnerFactory.isNoMoreDriverRunner()); } }
/** * Find the index of valueIndices which is {@link IndexedInts} returned from {@link #getValueSelector()#getRow()} * corresponding to the {@link #subColumnName}. * * @return index for valueIndices if found. -1 otherwise. */ private int findValueIndicesIndexForSubColumn() { final DimensionSelector keySelector = getKeySelector(); final DimensionSelector valueSelector = getValueSelector(); final IndexedInts keyIndices = keySelector.getRow(); final IndexedInts valueIndices = valueSelector.getRow(); final int limit = Math.min(keyIndices.size(), valueIndices.size()); return IntStream .range(0, limit) .filter(i -> subColumnName.equals(keySelector.lookupName(keyIndices.get(i)))) // subColumnName is never null .findAny() .orElse(-1); }
private synchronized void scheduleTaskIfNecessary(TaskHandle taskHandle) { // if task has less than the minimum guaranteed splits running, // immediately schedule a new split for this task. This assures // that a task gets its fair amount of consideration (you have to // have splits to be considered for running on a thread). if (taskHandle.getRunningLeafSplits() < min(guaranteedNumberOfDriversPerTask, taskHandle.getMaxDriversPerTask().orElse(Integer.MAX_VALUE))) { PrioritizedSplitRunner split = taskHandle.pollNextSplit(); if (split != null) { startSplit(split); splitQueuedTime.add(Duration.nanosSince(split.getCreatedNanos())); } } }
/**
 * Inserts one public project with live ncloc measures on its main branch and
 * on several additional branches, returning the largest ncloc inserted.
 */
private int insertPublicProjectsWithBranches(OrganizationDto org, MetricDto ncloc, int minimumNcloc) {
    // public project (NOTE(review): original comment said "private", but insertPublicProject is called)
    ComponentDto project1 = db.components().insertPublicProject(org);
    return Math.max(
            // Create the ncloc on main branch
            insertLiveMeasures(project1, ncloc, minimumNcloc),
            // Create branches and set the ncloc on them
            // NOTE(review): original comment said "5 branches" but range(1, 5) iterates 4 times — confirm intended count
            IntStream.range(1, 5)
                    .map(i -> insertLiveMeasures(db.components().insertProjectBranch(project1), ncloc, minimumNcloc))
                    .max().orElse(0)
    );
}
/**
 * Inserts one private project with live ncloc measures on its main branch and
 * on several additional branches, returning the largest ncloc inserted.
 */
private int insertPrivateProjectsWithBranches(OrganizationDto org, MetricDto ncloc) {
    // private project
    ComponentDto project1 = db.components().insertMainBranch(org);
    return Math.max(
            // Create the ncloc on main branch
            insertLiveMeasures(project1, ncloc, 0),
            // Create branches and set the ncloc on them
            // NOTE(review): original comment said "5 branches" but range(1, 5) iterates 4 times — confirm intended count
            IntStream.range(1, 5)
                    .map(i -> insertLiveMeasures(db.components().insertProjectBranch(project1), ncloc, 0))
                    .max().orElse(0)
    );
}