public SqlTaskIoStats()
{
    this(new CounterStat(), new CounterStat(), new CounterStat(), new CounterStat());
}
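// For reference: CounterStat in these snippets is io.airlift.stats.CounterStat. The sketch below is
// illustrative only (the class and variable names are made up) and assumes just the update(long),
// merge(CounterStat), and getTotalCount() methods of that class.
import io.airlift.stats.CounterStat;

public class CounterStatUsageSketch
{
    public static void main(String[] args)
    {
        CounterStat bytesRead = new CounterStat();
        bytesRead.update(1024); // record 1 KB read
        bytesRead.update(2048); // record 2 KB read

        // Aggregate into a fresh counter, the same merge pattern the getters below rely on
        CounterStat snapshot = new CounterStat();
        snapshot.merge(bytesRead);

        System.out.println(snapshot.getTotalCount()); // expected: 3072
    }
}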
public MultilevelSplitQueue(double levelTimeMultiplier)
{
    this.levelMinPriority = new AtomicLong[LEVEL_THRESHOLD_SECONDS.length];
    this.levelWaitingSplits = new ArrayList<>(LEVEL_THRESHOLD_SECONDS.length);
    ImmutableList.Builder<CounterStat> counters = ImmutableList.builder();
    for (int i = 0; i < LEVEL_THRESHOLD_SECONDS.length; i++) {
        levelScheduledTime[i] = new AtomicLong();
        levelMinPriority[i] = new AtomicLong(-1);
        levelWaitingSplits.add(new PriorityQueue<>());
        counters.add(new CounterStat());
    }
    this.selectedLevelCounters = counters.build();
    this.levelTimeMultiplier = levelTimeMultiplier;
}
public CounterStat getInputDataSize()
{
    OperatorContext inputOperator = getFirst(operatorContexts, null);
    if (inputOperator != null) {
        return inputOperator.getInputDataSize();
    }
    else {
        return new CounterStat();
    }
}

public CounterStat getInputPositions()
{
    OperatorContext inputOperator = getFirst(operatorContexts, null);
    if (inputOperator != null) {
        return inputOperator.getInputPositions();
    }
    else {
        return new CounterStat();
    }
}
public CounterStat getOutputDataSize()
{
    OperatorContext outputOperator = getLast(operatorContexts, null);
    if (outputOperator != null) {
        return outputOperator.getOutputDataSize();
    }
    else {
        return new CounterStat();
    }
}

public CounterStat getOutputPositions()
{
    OperatorContext outputOperator = getLast(operatorContexts, null);
    if (outputOperator != null) {
        return outputOperator.getOutputPositions();
    }
    else {
        return new CounterStat();
    }
}
public CounterStat getOutputDataSize()
{
    CounterStat stat = new CounterStat();
    stat.merge(outputDataSize);
    for (DriverContext driver : drivers) {
        stat.merge(driver.getOutputDataSize());
    }
    return stat;
}

public CounterStat getInputDataSize()
{
    CounterStat stat = new CounterStat();
    stat.merge(rawInputDataSize);
    for (DriverContext driver : drivers) {
        stat.merge(driver.getInputDataSize());
    }
    return stat;
}

public CounterStat getInputPositions()
{
    CounterStat stat = new CounterStat();
    stat.merge(rawInputPositions);
    for (DriverContext driver : drivers) {
        stat.merge(driver.getInputPositions());
    }
    return stat;
}

public CounterStat getOutputPositions()
{
    CounterStat stat = new CounterStat();
    stat.merge(outputPositions);
    for (DriverContext driver : drivers) {
        stat.merge(driver.getOutputPositions());
    }
    return stat;
}
public CounterStat getOutputPositions()
{
    CounterStat stat = new CounterStat();
    for (PipelineContext pipelineContext : pipelineContexts) {
        if (pipelineContext.isOutputPipeline()) {
            stat.merge(pipelineContext.getOutputPositions());
        }
    }
    return stat;
}

public CounterStat getInputDataSize()
{
    CounterStat stat = new CounterStat();
    for (PipelineContext pipelineContext : pipelineContexts) {
        if (pipelineContext.isInputPipeline()) {
            stat.merge(pipelineContext.getInputDataSize());
        }
    }
    return stat;
}

public CounterStat getInputPositions()
{
    CounterStat stat = new CounterStat();
    for (PipelineContext pipelineContext : pipelineContexts) {
        if (pipelineContext.isInputPipeline()) {
            stat.merge(pipelineContext.getInputPositions());
        }
    }
    return stat;
}

public CounterStat getOutputDataSize()
{
    CounterStat stat = new CounterStat();
    for (PipelineContext pipelineContext : pipelineContexts) {
        if (pipelineContext.isOutputPipeline()) {
            stat.merge(pipelineContext.getOutputDataSize());
        }
    }
    return stat;
}
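// The task-level getters above follow the same pattern as the pipeline- and driver-level ones:
// merge every child counter into a fresh CounterStat, so callers receive a point-in-time aggregate
// without mutating the live counters. A standalone sketch of that pattern (StatAggregationSketch
// and its field names are illustrative, not Presto classes):
import com.google.common.collect.ImmutableList;
import io.airlift.stats.CounterStat;
import java.util.List;

public class StatAggregationSketch
{
    private final List<CounterStat> childOutputPositions;

    public StatAggregationSketch(List<CounterStat> childOutputPositions)
    {
        this.childOutputPositions = ImmutableList.copyOf(childOutputPositions);
    }

    public CounterStat getOutputPositions()
    {
        // Merging into a new CounterStat leaves the child counters untouched
        CounterStat stat = new CounterStat();
        for (CounterStat child : childOutputPositions) {
            stat.merge(child);
        }
        return stat;
    }
}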
public NodeScheduler(
        NetworkLocationCache networkLocationCache,
        NetworkTopology networkTopology,
        InternalNodeManager nodeManager,
        NodeSchedulerConfig config,
        NodeTaskMap nodeTaskMap)
{
    this.networkLocationCache = networkLocationCache;
    this.nodeManager = nodeManager;
    this.minCandidates = config.getMinCandidates();
    this.includeCoordinator = config.isIncludeCoordinator();
    this.maxSplitsPerNode = config.getMaxSplitsPerNode();
    this.maxPendingSplitsPerTask = config.getMaxPendingSplitsPerTask();
    this.nodeTaskMap = requireNonNull(nodeTaskMap, "nodeTaskMap is null");
    checkArgument(maxSplitsPerNode >= maxPendingSplitsPerTask, "maxSplitsPerNode must be >= maxPendingSplitsPerTask");
    this.useNetworkTopology = !config.getNetworkTopology().equals(NetworkTopologyType.LEGACY);

    ImmutableList.Builder<CounterStat> builder = ImmutableList.builder();
    if (useNetworkTopology) {
        networkLocationSegmentNames = ImmutableList.copyOf(networkTopology.getLocationSegmentNames());
        for (int i = 0; i < networkLocationSegmentNames.size() + 1; i++) {
            builder.add(new CounterStat());
        }
    }
    else {
        networkLocationSegmentNames = ImmutableList.of();
    }
    topologicalSplitCounters = builder.build();
}
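// In the constructor above, topologicalSplitCounters gets networkLocationSegmentNames.size() + 1
// entries. The sketch below is only an assumption about how such a per-depth counter list can be
// used (recordMatchDepth is a hypothetical helper, not NodeScheduler code): slot 0 would count
// splits whose chosen node matched no location segment, and slot i splits matched through the
// first i segments.
import io.airlift.stats.CounterStat;
import java.util.List;

public class TopologyCounterSketch
{
    public static void recordMatchDepth(List<CounterStat> countersByDepth, int matchedSegments)
    {
        countersByDepth.get(matchedSegments).update(1);
    }
}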
@Test
public void testEmptyBucket()
{
    HiveSplitSource hiveSplitSource = HiveSplitSource.bucketed(
            SESSION,
            "database",
            "table",
            TupleDomain.all(),
            10,
            10,
            new DataSize(1, MEGABYTE),
            new TestingHiveSplitLoader(),
            Executors.newFixedThreadPool(5),
            new CounterStat());
    hiveSplitSource.addToQueue(new TestSplit(0, OptionalInt.of(2)));
    hiveSplitSource.noMoreSplits();
    assertEquals(getSplits(hiveSplitSource, OptionalInt.of(0), 10).size(), 0);
    assertEquals(getSplits(hiveSplitSource, OptionalInt.of(1), 10).size(), 0);
    assertEquals(getSplits(hiveSplitSource, OptionalInt.of(2), 10).size(), 1);
    assertEquals(getSplits(hiveSplitSource, OptionalInt.of(3), 10).size(), 0);
}
@Test
public void testOutstandingSplitCount()
{
    HiveSplitSource hiveSplitSource = HiveSplitSource.allAtOnce(
            SESSION,
            "database",
            "table",
            TupleDomain.all(),
            10,
            10,
            new DataSize(1, MEGABYTE),
            new TestingHiveSplitLoader(),
            Executors.newFixedThreadPool(5),
            new CounterStat());

    // add 10 splits
    for (int i = 0; i < 10; i++) {
        hiveSplitSource.addToQueue(new TestSplit(i));
        assertEquals(hiveSplitSource.getBufferedInternalSplitCount(), i + 1);
    }

    // remove 1 split
    assertEquals(getSplits(hiveSplitSource, 1).size(), 1);
    assertEquals(hiveSplitSource.getBufferedInternalSplitCount(), 9);

    // remove 4 splits
    assertEquals(getSplits(hiveSplitSource, 4).size(), 4);
    assertEquals(hiveSplitSource.getBufferedInternalSplitCount(), 5);

    // try to remove 20 splits, and verify we only got 5
    assertEquals(getSplits(hiveSplitSource, 20).size(), 5);
    assertEquals(hiveSplitSource.getBufferedInternalSplitCount(), 0);
}
private static HiveSplitSource hiveSplitSource(
        BackgroundHiveSplitLoader backgroundHiveSplitLoader,
        TupleDomain<HiveColumnHandle> compactEffectivePredicate)
{
    return HiveSplitSource.allAtOnce(
            SESSION,
            SIMPLE_TABLE.getDatabaseName(),
            SIMPLE_TABLE.getTableName(),
            compactEffectivePredicate,
            1,
            1,
            new DataSize(32, MEGABYTE),
            backgroundHiveSplitLoader,
            EXECUTOR,
            new CounterStat());
}
private SqlTask newSqlTask()
{
    TaskId taskId = new TaskId("query", 0, idGeneator.incrementAndGet());
    URI location = URI.create("fake://task/" + taskId);

    return createSqlTask(
            taskId,
            location,
            "fake",
            new DefaultQueryContext(
                    new QueryId("query"),
                    new DataSize(1, MEGABYTE),
                    new DataSize(2, MEGABYTE),
                    memoryPool,
                    new TestingGcMonitor(),
                    executor,
                    scheduledExecutor,
                    new DataSize(1, GIGABYTE),
                    spillSpaceTracker),
            sqlTaskExecutionFactory,
            executor,
            Functions.identity(),
            new DataSize(32, MEGABYTE),
            new CounterStat());
}
public SqlTask createInitialTask()
{
    TaskId taskId = new TaskId("query", 0, nextTaskId.incrementAndGet());
    URI location = URI.create("fake://task/" + taskId);

    DefaultQueryContext queryContext = new DefaultQueryContext(
            new QueryId("query"),
            new DataSize(1, MEGABYTE),
            new DataSize(2, MEGABYTE),
            new MemoryPool(new MemoryPoolId("test"), new DataSize(1, GIGABYTE)),
            new TestingGcMonitor(),
            taskNotificationExecutor,
            driverYieldExecutor,
            new DataSize(1, MEGABYTE),
            new SpillSpaceTracker(new DataSize(1, GIGABYTE)));

    queryContext.addTaskContext(
            new TaskStateMachine(taskId, taskNotificationExecutor),
            testSessionBuilder().build(),
            false,
            false,
            OptionalInt.empty());

    return createSqlTask(
            taskId,
            location,
            "fake",
            queryContext,
            sqlTaskExecutionFactory,
            taskNotificationExecutor,
            Functions.identity(),
            new DataSize(32, MEGABYTE),
            new CounterStat());
}