@Override
public void release(JobSpecification job) {
    // Returns the capacity that was reserved for this job back to the cluster's
    // available pool.
    final IClusterCapacity required = job.getRequiredClusterCapacity();
    final IClusterCapacity current = resourceManager.getCurrentCapacity();
    final long releasedMemory = required.getAggregatedMemoryByteSize();
    final int releasedCores = required.getAggregatedCores();
    current.setAggregatedMemoryByteSize(current.getAggregatedMemoryByteSize() + releasedMemory);
    current.setAggregatedCores(current.getAggregatedCores() + releasedCores);
    // Sanity check: after a release, available capacity should never exceed the maximum.
    ensureMaxCapacity();
}
private void ensureMaxCapacity() {
    // Logs a warning if the tracked available capacity ever exceeds the cluster's
    // configured maximum — this would indicate an accounting bug (e.g. a double release).
    final IClusterCapacity current = resourceManager.getCurrentCapacity();
    final IReadOnlyClusterCapacity maximum = resourceManager.getMaximumCapacity();
    // Guard clause: nothing to report while both dimensions stay within bounds.
    if (current.getAggregatedCores() <= maximum.getAggregatedCores()
            && current.getAggregatedMemoryByteSize() <= maximum.getAggregatedMemoryByteSize()) {
        return;
    }
    LOGGER.warn("Current cluster available capacity {} is more than its maximum capacity {}", current,
            maximum);
}
}
@Override
public JobSubmissionStatus allocate(JobSpecification job) throws HyracksException {
    // Decides whether a job can run now (EXECUTE) or must wait (QUEUE), reserving
    // its required memory and cores from the current capacity when admitted.
    // Throws if the job could never run, even on a fully idle cluster.
    final IClusterCapacity required = job.getRequiredClusterCapacity();
    final long requiredMemory = required.getAggregatedMemoryByteSize();
    final int requiredCores = required.getAggregatedCores();

    // Reject jobs whose requirements exceed the cluster's absolute maximum.
    final IReadOnlyClusterCapacity maximum = resourceManager.getMaximumCapacity();
    if (requiredMemory > maximum.getAggregatedMemoryByteSize()
            || requiredCores > maximum.getAggregatedCores()) {
        throw HyracksException.create(ErrorCode.JOB_REQUIREMENTS_EXCEED_CAPACITY, required.toString(),
                maximum.toString());
    }

    // Not enough free capacity right now — the job must wait in the queue.
    final IClusterCapacity current = resourceManager.getCurrentCapacity();
    final long availableMemory = current.getAggregatedMemoryByteSize();
    final int availableCores = current.getAggregatedCores();
    if (requiredMemory > availableMemory || requiredCores > availableCores) {
        return JobSubmissionStatus.QUEUE;
    }

    // Reserve the capacity and admit the job for immediate execution.
    current.setAggregatedMemoryByteSize(availableMemory - requiredMemory);
    current.setAggregatedCores(availableCores - requiredCores);
    return JobSubmissionStatus.EXECUTE;
}
private void assertRequiredMemory(List<PlanStage> stages, long expectedMemory) {
    // Computes the aggregated memory requirement for the given plan stages and
    // checks it against the expected value.
    final IClusterCapacity clusterCapacity = ResourceUtils.getStageBasedRequiredCapacity(stages,
            PARALLELISM, FRAME_LIMIT, FRAME_LIMIT, FRAME_LIMIT, FRAME_LIMIT, FRAME_SIZE);
    // Fix: JUnit's assertEquals takes (expected, actual); the arguments were swapped,
    // which made failure messages report the values backwards.
    Assert.assertEquals(expectedMemory, clusterCapacity.getAggregatedMemoryByteSize());
}
}
@Test public void testParallelGroupBy() throws AlgebricksException { IClusterCapacity clusterCapacity = new ClusterCapacity(); RequiredCapacityVisitor visitor = makeComputationCapacityVisitor(PARALLELISM, clusterCapacity); // Constructs a parallel group-by query plan. GroupByOperator globalGby = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.PARTITIONED); ExchangeOperator exchange = new ExchangeOperator(); exchange.setPhysicalOperator(new HashPartitionExchangePOperator(Collections.emptyList(), null)); GroupByOperator localGby = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.LOCAL); globalGby.getInputs().add(new MutableObject<>(exchange)); exchange.getInputs().add(new MutableObject<>(localGby)); // Verifies the calculated cluster capacity requirement for the test quer plan. globalGby.accept(visitor, null); Assert.assertTrue(clusterCapacity.getAggregatedCores() == PARALLELISM); Assert.assertTrue(clusterCapacity.getAggregatedMemoryByteSize() == 2 * MEMORY_BUDGET * PARALLELISM + 2 * FRAME_SIZE * PARALLELISM * PARALLELISM); }
@Test public void testUnPartitionedGroupBy() throws AlgebricksException { IClusterCapacity clusterCapacity = new ClusterCapacity(); RequiredCapacityVisitor visitor = makeComputationCapacityVisitor(PARALLELISM, clusterCapacity); // Constructs a parallel group-by query plan. GroupByOperator globalGby = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); ExchangeOperator exchange = new ExchangeOperator(); exchange.setPhysicalOperator(new OneToOneExchangePOperator()); exchange.setExecutionMode(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); GroupByOperator localGby = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); globalGby.getInputs().add(new MutableObject<>(exchange)); exchange.getInputs().add(new MutableObject<>(localGby)); // Verifies the calculated cluster capacity requirement for the test quer plan. globalGby.accept(visitor, null); Assert.assertTrue(clusterCapacity.getAggregatedCores() == 1); Assert.assertTrue(clusterCapacity.getAggregatedMemoryByteSize() == 2 * MEMORY_BUDGET + FRAME_SIZE); }
Assert.assertTrue(clusterCapacity.getAggregatedMemoryByteSize() == 3 * MEMORY_BUDGET * PARALLELISM + 2 * 2L * PARALLELISM * PARALLELISM * FRAME_SIZE + 3 * FRAME_SIZE * PARALLELISM);
@Test public void testUnPartitionedJoin() throws AlgebricksException { IClusterCapacity clusterCapacity = new ClusterCapacity(); RequiredCapacityVisitor visitor = makeComputationCapacityVisitor(PARALLELISM, clusterCapacity); // Constructs a join query plan. InnerJoinOperator join = makeJoinOperator(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); // Left child plan of the join. ExchangeOperator leftChildExchange = new ExchangeOperator(); leftChildExchange.setExecutionMode(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); leftChildExchange.setPhysicalOperator(new OneToOneExchangePOperator()); InnerJoinOperator leftChild = makeJoinOperator(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); join.getInputs().add(new MutableObject<>(leftChildExchange)); leftChildExchange.getInputs().add(new MutableObject<>(leftChild)); EmptyTupleSourceOperator ets = new EmptyTupleSourceOperator(); ets.setExecutionMode(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); leftChild.getInputs().add(new MutableObject<>(ets)); leftChild.getInputs().add(new MutableObject<>(ets)); // Right child plan of the join. ExchangeOperator rightChildExchange = new ExchangeOperator(); rightChildExchange.setExecutionMode(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); rightChildExchange.setPhysicalOperator(new OneToOneExchangePOperator()); GroupByOperator rightChild = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); join.getInputs().add(new MutableObject<>(rightChildExchange)); rightChildExchange.getInputs().add(new MutableObject<>(rightChild)); rightChild.getInputs().add(new MutableObject<>(ets)); // Verifies the calculated cluster capacity requirement for the test quer plan. join.accept(visitor, null); Assert.assertTrue(clusterCapacity.getAggregatedCores() == 1); Assert.assertTrue(clusterCapacity.getAggregatedMemoryByteSize() == 3 * MEMORY_BUDGET + 5L * FRAME_SIZE); }