/**
 * Folds one application's demand into this queue's running demand total,
 * clamping the result to the queue's maximum allowed resources.
 *
 * @param sched the application attempt whose demand is accumulated
 * @param maxRes per-dimension cap applied to the accumulated demand
 */
private void updateDemandForApp(FSAppAttempt sched, Resource maxRes) {
  // Refresh the app's own demand before folding it into the queue total.
  sched.updateDemand();
  final Resource appDemand = sched.getDemand();
  if (LOG.isDebugEnabled()) {
    // Note: "demand" here is the queue total *before* this app is added.
    LOG.debug("Counting resource from " + sched.getName() + " " + appDemand
        + "; Total resource consumption for " + getName() + " now " + demand);
  }
  // Accumulate, then clamp each resource dimension to the queue maximum.
  demand = Resources.componentwiseMin(Resources.add(demand, appDemand), maxRes);
}
// NOTE(review): fragment — the enclosing @Test method header is not visible
// in this chunk. Exercises node-count-based delay scheduling:
// getAllowedLocalityLevel(prio, numNodes, nodeThreshold, rackThreshold).
RMContext rmContext = resourceManager.getRMContext();
FSAppAttempt schedulerApp =
    new FSAppAttempt(scheduler, applicationAttemptId, "user1", queue ,
        null, rmContext);

// Fresh app with no scheduling opportunities: stays NODE_LOCAL.
assertEquals(NodeType.NODE_LOCAL, schedulerApp.getAllowedLocalityLevel(
    prio, 10, nodeLocalityThreshold, rackLocalityThreshold));

// One missed opportunity: still below the node-locality threshold.
schedulerApp.addSchedulingOpportunity(prio);
assertEquals(NodeType.NODE_LOCAL, schedulerApp.getAllowedLocalityLevel(
    prio, 10, nodeLocalityThreshold, rackLocalityThreshold));

// Second missed opportunity crosses the threshold: relax to RACK_LOCAL.
schedulerApp.addSchedulingOpportunity(prio);
assertEquals(NodeType.RACK_LOCAL, schedulerApp.getAllowedLocalityLevel(
    prio, 10, nodeLocalityThreshold, rackLocalityThreshold));

// Resetting the allowed level and the opportunity count starts the
// progression over from NODE_LOCAL.
schedulerApp.resetAllowedLocalityLevel(prio, NodeType.NODE_LOCAL);
schedulerApp.resetSchedulingOpportunities(prio);
assertEquals(NodeType.NODE_LOCAL, schedulerApp.getAllowedLocalityLevel(
    prio, 10, nodeLocalityThreshold, rackLocalityThreshold));

// The same two-opportunity progression repeats after the reset.
schedulerApp.addSchedulingOpportunity(prio);
assertEquals(NodeType.NODE_LOCAL, schedulerApp.getAllowedLocalityLevel(
    prio, 10, nodeLocalityThreshold, rackLocalityThreshold));
schedulerApp.addSchedulingOpportunity(prio);
assertEquals(NodeType.RACK_LOCAL, schedulerApp.getAllowedLocalityLevel(
    prio, 10, nodeLocalityThreshold, rackLocalityThreshold));
// NOTE(review): this snippet is truncated — the priority loop header, the
// delay-scheduling branch condition, and several closing braces are missing
// from the visible source. Tokens are preserved exactly as found.
private Resource assignContainer(FSSchedulerNode node, boolean reserved) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Node offered to app: " + getName() + " reserved: " + reserved);
  // Presumably iterated by a for-loop over priorities — TODO confirm against
  // the full source; the loop header is not visible here.
  getPriorities();
  // Skip priorities with no outstanding demand or no request usable on
  // this node.
  if (getTotalRequiredResources(priority) <= 0
      || !hasContainerForNode(priority, node)) {
    continue;
  // Count a missed/used scheduling opportunity toward the delay-scheduling
  // thresholds for this priority.
  addSchedulingOpportunity(priority);
  // First container for a managed AM: enforce the queue's AM-share limit
  // before allocating anything.
  if (getLiveContainers().size() == 0 && !getUnmanagedAM()) {
    if (!getQueue().canRunAppAM(getAMResource())) {
      return Resources.none();
  // Outstanding requests at rack and node scope for this priority.
  ResourceRequest rackLocalRequest = getResourceRequest(priority,
      node.getRackName());
  ResourceRequest localRequest = getResourceRequest(priority,
      node.getNodeName());
  // Time-based delay scheduling (the branch condition selecting this over
  // the node-count-based variant is not visible in this snippet).
  allowedLocality = getAllowedLocalityLevelByTime(priority,
      scheduler.getNodeLocalityDelayMs(),
      scheduler.getRackLocalityDelayMs(),
      scheduler.getClock().getTime());
  } else {
    // Node-count-based delay scheduling.
    allowedLocality = getAllowedLocalityLevel(priority,
        scheduler.getNumClusterNodes(),
        scheduler.getNodeLocalityThreshold(),
/**
 * Short human-readable form used in scheduler logs: the attempt id followed
 * by the attempt's current resource consumption.
 */
@Override
public String toString() {
  StringBuilder text = new StringBuilder();
  text.append(getApplicationAttemptId());
  text.append(" Alloc: ");
  text.append(getCurrentConsumption());
  return text.toString();
}
@Override public Resource getResourceUsage() { // Here the getPreemptedResources() always return zero, except in // a preemption round // In the common case where preempted resource is zero, return the // current consumption Resource object directly without calling // Resources.subtract which creates a new Resource object for each call. return getPreemptedResources().equals(Resources.none()) ? getCurrentConsumption() : Resources.subtract(getCurrentConsumption(), getPreemptedResources()); }
/**
 * Preempt a running container according to the priority
 *
 * @return the chosen live container, or {@code null} if every live
 *         container is already marked for preemption
 */
@Override
public RMContainer preemptContainer() {
  if (LOG.isDebugEnabled()) {
    LOG.debug("App " + getName() + " is going to preempt a running "
        + "container");
  }
  // Scan the live containers for the best candidate per the comparator,
  // skipping any container already slated for preemption.
  RMContainer candidate = null;
  for (RMContainer live : getLiveContainers()) {
    if (getPreemptionContainers().contains(live)) {
      continue;
    }
    if (candidate == null || comparator.compare(candidate, live) > 0) {
      candidate = live;
    }
  }
  return candidate;
}
}
// NOTE(review): fragment of FairScheduler#allocate — several statements and
// closing braces are missing from the visible source; tokens preserved as-is.
// First real request from a managed AM: record its AM container size.
if (!application.getUnmanagedAM() && ask.size() == 1
    && application.getLiveContainers().isEmpty()) {
  application.setAMResource(ask.get(0).getCapability());
LOG.debug("allocate: pre-update" + " applicationAttemptId=" + appAttemptId
    + " application=" + application.getApplicationId());
application.showRequests();
// Merge the new asks into the app's outstanding resource requests.
application.updateResourceRequests(ask);
application.showRequests();
// NOTE(review): dangling expression below — presumably the tail of a
// truncated LOG.debug("allocate: post-update" ...) call; verify against
// the full source.
" applicationAttemptId=" + appAttemptId + " #ask=" + ask.size()
    + " reservation= " + application.getCurrentReservation());
LOG.debug("Preempting " + application.getPreemptionContainers().size()
    + " container(s)");
// Collect ids of containers the scheduler wants this app to release.
for (RMContainer container : application.getPreemptionContainers()) {
  preemptionContainerIds.add(container.getContainerId());
application.updateBlacklist(blacklistAdditions, blacklistRemovals);
// Hand back newly allocated containers plus NM tokens, and report the
// app's current headroom for metrics.
ContainersAndNMTokensAllocation allocation =
    application.pullNewlyAllocatedContainersAndNMTokens();
Resource headroom = application.getHeadroom();
application.setApplicationHeadroomForMetrics(headroom);
// NOTE(review): fragment of FairScheduler#allocate — truncated mid-LOG.debug
// at the end; tokens preserved as-is.
// Reject allocate calls arriving for a stale/removed attempt id.
if (!application.getApplicationAttemptId().equals(appAttemptId)) {
  LOG.error("Calling allocate on previous or removed "
      + "or non existent application attempt " + appAttemptId);
ApplicationId applicationId = application.getApplicationId();
FSLeafQueue queue = application.getQueue();
// Filter out asks exceeding the queue's configured maximum resources.
List<MaxResourceValidationResult> invalidAsks =
    validateResourceRequests(ask, queue);
application.recordContainerRequestTime(getClock().getTime());
// Serialize request updates against concurrent scheduling for this app.
ReentrantReadWriteLock.WriteLock lock = application.getWriteLock();
lock.lock();
try {
  LOG.debug(
      "allocate: pre-update" + " applicationAttemptId=" + appAttemptId
          + " application=" + application.getApplicationId());
  application.showRequests();
  // Merge the new asks into the app's outstanding resource requests.
  application.updateResourceRequests(ask);
  application.showRequests();
application.getPreemptionContainerIds();
if (LOG.isDebugEnabled()) {
  LOG.debug(
      "allocate: post-update" + " applicationAttemptId=" + appAttemptId
// NOTE(review): truncated snippet — the scheduler-key loop header, the
// ternary's condition, the delay-scheduling branch condition, and several
// closing braces are missing from the visible source; tokens preserved as-is.
@SuppressWarnings("deprecation")
private Resource assignContainer(FSSchedulerNode node, boolean reserved) {
  if (LOG.isTraceEnabled()) {
    LOG.trace("Node offered to app: " + getName() + " reserved: " + reserved);
  // For a reserved node only the reserved key is considered; otherwise all
  // of the app's scheduler keys (ternary condition not visible here).
  Collections.singletonList(
      node.getReservedContainer().getReservedSchedulerKey()) :
      getSchedulerKeys();
  // Skip keys with no request usable on this node (unless reserved).
  if (!reserved && !hasContainerForNode(schedulerKey, node)) {
    continue;
  // Count a scheduling opportunity toward delay-scheduling thresholds.
  addSchedulingOpportunity(schedulerKey);
  // Outstanding asks at rack and node scope for this scheduler key.
  PendingAsk rackLocalPendingAsk = getPendingAsk(schedulerKey,
      node.getRackName());
  PendingAsk nodeLocalPendingAsk = getPendingAsk(schedulerKey,
      node.getNodeName());
  // Time-based delay scheduling (selecting branch condition not visible).
  allowedLocality = getAllowedLocalityLevelByTime(schedulerKey,
      scheduler.getNodeLocalityDelayMs(),
      scheduler.getRackLocalityDelayMs(),
      scheduler.getClock().getTime());
  } else {
    // Node-count-based delay scheduling.
    allowedLocality = getAllowedLocalityLevel(schedulerKey,
        scheduler.getNumClusterNodes(),
        scheduler.getNodeLocalityThreshold(),
  // Node-local ask wins when present.
  return assignContainer(node, nodeLocalPendingAsk, NodeType.NODE_LOCAL,
      reserved, schedulerKey);
// NOTE(review): fragment — the enclosing @Test method header and most of the
// assertEquals(...) wrappers around these calls were lost in extraction (note
// the unmatched trailing parentheses); tokens preserved as-is. Exercises the
// time-based variant: getAllowedLocalityLevelByTime(prio, nodeDelay,
// rackDelay, now).
ApplicationAttemptId applicationAttemptId = createAppAttemptId(1, 1);
FSAppAttempt schedulerApp =
    new FSAppAttempt(scheduler, applicationAttemptId, "user1", queue,
        null, rmContext);
schedulerApp.getAllowedLocalityLevelByTime(prio,
    nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
schedulerApp.getAllowedLocalityLevelByTime(prio,
    nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
schedulerApp.getAllowedLocalityLevelByTime(prio,
    nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
// Resetting the level and opportunities (time-stamped variant) restarts the
// relaxation progression from NODE_LOCAL.
schedulerApp.resetAllowedLocalityLevel(prio, NodeType.NODE_LOCAL);
schedulerApp.resetSchedulingOpportunities(prio, clock.getTime());
assertEquals(NodeType.NODE_LOCAL,
    schedulerApp.getAllowedLocalityLevelByTime(prio,
        nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
schedulerApp.getAllowedLocalityLevelByTime(prio,
    nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
schedulerApp.getAllowedLocalityLevelByTime(prio,
    nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
private void verifyNoPreemption() throws InterruptedException { // Sleep long enough to ensure not even one container is preempted. for (int i = 0; i < 100; i++) { if (greedyApp.getLiveContainers().size() != 8) { break; } Thread.sleep(10); } assertEquals(8, greedyApp.getLiveContainers().size()); }
// NOTE(review): fragment — the events between the last two assertions
// (which expect different reservation counts) are not visible in this
// chunk; tokens preserved as-is.
assertEquals(1, app.getLiveContainers().size());
// A further node update should produce a reservation rather than an
// allocation.
scheduler.handle(nodeUpdateEvent);
assertEquals(1, app.getReservedContainers().size());
// After the (elided) intervening events, the reservation is released.
assertEquals(0, app.getReservedContainers().size());
// NOTE(review): fragment of a maxAMShare test (legacy int getMemory() API);
// the enclosing method and setup are not visible; tokens preserved as-is.
// First node update: app1's AM fits under queue1's AM-share limit.
scheduler.handle(updateEvent);
assertEquals("Application1's AM requests 1024 MB memory",
    1024, app1.getAMResource().getMemory());
assertEquals("Application1's AM should be running",
    1, app1.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 1024 MB memory",
    1024, queue1.getAmResourceUsage().getMemory());
// Second node update: app2's AM is blocked by queue2's AM-share limit, so
// nothing is allocated and queue2's AM usage stays zero.
scheduler.handle(updateEvent);
assertEquals("Application2's AM requests 1024 MB memory",
    1024, app2.getAMResource().getMemory());
assertEquals("Application2's AM should not be running",
    0, app2.getLiveContainers().size());
assertEquals("Queue2's AM resource usage should be 0 MB memory",
    0, queue2.getAmResourceUsage().getMemory());
// NOTE(review): fragment — app1's mock creation precedes this chunk. Both
// mocked attempts report maximum demand but zero usage, i.e. fully starved.
Mockito.when(app1.getDemand()).thenReturn(maxResource);
Mockito.when(app1.getResourceUsage()).thenReturn(Resources.none());
FSAppAttempt app2 = mock(FSAppAttempt.class);
Mockito.when(app2.getDemand()).thenReturn(maxResource);
Mockito.when(app2.getResourceUsage()).thenReturn(Resources.none());
// NOTE(review): fragment — the blacklistAdditions/blacklistRemovals lists
// are mutated between the updateBlacklist calls in code not visible here;
// tokens preserved as-is.
FSAppAttempt app = scheduler.getSchedulerApp(id11);
assertNotNull(app);
// Queue usage starts at zero before any allocation.
Resource queueUsage = app.getQueue().getResourceUsage();
assertEquals(0, queueUsage.getMemorySize());
assertEquals(0, queueUsage.getVirtualCores());
// Spy so the AM-container special-casing in blacklist handling is bypassed.
FSAppAttempt spyApp = spy(app);
doReturn(false)
    .when(spyApp).isWaitingForAMContainer();
// Blacklist n1: headroom shrinks to what n2 alone can offer.
spyApp.updateBlacklist(blacklistAdditions, blacklistRemovals);
spyApp.getQueue().setFairShare(clusterResource);
assertTrue(spyApp.isPlaceBlacklisted(n1.getNodeName()));
assertFalse(spyApp.isPlaceBlacklisted(n2.getNodeName()));
assertEquals(n2.getUnallocatedResource(), spyApp.getHeadroom());
// Swap the blacklist to n2: headroom becomes n1's unallocated resource.
spyApp.updateBlacklist(blacklistAdditions, blacklistRemovals);
assertFalse(spyApp.isPlaceBlacklisted(n1.getNodeName()));
assertTrue(spyApp.isPlaceBlacklisted(n2.getNodeName()));
assertEquals(n1.getUnallocatedResource(), spyApp.getHeadroom());
// Clear the blacklist: full cluster headroom is available again.
spyApp.updateBlacklist(blacklistAdditions, blacklistRemovals);
assertFalse(spyApp.isPlaceBlacklisted(n1.getNodeName()));
assertFalse(spyApp.isPlaceBlacklisted(n2.getNodeName()));
assertEquals(clusterResource, spyApp.getHeadroom());
/** * Removes the given app from this queue. * @return whether or not the app was runnable */ boolean removeApp(FSAppAttempt app) { boolean runnable = false; // Remove app from runnable/nonRunnable list while holding the write lock writeLock.lock(); try { runnable = runnableApps.remove(app); if (!runnable) { // removeNonRunnableApp acquires the write lock again, which is fine if (!removeNonRunnableApp(app)) { throw new IllegalStateException("Given app to remove " + app + " does not exist in queue " + this); } } } finally { writeLock.unlock(); } // Update AM resource usage if needed. If isAMRunning is true, we're not // running an unmanaged AM. if (runnable && app.isAmRunning()) { Resources.subtractFrom(amResourceUsage, app.getAMResource()); getMetrics().setAMResourceUsage(amResourceUsage); } decUsedResource(app.getResourceUsage()); return runnable; }
// NOTE(review): fragment of a maxAMShare test (long getMemorySize() API);
// the enclosing method and setup are not visible; tokens preserved as-is.
// First node update: app1's AM is admitted; queue1 AM usage = 1024 MB.
scheduler.handle(updateEvent);
assertEquals("Application1's AM requests 1024 MB memory",
    1024, app1.getAMResource().getMemorySize());
assertEquals("Application1's AM should be running",
    1, app1.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 1024 MB memory",
    1024, queue1.getAmResourceUsage().getMemorySize());
// Second node update: app2's AM also fits; queue1 AM usage = 2048 MB.
scheduler.handle(updateEvent);
assertEquals("Application2's AM requests 1024 MB memory",
    1024, app2.getAMResource().getMemorySize());
assertEquals("Application2's AM should be running",
    1, app2.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
    2048, queue1.getAmResourceUsage().getMemorySize());
// Third node update: app3's AM would exceed the AM share, so it is
// neither started nor does it change the recorded AM resource.
scheduler.handle(updateEvent);
assertEquals("Application3's AM resource shouldn't be updated",
    0, app3.getAMResource().getMemorySize());
assertEquals("Application3's AM should not be running",
    0, app3.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
    2048, queue1.getAmResourceUsage().getMemorySize());
// Fourth node update: a non-AM task container for app1 is allocated;
// AM usage is unchanged.
scheduler.handle(updateEvent);
assertEquals("Application1 should have two running containers",
    2, app1.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
    2048, queue1.getAmResourceUsage().getMemorySize());
public synchronized ResourceWeights getAppWeight(FSAppAttempt app) { double weight = 1.0; if (sizeBasedWeight) { // Set weight based on current memory demand weight = Math.log1p(app.getDemand().getMemory()) / Math.log(2); } weight *= app.getPriority().getPriority(); if (weightAdjuster != null) { // Run weight through the user-supplied weightAdjuster weight = weightAdjuster.adjustWeight(app, weight); } ResourceWeights resourceWeights = app.getResourceWeights(); resourceWeights.setWeight((float)weight); return resourceWeights; }
// NOTE(review): fragment — note the result-discarding expression below
// (presumably a truncated "ContainerId containerId = ..." assignment, since
// containerId is used in the allocate call) and the assertEquals truncated
// at the end; tokens preserved as-is.
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Basic allocation failed", 1, app1.getLiveContainers().size());
// app2's lower-priority request cannot be satisfied, so it reserves.
scheduler.handle(updateEvent);
assertEquals("Reservation at lower priority failed",
    1, app2.getReservedContainers().size());
app1.getLiveContainers().iterator().next().getContainerId();
// Release app1's container; the freed space should satisfy the reservation.
scheduler.allocate(app1.getApplicationAttemptId(), new ArrayList<>(), null,
    Arrays.asList(containerId), null, null, NULL_UPDATE_REQUESTS);
Collection<RMContainer> liveContainers = app2.getLiveContainers();
assertEquals("Allocation post completion failed", 1, liveContainers.size());
assertEquals("High prio container allocated against low prio reservation",
@Test
/**
 * Ensure that when negative parameters are given (signaling delay scheduling
 * is not in use), the least restrictive locality level (OFF_SWITCH) is
 * returned immediately, with no scheduling opportunities required.
 */
public void testLocalityLevelWithoutDelays() {
  FSLeafQueue queue = Mockito.mock(FSLeafQueue.class);
  Priority prio = Mockito.mock(Priority.class);
  Mockito.when(prio.getPriority()).thenReturn(1);

  RMContext rmContext = resourceManager.getRMContext();
  ApplicationAttemptId applicationAttemptId = createAppAttemptId(1, 1);
  FSAppAttempt schedulerApp =
      new FSAppAttempt(scheduler, applicationAttemptId, "user1", queue ,
          null, rmContext);
  // Negative thresholds disable delay scheduling entirely.
  assertEquals(NodeType.OFF_SWITCH, schedulerApp.getAllowedLocalityLevel(
      prio, 10, -1.0, -1.0));
}