/**
 * Create a resource holder for the object with the given id, starting with
 * empty available and total resource offers.
 *
 * @param id identifier of the object (e.g. node or rack) these resources describe
 */
public ObjectResources(String id) {
    this.id = id;
    this.totalResources = new NormalizedResourceOffer();
    this.availableResources = new NormalizedResourceOffer();
}
/**
 * Get the total CPU on this supervisor in % CPU.
 *
 * @return the total CPU capacity, in percent of a single core
 */
public double getTotalCpu() {
    return totalResources.getTotalCpu();
}
/**
 * Calculate the min percentage used of the resource.
 *
 * @see NormalizedResources#calculateMinPercentageUsedBy(org.apache.storm.scheduler.resource.normalization.NormalizedResources, double,
 *     double)
 */
public double calculateMinPercentageUsedBy(NormalizedResourceOffer used) {
    double totalMemory = getTotalMemoryMb();
    double usedMemory = used.getTotalMemoryMb();
    return normalizedResources.calculateMinPercentageUsedBy(used.getNormalizedResources(), totalMemory, usedMemory);
}
int neededSlots = 0; NormalizedResourceOffer available = cluster.getNonBlacklistedClusterAvailableResources(blacklistedNodeIds); NormalizedResourceOffer needed = new NormalizedResourceOffer(); needed.add(resources); NormalizedResourceOffer shortage = new NormalizedResourceOffer(needed); shortage.remove(available, cluster.getResourceMetrics()); int shortageSlots = neededSlots - availableSlots; LOG.debug("Need {} and {} slots.", needed, neededSlots); LOG.debug("Shortage {} and {} slots.", shortage, shortageSlots); if (shortage.areAnyOverZero() || shortageSlots > 0) { LOG.info("Need {} and {} slots more. Releasing some blacklisted nodes to cover it.", shortage, shortageSlots); int sdAvailableSlots = cluster.getAvailablePorts(sd).size(); readyToRemove.add(supervisorId); shortage.remove(sdAvailable, cluster.getResourceMetrics()); shortageSlots -= sdAvailableSlots; LOG.info("Releasing {} with {} and {} slots leaving {} and {} slots to go", supervisorId, if (!shortage.areAnyOverZero() && shortageSlots <= 0) {
@Override
public NormalizedResourceOffer getNonBlacklistedClusterAvailableResources(Collection<String> blacklistedSupervisorIds) {
    NormalizedResourceOffer remaining = new NormalizedResourceOffer();
    for (SupervisorDetails sup : supervisors.values()) {
        String supId = sup.getId();
        // Skip supervisors blacklisted by either the cluster or the caller-provided set.
        if (isBlackListed(supId) || blacklistedSupervisorIds.contains(supId)) {
            continue;
        }
        remaining.add(sup.getTotalResources());
        remaining.remove(getAllScheduledResourcesForNode(supId), getResourceMetrics());
    }
    return remaining;
}
@Override
public NormalizedResourceOffer getAvailableResources(SupervisorDetails sd) {
    String nodeId = sd.getId();
    // Start from the node's full capacity and subtract everything scheduled onto it.
    NormalizedResourceOffer remaining = new NormalizedResourceOffer(sd.getTotalResources());
    for (SchedulerAssignment assignment : assignments.values()) {
        for (Entry<WorkerSlot, WorkerResources> slotEntry : assignment.getScheduledResources().entrySet()) {
            if (nodeId.equals(slotEntry.getKey().getNodeId())) {
                remaining.remove(slotEntry.getValue(), getResourceMetrics());
            }
        }
    }
    return remaining;
}
@Test
public void testNodeOverExtendedCpu() {
    // Removing 110% CPU from a 100% CPU offer must clamp the result at zero,
    // not go negative.
    NormalizedResourceOffer offered = createOffer(100.0, 0.0);
    NormalizedResourceOffer used = createOffer(110.0, 0.0);
    offered.remove(used, new ResourceMetrics(new StormMetricsRegistry()));
    Assert.assertEquals(0.0, offered.getTotalCpu(), 0.001);
}
@Test
public void testNodeOverExtendedMemory() {
    // Removing 10 MB from a 5 MB offer must clamp the result at zero,
    // not go negative.
    NormalizedResourceOffer offered = createOffer(0.0, 5.0);
    NormalizedResourceOffer used = createOffer(0.0, 10.0);
    offered.remove(used, new ResourceMetrics(new StormMetricsRegistry()));
    Assert.assertEquals(0.0, offered.getTotalMemoryMb(), 0.001);
}
if (!resourcesAvailable.couldHoldIgnoringSharedMemory(requestedResources)) { return false; double memoryAvailable = resourcesAvailable.getTotalMemoryMb();
/**
 * Get the total Memory on this supervisor in MB.
 *
 * @return the total memory capacity in megabytes
 */
public double getTotalMemory() {
    return totalResources.getTotalMemoryMb();
}
NormalizedResourceRequest requestedResources = topologyDetails.getTotalResources(exec); for (ObjectResources objectResources : affinityBasedAllResources.objectResources) { objectResources.availableResources.updateForRareResourceAffinity(requestedResources); return 1; } else { double o1Avg = allResources.availableResourcesOverall.calculateAveragePercentageUsedBy(o1.availableResources); double o2Avg = allResources.availableResourcesOverall.calculateAveragePercentageUsedBy(o2.availableResources);
allResources.availableResourcesOverall.calculateMinPercentageUsedBy(objectResources.availableResources); if (LOG.isTraceEnabled()) { LOG.trace("Effective resources for {} is {}, and numExistingSchedule is {}", return 1; } else { double o1Avg = allResources.availableResourcesOverall.calculateAveragePercentageUsedBy(o1.availableResources); double o2Avg = allResources.availableResourcesOverall.calculateAveragePercentageUsedBy(o2.availableResources);
private AllResources createClusterAllResources() { AllResources allResources = new AllResources("Cluster"); List<ObjectResources> racks = allResources.objectResources; //This is the first time so initialize the resources. for (Map.Entry<String, List<String>> entry : networkTopography.entrySet()) { String rackId = entry.getKey(); List<String> nodeHosts = entry.getValue(); ObjectResources rack = new ObjectResources(rackId); racks.add(rack); for (String nodeHost : nodeHosts) { for (RAS_Node node : hostnameToNodes(nodeHost)) { rack.availableResources.add(node.getTotalAvailableResources()); rack.totalResources.add(node.getTotalAvailableResources()); } } allResources.totalResourcesOverall.add(rack.totalResources); allResources.availableResourcesOverall.add(rack.availableResources); } LOG.debug( "Cluster Overall Avail [ {} ] Total [ {} ]", allResources.availableResourcesOverall, allResources.totalResourcesOverall); return allResources; }
/**
 * Is there any possibility that exec could ever fit on this node.
 *
 * @param exec the executor to schedule
 * @param td the topology the executor is a part of
 * @return true if there is the possibility it might fit, no guarantee that it will, or false if there is no
 *     way it would ever fit.
 */
public boolean couldEverFit(ExecutorDetails exec, TopologyDetails td) {
    NormalizedResourceOffer currentlyAvailable = getTotalAvailableResources();
    NormalizedResourceRequest request = td.getTotalResources(exec);
    // A dead node can never host the executor, regardless of resources.
    return isAlive && currentlyAvailable.couldHoldIgnoringSharedMemory(request);
}
/**
 * Gets all available resources for this node.
 *
 * @return All of the available resources.
 */
public NormalizedResourceOffer getTotalAvailableResources() {
    if (sup == null) {
        // No supervisor is registered for this node, so nothing is available.
        return new NormalizedResourceOffer();
    }
    NormalizedResourceOffer remaining = new NormalizedResourceOffer(sup.getTotalResources());
    boolean clampedNegative =
        remaining.remove(cluster.getAllScheduledResourcesForNode(sup.getId()), cluster.getResourceMetrics());
    // Log the underage only once per node to avoid flooding the logs.
    if (clampedNegative && !loggedUnderageUsage) {
        LOG.error("Resources on {} became negative and was clamped to 0 {}.", hostname, remaining);
        loggedUnderageUsage = true;
    }
    return remaining;
}
/**
 * Gets the available memory resources for this node.
 *
 * @return the available memory for this node
 */
public double getAvailableMemoryResources() {
    NormalizedResourceOffer remaining = getTotalAvailableResources();
    return remaining.getTotalMemoryMb();
}
/**
 * Create an aggregate resource holder with the given identifier, starting with
 * empty overall available and total offers.
 *
 * @param identifier label for the scope being aggregated (e.g. "Cluster")
 */
public AllResources(String identifier) {
    this.identifier = identifier;
    this.totalResourcesOverall = new NormalizedResourceOffer();
    this.availableResourcesOverall = new NormalizedResourceOffer();
}
/**
 * Calculate the average percentage used.
 *
 * @see NormalizedResources#calculateAveragePercentageUsedBy(org.apache.storm.scheduler.resource.normalization.NormalizedResources,
 *     double, double)
 */
public double calculateAveragePercentageUsedBy(NormalizedResourceOffer used) {
    double totalMemory = getTotalMemoryMb();
    double usedMemory = used.getTotalMemoryMb();
    return normalizedResources.calculateAveragePercentageUsedBy(used.getNormalizedResources(), totalMemory, usedMemory);
}
/**
 * Check if resources might be able to fit.
 *
 * @see NormalizedResources#couldHoldIgnoringSharedMemory(org.apache.storm.scheduler.resource.normalization.NormalizedResources, double,
 *     double)
 */
public boolean couldHoldIgnoringSharedMemory(NormalizedResourcesWithMemory other) {
    double thisMemory = getTotalMemoryMb();
    double otherMemory = other.getTotalMemoryMb();
    return normalizedResources.couldHoldIgnoringSharedMemory(other.getNormalizedResources(), thisMemory, otherMemory);
}