/**
 * Computes the memory currently free across all RUNNING NodeManagers.
 *
 * @param yarnClient client used to query the ResourceManager for node reports
 * @return the cluster-wide free memory, the largest free amount on any single
 *         node (the effective per-container limit), and the per-node free list
 * @throws YarnException if the node reports cannot be retrieved
 * @throws IOException on a communication failure with the ResourceManager
 */
private ClusterResourceDescription getCurrentFreeClusterResources(YarnClient yarnClient) throws YarnException, IOException {
    final List<NodeReport> reports = yarnClient.getNodeReports(NodeState.RUNNING);
    final int[] freePerNode = new int[reports.size()];
    int clusterFreeMemory = 0;
    int largestFreeOnOneNode = 0;
    int idx = 0;
    for (NodeReport report : reports) {
        // getUsed() may be null for a node that has not reported usage yet.
        final int usedMemory = report.getUsed() == null ? 0 : report.getUsed().getMemory();
        final int freeMemory = report.getCapability().getMemory() - usedMemory;
        freePerNode[idx++] = freeMemory;
        clusterFreeMemory += freeMemory;
        largestFreeOnOneNode = Math.max(largestFreeOnOneNode, freeMemory);
    }
    return new ClusterResourceDescription(clusterFreeMemory, largestFreeOnOneNode, freePerNode);
}
// NOTE(review): fragment — 'totalMemory' and 'nodes' are declared outside this view,
// and the enclosing loop/method close beyond it; cannot document the full contract here.
// Accumulates the cluster-wide capability totals (memory in MB and virtual cores)
// over every node report in 'nodes'.
int totalCores = 0;
for (NodeReport rep : nodes) {
    final Resource res = rep.getCapability();
    totalMemory += res.getMemory();
    totalCores += res.getVirtualCores();
/**
 * Populates the host-to-HTTP-address map and the host-to-report map from the
 * current YARN node reports, then logs a one-line summary per node at INFO.
 *
 * @throws YarnFacadeException if the node reports cannot be fetched
 */
private void buildNodeMap() throws YarnFacadeException {
    final List<NodeReport> reports = yarn.getNodeReports();
    for (NodeReport report : reports) {
        final String host = report.getNodeId().getHost();
        nodeMap.put(host, report.getHttpAddress());
        yarnNodes.put(host, report);
    }
    // Guarded so the per-node summary strings are only built when INFO is on.
    if (LOG.isInfoEnabled()) {
        LOG.info("YARN Node report");
        for (NodeReport report : reports) {
            LOG.info("Node: " + report.getHttpAddress()
                + ", Rack: " + report.getRackName()
                + " has " + report.getCapability().getMemory() + " MB, "
                + report.getCapability().getVirtualCores() + " vcores, labels: "
                + report.getNodeLabels());
        }
    }
}
// NOTE(review): fragment — 'numYarnMaxVcores' is declared outside this view.
// Largest vcore capability among all RUNNING NodeManagers; 0 when none are running.
numYarnMaxVcores = yarnClient.getNodeReports(NodeState.RUNNING)
    .stream()
    .mapToInt(report -> report.getCapability().getVirtualCores())
    .max()
    .orElse(0);
/**
 * Callback for YARN node-state changes: keeps the local node map in sync.
 * RUNNING nodes are (re)registered with their capability; nodes in any other
 * state are evicted from the map.
 *
 * @param nodeReports the batch of updated node reports from YARN
 */
@Override
public void onNodesUpdated(List<NodeReport> nodeReports) {
    LOG.debug("Received nodes update for '{}' nodes", nodeReports.size());
    for (NodeReport report : nodeReports) {
        final boolean running = report.getNodeState() == NodeState.RUNNING;
        if (running) {
            String name = getNodeName(report.getNodeId());
            nodes.put(name, report.getCapability());
            LOG.debug("Added node '{}' with '{}' cpus and '{}' memory",
                name,
                report.getCapability().getVirtualCores(),
                report.getCapability().getMemory());
        } else {
            LOG.debug("Removed node '{}'", report.getNodeId());
            nodes.remove(getNodeName(report.getNodeId()));
        }
    }
}
/**
 * Assigns a host to a container given affinity and anti-affinity constraints
 * and resource availability on the node.
 *
 * <p>Scans the known node reports and returns the first RUNNING node with
 * enough free memory and vcores that is not excluded by either anti-affinity
 * list; the chosen host is also recorded on {@code grpObj} and in
 * {@code nodeLocalMapping}.
 *
 * @param host the candidate host (overwritten with the chosen host on success)
 * @param antiHosts hosts that must not be used (strict anti-affinity)
 * @param antiPreferredHosts hosts that should not be used (preferred anti-affinity)
 * @param grpObj the host/operator grouping to record the chosen host on
 * @param nodeLocalSet the node-local operator set keyed in {@code nodeLocalMapping}
 * @param aggrMemory aggregate memory (MB) required on the node
 * @param vCores virtual cores required on the node
 * @return the chosen host name, or {@code null} when no node satisfies the constraints
 */
public String assignHost(String host, List<String> antiHosts, List<String> antiPreferredHosts, HostOperatorSet grpObj, Set<PTOperator> nodeLocalSet, int aggrMemory, int vCores) {
    for (Map.Entry<String, NodeReport> nodeEntry : nodeReportMap.entrySet()) {
        NodeReport report = nodeEntry.getValue();
        if (report.getNodeState() != NodeState.RUNNING) {
            continue;
        }
        // getUsed() may be null before a node has reported any container usage;
        // treat that as zero consumption instead of throwing an NPE.
        int usedMemory = report.getUsed() != null ? report.getUsed().getMemory() : 0;
        int usedVCores = report.getUsed() != null ? report.getUsed().getVirtualCores() : 0;
        int memAvailable = report.getCapability().getMemory() - usedMemory;
        int vCoresAvailable = report.getCapability().getVirtualCores() - usedVCores;
        if (memAvailable >= aggrMemory && vCoresAvailable >= vCores
            && !antiHosts.contains(nodeEntry.getKey())
            && !antiPreferredHosts.contains(nodeEntry.getKey())) {
            host = nodeEntry.getKey();
            grpObj.setHost(host);
            nodeLocalMapping.put(nodeLocalSet, host);
            return host;
        }
    }
    return null;
}
/**
 * Computes the memory currently free across all RUNNING NodeManagers.
 *
 * @param yarnClient client used to fetch the node reports
 * @return total free memory, the largest free amount on a single node
 *         (the effective limit for one container), and per-node free memory
 * @throws YarnException if the node reports cannot be retrieved
 * @throws IOException on a communication failure with the ResourceManager
 */
private ClusterResourceDescription getCurrentFreeClusterResources(YarnClient yarnClient) throws YarnException, IOException {
    List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);
    int totalFreeMemory = 0;
    int containerLimit = 0;
    int[] nodeManagersFree = new int[nodes.size()];
    for (int i = 0; i < nodes.size(); i++) {
        NodeReport rep = nodes.get(i);
        // getUsed() can be null for a node that has not reported usage yet.
        int free = rep.getCapability().getMemory() - (rep.getUsed() != null ? rep.getUsed().getMemory() : 0);
        nodeManagersFree[i] = free;
        totalFreeMemory += free;
        // Track the largest free amount available on any single node.
        if (free > containerLimit) {
            containerLimit = free;
        }
    }
    return new ClusterResourceDescription(totalFreeMemory, containerLimit, nodeManagersFree);
}
/**
 * Determines how much memory is currently unused on each RUNNING NodeManager.
 *
 * @param yarnClient the client to query node reports with
 * @return aggregate free memory, the maximum free memory on a single node,
 *         and the free memory of every node individually
 * @throws YarnException if YARN rejects the node-report request
 * @throws IOException if the ResourceManager cannot be reached
 */
private ClusterResourceDescription getCurrentFreeClusterResources(YarnClient yarnClient) throws YarnException, IOException {
    List<NodeReport> runningNodes = yarnClient.getNodeReports(NodeState.RUNNING);
    int[] perNodeFree = new int[runningNodes.size()];
    int sumFree = 0;
    int maxFree = 0;
    for (int n = 0; n < perNodeFree.length; n++) {
        NodeReport report = runningNodes.get(n);
        // A node that has not reported usage yet returns null from getUsed().
        int available = report.getCapability().getMemory()
            - (report.getUsed() == null ? 0 : report.getUsed().getMemory());
        perNodeFree[n] = available;
        sumFree += available;
        if (available > maxFree) {
            maxFree = available;
        }
    }
    return new ClusterResourceDescription(sumFree, maxFree, perNodeFree);
}
/**
 * Computes the memory currently free across all RUNNING NodeManagers.
 *
 * @param yarnClient client used to fetch the node reports
 * @return total free memory, the largest free amount on a single node
 *         (the effective limit for one container), and per-node free memory
 * @throws YarnException if the node reports cannot be retrieved
 * @throws IOException on a communication failure with the ResourceManager
 */
private ClusterResourceDescription getCurrentFreeClusterResources(
        YarnClient yarnClient) throws YarnException, IOException {
    List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);
    int totalFreeMemory = 0;
    int containerLimit = 0;
    int[] nodeManagersFree = new int[nodes.size()];
    for (int i = 0; i < nodes.size(); i++) {
        NodeReport rep = nodes.get(i);
        // getUsed() can be null for a node that has not reported usage yet.
        int free = rep.getCapability().getMemory() - (rep.getUsed() != null ? rep.getUsed().getMemory() : 0);
        nodeManagersFree[i] = free;
        totalFreeMemory += free;
        // Track the largest free amount available on any single node.
        if (free > containerLimit) {
            containerLimit = free;
        }
    }
    return new ClusterResourceDescription(totalFreeMemory, containerLimit, nodeManagersFree);
}
// NOTE(review): fragment — 'totalMemory' and 'nodes' are declared outside this view,
// and the enclosing loop/method close beyond it; cannot document the full contract here.
// Accumulates the cluster-wide capability totals (memory in MB and virtual cores)
// over every node report in 'nodes'.
int totalCores = 0;
for (NodeReport rep : nodes) {
    final Resource res = rep.getCapability();
    totalMemory += res.getMemory();
    totalCores += res.getVirtualCores();
// NOTE(review): fragment — 'totalMemory' and 'nodes' are declared outside this view,
// and the enclosing loop/method close beyond it; cannot document the full contract here.
// Accumulates the cluster-wide capability totals (memory in MB and virtual cores)
// over every node report in 'nodes'.
int totalCores = 0;
for (NodeReport rep : nodes) {
    final Resource res = rep.getCapability();
    totalMemory += res.getMemory();
    totalCores += res.getVirtualCores();
/**
 * Tracks updates to available resources. Resource availability is used to
 * make decisions about where to request new containers.
 *
 * @param nodeReports the node reports received from YARN
 */
public void updateNodeReports(List<NodeReport> nodeReports) {
    for (NodeReport report : nodeReports) {
        LOG.debug("Node report: rackName={}, nodeid={}, numContainers={}, capability={}, used={}, state={}",
            report.getRackName(), report.getNodeId(), report.getNumContainers(),
            report.getCapability(), report.getUsed(), report.getNodeState());
        // Index both the full report and its rack by host name for later lookups.
        String hostName = report.getNodeId().getHost();
        nodeReportMap.put(hostName, report);
        nodeToRack.put(hostName, report.getRackName());
    }
}
// NOTE(review): fragment — 'totalMemory' and 'nodes' are declared outside this view,
// and the enclosing loop/method close beyond it; cannot document the full contract here.
// Accumulates the cluster-wide capability totals (memory in MB and virtual cores)
// over every node report in 'nodes'.
int totalCores = 0;
for (NodeReport rep : nodes) {
    final Resource res = rep.getCapability();
    totalMemory += res.getMemory();
    totalCores += res.getVirtualCores();
// NOTE(review): fragment — the enclosing method and the matching close brace are
// outside this view. Registers a node that is (now) RUNNING: stores its capability
// under its canonical name in the 'nodes' map.
if (nodeReport.getNodeState() == NodeState.RUNNING) {
    String nodeKey = getNodeName(nodeReport.getNodeId());
    nodes.put(nodeKey, nodeReport.getCapability());
    LOG.debug("Added node '{}' with '{}' cpus and '{}' memory", nodeKey,
        nodeReport.getCapability().getVirtualCores(),
        nodeReport.getCapability().getMemory());
@Override public List<NodeInfo> getNodes() throws LlamaException { List<NodeInfo> ret = new ArrayList<NodeInfo>(); try { if (nodes == null) { // Get it from the yarn client. List<NodeReport> nodeReports = yarnClient.getNodeReports(NodeState.RUNNING); for (NodeReport nodeReport : nodeReports) { Resource resource = nodeReport.getCapability(); NodeInfo nodeInfo = new NodeInfo(getNodeName(nodeReport.getNodeId()), resource.getVirtualCores(), resource.getMemory()); ret.add(nodeInfo); } } else { // Get it from the nodes structure which is being kept upto date. synchronized (nodes) { for(Map.Entry<String, Resource> entry : nodes.entrySet()) { Resource nodeReport = entry.getValue(); NodeInfo nodeInfo = new NodeInfo(entry.getKey(), nodeReport.getVirtualCores(), nodeReport.getMemory()); ret.add(nodeInfo); } } } return ret; } catch (Throwable ex) { throw new LlamaException(ex, ErrorCode.AM_CANNOT_GET_NODES, appId); } }
// NOTE(review): fragment — 'numYarnMaxVcores' is declared outside this view.
// Largest vcore capability among all RUNNING NodeManagers; 0 when none are running.
numYarnMaxVcores = yarnClient.getNodeReports(NodeState.RUNNING)
    .stream()
    .mapToInt(report -> report.getCapability().getVirtualCores())
    .max()
    .orElse(0);
/**
 * Publishes cluster-wide memory and vcore gauges (total, used, available)
 * aggregated over all RUNNING NodeManagers. Failures are logged at WARN and
 * swallowed so metric collection never disrupts the caller.
 */
private void reportYarnResources() {
    try {
        long clusterMemory = 0L;
        long clusterVCores = 0L;
        long consumedMemory = 0L;
        long consumedVCores = 0L;
        for (NodeReport report : yarnClient.getNodeReports(NodeState.RUNNING)) {
            Resource capability = report.getCapability();
            Resource used = report.getUsed();
            clusterMemory += capability.getMemory();
            clusterVCores += YarnUtils.getVirtualCores(capability);
            consumedMemory += used.getMemory();
            consumedVCores += YarnUtils.getVirtualCores(used);
        }
        MetricsContext collector = getCollector();
        LOG.trace("YARN Cluster memory total={}MB, used={}MB", clusterMemory, consumedMemory);
        collector.gauge("resources.total.memory", clusterMemory);
        collector.gauge("resources.used.memory", consumedMemory);
        collector.gauge("resources.available.memory", clusterMemory - consumedMemory);
        LOG.trace("YARN Cluster vcores total={}, used={}", clusterVCores, consumedVCores);
        collector.gauge("resources.total.vcores", clusterVCores);
        collector.gauge("resources.used.vcores", consumedVCores);
        collector.gauge("resources.available.vcores", clusterVCores - consumedVCores);
    } catch (Exception e) {
        // Metric gathering is best-effort; never propagate to the caller.
        LOG.warn("Failed to gather YARN NodeReports", e);
    }
}
/**
 * Publishes cluster-wide memory and vcore gauges (total, used, available)
 * aggregated over all RUNNING NodeManagers. Failures are logged at WARN and
 * swallowed so metric collection never disrupts the caller.
 */
private void reportYarnResources() {
    try {
        long totalMemory = 0L;
        long totalVCores = 0L;
        long usedMemory = 0L;
        long usedVCores = 0L;
        for (NodeReport nodeReport : yarnClient.getNodeReports(NodeState.RUNNING)) {
            Resource capability = nodeReport.getCapability();
            Resource used = nodeReport.getUsed();
            totalMemory += capability.getMemory();
            totalVCores += YarnUtils.getVirtualCores(capability);
            usedMemory += used.getMemory();
            usedVCores += YarnUtils.getVirtualCores(used);
        }
        MetricsContext collector = getCollector();
        LOG.trace("YARN Cluster memory total={}MB, used={}MB", totalMemory, usedMemory);
        collector.gauge("resources.total.memory", totalMemory);
        collector.gauge("resources.used.memory", usedMemory);
        collector.gauge("resources.available.memory", totalMemory - usedMemory);
        LOG.trace("YARN Cluster vcores total={}, used={}", totalVCores, usedVCores);
        collector.gauge("resources.total.vcores", totalVCores);
        collector.gauge("resources.used.vcores", usedVCores);
        collector.gauge("resources.available.vcores", totalVCores - usedVCores);
    } catch (Exception e) {
        // Metric gathering is best-effort; never propagate to the caller.
        LOG.warn("Failed to gather YARN NodeReports", e);
    }
}
/**
 * Converts a YARN node report into a node-descriptor event and forwards it
 * to the registered event handlers.
 *
 * @param nodeReport the YARN report describing one NodeManager
 */
private void onNodeReport(final NodeReport nodeReport) {
    LOG.log(Level.FINE, "Send node descriptor: {0}", nodeReport);
    this.reefEventHandlers.onNodeDescriptor(NodeDescriptorEventImpl.newBuilder()
        // Identifier is the full NodeId string; host/port are its components.
        .setIdentifier(nodeReport.getNodeId().toString())
        .setHostName(nodeReport.getNodeId().getHost())
        .setPort(nodeReport.getNodeId().getPort())
        .setMemorySize(nodeReport.getCapability().getMemory())
        .setRackName(nodeReport.getRackName())
        .build());
}
/**
 * Converts a YARN node report into a node-descriptor protocol message and
 * forwards it to the registered event handlers.
 *
 * @param nodeReport the YARN report describing one NodeManager
 */
private void onNodeReport(final NodeReport nodeReport) {
    LOG.log(Level.FINE, "Send node descriptor: {0}", nodeReport);
    this.reefEventHandlers.onNodeDescriptor(NodeDescriptorProto.newBuilder()
        // Identifier is the full NodeId string; host/port are its components.
        .setIdentifier(nodeReport.getNodeId().toString())
        .setHostName(nodeReport.getNodeId().getHost())
        .setPort(nodeReport.getNodeId().getPort())
        .setMemorySize(nodeReport.getCapability().getMemory())
        .setRackName(nodeReport.getRackName())
        .build());
}