/** {@inheritDoc} */ public synchronized void onNodesUpdated(List<NodeReport> updated) { for (NodeReport node : updated) { // If node unusable. if (node.getNodeState().isUnusable()) { for (IgniteContainer cont : containers.values()) { if (cont.nodeId().equals(node.getNodeId())) { containers.remove(cont.id()); log.log(Level.WARNING, "Node is unusable. Node: {0}, state: {1}.", new Object[]{node.getNodeId().getHost(), node.getNodeState()}); } } log.log(Level.WARNING, "Node is unusable. Node: {0}, state: {1}.", new Object[]{node.getNodeId().getHost(), node.getNodeState()}); } } }
/**
 * Creates a health-transition event for the node described by the report.
 *
 * @param nodeReport latest report for the node whose state changed
 * @param sourceId identifier of the event source
 */
public AMNodeEventStateChanged(NodeReport nodeReport, int sourceId) {
    // super(...) must be the first statement, so the state-to-type mapping
    // lives in a static helper instead of a local variable.
    super(nodeReport.getNodeId(), sourceId, eventTypeFor(nodeReport));
    this.nodeReport = nodeReport;
}

/** Maps the report's node state onto the matching health-transition event type. */
private static AMNodeEventType eventTypeFor(NodeReport nodeReport) {
    return nodeReport.getNodeState().isUnusable()
        ? AMNodeEventType.N_TURNED_UNHEALTHY
        : AMNodeEventType.N_TURNED_HEALTHY;
}
/**
 * Prints a report of the cluster nodes whose state is one of the given states.
 *
 * @param nodeStates states used to filter the node reports
 * @throws YarnException if the node reports cannot be fetched
 * @throws IOException if writing the report fails
 */
private void listClusterNodes(Set<NodeState> nodeStates)
    throws YarnException, IOException {
  List<NodeReport> reports =
      client.getNodeReports(nodeStates.toArray(new NodeState[0]));

  PrintWriter writer = new PrintWriter(
      new OutputStreamWriter(sysout, Charset.forName("UTF-8")));
  writer.println("Total Nodes:" + reports.size());
  // Header row, then one row per node using the shared column pattern.
  writer.printf(NODES_PATTERN, "Node-Id", "Node-State", "Node-Http-Address",
      "Number-of-Running-Containers");
  for (NodeReport report : reports) {
    writer.printf(NODES_PATTERN, report.getNodeId(), report.getNodeState(),
        report.getHttpAddress(), report.getNumContainers());
  }
  // Flush so output is visible without closing the caller-owned sysout stream.
  writer.flush();
}
/**
 * Writes a tabular summary of the cluster nodes matching the given states.
 *
 * @param nodeStates node states to include in the report
 * @throws YarnException if the resource manager request fails
 * @throws IOException on a write failure
 */
private void listClusterNodes(Set<NodeState> nodeStates)
    throws YarnException, IOException {
  PrintWriter out = new PrintWriter(
      new OutputStreamWriter(sysout, Charset.forName("UTF-8")));
  NodeState[] states = nodeStates.toArray(new NodeState[0]);
  List<NodeReport> nodeReports = client.getNodeReports(states);
  out.println("Total Nodes:" + nodeReports.size());
  out.printf(NODES_PATTERN, "Node-Id", "Node-State", "Node-Http-Address",
      "Number-of-Running-Containers");
  // One formatted row per reported node.
  for (NodeReport nr : nodeReports) {
    out.printf(NODES_PATTERN, nr.getNodeId(), nr.getNodeState(),
        nr.getHttpAddress(), nr.getNumContainers());
  }
  // Flush only; the underlying sysout stream is owned by the caller.
  out.flush();
}
/**
 * Reports the cluster nodes currently in any of the requested states.
 *
 * @param nodeStates filter: only nodes in these states are listed
 * @throws YarnException if fetching node reports from YARN fails
 * @throws IOException if the report cannot be written
 */
private void listClusterNodes(Set<NodeState> nodeStates)
    throws YarnException, IOException {
  List<NodeReport> matching =
      client.getNodeReports(nodeStates.toArray(new NodeState[0]));
  PrintWriter writer = new PrintWriter(
      new OutputStreamWriter(sysout, Charset.forName("UTF-8")));

  writer.println("Total Nodes:" + matching.size());
  writer.printf(NODES_PATTERN, "Node-Id", "Node-State", "Node-Http-Address",
      "Number-of-Running-Containers");
  for (NodeReport node : matching) {
    writer.printf(NODES_PATTERN,
        node.getNodeId(),
        node.getNodeState(),
        node.getHttpAddress(),
        node.getNumContainers());
  }
  // Push buffered output through without closing the shared stream.
  writer.flush();
}
/**
 * Lists every cluster node whose state matches one of the supplied states,
 * printing a count followed by one formatted row per node.
 *
 * @param nodeStates node states to match
 * @throws YarnException if the node-report query fails
 * @throws IOException if output cannot be written
 */
private void listClusterNodes(Set<NodeState> nodeStates)
    throws YarnException, IOException {
  PrintWriter report = new PrintWriter(
      new OutputStreamWriter(sysout, Charset.forName("UTF-8")));
  List<NodeReport> nodes =
      client.getNodeReports(nodeStates.toArray(new NodeState[0]));

  report.println("Total Nodes:" + nodes.size());
  report.printf(NODES_PATTERN, "Node-Id", "Node-State", "Node-Http-Address",
      "Number-of-Running-Containers");
  for (NodeReport n : nodes) {
    report.printf(NODES_PATTERN, n.getNodeId(), n.getNodeState(),
        n.getHttpAddress(), n.getNumContainers());
  }
  report.flush(); // do not close: sysout belongs to the caller
}
@Override public void transition(JobImpl job, JobEvent event) { JobUpdatedNodesEvent updateEvent = (JobUpdatedNodesEvent) event; for(NodeReport nr: updateEvent.getUpdatedNodes()) { NodeState nodeState = nr.getNodeState(); if(nodeState.isUnusable()) { // act on the updates job.actOnUnusableNode(nr.getNodeId(), nodeState); } } } }
@Override public void transition(JobImpl job, JobEvent event) { JobUpdatedNodesEvent updateEvent = (JobUpdatedNodesEvent) event; for(NodeReport nr: updateEvent.getUpdatedNodes()) { NodeState nodeState = nr.getNodeState(); if(nodeState.isUnusable()) { // act on the updates job.actOnUnusableNode(nr.getNodeId(), nodeState); } } } }
@Override public void transition(JobImpl job, JobEvent event) { JobUpdatedNodesEvent updateEvent = (JobUpdatedNodesEvent) event; for(NodeReport nr: updateEvent.getUpdatedNodes()) { NodeState nodeState = nr.getNodeState(); if(nodeState.isUnusable()) { // act on the updates job.actOnUnusableNode(nr.getNodeId(), nodeState); } } } }
/**
 * Picks a RUNNING host that satisfies the anti-affinity lists and has enough
 * free memory and virtual cores, records the choice on the host-operator
 * group and the node-local mapping, and returns it.
 *
 * @param host ignored on input; the chosen host is returned instead
 * @param antiHosts hosts that must not be chosen (anti-affinity)
 * @param antiPreferredHosts hosts to avoid (anti-preference)
 * @param grpObj host-operator group updated with the chosen host
 * @param nodeLocalSet operator set keyed into the node-local mapping
 * @param aggrMemory minimum free memory required on the host
 * @param vCores minimum free virtual cores required on the host
 * @return the chosen host name, or {@code null} if no host qualifies
 */
public String assignHost(String host, List<String> antiHosts,
    List<String> antiPreferredHosts, HostOperatorSet grpObj,
    Set<PTOperator> nodeLocalSet, int aggrMemory, int vCores) {
  for (Map.Entry<String, NodeReport> entry : nodeReportMap.entrySet()) {
    NodeReport report = entry.getValue();
    if (report.getNodeState() != NodeState.RUNNING) {
      continue; // only running nodes can take containers
    }
    String candidate = entry.getKey();
    if (antiHosts.contains(candidate) || antiPreferredHosts.contains(candidate)) {
      continue; // excluded by (anti-)affinity constraints
    }
    int freeMemory =
        report.getCapability().getMemory() - report.getUsed().getMemory();
    int freeVCores =
        report.getCapability().getVirtualCores() - report.getUsed().getVirtualCores();
    if (freeMemory >= aggrMemory && freeVCores >= vCores) {
      grpObj.setHost(candidate);
      nodeLocalMapping.put(nodeLocalSet, candidate);
      return candidate;
    }
  }
  return null;
}
@Override
public void onNodesUpdated(List<NodeReport> nodeReports) {
    LOG.debug("Received nodes update for '{}' nodes", nodeReports.size());
    for (NodeReport report : nodeReports) {
        String nodeName = getNodeName(report.getNodeId());
        if (report.getNodeState() == NodeState.RUNNING) {
            // Track capability of nodes that are up.
            nodes.put(nodeName, report.getCapability());
            LOG.debug("Added node '{}' with '{}' cpus and '{}' memory", nodeName,
                report.getCapability().getVirtualCores(),
                report.getCapability().getMemory());
        } else {
            // Any non-RUNNING state drops the node from the tracked set.
            LOG.debug("Removed node '{}'", report.getNodeId());
            nodes.remove(nodeName);
        }
    }
}
? "" : updatedNode.getNodeId().getHost(); NodeState nodeState = updatedNode.getNodeState(); if (hostname.isEmpty() || nodeState == null) { log.warn("Ignoring incomplete update");
? "" : updatedNode.getNodeId().getHost(); NodeState nodeState = updatedNode.getNodeState(); if (hostname.isEmpty() || nodeState == null) { log.warn("Ignoring incomplete update");
NodeState nodeState = nr.getNodeState(); if (nodeState.isUnusable()) { unusableNodes.add(nr.getNodeId());
/**
 * Applies the latest node report to this node's tracked state.
 * <p>
 * Returns {@code true} when the change is significant enough to re-evaluate
 * pending requests: the node transitioned from unusable to usable, or its
 * label changed while the node is usable. Transitions to an unusable state,
 * and label changes on an unusable node, return {@code false}.
 *
 * @param report latest node report
 * @return true if the change warrants a request re-evaluation
 */
public synchronized boolean updateNode(NodeReport report) {
    nodeStateUpdateTime = report.getLastHealthReportTime();
    nodeReport = report;

    boolean wasUnusable = nodeState.isUnusable(); // state before this update
    nodeState = report.getNodeState();
    boolean isUsable = !nodeState.isUnusable();

    String previousLabels = this.nodeLabels;
    nodeLabels = SliderUtils.extractNodeLabel(report);

    boolean becameAvailable = wasUnusable && isUsable;
    boolean labelsChangedWhileUsable = isUsable && !this.nodeLabels.equals(previousLabels);
    return becameAvailable || labelsChangedWhileUsable;
}
/**
 * Updates this node's recorded state and labels from the given report.
 * <p>
 * The result is {@code true} only when pending requests should be
 * re-evaluated: either the node just became usable after being unusable, or
 * the node is usable and its label differs from the previously recorded one.
 * Live-to-dead transitions and label changes on a dead node are not
 * considered significant.
 *
 * @param report latest node report
 * @return true if the state change should trigger a request evaluation
 */
public synchronized boolean updateNode(NodeReport report) {
    nodeStateUpdateTime = report.getLastHealthReportTime();
    nodeReport = report;

    // Capture usability before overwriting the state from the report.
    boolean previouslyUnusable = nodeState.isUnusable();
    nodeState = report.getNodeState();
    boolean nowUsable = !nodeState.isUnusable();

    // Capture the old label before refreshing it from the report.
    String oldLabels = this.nodeLabels;
    nodeLabels = SliderUtils.extractNodeLabel(report);

    if (previouslyUnusable && nowUsable) {
        return true; // node just became available
    }
    return nowUsable && !this.nodeLabels.equals(oldLabels);
}
NodeState nodeState = nr.getNodeState(); if (nodeState.isUnusable()) { unusableNodes.add(nr.getNodeId());
/**
 * Records the latest node reports. The per-host report and rack mappings are
 * consulted when deciding where to request new containers.
 *
 * @param nodeReports latest reports for the cluster's nodes
 */
public void updateNodeReports(List<NodeReport> nodeReports) {
    for (NodeReport report : nodeReports) {
        LOG.debug("Node report: rackName={}, nodeid={}, numContainers={}, capability={}, used={}, state={}",
            report.getRackName(), report.getNodeId(), report.getNumContainers(),
            report.getCapability(), report.getUsed(), report.getNodeState());
        String host = report.getNodeId().getHost();
        nodeReportMap.put(host, report);
        nodeToRack.put(host, report.getRackName());
    }
}
private void createSpiedMapTasks(Map<NodeReport, TaskId> nodeReportsToTaskIds, Map<TaskId, Task> spiedTasks, JobImpl job, NodeState nodeState, List<NodeReport> nodeReports) { for (Map.Entry<TaskId, Task> e: job.tasks.entrySet()) { TaskId taskId = e.getKey(); Task task = e.getValue(); if (taskId.getTaskType() == TaskType.MAP) { // add an attempt to the task to simulate nodes NodeId nodeId = mock(NodeId.class); TaskAttempt attempt = mock(TaskAttempt.class); when(attempt.getNodeId()).thenReturn(nodeId); TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0); when(attempt.getID()).thenReturn(attemptId); // create a spied task Task spied = spy(task); Map<TaskAttemptId, TaskAttempt> attemptMap = new HashMap<>(); attemptMap.put(attemptId, attempt); when(spied.getAttempts()).thenReturn(attemptMap); doReturn(attempt).when(spied).getAttempt(any(TaskAttemptId.class)); spiedTasks.put(taskId, spied); // create a NodeReport based on the node id NodeReport report = mock(NodeReport.class); when(report.getNodeState()).thenReturn(nodeState); when(report.getNodeId()).thenReturn(nodeId); nodeReports.add(report); nodeReportsToTaskIds.put(report, taskId); } } }
nodes = Collections.synchronizedMap(new HashMap<String, Resource>()); for (NodeReport nodeReport : yarnClient.getNodeReports()) { if (nodeReport.getNodeState() == NodeState.RUNNING) { String nodeKey = getNodeName(nodeReport.getNodeId()); nodes.put(nodeKey, nodeReport.getCapability());