/**
 * Checks whether the given node is part of the cluster node set held by this instance.
 *
 * @param node the node to look up
 * @return {@code Boolean.TRUE} if the node exists, {@code Boolean.FALSE} if it does not,
 *         or {@code null} when this holder has not yet been initialized with node info
 */
public Boolean hasNode(DiscoveryNode node) {
    if (nodes == null) {
        // Tri-state contract: null deliberately signals "unknown" until cluster info arrives.
        if (log.isDebugEnabled()) {
            log.debug("Cluster Info Holder not initialized yet for 'nodes'");
        }
        return null;
    }
    // Boolean.valueOf replaces the manual ternary and reuses the cached instances.
    return Boolean.valueOf(nodes.nodeExists(node));
}
@Override public ClusterState execute(ClusterState currentState) throws Exception { // if we are no longer master, fail... DiscoveryNodes nodes = currentState.nodes(); if (!nodes.nodeExists(request.sourceNode)) { throw new NodeDoesNotExistOnMasterException(); } return currentState; }
/**
 * Returns true when a task requires a fresh assignment: it was never assigned,
 * or the node it was assigned to is no longer part of the cluster.
 */
public static boolean needsReassignment(final Assignment assignment, final DiscoveryNodes nodes) {
    // Equivalent to the original via De Morgan; short-circuit still guards getExecutorNode().
    final boolean assignedToLiveNode = assignment.isAssigned() && nodes.nodeExists(assignment.getExecutorNode());
    return assignedToLiveNode == false;
}
/**
 * Returns the changes comparing this nodes to the provided nodes: which nodes were
 * removed, which were added, and whether the elected master changed.
 */
public Delta delta(DiscoveryNodes other) {
    // Nodes present in "other" but missing here were removed.
    final List<DiscoveryNode> removedNodes = new ArrayList<>();
    for (DiscoveryNode candidate : other) {
        if (this.nodeExists(candidate) == false) {
            removedNodes.add(candidate);
        }
    }
    // Nodes present here but missing in "other" were added.
    final List<DiscoveryNode> addedNodes = new ArrayList<>();
    for (DiscoveryNode candidate : this) {
        if (other.nodeExists(candidate) == false) {
            addedNodes.add(candidate);
        }
    }
    DiscoveryNode previousMasterNode = null;
    DiscoveryNode newMasterNode = null;
    // Report a master change only when this side has a master and it differs from the
    // other side's (equals(null) is false, covering the "other had no master" case).
    if (masterNodeId != null && masterNodeId.equals(other.masterNodeId) == false) {
        previousMasterNode = other.getMasterNode();
        newMasterNode = getMasterNode();
    }
    return new Delta(previousMasterNode, newMasterNode, localNodeId,
        Collections.unmodifiableList(removedNodes), Collections.unmodifiableList(addedNodes));
}
/** * Fills the shard fetched data with new (data) nodes and a fresh NodeEntry, and removes from * it nodes that are no longer part of the state. */ private void fillShardCacheWithDataNodes(Map<String, NodeEntry<T>> shardCache, DiscoveryNodes nodes) { // verify that all current data nodes are there for (ObjectObjectCursor<String, DiscoveryNode> cursor : nodes.getDataNodes()) { DiscoveryNode node = cursor.value; if (shardCache.containsKey(node.getId()) == false) { shardCache.put(node.getId(), new NodeEntry<T>(node.getId())); } } // remove nodes that are not longer part of the data nodes set shardCache.keySet().removeIf(nodeId -> !nodes.nodeExists(nodeId)); }
/** * Bans all tasks with the specified parent task from execution, cancels all tasks that are currently executing. * <p> * This method is called when a parent task that has children is cancelled. */ public void setBan(TaskId parentTaskId, String reason) { logger.trace("setting ban for the parent task {} {}", parentTaskId, reason); // Set the ban first, so the newly created tasks cannot be registered synchronized (banedParents) { if (lastDiscoveryNodes.nodeExists(parentTaskId.getNodeId())) { // Only set the ban if the node is the part of the cluster banedParents.put(parentTaskId, reason); } } // Now go through already running tasks and cancel them for (Map.Entry<Long, CancellableTaskHolder> taskEntry : cancellableTasks.entrySet()) { CancellableTaskHolder holder = taskEntry.getValue(); if (holder.hasParent(parentTaskId)) { holder.cancel(reason); } } }
/** * make sure that nodes in clusterState are pinged. Any pinging to nodes which are not * part of the cluster will be stopped */ public void updateNodesAndPing(ClusterState clusterState) { // remove any nodes we don't need, this will cause their FD to stop for (DiscoveryNode monitoredNode : nodesFD.keySet()) { if (!clusterState.nodes().nodeExists(monitoredNode)) { nodesFD.remove(monitoredNode); } } // add any missing nodes for (DiscoveryNode node : clusterState.nodes()) { if (node.equals(localNode)) { // no need to monitor the local node continue; } if (!nodesFD.containsKey(node)) { NodeFD fd = new NodeFD(node); // it's OK to overwrite an existing nodeFD - it will just stop and the new one will pick things up. nodesFD.put(node, fd); // we use schedule with a 0 time value to run the pinger on the pool as it will run on later threadPool.schedule(TimeValue.timeValueMillis(0), ThreadPool.Names.SAME, fd); } } }
@Override
public void applyClusterState(ClusterChangedEvent event) {
    // Record the node set from the new cluster state.
    lastDiscoveryNodes = event.state().getNodes();
    if (event.nodesRemoved()) {
        synchronized (banedParents) {
            // NOTE(review): lastDiscoveryNodes is assigned a second time here under the lock —
            // presumably so readers synchronizing on banedParents see a node set at least as
            // fresh as the ban cleanup below; confirm against the field's other readers.
            lastDiscoveryNodes = event.state().getNodes();
            // Remove all bans that were registered by nodes that are no longer in the cluster state
            Iterator<TaskId> banIterator = banedParents.keySet().iterator();
            while (banIterator.hasNext()) {
                TaskId taskId = banIterator.next();
                if (lastDiscoveryNodes.nodeExists(taskId.getNodeId()) == false) {
                    logger.debug("Removing ban for the parent [{}] on the node [{}], reason: the parent node is gone", taskId,
                        event.state().getNodes().getLocalNode());
                    // Iterator.remove is required here — removing via the map while iterating
                    // its key set would risk ConcurrentModificationException.
                    banIterator.remove();
                }
            }
        }
        // Cancel cancellable tasks for the nodes that are gone
        for (Map.Entry<Long, CancellableTaskHolder> taskEntry : cancellableTasks.entrySet()) {
            CancellableTaskHolder holder = taskEntry.getValue();
            CancellableTask task = holder.getTask();
            TaskId parentTaskId = task.getParentTaskId();
            // Cancel only when the parent is set, its node left the cluster, and the task
            // opted in via cancelOnParentLeaving().
            if (parentTaskId.isSet() && lastDiscoveryNodes.nodeExists(parentTaskId.getNodeId()) == false) {
                if (task.cancelOnParentLeaving()) {
                    holder.cancel("Coordinating node [" + parentTaskId.getNodeId() + "] left the cluster");
                }
            }
        }
    }
}
private void buildDiffAndSerializeStates(ClusterState clusterState, ClusterState previousState, Set<DiscoveryNode> nodesToPublishTo, boolean sendFullVersion, Map<Version, BytesReference> serializedStates, Map<Version, BytesReference> serializedDiffs) { Diff<ClusterState> diff = null; for (final DiscoveryNode node : nodesToPublishTo) { try { if (sendFullVersion || !previousState.nodes().nodeExists(node)) { // will send a full reference if (serializedStates.containsKey(node.getVersion()) == false) { serializedStates.put(node.getVersion(), serializeFullClusterState(clusterState, node.getVersion())); } } else { // will send a diff if (diff == null) { diff = clusterState.diff(previousState); } if (serializedDiffs.containsKey(node.getVersion()) == false) { serializedDiffs.put(node.getVersion(), serializeDiffClusterState(diff, node.getVersion())); } } } catch (IOException e) { throw new ElasticsearchException("failed to serialize cluster_state for publishing to node {}", e, node); } } }
if (sendFullVersion || !previousState.nodes().nodeExists(node)) { sendFullClusterState(clusterState, serializedStates, node, publishTimeout, sendingController); } else {
if (!nodes.isLocalNodeElectedMaster() || !nodes.nodeExists(request.sourceNode)) { logger.trace("checking ping from {} under a cluster state thread", request.sourceNode); masterService.submitStateUpdateTask("master ping (from: " + request.sourceNode + ")", new ClusterStateUpdateTask() {
resolvedNodesIds.add(masterNodeId); } else if (nodeExists(nodeId)) { resolvedNodesIds.add(nodeId); } else {
/**
 * Tri-state membership check: TRUE/FALSE when node information is available,
 * {@code null} before this holder has been initialized with cluster node info.
 */
public Boolean hasNode(DiscoveryNode node) {
    if (nodes != null) {
        return nodes.nodeExists(node) ? Boolean.TRUE : Boolean.FALSE;
    }
    if (log.isDebugEnabled()) {
        log.debug("Cluster Info Holder not initialized yet for 'nodes'");
    }
    return null;
}
@Override
public ClusterTasksResult<Task> execute(final ClusterState currentState, final List<Task> tasks) throws Exception {
    // Builds a node set with every task's node removed, then either applies the resulting
    // state or triggers a rejoin when too few master-eligible nodes would remain.
    final DiscoveryNodes.Builder remainingNodesBuilder = DiscoveryNodes.builder(currentState.nodes());
    boolean removed = false;
    for (final Task task : tasks) {
        if (currentState.nodes().nodeExists(task.node())) {
            remainingNodesBuilder.remove(task.node());
            removed = true;
        } else {
            // Node already gone from the state — nothing to remove for this task.
            logger.debug("node [{}] does not exist in cluster state, ignoring", task);
        }
    }
    if (!removed) {
        // no nodes to remove, keep the current cluster state
        return ClusterTasksResult.<Task>builder().successes(tasks).build(currentState);
    }
    final ClusterState remainingNodesClusterState = remainingNodesClusterState(currentState, remainingNodesBuilder);
    final ClusterTasksResult.Builder<Task> resultBuilder = ClusterTasksResult.<Task>builder().successes(tasks);
    if (electMasterService.hasEnoughMasterNodes(remainingNodesClusterState.nodes()) == false) {
        // Too few master-eligible nodes remain: keep the current state and request a rejoin
        // instead of publishing the reduced state.
        final int masterNodes = electMasterService.countMasterNodes(remainingNodesClusterState.nodes());
        rejoin.accept(LoggerMessageFormat.format("not enough master nodes (has [{}], but needed [{}])",
            masterNodes, electMasterService.minimumMasterNodes()));
        return resultBuilder.build(currentState);
    } else {
        // NOTE(review): deassociateDeadNodes presumably unassigns shards that lived on the
        // removed nodes as part of this same state update — confirm against its javadoc.
        return resultBuilder.build(allocationService.deassociateDeadNodes(remainingNodesClusterState, true, describeTasks(tasks)));
    }
}
ShardSnapshotStatus shardStatus = shardEntry.value; if (!shardStatus.state().completed() && shardStatus.nodeId() != null) { if (nodes.nodeExists(shardStatus.nodeId())) { shards.put(shardEntry.key, shardEntry.value); } else {
if (node.equals(BECOME_MASTER_TASK) || node.equals(FINISH_ELECTION_TASK)) { } else if (currentNodes.nodeExists(node)) { logger.debug("received a join request for an existing node [{}]", node); } else {
@Override public ClusterState execute(ClusterState currentState) throws Exception { // if we are no longer master, fail... DiscoveryNodes nodes = currentState.nodes(); if (!nodes.nodeExists(request.sourceNode)) { throw new NodeDoesNotExistOnMasterException(); } return currentState; }
@Override public ClusterState execute(ClusterState currentState) throws Exception { // if we are no longer master, fail... DiscoveryNodes nodes = currentState.nodes(); if (!nodes.nodeExists(request.sourceNode)) { throw new NodeDoesNotExistOnMasterException(); } return currentState; }
@Override public ClusterState execute(ClusterState currentState) throws Exception { // if we are no longer master, fail... DiscoveryNodes nodes = currentState.nodes(); if (!nodes.nodeExists(request.sourceNode)) { throw new NodeDoesNotExistOnMasterException(); } return currentState; }
@Override public ClusterState execute(ClusterState currentState) throws Exception { // if we are no longer master, fail... DiscoveryNodes nodes = currentState.nodes(); if (!nodes.localNodeMaster()) { throw new NotMasterException("local node is not master"); } if (!nodes.nodeExists(request.nodeId)) { throw new NodeDoesNotExistOnMasterException(); } return currentState; }