/**
 * @param topVer Topology version.
 * @param ids IDs.
 * @return Nodes.
 */
private List<ClusterNode> toNodes(AffinityTopologyVersion topVer, List<UUID> ids) {
    List<ClusterNode> nodes = new ArrayList<>(ids.size());

    for (int i = 0; i < ids.size(); i++) {
        UUID id = ids.get(i);

        ClusterNode node = cctx.discovery().node(topVer, id);

        assert node != null : "Failed to get node [id=" + id +
            ", topVer=" + topVer +
            ", locNode=" + cctx.localNode() +
            ", allNodes=" + cctx.discovery().nodes(topVer) + ']';

        nodes.add(node);
    }

    return nodes;
}
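/*
 * Illustration only: toNodes() above resolves each UUID against the discovery
 * snapshot for a fixed topology version and asserts that every ID is still
 * known. A stand-alone sketch of the same resolve-all-or-fail pattern over a
 * plain map; the 'topology' map is a hypothetical stand-in for the discovery
 * snapshot, and String plays the role of ClusterNode.
 */
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.UUID;

final class NodeResolutionExample {
    /** @return Nodes for {@code ids}, in order; fails fast if any ID is unknown. */
    static List<String> toNodes(Map<UUID, String> topology, List<UUID> ids) {
        List<String> nodes = new ArrayList<>(ids.size());

        for (UUID id : ids) {
            String node = topology.get(id);

            // The production code asserts instead; an exception keeps this sketch meaningful with assertions off.
            if (node == null)
                throw new IllegalStateException("Failed to get node [id=" + id + ']');

            nodes.add(node);
        }

        return nodes;
    }
}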
/**
 * @param e Exception.
 * @return {@code True} if local node should try reconnect in case of error.
 */
public boolean reconnectOnError(Throwable e) {
    return (e instanceof IgniteNeedReconnectException ||
        X.hasCause(e, IOException.class, IgniteClientDisconnectedCheckedException.class)) &&
        cctx.discovery().reconnectSupported();
}
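/*
 * Illustration only (not part of the manager above): X.hasCause() walks the
 * exception cause chain looking for any of the given throwable types. A
 * minimal stand-alone sketch of that traversal using plain JDK types; the
 * hasCause() helper here is a hypothetical re-implementation, not the Ignite
 * utility itself.
 */
final class CauseChainExample {
    /** @return {@code true} if {@code e} or any of its causes is an instance of one of {@code types}. */
    @SafeVarargs
    static boolean hasCause(Throwable e, Class<? extends Throwable>... types) {
        for (Throwable t = e; t != null; t = t.getCause()) {
            for (Class<? extends Throwable> type : types) {
                if (type.isInstance(t))
                    return true;
            }
        }

        return false;
    }

    public static void main(String[] args) {
        Throwable wrapped = new RuntimeException(new java.io.IOException("socket closed"));

        // Prints 'true': the IOException is buried one level down the cause chain.
        System.out.println(hasCause(wrapped, java.io.IOException.class));
    }
}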
/**
 * Send finish message.
 *
 * @param finishMsg Finish message.
 */
private void sendFinishMessage(WalStateFinishMessage finishMsg) {
    try {
        cctx.discovery().sendCustomEvent(finishMsg);
    }
    catch (Exception e) {
        U.error(log, "Failed to send WAL mode change finish message due to unexpected exception: " +
            finishMsg, e);
    }
}
/**
 * @param sb String builder.
 */
public void dumpPendingMessages(StringBuilder sb) {
    synchronized (pendingMsgs) {
        if (pendingMsgs.isEmpty())
            return;

        sb.append("Pending cache messages waiting for exchange [readyVer=").
            append(cctx.exchange().readyAffinityVersion()).
            append(", discoVer=").
            append(cctx.discovery().topologyVersion()).append(']');

        sb.append(nl());

        for (GridCacheMessage msg : pendingMsgs) {
            sb.append("Message [waitVer=").append(msg.topologyVersion()).append(", msg=").append(msg).append(']');

            sb.append(nl());
        }
    }
}
/**
 * @param caches Caches to stop.
 * @return Future that will be completed when caches are stopped from the exchange thread.
 */
public IgniteInternalFuture<Void> deferStopCachesOnClientReconnect(Collection<GridCacheAdapter> caches) {
    assert cctx.discovery().localNode().isClient();

    return exchWorker.deferStopCachesOnClientReconnect(caches);
}
/**
 * Get current coordinator node.
 *
 * @return Coordinator node.
 */
private ClusterNode coordinator() {
    assert Thread.holdsLock(mux);

    if (crdNode != null)
        return crdNode;
    else {
        ClusterNode res = null;

        for (ClusterNode node : cctx.discovery().aliveServerNodes()) {
            if (res == null || res.order() > node.order())
                res = node;
        }

        assert res != null;

        crdNode = res;

        return res;
    }
}
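/*
 * Illustration only: coordinator() above picks and caches the alive server
 * node with the smallest topology order, i.e. the oldest node. A stand-alone
 * sketch of the same "min by order" selection; the Node record and its
 * order() accessor are hypothetical stand-ins for ClusterNode.
 */
import java.util.List;

final class CoordinatorElectionExample {
    record Node(String id, long order) { }

    /** @return Node with the smallest order among {@code aliveServerNodes}. */
    static Node coordinator(List<Node> aliveServerNodes) {
        Node res = null;

        for (Node node : aliveServerNodes) {
            if (res == null || res.order() > node.order())
                res = node;
        }

        assert res != null; // Mirrors the assertion above: some server node must be alive.

        return res;
    }

    public static void main(String[] args) {
        List<Node> nodes = List.of(new Node("a", 3), new Node("b", 1), new Node("c", 2));

        // Prints 'b': it joined first and therefore has the smallest order.
        System.out.println(coordinator(nodes).id());
    }
}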
/**
 * Sends communication message.
 *
 * @param nodeId ID of node to send the message to.
 * @param msg Message to send.
 * @param plc IO policy.
 * @throws IgniteCheckedException If sending failed.
 */
public void send(UUID nodeId, GridCacheMessage msg, byte plc) throws IgniteCheckedException {
    ClusterNode n = cctx.discovery().node(nodeId);

    if (n == null)
        throw new ClusterTopologyCheckedException("Failed to send message because node left grid [nodeId=" +
            nodeId + ", msg=" + msg + ']');

    send(n, msg, plc);
}
/** {@inheritDoc} */
@Override public void onActivate(GridKernalContext kctx) throws IgniteCheckedException {
    if (log.isDebugEnabled())
        log.debug("Activated file write ahead log manager [nodeId=" + cctx.localNodeId() +
            " topVer=" + cctx.discovery().topologyVersionEx() + " ]");

    // No-op implementation; needs to be overridden where activation work is required.
}
/**
 * @return {@code true} if local node is in baseline and {@code false} otherwise.
 */
private boolean isLocalNodeInBaseline() {
    BaselineTopology topology = cctx.discovery().discoCache().state().baselineTopology();

    return topology != null && topology.consistentIds().contains(cctx.localNode().consistentId());
}
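/*
 * Illustration only: the baseline check above is a null-safe set-containment
 * test on consistent IDs. A stand-alone sketch of the same test; the
 * BaselineView record is a hypothetical stand-in for BaselineTopology.
 */
import java.util.Set;

final class BaselineMembershipExample {
    record BaselineView(Set<Object> consistentIds) { }

    /** @return {@code true} if {@code localConsistentId} belongs to the baseline, if one is set. */
    static boolean inBaseline(BaselineView baseline, Object localConsistentId) {
        // A null baseline means the cluster has not established a baseline topology yet.
        return baseline != null && baseline.consistentIds().contains(localConsistentId);
    }

    public static void main(String[] args) {
        BaselineView baseline = new BaselineView(Set.of("node-a", "node-b"));

        System.out.println(inBaseline(baseline, "node-a")); // true
        System.out.println(inBaseline(null, "node-a"));     // false: no baseline yet.
    }
}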
/** {@inheritDoc} */
@Override public void onDeActivate(GridKernalContext kctx) {
    if (log.isDebugEnabled())
        log.debug("Deactivate page store manager [id=" + cctx.localNodeId() +
            " topVer=" + cctx.discovery().topologyVersionEx() + " ]");

    stop0(true);
}
/** {@inheritDoc} */
@Override public void onDeActivate(GridKernalContext kctx) {
    if (log.isDebugEnabled())
        log.debug("Deactivate file write ahead log [nodeId=" + cctx.localNodeId() +
            " topVer=" + cctx.discovery().topologyVersionEx() + " ]");

    stop0(true);

    currHnd = null;
}
/** {@inheritDoc} */
@Override public void onActivate(GridKernalContext kctx) throws IgniteCheckedException {
    if (log.isDebugEnabled())
        log.debug("Activate page store manager [id=" + cctx.localNodeId() +
            " topVer=" + cctx.discovery().topologyVersionEx() + " ]");

    start0();
}
/**
 * Process client cache start/close requests, called from exchange thread.
 *
 * @param msg Change request.
 */
void processClientCachesChanges(ClientCacheChangeDummyDiscoveryMessage msg) {
    AffinityTopologyVersion topVer = cctx.exchange().readyAffinityVersion();

    DiscoCache discoCache = cctx.discovery().discoCache(topVer);

    boolean crd = cctx.localNode().equals(discoCache.oldestAliveServerNode());

    Map<Integer, Boolean> startedCaches = processClientCacheStartRequests(msg, crd, topVer, discoCache);

    Set<Integer> closedCaches = processCacheCloseRequests(msg, crd, topVer);

    if (startedCaches != null || closedCaches != null)
        scheduleClientChangeMessage(startedCaches, closedCaches);
}
/** {@inheritDoc} */
@Override public void onDeActivate(GridKernalContext kctx) {
    if (log.isDebugEnabled())
        log.debug("Deactivate database manager [id=" + cctx.localNodeId() +
            " topVer=" + cctx.discovery().topologyVersionEx() + " ]");

    onKernalStop0(false);

    super.onDeActivate(kctx);

    /* Must be reset here: activate may be invoked after deactivate, and the file lock must already be configured by then. */
    stopping = false;
}
/**
 * @param evt Event.
 */
private void notifyNodeFail(DiscoveryEvent evt) {
    if (evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED) {
        final ClusterNode n = evt.eventNode();

        assert cctx.discovery().node(n.id()) == null;

        for (GridDhtPartitionsExchangeFuture f : exchFuts.values())
            f.onNodeLeft(n);
    }
}
/**
 * @return Initial exchange ID.
 */
private GridDhtPartitionExchangeId initialExchangeId() {
    DiscoveryEvent discoEvt = cctx.discovery().localJoinEvent();

    assert discoEvt != null;

    final AffinityTopologyVersion startTopVer = affinityTopologyVersion(discoEvt);

    assert discoEvt.topologyVersion() == startTopVer.topologyVersion();

    return exchangeId(cctx.localNode().id(), startTopVer, discoEvt);
}
    @Override public void onEvent(Event evt) {
        assert evt.type() == EVT_NODE_METRICS_UPDATED;

        DiscoveryEvent discoEvt = (DiscoveryEvent)evt;

        ClusterNode node = cctx.discovery().node(discoEvt.node().id());

        if (node != null && !node.id().equals(cctx.localNodeId()))
            onReceived(discoEvt.eventNode().id(), node.metrics().getLastDataVersion());
    }
};
/**
 * @param fut Affinity future.
 */
private void onAffinityInitialized(IgniteInternalFuture<Map<Integer, Map<Integer, List<UUID>>>> fut) {
    try {
        assert fut.isDone();

        Map<Integer, Map<Integer, List<UUID>>> assignmentChange = fut.get();

        GridDhtPartitionsFullMessage m = createPartitionsMessage(false, false);

        CacheAffinityChangeMessage msg = new CacheAffinityChangeMessage(exchId, m, assignmentChange);

        if (log.isDebugEnabled())
            log.debug("Centralized affinity exchange, send affinity change message: " + msg);

        cctx.discovery().sendCustomEvent(msg);
    }
    catch (IgniteCheckedException e) {
        onDone(e);
    }
}
/**
 * @param type Event type.
 */
protected void recordStateChangedEvent(int type) {
    assert near() && local();

    GridEventStorageManager evtMgr = cctx.gridEvents();

    if (!system() /* ignoring system tx */ && evtMgr.isRecordable(type))
        evtMgr.record(new TransactionStateChangedEvent(
            cctx.discovery().localNode(),
            "Transaction state changed.",
            type,
            new TransactionEventProxyImpl((GridNearTxLocal)this)));
}
/** {@inheritDoc} */
@Override public void onActivate(GridKernalContext ctx) throws IgniteCheckedException {
    if (log.isDebugEnabled())
        log.debug("Activate database manager [id=" + cctx.localNodeId() +
            " topVer=" + cctx.discovery().topologyVersionEx() + " ]");

    snapshotMgr = cctx.snapshot();

    if (!cctx.kernalContext().clientNode() && checkpointer == null)
        checkpointer = new Checkpointer(cctx.igniteInstanceName(), "db-checkpoint-thread", log);

    super.onActivate(ctx);

    if (!cctx.kernalContext().clientNode()) {
        initializeCheckpointPool();

        finishRecovery();
    }
}