/**
 * @return Cache communication manager.
 */
public GridCacheIoManager io() {
    return sharedCtx.io();
}
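For reference, callers of io() typically use the returned manager to send cache messages to remote nodes. A minimal usage sketch follows; the message instance, destination node ID and the SYSTEM_POOL policy are assumptions borrowed from the other snippets in this section, not part of this accessor:

// Minimal usage sketch (assumptions: `msg` is a GridCacheMessage, `nodeId` identifies the target node,
// and SYSTEM_POOL is the IO policy — all supplied by the caller's context).
try {
    sharedCtx.io().send(nodeId, msg, SYSTEM_POOL);
}
catch (IgniteCheckedException e) {
    U.error(log, "Failed to send message [nodeId=" + nodeId + ']', e);
}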
/**
 * @param grp Cache group.
 */
void onCacheGroupCreated(CacheGroupContext grp) {
    if (!grpHolders.containsKey(grp.groupId())) {
        cctx.io().addCacheGroupHandler(grp.groupId(), GridDhtAffinityAssignmentResponse.class,
            (IgniteBiInClosure<UUID, GridDhtAffinityAssignmentResponse>)this::processAffinityAssignmentResponse);
    }
}
/**
 * Sends response on failed message.
 *
 * @param nodeId Node ID.
 * @param res Response.
 * @param cctx Shared context.
 * @param plc Grid IO policy.
 */
private void sendResponseOnFailedMessage(UUID nodeId, GridCacheMessage res, GridCacheSharedContext cctx, byte plc) {
    try {
        cctx.io().send(nodeId, res, plc);
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to send response to node (is node still alive?) [nodeId=" + nodeId +
            ", res=" + res + ']', e);
    }
}
/**
 * @param sb String builder.
 * @param ctx Context.
 */
static void dumpPendingCacheMessages(StringBuilder sb, GridKernalContext ctx) {
    ctx.cache().context().io().dumpPendingMessages(sb);
}
/** {@inheritDoc} */
@Override public void cacheProcessorStarted() {
    cacheProc = ctx.cache();
    sharedCtx = cacheProc.context();

    sharedCtx.io().addCacheHandler(
        0,
        GridChangeGlobalStateMessageResponse.class,
        this::processChangeGlobalStateResponse
    );
}
/** {@inheritDoc} */
@Override public void stop(boolean cancel) throws IgniteCheckedException {
    super.stop(cancel);

    if (sharedCtx != null)
        sharedCtx.io().removeHandler(false, 0, GridChangeGlobalStateMessageResponse.class);

    ctx.event().removeLocalEventListener(lsr, EVT_NODE_LEFT, EVT_NODE_FAILED);

    IgniteCheckedException stopErr = new IgniteCheckedException(
        "Node is stopping: " + ctx.igniteInstanceName());

    GridChangeGlobalStateFuture f = stateChangeFut.get();

    if (f != null)
        f.onDone(stopErr);
}
/**
 * Initializes affinity and rebalance I/O handlers.
 */
private void initializeIO() throws IgniteCheckedException {
    assert !recoveryMode.get() : "Couldn't initialize I/O handlers, recovery mode is on for group " + this;

    if (ccfg.getCacheMode() != LOCAL) {
        if (!ctx.kernalContext().clientNode()) {
            ctx.io().addCacheGroupHandler(groupId(), GridDhtAffinityAssignmentRequest.class,
                (IgniteBiInClosure<UUID, GridDhtAffinityAssignmentRequest>)this::processAffinityAssignmentRequest);
        }

        preldr = new GridDhtPreloader(this);

        preldr.start();
    }
    else
        preldr = new GridCachePreloaderAdapter(this);
}
/**
 * @param topVer Topology version.
 * @param desc Cache descriptor.
 * @return Cache holder.
 * @throws IgniteCheckedException If failed.
 */
private CacheGroupHolder groupHolder(AffinityTopologyVersion topVer, final CacheGroupDescriptor desc)
    throws IgniteCheckedException {
    CacheGroupHolder cacheGrp = grpHolders.get(desc.groupId());

    if (cacheGrp != null)
        return cacheGrp;

    final CacheGroupContext grp = cctx.cache().cacheGroup(desc.groupId());

    if (grp == null) {
        cctx.io().addCacheGroupHandler(desc.groupId(), GridDhtAffinityAssignmentResponse.class,
            new IgniteBiInClosure<UUID, GridDhtAffinityAssignmentResponse>() {
                @Override public void apply(UUID nodeId, GridDhtAffinityAssignmentResponse res) {
                    processAffinityAssignmentResponse(nodeId, res);
                }
            }
        );

        cacheGrp = CacheGroupHolder2.create(cctx, desc, topVer, null);
    }
    else
        cacheGrp = new CacheGroupHolder1(grp, null);

    CacheGroupHolder old = grpHolders.put(desc.groupId(), cacheGrp);

    assert old == null : old;

    return cacheGrp;
}
/**
 * Method is called on the coordinator when the initial ExchangeFuture created on a client join event was preempted
 * from the exchange history because of the IGNITE_EXCHANGE_HISTORY_SIZE property.
 *
 * @param node Client node that should try to reconnect to the cluster.
 * @param msg Single message received from the client which didn't find the original ExchangeFuture.
 */
public void forceClientReconnect(ClusterNode node, GridDhtPartitionsSingleMessage msg) {
    Exception reconnectException = new IgniteNeedReconnectException(node, null);

    exchangeGlobalExceptions.put(node.id(), reconnectException);

    onDone(null, reconnectException);

    GridDhtPartitionsFullMessage fullMsg = createPartitionsMessage(true, false);

    fullMsg.setErrorsMap(exchangeGlobalExceptions);

    try {
        cctx.io().send(node, fullMsg, SYSTEM_POOL);

        if (log.isDebugEnabled())
            log.debug("Full message for reconnect client was sent to node: " + node + ", fullMsg: " + fullMsg);
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to send reconnect client message [node=" + node + ']', e);
    }
}
/**
 * Stops the cache group and removes its I/O handlers.
 */
void stopGroup() {
    offheapMgr.stop();

    if (isRecoveryMode())
        return;

    IgniteCheckedException err = new IgniteCheckedException("Failed to wait for topology update, cache (or node) is stopping.");

    ctx.evict().onCacheGroupStopped(this);

    aff.cancelFutures(err);

    preldr.onKernalStop();

    ctx.io().removeCacheGroupHandlers(grpId);
}
/**
 * @return IO manager.
 */
protected GridCacheIoManager cacheIoManager() {
    return grid(0).context().cache().context().io();
}
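For reference, tests typically use this accessor to register message handlers on the node's IO manager, mirroring the pattern in testSendBadMessage further below. A minimal sketch, where TestMessage is a hypothetical GridCacheMessage subclass used only for illustration:

// Minimal usage sketch (assumption: TestMessage is a hypothetical message class, not part of the source).
cacheIoManager().addCacheHandler(0, TestMessage.class, new CI2<UUID, GridCacheMessage>() {
    @Override public void apply(UUID nodeId, GridCacheMessage msg) {
        // React to the test message here.
    }
});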
/**
 * @param cctx Cache context.
 */
private void closeCache(GridCacheContext cctx) {
    if (cctx.affinityNode()) {
        GridCacheAdapter<?, ?> cache = caches.get(cctx.name());

        assert cache != null : cctx.name();

        jCacheProxies.put(cctx.name(), new IgniteCacheProxyImpl(cache.context(), cache, false));

        completeProxyInitialize(cctx.name());
    }
    else {
        cctx.gate().onStopped();

        // Do not close client cache while requests processing is in progress.
        sharedCtx.io().writeLock();

        try {
            if (!cctx.affinityNode() && cctx.transactional())
                sharedCtx.tm().rollbackTransactionsForCache(cctx.cacheId());

            completeProxyInitialize(cctx.name());

            jCacheProxies.remove(cctx.name());

            stopCacheSafely(cctx);
        }
        finally {
            sharedCtx.io().writeUnlock();
        }
    }
}
/**
 * @param nodeId Node id.
 * @param req Request.
 */
private void processPartitionCountersRequest(UUID nodeId, PartitionCountersNeighborcastRequest req) {
    applyPartitionsUpdatesCounters(req.updateCounters());

    try {
        ctx.io().send(nodeId, new PartitionCountersNeighborcastResponse(req.futId()), SYSTEM_POOL);
    }
    catch (ClusterTopologyCheckedException ignored) {
        if (txRecoveryMsgLog.isDebugEnabled())
            txRecoveryMsgLog.debug("Failed to send partition counters response, node left [node=" + nodeId + ']');
    }
    catch (IgniteCheckedException e) {
        U.error(txRecoveryMsgLog, "Failed to send partition counters response [node=" + nodeId + ']', e);
    }
}
cctx.io().sendNoRetry(node, m, SYSTEM_POOL);
/**
 * @param reqId Request ID.
 * @param initNodeId Initialize node id.
 * @param ex Exception.
 */
private void sendChangeGlobalStateResponse(UUID reqId, UUID initNodeId, Exception ex) {
    assert reqId != null;
    assert initNodeId != null;

    GridChangeGlobalStateMessageResponse res = new GridChangeGlobalStateMessageResponse(reqId, ex);

    try {
        if (log.isDebugEnabled()) {
            log.debug("Sending global state change response [nodeId=" + ctx.localNodeId() +
                ", topVer=" + ctx.discovery().topologyVersionEx() + ", res=" + res + "]");
        }

        if (ctx.localNodeId().equals(initNodeId))
            processChangeGlobalStateResponse(ctx.localNodeId(), res);
        else
            sharedCtx.io().send(initNodeId, res, SYSTEM_POOL);
    }
    catch (ClusterTopologyCheckedException e) {
        if (log.isDebugEnabled()) {
            log.debug("Failed to send change global state response, node left [node=" + initNodeId +
                ", res=" + res + ']');
        }
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to send change global state response [node=" + initNodeId + ", res=" + res + ']', e);
    }
}
@Override public void finish(UUID nodeId, Collection<GridCacheVersion> vers) {
    GridDhtTxOnePhaseCommitAckRequest ackReq = new GridDhtTxOnePhaseCommitAckRequest(vers);

    cctx.kernalContext().gateway().readLock();

    try {
        cctx.io().send(nodeId, ackReq, GridIoPolicy.SYSTEM_POOL);
    }
    catch (ClusterTopologyCheckedException ignored) {
        if (log.isDebugEnabled())
            log.debug("Failed to send one phase commit ack to backup node because it left grid: " + nodeId);
    }
    catch (IgniteCheckedException e) {
        log.error("Failed to send one phase commit ack to backup node [backup=" + nodeId + ']', e);
    }
    finally {
        cctx.kernalContext().gateway().readUnlock();
    }
}
};
/**
 * @throws Exception If failed.
 */
@Test
public void testSendBadMessage() throws Exception {
    try {
        startGrids(2);

        Ignite ignite0 = grid(0);
        Ignite ignite1 = grid(1);

        ((IgniteKernal)ignite0).context().cache().context().io().addCacheHandler(
            0, TestBadMessage.class, new CI2<UUID, GridCacheMessage>() {
                @Override public void apply(UUID nodeId, GridCacheMessage msg) {
                    throw new RuntimeException("Test bad message exception");
                }
            });

        ((IgniteKernal)ignite1).context().cache().context().io().addCacheHandler(
            0, TestBadMessage.class, new CI2<UUID, GridCacheMessage>() {
                @Override public void apply(UUID nodeId, GridCacheMessage msg) {
                    throw new RuntimeException("Test bad message exception");
                }
            });

        ((IgniteKernal)ignite0).context().cache().context().io().send(
            ((IgniteKernal)ignite1).localNode().id(), new TestBadMessage(), (byte)2);

        boolean res = failureLatch.await(5, TimeUnit.SECONDS);

        assertTrue(res);
    }
    finally {
        stopAllGrids();
    }
}
/**
 * @param nodeId Node ID.
 * @param req Request.
 */
private void processAffinityAssignmentRequest0(UUID nodeId, final GridDhtAffinityAssignmentRequest req) {
    AffinityTopologyVersion topVer = req.topologyVersion();

    if (log.isDebugEnabled())
        log.debug("Affinity is ready for topology version, will send response [topVer=" + topVer +
            ", node=" + nodeId + ']');

    AffinityAssignment assignment = aff.cachedAffinity(topVer);

    GridDhtAffinityAssignmentResponse res = new GridDhtAffinityAssignmentResponse(
        req.futureId(), grpId, topVer, assignment.assignment());

    if (aff.centralizedAffinityFunction()) {
        assert assignment.idealAssignment() != null;

        res.idealAffinityAssignment(assignment.idealAssignment());
    }

    if (req.sendPartitionsState())
        res.partitionMap(top.partitionMap(true));

    try {
        ctx.io().send(nodeId, res, AFFINITY_POOL);
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to send affinity assignment response to remote node [node=" + nodeId + ']', e);
    }
}
/**
 * Starts processing.
 */
public void init() {
    if (log.isInfoEnabled()) {
        log.info("Starting delivery of partition counters to remote nodes [txId=" + tx.nearXidVersion() +
            ", futId=" + futId + ']');
    }

    HashSet<UUID> siblings = siblingBackups();

    cctx.mvcc().addFuture(this, futId);

    for (UUID peer : siblings) {
        List<PartitionUpdateCountersMessage> cntrs = cctx.tm().txHandler()
            .filterUpdateCountersForBackupNode(tx, cctx.node(peer));

        if (F.isEmpty(cntrs))
            continue;

        MiniFuture miniFut = new MiniFuture(peer);

        try {
            cctx.io().send(peer, new PartitionCountersNeighborcastRequest(cntrs, futId), SYSTEM_POOL);

            add(miniFut);
        }
        catch (IgniteCheckedException e) {
            if (!(e instanceof ClusterTopologyCheckedException))
                log.warning("Failed to send partition counters to remote node [node=" + peer + ']', e);
            else
                logNodeLeft(peer);

            miniFut.onDone();
        }
    }

    markInitialized();
}
/**
 * @param nodeId Node id.
 */
private void cleanupRemoteContexts(UUID nodeId) {
    ClusterNode node = ctx.discovery().node(nodeId);

    if (node == null)
        return;

    GridDhtPartitionDemandMessage d = new GridDhtPartitionDemandMessage(
        // A negative rebalance id signals that the supply context
        // with the same positive id must be cleaned up at the supply node.
        -rebalanceId,
        this.topologyVersion(),
        grp.groupId());

    d.timeout(grp.config().getRebalanceTimeout());

    try {
        for (int idx = 0; idx < ctx.gridConfig().getRebalanceThreadPoolSize(); idx++) {
            d.topic(GridCachePartitionExchangeManager.rebalanceTopic(idx));

            ctx.io().sendOrderedMessage(node, GridCachePartitionExchangeManager.rebalanceTopic(idx),
                d.convertIfNeeded(node.version()), grp.ioPolicy(), grp.config().getRebalanceTimeout());
        }
    }
    catch (IgniteCheckedException ignored) {
        if (log.isDebugEnabled())
            log.debug("Failed to send failover context cleanup request to node " + nodeId);
    }
}