/**
 * Delegates the topology id lookup to the wrapped {@code actual} instance.
 */
@Override
public int getTopologyId() {
   return actual.getTopologyId();
}
/**
 * Delegates the topology id lookup to the wrapped {@code realOne} instance.
 */
@Override
public int getTopologyId() {
   return realOne.getTopologyId();
}
/**
 * Delegates the topology id lookup to the wrapped {@code delegate} instance.
 */
@Override
public int getTopologyId() {
   return delegate.getTopologyId();
}
private AbstractTopologyResponse getTopologyResponse(HotRodHeader header, Cache<Address, ServerAddress> addressCache, HotRodServer server) { // If clustered, set up a cache for topology information if (addressCache != null) { switch (header.clientIntel) { case Constants.INTELLIGENCE_TOPOLOGY_AWARE: case Constants.INTELLIGENCE_HASH_DISTRIBUTION_AWARE: // Use the request cache's topology id as the HotRod topologyId. AdvancedCache cache = server.getCacheInstance(UNKNOWN_TYPES, null, header.cacheName, addressCache.getCacheManager(), false, true); RpcManager rpcManager = cache.getRpcManager(); // Only send a topology update if the cache is clustered int currentTopologyId = rpcManager == null ? Constants.DEFAULT_TOPOLOGY_ID : rpcManager.getTopologyId(); // AND if the client's topology id is smaller than the server's topology id if (currentTopologyId >= Constants.DEFAULT_TOPOLOGY_ID && header.topologyId < currentTopologyId) return generateTopologyResponse(header, addressCache, server, currentTopologyId); } } return null; }
/**
 * Rolls back a transaction on all the cluster members.
 *
 * <p>The rollback command is broadcast to every member and also executed locally;
 * both completions are awaited before the transaction is forgotten.
 *
 * @param gtx the global transaction to roll back
 * @throws org.infinispan.commons.CacheException wrapping any failure from the
 *         remote invocation or the local execution
 */
public final void rollbackRemoteTransaction(GlobalTransaction gtx) {
   RpcManager rpcManager = cache.getRpcManager();
   CommandsFactory factory = cache.getComponentRegistry().getCommandsFactory();
   try {
      RollbackCommand rollbackCommand = factory.buildRollbackCommand(gtx);
      rollbackCommand.setTopologyId(rpcManager.getTopologyId());
      // Broadcast the rollback to all members, accepting only valid responses.
      CompletionStage<Void> cs = rpcManager
            .invokeCommandOnAll(rollbackCommand, validOnly(), rpcManager.getSyncRpcOptions());
      // Execute the rollback on this node as well; the command must be initialized
      // before a local invocation. NOTE(review): presumably invokeCommandOnAll does not
      // run the command locally here — confirm against RpcManager semantics.
      factory.initializeReplicableCommand(rollbackCommand, false);
      rollbackCommand.invokeAsync().join();
      // Wait for the remote invocations to finish too.
      cs.toCompletableFuture().join();
   } catch (Throwable throwable) {
      // Preserve the original failure, re-wrapped as a CacheException.
      throw Util.rewrapAsCacheException(throwable);
   } finally {
      // Always clean up the transaction state, even when rollback failed.
      forgetTransaction(gtx, rpcManager, factory);
   }
}
/**
 * Returns the current topology id of the named cache on the given cache manager.
 *
 * <p>NOTE(review): assumes the cache is clustered — a local-only cache has no
 * {@code RpcManager} and this would throw a {@code NullPointerException}; confirm callers.
 */
public static int getServerTopologyId(EmbeddedCacheManager cm, String cacheName) {
   RpcManager rpcManager = cm.getCache(cacheName).getAdvancedCache().getRpcManager();
   return rpcManager.getTopologyId();
}
private Object broadcastClearIfNotLocal(InvocationContext rCtx, VisitableCommand rCommand, Object rv) { FlagAffectedCommand flagCmd = (FlagAffectedCommand) rCommand; if ( !isLocalModeForced( flagCmd ) ) { // just broadcast the clear command - this is simplest! if ( rCtx.isOriginLocal() ) { ((TopologyAffectedCommand) rCommand).setTopologyId(rpcManager.getTopologyId()); if (isSynchronous(flagCmd)) { // the result value will be ignored, we don't need to propagate rv return asyncValue(rpcManager.invokeCommandOnAll(rCommand, VoidResponseCollector.ignoreLeavers(), syncRpcOptions)); } else { rpcManager.sendToAll(rCommand, DeliverOrder.NONE); } } } return rv; }
@Override public Object visitClearCommand(InvocationContext ctx, ClearCommand command) { Object retval = invokeNext(ctx, command); if (!isLocalModeForced(command)) { // just broadcast the clear command - this is simplest! if (ctx.isOriginLocal()) { command.setTopologyId(rpcManager.getTopologyId()); if (isSynchronous(command)) { return asyncValue(rpcManager.invokeCommandOnAll(command, VoidResponseCollector.ignoreLeavers(), syncRpcOptions)); } else { rpcManager.sendToAll(command, DeliverOrder.NONE); } } } return retval; }
/**
 * Sends a rollback command for the given transaction to all cluster members and,
 * on completion, marks the transaction state as completed (unsuccessfully).
 */
private void rollbackRemote(ComponentRegistry cr, CacheXid cacheXid, TxState state) {
   RollbackCommand rpcCommand = cr.getCommandsFactory().buildRollbackCommand(state.getGlobalTransaction());
   RpcManager rpcManager = cr.getComponent(RpcManager.class);
   rpcCommand.setTopologyId(rpcManager.getTopologyId());
   rpcManager.invokeCommandOnAll(rpcCommand, VoidResponseCollector.validOnly(), rpcManager.getSyncRpcOptions())
         .thenRun(() -> {
            //ignore exception so the rollback can be retried.
            //if a node doesn't find the remote transaction, it returns null.
            // NOTE(review): thenRun does NOT execute when the invocation completes
            // exceptionally, so a failed RPC never records the completion — if exceptions
            // really should be ignored here, whenComplete may be intended; confirm.
            TxFunction function = new SetCompletedTransactionFunction(false);
            rwMap.eval(cacheXid, function);
         });
}
private <T extends WriteCommand & RemoteLockCommand> CompletableFuture<?> invalidateAcrossCluster( T command, boolean isTransactional, Object key, Object keyLockOwner) { // increment invalidations counter if statistics maintained incrementInvalidations(); InvalidateCommand invalidateCommand; if (!isLocalModeForced(command)) { if (isTransactional) { invalidateCommand = commandInitializer.buildBeginInvalidationCommand( EnumUtil.EMPTY_BIT_SET, new Object[] { key }, keyLockOwner); } else { invalidateCommand = commandsFactory.buildInvalidateCommand(EnumUtil.EMPTY_BIT_SET, new Object[] {key }); } invalidateCommand.setTopologyId(rpcManager.getTopologyId()); if (log.isDebugEnabled()) { log.debug("Cache [" + rpcManager.getAddress() + "] replicating " + invalidateCommand); } if (isSynchronous(command)) { return rpcManager.invokeCommandOnAll(invalidateCommand, VoidResponseCollector.ignoreLeavers(), syncRpcOptions) .toCompletableFuture(); } else { rpcManager.sendToAll(invalidateCommand, DeliverOrder.NONE); } } return null; }
@Override public Object visitLockControlCommand(TxInvocationContext ctx, LockControlCommand command) { Object retVal = invokeNext( ctx, command ); if ( ctx.isOriginLocal() ) { //unlock will happen async as it is a best effort boolean sync = !command.isUnlock(); List<Address> members = getMembers(); ( (LocalTxInvocationContext) ctx ).remoteLocksAcquired(members); command.setTopologyId(rpcManager.getTopologyId()); if (sync) { return asyncValue(rpcManager.invokeCommandOnAll(command, VoidResponseCollector.ignoreLeavers(), syncRpcOptions)); } else { rpcManager.sendToAll(command, DeliverOrder.NONE); } } return retVal; }
/**
 * Verifies that invoking a command on a target collection containing a node that is
 * not in the cluster view fails with SuspectException, regardless of how many valid
 * targets accompany the suspect.
 */
public void testInvokeCommandCollectionSuspect() throws Exception {
   ClusteredGetCommand command = TestingUtil.extractCommandsFactory(cache(0)).buildClusteredGetCommand("key", 0, 0L);
   RpcManager rpcManager0 = cache(0).getAdvancedCache().getRpcManager();
   command.setTopologyId(rpcManager0.getTopologyId());

   // Suspect as the only target.
   CompletionStage<Map<Address, Response>> suspectOnly =
         rpcManager0.invokeCommand(Arrays.asList(SUSPECT), command, MapResponseCollector.validOnly(),
                                   rpcManager0.getSyncRpcOptions());
   Exceptions.expectExecutionException(SuspectException.class, suspectOnly.toCompletableFuture());

   // Suspect alongside the local node.
   CompletionStage<Map<Address, Response>> withSelf =
         rpcManager0.invokeCommand(Arrays.asList(address(0), SUSPECT), command, MapResponseCollector.validOnly(),
                                   rpcManager0.getSyncRpcOptions());
   Exceptions.expectExecutionException(SuspectException.class, withSelf.toCompletableFuture());

   // Suspect alongside the local node and a remote node.
   CompletionStage<Map<Address, Response>> withSelfAndRemote =
         rpcManager0.invokeCommand(Arrays.asList(address(0), address(1), SUSPECT), command,
                                   MapResponseCollector.validOnly(), rpcManager0.getSyncRpcOptions());
   Exceptions.expectExecutionException(SuspectException.class, withSelfAndRemote.toCompletableFuture());
}
/**
 * Verifies collection-targeted command invocation: a missing topology id is rejected,
 * the local node is skipped (empty response), and remote nodes respond.
 */
public void testInvokeCommandCollection() throws Exception {
   ClusteredGetCommand command = TestingUtil.extractCommandsFactory(cache(0)).buildClusteredGetCommand("key", 0, 0L);
   RpcManager rpcManager0 = cache(0).getAdvancedCache().getRpcManager();

   // Without a topology id the invocation must be rejected up front.
   Exceptions.expectException(IllegalArgumentException.class,
         () -> rpcManager0.invokeCommand(Arrays.asList(address(0)), command, SingleResponseCollector.validOnly(),
                                         rpcManager0.getSyncRpcOptions()));
   command.setTopologyId(rpcManager0.getTopologyId());

   // Only the local node targeted: nothing is actually sent, so no responses.
   CompletionStage<Map<Address, Response>> localOnly =
         rpcManager0.invokeCommand(Arrays.asList(address(0)), command, MapResponseCollector.validOnly(),
                                   rpcManager0.getSyncRpcOptions());
   assertResponse(Collections.emptyMap(), localOnly);

   // A single remote node responds.
   CompletionStage<Map<Address, Response>> remoteOnly =
         rpcManager0.invokeCommand(Arrays.asList(address(1)), command, MapResponseCollector.validOnly(),
                                   rpcManager0.getSyncRpcOptions());
   assertResponse(Collections.singletonMap(address(1), SUCCESSFUL_EMPTY_RESPONSE), remoteOnly);

   // Local + remote: only the remote response is collected.
   CompletionStage<Map<Address, Response>> localAndRemote =
         rpcManager0.invokeCommand(Arrays.asList(address(0), address(1)), command, MapResponseCollector.validOnly(),
                                   rpcManager0.getSyncRpcOptions());
   assertResponse(Collections.singletonMap(address(1), SUCCESSFUL_EMPTY_RESPONSE), localAndRemote);
}
/**
 * Verifies per-target command invocation via {@code invokeCommands}: a missing topology id
 * is rejected, the local node is skipped, and every remote target contributes a response.
 */
public void testInvokeCommands() throws Exception {
   ClusteredGetCommand command = TestingUtil.extractCommandsFactory(cache(0)).buildClusteredGetCommand("key", 0, 0L);
   RpcManager rpcManager0 = cache(0).getAdvancedCache().getRpcManager();

   // Without a topology id the invocation must be rejected up front.
   Exceptions.expectException(IllegalArgumentException.class, () -> {
      rpcManager0.invokeCommands(Arrays.asList(address(1)), a -> command, MapResponseCollector.validOnly(),
                                 rpcManager0.getSyncRpcOptions());
   });
   command.setTopologyId(rpcManager0.getTopologyId());

   // Only the local node targeted: no responses collected.
   CompletionStage<Map<Address, Response>> localOnly =
         rpcManager0.invokeCommands(Arrays.asList(address(0)), a -> command, MapResponseCollector.validOnly(),
                                    rpcManager0.getSyncRpcOptions());
   assertResponse(Collections.emptyMap(), localOnly);

   // A single remote target responds.
   CompletionStage<Map<Address, Response>> remoteOnly =
         rpcManager0.invokeCommands(Arrays.asList(address(1)), a -> command, MapResponseCollector.validOnly(),
                                    rpcManager0.getSyncRpcOptions());
   assertResponse(Collections.singletonMap(address(1), SUCCESSFUL_EMPTY_RESPONSE), remoteOnly);

   // Local + one remote: only the remote response is collected.
   CompletionStage<Map<Address, Response>> localAndRemote =
         rpcManager0.invokeCommands(Arrays.asList(address(0), address(1)), a -> command,
                                    MapResponseCollector.validOnly(), rpcManager0.getSyncRpcOptions());
   assertResponse(Collections.singletonMap(address(1), SUCCESSFUL_EMPTY_RESPONSE), localAndRemote);

   // Local + two remotes: both remote responses are collected.
   CompletionStage<Map<Address, Response>> allThree =
         rpcManager0.invokeCommands(Arrays.asList(address(0), address(1), address(2)), a -> command,
                                    MapResponseCollector.validOnly(), rpcManager0.getSyncRpcOptions());
   assertResponse(makeMap(address(1), SUCCESSFUL_EMPTY_RESPONSE, address(2), SUCCESSFUL_EMPTY_RESPONSE), allThree);
}
/**
 * Verifies staggered command invocation: a missing topology id is rejected, a local-only
 * target yields {@code null}, and any remote target yields the first successful response.
 */
public void testInvokeCommandStaggered() throws Exception {
   ClusteredGetCommand command = TestingUtil.extractCommandsFactory(cache(0)).buildClusteredGetCommand("key", 0, 0L);
   RpcManager rpcManager0 = cache(0).getAdvancedCache().getRpcManager();

   // Without a topology id the invocation must be rejected up front.
   Exceptions.expectException(IllegalArgumentException.class,
         () -> rpcManager0.invokeCommandStaggered(Arrays.asList(address(0)), command,
                                                  SingleResponseCollector.validOnly(),
                                                  rpcManager0.getSyncRpcOptions()));
   command.setTopologyId(rpcManager0.getTopologyId());

   // Local node only: nothing is sent, the collected response is null.
   CompletionStage<ValidResponse> localOnly =
         rpcManager0.invokeCommandStaggered(Arrays.asList(address(0)), command,
                                            SingleResponseCollector.validOnly(), rpcManager0.getSyncRpcOptions());
   assertResponse(null, localOnly);

   // Single remote target responds.
   CompletionStage<ValidResponse> remoteOnly =
         rpcManager0.invokeCommandStaggered(Arrays.asList(address(1)), command,
                                            SingleResponseCollector.validOnly(), rpcManager0.getSyncRpcOptions());
   assertResponse(SUCCESSFUL_EMPTY_RESPONSE, remoteOnly);

   // Local + remote.
   CompletionStage<ValidResponse> localAndRemote =
         rpcManager0.invokeCommandStaggered(Arrays.asList(address(0), address(1)), command,
                                            SingleResponseCollector.validOnly(), rpcManager0.getSyncRpcOptions());
   assertResponse(SUCCESSFUL_EMPTY_RESPONSE, localAndRemote);

   // Local + two remotes.
   CompletionStage<ValidResponse> allThree =
         rpcManager0.invokeCommandStaggered(Arrays.asList(address(0), address(1), address(2)), command,
                                            SingleResponseCollector.validOnly(), rpcManager0.getSyncRpcOptions());
   assertResponse(SUCCESSFUL_EMPTY_RESPONSE, allThree);
}
/**
 * Verifies that invoking a command on all members fails with SuspectException when the
 * cache topology contains a node that is not part of the JGroups cluster view.
 * The original topology is restored in a finally block so other tests are unaffected.
 */
public void testInvokeCommandOnAllSuspect() throws Exception {
   DistributionManager distributionManager = cache(0).getAdvancedCache().getDistributionManager();
   CacheTopology initialTopology = distributionManager.getCacheTopology();
   // Sanity check: the cluster must be stable before we tamper with the topology.
   assertEquals(CacheTopology.Phase.NO_REBALANCE, initialTopology.getPhase());
   try {
      ClusteredGetCommand command = TestingUtil.extractCommandsFactory(cache(0)).buildClusteredGetCommand("key", 0, 0L);
      RpcManager rpcManager0 = cache(0).getAdvancedCache().getRpcManager();
      // Add a node to the cache topology, but not to the JGroups cluster view
      List<Address> newMembers = new ArrayList<>(initialTopology.getMembers());
      newMembers.add(SUSPECT);
      ConsistentHash newCH = new ReplicatedConsistentHashFactory().create(MurmurHash3.getInstance(), 1, 1, newMembers, null);
      // Same topology/rebalance ids as the initial topology, just with the extra member.
      CacheTopology suspectTopology = new CacheTopology(initialTopology.getTopologyId(), initialTopology.getRebalanceId(), newCH, null, null, CacheTopology.Phase.NO_REBALANCE, newCH.getMembers(), null);
      distributionManager.setCacheTopology(suspectTopology);
      command.setTopologyId(rpcManager0.getTopologyId());
      // invokeCommandOnAll targets the cache topology members, including the fake node,
      // which is not reachable -> the whole invocation fails with SuspectException.
      CompletionStage<Map<Address, Response>> stage1 = rpcManager0.invokeCommandOnAll(command, MapResponseCollector.validOnly(), rpcManager0.getSyncRpcOptions());
      Exceptions.expectExecutionException(SuspectException.class, stage1.toCompletableFuture());
   } finally {
      // Always restore the real topology, even if the assertions above fail.
      distributionManager.setCacheTopology(initialTopology);
   }
}
/**
 * Verifies cluster-wide command invocation: a missing topology id is rejected, and every
 * remote member (but not the local node) contributes a response.
 */
public void testInvokeCommandOnAll() throws Exception {
   ClusteredGetCommand command = TestingUtil.extractCommandsFactory(cache(0)).buildClusteredGetCommand("key", 0, 0L);
   RpcManager rpcManager0 = cache(0).getAdvancedCache().getRpcManager();

   // Without a topology id the invocation must be rejected up front.
   Exceptions.expectException(IllegalArgumentException.class,
         () -> rpcManager0.invokeCommandOnAll(command, SingleResponseCollector.validOnly(),
                                              rpcManager0.getSyncRpcOptions()));
   command.setTopologyId(rpcManager0.getTopologyId());

   // Both remote members respond; the local node is excluded from the map.
   CompletionStage<Map<Address, Response>> allMembers =
         rpcManager0.invokeCommandOnAll(command, MapResponseCollector.validOnly(),
                                        rpcManager0.getSyncRpcOptions());
   assertResponse(makeMap(address(1), SUCCESSFUL_EMPTY_RESPONSE, address(2), SUCCESSFUL_EMPTY_RESPONSE), allMembers);
}
public void testNewTopologySentAfterOverlappingMerge() { TestingUtil.waitForNoRebalanceAcrossManagers(managers()); int initialTopology = advancedCache(0).getRpcManager().getTopologyId(); expectCompleteTopology(client, initialTopology); PartitionDescriptor p1 = new PartitionDescriptor(0); // isolatePartitions will always result in a CR fail as Node 0 tries to contact Node 1 in order to receive segments // which is not possible as all messages received by Node 1 from Node 0 are discarded by the DISCARD protocol. // Therefore, it is necessary for the state transfer timeout to be < then the timeout utilised by TestingUtil::waitForNoRebalance isolatePartition(p1.getNodes()); eventuallyEquals(1, () -> advancedCache(0).getDistributionManager().getCacheTopology().getActualMembers().size()); eventuallyExpectPartialTopology(client, initialTopology + 1); partition(0).merge(partition(1)); int finalTopologyId = initialTopology + (partitionHandling == PartitionHandling.DENY_READ_WRITES ? 2 : 7); eventuallyExpectCompleteTopology(client, finalTopologyId); // Check that we got the number of topology updates to NO_REBALANCE right // With DENY_READ_WRITES: // T+1: DEGRADED_MODE in partition [A] // T+2: back to AVAILABLE // With ALLOW_READ_WRITES: // With ALLOW_READ_WRITES: // T+2: CONFLICT_RESOLUTION, preferred CH: owners = (1) [test-NodeA-22368: 256+0] // T+3: NO_REBALANCE update topology after CR and before rebalance begins // T+4:READ_OLD (rebalance starts), T+5:READ_ALL, T+6:READ_NEW, T+7: NO_REBALANCE LocalizedCacheTopology newTopology = advancedCache(0).getDistributionManager().getCacheTopology(); assertEquals(CacheTopology.Phase.NO_REBALANCE, newTopology.getPhase()); }
/**
 * Verifies single-target command invocation: a missing topology id is rejected, the local
 * node yields {@code null}, a remote node responds, and an unreachable node raises
 * SuspectException.
 */
public void testInvokeCommand1() throws Exception {
   ClusteredGetCommand command = TestingUtil.extractCommandsFactory(cache(0)).buildClusteredGetCommand("key", 0, 0L);
   RpcManager rpcManager0 = cache(0).getAdvancedCache().getRpcManager();

   // Without a topology id the invocation must be rejected up front.
   Exceptions.expectException(IllegalArgumentException.class,
         () -> rpcManager0.invokeCommand(address(0), command, SingleResponseCollector.validOnly(),
                                         rpcManager0.getSyncRpcOptions()));
   command.setTopologyId(rpcManager0.getTopologyId());

   // Targeting the local node: nothing is sent, the collected response is null.
   CompletionStage<ValidResponse> localTarget =
         rpcManager0.invokeCommand(address(0), command, SingleResponseCollector.validOnly(),
                                   rpcManager0.getSyncRpcOptions());
   assertResponse(null, localTarget);

   // Targeting a live remote node succeeds.
   CompletionStage<ValidResponse> remoteTarget =
         rpcManager0.invokeCommand(address(1), command, SingleResponseCollector.validOnly(),
                                   rpcManager0.getSyncRpcOptions());
   assertResponse(SUCCESSFUL_EMPTY_RESPONSE, remoteTarget);

   // Targeting a node outside the cluster view fails with SuspectException.
   CompletionStage<ValidResponse> suspectTarget =
         rpcManager0.invokeCommand(SUSPECT, command, SingleResponseCollector.validOnly(),
                                   rpcManager0.getSyncRpcOptions());
   Exceptions.expectExecutionException(SuspectException.class, suspectTarget.toCompletableFuture());
}
public void testNewTopologySentAfterCleanMerge() { TestingUtil.waitForNoRebalanceAcrossManagers(managers()); int initialTopology = advancedCache(0).getRpcManager().getTopologyId(); expectCompleteTopology(client, initialTopology); PartitionDescriptor p0 = new PartitionDescriptor(0); PartitionDescriptor p1 = new PartitionDescriptor(1); splitCluster(p0.getNodes(), p1.getNodes()); eventuallyEquals(1, () -> advancedCache(0).getDistributionManager().getCacheTopology().getActualMembers().size()); eventuallyEquals(1, () -> advancedCache(1).getDistributionManager().getCacheTopology().getActualMembers().size()); expectPartialTopology(client, initialTopology + 1); partition(0).merge(partition(1)); int finalTopologyId = initialTopology + (partitionHandling == PartitionHandling.DENY_READ_WRITES ? 4 : 8); eventuallyExpectCompleteTopology(client, finalTopologyId); // Check that we got the number of topology updates to NO_REBALANCE right // With DENY_READ_WRITES: // T+1: DEGRADED_MODE in both partitions // T+3: merged, still DEGRADED_MODE // T+4: back to AVAILABLE // With ALLOW_READ_WRITES: // T+2: NO_REBALANCE in partition [B] before merge // T+3: CONFLICT_RESOLUTION, preferred CH: owners = (1) [test-NodeA-22368: 256+0] // T+4: NO_REBALANCE update topology after CR and before rebalance begins // T+5:READ_OLD (rebalance starts), T+6:READ_ALL, T+7:READ_NEW, T+8: NO_REBALANCE LocalizedCacheTopology newTopology = advancedCache(0).getDistributionManager().getCacheTopology(); assertEquals(CacheTopology.Phase.NO_REBALANCE, newTopology.getPhase()); assertEquals(finalTopologyId, newTopology.getTopologyId()); }