/**
 * Builds a human-readable dump of the cluster: the server count, one line per
 * server, and — for servers backed by a {@link MemoryRaftLog} — the log entries.
 *
 * @return the multi-line summary string.
 */
public String printAllLogs() {
  final StringBuilder sb = new StringBuilder("\n#servers = " + servers.size() + "\n");
  for (RaftServerImpl server : iterateServerImpls()) {
    sb.append("  ");
    sb.append(server).append("\n");

    final RaftLog raftLog = server.getState().getLog();
    // Only in-memory logs expose their entries as a string.
    if (raftLog instanceof MemoryRaftLog) {
      sb.append("    ");
      sb.append(((MemoryRaftLog) raftLog).getEntryString());
    }
  }
  return sb.toString();
}
/**
 * Renders the cluster state for debugging: number of servers, a line for each
 * server, and the entry dump of any {@link MemoryRaftLog}-backed server.
 *
 * @return the formatted summary.
 */
public String printAllLogs() {
  final StringBuilder out = new StringBuilder("\n#servers = " + servers.size() + "\n");
  for (RaftServerImpl impl : iterateServerImpls()) {
    out.append("  ");
    out.append(impl).append("\n");

    final RaftLog raftLog = impl.getState().getLog();
    if (raftLog instanceof MemoryRaftLog) {
      // In-memory logs can render their entries directly.
      out.append("    ");
      out.append(((MemoryRaftLog) raftLog).getEntryString());
    }
  }
  return out.toString();
}
static void runTestTransactionContextIsPassedBack(MiniRaftCluster cluster) throws Throwable { // tests that the TrxContext set by the StateMachine in Leader is passed back to the SM int numTrx = 100; final RaftTestUtil.SimpleMessage[] messages = RaftTestUtil.SimpleMessage.create(numTrx); try(final RaftClient client = cluster.createClient()) { for (RaftTestUtil.SimpleMessage message : messages) { client.send(message); } } // TODO: there eshould be a better way to ensure all data is replicated and applied Thread.sleep(cluster.getMaxTimeout() + 100); for (RaftServerImpl raftServer : cluster.iterateServerImpls()) { final SMTransactionContext sm = SMTransactionContext.get(raftServer); sm.rethrowIfException(); assertEquals(numTrx, sm.numApplied.get()); } // check leader RaftServerImpl raftServer = cluster.getLeader(); // assert every transaction has obtained context in leader final SMTransactionContext sm = SMTransactionContext.get(raftServer); List<Long> ll = sm.applied.stream().collect(Collectors.toList()); Collections.sort(ll); assertEquals(ll.toString(), ll.size(), numTrx); for (int i=0; i < numTrx; i++) { assertEquals(ll.toString(), Long.valueOf(i+1), ll.get(i)); } }
// Corrupts the open log segment's header one byte position at a time and checks,
// via assertCorruptedLogHeader, how each server handles the corrupted file; after
// each check the server must have no open log files left.
void runTestRestartWithCorruptedLogHeader(MiniRaftCluster cluster) throws Exception {
  RaftTestUtil.waitForLeader(cluster);
  // Retry until every server has created its open log segment file.
  for(RaftServerImpl impl : cluster.iterateServerImpls()) {
    JavaUtils.attempt(() -> getOpenLogFile(impl), 10,
      TimeDuration.valueOf(100, TimeUnit.MILLISECONDS),
      impl.getId() + ": wait for log file creation", LOG);
  }

  // shutdown all servers
  cluster.getServers().forEach(RaftServerProxy::close);

  for(RaftServerImpl impl : cluster.iterateServerImpls()) {
    // NOTE(review): this attempt(...) passes a bare 100 where the loop above passes
    // a TimeDuration — presumably a millisecond-overload of JavaUtils.attempt;
    // confirm the overload exists and consider unifying the two call styles.
    final File openLogFile = JavaUtils.attempt(() -> getOpenLogFile(impl),
      10, 100, impl.getId() + "-getOpenLogFile", LOG);
    // Try every byte offset within the segment header.
    for(int i = 0; i < SegmentedRaftLogFormat.getHeaderLength(); i++) {
      assertCorruptedLogHeader(impl.getId(), openLogFile, i, cluster, LOG);
      // After handling the corruption there must be no open log files remaining.
      Assert.assertTrue(getOpenLogFiles(impl).isEmpty());
    }
  }
}
public void assertServer(MiniRaftCluster cluster, ClientId clientId, long callId, long oldLastApplied) throws Exception { long leaderApplied = cluster.getLeader().getState().getLastAppliedIndex(); // make sure retry cache has the entry for (RaftServerImpl server : cluster.iterateServerImpls()) { LOG.info("check server " + server.getId()); if (server.getState().getLastAppliedIndex() < leaderApplied) { Thread.sleep(1000); } Assert.assertEquals(2, RaftServerTestUtil.getRetryCacheSize(server)); Assert.assertNotNull(RaftServerTestUtil.getRetryEntry(server, clientId, callId)); // make sure there is only one log entry committed Assert.assertEquals(1, count(server.getState().getLog(), oldLastApplied + 1)); } }
private static void waitAndCheckNewConf(MiniRaftCluster cluster, RaftPeer[] peers, Collection<RaftPeerId> deadPeers) { LOG.info("waitAndCheckNewConf: peers={}, deadPeers={}, {}", Arrays.asList(peers), deadPeers, cluster.printServers()); Assert.assertNotNull(cluster.getLeader()); int numIncluded = 0; int deadIncluded = 0; final RaftConfiguration current = RaftConfiguration.newBuilder() .setConf(peers).setLogEntryIndex(0).build(); for (RaftServerImpl server : cluster.iterateServerImpls()) { LOG.info("checking {}", server); if (deadPeers != null && deadPeers.contains(server.getId())) { if (current.containsInConf(server.getId())) { deadIncluded++; } continue; } if (current.containsInConf(server.getId())) { numIncluded++; Assert.assertTrue(server.getRaftConf().isStable()); Assert.assertTrue(server.getRaftConf().hasNoChange(peers)); } else if (server.isAlive()) { // The server is successfully removed from the conf // It may not be shutdown since it may not be able to talk to the new leader (who is not in its conf). Assert.assertTrue(server.getRaftConf().isStable()); Assert.assertFalse(server.getRaftConf().containsInConf(server.getId())); } } Assert.assertEquals(peers.length, numIncluded + deadIncluded); }
/**
 * Verifies the cluster configuration after a membership change: every in-conf
 * server is stable with no pending change, every out-of-conf server has shut
 * down, and all expected peers are accounted for (counting dead ones).
 *
 * @param cluster the mini cluster to inspect.
 * @param peers the peers expected in the new configuration.
 * @param deadPeers string ids of servers known to be down, or null if none.
 * @throws Exception never thrown directly; declared for caller convenience.
 */
private static void waitAndCheckNewConf(MiniRaftCluster cluster, RaftPeer[] peers,
    Collection<String> deadPeers) throws Exception {
  LOG.info(cluster.printServers());
  Assert.assertNotNull(cluster.getLeader());

  int includedCount = 0;
  int deadCount = 0;
  final RaftConfiguration expectedConf = RaftConfiguration.newBuilder()
      .setConf(peers).setLogEntryIndex(0).build();
  for (RaftServerImpl impl : cluster.iterateServerImpls()) {
    final boolean inConf = expectedConf.containsInConf(impl.getId());

    // Skip assertions for dead servers; only count them toward the total.
    if (deadPeers != null && deadPeers.contains(impl.getId().toString())) {
      if (inConf) {
        deadCount++;
      }
      continue;
    }

    if (inConf) {
      includedCount++;
      Assert.assertTrue(impl.getRaftConf().isStable());
      Assert.assertTrue(impl.getRaftConf().hasNoChange(peers));
    } else {
      // A server removed from the conf must have shut itself down.
      Assert.assertFalse(impl.getId() + " is still running: " + impl, impl.isAlive());
    }
  }
  Assert.assertEquals(peers.length, includedCount + deadCount);
}
for(RaftServerImpl s : cluster.iterateServerImpls()) { if (!s.getId().equals(leaderId)) { ids.add(s.getId());
for (RaftServerImpl server : cluster.iterateServerImpls()) { LOG.info("check server " + server.getId()); if (server.getState().getLastAppliedIndex() < leaderApplied) {
for (RaftServerImpl server : cluster.iterateServerImpls()) { LOG.info("check server " + server.getId()); if (server.getState().getLastAppliedIndex() < leaderApplied) {
for (RaftServerImpl server : cluster.iterateServerImpls()) { LOG.info("check server " + server.getId()); if (server.getState().getLastAppliedIndex() < leaderApplied) {