/**
 * Shuts down all of the JournalNodes in the cluster.
 *
 * Stop failures are recorded rather than aborting the loop, so every
 * node gets a chance to shut down before the error is reported.
 *
 * @throws IOException if one or more nodes failed to stop
 */
public void shutdown() throws IOException {
  boolean failed = false;
  for (JNInfo info : nodes) {
    try {
      info.node.stopAndJoin(0);
    } catch (InterruptedException ie) {
      // Don't swallow the interrupt: restore the thread's interrupt
      // status so callers can observe it after shutdown() returns.
      Thread.currentThread().interrupt();
      failed = true;
      LOG.warn("Interrupted while stopping journal node " + info.node, ie);
    } catch (Exception e) {
      failed = true;
      LOG.warn("Unable to stop journal node " + info.node, e);
    }
  }
  if (failed) {
    throw new IOException("Unable to shut down. Check log for details");
  }
}
/**
 * Restarts the JournalNode at the given index, binding the replacement
 * node to the same RPC and HTTP(S) addresses the original node used.
 *
 * @param i index of the journal node to restart
 * @throws InterruptedException if interrupted while stopping the old node
 * @throws IOException if the node cannot be restarted
 */
public void restartJournalNode(int i) throws InterruptedException, IOException {
  JNInfo info = nodes[i];
  JournalNode oldNode = info.node;
  Configuration conf = new Configuration(oldNode.getConf());
  if (oldNode.isStarted()) {
    oldNode.stopAndJoin(0);
  }

  // Pin the RPC address so the restarted node is reachable at the
  // same endpoint as before.
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY,
      NetUtils.getHostPortString(info.ipcAddr));

  // Likewise reuse the original HTTP or HTTPS endpoint, stripping the
  // scheme prefix to get the host:port form the config keys expect.
  final String uri = info.httpServerURI;
  final String httpPrefix = "http://";
  final String httpsPrefix = "https://";
  if (uri.startsWith(httpPrefix)) {
    conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
        uri.substring(httpPrefix.length()));
  } else if (uri.startsWith(httpsPrefix)) {
    conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
        uri.substring(httpsPrefix.length()));
  }

  JournalNode replacement = new JournalNode();
  replacement.setConf(conf);
  replacement.start();
  info.node = replacement;
}
// Take journal node 2 out of service (exit code 0), presumably leaving
// the remaining nodes as a bare quorum — confirm against enclosing test.
cluster.getJournalNode(2).stopAndJoin(0);
// NOTE(review): JN 2 is stopped twice in this span; these look like two
// separate test snippets collapsed together — verify against the full file.
cluster.getJournalNode(2).stopAndJoin(0);
// Stub the spy for logger 1 so its acceptRecovery() RPC throws an
// injected IOException, regardless of the segment state or URL passed.
injectIOE().when(spies.get(1)).acceptRecovery(
    Mockito.<SegmentStateProto>any(), Mockito.<URL>any());
cluster.getJournalNode(2).stopAndJoin(0);
/**
 * Common driver for out-of-sync recovery tests: after the loggers are
 * prepared by setupLoggers345(), the JN at {@code missingOnRecoveryIdx}
 * is stopped so it takes no part in recovery; a fresh QJM then runs
 * segment recovery, which is checked to finalize the segment starting
 * at txid 1 up through {@code expectedRecoveryTxnId}.
 *
 * @param missingOnRecoveryIdx index of the JN absent during recovery
 * @param expectedRecoveryTxnId last txid the recovered segment should hold
 */
private void doOutOfSyncTest(int missingOnRecoveryIdx, long expectedRecoveryTxnId) throws Exception {
  setupLoggers345();
  // Sanity check: the in-progress segment beginning at txid 1 must be
  // present on a quorum of nodes before we knock one out.
  QJMTestUtil.assertExistsInQuorum(cluster, NNStorage.getInProgressEditsFileName(1));
  // Shut down the specified JN, so it's not present during recovery.
  cluster.getJournalNode(missingOnRecoveryIdx).stopAndJoin(0);
  // Make a new QJM
  qjm = createSpyingQJM();
  qjm.recoverUnfinalizedSegments();
  checkRecovery(cluster, 1, expectedRecoveryTxnId);
}
// Stop JN 2, then JN 0, then bring JN 2 back so the node set changes
// between segments — confirm intent against the enclosing test.
cluster.getJournalNode(2).stopAndJoin(0);
cluster.getJournalNode(0).stopAndJoin(0);
cluster.restartJournalNode(2);
/**
 * Regression test for HDFS-3725. One of the journal nodes is down
 * during the writing of one segment, then comes back up later to
 * take part in a later segment. Thus, its local edits are
 * not a contiguous sequence. This should be handled correctly.
 */
@Test
public void testOneJNMissingSegments() throws Exception {
  // Segment of 3 txns starting at 1 (i.e. [1,3]) with all JNs up.
  writeSegment(cluster, qjm, 1, 3, true);
  waitForAllPendingCalls(qjm.getLoggerSetForTests());
  // JN 0 is down while segment [4,6] is written...
  cluster.getJournalNode(0).stopAndJoin(0);
  writeSegment(cluster, qjm, 4, 3, true);
  waitForAllPendingCalls(qjm.getLoggerSetForTests());
  // ...but returns for segment [7,9], leaving a gap in its local edits.
  cluster.restartJournalNode(0);
  writeSegment(cluster, qjm, 7, 3, true);
  waitForAllPendingCalls(qjm.getLoggerSetForTests());
  // Stop JN 1 so the reader must combine JN 0 (which has the gap)
  // with JN 2 to cover the full range.
  cluster.getJournalNode(1).stopAndJoin(0);

  QuorumJournalManager readerQjm = createSpyingQJM();
  List<EditLogInputStream> streams = Lists.newArrayList();
  try {
    // All 9 txns should still be readable despite JN 0's missing segment.
    readerQjm.selectInputStreams(streams, 1, false);
    verifyEdits(streams, 1, 9);
  } finally {
    // Close the streams even if verification fails, then the QJM itself.
    IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
    readerQjm.close();
  }
}
// Stop all three journal nodes, losing quorum entirely.
cluster.getJournalNode(0).stopAndJoin(0);
cluster.getJournalNode(1).stopAndJoin(0);
cluster.getJournalNode(2).stopAndJoin(0);
// Take journal node 0 out of service (exit code 0).
cluster.getJournalNode(0).stopAndJoin(0);
// Stop the JN that is missing a segment plus JN 2 — presumably leaving
// only one node up; confirm against the enclosing test.
cluster.getJournalNode(nodeMissingSegment).stopAndJoin(0);
cluster.getJournalNode(2).stopAndJoin(0);