/**
 * Closes this edit log and stops the background sync thread.
 * Order matters: the superclass close runs first, then the thread that
 * services sync requests is stopped.
 */
@Override public void close() { super.close(); stopSyncThread(); }
/** * Close file. * @param path * @param file */ private void closeFile(String path, INodeFile file) { assert hasWriteLock(); // file is closed getEditLog().logCloseFile(path, file); NameNode.stateChangeLog.debug("closeFile: {} with {} blocks is persisted" + " to the file system", path, file.getBlocks().length); }
private static INodesInPath createSingleDirectory(FSDirectory fsd, INodesInPath existing, byte[] localName, PermissionStatus perm) throws IOException { assert fsd.hasWriteLock(); existing = unprotectedMkdir(fsd, fsd.allocateNewInodeId(), existing, localName, perm, null, now()); if (existing == null) { return null; } final INode newNode = existing.getLastINode(); // Directory creation also count towards FilesCreated // to match count of FilesDeleted metric. NameNode.getNameNodeMetrics().incrFilesCreated(); String cur = existing.getPath(); fsd.getEditLog().logMkDir(cur, newNode); if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("mkdirs: created directory " + cur); } return existing; }
/** Write the batch of edits to edit log. */
public synchronized void journal(long firstTxId, int numTxns, byte[] data) {
  // Batches must be contiguous: this batch has to start exactly one past
  // the last transaction already written.
  final long nextTxId = getLastWrittenTxId() + 1;
  Preconditions.checkState(firstTxId == nextTxId,
      "received txid batch starting at %s but expected txid %s",
      firstTxId, nextTxId);
  // Advance our txid counter to the last id contained in this batch,
  // then write the raw batch bytes and force them out.
  setNextTxId(firstTxId + numTxns - 1);
  logEdit(data.length, data);
  logSync();
}
/** * Blocks until all ongoing edits have been synced to disk. * This differs from logSync in that it waits for edits that have been * written by other threads, not just edits from the calling thread. * * NOTE: this should be done while holding the FSNamesystem lock, or * else more operations can start writing while this is in progress. */ void logSyncAll() { // Make sure we're synced up to the most recent transaction ID. long lastWrittenTxId = getLastWrittenTxId(); LOG.info("logSyncAll toSyncToTxId=" + lastWrittenTxId + " lastSyncedTxid=" + synctxid + " mostRecentTxid=" + txid); logSync(lastWrittenTxId); lastWrittenTxId = getLastWrittenTxId(); LOG.info("Done logSyncAll lastWrittenTxId=" + lastWrittenTxId + " lastSyncedTxid=" + synctxid + " mostRecentTxid=" + txid); }
/**
 * Start a new log segment at {@code segmentTxId}, write the
 * START_LOG_SEGMENT marker op as the segment's first edit, and sync it.
 *
 * @param segmentTxId first transaction id of the new segment
 * @param layoutVersion layout version to write the segment with
 * @throws IOException if the segment cannot be started or written
 */
synchronized void startLogSegmentAndWriteHeaderTxn(final long segmentTxId, int layoutVersion) throws IOException {
  startLogSegment(segmentTxId, layoutVersion);
  // The first op of every segment is the segment-start marker.
  logEdit(LogSegmentOp.getInstance(cache.get(), FSEditLogOpCodes.OP_START_LOG_SEGMENT));
  logSync();
}
// NOTE(review): fragment of a shared-edits initialization/copy routine; the
// enclosing method, loop, and conditionals are not visible in this extract.
// Set up a fresh edit log backed by the new shared storage and recover any
// segments left open by a previous writer.
FSEditLog newSharedEditLog = new FSEditLog(conf, newSharedStorage, sharedEditsUris);
newSharedEditLog.initJournalsForWrite();
newSharedEditLog.recoverUnclosedStreams();
// Read source edits starting just past the last checkpointed transaction.
streams = sourceEditLog.selectInputStreams(fromTxId + 1, 0);
newSharedEditLog.setNextTxId(fromTxId + 1);
newSharedEditLog.startLogSegment(op.txid, false, fsns.getEffectiveLayoutVersion());
segmentOpen = true;
// Copy the op into the new shared log.
newSharedEditLog.logEdit(op);
newSharedEditLog.endCurrentLogSegment(false);
LOG.debug("ending log segment because of END_LOG_SEGMENT op in " + stream);
newSharedEditLog.logSync();
// NOTE(review): endCurrentLogSegment is invoked twice within this extract —
// verify against the full method whether both calls are on the same code path.
newSharedEditLog.endCurrentLogSegment(false);
segmentOpen = false;
FSEditLog.closeAllStreams(streams);
// NOTE(review): fragment of a test method; the enclosing try block is not
// visible in this extract.
// Write a replication-change op and force it to disk.
editLog.logSetReplication("fakefile", (short) 1);
editLog.logSync();
// Roll to a fresh segment, then write and sync another op in the new segment.
editLog.rollEditLog();
editLog.logSetReplication("fakefile", (short) 2);
editLog.logSync();
editLog.close();
} finally {
  // Always release the file system handle, even if the test body failed.
  if(fileSys != null) fileSys.close();
// NOTE(review): fragment of an edit-log generation routine; the statement
// this line completes, and the enclosing loops, are not visible here.
new FsPermission((short)0777));
// Log creation of the base directory.
INodeDirectory dirInode = new INodeDirectory(p, 0L);
editLog.logMkDir(BASE_PATH, dirInode);
long blockSize = 10;
BlockInfo[] blocks = new BlockInfo[blocksPerFile];
// Log creation of the current generated directory.
String currentDir = nameGenerator.getCurrentDir();
dirInode = new INodeDirectory(p, 0L);
editLog.logMkDir(currentDir, dirInode);
// Log an open/close pair for the file, then sync and remember the block
// id that was current at sync time.
editLog.logOpenFile(filePath, inode);
editLog.logCloseFile(filePath, inode);
editLog.logSync();
bidAtSync = currentBlockId;
// NOTE(review): fragment of a journal-failure test; the loop below is not
// closed within this extract, so indentation here is approximate.
editlog.initJournalsForWrite();
editlog.openForWrite();
for (int i = 2; i < TXNS_PER_ROLL; i++) {
  // Fill the first segment with no-op generation-stamp transactions.
  editlog.logGenerationStampV2((long) 0);
  editlog.logSync();
// Roll into a new segment, write one txn, then abort one journal and
// verify further writes still succeed on the remaining journals.
editlog.rollEditLog();
editlog.logGenerationStampV2((long) i);
editlog.logSync();
editlog.getJournals().get(spec.logindex).abort();
editlog.logGenerationStampV2((long) i);
editlog.logSync();
editlog.close();
// NOTE(review): fragment from the interior of a segment-finalization method;
// the first line below completes an expression begun outside this extract.
", " + getLastWrittenTxId());
Preconditions.checkState(isSegmentOpen(), "Bad state: %s", state);
// Write the segment-end marker as the final op of this segment.
logEdit(LogSegmentOp.getInstance(cache.get(), FSEditLogOpCodes.OP_END_LOG_SEGMENT));
// Sync everything written so far (including other threads' edits), then
// verify the last-written and last-synced txids agree.
logSyncAll();
printStatistics(true);
final long lastTxId = getLastWrittenTxId();
final long lastSyncedTxId = getSyncTxId();
Preconditions.checkArgument(lastTxId == lastSyncedTxId,
    "LastWrittenTxId %s is expected to be the same as lastSyncedTxId %s",
// NOTE(review): fragment of a Mockito-based test; the enclosing loop and
// if/else are not fully visible in this extract.
// Prevent the spy from actually finalizing segments on close.
doNothing().when(spyLog).endCurrentLogSegment(true);
spyLog.openForWrite();
assertTrue("should exist: " + inProgressFile, inProgressFile.exists());
// Record the byte offset at which the next transaction will be written.
long thisTxId = spyLog.getLastWrittenTxId() + 1;
offsetToTxId.put(trueOffset, thisTxId);
System.err.println("txid " + thisTxId + " at offset " + trueOffset);
spyLog.logDelete("path" + i, i, false);
spyLog.logSync();
spyLog.close();
} else if (fsel != null) {
  fsel.close();
public void run() { PermissionStatus p = namesystem.createFsOwnerPermissions( new FsPermission((short)0777)); FSEditLog editLog = namesystem.getEditLog(); for (int i = 0; i < numTransactions; i++) { INodeFileUnderConstruction inode = new INodeFileUnderConstruction( p, replication, blockSize, 0, "", "", null); editLog.logOpenFile("/filename" + i, inode); editLog.logCloseFile("/filename" + i, inode); editLog.logSync(); } } }
/**
 * Create an aborted in-progress log in the given directory, containing
 * only a specified number of "mkdirs" operations.
 *
 * @param editsLogDir directory in which to create the standalone log
 * @param numDirs number of mkdir ops to write (dir1 .. dirN)
 * @param firstTxId transaction id of the first edit
 * @param newInodeId inode id assigned to the first created directory;
 *                   subsequent directories get consecutive ids
 * @throws IOException if the log cannot be created or written
 */
public static void createAbortedLogWithMkdirs(File editsLogDir, int numDirs,
    long firstTxId, long newInodeId) throws IOException {
  FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
  editLog.setNextTxId(firstTxId);
  editLog.openForWrite();
  PermissionStatus perms = PermissionStatus.createImmutable(
      "fakeuser", "fakegroup", FsPermission.createImmutable((short)0755));
  for (int idx = 0; idx < numDirs; idx++) {
    // Directories are named dir1..dirN with consecutive inode ids.
    String dirName = "dir" + (idx + 1);
    INodeDirectory dir = new INodeDirectory(newInodeId + idx,
        DFSUtil.string2Bytes(dirName), perms, 0L);
    editLog.logMkDir("/" + dirName, dir);
  }
  // Sync the edits but abort rather than finalize, leaving an
  // in-progress segment on disk as the caller expects.
  editLog.logSync();
  editLog.abortCurrentLogSegment();
}
/**
 * Start a checkpoint on behalf of the given backup node.
 *
 * @param backupNode registration of the backup node requesting the checkpoint
 * @param activeNamenode registration of this (active) namenode
 * @return the command describing the checkpoint to perform
 * @throws IOException if the operation is not permitted or the
 *         checkpoint cannot be started
 */
NamenodeCommand startCheckpoint(NamenodeRegistration backupNode, NamenodeRegistration activeNamenode) throws IOException {
  checkOperation(OperationCategory.CHECKPOINT);
  writeLock();
  try {
    // checkOperation is repeated under the write lock, since state may
    // change between the unlocked check and lock acquisition.
    checkOperation(OperationCategory.CHECKPOINT);
    checkNameNodeSafeMode("Checkpoint not started");
    LOG.info("Start checkpoint for " + backupNode.getAddress());
    NamenodeCommand cmd = getFSImage().startCheckpoint(backupNode, activeNamenode, getEffectiveLayoutVersion());
    // Sync the edit log before handing control back to the backup node.
    getEditLog().logSync();
    return cmd;
  } finally {
    writeUnlock("startCheckpoint");
  }
}
/**
 * Rollback for rolling upgrade: discard post-upgrade edits, restore the
 * special rollback image as the current image, and purge stale checkpoints.
 *
 * @param discardSegmentTxId first txid of the edit segments to discard
 * @param ckptId id of the rollback checkpoint to restore
 * @throws IOException if any of the storage operations fail
 */
private void rollingRollback(long discardSegmentTxId, long ckptId) throws IOException {
  // discard unnecessary editlog segments starting from the given id
  this.editLog.discardSegments(discardSegmentTxId);
  // rename the special checkpoint to be the regular image
  renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE, true);
  // purge all the checkpoints after the marker
  archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId);
  // HDFS-7939: purge all old fsimage_rollback_*
  archivalManager.purgeCheckpoints(NameNodeFile.IMAGE_ROLLBACK);
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (HAUtil.isHAEnabled(conf, nameserviceId)) {
    // close the editlog since it is currently open for write
    this.editLog.close();
    // reopen the editlog for read
    this.editLog.initSharedJournalsForRead();
  }
}
// NOTE(review): fragment of a test loop; the enclosing loop over `i` is not
// visible in this extract.
// Re-open the log with a larger buffer, then roll and purge it.
editLog.setBufferCapacity(2048);
editLog.close();
editLog.open();
editLog.rollEditLog();
LOG.info("Roll complete " + i + ".");
editLog.purgeEditLog();
LOG.info("Complete purge " + i + ".");
// NOTE(review): fragment; the debug statement below is cut off mid-expression
// in this extract.
// Record the newly created file in the edit log; logRetryEntry marks the
// txn for the retry cache.
fsd.getEditLog().logOpenFile(src, newNode, overwrite, logRetryEntry);
if (NameNode.stateChangeLog.isDebugEnabled()) {
  NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: added " +
/**
 * Roll the edit log into a new segment and return a checkpoint signature
 * describing the resulting state.
 *
 * @param layoutVersion layout version to roll the log with
 * @return a signature of this image after the roll
 * @throws IOException if the roll or the storage update fails
 */
CheckpointSignature rollEditLog(int layoutVersion) throws IOException {
  getEditLog().rollEditLog(layoutVersion);
  // Record this log segment ID in all of the storage directories, so
  // we won't miss this log segment on a restart if the edits directories
  // go missing.
  storage.writeTransactionIdFileToStorage(getEditLog().getCurSegmentTxId());
  // Update NameDirSize metric after the roll.
  getStorage().updateNameDirSize();
  return new CheckpointSignature(this);
}
// NOTE(review): fragment — this is the body of an anonymous Callable; the
// submitting executor call that the trailing `}).get();` completes is not
// visible in this extract.
public Void call() {
  // Write a single replication-change op from this worker thread.
  log.logSetReplication(filename, (short)1);
  return null;
} }).get();