lastAppliedSN, gainPrimacySN); CompletableFuture<Void> future = client.submit(new JournalEntryCommand( JournalEntry.newBuilder().setSequenceNumber(gainPrimacySN).build())); try { future.get(5, TimeUnit.SECONDS);
/**
 * <code>optional .alluxio.proto.journal.AddMountPointEntry add_mount_point = 2;</code>
 *
 * <p>Returns a mutable builder for the nested add_mount_point message. Sets bit
 * 0x00000008 in bitField0_ so the field is considered present, and calls onChanged()
 * to notify parent builders.
 *
 * <p>NOTE: protobuf-generated code — do not hand-edit; regenerate from the .proto file.
 */
public alluxio.proto.journal.File.AddMountPointEntry.Builder getAddMountPointBuilder() {
  bitField0_ |= 0x00000008; // mark add_mount_point as set
  onChanged();
  return getAddMountPointFieldBuilder().getBuilder();
}
/**
 * <code>optional .alluxio.proto.journal.AsyncPersistRequestEntry async_persist_request = 16;</code>
 *
 * <p>Returns a mutable builder for the nested async_persist_request message. Sets bit
 * 0x00000010 in bitField0_ so the field is considered present, and calls onChanged()
 * to notify parent builders.
 *
 * <p>NOTE: protobuf-generated code — do not hand-edit; regenerate from the .proto file.
 */
public alluxio.proto.journal.File.AsyncPersistRequestEntry.Builder getAsyncPersistRequestBuilder() {
  bitField0_ |= 0x00000010; // mark async_persist_request as set
  onChanged();
  return getAsyncPersistRequestFieldBuilder().getBuilder();
}
public Builder mergeFrom(alluxio.proto.journal.Journal.JournalEntry other) { if (other == alluxio.proto.journal.Journal.JournalEntry.getDefaultInstance()) return this; if (other.hasSequenceNumber()) { setSequenceNumber(other.getSequenceNumber()); mergeAddMountPoint(other.getAddMountPoint()); mergeAsyncPersistRequest(other.getAsyncPersistRequest()); mergeBlockContainerIdGenerator(other.getBlockContainerIdGenerator()); mergeBlockInfo(other.getBlockInfo()); mergeCompleteFile(other.getCompleteFile()); mergeCompletePartition(other.getCompletePartition()); mergeCompleteStore(other.getCompleteStore()); mergeCreateStore(other.getCreateStore()); mergeDeleteBlock(other.getDeleteBlock()); mergeDeleteFile(other.getDeleteFile()); mergeDeleteLineage(other.getDeleteLineage()); mergeDeleteMountPoint(other.getDeleteMountPoint()); mergeDeleteStore(other.getDeleteStore());
File.PersistDirectoryEntry.newBuilder().setId(dir.getId()).build(); rpcContext.journal( Journal.JournalEntry.newBuilder().setPersistDirectory(persistDirectory).build()); success = true; } finally {
if (entry.getSequenceNumber() > lastPersistSeq) { try { entry.toBuilder().build().writeDelimitedTo(mJournalOutputStream); retryEndSeq = entry.getSequenceNumber(); } catch (IOJournalClosedException e) {
.setReadOnly(options.isReadOnly()) .addAllProperties(protoProperties).setShared(options.isShared()).build(); rpcContext.journal(JournalEntry.newBuilder().setAddMountPoint(addMountPoint).build());
if (mBlocks.remove(blockId) != null) { JournalEntry entry = JournalEntry.newBuilder() .setDeleteBlock(DeleteBlockEntry.newBuilder().setBlockId(blockId)).build(); journalContext.append(entry);
builder.setUfsFingerprint(options.getUfsFingerprint()); journalContext.append(JournalEntry.newBuilder().setSetAttribute(builder).build());
.setRecursive(deleteOptions.isRecursive()) .setOpTimeMs(opTimeMs).build(); rpcContext.journal(Journal.JournalEntry.newBuilder().setDeleteFile(deleteFile).build());
rpcContext.journal(JournalEntry.newBuilder().setDeleteMountPoint(deleteMountPoint).build());
.setUpdateUfsMode(UpdateUfsModeEntry.newBuilder() .setUfsPath(key) .setUfsMode(UfsMode.valueOf(ufsMode.name()))) .build());
/**
 * Appends {@code entry} to the journal, stamping it with the next sequence number.
 *
 * <p>Ordering here is load-bearing: first attempt recovery from an earlier UFS failure
 * and rotate the log if needed (either may detect a closed journal, translated to
 * JournalClosedException); then serialize the numbered entry, remember it in
 * mEntriesToFlush, and only then advance mNextSequenceNumber — so a failed write never
 * consumes a sequence number.
 *
 * @param entry the entry to write; any sequence number already on it is overwritten
 * @throws IOException if writing to the journal output stream fails
 * @throws JournalClosedException if the journal has been closed
 */
public synchronized void write(JournalEntry entry) throws IOException, JournalClosedException {
  try {
    // Recover from a previous UFS write failure (if flagged) and rotate to a new
    // log file when required, before accepting this entry.
    maybeRecoverFromUfsFailures();
    maybeRotateLog();
  } catch (IOJournalClosedException e) {
    throw e.toJournalClosedException();
  }
  try {
    JournalEntry entryToWrite =
        entry.toBuilder().setSequenceNumber(mNextSequenceNumber).build();
    entryToWrite.writeDelimitedTo(mJournalOutputStream);
    LOG.debug("Adding journal entry (seq={}) to retryList with {} entries.",
        entryToWrite.getSequenceNumber(), mEntriesToFlush.size());
    // Keep the entry until it is flushed so it can be re-written after a UFS failure.
    mEntriesToFlush.add(entryToWrite);
    // Advance only after a successful write; a failed write reuses this number.
    mNextSequenceNumber++;
  } catch (IOJournalClosedException e) {
    throw e.toJournalClosedException();
  } catch (IOException e) {
    // Set mNeedsRecovery to true so that {@code maybeRecoverFromUfsFailures}
    // can know a UFS failure has occurred.
    mNeedsRecovery = true;
    throw new IOException(ExceptionMessage.JOURNAL_WRITE_FAILURE
        .getMessageWithUrl(RuntimeConstants.ALLUXIO_DEBUG_DOCS_URL,
            mJournalOutputStream.currentLog(), e.getMessage()), e);
  }
}
/**
 * Serializes this file inode's persisted attributes into a journal entry.
 *
 * @return a JournalEntry carrying an InodeFileEntry snapshot of this inode
 */
@Override
public JournalEntry toJournalEntry() {
  // Copy each persisted field from the inode's accessors into the proto builder.
  InodeFileEntry.Builder fileEntry = InodeFileEntry.newBuilder();
  fileEntry.addAllBlocks(getBlockIds());
  fileEntry.setBlockSizeBytes(getBlockSizeBytes());
  fileEntry.setCacheable(isCacheable());
  fileEntry.setCompleted(isCompleted());
  fileEntry.setCreationTimeMs(getCreationTimeMs());
  fileEntry.setGroup(getGroup());
  fileEntry.setId(getId());
  fileEntry.setLastModificationTimeMs(getLastModificationTimeMs());
  fileEntry.setLength(getLength());
  fileEntry.setMode(getMode());
  fileEntry.setName(getName());
  fileEntry.setOwner(getOwner());
  fileEntry.setParentId(getParentId());
  fileEntry.setPersistenceState(getPersistenceState().name());
  fileEntry.setPinned(isPinned());
  fileEntry.setTtl(getTtl());
  // TTL action is stored in its protobuf representation.
  fileEntry.setTtlAction(ProtobufUtils.toProtobuf(getTtlAction()));
  fileEntry.setUfsFingerprint(getUfsFingerprint());
  return JournalEntry.newBuilder().setInodeFile(fileEntry.build()).build();
}
private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getAddMountPointFieldBuilder(); getAsyncPersistRequestFieldBuilder(); getBlockContainerIdGeneratorFieldBuilder(); getBlockInfoFieldBuilder(); getCompleteFileFieldBuilder(); getCompletePartitionFieldBuilder(); getCompleteStoreFieldBuilder(); getCreateStoreFieldBuilder(); getDeleteBlockFieldBuilder(); getDeleteFileFieldBuilder(); getDeleteLineageFieldBuilder(); getDeleteMountPointFieldBuilder(); getDeleteStoreFieldBuilder(); getInodeDirectoryFieldBuilder(); getInodeDirectoryIdGeneratorFieldBuilder(); getInodeFileFieldBuilder(); getInodeLastModificationTimeFieldBuilder(); getLineageFieldBuilder(); getLineageIdGeneratorFieldBuilder(); getMergeStoreFieldBuilder(); getPersistDirectoryFieldBuilder(); getReinitializeFileFieldBuilder(); getRenameFieldBuilder(); getRenameStoreFieldBuilder(); getSetAttributeFieldBuilder(); getUpdateUfsModeFieldBuilder();
/**
 * Returns the journal entry for the pending mount point and consumes it.
 *
 * @return a JournalEntry wrapping an AddMountPointEntry for the current mount
 * @throws NoSuchElementException if there is no further entry
 */
@Override
public Journal.JournalEntry next() {
  // Fail fast when exhausted, per the Iterator contract.
  if (!hasNext()) {
    throw new NoSuchElementException();
  }
  // Consume the pending map entry: read its parts, then clear it so it is not
  // returned again.
  String path = mEntry.getKey();
  MountInfo mountInfo = mEntry.getValue();
  mEntry = null;
  // Convert the mount options' properties into repeated StringPairEntry messages.
  Map<String, String> optionProps = mountInfo.getOptions().getProperties();
  List<File.StringPairEntry> pairs = new ArrayList<>(optionProps.size());
  for (Map.Entry<String, String> kv : optionProps.entrySet()) {
    pairs.add(File.StringPairEntry.newBuilder()
        .setKey(kv.getKey())
        .setValue(kv.getValue())
        .build());
  }
  // Assemble the AddMountPointEntry and wrap it in a JournalEntry.
  AddMountPointEntry.Builder mountBuilder = AddMountPointEntry.newBuilder()
      .setAlluxioPath(path)
      .setMountId(mountInfo.getMountId())
      .setUfsPath(mountInfo.getUfsUri().toString())
      .setReadOnly(mountInfo.getOptions().isReadOnly())
      .addAllProperties(pairs)
      .setShared(mountInfo.getOptions().isShared());
  return Journal.JournalEntry.newBuilder().setAddMountPoint(mountBuilder.build()).build();
}
/**
 * The given entry should not have its sequence number set. This method will add the proper
 * sequence number to the passed in entry.
 *
 * @param entry an entry to write to the journal checkpoint file
 * @throws IOException if the journal is closed or serialization fails
 */
@Override
public synchronized void write(JournalEntry entry) throws IOException {
  // Refuse writes once the journal has been closed.
  if (mIsClosed) {
    throw new IOException(ExceptionMessage.JOURNAL_WRITE_AFTER_CLOSE.getMessage());
  }
  // A previously failed write requested a fresh log; rotate before writing again.
  if (mRotateLogForNextWrite) {
    rotateLog();
    mRotateLogForNextWrite = false;
  }
  try {
    // Stamp the entry with the writer's next sequence number, then serialize it.
    JournalEntry numberedEntry =
        entry.toBuilder().setSequenceNumber(mJournalWriter.getNextSequenceNumber()).build();
    mJournalFormatter.serialize(numberedEntry, mDataOutputStream);
  } catch (IOException e) {
    // Force a log rotation on the next write attempt, then surface the failure.
    mRotateLogForNextWrite = true;
    throw new IOException(ExceptionMessage.JOURNAL_WRITE_FAILURE.getMessageWithUrl(
        RuntimeConstants.ALLUXIO_DEBUG_DOCS_URL, mCurrentLog, e.getMessage()), e);
  }
}
/**
 * Serializes this directory inode's persisted attributes into a journal entry.
 *
 * @return a JournalEntry carrying an InodeDirectoryEntry snapshot of this inode
 */
@Override
public JournalEntry toJournalEntry() {
  // Copy each persisted field from the inode's accessors into the proto builder.
  InodeDirectoryEntry.Builder dirEntry = InodeDirectoryEntry.newBuilder();
  dirEntry.setCreationTimeMs(getCreationTimeMs());
  dirEntry.setId(getId());
  dirEntry.setName(getName());
  dirEntry.setParentId(getParentId());
  dirEntry.setPersistenceState(getPersistenceState().name());
  dirEntry.setPinned(isPinned());
  dirEntry.setLastModificationTimeMs(getLastModificationTimeMs());
  dirEntry.setMountPoint(isMountPoint());
  dirEntry.setTtl(getTtl());
  // TTL action is stored in its protobuf representation.
  dirEntry.setTtlAction(ProtobufUtils.toProtobuf(getTtlAction()));
  dirEntry.setDirectChildrenLoaded(isDirectChildrenLoaded());
  dirEntry.setOwner(getOwner());
  dirEntry.setGroup(getGroup());
  dirEntry.setMode(getMode());
  return JournalEntry.newBuilder().setInodeDirectory(dirEntry.build()).build();
}
}
/**
 * Records a block as committed in the UFS, creating master-side metadata for it when
 * none exists yet, and journals the new metadata.
 *
 * <p>Uses an unsynchronized get() as a fast-path existence check, then putIfAbsent()
 * so only one concurrent caller journals the entry for a given blockId.
 *
 * @param blockId the id of the committed block
 * @param length the length of the block in bytes
 * @throws UnavailableException presumably from createJournalContext() when the journal
 *         is unavailable — confirm against that method's contract
 */
@Override
public void commitBlockInUFS(long blockId, long length) throws UnavailableException {
  LOG.debug("Commit block in ufs. blockId: {}, length: {}", blockId, length);
  if (mBlocks.get(blockId) != null) {
    // Block metadata already exists, so do not need to create a new one.
    return;
  }
  // The block has not been committed previously, so add the metadata to commit the block.
  MasterBlockInfo block = new MasterBlockInfo(blockId, length);
  // Journal context is opened before insertion so the entry is appended within it.
  try (JournalContext journalContext = createJournalContext()) {
    // NOTE(review): locking the freshly created MasterBlockInfo — presumably to order
    // concurrent mutations of it after insertion into mBlocks; confirm intent.
    synchronized (block) {
      if (mBlocks.putIfAbsent(blockId, block) == null) {
        // Successfully added the new block metadata. Append a journal entry for the new metadata.
        BlockInfoEntry blockInfo =
            BlockInfoEntry.newBuilder().setBlockId(blockId).setLength(length).build();
        journalContext.append(JournalEntry.newBuilder().setBlockInfo(blockInfo).build());
      }
    }
  }
}
/**
 * Writes a backup to the specified stream.
 *
 * <p>Streams every registered master's journal entries through a gzip compressor,
 * writing each entry with its sequence number cleared — presumably so numbering can
 * be reassigned on restore; confirm against the restore path.
 *
 * <p>NOTE(review): if writeDelimitedTo throws mid-loop, zipStream is neither finished
 * nor closed, so its internal compressor state is not released until GC — confirm
 * whether this exception path matters to callers.
 *
 * @param os the stream to write to; NOT closed here — it is owned by the caller
 */
public void backup(OutputStream os) throws IOException {
  int count = 0;
  GzipCompressorOutputStream zipStream = new GzipCompressorOutputStream(os);
  for (Master master : mRegistry.getServers()) {
    Iterator<JournalEntry> it = master.getJournalEntryIterator();
    while (it.hasNext()) {
      // Each entry is written length-delimited, with its sequence number stripped.
      it.next().toBuilder().clearSequenceNumber().build().writeDelimitedTo(zipStream);
      count++;
    }
  }
  // finish() instead of close() since close would close os, which is owned by the caller.
  zipStream.finish();
  LOG.info("Created backup with {} entries", count);
}