/**
 * Resolves the HDFS inode ("file") id for {@code path} by asking the
 * NameNode through the DFS client.
 *
 * <p>NOTE(review): {@code ensureDfs} is defined outside this view —
 * presumably it validates/unwraps {@code fs} into a DistributedFileSystem;
 * confirm against its definition.
 *
 * @param fs   the filesystem, expected to be HDFS-backed
 * @param path the file whose inode id is requested
 * @return the inode id reported by the NameNode for the path
 * @throws IOException on RPC failure (and, presumably, if {@code fs} is not
 *         an HDFS filesystem — TODO confirm)
 */
@Override
public long getFileId(FileSystem fs, String path) throws IOException {
  return ensureDfs(fs).getClient().getFileInfo(path).getFileId();
}
/**
 * Static helper: resolves the HDFS inode ("file") id for {@code path} via
 * the DFS client's {@code getFileInfo} RPC.
 *
 * <p>NOTE(review): {@code ensureDfs} is defined outside this view —
 * presumably it validates/unwraps {@code fs} into a DistributedFileSystem;
 * confirm against its definition.
 *
 * @param fs   the filesystem, expected to be HDFS-backed
 * @param path the file whose inode id is requested
 * @return the inode id reported by the NameNode for the path
 * @throws IOException on RPC failure
 */
public static long getFileId(FileSystem fs, String path) throws IOException {
  return ensureDfs(fs).getClient().getFileInfo(path).getFileId();
}
// NOTE(review): this span is NOT syntactically valid Java — it reads as
// several fragments stitched together from a FanOutOneBlockAsyncDFSOutput
// creation path: lease acquisition, block allocation via BLOCK_ADDER,
// datanode connection setup, construction/return of the output stream, and
// lease release. The 'futureList = connectToDataNodes(...' call is cut off
// mid-argument-list, and statements follow the 'return'. Left byte-identical;
// recover the original method body before editing any logic here.
beginFileLease(client, stat.getFileId()); boolean succ = false; LocatedBlock locatedBlock = null; DataChecksum summer = createChecksum(client); locatedBlock = BLOCK_ADDER.addBlock(namenode, src, client.getClientName(), null, excludesNodes, stat.getFileId(), null); List<Channel> datanodeList = new ArrayList<>(); futureList = connectToDataNodes(conf, client, clientName, locatedBlock, 0L, 0L, FanOutOneBlockAsyncDFSOutput output = new FanOutOneBlockAsyncDFSOutput(conf, fsUtils, dfs, client, namenode, clientName, src, stat.getFileId(), locatedBlock, encryptor, datanodeList, summer, ALLOC); succ = true; return output; endFileLease(client, stat.getFileId());
// Copy the remaining file-status attributes into the property map
// (presumably a JSON object map for WebHDFS responses — TODO confirm
// against the enclosing method). Keys are the wire-format field names.
m.put("blockSize", status.getBlockSize());
m.put("replication", status.getReplication());
m.put("fileId", status.getFileId());
m.put("childrenNum", status.getChildrenNum());
m.put("storagePolicy", status.getStoragePolicy());
// NOTE(review): fragment — begins mid-string-concatenation (leading '+') and
// appears to collect log statements from several branches of the storage
// policy satisfier's block-movement result handling; each branch logs the
// tracked file's inode id before re-queueing or cleaning up. Left
// byte-identical pending recovery of the full switch/if structure.
+ " Adding to attempt monitor queue for the storage " + "movement attempt finished report", status.status, fileStatus.getFileId()); LOG.debug("Adding trackID:{} for the file id:{} back to" + " retry queue as none of the blocks found its eligible" + " targets.", trackId, fileStatus.getFileId()); LOG.debug("Adding trackID:{} for the file id:{} back to " + "retry queue as some of the blocks are low redundant.", trackId, fileStatus.getFileId()); LOG.debug("Adding trackID:{} for the file id:{} back to " + "retry queue as some of the blocks movement failed.", trackId, fileStatus.getFileId()); LOG.info("Block analysis status:{} for the file id:{}." + " So, Cleaning up the Xattrs.", status.status, fileStatus.getFileId()); storageMovementNeeded.removeItemTrackInfo(itemInfo, true); break;
// Queue the child file for processing, tracked under the original start
// inode id ('false' flag's meaning is not visible here — presumably
// "not scanning completed / not a retry"; TODO confirm at the callee).
service.addFileToProcess(new ItemInfo(startID, child.getFileId()), false);
// Presumably applies back-pressure when processing queues are full —
// verify against checkProcessingQueuesFree's definition.
checkProcessingQueuesFree();
/**
 * Asks the NameNode to allocate the next block of the file being written.
 *
 * @param excluded datanodes the allocation must avoid
 * @param oldBlock the previously allocated block (the one being committed)
 * @return the newly allocated block together with its target datanodes
 * @throws IOException if the NameNode RPC fails
 */
private LocatedBlock locateFollowingBlock(DatanodeInfo[] excluded,
    ExtendedBlock oldBlock) throws IOException {
  // Delegate to the shared helper; the file is identified by its inode id.
  final LocatedBlock allocated = DFSOutputStream.addBlock(excluded, dfsClient,
      src, oldBlock, stat.getFileId(), favoredNodes, addBlockFlags);
  return allocated;
}
/**
 * Looks up the HDFS inode ("file") id of {@code path} through the DFS
 * client's {@code getFileInfo} RPC.
 *
 * <p>NOTE(review): {@code ensureDfs} is not visible in this view — it
 * presumably unwraps/validates the filesystem as HDFS; confirm.
 *
 * @param fs   the filesystem, expected to be HDFS-backed
 * @param path the file whose inode id is requested
 * @return the inode id for the path
 * @throws IOException on RPC failure
 */
@Override
public long getFileId(FileSystem fs, String path) throws IOException {
  return ensureDfs(fs).getClient().getFileInfo(path).getFileId();
}
// Copy the remaining file-status attributes into the property map
// (presumably a JSON object map for WebHDFS responses — TODO confirm
// against the enclosing method). Keys are the wire-format field names.
m.put("blockSize", status.getBlockSize());
m.put("replication", status.getReplication());
m.put("fileId", status.getFileId());
m.put("childrenNum", status.getChildrenNum());
m.put("storagePolicy", status.getStoragePolicy());
// Copy the remaining file-status attributes into the property map
// (presumably a JSON object map for WebHDFS responses — TODO confirm
// against the enclosing method). Keys are the wire-format field names.
m.put("blockSize", status.getBlockSize());
m.put("replication", status.getReplication());
m.put("fileId", status.getFileId());
m.put("childrenNum", status.getChildrenNum());
m.put("storagePolicy", status.getStoragePolicy());
// Cache the client handle and target path, then snapshot the file's
// immutable attributes (inode id, block size, replication factor) from
// its status so later pipeline operations need no further lookups.
this.dfsClient = dfsClient;
this.src = src;
this.fileId = stat.getFileId();
this.blockSize = stat.getBlockSize();
this.blockReplication = stat.getReplication();
// Cache the client handle and target path, then snapshot the file's
// immutable attributes (inode id, block size, replication factor) from
// its status so later pipeline operations need no further lookups.
this.dfsClient = dfsClient;
this.src = src;
this.fileId = stat.getFileId();
this.blockSize = stat.getBlockSize();
this.blockReplication = stat.getReplication();
/**
 * Writes five files as the given client and checks block placement: each
 * allocated block must receive REPLICATION_FACTOR locations and, when a
 * client rack is supplied, the first replica must be rack-local to it.
 *
 * @param clientMachine hostname used as both file creator and block-allocation client
 * @param clientRack    expected rack of the first replica; null skips the rack check
 */
private void testPlacement(String clientMachine, String clientRack) throws IOException {
  // write 5 files and check whether all times block placed
  for (int i = 0; i < 5; i++) {
    String src = "/test-" + i;
    // Create the file with client machine
    HdfsFileStatus fileStatus = namesystem.startFile(src, perm, clientMachine,
        clientMachine, EnumSet.of(CreateFlag.CREATE), true, REPLICATION_FACTOR,
        DEFAULT_BLOCK_SIZE, null, false);
    LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
        null, null, fileStatus.getFileId(), null);
    assertEquals("Block should be allocated sufficient locations",
        REPLICATION_FACTOR, locatedBlock.getLocations().length);
    if (clientRack != null) {
      assertEquals("First datanode should be rack local", clientRack,
          locatedBlock.getLocations()[0].getNetworkLocation());
    }
    // Release the allocated block so the next iteration gets a fresh one.
    nameNodeRpc.abandonBlock(locatedBlock.getBlock(), fileStatus.getFileId(),
        src, clientMachine);
  }
}
// NOTE(review): the two braces below close scopes opened outside this view
// (presumably the enclosing test class) — do not remove.
}
}
LOG.warn("Abandoning " + block);
// Tell the NameNode to discard the half-written block of this file
// (identified by inode id) so a replacement block can be allocated.
dfsClient.namenode.abandonBlock(block.getCurrentBlock(), stat.getFileId(), src, dfsClient.clientName);
block.setCurrentBlock(null);
// Datanode recorded as faulty for this pipeline attempt, per the error state.
final DatanodeInfo badNode = nodes[errorState.getBadNodeIndex()];
// Cache the client handle and target path, then snapshot the file's
// immutable attributes (inode id, block size, replication factor) from
// its status so later pipeline operations need no further lookups.
this.dfsClient = dfsClient;
this.src = src;
this.fileId = stat.getFileId();
this.blockSize = stat.getBlockSize();
this.blockReplication = stat.getReplication();
// Exactly one directory entry is expected; verify its full path is 'bar'
// and that its status carries the inode id captured earlier as fooId.
assertEquals(1, dirs.length);
assertEquals(bar, dirs[0].getFullPath());
assertEquals(fooId, dirs[0].getDirStatus().getFileId());
/**
 * Converts an {@link HdfsFileStatus} into NFSv3 file attributes.
 *
 * Note on fileId: some 32-bit Linux clients have problems with a 64-bit
 * fileId — they take only the lower 32 bits and treat it as a signed int,
 * so when bit 32 is 1 the client considers the id invalid.
 *
 * @param fs  the HDFS file status to convert
 * @param iug maps owner/group names to NFS uid/gid values
 * @return the populated NFSv3 attribute structure
 */
public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
    HdfsFileStatus fs, IdMappingServiceProvider iug) {
  // Symlink takes precedence over directory, matching the original
  // two-step ternary assignment.
  final NfsFileType type;
  if (fs.isSymlink()) {
    type = NfsFileType.NFSLNK;
  } else if (fs.isDirectory()) {
    type = NfsFileType.NFSDIR;
  } else {
    type = NfsFileType.NFSREG;
  }
  final boolean isDir = (type == NfsFileType.NFSDIR);
  // Directories report child count + 2 links ('.' and '..'); files report 1.
  final int linkCount = isDir ? fs.getChildrenNum() + 2 : 1;
  // Directories report a synthesized size; regular files report byte length.
  final long attrSize = isDir ? getDirSize(fs.getChildrenNum()) : fs.getLen();
  return new Nfs3FileAttributes(type, linkCount,
      fs.getPermission().toShort(),
      iug.getUidAllowingUnknown(fs.getOwner()),
      iug.getGidAllowingUnknown(fs.getGroup()),
      attrSize, 0 /* fsid */, fs.getFileId(),
      fs.getModificationTime(), fs.getAccessTime(),
      new Nfs3FileAttributes.Specdata3());
}
// Mid-chain fragment: copies ownership/identity fields from the
// HdfsFileStatus into a protobuf builder (presumably HdfsFileStatusProto —
// TODO confirm), including the inode id and the path's local name as bytes.
setOwner(fs.getOwner()).
setGroup(fs.getGroup()).
setFileId(fs.getFileId()).
setChildrenNum(fs.getChildrenNum()).
setPath(ByteString.copyFrom(fs.getLocalNameInBytes())).