@Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len)
    throws IOException {
  return fs.getFileBlockLocations(file, start, len);
}
@Override
public BlockLocation[] getLocations(FileSystem fs, FileStatus status) throws IOException {
  if (status instanceof LocatedFileStatus) {
    return ((LocatedFileStatus) status).getBlockLocations();
  } else {
    return fs.getFileBlockLocations(status, 0, status.getLen());
  }
}
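A minimal usage sketch of the pattern above, assuming a configured Hadoop FileSystem and a hypothetical directory /data: statuses produced by listLocatedStatus are LocatedFileStatus instances, so their block locations come back pre-populated and the instanceof branch avoids a second namenode round trip.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class LocatedStatusExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // listLocatedStatus returns LocatedFileStatus instances, so a helper
    // like getLocations above can read block locations without an extra
    // getFileBlockLocations RPC per file.
    RemoteIterator<LocatedFileStatus> it =
        fs.listLocatedStatus(new Path("/data")); // hypothetical path
    while (it.hasNext()) {
      LocatedFileStatus status = it.next();
      for (BlockLocation loc : status.getBlockLocations()) {
        System.out.println(status.getPath() + " -> "
            + loc.getOffset() + "+" + loc.getLength());
      }
    }
  }
}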
@Override
public Object call() throws Exception {
  // Argument is checked by Hadoop.
  return fs.getFileBlockLocations((Path) null, 1, 2);
}
}, NullPointerException.class, null);
@Override
public BlockLocation[] getFileBlockLocations(Path f, long start, long len) throws IOException {
  checkPath(f);
  return fsImpl.getFileBlockLocations(f, start, len);
}
public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len)
    throws java.io.IOException {
  return this.underlyingFs.getFileBlockLocations(
      replaceScheme(file, this.replacementScheme, this.underlyingScheme), start, len);
}

public BlockLocation[] getFileBlockLocations(Path p, long start, long len)
    throws java.io.IOException {
  return this.underlyingFs.getFileBlockLocations(
      replaceScheme(p, this.replacementScheme, this.underlyingScheme), start, len);
}
/**
 * Return an array containing hostnames, offset and size of
 * portions of the given file. For a nonexistent
 * file or regions, {@code null} is returned.
 *
 * This call is most helpful with location-aware distributed
 * filesystems, where it returns hostnames of machines that
 * contain the given file.
 *
 * A FileSystem will normally return the equivalent result
 * of passing the {@code FileStatus} of the path to
 * {@link #getFileBlockLocations(FileStatus, long, long)}.
 *
 * @param p path used to identify the FS, since a FileSystem could be
 *          delegating the call to another FS
 * @param start offset into the given file
 * @param len length of the range for which to get locations
 * @throws FileNotFoundException when the path does not exist
 * @throws IOException IO failure
 */
public BlockLocation[] getFileBlockLocations(Path p, long start, long len)
    throws IOException {
  if (p == null) {
    throw new NullPointerException();
  }
  FileStatus file = getFileStatus(p);
  return getFileBlockLocations(file, start, len);
}
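A minimal usage sketch of this Path-based overload, assuming a configured Hadoop FileSystem and a hypothetical file /tmp/example.txt:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationsExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/example.txt"); // hypothetical path
    long len = fs.getFileStatus(file).getLen();
    // Ask for every block in the file; each BlockLocation reports the
    // hosts holding a replica plus the block's offset and length.
    for (BlockLocation loc : fs.getFileBlockLocations(file, 0, len)) {
      System.out.printf("offset=%d len=%d hosts=%s%n",
          loc.getOffset(), loc.getLength(),
          String.join(",", loc.getHosts()));
    }
  }
}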
@Override
public BlockLocation[] getFileBlockLocations(final FileStatus file, final long start,
    final long len) throws IOException {
  if (!(file instanceof HadoopFileStatus)) {
    throw new IOException("file is not an instance of HadoopFileStatus");
  }
  final HadoopFileStatus f = (HadoopFileStatus) file;

  final org.apache.hadoop.fs.BlockLocation[] blkLocations =
      fs.getFileBlockLocations(f.getInternalFileStatus(), start, len);

  // Wrap up HDFS-specific block location objects
  final HadoopBlockLocation[] distBlkLocations = new HadoopBlockLocation[blkLocations.length];
  for (int i = 0; i < distBlkLocations.length; i++) {
    distBlkLocations[i] = new HadoopBlockLocation(blkLocations[i]);
  }
  return distBlkLocations;
}
FileStatus stat = fs.getFileStatus(path);
length += stat.getLen();
// The running total 'length' may exceed this file's own length; any range
// past the end of the file is simply ignored when fetching block locations.
BlockLocation[] locs = fs.getFileBlockLocations(stat, 0, length);
for (int i = 0; i < locs.length; i++) {
  String[] hosts = locs[i].getHosts();
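Building on the fragment above, a self-contained sketch that tallies how many of a single file's blocks each host serves; the path is hypothetical and the per-host map is an assumption about what the surrounding loop is computing.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HostCountExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus stat = fs.getFileStatus(new Path("/tmp/example.txt")); // hypothetical path
    Map<String, Integer> perHost = new HashMap<>();
    // Count one entry per (block, replica host) pair.
    for (BlockLocation loc : fs.getFileBlockLocations(stat, 0, stat.getLen())) {
      for (String host : loc.getHosts()) {
        perHost.merge(host, 1, Integer::sum);
      }
    }
    System.out.println(perHost);
  }
}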
@Override
public LocatedFileStatus next() throws IOException {
  if (!hasNext()) {
    throw new NoSuchElementException("No more entries in " + f);
  }
  FileStatus result = stats[i++];
  // For files, use getFileBlockLocations(FileStatus, long, long) to avoid
  // calling getFileStatus(Path) to load the FileStatus again.
  BlockLocation[] locs = result.isFile()
      ? getFileBlockLocations(result, 0, result.getLen())
      : null;
  return new LocatedFileStatus(result, locs);
}
};
item.refreshStatus();
BlockLocation[] locations = item.fs.getFileBlockLocations(item.stat, 0, item.stat.getLen());
@Override
public BlockLocation[] getFileBlockLocations(FileStatus fs, long start, long len)
    throws IOException {
  final InodeTree.ResolveResult<FileSystem> res =
      fsState.resolve(getUriPath(fs.getPath()), true);
  return res.targetFileSystem.getFileBlockLocations(
      new ViewFsFileStatus(fs, res.remainingPath), start, len);
}
public Optional<InternalHiveSplit> createInternalHiveSplit(FileSplit split) throws IOException {
  FileStatus file = fileSystem.getFileStatus(split.getPath());
  return createInternalHiveSplit(
      split.getPath(),
      fileSystem.getFileBlockLocations(file, split.getStart(), split.getLength()),
      split.getStart(),
      split.getLength(),
      file.getLen(),
      OptionalInt.empty(),
      false);
}
@Override
@Nullable
public List<String> getFileLocations(String path, FileLocationOptions options)
    throws IOException {
  // If the user has hinted that the underlying storage nodes are not co-located
  // with Alluxio workers, short-circuit without querying the locations.
  if (Boolean.valueOf(mUfsConf.get(PropertyKey.UNDERFS_HDFS_REMOTE))) {
    return null;
  }
  FileSystem hdfs = getFs();
  List<String> ret = new ArrayList<>();
  try {
    // The only use of fileStatus is to supply the path to getFileBlockLocations.
    // HDFS 2 offers getFileBlockLocations(Path path, long offset, long len),
    // but in HDFS 1 the only API is
    // getFileBlockLocations(FileStatus stat, long offset, long len).
    // By constructing the file status manually, we save one RPC call to
    // getFileStatus.
    FileStatus fileStatus = new FileStatus(0L, false, 0, 0L, 0L, 0L,
        null, null, null, new Path(path));
    BlockLocation[] bLocations =
        hdfs.getFileBlockLocations(fileStatus, options.getOffset(), 1);
    if (bLocations.length > 0) {
      String[] names = bLocations[0].getHosts();
      Collections.addAll(ret, names);
    }
  } catch (IOException e) {
    LOG.warn("Unable to get file location for {} : {}", path, e.getMessage());
  }
  return ret;
}
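A standalone sketch of the same trick using plain Hadoop APIs: as the comment above notes, only the path inside the FileStatus is consulted, so a zero-filled stub saves the getFileStatus round trip. The path is hypothetical.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OffsetHostsExample {
  public static void main(String[] args) throws Exception {
    FileSystem hdfs = FileSystem.get(new Configuration());
    // Only the path field is used by getFileBlockLocations here, so a
    // zero-filled stub FileStatus avoids a prior getFileStatus RPC.
    FileStatus stub = new FileStatus(0L, false, 0, 0L, 0L, 0L,
        null, null, null, new Path("/tmp/example.txt")); // hypothetical path
    long offset = 0L; // probe the block containing this offset
    BlockLocation[] locs = hdfs.getFileBlockLocations(stub, offset, 1);
    if (locs.length > 0) {
      System.out.println(Arrays.asList(locs[0].getHosts()));
    }
  }
}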
/** {@inheritDoc} */
@Override
public Collection<IgfsBlockLocation> affinity(IgfsPath path, long start, long len, long maxLen)
    throws IgniteException {
  try {
    BlockLocation[] hadoopBlocks =
        fileSystemForUser().getFileBlockLocations(convert(path), start, len);

    List<IgfsBlockLocation> blks = new ArrayList<>(hadoopBlocks.length);

    for (int i = 0; i < hadoopBlocks.length; ++i)
      blks.add(convertBlockLocation(hadoopBlocks[i]));

    return blks;
  } catch (FileNotFoundException ignored) {
    return Collections.emptyList();
  } catch (IOException e) {
    throw handleSecondaryFsError(e, "Failed affinity for path: " + path);
  }
}
@Override
public Object call() throws Exception {
  return fs.getFileBlockLocations(
      new FileStatus(1L, false, 1, 1L, 1L, new Path("path")), 0L, 256L);
}
}, IOException.class, "File system is stopped.");
/** @throws Exception If failed. */
@Test
public void testGetFileBlockLocationsIfFileStatusReferenceNotExistingPath() throws Exception {
  Path path = new Path("someFile");

  fs.create(path).close();

  final FileStatus status = fs.getFileStatus(path);

  fs.delete(path, true);

  BlockLocation[] locations = fs.getFileBlockLocations(status, 1, 2);

  assertEquals(0, locations.length);
}
/**
 * Get block locations from the underlying fs and fix their
 * offsets and lengths.
 * @param file the input file status to get block locations
 * @param start the start of the desired range in the contained file
 * @param len the length of the desired range
 * @return block locations for this segment of file
 * @throws IOException
 */
@Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len)
    throws IOException {
  HarStatus hstatus = getFileHarStatus(file.getPath());
  Path partPath = new Path(archivePath, hstatus.getPartName());
  FileStatus partStatus = metadata.getPartFileStatus(partPath);

  // get all part blocks that overlap with the desired file blocks
  BlockLocation[] locations =
      fs.getFileBlockLocations(partStatus, hstatus.getStartIndex() + start, len);

  return fixBlockLocations(locations, start, len, hstatus.getStartIndex());
}
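The delegated fixBlockLocations step can be sketched as follows. This is an illustrative reconstruction under the snippet's stated contract, not the HarFileSystem source: shift each block from part-file coordinates into the contained file's coordinates, then clamp it to the requested range.

import org.apache.hadoop.fs.BlockLocation;

public class HarOffsetFix {
  // Illustrative reconstruction (not the HarFileSystem source): blocks come
  // back in part-file coordinates; subtract the file's start index within the
  // part file, then trim each block to the requested [start, start + len) range.
  static BlockLocation[] shiftAndClamp(BlockLocation[] locations,
      long start, long len, long startIndex) {
    for (BlockLocation loc : locations) {
      long logicalOffset = loc.getOffset() - startIndex;
      long begin = Math.max(logicalOffset, start);
      long end = Math.min(logicalOffset + loc.getLength(), start + len);
      loc.setOffset(begin);
      loc.setLength(end - begin);
    }
    return locations;
  }
}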
/**
 * Compute the HDFS blocks distribution of a given file, or a portion of the file.
 * @param fs file system
 * @param status file status of the file
 * @param start start position of the portion
 * @param length length of the portion
 * @return the HDFS blocks distribution
 * @throws IOException IO failure
 */
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(
    final FileSystem fs, FileStatus status, long start, long length) throws IOException {
  HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
  BlockLocation[] blockLocations = fs.getFileBlockLocations(status, start, length);
  for (BlockLocation bl : blockLocations) {
    String[] hosts = bl.getHosts();
    long len = bl.getLength();
    blocksDistribution.addHostsAndBlockWeight(hosts, len);
  }
  return blocksDistribution;
}
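A hedged usage sketch, assuming the method lives on HBase's FSUtils as in the upstream source, plus a hypothetical store-file path: compute the distribution over the whole file and list the hosts holding the most of its data.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.util.FSUtils;

public class DistributionExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus status =
        fs.getFileStatus(new Path("/hbase/example-storefile")); // hypothetical path
    // Distribution over the whole file: start = 0, length = file length.
    HDFSBlocksDistribution dist =
        FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
    // Hosts ordered by how much of this file's data they hold locally.
    System.out.println(dist.getTopHosts());
  }
}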
/** @throws Exception If failed. */
@Test
public void testGetFileBlockLocations() throws Exception {
  Path igfsHome = new Path(PRIMARY_URI);
  Path file = new Path(igfsHome, "someFile");

  try (OutputStream out = new BufferedOutputStream(fs.create(file, true, 1024 * 1024))) {
    byte[] data = new byte[128 * 1024];

    for (int i = 0; i < 100; i++)
      out.write(data);

    out.flush();
  }

  try (FSDataInputStream in = fs.open(file, 1024 * 1024)) {
    byte[] data = new byte[128 * 1024];

    int read;

    do {
      read = in.read(data);
    } while (read > 0);
  }

  FileStatus status = fs.getFileStatus(file);

  int grpLen = 128 * 512 * 1024;
  int grpCnt = (int) ((status.getLen() + grpLen - 1) / grpLen);

  BlockLocation[] locations = fs.getFileBlockLocations(status, 0, status.getLen());

  assertEquals(grpCnt, locations.length);
}