StorageLocation.getFile

How to use the getFile method in org.apache.hadoop.hdfs.server.datanode.StorageLocation

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode.StorageLocation.getFile (top results out of 315).
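Before the indexed snippets, a minimal sketch of the typical pattern (the data-directory path is hypothetical): parse a configured dfs.datanode.data.dir entry into a StorageLocation, then call getFile() to obtain the local directory backing that volume.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

class GetFileSketch {
 public static void main(String[] args) throws IOException {
  // Parse a configured data directory (path is hypothetical).
  StorageLocation location = StorageLocation.parse("/data/dfs/dn1");
  // getFile() returns the local directory backing this volume.
  File dir = location.getFile();
  System.out.println(dir.getAbsolutePath());
 }
}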

origin: ch.cern.hadoop/hadoop-hdfs

private StorageType getStorageTypeFromLocations(
  Collection<StorageLocation> dataLocations, File dir) {
 for (StorageLocation dataLocation : dataLocations) {
  if (dataLocation.getFile().equals(dir)) {
   return dataLocation.getStorageType();
  }
 }
 return StorageType.DEFAULT;
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Remove volumes from DataNode.
 * See {@link #removeVolumes(Set, boolean)} for details.
 *
 * @param locations the StorageLocations of the volumes to be removed.
 * @throws IOException if removing any of the volumes fails.
 */
private void removeVolumes(final Collection<StorageLocation> locations)
 throws IOException {
 if (locations.isEmpty()) {
  return;
 }
 Set<File> volumesToRemove = new HashSet<>();
 for (StorageLocation loc : locations) {
  volumesToRemove.add(loc.getFile().getAbsoluteFile());
 }
 removeVolumes(volumesToRemove, true);
}
origin: ch.cern.hadoop/hadoop-hdfs

static List<StorageLocation> checkStorageLocations(
  Collection<StorageLocation> dataDirs,
  LocalFileSystem localFS, DataNodeDiskChecker dataNodeDiskChecker)
    throws IOException {
 ArrayList<StorageLocation> locations = new ArrayList<StorageLocation>();
 StringBuilder invalidDirs = new StringBuilder();
 for (StorageLocation location : dataDirs) {
  final URI uri = location.getUri();
  try {
   dataNodeDiskChecker.checkDir(localFS, new Path(uri));
   locations.add(location);
  } catch (IOException ioe) {
   LOG.warn("Invalid " + DFS_DATANODE_DATA_DIR_KEY + " "
     + location.getFile() + " : ", ioe);
   invalidDirs.append("\"").append(uri.getPath()).append("\" ");
  }
 }
 if (locations.size() == 0) {
  throw new IOException("All directories in "
    + DFS_DATANODE_DATA_DIR_KEY + " are invalid: "
    + invalidDirs);
 }
 return locations;
}
origin: ch.cern.hadoop/hadoop-hdfs

// Excerpt: the enclosing loop and iterator are elided in this index entry.
if (absoluteVolumePaths.contains(loc.getFile().getAbsoluteFile())) {
 it.remove();
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Gets initial volume failure information for all volumes that failed
 * immediately at startup.  The method works by determining the set difference
 * between all configured storage locations and the actual storage locations in
 * use after attempting to put all of them into service.
 *
 * @return each storage location that has failed
 */
private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
  Collection<StorageLocation> dataLocations, DataStorage storage) {
 Set<String> failedLocationSet = Sets.newHashSetWithExpectedSize(
   dataLocations.size());
 for (StorageLocation sl: dataLocations) {
  failedLocationSet.add(sl.getFile().getAbsolutePath());
 }
 for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
    it.hasNext(); ) {
  Storage.StorageDirectory sd = it.next();
  failedLocationSet.remove(sd.getRoot().getAbsolutePath());
 }
 List<VolumeFailureInfo> volumeFailureInfos = Lists.newArrayListWithCapacity(
   failedLocationSet.size());
 long failureDate = Time.now();
 for (String failedStorageLocation: failedLocationSet) {
  volumeFailureInfos.add(new VolumeFailureInfo(failedStorageLocation,
    failureDate));
 }
 return volumeFailureInfos;
}
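The javadoc above describes the technique as a set difference: configured storage locations minus the locations actually in service. A toy illustration of that step, with hypothetical paths:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

class SetDifferenceSketch {
 public static void main(String[] args) {
  Set<String> configured = new HashSet<>(
    Arrays.asList("/data/1", "/data/2", "/data/3"));
  Set<String> inService = new HashSet<>(Arrays.asList("/data/1", "/data/3"));
  configured.removeAll(inService); // whatever remains failed at startup
  System.out.println(configured);  // [/data/2]
 }
}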
origin: ch.cern.hadoop/hadoop-hdfs

// Loop header reconstructed for readability; the iterated collection name
// ("locations") is illustrative, as the original header is truncated here.
for (Iterator<StorageLocation> sl = locations.iterator(); sl.hasNext(); ) {
 StorageLocation location = sl.next();
 if (location.getFile().getCanonicalPath().equals(
   dir.getRoot().getCanonicalPath())) {
  sl.remove();
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

// Open the volume's in_use.lock file under the storage directory; the body
// of the try block is elided in this index entry.
File lockFile = new File(sl.getFile(), Storage.STORAGE_FILE_LOCK);
try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
  FileChannel channel = raf.getChannel()) {
 // ... use the channel, e.g. to probe or hold the file lock
}
origin: ch.cern.hadoop/hadoop-hdfs

@Test
public void testParseChangedVolumes() throws IOException {
 startDFSCluster(1, 1);
 DataNode dn = cluster.getDataNodes().get(0);
 Configuration conf = dn.getConf();
 String oldPaths = conf.get(DFS_DATANODE_DATA_DIR_KEY);
 List<StorageLocation> oldLocations = new ArrayList<StorageLocation>();
 for (String path : oldPaths.split(",")) {
  oldLocations.add(StorageLocation.parse(path));
 }
 assertFalse(oldLocations.isEmpty());
 String newPaths = oldLocations.get(0).getFile().getAbsolutePath() +
   ",/foo/path1,/foo/path2";
 DataNode.ChangedVolumes changedVolumes =
   dn.parseChangedVolumes(newPaths);
 List<StorageLocation> newVolumes = changedVolumes.newLocations;
 assertEquals(2, newVolumes.size());
 assertEquals(new File("/foo/path1").getAbsolutePath(),
  newVolumes.get(0).getFile().getAbsolutePath());
 assertEquals(new File("/foo/path2").getAbsolutePath(),
  newVolumes.get(1).getFile().getAbsolutePath());
 List<StorageLocation> removedVolumes = changedVolumes.deactivateLocations;
 assertEquals(1, removedVolumes.size());
 assertEquals(oldLocations.get(1).getFile(),
   removedVolumes.get(0).getFile());
 assertEquals(1, changedVolumes.unchangedLocations.size());
 assertEquals(oldLocations.get(0).getFile(),
   changedVolumes.unchangedLocations.get(0).getFile());
}
origin: ch.cern.hadoop/hadoop-hdfs

 @Test (timeout = 30000)
 public void testDataDirValidation() throws Throwable {
  
  DataNodeDiskChecker diskChecker = mock(DataNodeDiskChecker.class);
  doThrow(new IOException()).doThrow(new IOException()).doNothing()
   .when(diskChecker).checkDir(any(LocalFileSystem.class), any(Path.class));
  LocalFileSystem fs = mock(LocalFileSystem.class);
  AbstractList<StorageLocation> locations = new ArrayList<StorageLocation>();

  locations.add(StorageLocation.parse("file:/p1/"));
  locations.add(StorageLocation.parse("file:/p2/"));
  locations.add(StorageLocation.parse("file:/p3/"));

  List<StorageLocation> checkedLocations =
    DataNode.checkStorageLocations(locations, fs, diskChecker);
  assertEquals("number of valid data dirs", 1, checkedLocations.size());
  String validDir = checkedLocations.iterator().next().getFile().getPath();
  assertThat("p3 should be valid", new File("/p3/").getPath(), is(validDir));
 }
origin: ch.cern.hadoop/hadoop-hdfs

// The method signature is truncated in this excerpt; the first parameter
// (a StorageLocation) is reconstructed from its uses below.
public void addVolume(final StorageLocation location,
  final List<NamespaceInfo> nsInfos) throws IOException {
 final File dir = location.getFile();
 final DataStorage.VolumeBuilder builder;
 try {
  builder = dataStorage.prepareVolume(datanode, location.getFile(), nsInfos);
 } catch (IOException e) {
  volumes.addVolumeFailureInfo(new VolumeFailureInfo(
    location.getFile().getAbsolutePath(), Time.now()));
  throw e;
 }
 // ... remainder elided in this index entry
origin: ch.cern.hadoop/hadoop-hdfs

@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
 final int numExistingVolumes = dataset.getVolumes().size();
 List<NamespaceInfo> nsInfos = new ArrayList<>();
 for (String bpid : BLOCK_POOL_IDS) {
  nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
 }
 String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
 StorageLocation loc = StorageLocation.parse(newVolumePath);
 Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
 DataStorage.VolumeBuilder builder =
   new DataStorage.VolumeBuilder(storage, sd);
 when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
   anyListOf(NamespaceInfo.class)))
   .thenReturn(builder);
 dataset.addVolume(loc, nsInfos);
 assertEquals(numExistingVolumes + 1, dataset.getVolumes().size());
 when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
 when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
 Set<File> volumesToRemove = new HashSet<>();
 volumesToRemove.add(loc.getFile());
 dataset.removeVolumes(volumesToRemove, true);
 assertEquals(numExistingVolumes, dataset.getVolumes().size());
}
origin: ch.cern.hadoop/hadoop-hdfs

final String volumePathToRemove = dataDirs[0];
Set<File> volumesToRemove = new HashSet<>();
volumesToRemove.add(StorageLocation.parse(volumePathToRemove).getFile());
origin: ch.cern.hadoop/hadoop-hdfs

@Test
public void testAddStorageDirectories() throws IOException,
  URISyntaxException {
 final int numLocations = 3;
 final int numNamespace = 3;
 List<StorageLocation> locations = createStorageLocations(numLocations);
 // Add volumes for multiple namespaces.
 List<NamespaceInfo> namespaceInfos = createNamespaceInfos(numNamespace);
 for (NamespaceInfo ni : namespaceInfos) {
  storage.addStorageLocations(mockDN, ni, locations, START_OPT);
  for (StorageLocation sl : locations) {
   checkDir(sl.getFile());
   checkDir(sl.getFile(), ni.getBlockPoolID());
  }
 }
 assertEquals(numLocations, storage.getNumStorageDirs());
 locations = createStorageLocations(numLocations);
 List<StorageDirectory> addedLocation =
   storage.addStorageLocations(mockDN, namespaceInfos.get(0),
     locations, START_OPT);
 assertTrue(addedLocation.isEmpty());
 // The number of active storage dirs has not changed, since it tries to
 // add the storage dirs that are under service.
 assertEquals(numLocations, storage.getNumStorageDirs());
 // Add more directories.
 locations = createStorageLocations(6);
 storage.addStorageLocations(mockDN, nsInfo, locations, START_OPT);
 assertEquals(6, storage.getNumStorageDirs());
}

Popular methods of StorageLocation

  • getStorageType
  • getUri
  • parse
    Attempt to parse a storage URI with storage class and URI. The storage class component of the URI is case-insensitive (see the sketch after this list).
  • toString
  • <init>
  • compareTo
  • equals
  • getBpURI
  • getNormalizedUri
  • makeBlockPoolDir
    Create physical directory for block pools on the data node.
  • matchesStorageDirectory
  • normalizeFileURI
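
A minimal sketch of parse, assuming Hadoop 2.7-era packages (where StorageType lives in org.apache.hadoop.fs) and the bracketed storage-type prefix syntax used in dfs.datanode.data.dir; the paths are hypothetical:

import java.io.IOException;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

class ParseSketch {
 public static void main(String[] args) throws IOException {
  // A [STORAGETYPE] prefix selects the storage class; the prefix is
  // case-insensitive.
  StorageLocation ssd = StorageLocation.parse("[SSD]file:///data/ssd1");
  System.out.println(ssd.getStorageType()); // SSD

  // Without a prefix the location falls back to StorageType.DEFAULT (DISK).
  StorageLocation disk = StorageLocation.parse("/data/disk1");
  System.out.println(disk.getStorageType()); // DISK
 }
}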
