StorageLocation.parse

How to use parse method in org.apache.hadoop.hdfs.server.datanode.StorageLocation

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode.StorageLocation.parse (Showing top 19 results out of 315)
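
A minimal, self-contained sketch of the pattern most of the results below share: read the comma-separated dfs.datanode.data.dir value from a Configuration and parse each entry into a StorageLocation. The class name, the in-line config value, and the example paths are invented for illustration; StorageLocation.parse, Configuration.getTrimmedStringCollection, and the dfs.datanode.data.dir key are the Hadoop pieces the snippets themselves use.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

public class ParseDataDirsSketch { // hypothetical example class
 public static void main(String[] args) throws IOException {
  Configuration conf = new Configuration();
  // Example value only; a real DataNode reads this from hdfs-site.xml.
  conf.set("dfs.datanode.data.dir", "/data/dn1,[SSD]/data/dn2");

  List<StorageLocation> locations = new ArrayList<>();
  for (String raw : conf.getTrimmedStringCollection("dfs.datanode.data.dir")) {
   // parse() accepts a plain path or URI, optionally prefixed with a
   // storage type such as [SSD]; it throws IOException on bad input.
   locations.add(StorageLocation.parse(raw));
  }

  for (StorageLocation loc : locations) {
   System.out.println(loc.getStorageType() + " -> " + loc);
  }
 }
}

In production code the parse call is wrapped in a try/catch for IOException and SecurityException so a bad entry is logged and skipped, as the getStorageLocations snippets below do.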

origin: org.apache.hadoop/hadoop-hdfs

public static List<StorageLocation> getStorageLocations(Configuration conf) {
 Collection<String> rawLocations =
   conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
 List<StorageLocation> locations =
   new ArrayList<StorageLocation>(rawLocations.size());
 for(String locationString : rawLocations) {
  final StorageLocation location;
  try {
   location = StorageLocation.parse(locationString);
  } catch (IOException | SecurityException ioe) {
   LOG.error("Failed to initialize storage directory {}." +
     "Exception details: {}", locationString, ioe.toString());
   // Ignore the exception.
   continue;
  }
  locations.add(location);
 }
 return locations;
}
origin: org.apache.hadoop/hadoop-hdfs

results.deactivateLocations.add(
   StorageLocation.parse(dir.getRoot().toString()));
 LOG.info("Deactivation request received for failed volume: {}",
   failedStorageLocation);
 results.deactivateLocations.add(StorageLocation.parse(
   failedStorageLocation));
origin: org.apache.hadoop/hadoop-hdfs

memBlockInfo.updateWithReplica(
  StorageLocation.parse(diskFile.toString()));
origin: ch.cern.hadoop/hadoop-hdfs

public static List<StorageLocation> getStorageLocations(Configuration conf) {
 Collection<String> rawLocations =
   conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
 List<StorageLocation> locations =
   new ArrayList<StorageLocation>(rawLocations.size());
 for(String locationString : rawLocations) {
  final StorageLocation location;
  try {
   location = StorageLocation.parse(locationString);
  } catch (IOException ioe) {
   LOG.error("Failed to initialize storage directory " + locationString
     + ". Exception details: " + ioe);
   // Ignore the exception.
   continue;
  } catch (SecurityException se) {
   LOG.error("Failed to initialize storage directory " + locationString
          + ". Exception details: " + se);
   // Ignore the exception.
   continue;
  }
  locations.add(location);
 }
 return locations;
}
origin: io.prestosql.hadoop/hadoop-apache

public static List<StorageLocation> getStorageLocations(Configuration conf) {
 Collection<String> rawLocations =
   conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
 List<StorageLocation> locations =
   new ArrayList<StorageLocation>(rawLocations.size());
 for(String locationString : rawLocations) {
  final StorageLocation location;
  try {
   location = StorageLocation.parse(locationString);
  } catch (IOException ioe) {
   LOG.error("Failed to initialize storage directory " + locationString
     + ". Exception details: " + ioe);
   // Ignore the exception.
   continue;
  } catch (SecurityException se) {
   LOG.error("Failed to initialize storage directory " + locationString
          + ". Exception details: " + se);
   // Ignore the exception.
   continue;
  }
  locations.add(location);
 }
 return locations;
}
origin: ch.cern.hadoop/hadoop-hdfs

results.deactivateLocations.add(
  StorageLocation.parse(dir.getRoot().toString()));
origin: io.prestosql.hadoop/hadoop-apache

results.deactivateLocations.add(
  StorageLocation.parse(dir.getRoot().toString()));
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Create a list of StorageLocations.
 * If asFile sets to true, create StorageLocation as regular files, otherwise
 * create directories for each location.
 * @param numLocs the total number of StorageLocations to be created.
 * @param asFile set to true to create as file.
 * @return a list of StorageLocations.
 */
private static List<StorageLocation> createStorageLocations(
  int numLocs, boolean asFile) throws IOException {
 List<StorageLocation> locations = new ArrayList<StorageLocation>();
 for (int i = 0; i < numLocs; i++) {
  String uri = TEST_DIR + "/data" + i;
  File file = new File(uri);
  if (asFile) {
   file.getParentFile().mkdirs();
   file.createNewFile();
  } else {
   file.mkdirs();
  }
  StorageLocation loc = StorageLocation.parse(uri);
  locations.add(loc);
 }
 return locations;
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Starts an instance of DataNode
 * @throws IOException
 */
@Before
public void startUp() throws IOException, URISyntaxException {
 tearDownDone = false;
 conf = new HdfsConfiguration();
 conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, DATA_DIR);
 conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
 conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
 conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
 conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
 FileSystem.setDefaultUri(conf,
   "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
 File dataDir = new File(DATA_DIR);
 FileUtil.fullyDelete(dataDir);
 dataDir.mkdirs();
 StorageLocation location = StorageLocation.parse(dataDir.getPath());
 locations.add(location);
}
origin: ch.cern.hadoop/hadoop-hdfs

StorageLocation sl = StorageLocation.parse(dir);
File lockFile = new File(sl.getFile(), Storage.STORAGE_FILE_LOCK);
try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
origin: ch.cern.hadoop/hadoop-hdfs

 @Test (timeout = 30000)
 public void testDataDirValidation() throws Throwable {
  
  DataNodeDiskChecker diskChecker = mock(DataNodeDiskChecker.class);
  doThrow(new IOException()).doThrow(new IOException()).doNothing()
   .when(diskChecker).checkDir(any(LocalFileSystem.class), any(Path.class));
  LocalFileSystem fs = mock(LocalFileSystem.class);
  AbstractList<StorageLocation> locations = new ArrayList<StorageLocation>();

  locations.add(StorageLocation.parse("file:/p1/"));
  locations.add(StorageLocation.parse("file:/p2/"));
  locations.add(StorageLocation.parse("file:/p3/"));

  List<StorageLocation> checkedLocations =
    DataNode.checkStorageLocations(locations, fs, diskChecker);
  assertEquals("number of valid data dirs", 1, checkedLocations.size());
  String validDir = checkedLocations.iterator().next().getFile().getPath();
  assertThat("p3 should be valid", new File("/p3/").getPath(), is(validDir));
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

  .thenReturn(builder);
StorageLocation location = StorageLocation.parse(badDir.toString());
List<NamespaceInfo> nsInfos = Lists.newArrayList();
for (String bpid : BLOCK_POOL_IDS) {
origin: ch.cern.hadoop/hadoop-hdfs

 newDataDirBuf.append(",");
 newDataDirBuf.append(
   StorageLocation.parse(volumeDir.toString()).toString());
List<StorageLocation> effectiveStorageLocations = new ArrayList<>();
for (int i = 0; i < expectDataDirs.length; i++) {
 StorageLocation expectLocation = StorageLocation.parse(expectDataDirs[i]);
 StorageLocation effectiveLocation = StorageLocation
   .parse(effectiveDataDirs[i]);
 expectedStorageLocations.add(expectLocation);
 effectiveStorageLocations.add(effectiveLocation);
origin: ch.cern.hadoop/hadoop-hdfs

String pathUri = new Path(path).toUri().toString();
expectedVolumes.add(new File(pathUri).toString());
StorageLocation loc = StorageLocation.parse(pathUri);
Storage.StorageDirectory sd = createStorageDirectory(new File(path));
DataStorage.VolumeBuilder builder =
origin: ch.cern.hadoop/hadoop-hdfs

@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
 final int numExistingVolumes = dataset.getVolumes().size();
 List<NamespaceInfo> nsInfos = new ArrayList<>();
 for (String bpid : BLOCK_POOL_IDS) {
  nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
 }
 String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
 StorageLocation loc = StorageLocation.parse(newVolumePath);
 Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
 DataStorage.VolumeBuilder builder =
   new DataStorage.VolumeBuilder(storage, sd);
 when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
   anyListOf(NamespaceInfo.class)))
   .thenReturn(builder);
 dataset.addVolume(loc, nsInfos);
 assertEquals(numExistingVolumes + 1, dataset.getVolumes().size());
 when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
 when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
 Set<File> volumesToRemove = new HashSet<>();
 volumesToRemove.add(loc.getFile());
 dataset.removeVolumes(volumesToRemove, true);
 assertEquals(numExistingVolumes, dataset.getVolumes().size());
}
origin: ch.cern.hadoop/hadoop-hdfs

@Test
public void testParseChangedVolumes() throws IOException {
 startDFSCluster(1, 1);
 DataNode dn = cluster.getDataNodes().get(0);
 Configuration conf = dn.getConf();
 String oldPaths = conf.get(DFS_DATANODE_DATA_DIR_KEY);
 List<StorageLocation> oldLocations = new ArrayList<StorageLocation>();
 for (String path : oldPaths.split(",")) {
  oldLocations.add(StorageLocation.parse(path));
 }
 assertFalse(oldLocations.isEmpty());
 String newPaths = oldLocations.get(0).getFile().getAbsolutePath() +
   ",/foo/path1,/foo/path2";
 DataNode.ChangedVolumes changedVolumes =
   dn.parseChangedVolumes(newPaths);
 List<StorageLocation> newVolumes = changedVolumes.newLocations;
 assertEquals(2, newVolumes.size());
 assertEquals(new File("/foo/path1").getAbsolutePath(),
  newVolumes.get(0).getFile().getAbsolutePath());
 assertEquals(new File("/foo/path2").getAbsolutePath(),
  newVolumes.get(1).getFile().getAbsolutePath());
 List<StorageLocation> removedVolumes = changedVolumes.deactivateLocations;
 assertEquals(1, removedVolumes.size());
 assertEquals(oldLocations.get(1).getFile(),
   removedVolumes.get(0).getFile());
 assertEquals(1, changedVolumes.unchangedLocations.size());
 assertEquals(oldLocations.get(0).getFile(),
   changedVolumes.unchangedLocations.get(0).getFile());
}
origin: ch.cern.hadoop/hadoop-hdfs

FileUtil.fullyDelete(dataDir);
dataDir.mkdirs();
StorageLocation location = StorageLocation.parse(dataDir.getPath());
locations.add(location);
final DatanodeProtocolClientSideTranslatorPB namenode =
origin: ch.cern.hadoop/hadoop-hdfs

assertEquals(4, effectiveVolumes.length);
for (String ev : effectiveVolumes) {
 assertThat(StorageLocation.parse(ev).getFile().getCanonicalPath(),
   is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
origin: ch.cern.hadoop/hadoop-hdfs

final String volumePathToRemove = dataDirs[0];
Set<File> volumesToRemove = new HashSet<>();
volumesToRemove.add(StorageLocation.parse(volumePathToRemove).getFile());

org.apache.hadoop.hdfs.server.datanode.StorageLocation.parse

Javadoc

Attempt to parse a storage URI with storage class and URI. The storage class component of the URI is case-insensitive.
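
A quick hedged sketch of that behavior (class and path names are invented for illustration): the [STORAGETYPE] prefix is matched case-insensitively, and an entry without a prefix falls back to the default storage type (DISK in stock Hadoop).

import java.io.IOException;

import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

public class StorageClassPrefixSketch { // hypothetical example class
 public static void main(String[] args) throws IOException {
  // Lower-case prefix still resolves to the SSD storage type.
  StorageLocation ssd = StorageLocation.parse("[ssd]/mnt/ssd0/hdfs/data");
  System.out.println(ssd.getStorageType()); // SSD
  System.out.println(ssd.getUri());         // URI form of /mnt/ssd0/hdfs/data

  // No prefix: the default storage type is assumed.
  StorageLocation disk = StorageLocation.parse("/mnt/disk0/hdfs/data");
  System.out.println(disk.getStorageType()); // DISK
 }
}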

Popular methods of StorageLocation

  • getFile
  • getStorageType
  • getUri
  • toString
  • <init>
  • compareTo
  • equals
  • getBpURI
  • getNormalizedUri
  • makeBlockPoolDir
    Create physical directory for block pools on the data node.
  • matchesStorageDirectory
  • normalizeFileURI
