StorageLocation

How to use StorageLocation in org.apache.hadoop.hdfs.server.datanode

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode.StorageLocation (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

private static File getStorageLocationFile(StorageLocation location) {
 if (location == null ||
   location.getStorageType() == StorageType.PROVIDED) {
  return null;
 }
 try {
  return new File(location.getUri());
 } catch (IllegalArgumentException e) {
  //if location does not refer to a File
  return null;
 }
}
origin: org.apache.hadoop/hadoop-hdfs

public static List<StorageLocation> getStorageLocations(Configuration conf) {
 Collection<String> rawLocations =
   conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
 List<StorageLocation> locations =
   new ArrayList<StorageLocation>(rawLocations.size());
 for(String locationString : rawLocations) {
  final StorageLocation location;
  try {
   location = StorageLocation.parse(locationString);
  } catch (IOException | SecurityException ioe) {
   LOG.error("Failed to initialize storage directory {}." +
     "Exception details: {}", locationString, ioe.toString());
   // Ignore the exception.
   continue;
  }
  locations.add(location);
 }
 return locations;
}
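A minimal usage sketch for the snippet above, assuming the hadoop-hdfs artifact is on the classpath; the volume paths and the demo class name are made up for illustration:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;

public class StorageLocationsDemo {
 public static void main(String[] args) {
  Configuration conf = new Configuration();
  // Hypothetical volumes: an explicit SSD tier plus a default (DISK) tier.
  conf.set(DFS_DATANODE_DATA_DIR_KEY, "[SSD]/data/ssd0,/data/disk0");
  // Each comma-separated entry becomes one StorageLocation; entries that
  // fail to parse are logged and skipped rather than aborting startup.
  List<StorageLocation> locations = DataNode.getStorageLocations(conf);
  for (StorageLocation loc : locations) {
   System.out.println(loc.getStorageType() + " -> " + loc.getUri());
  }
 }
}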
origin: org.apache.hadoop/hadoop-hdfs

@Override
public int hashCode() {
 return toString().hashCode();
}
origin: org.apache.hadoop/hadoop-hdfs

private static File getBlockPoolCurrentDir(String bpid,
  StorageLocation location) {
 if (location == null ||
   location.getStorageType() == StorageType.PROVIDED) {
  return null;
 } else {
  return new File(location.getBpURI(bpid, STORAGE_DIR_CURRENT));
 }
}
origin: org.apache.hadoop/hadoop-hdfs

@Override
public int compareTo(StorageLocation obj) {
 if (obj == this) {
  return 0;
 } else if (obj == null) {
  return -1;
 }
 StorageLocation otherStorage = (StorageLocation) obj;
 if (this.getNormalizedUri() != null &&
   otherStorage.getNormalizedUri() != null) {
  return this.getNormalizedUri().compareTo(
    otherStorage.getNormalizedUri());
 } else if (this.getNormalizedUri() == null &&
   otherStorage.getNormalizedUri() == null) {
  return this.storageType.compareTo(otherStorage.getStorageType());
 } else if (this.getNormalizedUri() == null) {
  return -1;
 } else {
  return 1;
 }
}
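Because compareTo orders locations by their normalized URI, parsed volumes sort deterministically; a small sketch (paths are placeholders, and java.util plus StorageLocation imports are assumed):

static void sortVolumes() throws IOException {
 List<StorageLocation> vols = new ArrayList<>(Arrays.asList(
   StorageLocation.parse("/data/dn1"),
   StorageLocation.parse("/data/dn0")));
 Collections.sort(vols); // orders by normalized URI: dn0 before dn1
}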
origin: ch.cern.hadoop/hadoop-hdfs

// Reconstructed context from DataNode#parseChangedVolumes (Hadoop 2.x):
// a directory still present in the new config is unchanged; otherwise
// it is marked for deactivation.
boolean found = false;
for (Iterator<StorageLocation> sl = results.newLocations.iterator();
  sl.hasNext(); ) {
 StorageLocation location = sl.next();
 if (location.getFile().getCanonicalPath().equals(
   dir.getRoot().getCanonicalPath())) {
  sl.remove();
  found = true;
  break;
 }
}
if (!found) {
 results.deactivateLocations.add(
   StorageLocation.parse(dir.getRoot().toString()));
}
origin: ch.cern.hadoop/hadoop-hdfs

private StorageType getStorageTypeFromLocations(
  Collection<StorageLocation> dataLocations, File dir) {
 for (StorageLocation dataLocation : dataLocations) {
  if (dataLocation.getFile().equals(dir)) {
   return dataLocation.getStorageType();
  }
 }
 return StorageType.DEFAULT;
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Remove volumes from DataNode.
 * See {@link #removeVolumes(Set, boolean)} for details.
 *
 * @param locations the StorageLocations of the volumes to be removed.
 * @throws IOException
 */
private void removeVolumes(final Collection<StorageLocation> locations)
 throws IOException {
 if (locations.isEmpty()) {
  return;
 }
 Set<File> volumesToRemove = new HashSet<>();
 for (StorageLocation loc : locations) {
  volumesToRemove.add(loc.getFile().getAbsoluteFile());
 }
 removeVolumes(volumesToRemove, true);
}
origin: org.apache.hadoop/hadoop-hdfs

FsVolumeImpl(FsDatasetImpl dataset, String storageID, StorageDirectory sd,
  FileIoProvider fileIoProvider, Configuration conf, DF usage)
  throws IOException {
 if (sd.getStorageLocation() == null) {
  throw new IOException("StorageLocation specified for storage directory " +
    sd + " is null");
 }
 this.dataset = dataset;
 this.storageID = storageID;
 this.reservedForReplicas = new AtomicLong(0L);
 this.storageLocation = sd.getStorageLocation();
 this.currentDir = sd.getCurrentDir();
 this.storageType = storageLocation.getStorageType();
 this.configuredCapacity = -1;
 this.usage = usage;
 if (currentDir != null) {
  File parent = currentDir.getParentFile();
  cacheExecutor = initializeCacheExecutor(parent);
  this.metrics = DataNodeVolumeMetrics.create(conf, parent.getPath());
 } else {
  cacheExecutor = null;
  this.metrics = null;
 }
 this.conf = conf;
 this.fileIoProvider = fileIoProvider;
 this.reserved = new ReservedSpaceCalculator.Builder(conf)
   .setUsage(usage).setStorageType(storageType).build();
}
origin: ch.cern.hadoop/hadoop-hdfs

static List<StorageLocation> checkStorageLocations(
  Collection<StorageLocation> dataDirs,
  LocalFileSystem localFS, DataNodeDiskChecker dataNodeDiskChecker)
    throws IOException {
 ArrayList<StorageLocation> locations = new ArrayList<StorageLocation>();
 StringBuilder invalidDirs = new StringBuilder();
 for (StorageLocation location : dataDirs) {
  final URI uri = location.getUri();
  try {
   dataNodeDiskChecker.checkDir(localFS, new Path(uri));
   locations.add(location);
  } catch (IOException ioe) {
   LOG.warn("Invalid " + DFS_DATANODE_DATA_DIR_KEY + " "
     + location.getFile() + " : ", ioe);
   invalidDirs.append("\"").append(uri.getPath()).append("\" ");
  }
 }
 if (locations.size() == 0) {
  throw new IOException("All directories in "
    + DFS_DATANODE_DATA_DIR_KEY + " are invalid: "
    + invalidDirs);
 }
 return locations;
}
origin: org.apache.hadoop/hadoop-hdfs

// Reconstructed context from DataNode#parseChangedVolumes: index the
// currently configured locations by normalized URI, then diff them
// against the newly configured ones (results and dir are method locals).
Map<String, StorageLocation> existingStorageLocations = new HashMap<>();
for (StorageLocation loc : getStorageLocations(getConf())) {
 existingStorageLocations.put(loc.getNormalizedUri().toString(), loc);
}
boolean found = false;
for (Iterator<StorageLocation> newLocationItr =
   results.newLocations.iterator(); newLocationItr.hasNext();) {
 StorageLocation newLocation = newLocationItr.next();
 if (newLocation.matchesStorageDirectory(dir)) {
  StorageLocation oldLocation = existingStorageLocations.get(
    newLocation.getNormalizedUri().toString());
  if (oldLocation != null &&
    oldLocation.getStorageType() != newLocation.getStorageType()) {
   throw new IOException("Changing storage type is not allowed.");
  }
  newLocationItr.remove();
  results.unchangedLocations.add(newLocation);
  found = true;
  break;
 }
}
if (!found) {
 LOG.info("Deactivation request received for active volume: {}",
   dir.getRoot());
 results.deactivateLocations.add(
   StorageLocation.parse(dir.getRoot().toString()));
}
// Failed volumes absent from the new configuration are also deactivated.
for (String failedStorageLocation : getFSDataset()
  .getVolumeFailureSummary().getFailedStorageLocations()) {
 boolean stillConfigured = results.newLocations.stream().anyMatch(loc ->
   loc.getNormalizedUri().toString().equals(failedStorageLocation));
 if (!stillConfigured) {
  LOG.info("Deactivation request received for failed volume: {}",
    failedStorageLocation);
  results.deactivateLocations.add(StorageLocation.parse(
    failedStorageLocation));
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

// Reconstructed test excerpt: build the new dfs.datanode.data.dir value,
// then parse expected and effective locations pairwise (volumeDirs,
// expectDataDirs and effectiveDataDirs are test locals).
StringBuilder newDataDirBuf = new StringBuilder();
for (File volumeDir : volumeDirs) {
 if (newDataDirBuf.length() > 0) {
  newDataDirBuf.append(",");
 }
 newDataDirBuf.append(
   StorageLocation.parse(volumeDir.toString()).toString());
}
List<StorageLocation> expectedStorageLocations = new ArrayList<>();
List<StorageLocation> effectiveStorageLocations = new ArrayList<>();
for (int i = 0; i < expectDataDirs.length; i++) {
 StorageLocation expectLocation = StorageLocation.parse(expectDataDirs[i]);
 StorageLocation effectiveLocation = StorageLocation
   .parse(effectiveDataDirs[i]);
 expectedStorageLocations.add(expectLocation);
 effectiveStorageLocations.add(effectiveLocation);
}
origin: org.apache.hadoop/hadoop-hdfs

public boolean matchesStorageDirectory(StorageDirectory sd,
  String bpid) throws IOException {
 if (sd.getStorageLocation().getStorageType() == StorageType.PROVIDED &&
   storageType == StorageType.PROVIDED) {
  return matchesStorageDirectory(sd);
 }
 if (sd.getStorageLocation().getStorageType() == StorageType.PROVIDED ||
   storageType == StorageType.PROVIDED) {
  // only one PROVIDED storage directory can exist; so this cannot match!
  return false;
 }
 // both storage directories are local
 return this.getBpURI(bpid, Storage.STORAGE_DIR_CURRENT).normalize()
   .equals(sd.getRoot().toURI().normalize());
}
origin: org.apache.hadoop/hadoop-hdfs

public URI getBpURI(String bpid, String currentStorageDir) {
 try {
  File localFile = new File(getUri());
  return new File(new File(localFile, currentStorageDir), bpid).toURI();
 } catch (IllegalArgumentException e) {
  return null;
 }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Attempts to parse a storage location of the form [type]uri, where the
 * storage type component is optional and case-insensitive.
 *
 * @param rawLocation Location string of the format [type]uri, where [type] is
 *                    optional.
 * @return A StorageLocation object if successfully parsed; parsing
 *         failures propagate as IOException or SecurityException.
 */
public static StorageLocation parse(String rawLocation)
  throws IOException, SecurityException {
 Matcher matcher = regex.matcher(rawLocation);
 StorageType storageType = StorageType.DEFAULT;
 String location = rawLocation;
 if (matcher.matches()) {
  String classString = matcher.group(1);
  location = matcher.group(2).trim();
  if (!classString.isEmpty()) {
   storageType =
     StorageType.valueOf(StringUtils.toUpperCase(classString));
  }
 }
 //do Path.toURI instead of new URI(location) as this ensures that
 //"/a/b" and "/a/b/" are represented in a consistent manner
 return new StorageLocation(storageType, new Path(location).toUri());
}
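A few hedged examples of the accepted format (the paths are illustrative; assumes StorageLocation and java.io.IOException are imported):

static void parseExamples() throws IOException {
 StorageLocation disk = StorageLocation.parse("/data/dn0");     // defaults to DISK
 StorageLocation ssd = StorageLocation.parse("[ssd]/data/dn1"); // type is case-insensitive
 StorageLocation ram = StorageLocation.parse("[RAM_DISK]/mnt/dn2");
 // As the comment above notes, "/a/b" and "/a/b/" map to the same URI,
 // so the two locations compare equal.
 assert StorageLocation.parse("/a/b").equals(StorageLocation.parse("/a/b/"));
}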
origin: org.apache.hadoop/hadoop-hdfs

@Override
public boolean equals(Object obj) {
 if (obj == null || !(obj instanceof StorageLocation)) {
  return false;
 }
 int comp = compareTo((StorageLocation) obj);
 return comp == 0;
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Create an ID for this storage.
 * @return true if a new storage ID was generated.
 */
public static boolean createStorageID(
  StorageDirectory sd, boolean regenerateStorageIds, Configuration conf) {
 final String oldStorageID = sd.getStorageUuid();
 if (sd.getStorageLocation() != null &&
   sd.getStorageLocation().getStorageType() == StorageType.PROVIDED) {
  // Only one provided storage id is supported.
  // TODO support multiple provided storage ids
  sd.setStorageUuid(conf.get(DFSConfigKeys.DFS_PROVIDER_STORAGEUUID,
    DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT));
  return false;
 }
 if (oldStorageID == null || regenerateStorageIds) {
  sd.setStorageUuid(DatanodeStorage.generateUuid());
  LOG.info("Generated new storageID {} for directory {} {}", sd
      .getStorageUuid(), sd.getRoot(),
    (oldStorageID == null ? "" : (" to replace " + oldStorageID)));
  return true;
 }
 return false;
}
org.apache.hadoop.hdfs.server.datanode.StorageLocation

Javadoc

Encapsulates the URI and storage medium that together describe a storage directory. The default storage medium is assumed to be DISK, if none is specified.
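A short usage sketch tying the main methods together; the block-pool ID and path are placeholders, and the hadoop-hdfs artifact is assumed to be on the classpath:

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

public class StorageLocationTour {
 public static void main(String[] args) throws IOException {
  StorageLocation loc = StorageLocation.parse("[ARCHIVE]/data/archive0");
  StorageType type = loc.getStorageType(); // ARCHIVE
  URI uri = loc.getUri();                  // e.g. file:/data/archive0
  // Resolve the block pool's "current" directory under this volume,
  // i.e. <volume>/current/<bpid>.
  URI bpDir = loc.getBpURI("BP-1-127.0.0.1-1000", Storage.STORAGE_DIR_CURRENT);
  System.out.println(type + " " + uri + " " + bpDir);
 }
}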

Most used methods

  • getFile
  • getStorageType
  • getUri
  • parse
    Attempt to parse a storage uri with storage class and URI. The storage class component of the uri is case-insensitive.
  • toString
  • <init>
  • compareTo
  • equals
  • getBpURI
  • getNormalizedUri
  • makeBlockPoolDir
    Create physical directory for block pools on the data node.
  • matchesStorageDirectory
  • normalizeFileURI
