congrats Icon
New! Tabnine Pro 14-day free trial
Start a free trial
Tabnine Logo
InconsistentFSStateException.<init>
Code IndexAdd Tabnine to your IDE (free)

How to use
org.apache.hadoop.hdfs.server.common.InconsistentFSStateException
constructor

Best Java code snippets using org.apache.hadoop.hdfs.server.common.InconsistentFSStateException.<init> (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Reads the named entry from the given VERSION-file properties.
 *
 * @param props properties loaded from the storage VERSION file
 * @param sd storage directory the properties came from (used in error text)
 * @param name property key to look up
 * @return the property value, never null
 * @throws InconsistentFSStateException if the key is absent from the file
 */
protected static String getProperty(Properties props, StorageDirectory sd,
  String name) throws InconsistentFSStateException {
 final String value = props.getProperty(name);
 if (value != null) {
  return value;
 }
 // A missing key means the VERSION file is incomplete or corrupt.
 throw new InconsistentFSStateException(sd.root, "file "
   + STORAGE_FILE_VERSION + " has " + name + " missing.");
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Validates the given block pool ID against any ID already recorded and
 * stores it.
 *
 * @param storage storage root, used only for error reporting
 * @param bpid candidate block pool ID read from the VERSION file
 * @throws InconsistentFSStateException if the ID is null/empty or conflicts
 *         with a previously recorded ID
 */
private void setBlockPoolID(File storage, String bpid)
  throws InconsistentFSStateException {
 final boolean missing = (bpid == null) || bpid.isEmpty();
 if (missing) {
  throw new InconsistentFSStateException(storage, "file "
    + STORAGE_FILE_VERSION + " is invalid.");
 }

 // An empty recorded ID means "not yet assigned"; only a differing
 // non-empty recorded ID is a conflict.
 final boolean alreadyAssigned = !blockpoolID.equals("");
 if (alreadyAssigned && !bpid.equals(blockpoolID)) {
  throw new InconsistentFSStateException(storage,
    "Unexpected blockpoolID " + bpid + ". Expected " + blockpoolID);
 }
 blockpoolID = bpid;
}

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Validates and records the block pool ID read from the VERSION file.
 *
 * @param storage storage root, used only for error reporting
 * @param bpid candidate block pool ID
 * @throws InconsistentFSStateException if the ID is null/empty or differs
 *         from a previously recorded non-empty ID
 */
private void setBlockPoolID(File storage, String bpid)
  throws InconsistentFSStateException {
 if (bpid == null || bpid.equals("")) {
  throw new InconsistentFSStateException(storage, "file "
    + Storage.STORAGE_FILE_VERSION + " has no block pool Id.");
 }

 if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
  // Fixed typo ("Unexepcted") and stray space before the period in the
  // error message.
  throw new InconsistentFSStateException(storage,
    "Unexpected blockpoolID " + bpid + ". Expected " + blockpoolID);
 }
 setBlockPoolID(bpid);
}

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Reads "namespaceID" from the VERSION properties, checks it is consistent
 * with any namespace ID already recorded, and stores it.
 *
 * @param props VERSION-file properties
 * @param sd storage directory the properties belong to
 * @throws InconsistentFSStateException if a different non-zero namespace ID
 *         was already recorded
 */
protected void setNamespaceID(Properties props, StorageDirectory sd)
  throws InconsistentFSStateException {
 final int parsed = Integer.parseInt(getProperty(props, sd, "namespaceID"));
 // Zero means "not yet assigned" on either side; only two non-zero,
 // differing IDs constitute a conflict.
 final boolean bothAssigned = (namespaceID != 0) && (parsed != 0);
 if (bothAssigned && parsed != namespaceID) {
  throw new InconsistentFSStateException(sd.root,
    "namespaceID is incompatible with others.");
 }
 namespaceID = parsed;
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Verifies that no storage directory still holds a previous fs state,
 * which would mean an earlier upgrade was never finalized or rolled back.
 *
 * @param storage NN storage whose directories are scanned
 * @throws IOException if any directory still has a previous/ state
 */
public static void checkUpgrade(NNStorage storage) throws IOException {
 // Upgrade or rolling upgrade is allowed only when every local storage
 // directory is free of leftover previous fs state.
 final Iterator<StorageDirectory> dirs = storage.dirIterator(false);
 while (dirs.hasNext()) {
  final StorageDirectory sd = dirs.next();
  if (sd.getPreviousDir().exists()) {
   throw new InconsistentFSStateException(sd.getRoot(),
     "previous fs state should not exist during upgrade. "
     + "Finalize or rollback first.");
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Check to see if current/ directory is empty. This method is used
 * before determining to format the directory.
 *
 * @throws InconsistentFSStateException if not empty.
 * @throws IOException if unable to list files under the directory.
 */
private void checkEmptyCurrent() throws InconsistentFSStateException,
  IOException {
 final File current = getCurrentDir();
 // A missing current/ directory is fine: formatting will create it.
 if (current == null || !current.exists()) {
  return;
 }
 try (DirectoryStream<java.nio.file.Path> entries =
   Files.newDirectoryStream(current.toPath())) {
  // Any entry at all makes the directory unsafe to format over.
  if (entries.iterator().hasNext()) {
   throw new InconsistentFSStateException(root,
     "Can't format the storage directory because the current "
       + "directory is not empty.");
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Reads "clusterID" from the VERSION properties (for layouts that support
 * federation), checks it against any cluster ID already recorded, and
 * stores it.
 *
 * @param props VERSION-file properties
 * @param layoutVersion layout version of the storage directory
 * @param sd storage directory the properties belong to
 * @throws InconsistentFSStateException if the IDs conflict
 */
protected void setClusterId(Properties props, int layoutVersion,
  StorageDirectory sd) throws InconsistentFSStateException {
 // Cluster IDs only exist in layout versions that support federation.
 if (!LayoutVersion.supports(getServiceLayoutFeatureMap(),
   Feature.FEDERATION, layoutVersion)) {
  return;
 }
 final String cid = getProperty(props, sd, "clusterID");
 // Empty on either side means "not yet assigned"; two differing non-empty
 // IDs are a conflict.
 final boolean conflict = !clusterID.equals("") && !cid.equals("")
   && !clusterID.equals(cid);
 if (conflict) {
  throw new InconsistentFSStateException(sd.getRoot(),
    "cluster Id is incompatible with others.");
 }
 clusterID = cid;
}

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Verifies that the "storageType" recorded in the VERSION properties
 * matches this storage's expected node type.
 *
 * @param props VERSION-file properties
 * @param sd storage directory being checked
 * @throws InconsistentFSStateException on a node-type mismatch
 */
protected void checkStorageType(Properties props, StorageDirectory sd)
  throws InconsistentFSStateException {
 if (storageType == null) {
  // No expected type configured; nothing to validate.
  return;
 }
 final NodeType recorded =
   NodeType.valueOf(getProperty(props, sd, "storageType"));
 if (!recorded.equals(storageType)) {
  throw new InconsistentFSStateException(sd.root,
    "Incompatible node types: storageType=" + storageType
    + " but StorageDirectory type=" + recorded);
 }
}

origin: org.apache.hadoop/hadoop-hdfs

 return StorageState.NORMAL;
if (hasPrevious)
 throw new InconsistentFSStateException(root,
           "version file in current directory is missing.");
if (checkCurrentIsEmpty) {
 + (hasFinalizedTmp?1:0) + (hasCheckpointTmp?1:0) > 1)
throw new InconsistentFSStateException(root,
                    "too many temporary directories.");
 throw new InconsistentFSStateException(root,
                     STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_FINALIZED
                     + "cannot exist together.");
 throw new InconsistentFSStateException(root,
                     STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_PREVIOUS
                     + " cannot exist together.");
throw new InconsistentFSStateException(root,
                    "one and only one directory " + STORAGE_DIR_CURRENT 
                    + " or " + STORAGE_DIR_PREVIOUS 
origin: org.apache.hadoop/hadoop-hdfs

case NON_EXISTENT:
 throw new InconsistentFSStateException(sd.getRoot(),
    "checkpoint directory does not exist or is not accessible.");
case NOT_FORMATTED:
origin: org.apache.hadoop/hadoop-hdfs

 throw new InconsistentFSStateException(sd.getRoot(), "file "
   + STORAGE_FILE_VERSION + " is invalid.");
if (!(sid == null || sid.equals("") ||
   ssid.equals("") || sid.equals(ssid))) {
 throw new InconsistentFSStateException(sd.getRoot(),
   "has incompatible storage Id.");
  setDatanodeUuid(dnUuid);
 } else if (getDatanodeUuid().compareTo(dnUuid) != 0) {
  throw new InconsistentFSStateException(sd.getRoot(),
    "Root " + sd.getRoot() + ": DatanodeUuid=" + dnUuid +
    ", does not match " + getDatanodeUuid() + " from other" +
origin: org.apache.hadoop/hadoop-hdfs

throw new InconsistentFSStateException(sd.getRoot(),
  "cannot access checkpoint directory.");
case NON_EXISTENT:
 throw new InconsistentFSStateException(sd.getRoot(),
    "checkpoint directory does not exist or is not accessible.");
case NOT_FORMATTED:
origin: org.apache.hadoop/hadoop-hdfs

case NON_EXISTENT:
 throw new InconsistentFSStateException(sd.getRoot(),
       "storage directory does not exist or is not accessible.");
case NOT_FORMATTED:
origin: org.apache.hadoop/hadoop-hdfs

throw new InconsistentFSStateException(curFile, 
  "imgVersion " + imgVersion +
  " expected to be " + getLayoutVersion());
origin: org.apache.hadoop/hadoop-hdfs

throw new InconsistentFSStateException(bpSd.getRoot(),
  "Cannot rollback to a newer state.\nDatanode previous state: LV = "
    + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime()
origin: org.apache.hadoop/hadoop-hdfs

throw new InconsistentFSStateException(sd.getRoot(),
  "Cannot rollback to a newer state.\nDatanode previous state: LV = "
    + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime()
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Reads the named property from a storage directory's VERSION-file
 * properties.
 *
 * @param props properties previously loaded from the VERSION file
 * @param sd storage directory the properties belong to (used in error text)
 * @param name property key to fetch
 * @return the property value, never null
 * @throws InconsistentFSStateException if the key is absent
 */
protected static String getProperty(Properties props, StorageDirectory sd,
  String name) throws InconsistentFSStateException {
 String property = props.getProperty(name);
 if (property == null) {
  // A missing key means the VERSION file is incomplete or corrupt.
  throw new InconsistentFSStateException(sd.root, "file "
    + STORAGE_FILE_VERSION + " has " + name + " missing.");
 }
 return property;
}

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Loads the given fsimage file into {@code target}, choosing a checksum
 * strategy based on the storage layout version.
 *
 * @param target namesystem to populate from the image
 * @param recovery recovery context, passed through to loadFSImage
 * @param imageFile image file (and its storage directory) to load
 * @param startupOption startup option; a rolling-upgrade ROLLBACK changes
 *        how a txid-based image is loaded
 * @throws IOException if the image cannot be loaded, or (for the 0.22-era
 *         checksum layout) the required md5 property is missing
 */
void loadFSImageFile(FSNamesystem target, MetaRecoveryContext recovery,
  FSImageFile imageFile, StartupOption startupOption) throws IOException {
 LOG.info("Planning to load image: " + imageFile);
 StorageDirectory sdForProperties = imageFile.sd;
 storage.readProperties(sdForProperties, startupOption);
 if (NameNodeLayoutVersion.supports(
   LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
  // For txid-based layout, we should have a .md5 file
  // next to the image file
  boolean isRollingRollback = RollingUpgradeStartupOption.ROLLBACK
    .matches(startupOption);
  loadFSImage(imageFile.getFile(), target, recovery, isRollingRollback);
 } else if (NameNodeLayoutVersion.supports(
   LayoutVersion.Feature.FSIMAGE_CHECKSUM, getLayoutVersion())) {
  // In 0.22, we have the checksum stored in the VERSION file.
  String md5 = storage.getDeprecatedProperty(
    NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY);
  if (md5 == null) {
   // Without the digest the image cannot be verified, so the storage
   // directory is treated as inconsistent rather than loading
   // unverified data.
   throw new InconsistentFSStateException(sdForProperties.getRoot(),
     "Message digest property " +
     NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY +
     " not set for storage directory " + sdForProperties.getRoot());
  }
  loadFSImage(imageFile.getFile(), new MD5Hash(md5), target, recovery,
    false);
 } else {
  // We don't have any record of the md5sum
  loadFSImage(imageFile.getFile(), null, target, recovery, false);
 }
}
origin: com.facebook.hadoop/hadoop-core

/**
 * Reads the {@code NAMESPACE_ID} property from the VERSION file, checks
 * it against any namespace ID already recorded, and stores it.
 *
 * @param props VERSION-file properties
 * @param sd storage directory the properties belong to
 * @throws InconsistentFSStateException if a different non-zero namespace
 *         ID was already recorded
 */
protected void setNamespaceID(Properties props, StorageDirectory sd)
  throws InconsistentFSStateException {
 final int fromFile = Integer.parseInt(getProperty(props, sd, NAMESPACE_ID));
 // Zero means "unassigned"; a conflict needs two differing non-zero IDs.
 if (fromFile != 0 && namespaceID != 0 && fromFile != namespaceID) {
  throw new InconsistentFSStateException(sd.root,
    "namespaceID is incompatible with others.");
 }
 namespaceID = fromFile;
}

origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Verifies that no storage directory still holds a previous fs state,
 * which would mean an earlier upgrade was never finalized or rolled back.
 *
 * @param storage NN storage whose directories are scanned
 * @throws IOException if any directory still has a previous/ state
 */
public static void checkUpgrade(NNStorage storage) throws IOException {
 // Upgrade or rolling upgrade is allowed only if there are 
 // no previous fs states in any of the directories
 for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext();) {
  StorageDirectory sd = it.next();
  if (sd.getPreviousDir().exists())
   throw new InconsistentFSStateException(sd.getRoot(),
     "previous fs state should not exist during upgrade. "
     + "Finalize or rollback first.");
 }
}
org.apache.hadoop.hdfs.server.commonInconsistentFSStateException<init>

Popular methods of InconsistentFSStateException

  • getFilePath

Popular in Java

  • Reading from database using SQL prepared statement
  • findViewById (Activity)
  • addToBackStack (FragmentTransaction)
  • setContentView (Activity)
  • ObjectMapper (com.fasterxml.jackson.databind)
    ObjectMapper provides functionality for reading and writing JSON, either to and from basic POJOs (Plain Old Java Objects) or to and from a general-purpose JSON tree model.
  • BufferedReader (java.io)
    Wraps an existing Reader and buffers the input. Expensive interaction with the underlying reader is minimized.
  • MalformedURLException (java.net)
    This exception is thrown when a program attempts to create a URL from an incorrect specification.
  • JarFile (java.util.jar)
    JarFile is used to read jar entries and their associated data from jar files.
  • JOptionPane (javax.swing)
  • JPanel (javax.swing)
  • 21 Best Atom Packages for 2021
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimAtomGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyStudentsTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now