// Fragment (whitespace-collapsed, braces unbalanced — appears excerpted from a loop in
// HBase's HBaseFsck, likely loadHdfsRegionInfos — TODO confirm against upstream).
// Visible behavior: reports and skips entries whose HDFS region dir is null, skips
// regions that contain only edits, otherwise registers the region with modTInfo.
// NOTE(review): each `continue` guard is missing its closing `}` in this excerpt.
// NOTE(review): the detail message concatenates `hbi` with no separator ("...edits" + hbi).
Path p = hbi.getHdfsRegionDir(); if (p == null) { errors.report("No regioninfo in Meta or HDFS. " + hbi); continue; if (hbi.containsOnlyHdfsEdits()) { errors.detail("Skipping region because it only contains edits" + hbi); continue; if (!hbi.isSkipChecks()) { modTInfo.addRegionInfo(hbi);
// Fragment (truncated mid-body; appears to be HBaseFsck's region-chain integrity check,
// RegionInfo-era variant — TODO confirm). Visible behavior: normalizes an empty end key
// to null, flags a degenerate region when start == end, filters out non-default replicas,
// then pairwise-compares r1 against the remaining subRange: equal start keys are reported
// as duplicates; r1.end == r2.start with matching regionIds is logged/handled as a split.
// NOTE(review): several `if` bodies are missing closing `}` in this excerpt.
byte[] endKey = rng.getEndKey(); endKey = (endKey.length == 0) ? null : endKey; if (Bytes.equals(rng.getStartKey(),endKey)) { handler.handleDegenerateRegion(rng); if (r1.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) continue; subRange.remove(r1); for (HbckInfo r2 : subRange) { if (r2.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) continue; if (Bytes.compareTo(r1.getStartKey(), r2.getStartKey())==0) { handler.handleDuplicateStartKeys(r1,r2); } else if (Bytes.compareTo(r1.getEndKey(), r2.getStartKey())==0 && r1.getHdfsHRI().getRegionId() == r2.getHdfsHRI().getRegionId()) { LOG.info("this is a split, log to splits"); handler.handleSplit(r1, r2);
// Fragment (truncated; appears to be HBaseFsck.checkRegionConsistency, newer variant —
// uses EnvironmentEdgeManager.currentTime() and TableName-keyed tablesInfo).
// Visible behavior: computes consistency predicates (inMeta/inHdfs/deployed/etc.),
// bails out for edits-only regions, marks merged regions as skip-checks (their files
// are left for CatalogJanitor), and logs an error when the HDFS .regioninfo is absent.
// NOTE(review): `recentlyModified` compares modTime + timelag against the injected
// clock — this is the mockable HBase idiom, unlike the System.currentTimeMillis()
// sibling variant elsewhere in this file.
if (hbi.isSkipChecks()) return; String descriptiveName = hbi.toString(); boolean inMeta = hbi.metaEntry != null; boolean inHdfs = !shouldCheckHdfs() || hbi.getHdfsRegionDir() != null; boolean hasMetaAssignment = inMeta && hbi.metaEntry.regionServer != null; boolean isDeployed = !hbi.deployedOn.isEmpty(); boolean shouldBeDeployed = inMeta && !isTableDisabled(hbi.metaEntry); boolean recentlyModified = inHdfs && hbi.getModTime() + timelag > EnvironmentEdgeManager.currentTime(); if (hbi.containsOnlyHdfsEdits()) { return; if (hbi.isMerged()) { hbi.setSkipChecks(true); LOG.info("Region " + descriptiveName + " got merge recently, its file(s) will be cleaned by CatalogJanitor later"); if (!hbi.isHdfsRegioninfoPresent()) { LOG.error("Region " + hbi.getHdfsHRI() + " could have been repaired" + " in table integrity repair phase if -fixHdfsOrphans was" + " used."); HRegionInfo hri = hbi.getHdfsHRI(); TableInfo tableInfo = tablesInfo.get(hri.getTable());
// Fragment (truncated; appears to be an OLDER variant of HBaseFsck.checkRegionConsistency
// — references ".META." and patches meta holes online when the region is not in META but
// deployed). Visible behavior mirrors the newer sibling in this file, plus the
// NOT_IN_META repair path (fixMetaHoleOnline + debugLsr).
// NOTE(review): uses System.currentTimeMillis() directly; the sibling variant uses
// EnvironmentEdgeManager.currentTime(), which is the test-mockable HBase idiom.
// NOTE(review): `inMeta` is referenced but its declaration is cut from this excerpt.
String descriptiveName = hbi.toString(); boolean inHdfs = !shouldCheckHdfs() || hbi.getHdfsRegionDir() != null; boolean hasMetaAssignment = inMeta && hbi.metaEntry.regionServer != null; boolean isDeployed = !hbi.deployedOn.isEmpty(); boolean shouldBeDeployed = inMeta && !isTableDisabled(hbi.metaEntry); boolean recentlyModified = inHdfs && hbi.getModTime() + timelag > System.currentTimeMillis(); if (hbi.containsOnlyHdfsEdits()) { return; if (!hbi.isHdfsRegioninfoPresent()) { LOG.error("Region " + hbi.getHdfsHRI() + " could have been repaired" + " in table integrity repair phase if -fixHdfsOrphans was" + " used."); LOG.info("Patching .META. with .regioninfo: " + hbi.getHdfsHRI()); HBaseFsckRepair.fixMetaHoleOnline(getConf(), hbi.getHdfsHRI()); errors.reportError(ERROR_CODE.NOT_IN_META, "Region " + descriptiveName + " not in META, but deployed on " + Joiner.on(", ").join(hbi.deployedOn)); debugLsr(hbi.getHdfsRegionDir()); if (shouldFixMeta()) { if (!hbi.isHdfsRegioninfoPresent()) { LOG.error("This should have been repaired in table integrity repair phase"); return;
// Fragment (cut off mid-`else {`; appears to be the HRegionInfo-era variant of the
// region-chain check — same shape as the RegionInfo variant elsewhere in this file
// but WITHOUT the split-detection branch). Visible behavior: degenerate-region check,
// non-default-replica filtering, duplicate-start-key detection between r1 and subRange.
byte[] endKey = rng.getEndKey(); endKey = (endKey.length == 0) ? null : endKey; if (Bytes.equals(rng.getStartKey(),endKey)) { handler.handleDegenerateRegion(rng); if (r1.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) continue; subRange.remove(r1); for (HbckInfo r2 : subRange) { if (r2.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) continue; if (Bytes.compareTo(r1.getStartKey(), r2.getStartKey())==0) { handler.handleDuplicateStartKeys(r1,r2); } else {
// Fragment (cut off mid-`else {`; appears to be the oldest variant of the region-chain
// check in this file — no region-replica filtering at all). Visible behavior:
// degenerate-region check, then duplicate-start-key detection between r1 and subRange.
byte[] endKey = rng.getEndKey(); endKey = (endKey.length == 0) ? null : endKey; if (Bytes.equals(rng.getStartKey(),endKey)) { handler.handleDegenerateRegion(rng); subRange.remove(r1); for (HbckInfo r2 : subRange) { if (Bytes.compareTo(r1.getStartKey(), r2.getStartKey())==0) { handler.handleDuplicateStartKeys(r1,r2); } else {
// Fragment (appears to be from HBaseFsck.adoptHdfsOrphan, TableName-era variant).
// Visible behavior: lists the orphan region dir's contents and asserts that the owning
// table's TableInfo exists. Preconditions.checkNotNull is used CORRECTLY here:
// (reference, errorMessage) — compare with the older sibling in this file whose
// arguments are swapped.
Path p = hi.getHdfsRegionDir(); FileSystem fs = p.getFileSystem(getConf()); FileStatus[] dirs = fs.listStatus(p); TableName tableName = hi.getTableName(); TableInfo tableInfo = tablesInfo.get(tableName); Preconditions.checkNotNull(tableInfo, "Table '" + tableName + "' not present!");
// Fragment (appears to be the older, String-keyed variant of HBaseFsck.adoptHdfsOrphan).
// Lists the orphan region dir's contents and asserts the owning table's TableInfo exists.
// FIX: Guava's Preconditions.checkNotNull(T reference, Object errorMessage) takes the
// value under test FIRST. The original passed the message string as the reference and
// tableInfo as the message, so a null tableInfo was silently accepted (a non-null String
// always passes the check). Also restored the missing opening quote in the message so it
// reads "Table '<name>' not present!" like its sibling variant in this file.
Path p = hi.getHdfsRegionDir(); FileSystem fs = p.getFileSystem(getConf()); FileStatus[] dirs = fs.listStatus(p); String tableName = Bytes.toString(hi.getTableName()); TableInfo tableInfo = tablesInfo.get(tableName); Preconditions.checkNotNull(tableInfo, "Table '" + tableName + "' not present!");
// Fragment (two statements, context cut away): resolves the region's table name
// (TableName-era API) and its HDFS region directory from an HbckInfo.
TableName tableName = hi.getTableName(); Path regionDir = hi.getHdfsRegionDir();
// Fragment (starts mid-try/catch, braces unbalanced; appears to be the .regioninfo
// loading loop of HBaseFsck, TableName-era variant). Visible behavior: logs a warning
// with the underlying cause when reading .regioninfo fails, then — if no HDFS HRI was
// recovered and the table name is also null — presumably skips the entry (the skip
// branch body is cut from this excerpt); otherwise registers the region with modTInfo.
// NOTE(review): e.getCause() is passed to LOG.warn, correctly preserving the real
// failure rather than the ExecutionException wrapper.
} catch(ExecutionException e) { LOG.warn("Failed to read .regioninfo file for region " + work.hbi.getRegionNameAsString(), e.getCause()); if (hbi.getHdfsHRI() == null) { TableName tableName = hbi.getTableName(); if (tableName == null) { if (!hbi.isSkipChecks()) { modTInfo.addRegionInfo(hbi);
// Fragment (two statements, context cut away): older String-keyed sibling of the
// TableName variant elsewhere in this file — decodes the table name from bytes and
// resolves the HDFS region directory from an HbckInfo.
String tableName = Bytes.toString(hi.getTableName()); Path regionDir = hi.getHdfsRegionDir();
// Fragment (starts mid-try/catch, braces unbalanced; older String-keyed sibling of the
// .regioninfo loading excerpt elsewhere in this file). Visible behavior: logs the read
// failure with its underlying cause, checks for a missing HDFS HRI and null table name
// (the skip branch body is cut from this excerpt), otherwise registers with modTInfo.
} catch(ExecutionException e) { LOG.warn("Failed to read .regioninfo file for region " + work.hbi.getRegionNameAsString(), e.getCause()); if (hbi.getHdfsHRI() == null) { String tableName = Bytes.toString(hbi.getTableName()); if (tableName == null) { if (!hbi.isSkipChecks()) { modTInfo.addRegionInfo(hbi);
// Fragment (truncated; appears to be HBaseFsck's merge/sideline path for a region
// contained inside an overlap). Visible behavior: logs the contained region dir after
// close-and-pause, lists its contents tolerating FileNotFoundException (the dir may
// already be sidelined/moved — checked via fs.exists), then logs either "already
// sidelined" or a successful sideline into getSidelineDir().
// NOTE(review): the FileNotFoundException branch deliberately downgrades to a warning
// when the dir is truly gone — best-effort, not an error.
String thread = Thread.currentThread().getName(); LOG.debug("[" + thread + "] Contained region dir after close and pause"); debugLsr(contained.getHdfsRegionDir()); FileStatus[] dirs = null; try { dirs = fs.listStatus(contained.getHdfsRegionDir()); } catch (FileNotFoundException fnfe) { if (!fs.exists(contained.getHdfsRegionDir())) { LOG.warn("[" + thread + "] HDFS region dir " + contained.getHdfsRegionDir() + " is missing. Assuming already sidelined or moved."); } else { if (!fs.exists(contained.getHdfsRegionDir())) { LOG.warn("[" + thread + "] HDFS region dir " + contained.getHdfsRegionDir() + " already sidelined."); } else { LOG.info("[" + thread + "] Sidelined region dir "+ contained.getHdfsRegionDir() + " into " + getSidelineDir()); debugLsr(contained.getHdfsRegionDir());
// Fragment — byte-identical duplicate of the loadHdfsRegionInfos-style excerpt that
// appears twice more in this file (this file looks like a clone-pair dataset record
// list, one snippet per line). Visible behavior: skip entries with no regioninfo,
// skip edits-only regions, otherwise register with modTInfo.
// NOTE(review): each `continue` guard is missing its closing `}` in this excerpt.
Path p = hbi.getHdfsRegionDir(); if (p == null) { errors.report("No regioninfo in Meta or HDFS. " + hbi); continue; if (hbi.containsOnlyHdfsEdits()) { errors.detail("Skipping region because it only contains edits" + hbi); continue; if (!hbi.isSkipChecks()) { modTInfo.addRegionInfo(hbi);
// Fragment (truncated; appears to be HBaseFsck's overlap-merge logic: compute the
// byte-range envelope covering all overlapping regions, then offline each one).
// FIX(review): the second comparison clause had lost its guard — the excerpt read
// "range.setFirst(hi.getStartKey()); .compare(hi.getEndKey(), ...)", i.e. a bare
// `.compare` with no receiver. Restored "} if (RegionSplitCalculator.BYTES_COMPARATOR"
// by symmetry with the start-key clause immediately before it (grow range.first when a
// smaller start key is seen; grow range.second when a larger end key is seen).
// TODO confirm against upstream HBaseFsck before relying on this reconstruction.
for (HbckInfo hi : overlap) { if (range == null) { range = new Pair<byte[], byte[]>(hi.getStartKey(), hi.getEndKey()); } else { if (RegionSplitCalculator.BYTES_COMPARATOR .compare(hi.getStartKey(), range.getFirst()) < 0) { range.setFirst(hi.getStartKey()); } if (RegionSplitCalculator.BYTES_COMPARATOR .compare(hi.getEndKey(), range.getSecond()) > 0) { range.setSecond(hi.getEndKey()); debugLsr(hi.getHdfsRegionDir()); try { LOG.info("[" + thread + "] Closing region: " + hi); offline(hi.getRegionName()); } catch (IOException ioe) { LOG.warn("[" + thread + "] Unable to offline region from master: " + hi
// Fragment (truncated; appears to be HBaseFsck's close-region path). Visible behavior:
// builds a meta Get for the region's info/server/startcode columns; for the default
// replica, also requests the per-replica server columns for every configured replica;
// then reads RegionLocations from the meta Result and reports/skips when meta has no
// usable handle (null locations, null serverName, or missing catalog-family columns).
// NOTE(review): the guards here mix return (no locations) and continue (per-server
// loop, presumably) — the enclosing loop structure is cut from this excerpt.
Get get = new Get(hi.getRegionName()); get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); get.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); get.addColumn(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER); if (hi.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) { int numReplicas = admin.getTableDescriptor(hi.getTableName()).getRegionReplication(); for (int i = 0; i < numReplicas; i++) { get.addColumn(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(i)); RegionLocations rl = MetaTableAccessor.getRegionLocations(r); if (rl == null) { LOG.warn("Unable to close region " + hi.getRegionNameAsString() + " since meta does not have handle to reach it"); return; if (serverName == null) { errors.reportError("Unable to close region " + hi.getRegionNameAsString() + " because meta does not " + "have handle to reach it."); continue; LOG.warn("Unable to close region " + hi.getRegionNameAsString() + " because hbase:meta had invalid or missing " + HConstants.CATALOG_FAMILY_STR + ":"
// Fragment — third byte-identical copy of the loadHdfsRegionInfos-style excerpt in
// this file. Visible behavior: skip entries with no regioninfo in Meta/HDFS, skip
// edits-only regions, otherwise register the region with modTInfo.
// NOTE(review): each `continue` guard is missing its closing `}` in this excerpt.
Path p = hbi.getHdfsRegionDir(); if (p == null) { errors.report("No regioninfo in Meta or HDFS. " + hbi); continue; if (hbi.containsOnlyHdfsEdits()) { errors.detail("Skipping region because it only contains edits" + hbi); continue; if (!hbi.isSkipChecks()) { modTInfo.addRegionInfo(hbi);
// Fragment (single statement, context cut away): blocks until the root/meta region
// described by root.getHdfsHRI() is assigned, via the HBaseFsckRepair helper.
HBaseFsckRepair.waitUntilAssigned(admin, root.getHdfsHRI());