/**
 * Prints, for every sidelined region directory, the bulk-load command an
 * operator can run to re-import its data into the owning table.
 *
 * @param regions map from sidelined region dir to its hbck bookkeeping entry
 */
public void dumpSidelinedRegions(Map<Path, HbckInfo> regions) {
  for (Map.Entry<Path, HbckInfo> sidelined : regions.entrySet()) {
    Path regionDir = sidelined.getKey();
    TableName table = sidelined.getValue().getTableName();
    errors.print("This sidelined region dir should be bulk loaded: " + regionDir.toString());
    errors.print("Bulk load command looks like: "
        + "hbase org.apache.hadoop.hbase.tool.LoadIncrementalHFiles "
        + regionDir.toUri().getPath() + " " + table);
  }
}
/**
 * Records {@code hbi}'s region name under its table in {@link #skippedRegions}.
 *
 * <p>Uses {@code Map.computeIfAbsent} to create the per-table set on first use,
 * replacing the original get / null-check / put sequence, which re-put the set
 * into the map on every call.
 *
 * @param hbi the region whose consistency check was skipped
 */
private void addSkippedRegion(final HbckInfo hbi) {
  skippedRegions.computeIfAbsent(hbi.getTableName(), table -> new HashSet<>())
      .add(hbi.getRegionNameAsString());
}
// Fragment of a larger method (enclosing definition not visible here):
// capture the region's owning table and its region directory on HDFS.
TableName tableName = hi.getTableName(); Path regionDir = hi.getHdfsRegionDir();
// Comparator fragment (enclosing method not visible; brace intentionally
// unbalanced here): order primarily by table name, falling through to
// finer-grained keys only when the tables are equal.
int tableCompare = l.getTableName().compareTo(r.getTableName()); if (tableCompare != 0) { return tableCompare;
// Comparator fragment — continuation of a call that starts before this view
// (the comparison helper receiving both table names is cut off). Non-zero
// result short-circuits the comparison on table name.
l.getTableName(), r.getTableName()); if (tableCompare != 0) { return tableCompare;
// Worker body of an anonymous Callable (the enclosing `new ...Callable() {`
// is outside this view; the trailing `} };` below closes it).
// Loads the .regioninfo for one HDFS region dir, reporting and re-throwing on
// failure so the caller can treat the dir as orphaned.
@Override
public synchronized Void call() throws IOException {
  // only load entries that haven't been loaded yet.
  if (hbi.getHdfsHRI() == null) {
    try {
      // Tick the progress reporter before the (potentially slow) HDFS read.
      errors.progress();
      hbck.loadHdfsRegioninfo(hbi);
    } catch (IOException ioe) {
      String msg = "Orphan region in HDFS: Unable to load .regioninfo from table "
          + hbi.getTableName() + " in hdfs dir "
          + hbi.getHdfsRegionDir() + "! It may be an invalid format or version file. Treating as "
          + "an orphaned regiondir.";
      errors.reportError(ERROR_CODE.ORPHAN_HDFS_REGION, msg);
      try {
        // Dump the dir listing for diagnosis; a failure here is fatal too.
        hbck.debugLsr(hbi.getHdfsRegionDir());
      } catch (IOException ioe2) {
        LOG.error("Unable to read directory " + hbi.getHdfsRegionDir(), ioe2);
        throw ioe2;
      }
      // Remember the dir so a later repair pass can sideline/adopt it.
      hbck.orphanHdfsDirs.add(hbi);
      throw ioe;
    }
  }
  return null;
} };
// Worker body of an anonymous Callable (enclosing `new ...Callable() {` is
// outside this view; trailing `} };` closes it). Older variant: table name is
// a byte[] rendered via Bytes.toString, and no progress tick before the load.
@Override
public synchronized Void call() throws IOException {
  // only load entries that haven't been loaded yet.
  if (hbi.getHdfsHRI() == null) {
    try {
      hbck.loadHdfsRegioninfo(hbi);
    } catch (IOException ioe) {
      String msg = "Orphan region in HDFS: Unable to load .regioninfo from table "
          + Bytes.toString(hbi.getTableName()) + " in hdfs dir "
          + hbi.getHdfsRegionDir() + "! It may be an invalid format or version file. Treating as "
          + "an orphaned regiondir.";
      errors.reportError(ERROR_CODE.ORPHAN_HDFS_REGION, msg);
      try {
        // Dump the dir listing for diagnosis; a failure here is fatal too.
        hbck.debugLsr(hbi.getHdfsRegionDir());
      } catch (IOException ioe2) {
        LOG.error("Unable to read directory " + hbi.getHdfsRegionDir(), ioe2);
        throw ioe2;
      }
      // Remember the dir so a later repair pass can sideline/adopt it.
      hbck.orphanHdfsDirs.add(hbi);
      throw ioe;
    }
  }
  return null;
} };
private void undeployRegions(HbckInfo hi) throws IOException, InterruptedException { undeployRegionsForHbi(hi); // undeploy replicas of the region (but only if the method is invoked for the primary) if (hi.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { return; } int numReplicas = admin.getTableDescriptor(hi.getTableName()).getRegionReplication(); for (int i = 1; i < numReplicas; i++) { if (hi.getPrimaryHRIForDeployedReplica() == null) continue; HRegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica( hi.getPrimaryHRIForDeployedReplica(), i); HbckInfo h = regionInfoMap.get(hri.getEncodedName()); if (h != null) { undeployRegionsForHbi(h); //set skip checks; we undeployed it, and we don't want to evaluate this anymore //in consistency checks h.setSkipChecks(true); } } }
public synchronized ImmutableList<HRegionInfo> getRegionsFromMeta() { // lazy loaded, synchronized to ensure a single load if (regionsFromMeta == null) { List<HRegionInfo> regions = new ArrayList<HRegionInfo>(); for (HbckInfo h : HBaseFsck.this.regionInfoMap.values()) { if (tableName.equals(h.getTableName())) { if (h.metaEntry != null) { regions.add((HRegionInfo) h.metaEntry); } } } regionsFromMeta = Ordering.natural().immutableSortedCopy(regions); } return regionsFromMeta; }
/**
 * Prints, for every sidelined region directory, the bulk-load command an
 * operator can run to re-import its data into the owning table.
 *
 * @param regions map from sidelined region dir to its hbck bookkeeping entry
 */
public void dumpSidelinedRegions(Map<Path, HbckInfo> regions) {
  for (Map.Entry<Path, HbckInfo> sidelined : regions.entrySet()) {
    Path regionDir = sidelined.getKey();
    TableName table = sidelined.getValue().getTableName();
    errors.print("This sidelined region dir should be bulk loaded: " + regionDir.toString());
    errors.print("Bulk load command looks like: "
        + "hbase org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles "
        + regionDir.toUri().getPath() + " " + table);
  }
}
/**
 * Records {@code hbi}'s region name under its table in {@link #skippedRegions}.
 *
 * <p>Fix: the original called {@code skippedRegions.put(...)} on every
 * invocation even when the per-table set was already mapped; the put now
 * happens only when the set is first created.
 *
 * @param hbi the region whose consistency check was skipped
 */
private void addSkippedRegion(final HbckInfo hbi) {
  Set<String> skippedRegionNames = skippedRegions.get(hbi.getTableName());
  if (skippedRegionNames == null) {
    skippedRegionNames = new HashSet<String>();
    skippedRegions.put(hbi.getTableName(), skippedRegionNames);
  }
  skippedRegionNames.add(hbi.getRegionNameAsString());
}
/**
 * Prints, for every sidelined region directory, the bulk-load command an
 * operator can run to re-import its data into the owning table.
 *
 * <p>Older variant: table names are raw {@code byte[]} rendered via
 * {@code Bytes.toStringBinary}.
 *
 * @param regions map from sidelined region dir to its hbck bookkeeping entry
 */
public void dumpSidelinedRegions(Map<Path, HbckInfo> regions) {
  for (Map.Entry<Path, HbckInfo> sidelined : regions.entrySet()) {
    Path regionDir = sidelined.getKey();
    String table = Bytes.toStringBinary(sidelined.getValue().getTableName());
    errors.print("This sidelined region dir should be bulk loaded: " + regionDir.toString());
    errors.print("Bulk load command looks like: "
        + "hbase org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles "
        + regionDir.toUri().getPath() + " " + table);
  }
}
// Fragment (enclosing method not visible; call parentheses intentionally
// unbalanced here): plug a hole in hbase:meta by re-adding the region and its
// replicas, assigning them across the live servers.
// NOTE(review): the two statements below appear twice verbatim, and the second
// copy re-declares `numReplicas`, which would not compile within one scope —
// this looks like a paste/extraction artifact. Confirm against the full method
// before relying on this text.
int numReplicas = admin.getTableDescriptor(hbi.getTableName()).getRegionReplication(); HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(), admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) int numReplicas = admin.getTableDescriptor(hbi.getTableName()).getRegionReplication(); HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(), admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
// Fragment (enclosing method not visible): resolve the region's table to its
// TableInfo, failing fast if the table was never registered during the scan.
TableName tableName = hi.getTableName(); TableInfo tableInfo = tablesInfo.get(tableName); Preconditions.checkNotNull(tableInfo, "Table '" + tableName + "' not present!");
// Fragment (enclosing method not visible; `if` body is cut off): guard for
// entries whose table name could not be determined.
TableName tableName = hbi.getTableName(); if (tableName == null) {
// Fragment (enclosing method not visible; loop body is cut off): request the
// per-replica server column from the catalog family for each replica id.
int numReplicas = admin.getTableDescriptor(hi.getTableName()).getRegionReplication(); for (int i = 0; i < numReplicas; i++) { get.addColumn(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(i));
// Comparator fragment (enclosing method not visible; brace intentionally
// unbalanced here): order primarily by table name, falling through to
// finer-grained keys only when the tables are equal.
int tableCompare = l.getTableName().compareTo(r.getTableName()); if (tableCompare != 0) { return tableCompare;
// Worker body of an anonymous Callable (the enclosing `new ...Callable() {`
// is outside this view; the final `}` below closes it). Loads the .regioninfo
// for one HDFS region dir, reporting and re-throwing on failure so the caller
// can treat the dir as orphaned.
@Override
public synchronized Void call() throws IOException {
  // only load entries that haven't been loaded yet.
  if (hbi.getHdfsHRI() == null) {
    try {
      // Tick the progress reporter before the (potentially slow) HDFS read.
      errors.progress();
      hbck.loadHdfsRegioninfo(hbi);
    } catch (IOException ioe) {
      String msg = "Orphan region in HDFS: Unable to load .regioninfo from table "
          + hbi.getTableName() + " in hdfs dir "
          + hbi.getHdfsRegionDir() + "! It may be an invalid format or version file. Treating as "
          + "an orphaned regiondir.";
      errors.reportError(ERROR_CODE.ORPHAN_HDFS_REGION, msg);
      try {
        // Dump the dir listing for diagnosis; a failure here is fatal too.
        hbck.debugLsr(hbi.getHdfsRegionDir());
      } catch (IOException ioe2) {
        LOG.error("Unable to read directory " + hbi.getHdfsRegionDir(), ioe2);
        throw ioe2;
      }
      // Remember the dir so a later repair pass can sideline/adopt it.
      hbck.orphanHdfsDirs.add(hbi);
      throw ioe;
    }
  }
  return null;
} }
private void undeployRegions(HbckInfo hi) throws IOException, InterruptedException { undeployRegionsForHbi(hi); // undeploy replicas of the region (but only if the method is invoked for the primary) if (hi.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { return; } int numReplicas = admin.getTableDescriptor(hi.getTableName()).getRegionReplication(); for (int i = 1; i < numReplicas; i++) { if (hi.getPrimaryHRIForDeployedReplica() == null) continue; RegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica( hi.getPrimaryHRIForDeployedReplica(), i); HbckInfo h = regionInfoMap.get(hri.getEncodedName()); if (h != null) { undeployRegionsForHbi(h); //set skip checks; we undeployed it, and we don't want to evaluate this anymore //in consistency checks h.setSkipChecks(true); } } }
public synchronized ImmutableList<RegionInfo> getRegionsFromMeta() { // lazy loaded, synchronized to ensure a single load if (regionsFromMeta == null) { List<RegionInfo> regions = new ArrayList<>(); for (HbckInfo h : HBaseFsck.this.regionInfoMap.values()) { if (tableName.equals(h.getTableName())) { if (h.metaEntry != null) { regions.add(h.metaEntry); } } } regionsFromMeta = Ordering.from(RegionInfo.COMPARATOR).immutableSortedCopy(regions); } return regionsFromMeta; }