private HLog createWAL(final Configuration c) throws IOException { HLog wal = new HLog(FileSystem.get(c), logDir, oldLogDir, c); // Set down maximum recovery so we dfsclient doesn't linger retrying something // long gone. HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1); return wal; }
// NOTE(review): test fragment, excerpted mid-method — closes and deletes the
// region's original WAL, creates a replacement via createWAL, mocks
// RegionServerServices, feeds region.initialize()'s sequence id into the new
// WAL, then closes region and WAL. The stray "Mockito.any(Exception.class));"
// indicates surrounding Mockito verify(...) lines were lost in extraction —
// confirm against the full test before relying on this snippet.
region0.getLog().closeAndDelete(); HLog wal = createWAL(this.conf); RegionServerServices mockRS = Mockito.mock(RegionServerServices.class); long seqid = region.initialize(); wal.setSequenceNumber(seqid); Mockito.any(Exception.class)); region.close(true); wal.close();
// Fragment (enclosing method continues past this excerpt): chooses the
// HLogKey implementation — falls back to HLog.newKey(conf) when no explicit
// key class is configured on the delegate; the else-branch presumably
// instantiates delegate.keyClass reflectively inside the open try — confirm
// against the full source.
HLogKey key; if (delegate.keyClass == null) { key = HLog.newKey(delegate.conf); } else { try {
/**
 * Closes this server's WAL, optionally deleting its files as well.
 * Failures are logged rather than propagated since this runs during
 * shutdown/cleanup, where throwing would mask the original problem.
 *
 * @param delete if true, the log's files are removed after close
 */
private void closeWAL(final boolean delete) {
  if (this.hlog == null) {
    return; // nothing to close
  }
  try {
    if (delete) {
      hlog.closeAndDelete();
    } else {
      hlog.close();
    }
  } catch (Throwable e) {
    // Bug fix: previous message claimed "Close and delete failed" even when
    // delete == false; report the operation that actually ran.
    LOG.error(delete ? "Close and delete failed" : "Close failed",
      RemoteExceptionHandler.checkThrowable(e));
  }
}
// NOTE(review): garbled excerpt of HLog construction — the statements are not
// contiguous in the original (orphan ")" / ternary pieces show lines were
// dropped). What is visible: registers any supplied WALActionsListeners,
// derives the log-file prefix ("hlog" unless an encoded prefix is given),
// rolls to the first writer, and caches the reflected getNumCurrentReplicas
// hook off hdfs_out. Confirm order and context against the full constructor.
if (listeners != null) { for (WALActionsListener i: listeners) { registerWALActionsListener(i); getDefaultBlockSize()); "hlog" : URLEncoder.encode(prefix, "UTF8"); rollWriter(); this.getNumCurrentReplicas = getGetNumCurrentReplicas(this.hdfs_out);
/**
 * Drives the merge loop: repeatedly fetches the next group of regions
 * and merges them until none remain or a merge declines to proceed.
 * The merge HLog is closed and deleted regardless of outcome.
 *
 * @throws IOException if fetching or merging regions fails
 */
void process() throws IOException {
  try {
    HRegionInfo[] candidates = next();
    while (candidates != null) {
      if (!merge(candidates)) {
        return; // merge declined; stop processing
      }
      candidates = next();
    }
  } finally {
    // Best-effort cleanup of the merge-only log.
    try {
      hlog.closeAndDelete();
    } catch (IOException e) {
      LOG.error(e);
    }
  }
}
/**
 * Sets up a Merger for the given table: records the maximum region file
 * size, resolves the table directory and its descriptor, and creates a
 * dedicated merge HLog underneath the table directory.
 *
 * @param conf configuration
 * @param fs filesystem the table lives on
 * @param tableName name of the table whose regions will be merged
 * @throws IOException if the descriptor or log cannot be set up
 */
protected Merger(Configuration conf, FileSystem fs, final byte [] tableName)
throws IOException {
  this.conf = conf;
  this.fs = fs;
  this.maxFilesize = conf.getLong(HConstants.HREGION_MAX_FILESIZE,
      HConstants.DEFAULT_MAX_FILE_SIZE);
  final Path rootdir =
      fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR)));
  this.tabledir = new Path(rootdir, Bytes.toString(tableName));
  this.htd = FSTableDescriptors.getTableDescriptor(this.fs, this.tabledir);
  // Timestamped dir keeps retried merges from colliding on the same path.
  final Path mergeLogDir = new Path(tabledir,
      "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME);
  final Path archiveDir = new Path(tabledir, HConstants.HREGION_OLDLOGDIR_NAME);
  this.hlog = new HLog(fs, mergeLogDir, archiveDir, conf);
}
// Fragment of log teardown (loop body continues past this excerpt): after
// close(), each file remaining in the log directory is mapped to its archive
// destination under oldLogDir; the actual move/delete presumably follows —
// confirm against the enclosing method.
close(); if (!fs.exists(this.dir)) return; FileStatus[] files = fs.listStatus(this.dir); for(FileStatus file : files) { Path p = getHLogArchivePath(this.oldLogDir, file.getPath());
// Fragment: forces a log roll, then closes and deletes the log — presumably
// test or shutdown cleanup; confirm against the enclosing method.
this.log.rollWriter(); this.log.closeAndDelete();
// NOTE(review): excerpt of the cache-flush WAL protocol — startCacheFlush
// marks the flush, the complete-sequence id is computed, and the flush is
// either aborted or completed in the WAL. Both abortCacheFlush and
// completeCacheFlush appear here without the branching that separates the
// failure and success paths; lines were extracted from a larger method.
wal.startCacheFlush(this.regionInfo.getEncodedNameAsBytes()); completeSequenceId = this.getCompleteCacheFlushSequenceId(sequenceId); wal.abortCacheFlush(this.regionInfo.getEncodedNameAsBytes()); wal.completeCacheFlush(this.regionInfo.getEncodedNameAsBytes(), regionInfo.getTableName(), completeSequenceId, this.getRegionInfo().isMetaRegion());
// Registers a listener that takes the index read/write lock's write side,
// synchronizing WAL rolls with index writes (fragment of a larger setup
// method).
log.registerWALActionsListener(new IndexLogRollSynchronizer(INDEX_READ_WRITE_LOCK.writeLock()));
/**
 * Test-only convenience overload: appends the edits stamped with the
 * default cluster id.
 *
 * @param info region the edits belong to
 * @param tableName table the region is part of
 * @param edits WAL edits to write
 * @param now edit timestamp
 * @param htd descriptor of the table
 * @throws IOException on append failure
 */
public void append(HRegionInfo info, byte [] tableName, WALEdit edits,
  final long now, HTableDescriptor htd)
throws IOException {
  this.append(info, tableName, edits, HConstants.DEFAULT_CLUSTER_ID, now, htd);
}
// Fragment: unsynced WAL append of the accumulated edits, stamped with the
// default cluster id and current time; txid is retained, presumably for a
// later explicit sync — confirm against the enclosing method.
txid = this.log.appendNoSync(regionInfo, this.htableDescriptor.getName(), walEdits, HConstants.DEFAULT_CLUSTER_ID, EnvironmentEdgeManager.currentTimeMillis(), this.htableDescriptor);
/**
 * Reports the datanode replication count of the current HLog output
 * stream, obtained via the reflected getNumCurrentReplicas hook.
 *
 * If the pipeline isn't started yet or the hook is unavailable, the
 * default of 0 is returned; a 0 therefore means you are not properly
 * running with the HDFS-826 patch.
 *
 * @return current replica count, or 0 when it cannot be determined
 * @throws IllegalArgumentException on reflective invocation failure
 * @throws IllegalAccessException on reflective invocation failure
 * @throws InvocationTargetException on reflective invocation failure
 */
int getLogReplication()
throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
  // Guard clauses: no hook or no stream means no pipeline to query.
  if (this.getNumCurrentReplicas == null || this.hdfs_out == null) {
    return 0;
  }
  final Object count = this.getNumCurrentReplicas.invoke(getOutputStream(), NO_ARGS);
  return (count instanceof Integer) ? ((Integer) count).intValue() : 0;
}
/**
 * Opens this HRegion: verifies compression codecs, runs initialization,
 * and propagates the resulting sequence id to the WAL when one is
 * attached.
 *
 * @param reporter progress reporter forwarded to initialize
 * @return Returns <code>this</code>
 * @throws IOException if initialization fails
 */
protected HRegion openHRegion(final CancelableProgressable reporter)
throws IOException {
  checkCompressionCodecs();
  final long seqid = initialize(reporter);
  if (this.log != null) {
    this.log.setSequenceNumber(seqid);
  }
  return this;
}
/**
 * Cleanup counterpart to
 * {@link #createHRegion(HRegionInfo, Path, Configuration, HTableDescriptor)}:
 * closes the region and then closes-and-deletes the {@link HLog} the
 * HRegion was carrying, if any. Use this after the createHRegion variant
 * that takes an {@link HLog} instance; note the
 * {@link HLog#closeAndDelete()} call removes the log's files.
 *
 * @param r region to clean up; null is a no-op
 * @throws IOException on close failure
 */
public static void closeHRegion(final HRegion r) throws IOException {
  if (r != null) {
    r.close();
    final HLog wal = r.getLog();
    if (wal != null) {
      wal.closeAndDelete();
    }
  }
}
/**
 * Lazily creates and caches an HLog rooted under the filesystem's home
 * directory; later calls return the same instance.
 *
 * @return the HLog
 * @throws IOException e
 */
public synchronized HLog getLog() throws IOException {
  if (this.log == null) {
    final Path home = this.fs.getHomeDirectory();
    // Timestamped dir so repeated runs do not collide on the same path.
    final Path logdir = new Path(home,
        HConstants.HREGION_LOGDIR_NAME + "_" + System.currentTimeMillis());
    final Path oldLogDir = new Path(home, HConstants.HREGION_OLDLOGDIR_NAME);
    this.log = new HLog(this.fs, logdir, oldLogDir, this.conf);
  }
  return this.log;
}
/**
 * Append a set of edits to the log without flushing. Log edits are keyed
 * by (encoded) regionName, rowname, and log-sequence-id; the HLog is not
 * flushed after this transaction is written.
 *
 * @param info region the edits apply to
 * @param tableName table being written
 * @param edits the edits themselves
 * @param clusterId The originating clusterId for this edit (for replication)
 * @param now edit timestamp
 * @param htd table descriptor
 * @return txid of this transaction
 * @throws IOException on append failure
 */
public long appendNoSync(HRegionInfo info, byte [] tableName, WALEdit edits,
  UUID clusterId, final long now, HTableDescriptor htd)
throws IOException {
  // Final 'false' flag: skip the sync; callers sync explicitly later.
  return this.append(info, tableName, edits, clusterId, now, htd, false);
}
// NOTE(review): fragment truncated mid-argument-list — same unsynced append
// pattern as the similar call elsewhere in this excerpt; the trailing
// htableDescriptor argument and closing tokens are cut off. Recover from the
// full source before editing.
txid = this.log.appendNoSync(regionInfo, this.htableDescriptor.getName(), walEdits, HConstants.DEFAULT_CLUSTER_ID, EnvironmentEdgeManager.currentTimeMillis(),