// NOTE(review): fragment — the enclosing anonymous class / privileged call
// begins outside this view; the trailing "} });" closes that outer construct.
@Override
public HTable run() throws Exception {
    HTable table = new HTable(config, tableName);
    // Flush is controlled by us. This ensures that HBase changing
    // their criteria for flushing does not change how we flush.
    table.setAutoFlush(false);
    return table;
}
});
/**
 * Returns the cached {@link HTable} for the table named in {@code configMap},
 * creating and caching both the shared configuration and the table on first use.
 *
 * @param configMap connection settings; must contain {@code TABLE_NAME_TAG} and
 *        {@code ZOOKEEPER_QUORUM_TAG}, may contain {@code AUTOFLUSH_TAG}
 * @return the (possibly cached) table handle
 * @throws IOException if the table cannot be opened
 */
public static HTable getHTable(Map<String, String> configMap) throws IOException {
    String tableName = configMap.get(TABLE_NAME_TAG);
    // NOTE(review): unsynchronized check-then-act on htableMap/hbConfig — not
    // safe if called from multiple threads; confirm single-threaded use.
    HTable table = htableMap.get(tableName);
    if (table == null) {
        if (hbConfig == null) {
            Configuration config = new Configuration(true);
            // Map the quorum tag onto the HBase property name explicitly; the
            // loop below copies entries under their own (tag) keys.
            config.set("hbase.zookeeper.quorum", configMap.get(ZOOKEEPER_QUORUM_TAG));
            for (Entry<String, String> entry : configMap.entrySet()) {
                config.set(entry.getKey(), entry.getValue());
            }
            hbConfig = HBaseConfiguration.create(config);
        }
        table = new HTable(hbConfig, tableName);
        if (configMap.containsKey(AUTOFLUSH_TAG)) {
            // parseBoolean avoids boxing a Boolean just to unbox it again.
            table.setAutoFlush(Boolean.parseBoolean(configMap.get(AUTOFLUSH_TAG)));
        }
        htableMap.put(tableName, table);
    }
    return table;
}
// Fragment: buffer both puts client-side (auto-flush off), then push them to
// the region server in a single explicit flush.
primary.setAutoFlush(false);
primary.put(Arrays.asList(p1, p2));
primary.flushCommits();
/**
 * Turns 'auto-flush' on or off. Equivalent to calling
 * {@link #setAutoFlush(boolean, boolean)} with the same value for both
 * arguments, so the clear-buffer-on-fail behavior tracks the auto-flush flag.
 *
 * @param autoFlush whether or not to enable 'auto-flush'
 */
public void setAutoFlush(boolean autoFlush) {
    setAutoFlush(autoFlush, autoFlush);
}
/** Forwards the 'auto-flush' setting to the wrapped table. */
@Override
public void setAutoFlush(boolean autoFlush) {
    table.setAutoFlush(autoFlush);
}
/** Forwards both the 'auto-flush' and clear-buffer-on-fail settings to the wrapped table. */
@Override
public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) {
    table.setAutoFlush(autoFlush, clearBufferOnFail);
}
/** * Create the primary table (to which you should write), setup properly for indexing the given * {@link ColumnGroup}s. Also creates the necessary index tables to match the passes groups. * @param groups {@link ColumnGroup}s to index, creating one index table per column group. * @return reference to the primary table * @throws IOException if there is an issue communicating with HBase */ private HTable createSetupTables(ColumnGroup... groups) throws IOException { HBaseAdmin admin = UTIL.getHBaseAdmin(); // setup the index CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder(); for (ColumnGroup group : groups) { builder.addIndexGroup(group); // create the index tables CoveredColumnIndexer.createIndexTable(admin, group.getTable()); } // setup the primary table String indexedTableName = Bytes.toString(TestTable.getTableName()); HTableDescriptor pTable = new HTableDescriptor(indexedTableName); pTable.addFamily(new HColumnDescriptor(FAM)); pTable.addFamily(new HColumnDescriptor(FAM2)); builder.build(pTable); // create the primary table admin.createTable(pTable); HTable primary = new HTable(UTIL.getConfiguration(), indexedTableName); primary.setAutoFlush(false); return primary; }
// Fragment: lengthen the client retry pause to 1s, then open the primary
// table with auto-flush off and clear-buffer-on-fail on.
conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 1000);
HTable primary = new HTable(conf, primaryTable);
primary.setAutoFlush(false, true);
/**
 * Returns the cached HTable, lazily creating and registering it on first use.
 * "tables" is presumably a ThreadLocal (get()/set() take no key) — TODO confirm;
 * the new table is also added to "pool" so it can be tracked/closed elsewhere.
 *
 * @return the cached or newly created table
 * @throws IOException if the table cannot be opened
 */
private HTable getTable() throws IOException {
    HTable table = tables.get();
    if (table == null) {
        table = new HTable(conf, tableName) {
            // Serialize flushes on this instance; the synchronized override
            // prevents concurrent flushCommits() calls from interleaving.
            @Override
            public synchronized void flushCommits() throws IOException {
                super.flushCommits();
            }
        };
        table.setAutoFlush(autoflush);
        pool.add(table); //keep track
        tables.set(table);
    }
    return table;
}
// NOTE(review): fragment — the enclosing anonymous class / privileged call
// begins outside this view; the trailing "} });" closes that outer construct.
@Override
public HTable run() throws Exception {
    HTable table = new HTable(config, tableName);
    // Flush is controlled by us. This ensures that HBase changing
    // their criteria for flushing does not change how we flush.
    table.setAutoFlush(false);
    return table;
}
});
/**
 * Opens the target table with auto-flush disabled (writes are buffered
 * client-side) and clear-buffer-on-fail enabled.
 *
 * @param hbaseConfig cluster configuration
 * @param tableName   name of the table to write to
 * @throws IOException if the table cannot be opened
 */
HBaseWriter(Configuration hbaseConfig, String tableName) throws IOException {
    table = new HTable(hbaseConfig, tableName);
    table.setAutoFlush(false, true);
}
/**
 * Sets up the client: a write-buffered table, a second table handle dedicated
 * to deletes, and a single-threaded executor that drains the delete queue.
 *
 * @throws IOException if either table cannot be opened
 */
HBaseClient() throws IOException {
    table = new HTable(hbaseConfig, tableName);
    // Buffer writes client-side; clear the buffer on failure.
    table.setAutoFlush(false, true);
    // Separate handle to the same table — presumably so the delete worker
    // does not share an HTable instance with writers; TODO confirm.
    deleteTable = new HTable(hbaseConfig, tableName);
    deleteQueue = new ArrayBlockingQueue<>(DELETE_BATCH_SIZE);
    deleteBatchExecutor = Executors.newSingleThreadExecutor(
        new ThreadFactoryBuilder().setNameFormat("omid-completor-%d").build());
    // NOTE(review): submitting "this" from the constructor leaks a partially
    // constructed object to another thread; consider a separate start() method.
    deleteBatchExecutor.submit(this);
}
/** * Does the necessary HBase initializations. */ public boolean connect(Configuration hiveconf) { try { htable = new HTable(HBaseConfiguration.create(hiveconf), HBaseStatsSetupConstants.PART_STAT_TABLE_NAME); // for performance reason, defer update until the closeConnection htable.setAutoFlush(false); } catch (IOException e) { LOG.error("Error during HBase connection. " + e); return false; } return true; }
/** * Does the necessary HBase initializations. */ public boolean connect(Configuration hiveconf) { try { htable = new HTable(HBaseConfiguration.create(hiveconf), HBaseStatsSetupConstants.PART_STAT_TABLE_NAME); // for performance reason, defer update until the closeConnection htable.setAutoFlush(false); } catch (IOException e) { LOG.error("Error during HBase connection. " + e); return false; } return true; }
/**
 * Sets up the client: a write-buffered table, a second table handle dedicated
 * to deletes, and a single-threaded executor that drains the delete queue.
 *
 * @param hbaseConfig cluster configuration
 * @param tableName   name of the table to operate on
 * @throws IOException if either table cannot be opened
 */
HBaseClient(Configuration hbaseConfig, String tableName) throws IOException {
    table = new HTable(hbaseConfig, tableName);
    // Buffer writes client-side; clear the buffer on failure.
    table.setAutoFlush(false, true);
    // Separate handle to the same table — presumably so the delete worker
    // does not share an HTable instance with writers; TODO confirm.
    deleteTable = new HTable(hbaseConfig, tableName);
    deleteQueue = new ArrayBlockingQueue<DeleteRequest>(DELETE_BATCH_SIZE);
    deleteBatchExecutor = Executors.newSingleThreadExecutor(
        new ThreadFactoryBuilder().setNameFormat("omid-completor-%d").build());
    // NOTE(review): submitting "this" from the constructor leaks a partially
    // constructed object to another thread; consider a separate start() method.
    deleteBatchExecutor.submit(this);
}
/**
 * Factory method: opens an HTable configured with this factory's auto-flush
 * and write-buffer settings.
 *
 * @param config    cluster configuration
 * @param tableName table to open
 * @return the configured table
 * @throws RuntimeException wrapping any IOException raised while opening or
 *         configuring the table
 */
@Override
public HTableInterface createHTableInterface(Configuration config, byte[] tableName) {
    try {
        HTable newTable = new HTable(config, tableName);
        newTable.setAutoFlush(autoFlush);
        newTable.setWriteBufferSize(writeBufferSize);
        return newTable;
    } catch (IOException ioe) {
        // Callers of this factory do not expect checked exceptions.
        throw new RuntimeException(ioe);
    }
}
@Override @SuppressWarnings("unchecked") public RecordWriter getRecordWriter(FileSystem ignored, JobConf job, String name, Progressable progress) throws IOException { // expecting exactly one path String tableName = job.get(OUTPUT_TABLE); HTable table = null; try { table = new HTable(HBaseConfiguration.create(job), tableName); } catch(IOException e) { LOG.error(e); throw e; } table.setAutoFlush(false); return new TableRecordWriter(table); }
/**
 * Returns the named table, opening and caching it on first use.
 *
 * @param tableName the name of the table, as a string
 * @return the named table
 * @throws IOException if there is a problem opening a table
 */
HTable getTable(ImmutableBytesWritable tableName) throws IOException {
    // Single map lookup instead of containsKey + get + get; tables never
    // maps a key to null, so a null result means "not yet opened".
    HTable table = tables.get(tableName);
    if (table == null) {
        LOG.debug("Opening HTable \"" + Bytes.toString(tableName.get()) + "\" for writing");
        table = new HTable(conf, tableName.get());
        // Buffer puts client-side; the caller flushes explicitly.
        table.setAutoFlush(false);
        tables.put(tableName, table);
    }
    return table;
}
/**
 * Builds a RecordWriter over the Hive-mapped HBase table, with auto-flush
 * disabled so puts are buffered client-side until the writer is closed.
 *
 * @throws IOException if the table cannot be opened
 */
@Override
public org.apache.hadoop.mapred.RecordWriter<ImmutableBytesWritable, Object> getRecordWriter(
        FileSystem fileSystem, JobConf jobConf, String name, Progressable progressable)
        throws IOException {
    // Propagate the Hive-declared table name to the HBase output property.
    String hbaseTableName = jobConf.get(HBaseSerDe.HBASE_TABLE_NAME);
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, hbaseTableName);
    // Whether writes should also be recorded in the write-ahead log.
    final boolean walEnabled =
        HiveConf.getBoolVar(jobConf, HiveConf.ConfVars.HIVE_HBASE_WAL_ENABLED);
    final HTable table = new HTable(HBaseConfiguration.create(jobConf), hbaseTableName);
    table.setAutoFlush(false);
    return new MyRecordWriter(table, walEnabled);
}
/**
 * Returns the HTable for {@code tableName}, creating the underlying HBase
 * table if it does not yet exist and caching the handle for reuse.
 *
 * @param tableName name of the table to open or create
 * @return a cached or newly opened table handle
 * @throws Exception if the admin interface, table creation, or open fails
 */
private HTable ensureTable(String tableName) throws Exception {
    // Single map lookup instead of containsKey + get; the map never holds
    // null values, so null means "not yet opened".
    HTable cached = _knownTables.get(tableName);
    if (cached != null) {
        return cached;
    }
    HBaseAdmin admin = getHBaseAdmin();
    if (!admin.tableExists(tableName)) {
        HTableDescriptor tableDesc = _translator.describeHBaseTable(tableName);
        admin.createTable(tableDesc);
    }
    HTable table = new HTable(_conf, tableName);
    if (_bufferWrites) {
        // Buffer writes client-side; clear the buffer if a flush fails.
        table.setAutoFlush(false, true);
    }
    _knownTables.put(tableName, table);
    return table;
}