/**
 * Flushes the named table via the shell's Accumulo client, using the
 * command's configured row range, and logs whether the flush was waited on.
 *
 * @param shellState shell context providing the Accumulo client
 * @param tableName table to flush
 */
@Override
protected void doTableOp(final Shell shellState, final String tableName)
    throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
  shellState.getAccumuloClient().tableOperations().flush(tableName, startRow, endRow, wait);
  final String status;
  if (wait) {
    status = " completed.";
  } else {
    status = " initiated...";
  }
  Shell.log.info("Flush of table " + tableName + status);
}
// NOTE(review): fragment — the enclosing loop and method begin outside this view,
// and the catch body continues past it; code left byte-identical.
// The `continue` skips the current loop iteration (guarded by a condition above this
// view). Below: best-effort, non-blocking flush (wait=false) of `table`, counting
// each flush that was started; tables that vanished are handled in the catch.
continue; try { context.tableOperations().flush(table, null, null, false); flushesStarted.incrementAndGet(); } catch (TableNotFoundException e) {
/** Flush a table, writing entries in memory to disk. */ public void Flush(String table) throws AccumuloSecurityException, AccumuloException, TableNotFoundException { System.out.println("Flushing " + table + "..."); // try { connector.tableOperations().flush(table, null, null, true); // } catch (AccumuloException | AccumuloSecurityException e) { // log.error("error trying to compact " + table, e); // } catch (TableNotFoundException e) { // log.error("", e); // } }
/**
 * Flushes the named table via the shell's connector, using the command's
 * configured row range, and logs whether the flush was waited on.
 *
 * @param shellState shell context providing the connector
 * @param tableName table to flush
 */
@Override
protected void doTableOp(final Shell shellState, final String tableName)
    throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
  shellState.getConnector().tableOperations().flush(tableName, startRow, endRow, wait);
  final String status;
  if (wait) {
    status = " completed.";
  } else {
    status = " initiated...";
  }
  Shell.log.info("Flush of table " + tableName + status);
}
/**
 * Flushes the index table (blocking), then clones it to a "_tmp" sibling,
 * logging how long each of the two steps took.
 */
@Override
public void visit(State state, Environment env, Properties props) throws Exception {
  String source = (String) state.get("indexTableName");
  String target = source + "_tmp";

  long flushStart = System.currentTimeMillis();
  env.getConnector().tableOperations().flush(source, null, null, true);
  long flushDone = System.currentTimeMillis();

  // flush=false on clone: the explicit flush above already persisted memory
  env.getConnector().tableOperations().clone(source, target, false,
      new HashMap<String,String>(), new HashSet<String>());
  long cloneDone = System.currentTimeMillis();

  log.debug("Cloned " + target + " from " + source + " flush: " + (flushDone - flushStart)
      + "ms clone: " + (cloneDone - flushDone) + "ms");
}
/**
 * Picks either the index table or the document table with equal probability
 * and flushes it, blocking until the flush completes.
 */
@Override
public void visit(State state, Environment env, Properties props) throws Exception {
  String indexTableName = (String) state.get("indexTableName");
  String dataTableName = (String) state.get("docTableName");
  Random rand = (Random) state.get("rand");
  // fair coin flip between the two tables
  String target = rand.nextDouble() < .5 ? indexTableName : dataTableName;
  env.getConnector().tableOperations().flush(target, null, null, true);
  log.debug("Flushed " + target);
}
/**
 * Flushes a randomly chosen row range of the table. When the two randomly
 * picked bank endpoints are not strictly increasing, the whole table is
 * flushed instead (both endpoints null). Whether to wait is also random.
 */
@Override
public void visit(State state, Environment env, Properties props) throws Exception {
  String table = state.getString("tableName");
  Random rand = (Random) state.get("rand");
  Connector conn = env.getConnector();
  Text startRow = new Text(Utils.getBank(rand.nextInt((Integer) state.get("numBanks"))));
  Text endRow = new Text(Utils.getBank(rand.nextInt((Integer) state.get("numBanks"))));
  // keep the range only when strictly increasing; otherwise flush everything
  if (startRow.compareTo(endRow) >= 0) {
    startRow = null;
    endRow = null;
  }
  log.debug("flushing " + startRow + " " + endRow);
  conn.tableOperations().flush(table, startRow, endRow, rand.nextBoolean());
}
}
/**
 * Writes a single mutation carrying a random blob of the given size to the
 * table, then flushes (blocking) so the data is written to disk.
 *
 * @param c connector to use
 * @param tableName table to write to
 * @param size number of random bytes in the value
 */
void writeRandomValue(Connector c, String tableName, int size) throws Exception {
  Random rand = new Random();
  byte[] data1 = new byte[size];
  rand.nextBytes(data1);
  // try-with-resources: the writer is closed (flushing its buffer) even if
  // addMutation throws — the original leaked the writer on failure
  try (BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig())) {
    Mutation m1 = new Mutation("r" + rand.nextInt(909090));
    m1.put("data", "bl0b", new Value(data1));
    bw.addMutation(m1);
  }
  // wait=true: block until in-memory entries are persisted
  c.tableOperations().flush(tableName, null, null, true);
}
/** * Sets up the {@link AccumuloRyaDAO}. * @throws Exception */ public void setUpDao() throws Exception { // Setup dao log.info("Creating " + driverName + " DAO"); dao = new AccumuloRyaDAO(); dao.setConnector(connector); dao.setConf(config); // Flush the tables before initializing the DAO for (final String tableName : tableList) { connector.tableOperations().flush(tableName, null, null, false); } dao.init(); }
/** * Sets up the {@link AccumuloRyaDAO}. * @throws Exception */ public void setUpDao() throws Exception { // Setup dao log.info("Creating " + driverName + " DAO"); dao = new AccumuloRyaDAO(); dao.setConnector(connector); dao.setConf(config); // Flush the tables before initializing the DAO for (final String tableName : tableList) { connector.tableOperations().flush(tableName, null, null, false); } dao.init(); }
public void writeAndReadData(Connector connector, String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException { // Write some data to the table BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig()); for (String s : rows) { Mutation m = new Mutation(new Text(s)); m.put(EMPTY, EMPTY, EMPTY_VALUE); bw.addMutation(m); } bw.close(); // Write the data to disk, read it back connector.tableOperations().flush(tableName, null, null, true); Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY); int i = 0; for (Entry<Key,Value> entry : scanner) { assertEquals("Data read is not data written", rows[i++], entry.getKey().getRow().toString()); } }
/**
 * Thrift proxy endpoint: flushes the given row range of a table on behalf of
 * the authenticated caller.
 *
 * <p>Any Accumulo exception is translated to its thrift counterpart by
 * {@code handleExceptionTNF} rather than propagating directly.
 */
@Override
public void flushTable(ByteBuffer login, String tableName, ByteBuffer startRow,
    ByteBuffer endRow, boolean wait)
    throws org.apache.accumulo.proxy.thrift.AccumuloException,
    org.apache.accumulo.proxy.thrift.AccumuloSecurityException,
    org.apache.accumulo.proxy.thrift.TableNotFoundException, TException {
  try {
    getConnector(login).tableOperations()
        .flush(tableName, ByteBufferUtil.toText(startRow), ByteBufferUtil.toText(endRow), wait);
  } catch (Exception e) {
    // maps TableNotFoundException (and related causes) onto the thrift exceptions
    handleExceptionTNF(e);
  }
}
/**
 * Writes one empty mutation with a random 10-byte row to the table, then
 * flushes (blocking) so a new file is created.
 *
 * @param conn connector to use
 * @param tablename table to write to
 */
private void makeFile(Connector conn, String tablename) throws Exception {
  byte[] empty = {};
  byte[] row = new byte[10];
  r.nextBytes(row);
  Mutation m = new Mutation(row, 0, 10);
  m.put(empty, empty, empty);
  // try-with-resources closes the writer even if addMutation throws (the
  // original leaked it); close() flushes pending mutations, which also makes
  // the original's explicit bw.flush() before close() redundant
  try (BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig())) {
    bw.addMutation(m);
  }
  conn.tableOperations().flush(tablename, null, null, true);
}
/**
 * Writes a single empty entry under the given row and flushes the table
 * (blocking), forcing the entry to disk.
 *
 * @param conn connector to use
 * @param tablename table to write to
 * @param row row id for the entry
 */
private void writeFlush(Connector conn, String tablename, String row) throws Exception {
  // try-with-resources: writer is closed (flushing its buffer) even if
  // addMutation throws — the original leaked the writer on failure
  try (BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig())) {
    Mutation m = new Mutation(row);
    m.put("", "", "");
    bw.addMutation(m);
  }
  conn.tableOperations().flush(tablename, null, null, true);
}
/**
 * Writes a single entry ("row"/"cf":"cq" = "value") and flushes the table
 * (blocking), producing one new file.
 *
 * @param conn connector to use
 * @param tableName table to write to
 */
private void writeFile(Connector conn, String tableName) throws Exception {
  // try-with-resources: writer is closed (flushing its buffer) even if
  // addMutation throws — the original leaked the writer on failure
  try (BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig())) {
    Mutation m = new Mutation("row");
    m.put("cf", "cq", "value");
    bw.addMutation(m);
  }
  conn.tableOperations().flush(tableName, null, null, true);
}
/**
 * Writes a single empty entry under the given row and flushes the table
 * (blocking), forcing the entry to disk.
 *
 * @param conn connector to use
 * @param tablename table to write to
 * @param row row id for the entry
 */
private void writeFlush(Connector conn, String tablename, String row) throws Exception {
  // try-with-resources: writer is closed (flushing its buffer) even if
  // addMutation throws — the original leaked the writer on failure
  try (BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig())) {
    Mutation m = new Mutation(row);
    m.put("", "", "");
    bw.addMutation(m);
  }
  conn.tableOperations().flush(tablename, null, null, true);
}
}
/**
 * Regression test: splits the metadata table, creates ten tables (flushing
 * metadata and root after each), restarts the cluster, and verifies the root
 * table is still readable.
 */
@Test
public void test() throws Exception {
  Connector c = getConnector();
  c.tableOperations().addSplits(MetadataTable.NAME,
      FunctionalTestUtils.splits("0 1 2 3 4 5 6 7 8 9 a".split(" ")));
  for (String tableName : getUniqueNames(10)) {
    c.tableOperations().create(tableName);
    // blocking flushes of both system tables after every create
    c.tableOperations().flush(MetadataTable.NAME, null, null, true);
    c.tableOperations().flush(RootTable.NAME, null, null, true);
  }
  cluster.stop();
  cluster.start();
  // root table must still contain entries after the restart
  assertTrue(
      Iterators.size(c.createScanner(RootTable.NAME, Authorizations.EMPTY).iterator()) > 0);
}
/**
 * Verifies the metadata table splits after its split threshold is lowered to
 * 500 bytes and enough tables have been created to grow it past that.
 */
@Test
public void test() throws Exception {
  Connector c = getConnector();
  assertEquals(1, c.tableOperations().listSplits(MetadataTable.NAME).size());
  c.tableOperations().setProperty(MetadataTable.NAME,
      Property.TABLE_SPLIT_THRESHOLD.getKey(), "500");
  for (int n = 0; n < 10; n++) {
    c.tableOperations().create("table" + n);
    // blocking flush so metadata growth is on disk before the next create
    c.tableOperations().flush(MetadataTable.NAME, null, null, true);
  }
  // give the master time to perform the splits
  sleepUninterruptibly(10, TimeUnit.SECONDS);
  assertTrue(c.tableOperations().listSplits(MetadataTable.NAME).size() > 2);
}
}
private static void runTest(Connector conn, int numlg, ArrayList<byte[]> cfset, Map<String,Stat> stats) throws Exception { String table = "immlgb"; try { conn.tableOperations().delete(table); } catch (TableNotFoundException tnfe) {} conn.tableOperations().create(table); conn.tableOperations().setProperty(table, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "snappy"); setupLocalityGroups(conn, numlg, cfset, table); addStat(stats, "write", write(conn, cfset, table)); addStat(stats, "scan cf", scan(conn, cfset, table, false)); addStat(stats, "scan cf:cq", scan(conn, cfset, table, true)); // TODO time reading all data long t1 = System.currentTimeMillis(); conn.tableOperations().flush(table, null, null, true); long t2 = System.currentTimeMillis(); addStat(stats, "flush", t2 - t1); }
@Test(timeout = 2 * 60 * 1000) public void test() throws Exception { final Connector conn = this.getConnector(); // Yes, there's a tabletserver assertEquals(1, conn.instanceOperations().getTabletServers().size()); final String tableName = getUniqueNames(1)[0]; conn.tableOperations().create(tableName); BatchWriter bw = conn.createBatchWriter(tableName, null); for (int i = 0; i < N; i++) { Mutation m = new Mutation("" + i); m.put("", "", ""); bw.addMutation(m); } bw.close(); conn.tableOperations().flush(tableName, null, null, true); // Kill dfs cluster.getMiniDfs().restartNameNode(false); assertEquals(N, Iterators.size(conn.createScanner(tableName, Authorizations.EMPTY).iterator())); }