/** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); }
/**
 * Deletes a single cell (row, column family, column qualifier) from the current table,
 * honoring the optional visibility and timestamp options.
 *
 * @param fullCommand the raw command line as typed
 * @param cl the parsed command line (args: row, colf, colq)
 * @param shellState the shell session, providing the client and current table
 * @return 0 on success
 */
@Override
public int execute(final String fullCommand, final CommandLine cl, final Shell shellState)
    throws AccumuloException, AccumuloSecurityException, TableNotFoundException, IOException,
    ConstraintViolationException {
  shellState.checkTableState();

  // Positional arguments: row, column family, column qualifier.
  final Mutation m = new Mutation(new Text(cl.getArgs()[0].getBytes(Shell.CHARSET)));
  final Text colf = new Text(cl.getArgs()[1].getBytes(Shell.CHARSET));
  final Text colq = new Text(cl.getArgs()[2].getBytes(Shell.CHARSET));

  if (cl.hasOption(deleteOptAuths.getOpt())) {
    final ColumnVisibility le = new ColumnVisibility(cl.getOptionValue(deleteOptAuths.getOpt()));
    if (cl.hasOption(timestampOpt.getOpt())) {
      m.putDelete(colf, colq, le, Long.parseLong(cl.getOptionValue(timestampOpt.getOpt())));
    } else {
      m.putDelete(colf, colq, le);
    }
  } else if (cl.hasOption(timestampOpt.getOpt())) {
    m.putDelete(colf, colq, Long.parseLong(cl.getOptionValue(timestampOpt.getOpt())));
  } else {
    m.putDelete(colf, colq);
  }

  // try-with-resources guarantees the writer is closed (and the mutation flushed)
  // even if addMutation throws; the original leaked the writer on failure.
  try (BatchWriter bw = shellState.getAccumuloClient().createBatchWriter(
      shellState.getTableName(),
      new BatchWriterConfig().setMaxMemory(Math.max(m.estimatedMemoryUsed(), 1024))
          .setMaxWriteThreads(1).setTimeout(getTimeout(cl), TimeUnit.MILLISECONDS))) {
    bw.addMutation(m);
  }
  return 0;
}
/**
 * Record the updated Status for this file and target.
 *
 * @param filePath
 *          Path to file being replicated
 * @param status
 *          Updated Status after replication
 * @param target
 *          Peer that was replicated to
 */
public void recordNewStatus(Path filePath, Status status, ReplicationTarget target)
    throws AccumuloException, TableNotFoundException {
  // Writer is scoped to this single mutation; closing it flushes the update.
  try (BatchWriter writer =
      context.createBatchWriter(ReplicationTable.NAME, new BatchWriterConfig())) {
    log.debug("Recording new status for {}, {}", filePath, ProtobufUtil.toString(status));
    Mutation statusMutation = new Mutation(filePath.toString());
    WorkSection.add(statusMutation, target.toText(), ProtobufUtil.toValue(status));
    writer.addMutation(statusMutation);
  }
}
}
// NOTE(review): this span appears to be a garbled mid-method excerpt -- the `for`
// body is missing the start of an `if` branch, the call these dangling arguments
// belong to, and several closing braces. Tokens are preserved as-is; recover the
// full method before editing further.
BatchWriterConfig bwConfig = new BatchWriterConfig();
bwConfig.setMaxMemory(memoryInBytes); // cap on client-side buffered mutation data
try {
  // Copy each column update of `orig` into a fresh mutation on the same row.
  Mutation copy = new Mutation(orig.getRow());
  for (ColumnUpdate update : orig.getUpdates()) {
    long timestamp;
    // NOTE(review): the `if` that pairs with the `else` below is missing here.
    new ColumnVisibility(update.getColumnVisibility()), timestamp);
  } else {
    copy.put(update.getColumnFamily(), update.getColumnQualifier(),
        new ColumnVisibility(update.getColumnVisibility()), timestamp, update.getValue());
    if (bw != null) {
      try {
        bw.close();
      } catch (MutationsRejectedException e) {
        // NOTE(review): the rejection cause `e` is not logged -- confirm intent.
        log.error("Could not apply mutations to {}", tableName);
ExportTask(String instanceName, String zookeepers, String user, String password, String table) throws TableNotFoundException, AccumuloException, AccumuloSecurityException { ZooKeeperInstance zki = new ZooKeeperInstance( new ClientConfiguration().withInstance(instanceName).withZkHosts(zookeepers)); // TODO need to close batch writer Connector conn = zki.getConnector(user, new PasswordToken(password)); try { bw = conn.createBatchWriter(table, new BatchWriterConfig()); } catch (TableNotFoundException tnfe) { try { conn.tableOperations().create(table); } catch (TableExistsException e) { // nothing to do } bw = conn.createBatchWriter(table, new BatchWriterConfig()); } }
// NOTE(review): mid-method excerpt of a shell "insert" command -- the method
// signature, the declarations of `le`, `val` and `bw`, and the `if` that pairs
// with the first branch are missing from this view; tokens preserved as-is.
// Also note `cfg` is built below but never passed to a writer in this excerpt.
shellState.checkTableState();
// Positional arguments: row, column family, column qualifier.
final Mutation m = new Mutation(new Text(cl.getArgs()[0].getBytes(Shell.CHARSET)));
final Text colf = new Text(cl.getArgs()[1].getBytes(Shell.CHARSET));
final Text colq = new Text(cl.getArgs()[2].getBytes(Shell.CHARSET));
m.put(colf, colq, le, Long.parseLong(cl.getOptionValue(timestampOpt.getOpt())), val);
else
  m.put(colf, colq, le, val);
} else if (cl.hasOption(timestampOpt.getOpt()))
  m.put(colf, colq, Long.parseLong(cl.getOptionValue(timestampOpt.getOpt())), val);
else
  m.put(colf, colq, val);
// Writer sized to this single mutation (floor of 1 KiB), single-threaded.
final BatchWriterConfig cfg = new BatchWriterConfig()
    .setMaxMemory(Math.max(m.estimatedMemoryUsed(), 1024)).setMaxWriteThreads(1)
    .setTimeout(getTimeout(cl), TimeUnit.MILLISECONDS);
bw.addMutation(m);
try {
  bw.close();
} catch (MutationsRejectedException e) {
  // Collect rejection details for display; continuation is outside this view.
  final ArrayList<String> lines = new ArrayList<>();
/** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); }
/**
 * Connects to the given Accumulo instance and opens a BatchWriter to {@code table},
 * creating the table first if it does not already exist.
 *
 * @param instanceName Accumulo instance name
 * @param zookeepers comma-separated ZooKeeper host list
 * @param user user to authenticate as
 * @param password password for {@code user}
 * @param table destination table for exported data
 */
public ExportTask(String instanceName, String zookeepers, String user, String password,
    String table) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
  ZooKeeperInstance zki = new ZooKeeperInstance(
      new ClientConfiguration().withInstance(instanceName).withZkHosts(zookeepers));

  // TODO need to close batch writer
  Connector conn = zki.getConnector(user, new PasswordToken(password));
  try {
    bw = conn.createBatchWriter(table, new BatchWriterConfig());
  } catch (TableNotFoundException tnfe) {
    // Table missing: create it, then retry opening the writer.
    try {
      conn.tableOperations().create(table);
    } catch (TableExistsException e) {
      // nothing to do
    }
    bw = conn.createBatchWriter(table, new BatchWriterConfig());
  }
}
// NOTE(review): mid-method excerpt -- the enclosing signature and the `entry`
// and `table` declarations are outside this view; tokens preserved as-is.
Master.log.info("Removing entry {}", entry);
// Delete the single metadata cell identified by `entry`'s key.
BatchWriter bw = this.master.getContext().createBatchWriter(table, new BatchWriterConfig());
Mutation m = new Mutation(entry.getKey().getRow());
m.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
bw.addMutation(m);
// close() flushes the delete. NOTE(review): the writer leaks if addMutation
// throws -- consider try-with-resources once the full method is in view.
bw.close();
return;
/** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); }
/**
 * Removes the bulk-load marker entries for the given table that were written by the
 * transaction {@code tid}, scanning the metadata table and deleting each matching cell.
 *
 * @param client connected Accumulo client
 * @param tableId id of the table whose bulk-load entries should be removed
 * @param tid transaction id whose entries are targeted
 */
public static void removeBulkLoadEntries(AccumuloClient client, Table.ID tableId, long tid)
    throws Exception {
  final byte[] expectedTid = Long.toString(tid).getBytes(UTF_8);
  try (
      Scanner mscanner = new IsolatedScanner(
          client.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
      BatchWriter bw = client.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig())) {
    mscanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
    mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
    for (Entry<Key,Value> entry : mscanner) {
      log.trace("Looking at entry {} with tid {}", entry, tid);
      // Only cells stamped with our transaction id are removed.
      if (!Arrays.equals(entry.getValue().get(), expectedTid)) {
        continue;
      }
      log.trace("deleting entry {}", entry);
      Key key = entry.getKey();
      Mutation deletion = new Mutation(key.getRow());
      deletion.putDelete(key.getColumnFamily(), key.getColumnQualifier());
      bw.addMutation(deletion);
    }
  }
}
// Open a writer to the target table using default BatchWriterConfig settings.
// NOTE(review): `writer`, `connector` and `table` are declared outside this view.
BatchWriterConfig conf = new BatchWriterConfig();
writer = connector.createBatchWriter(table.getFullTableName(), conf);
/**
 * Because there is only one active Master, and thus one active StatusMaker, the only safe time
 * that we can issue the delete for a Status which is closed is immediately after writing it to
 * the replication table.
 * <p>
 * If we try to defer and delete these entries in another thread/process, we will have no
 * assurance that the Status message was propagated to the replication table. It is easiest, in
 * terms of concurrency, to do this all in one step.
 *
 * @param k
 *          The Key to delete
 */
protected void deleteStatusRecord(Key k) {
  log.debug("Deleting {} from metadata table as it's no longer needed", k.toStringNoTruncate());
  if (metadataWriter == null) {
    // Lazily create the writer on first use.
    try {
      metadataWriter = client.createBatchWriter(sourceTableName, new BatchWriterConfig());
    } catch (TableNotFoundException e) {
      // Attach the cause so the failure is diagnosable; the original dropped it.
      throw new RuntimeException("Metadata table doesn't exist", e);
    }
  }

  try {
    Mutation m = new Mutation(k.getRow());
    m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
    metadataWriter.addMutation(m);
    metadataWriter.flush();
  } catch (MutationsRejectedException e) {
    // Best-effort delete: a later pass will encounter the record again and retry.
    log.warn("Failed to delete status mutations for metadata table, will retry", e);
  }
}
/**
 * Returns a BatchWriter for the replication table.
 *
 * @param conn connected Accumulo Connector
 * @return a writer to the replication table
 * @throws ReplicationTableOfflineException if the replication table exists but is offline
 */
public static BatchWriter getBatchWriter(Connector conn) throws ReplicationTableOfflineException {
  try {
    return conn.createBatchWriter(NAME, new BatchWriterConfig());
  } catch (TableNotFoundException e) {
    // The replication table is system-managed and must exist; attach the cause
    // (the original AssertionError dropped it, hiding the failing stack).
    throw new AssertionError(NAME + " should exist, but doesn't.", e);
  } catch (TableOfflineException e) {
    throw new ReplicationTableOfflineException(e);
  }
}
// NOTE(review): mid-method excerpt -- `mbw`, `m`, `metadataRow`, `absolutePath`,
// `key`, `cq` and `val` are declared outside this view, and the `break;` implies
// an enclosing loop that is not visible; tokens preserved as-is.
VolumeManager fs = master.getFileSystem();
mbw = master.getContext().createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
// First mutation: set the tablet directory column for this metadata row.
m = new Mutation(metadataRow);
TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m,
    new Value(absolutePath.getBytes(UTF_8)));
mbw.addMutation(m);
// Second mutation: same directory column plus one copied key/value column.
m = new Mutation(metadataRow);
TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m,
    new Value(absolutePath.getBytes(UTF_8)));
m.put(key.getColumnFamily(), cq, val);
mbw.addMutation(m);
break; // its the last column in the last row
mbw.close();
/**
 * Lazily creates (on first call) and returns the shared BatchWriter for the mapped table.
 *
 * @return the cached writer, creating it if necessary
 * @throws IOException if the mapped table does not exist
 */
private BatchWriter getBatchWriter() throws IOException {
  if (batchWriter != null) {
    return batchWriter;
  }
  try {
    // 10 MB buffer, 60 s max latency, 4 background write threads.
    BatchWriterConfig config = new BatchWriterConfig();
    config.setMaxMemory(10000000);
    config.setMaxLatency(60000L, TimeUnit.MILLISECONDS);
    config.setMaxWriteThreads(4);
    batchWriter = conn.createBatchWriter(mapping.tableName, config);
  } catch (TableNotFoundException e) {
    // Surface the missing table as an IOException, preserving the cause.
    throw new IOException(e);
  }
  return batchWriter;
}
// NOTE(review): mid-method excerpt -- `tableId`, `ref`, `cell`, `key`, `m` and
// `lock` are declared outside this view and the braces do not balance here;
// tokens preserved as-is.
try (Scanner ms = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
    BatchWriter bw = new BatchWriterImpl(context, MetadataTable.ID,
        new BatchWriterConfig().setMaxMemory(1000000)
            .setMaxLatency(120000L, TimeUnit.MILLISECONDS).setMaxWriteThreads(2))) {
  // Queue deletion entries derived from the file reference and the scanned cell.
  bw.addMutation(createDeleteMutation(context, tableId, ref.meta().toString()));
  bw.addMutation(createDeleteMutation(context, tableId, cell.getValue().toString()));
  m = new Mutation(key.getRow());
  // Record the lock id on the mutation when a lock is held.
  if (lock != null)
    putLockID(context, lock, m);
  bw.addMutation(m);
  m = new Mutation(key.getRow());
  if (lock != null)
    putLockID(context, lock, m);
BatchWriter createBatchWriter() { try { return context.getConnector().createBatchWriter(targetTableName, new BatchWriterConfig().setMaxMemory(MAX_MEMORY) .setMaxLatency(LATENCY, TimeUnit.MILLISECONDS).setMaxWriteThreads(THREADS)); } catch (TableNotFoundException e) { // ya, I don't think so throw new RuntimeException(e); } catch (Exception e) { throw new RuntimeException(e); } }
// NOTE(review): garbled mid-method excerpt (metadata cleanup around a tablet
// range) -- the scan loop body is missing its opening `if`, and `value`, `key`,
// `range`, `stop`, `scanRange`, `stopRow`, `bw` and `targetSystemTable` are
// declared outside this view; tokens preserved as-is.
AccumuloClient client = this.master.getContext();
bw = client.createBatchWriter(targetSystemTable, new BatchWriterConfig());
Scanner scanner = client.createScanner(targetSystemTable, Authorizations.EMPTY);
scanner.setRange(scanRange);
TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
Mutation m = new Mutation(stopRow);
String maxLogicalTime = null;
for (Entry<Key,Value> entry : scanner) {
  // Track the maximum logical time seen across the scanned tablets.
  maxLogicalTime = TabletTime.maxMetadataTime(maxLogicalTime, value.toString());
} else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
  bw.addMutation(MetadataTableUtil.createDeleteMutation(master.getContext(),
      range.getTableId(), entry.getValue().toString()));
bw.addMutation(m);
// Update the previous-row pointer of the last tablet, then flush all pending work.
Mutation updatePrevRow = stop.getPrevRowUpdateMutation();
Master.log.debug("Setting the prevRow for last tablet: {}", stop);
bw.addMutation(updatePrevRow);
bw.flush();
m = new Mutation(stopRow);
/**
 * Creates a BatchWriter for {@code table} with explicit tuning parameters.
 *
 * @param table the table to write to
 * @param maxMemory maximum bytes of mutation data to buffer client-side
 * @param maxLatency maximum time, in milliseconds, a mutation may stay buffered
 * @param maxWriteThreads number of background threads used to send mutations
 */
public BatchWriter createBatchWriter (String table, long maxMemory, long maxLatency,int maxWriteThreads)
    throws TableNotFoundException {
  BatchWriterConfig bwc = new BatchWriterConfig()
      .setMaxLatency(maxLatency, TimeUnit.MILLISECONDS)
      .setMaxMemory(maxMemory)
      .setMaxWriteThreads(maxWriteThreads);
  return connector.createBatchWriter(table, bwc);
}

// Convenience overload; its body continues beyond this excerpt.
public BatchWriter createBatchWriter (String table) throws TableNotFoundException {