private void flush() { try { if (indexer.isPresent()) { indexer.get().flush(); // MetricsWriter is non-null if Indexer is present } writer.flush(); } catch (MutationsRejectedException e) { throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Mutation rejected by server on flush", e); } } }
@Override public CompletableFuture<Collection<Slice>> finish() { try { // Done serializing rows, so flush and close the writer and indexer writer.flush(); writer.close(); if (indexer.isPresent()) { indexer.get().close(); } } catch (MutationsRejectedException e) { throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Mutation rejected by server on flush", e); } // TODO Look into any use of the metadata for writing out the rows return completedFuture(ImmutableList.of()); }
/** * Flushes all Mutations in the index writer. And all metric mutations to the metrics table. * Note that the metrics table is not updated until this method is explicitly called (or implicitly via close). */ public void flush() { try { // Flush index writer indexWriter.flush(); // Write out metrics mutations BatchWriter metricsWriter = connector.createBatchWriter(table.getMetricsTableName(), writerConfig); metricsWriter.addMutations(getMetricsMutations()); metricsWriter.close(); // Re-initialize the metrics metrics.clear(); metrics.put(METRICS_TABLE_ROW_COUNT, new AtomicLong(0)); } catch (MutationsRejectedException e) { throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Index mutation was rejected by server on flush", e); } catch (TableNotFoundException e) { throw new PrestoException(ACCUMULO_TABLE_DNE, "Accumulo table does not exist", e); } }
/**
 * Seeds the clone table's metadata from the source table's tablets: every
 * tablet read from the source is rewritten as a clone mutation for the new
 * table id, then the writer is flushed.
 *
 * @throws RuntimeException if the source table has no tablets (deleted mid-clone)
 */
@VisibleForTesting
public static void initializeClone(String testTableName, Table.ID srcTableId, Table.ID tableId,
    AccumuloClient client, BatchWriter bw) throws TableNotFoundException, MutationsRejectedException {
  Iterator<TabletMetadata> tablets = createCloneScanner(testTableName, srcTableId, client).iterator();
  if (!tablets.hasNext()) {
    throw new RuntimeException(" table deleted during clone? srcTableId = " + srcTableId);
  }
  while (tablets.hasNext()) {
    bw.addMutation(createCloneMutation(srcTableId, tableId, tablets.next().getKeyValues()));
  }
  bw.flush();
}
private void flush() { try { final BatchWriter writer = this.writer.get(); if (writer != null) { writer.flush(); } else { // We don't have a writer. If the table exists, try to make a new writer. if (accumuloClient.tableOperations().exists(tableName)) { resetWriter(); } } } catch (MutationsRejectedException | RuntimeException exception) { log.warn("Problem flushing traces, resetting writer. Set log level to" + " DEBUG to see stacktrace. cause: " + exception); log.debug("flushing traces failed due to exception", exception); resetWriter(); /* XXX e.g. if the writer was closed between when we grabbed it and when we called flush. */ } }
/**
 * Create a status record in the replication table.
 *
 * @param file the file the record is for (used as the mutation row)
 * @param tableId id of the table the file belongs to (column qualifier)
 * @param v the status value to store
 * @return true if the mutation was written and flushed; false if either step
 *         was rejected (the caller is expected to retry)
 */
protected boolean addStatusRecord(Text file, Table.ID tableId, Value v) {
  try {
    Mutation m = new Mutation(file);
    m.put(StatusSection.NAME, new Text(tableId.getUtf8()), v);
    try {
      replicationWriter.addMutation(m);
    } catch (MutationsRejectedException e) {
      log.warn("Failed to write work mutations for replication, will retry", e);
      return false;
    }
  } finally {
    // The flush always runs, even when the catch above has already queued a
    // "return false". A rejected flush returns false from this finally block,
    // which deliberately supersedes any pending return value from the try.
    try {
      replicationWriter.flush();
    } catch (MutationsRejectedException e) {
      log.warn("Failed to write work mutations for replication, will retry", e);
      return false;
    }
  }
  return true;
}
/**
 * Records a replication Status entry for the given file and returns the
 * creation time used: the file's modification time when it exists, otherwise
 * the current wall-clock time.
 *
 * @param file file to record a created-time for
 * @param tableId table the file belongs to (column qualifier)
 * @return the created time written into the Status record
 */
private long setAndGetCreatedTime(Path file, String tableId) throws IOException, MutationsRejectedException {
  final long createdTime = fs.exists(file)
      ? fs.getFileStatus(file).getModificationTime()
      : System.currentTimeMillis();

  Status status = Status.newBuilder().setCreatedTime(createdTime).build();
  Mutation mutation = new Mutation(new Text(ReplicationSection.getRowPrefix() + file));
  mutation.put(MetadataSchema.ReplicationSection.COLF, new Text(tableId), ProtobufUtil.toValue(status));
  replicationWriter.addMutation(mutation);
  replicationWriter.flush();
  return createdTime;
}
}
bw.flush(); } catch (MutationsRejectedException e) { log.error("Could not submit mutation to remove columns for {} in replication table", row, e);
} finally { try { writer.flush(); } catch (MutationsRejectedException e) { log.warn("Failed to write work mutations for replication, will retry", e);
replicationWriter.flush(); } catch (MutationsRejectedException e) { log.warn("Failed to write order mutation for replication, will retry", e);
/**
 * Because there is only one active Master, and thus one active StatusMaker, the only safe time
 * that we can issue the delete for a Status which is closed is immediately after writing it to
 * the replication table.
 * <p>
 * If we try to defer and delete these entries in another thread/process, we will have no
 * assurance that the Status message was propagated to the replication table. It is easiest, in
 * terms of concurrency, to do this all in one step.
 *
 * @param k
 *          The Key to delete
 */
protected void deleteStatusRecord(Key k) {
  log.debug("Deleting {} from metadata table as it's no longer needed", k.toStringNoTruncate());
  // Lazily create the metadata writer on first use
  if (metadataWriter == null) {
    try {
      metadataWriter = client.createBatchWriter(sourceTableName, new BatchWriterConfig());
    } catch (TableNotFoundException e) {
      // Chain the cause so the underlying failure isn't lost from the stack trace
      throw new RuntimeException("Metadata table doesn't exist", e);
    }
  }
  try {
    Mutation m = new Mutation(k.getRow());
    m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
    metadataWriter.addMutation(m);
    metadataWriter.flush();
  } catch (MutationsRejectedException e) {
    // Best-effort delete: log and move on, a later pass will retry
    log.warn("Failed to delete status mutations for metadata table, will retry", e);
  }
}
private void deleteTablets(MergeInfo info, Range scanRange, BatchWriter bw, AccumuloClient client) throws TableNotFoundException, MutationsRejectedException { Scanner scanner; Mutation m; // Delete everything in the other tablets // group all deletes into tablet into one mutation, this makes tablets // either disappear entirely or not all.. this is important for the case // where the process terminates in the loop below... scanner = client.createScanner(info.getExtent().isMeta() ? RootTable.NAME : MetadataTable.NAME, Authorizations.EMPTY); Master.log.debug("Deleting range {}", scanRange); scanner.setRange(scanRange); RowIterator rowIter = new RowIterator(scanner); while (rowIter.hasNext()) { Iterator<Entry<Key,Value>> row = rowIter.next(); m = null; while (row.hasNext()) { Entry<Key,Value> entry = row.next(); Key key = entry.getKey(); if (m == null) m = new Mutation(key.getRow()); m.putDelete(key.getColumnFamily(), key.getColumnQualifier()); Master.log.debug("deleting entry {}", key); } bw.addMutation(m); } bw.flush(); }
bw.flush(); return rewrites;
ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m); bw.addMutation(m); bw.flush(); } finally { bw.close();
// Worker body for a test thread: writes the idx-th batch of prepared
// mutations and flushes; a rejection fails the test immediately.
// NOTE(review): bw, allMuts and idx are captured from the enclosing scope,
// which is not visible in this chunk.
@Override
public void run() {
  try {
    bw.addMutations(allMuts.get(idx));
    bw.flush();
  } catch (MutationsRejectedException e) {
    Assert.fail("Error adding mutations to batch writer");
  }
}
});
/**
 * Flushes and closes every per-table BatchWriter held by this record writer.
 *
 * @param context the task attempt context (unused)
 * @throws IOException if the server rejected any buffered mutations
 */
@Override
public void close(TaskAttemptContext context) throws IOException, InterruptedException {
  try {
    for (BatchWriter writer : writerMap.values()) {
      writer.flush();
      writer.close();
    }
  } catch (MutationsRejectedException e) {
    throw new IOException("Error closing Batch Writer", e);
  }
}
/**
 * Seeds the clone table's metadata from the source table: every tablet read
 * from the source metadata range is rewritten as a clone mutation for the new
 * table id, then the writer is flushed.
 *
 * @throws RuntimeException if the source table has no tablets (deleted mid-clone)
 */
static void initializeClone(String srcTableId, String tableId, Connector conn, BatchWriter bw)
    throws TableNotFoundException, MutationsRejectedException {
  KeyExtent srcExtent = new KeyExtent(new Text(srcTableId), null, null);
  TabletIterator tablets =
      new TabletIterator(createCloneScanner(srcTableId, conn), srcExtent.toMetadataRange(), true, true);
  if (!tablets.hasNext()) {
    throw new RuntimeException(" table deleted during clone? srcTableId = " + srcTableId);
  }
  while (tablets.hasNext()) {
    bw.addMutation(createCloneMutation(srcTableId, tableId, tablets.next()));
  }
  bw.flush();
}