private void addIndexMutation(ByteBuffer row, ByteBuffer family, ColumnVisibility visibility, byte[] qualifier) { // Create the mutation and add it to the batch writer Mutation indexMutation = new Mutation(row.array()); indexMutation.put(family.array(), qualifier, visibility, EMPTY_BYTES); try { indexWriter.addMutation(indexMutation); } catch (MutationsRejectedException e) { throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Index mutation rejected by server", e); } // Increment the cardinality metrics for this value of index // metrics is a mapping of row ID to column family MetricsKey key = new MetricsKey(row, family, visibility); AtomicLong count = metrics.get(key); if (count == null) { count = new AtomicLong(0); metrics.put(key, count); } count.incrementAndGet(); }
private Collection<Mutation> getMetricsMutations() { ImmutableList.Builder<Mutation> mutationBuilder = ImmutableList.builder(); // Mapping of column value to column to number of row IDs that contain that value for (Entry<MetricsKey, AtomicLong> entry : metrics.entrySet()) { // Row ID: Column value // Family: columnfamily_columnqualifier // Qualifier: CARDINALITY_CQ // Visibility: Inherited from indexed Mutation // Value: Cardinality Mutation mut = new Mutation(entry.getKey().row.array()); mut.put(entry.getKey().family.array(), CARDINALITY_CQ, entry.getKey().visibility, ENCODER.encode(entry.getValue().get())); // Add to our list of mutations mutationBuilder.add(mut); } // If the first row and last row are both not null, // which would really be for a brand new table that has zero rows and no indexed elements... // Talk about your edge cases! if (firstRow != null && lastRow != null) { // Add a some columns to the special metrics table row ID for the first/last row. // Note that if the values on the server side are greater/lesser, // the configured iterator will take care of this at scan/compaction time Mutation firstLastMutation = new Mutation(METRICS_TABLE_ROW_ID.array()); firstLastMutation.put(METRICS_TABLE_ROWS_CF.array(), METRICS_TABLE_FIRST_ROW_CQ.array(), firstRow); firstLastMutation.put(METRICS_TABLE_ROWS_CF.array(), METRICS_TABLE_LAST_ROW_CQ.array(), lastRow); mutationBuilder.add(firstLastMutation); } return mutationBuilder.build(); }
/** * Deletes a row, given a Scanner of JUST that row. */ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true deleter.putDelete(cf, cq); } bw.addMutation(deleter); } }
/**
 * Shell delete command: builds a single delete mutation for the
 * row/family/qualifier supplied as positional args, honoring the optional
 * visibility and timestamp options, and flushes it via a one-shot writer.
 */
@Override
public int execute(final String fullCommand, final CommandLine cl, final Shell shellState)
    throws AccumuloException, AccumuloSecurityException, TableNotFoundException, IOException,
    ConstraintViolationException {
  shellState.checkTableState();

  // Positional args: 0 = row, 1 = column family, 2 = column qualifier
  final Mutation m = new Mutation(new Text(cl.getArgs()[0].getBytes(Shell.CHARSET)));
  final Text colf = new Text(cl.getArgs()[1].getBytes(Shell.CHARSET));
  final Text colq = new Text(cl.getArgs()[2].getBytes(Shell.CHARSET));

  // Pick the putDelete overload matching which optional flags were given
  // (visibility and/or timestamp). Long.parseLong may throw
  // NumberFormatException on a malformed timestamp option.
  if (cl.hasOption(deleteOptAuths.getOpt())) {
    final ColumnVisibility le = new ColumnVisibility(cl.getOptionValue(deleteOptAuths.getOpt()));
    if (cl.hasOption(timestampOpt.getOpt())) {
      m.putDelete(colf, colq, le, Long.parseLong(cl.getOptionValue(timestampOpt.getOpt())));
    } else {
      m.putDelete(colf, colq, le);
    }
  } else if (cl.hasOption(timestampOpt.getOpt())) {
    m.putDelete(colf, colq, Long.parseLong(cl.getOptionValue(timestampOpt.getOpt())));
  } else {
    m.putDelete(colf, colq);
  }

  // One-shot writer sized to this single mutation (minimum 1 KiB buffer),
  // single write thread, timeout taken from the command line.
  final BatchWriter bw = shellState.getAccumuloClient().createBatchWriter(
      shellState.getTableName(),
      new BatchWriterConfig().setMaxMemory(Math.max(m.estimatedMemoryUsed(), 1024))
          .setMaxWriteThreads(1).setTimeout(getTimeout(cl), TimeUnit.MILLISECONDS));
  bw.addMutation(m);
  bw.close();
  return 0;
}
private static void moveDeleteEntry(ServerContext context, KeyExtent oldExtent, Entry<Key,Value> entry, String rowID, String prefix) { String filename = rowID.substring(prefix.length()); // add the new entry first log.info("Moving {} marker in {}", filename, RootTable.NAME); Mutation m = new Mutation(MetadataSchema.DeletesSection.getRowPrefix() + filename); m.put(EMPTY_BYTES, EMPTY_BYTES, EMPTY_BYTES); update(context, m, RootTable.EXTENT); // then remove the old entry m = new Mutation(entry.getKey().getRow()); m.putDelete(EMPTY_BYTES, EMPTY_BYTES); update(context, m, oldExtent); }
// NOTE(review): this span is a garbled/incomplete fragment — an orphaned
// `new ColumnVisibility(...), timestamp)` argument list follows a bare
// `long timestamp;` declaration, and an `} else {` has no matching `if`.
// It appears to copy each ColumnUpdate of `orig` into `copy` (with or
// without a value depending on a missing condition) and close `bw` on
// error — recover the full method from version control before editing.
BatchWriterConfig bwConfig = new BatchWriterConfig(); bwConfig.setMaxMemory(memoryInBytes); try { Mutation copy = new Mutation(orig.getRow()); for (ColumnUpdate update : orig.getUpdates()) { long timestamp; new ColumnVisibility(update.getColumnVisibility()), timestamp); } else { copy.put(update.getColumnFamily(), update.getColumnQualifier(), new ColumnVisibility(update.getColumnVisibility()), timestamp, update.getValue()); if (bw != null) { try { bw.close(); } catch (MutationsRejectedException e) { log.error("Could not apply mutations to {}", tableName);
/**
 * Clears the suspension marker for each suspended tablet so it can be
 * reassigned. Tablets that are not suspended are skipped.
 *
 * @param tablets tablets whose suspension should be cleared
 * @throws DistributedStoreException if writing or closing the writer fails
 */
@Override
public void unsuspend(Collection<TabletLocationState> tablets) throws DistributedStoreException {
  BatchWriter writer = createBatchWriter();
  try {
    for (TabletLocationState tls : tablets) {
      // FIX: the guard was inverted (`suspend != null` continued), which
      // skipped exactly the suspended tablets this method must clear.
      // Skip only tablets with no suspension to clear.
      if (tls.suspend == null) {
        continue;
      }
      Mutation m = new Mutation(tls.extent.getMetadataEntry());
      SuspendingTServer.clearSuspension(m);
      writer.addMutation(m);
    }
  } catch (Exception ex) {
    throw new DistributedStoreException(ex);
  } finally {
    // Always close; flush failures surface here as MutationsRejectedException
    try {
      writer.close();
    } catch (MutationsRejectedException e) {
      throw new DistributedStoreException(e);
    }
  }
}
// NOTE(review): incomplete fragment — `Mutation m` is declared twice in the
// same apparent scope, which would not compile; the two statements likely
// come from different branches of the original clone-table method. The
// visible intent: write a clone mutation for the source tablet's key/values
// and mark the cloned tablet "OK" in the ClonedColumnFamily. Recover the
// surrounding method before editing.
Mutation m = new Mutation(cloneTablet.getExtent().getMetadataEntry()); bw.addMutation(m); bw.addMutation(createCloneMutation(srcTableId, tableId, st.getKeyValues())); Mutation m = new Mutation(cloneTablet.getExtent().getMetadataEntry()); m.put(ClonedColumnFamily.NAME, new Text(""), new Value("OK".getBytes(UTF_8))); bw.addMutation(m);
/**
 * Record the updated Status for this file and target.
 *
 * @param filePath
 *          Path to file being replicated
 * @param status
 *          Updated Status after replication
 * @param target
 *          Peer that was replicated to
 * @throws AccumuloException if the write fails
 * @throws TableNotFoundException if the replication table is missing
 */
public void recordNewStatus(Path filePath, Status status, ReplicationTarget target)
    throws AccumuloException, TableNotFoundException {
  // try-with-resources flushes and closes the writer on exit
  try (BatchWriter bw = context.createBatchWriter(ReplicationTable.NAME, new BatchWriterConfig())) {
    log.debug("Recording new status for {}, {}", filePath, ProtobufUtil.toString(status));
    Mutation statusMutation = new Mutation(filePath.toString());
    WorkSection.add(statusMutation, target.toText(), ProtobufUtil.toValue(status));
    bw.addMutation(statusMutation);
  }
}
}
// NOTE(review): incomplete fragment — the mutation is created and added with
// no columns visible here; the column-population code (presumably one entry
// per target, per the log message) is missing from this view. The catch
// logs and implies a retry elsewhere. Recover the full method before editing.
log.info("Adding work records for {} to targets {}", file, targets); try { Mutation m = new Mutation(file); writer.addMutation(m); } catch (MutationsRejectedException e) { log.warn("Failed to write work mutations for replication, will retry", e);
/**
 * Deletes every entry visible through this scanner by issuing a versioned
 * delete (family, qualifier, visibility, timestamp) for each key.
 *
 * @throws MutationsRejectedException if the server rejects any delete
 */
@Override
public void delete() throws MutationsRejectedException {
  BatchWriter bw = null;
  try {
    bw = new BatchWriterImpl(context, tableId, bwConfig);
    // Walk the scan results and mirror each key as a delete
    for (Iterator<Entry<Key,Value>> entries = super.iterator(); entries.hasNext();) {
      Key k = entries.next().getKey();
      Mutation deleteMutation = new Mutation(k.getRow());
      deleteMutation.putDelete(k.getColumnFamily(), k.getColumnQualifier(),
          new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp());
      bw.addMutation(deleteMutation);
    }
  } finally {
    // Close (and flush) the writer even if iteration throws
    if (bw != null) {
      bw.close();
    }
  }
}
/**
 * Applies the given field/value pairs as a single mutation on the row
 * identified by {@code key}. Writes are buffered by the batch writer, so a
 * successful return means BATCHED_OK, not durable.
 */
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
  // Resolve the writer first so a missing table fails fast
  BatchWriter writer = null;
  try {
    writer = getWriter(table);
  } catch (TableNotFoundException e) {
    System.err.println("Error opening batch writer to Accumulo table " + table);
    e.printStackTrace();
    return Status.ERROR;
  }

  // One put per field, all under the configured column family
  Mutation rowMutation = new Mutation(key.getBytes(UTF_8));
  for (Map.Entry<String, ByteIterator> field : values.entrySet()) {
    rowMutation.put(colFamBytes, field.getKey().getBytes(UTF_8), field.getValue().toArray());
  }

  try {
    writer.addMutation(rowMutation);
  } catch (MutationsRejectedException e) {
    System.err.println("Error performing update.");
    e.printStackTrace();
    return Status.ERROR;
  }
  return Status.BATCHED_OK;
}
/**
 * Builds the shared fixtures for the index tests: a four-column table
 * (id/age/firstname/arr), two plain mutations (m1, m2), and two mutations
 * with column visibilities (m1v, m2v).
 */
@BeforeClass
public void setupClass() {
  // Column handles: ordinal 0 is the row ID ("id"); the rest live in
  // family "cf" and are flagged as indexed (last arg = true)
  AccumuloColumnHandle c1 = new AccumuloColumnHandle("id", Optional.empty(), Optional.empty(), VARCHAR, 0, "", false);
  AccumuloColumnHandle c2 = new AccumuloColumnHandle("age", Optional.of("cf"), Optional.of("age"), BIGINT, 1, "", true);
  AccumuloColumnHandle c3 = new AccumuloColumnHandle("firstname", Optional.of("cf"), Optional.of("firstname"), VARCHAR, 2, "", true);
  AccumuloColumnHandle c4 = new AccumuloColumnHandle("arr", Optional.of("cf"), Optional.of("arr"), new ArrayType(VARCHAR), 3, "", true);
  table = new AccumuloTable("default", "index_test_table", ImmutableList.of(c1, c2, c3, c4), "id", true, LexicoderRowSerializer.class.getCanonicalName(), null);

  // Plain mutations: two rows sharing the same AGE value
  m1 = new Mutation(M1_ROWID);
  m1.put(CF, AGE, AGE_VALUE);
  m1.put(CF, FIRSTNAME, M1_FNAME_VALUE);
  m1.put(CF, SENDERS, M1_ARR_VALUE);
  m2 = new Mutation(M2_ROWID);
  m2.put(CF, AGE, AGE_VALUE);
  m2.put(CF, FIRSTNAME, M2_FNAME_VALUE);
  m2.put(CF, SENDERS, M2_ARR_VALUE);

  // Same rows again, but with mixed "private"/"moreprivate" visibilities
  ColumnVisibility visibility1 = new ColumnVisibility("private");
  ColumnVisibility visibility2 = new ColumnVisibility("moreprivate");
  m1v = new Mutation(M1_ROWID);
  m1v.put(CF, AGE, visibility1, AGE_VALUE);
  m1v.put(CF, FIRSTNAME, visibility1, M1_FNAME_VALUE);
  m1v.put(CF, SENDERS, visibility2, M1_ARR_VALUE);
  m2v = new Mutation(M2_ROWID);
  m2v.put(CF, AGE, visibility1, AGE_VALUE);
  m2v.put(CF, FIRSTNAME, visibility2, M2_FNAME_VALUE);
  m2v.put(CF, SENDERS, visibility2, M2_ARR_VALUE);
}
/** * Deletes a row, given a Scanner of JUST that row. */ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true deleter.putDelete(cf, cq); } bw.addMutation(deleter); } }
/**
 * Finalizes a tablet split in the metadata table: removes the transient
 * split bookkeeping columns, records the low tablet's data files, and drops
 * the high tablet's files from this entry.
 */
public static void finishSplit(Text metadataEntry, Map<FileRef,DataFileValue> datafileSizes,
    List<FileRef> highDatafilesToRemove, final ServerContext context, ZooLock zooLock) {
  Mutation finishMutation = new Mutation(metadataEntry);

  // The split is complete — clear the in-progress markers
  TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.putDelete(finishMutation);
  TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.putDelete(finishMutation);
  ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(finishMutation);

  // Record each remaining data file with its encoded size/entry counts
  for (Entry<FileRef,DataFileValue> fileSize : datafileSizes.entrySet()) {
    finishMutation.put(DataFileColumnFamily.NAME, fileSize.getKey().meta(), new Value(fileSize.getValue().encode()));
  }

  // Files now owned by the high tablet are removed from this entry
  for (FileRef highFile : highDatafilesToRemove) {
    finishMutation.putDelete(DataFileColumnFamily.NAME, highFile.meta());
  }

  update(context, zooLock, finishMutation, new KeyExtent(metadataEntry, (Text) null));
}
/**
 * Records a future (pending) location for each assignment, clearing any
 * suspension marker in the same mutation.
 *
 * @param assignments tablet-to-server assignments to record
 * @throws DistributedStoreException if writing or closing the writer fails
 */
@Override
public void setFutureLocations(Collection<Assignment> assignments) throws DistributedStoreException {
  BatchWriter writer = createBatchWriter();
  try {
    for (Assignment pending : assignments) {
      Mutation futureLocation = new Mutation(pending.tablet.getMetadataEntry());
      // Clear suspension and set the future server in one atomic mutation
      SuspendingTServer.clearSuspension(futureLocation);
      pending.server.putFutureLocation(futureLocation);
      writer.addMutation(futureLocation);
    }
  } catch (Exception ex) {
    throw new DistributedStoreException(ex);
  } finally {
    // Always close; flush failures surface here as MutationsRejectedException
    try {
      writer.close();
    } catch (MutationsRejectedException e) {
      throw new DistributedStoreException(e);
    }
  }
}
// NOTE(review): incomplete fragment of a shell insert command — the first
// `m.put(..., le, ..., val)` has no enclosing `if`, and `le`, `val`, and
// `bw` are used without visible declarations. The visible intent mirrors
// the delete command: build one mutation from the row/colf/colq args,
// choose a put overload based on optional visibility/timestamp flags, and
// flush via a one-shot writer. Recover the full method before editing.
shellState.checkTableState(); final Mutation m = new Mutation(new Text(cl.getArgs()[0].getBytes(Shell.CHARSET))); final Text colf = new Text(cl.getArgs()[1].getBytes(Shell.CHARSET)); final Text colq = new Text(cl.getArgs()[2].getBytes(Shell.CHARSET)); m.put(colf, colq, le, Long.parseLong(cl.getOptionValue(timestampOpt.getOpt())), val); else m.put(colf, colq, le, val); } else if (cl.hasOption(timestampOpt.getOpt())) m.put(colf, colq, Long.parseLong(cl.getOptionValue(timestampOpt.getOpt())), val); else m.put(colf, colq, val); final BatchWriterConfig cfg = new BatchWriterConfig() .setMaxMemory(Math.max(m.estimatedMemoryUsed(), 1024)).setMaxWriteThreads(1) .setTimeout(getTimeout(cl), TimeUnit.MILLISECONDS); bw.addMutation(m); try { bw.close(); } catch (MutationsRejectedException e) { final ArrayList<String> lines = new ArrayList<>();
/**
 * Applies the given field/value pairs as a single mutation on the row
 * identified by {@code key}. Writes are buffered by the batch writer, so a
 * successful return means BATCHED_OK, not durable.
 */
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
  // Resolve the writer first so a missing table fails fast
  BatchWriter writer = null;
  try {
    writer = getWriter(table);
  } catch (TableNotFoundException e) {
    System.err.println("Error opening batch writer to Accumulo table " + table);
    e.printStackTrace();
    return Status.ERROR;
  }

  // One put per field, all under the configured column family
  Mutation rowMutation = new Mutation(key.getBytes(UTF_8));
  for (Map.Entry<String, ByteIterator> field : values.entrySet()) {
    rowMutation.put(colFamBytes, field.getKey().getBytes(UTF_8), field.getValue().toArray());
  }

  try {
    writer.addMutation(rowMutation);
  } catch (MutationsRejectedException e) {
    System.err.println("Error performing update.");
    e.printStackTrace();
    return Status.ERROR;
  }
  return Status.BATCHED_OK;
}
/** * Deletes a row, given a Scanner of JUST that row. */ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true deleter.putDelete(cf, cq); } bw.addMutation(deleter); } }