void set(Key key) {
  this.key = key;
  hashCode = hash(key.getColumnFamilyData());
}
@Override
protected Converter<ByteSequence> converter() {
  return (k, v, c) -> c.accept(k.getColumnFamilyData());
}
@Override
public boolean accept(Key key, Value v) {
  if (columnFamilies.contains(key.getColumnFamilyData()))
    return true;
  HashSet<ByteSequence> cfset = columnsQualifiers.get(key.getColumnQualifierData());
  // ensure the column qualifier goes with a paired column family,
  // it is possible that a column qualifier could occur with a
  // column family it was not paired with
  return cfset != null && cfset.contains(key.getColumnFamilyData());
}
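// A minimal, self-contained sketch (not taken from the filter class itself) of how the
// columnFamilies and columnsQualifiers structures consulted by accept() above could be
// populated; the "attrs" and "meta"/"size" columns are illustrative assumptions.
import java.util.HashMap;
import java.util.HashSet;

import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;

class ColumnFilterSetupSketch {
  static void populate() {
    // families fetched with no qualifier: any key in such a family is accepted outright
    HashSet<ByteSequence> columnFamilies = new HashSet<>();
    columnFamilies.add(new ArrayByteSequence("attrs"));

    // qualifier -> set of families it was explicitly paired with; accept() only passes a
    // key whose qualifier maps to a set containing that key's family
    HashMap<ByteSequence,HashSet<ByteSequence>> columnsQualifiers = new HashMap<>();
    columnsQualifiers.computeIfAbsent(new ArrayByteSequence("size"), q -> new HashSet<>())
        .add(new ArrayByteSequence("meta"));
  }
}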
public static void getFile(Key k, Text buff) {
  requireNonNull(k);
  requireNonNull(buff);
  checkArgument(BYTE_SEQ_NAME.equals(k.getColumnFamilyData()),
      "Given replication work key with incorrect colfam");
  _getFile(k, buff);
}
void set(Key key) {
  this.key = key;
  hashValue = hash(key.getColumnFamilyData()) + hash(key.getColumnQualifierData());
}
/**
 * Extract the file name from the row suffix into the given {@link Text}
 *
 * @param k
 *          Key to extract from
 * @param buff
 *          Text to place file name into
 */
public static void getFile(Key k, Text buff) {
  requireNonNull(k);
  requireNonNull(buff);
  checkArgument(BYTE_SEQ_NAME.equals(k.getColumnFamilyData()),
      "Given replication status key with incorrect colfam");
  _getFile(k, buff);
}
private void addKeyValue(Key k, Value v) {
  if (dropEmptyColFams && k.getColumnFamilyData().equals(EMPTY)) {
    return;
  }
  keys.add(new Key(k));
  values.add(new Value(v));
}
public static ReplicationTarget getTarget(Key k, Text buff) {
  checkArgument(BYTE_SEQ_NAME.equals(k.getColumnFamilyData()),
      "Given replication work key with incorrect colfam");
  k.getColumnQualifier(buff);
  return ReplicationTarget.from(buff);
}
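// Hypothetical usage sketch for the accessors above; the entry parameter and the idea of
// iterating entries from the replication work section are assumptions for illustration.
static void readWorkEntry(Map.Entry<Key,Value> entry) {
  Text file = new Text();
  getFile(entry.getKey(), file); // row suffix -> file name
  ReplicationTarget target = getTarget(entry.getKey(), new Text());
  System.out.println(file + " -> " + target);
}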
/**
 * Make a new key with a new column qualifier and column visibility. Copy the rest of the parts
 * of the key (including delete flag) from {@code originalKey}.
 */
protected Key replaceKeyParts(Key originalKey, Text newColQual, Text newColVis) {
  byte[] row = originalKey.getRowData().toArray();
  byte[] cf = originalKey.getColumnFamilyData().toArray();
  byte[] cq = newColQual.getBytes();
  byte[] cv = newColVis.getBytes();
  long timestamp = originalKey.getTimestamp();
  Key newKey = new Key(row, 0, row.length, cf, 0, cf.length, cq, 0, newColQual.getLength(), cv, 0,
      newColVis.getLength(), timestamp);
  newKey.setDeleted(originalKey.isDeleted());
  return newKey;
}
private void consumeEmptyColFams() throws IOException {
  while (dropEmptyColFams && source.hasTop()
      && source.getTopKey().getColumnFamilyData().length() == 0) {
    source.next();
    consumeDeleted();
  }
}
/**
 * Make a new key with all parts (including delete flag) coming from {@code originalKey} but use
 * {@code newColQual} as the column qualifier.
 */
protected Key replaceColumnQualifier(Key originalKey, Text newColQual) {
  byte[] row = originalKey.getRowData().toArray();
  byte[] cf = originalKey.getColumnFamilyData().toArray();
  byte[] cq = newColQual.getBytes();
  byte[] cv = originalKey.getColumnVisibilityData().toArray();
  long timestamp = originalKey.getTimestamp();
  Key newKey = new Key(row, 0, row.length, cf, 0, cf.length, cq, 0, newColQual.getLength(), cv, 0,
      cv.length, timestamp);
  newKey.setDeleted(originalKey.isDeleted());
  return newKey;
}
/**
 * Make a new key with all parts (including delete flag) coming from {@code originalKey} but use
 * {@code newColVis} as the column visibility.
 */
protected Key replaceColumnVisibility(Key originalKey, Text newColVis) {
  byte[] row = originalKey.getRowData().toArray();
  byte[] cf = originalKey.getColumnFamilyData().toArray();
  byte[] cq = originalKey.getColumnQualifierData().toArray();
  byte[] cv = newColVis.getBytes();
  long timestamp = originalKey.getTimestamp();
  Key newKey = new Key(row, 0, row.length, cf, 0, cf.length, cq, 0, cq.length, cv, 0,
      newColVis.getLength(), timestamp);
  newKey.setDeleted(originalKey.isDeleted());
  return newKey;
}
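// Illustrative only: one way the replace* helpers above might be used from a subclass,
// rewriting every column qualifier with a fixed prefix while keeping row, family,
// visibility, timestamp, and delete flag intact. The "indexed_" prefix is an assumption.
protected Key prefixColumnQualifier(Key originalKey) {
  Text newColQual = new Text("indexed_" + originalKey.getColumnQualifier());
  return replaceColumnQualifier(originalKey, newColQual);
}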
@Override
public Key transform(org.apache.accumulo.core.data.Key acuKey) {
  ByteSequence row = acuKey.getRowData();
  ByteSequence cf = acuKey.getColumnFamilyData();
  byte[] keyData = new byte[row.length() + cf.length()];
  System.arraycopy(row.getBackingArray(), row.offset(), keyData, 0, row.length());
  System.arraycopy(cf.getBackingArray(), cf.offset(), keyData, row.length(), cf.length());
  return new Key(keyData, 1.0);
}
private boolean isSuppressionMarker(Key key, Value val) {
  return key.getColumnFamilyData().length() == 0 && key.getColumnQualifierData().length() == 0
      && key.getColumnVisibilityData().length() == 0 && val.equals(SUPPRESS_ROW_VALUE);
}
private boolean isDeleteMarker(Key key, Value val) {
  return key.getColumnFamilyData().length() == 0 && key.getColumnQualifierData().length() == 0
      && key.getColumnVisibilityData().length() == 0 && val.equals(DELETE_ROW_VALUE);
}
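// Sketch of how a row-level marker entry like those detected above could be written;
// Mutation here is org.apache.accumulo.core.data.Mutation, and referencing
// DELETE_ROW_VALUE as a constant visible in this scope is an assumption.
static Mutation deleteRowMarker(Text row) {
  Mutation m = new Mutation(row);
  // empty family, qualifier, and (default) visibility mark the whole row; the value
  // distinguishes a delete marker from a suppression marker
  m.put(new Text(), new Text(), DELETE_ROW_VALUE);
  return m;
}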
NMIterator(Key key) {
  if (nmPointer == 0) {
    throw new IllegalStateException();
  }
  expectedModCount = modCount;
  nmiPointer = createNMI(nmPointer, key.getRowData().toArray(),
      key.getColumnFamilyData().toArray(), key.getColumnQualifierData().toArray(),
      key.getColumnVisibilityData().toArray(), key.getTimestamp(), key.isDeleted(), fieldsLens);
  hasNext = nmiPointer != 0;
}
@Override
public boolean accept(Key k, Value v) {
  return match(v.get()) || match(k.getRowData()) || match(k.getColumnFamilyData())
      || match(k.getColumnQualifierData());
}
@Override
public boolean accept(Key key, Value value) {
  if (orFields) {
    return matches(rowMatcher, rowMatcher == null ? null : key.getRowData())
        || matches(colfMatcher, colfMatcher == null ? null : key.getColumnFamilyData())
        || matches(colqMatcher, colqMatcher == null ? null : key.getColumnQualifierData())
        || matches(valueMatcher, value.get(), 0, value.get().length);
  }
  return matches(rowMatcher, rowMatcher == null ? null : key.getRowData())
      && matches(colfMatcher, colfMatcher == null ? null : key.getColumnFamilyData())
      && matches(colqMatcher, colqMatcher == null ? null : key.getColumnQualifierData())
      && matches(valueMatcher, value.get(), 0, value.get().length);
}
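// A plausible shape for the matches(Matcher, ByteSequence) helper used above; the real
// implementation may differ. A null matcher means no pattern was configured for that
// field, which should be neutral under AND semantics and non-matching under OR semantics.
// Assumes java.util.regex.Matcher and java.nio.charset.StandardCharsets are imported.
private boolean matches(Matcher matcher, ByteSequence bs) {
  if (matcher == null) {
    return !orFields;
  }
  matcher.reset(new String(bs.toArray(), StandardCharsets.UTF_8));
  return matcher.matches();
}
// a matches(Matcher, byte[], int, int) overload for the value would follow the same pattern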
@Override
public org.apache.hadoop.util.bloom.Key transform(org.apache.accumulo.core.data.Key acuKey) {
  ByteSequence row = acuKey.getRowData();
  ByteSequence cf = acuKey.getColumnFamilyData();
  ByteSequence cq = acuKey.getColumnQualifierData();
  byte[] keyData = new byte[row.length() + cf.length() + cq.length()];
  System.arraycopy(row.getBackingArray(), row.offset(), keyData, 0, row.length());
  System.arraycopy(cf.getBackingArray(), cf.offset(), keyData, row.length(), cf.length());
  System.arraycopy(cq.getBackingArray(), cq.offset(), keyData, row.length() + cf.length(),
      cq.length());
  return new org.apache.hadoop.util.bloom.Key(keyData, 1.0);
}
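// Usage sketch for the functor above: row, family, and qualifier bytes are concatenated
// into one bloom-filter key with a constant weight of 1.0. The literal key values below
// are illustrative assumptions.
org.apache.hadoop.util.bloom.Key exampleBloomKey() {
  org.apache.accumulo.core.data.Key acuKey = new org.apache.accumulo.core.data.Key(
      new Text("row1"), new Text("family"), new Text("qualifier"));
  return transform(acuKey);
}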
@Override
public void accept(Key k, Value v) {
  keyStats.accept(k.getLength());
  rowStats.accept(k.getRowData().length());
  familyStats.accept(k.getColumnFamilyData().length());
  qualifierStats.accept(k.getColumnQualifierData().length());
  visibilityStats.accept(k.getColumnVisibilityData().length());
  valueStats.accept(v.getSize());
  total++;
}
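// A minimal sketch of the per-field accumulators the accept() above is assumed to feed;
// LongSummaryStatistics is one plausible choice (it tracks count, min, max, sum, and
// average), not necessarily what the original class uses.
private final java.util.LongSummaryStatistics keyStats = new java.util.LongSummaryStatistics();
private final java.util.LongSummaryStatistics rowStats = new java.util.LongSummaryStatistics();
private final java.util.LongSummaryStatistics familyStats = new java.util.LongSummaryStatistics();
private final java.util.LongSummaryStatistics qualifierStats = new java.util.LongSummaryStatistics();
private final java.util.LongSummaryStatistics visibilityStats = new java.util.LongSummaryStatistics();
private final java.util.LongSummaryStatistics valueStats = new java.util.LongSummaryStatistics();
private long total = 0;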