// NOTE(review): this span holds several disjoint mid-method fragments — the
// enclosing loops/braces are outside this view. Code kept byte-identical.

// Build a ReplicationTarget from a configured peer property: strip the
// property-name prefix to recover the peer name; the property value is the
// identifier of the table/resource on the remote system.
String peerName = prop.getKey().substring(targetPrefix.length());
String remoteIdentifier = prop.getValue();
ReplicationTarget target = new ReplicationTarget(peerName, remoteIdentifier, localId);

// Deserialize a target stored in a key's column qualifier (qualifier bytes are
// copied into the reusable buffer, then parsed via the Writable format).
Key k = entry.getKey();
k.getColumnQualifier(buffer);
ReplicationTarget target = ReplicationTarget.from(buffer);

// Skip configured targets whose source table id no longer maps to a name.
String tableName = tableIdToName.get(configuredTarget.getSourceTableId());
if (tableName == null) {
  log.trace("Could not determine table name from id {}", configuredTarget.getSourceTableId());
  continue;

// Skip peers for which no ReplicaSystem implementation is configured.
String replicaSystemClass = peers.get(configuredTarget.getPeerName());
if (replicaSystemClass == null) {
  log.trace("Could not determine configured ReplicaSystem for {}",
      configuredTarget.getPeerName());
  continue;

// Tail of an argument list from an enclosing call (call start not visible here);
// null numFiles is treated as zero pending files.
configuredTarget.getPeerName(), configuredTarget.getRemoteIdentifier(), replicaSystemClass,
    (numFiles == null) ? 0 : numFiles));
/**
 * Reads the replication {@link Status} recorded in the work section for the given file and
 * replication target.
 *
 * @param file
 *          Absolute path of the file whose status should be fetched
 * @param target
 *          Replication target whose work entry is read
 * @return the Status protobuf stored for the (file, target) pair
 * @throws ReplicationTableOfflineException
 *           if the replication table cannot be scanned
 * @throws InvalidProtocolBufferException
 *           if the stored value is not a valid Status protobuf
 */
protected Status getStatus(String file, ReplicationTarget target)
    throws ReplicationTableOfflineException, InvalidProtocolBufferException {
  Scanner scanner = ReplicationTable.getScanner(context);
  scanner.setRange(Range.exact(file));
  scanner.fetchColumn(WorkSection.NAME, target.toText());
  // Exactly one work entry is expected for this (file, target) pair
  byte[] serialized = Iterables.getOnlyElement(scanner).getValue().get();
  return Status.parseFrom(serialized);
}
}
// Fragment: serialize a ReplicationTarget (peer name -> remote identifier, for
// sourceTableId) into a Writable buffer, keyed by the file being replicated.
// The buffer is presumably copied into t and added to m afterwards — that code
// is outside this view.
Mutation m = new Mutation(file);
ReplicationTarget target = new ReplicationTarget();
DataOutputBuffer buffer = new DataOutputBuffer();
Text t = new Text();
target.setPeerName(entry.getKey());
target.setRemoteIdentifier(entry.getValue());
target.setSourceTableId(sourceTableId);
target.write(buffer);
/**
 * Serialize a filename and a {@link ReplicationTarget} into the expected key format for use with
 * the {@link DistributedWorkQueue}
 *
 * @param filename
 *          Filename for data to be replicated
 * @param replTarget
 *          Information about replication peer
 * @return Key for identifying work in queue
 */
public static String getQueueKey(String filename, ReplicationTarget replTarget) {
  // Key layout: filename | peer | remoteId | sourceTableId, joined by KEY_SEPARATOR
  StringBuilder key = new StringBuilder(filename);
  key.append(KEY_SEPARATOR).append(replTarget.getPeerName());
  key.append(KEY_SEPARATOR).append(replTarget.getRemoteIdentifier());
  key.append(KEY_SEPARATOR).append(replTarget.getSourceTableId());
  return key.toString();
}
@Override protected boolean shouldQueueWork(ReplicationTarget target) { Map<Table.ID,String> queuedWorkForPeer = this.queuedWorkByPeerName.get(target.getPeerName()); if (queuedWorkForPeer == null) { return true; } String queuedWork = queuedWorkForPeer.get(target.getSourceTableId()); // If we have no work for the local table to the given peer, submit some! return queuedWork == null; }
/**
 * Deserialize a ReplicationTarget
 *
 * @param s
 *          Serialized copy
 * @return the deserialized version
 */
public static ReplicationTarget from(String s) {
  ReplicationTarget target = new ReplicationTarget();
  // Bug fix: the buffer length must be the UTF-8 *byte* count, not s.length().
  // For non-ASCII input the encoded form is longer than the char count, and the
  // old code truncated the serialized data before readFields could consume it.
  byte[] bytes = s.getBytes(UTF_8);
  DataInputBuffer buffer = new DataInputBuffer();
  buffer.reset(bytes, bytes.length);

  try {
    target.readFields(buffer);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }

  return target;
}
// NOTE(review): fragment of a test method — the setup of m, bw, stat1/stat2,
// filename1/filename2 and the clusterNWork maps, plus the closing brace, are
// outside this view. Code kept byte-identical.
@Test
public void workAcrossPeersHappensConcurrently() throws Exception {
  // Two targets on *different* peers replicating the same source table
  ReplicationTarget target1 = new ReplicationTarget("cluster1", "table1", "1");
  Text serializedTarget1 = target1.toText();
  ReplicationTarget target2 = new ReplicationTarget("cluster2", "table1", "1");
  Text serializedTarget2 = target2.toText();

  // Record replication order entries for both targets
  OrderSection.add(m, target1.getSourceTableId(), ProtobufUtil.toValue(stat1));
  bw.addMutation(m);
  OrderSection.add(m, target2.getSourceTableId(), ProtobufUtil.toValue(stat2));
  bw.addMutation(m);

  // Each peer should end up with its own queued work entry (concurrent across peers)
  Assert.assertTrue(cluster1Work.containsKey(target1.getSourceTableId()));
  Assert.assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target1),
      cluster1Work.get(target1.getSourceTableId()));
  Assert.assertTrue(cluster2Work.containsKey(target2.getSourceTableId()));
  Assert.assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target2),
      cluster2Work.get(target2.getSourceTableId()));
// Fragment: build a work-section entry for a WAL file (bw is defined outside
// this view).
ReplicationTarget target = new ReplicationTarget("cluster1", "table1", "1");
// Hand-built queue-key suffix: peer<SEP>remoteId<SEP>sourceTableId — mirrors
// DistributedWorkQueueWorkAssignerHelper.getQueueKey without the filename part
String serializedTarget = target.getPeerName()
    + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target.getRemoteIdentifier()
    + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target.getSourceTableId();
String file1 = "/accumulo/wal/tserver+port/wal1";
Mutation m = new Mutation(file1);
// Status value: file still open, length unknown
WorkSection.add(m, target.toText(), StatusUtil.openWithUnknownLengthValue());
bw.addMutation(m);
// Fragment of an if/else chain dispatching on the entry's column family; the
// opening branch (and its condition) is outside this view.
tableId = Table.ID.of(colq.toString());
} else if (WorkSection.NAME.equals(colf)) {
  // Work entries serialize the whole target into the qualifier; recover the
  // source table id from the deserialized target
  ReplicationTarget target = ReplicationTarget.from(colq);
  tableId = target.getSourceTableId();
} else {
  throw new RuntimeException("Got unexpected column");
@Test public void doNotCreateWorkForFilesNotNeedingIt() throws Exception { ReplicationTarget target1 = new ReplicationTarget("cluster1", "table1", "1"), target2 = new ReplicationTarget("cluster1", "table2", "2"); Text serializedTarget1 = target1.toText(), serializedTarget2 = target2.toText(); // Create two mutations, both of which need replication work done BatchWriter bw = ReplicationTable.getBatchWriter(conn); String filename1 = UUID.randomUUID().toString(), filename2 = UUID.randomUUID().toString(); String file1 = "/accumulo/wal/tserver+port/" + filename1, file2 = "/accumulo/wal/tserver+port/" + filename2; Mutation m = new Mutation(file1); WorkSection.add(m, serializedTarget1, StatusUtil.fileCreatedValue(5)); bw.addMutation(m); m = new Mutation(file2); WorkSection.add(m, serializedTarget2, StatusUtil.fileCreatedValue(10)); bw.addMutation(m); bw.close(); DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class); HashSet<String> queuedWork = new HashSet<>(); assigner.setQueuedWork(queuedWork); assigner.setMaxQueueSize(Integer.MAX_VALUE); replay(workQueue); assigner.createWork(); verify(workQueue); }
/**
 * Looks up the configured replication password for the target's peer.
 *
 * @param localConf
 *          Local Accumulo configuration holding the peer password properties
 * @param target
 *          Replication target identifying the peer
 * @return the password configured for the peer
 * @throws IllegalArgumentException
 *           if no password is configured for the peer
 */
protected String getPassword(AccumuloConfiguration localConf, ReplicationTarget target) {
  requireNonNull(localConf);
  requireNonNull(target);

  Map<String,String> peerPasswords =
      localConf.getAllPropertiesWithPrefix(Property.REPLICATION_PEER_PASSWORD);
  // Passwords are keyed by the full property name plus the peer's name
  String propertyKey = Property.REPLICATION_PEER_PASSWORD.getKey() + target.getPeerName();
  String password = peerPasswords.get(propertyKey);
  if (null == password) {
    throw new IllegalArgumentException("Cannot get password for " + target.getPeerName());
  }
  return password;
}
// NOTE(review): disjoint fragments from a work-processing loop — the enclosing
// try/loop braces are outside this view. Code kept byte-identical.
} catch (InvalidProtocolBufferException e) {
  // Corrupt/unreadable Status protobuf: leave the entry in place for a later retry
  log.warn("Could not deserialize protobuf from work entry for {} to {}, will retry", file,
      ReplicationTarget.from(workEntry.getKey().getColumnQualifier()), e);
  continue;

// Replication finished and we were tracking this key: drop it from the
// in-memory queued-work state
if (!isWorkRequired(status) && keysBeingReplicated.contains(key)) {
  log.debug("Removing {} from replication state to {} because replication is complete", key,
      target.getPeerName());
  this.removeQueuedWork(target, key);

// Same cleanup on an alternate path (guard condition differs; context not visible)
if (keysBeingReplicated.contains(key)) {
  log.debug("Removing {} from replication state to {} because replication is complete", key,
      target.getPeerName());
  this.removeQueuedWork(target, key);
// Fragment: build the set of targets expected from the configured clusters,
// then (in a separate loop whose braces are outside this view) collect the
// targets actually written to the work section for comparison.
Set<ReplicationTarget> expectedTargets = new HashSet<>();
for (Entry<String,String> cluster : targetClusters.entrySet()) {
  expectedTargets.add(new ReplicationTarget(cluster.getKey(), cluster.getValue(), tableId));

Assert.assertEquals(WorkSection.NAME, entry.getKey().getColumnFamily());
ReplicationTarget target = ReplicationTarget.from(entry.getKey().getColumnQualifier());
actualTargets.add(target);
/**
 * Extracts the {@link ReplicationTarget} serialized into the column qualifier of a work-section
 * key.
 *
 * @param k
 *          Replication work key whose qualifier holds the serialized target
 * @param buff
 *          Reusable buffer the qualifier bytes are copied into
 * @return the deserialized replication target
 * @throws IllegalArgumentException
 *           if the key's column family is not the work-section family
 */
public static ReplicationTarget getTarget(Key k, Text buff) {
  checkArgument(BYTE_SEQ_NAME.equals(k.getColumnFamilyData()),
      "Given replication work key with incorrect colfam");
  // Copy the qualifier into the caller-supplied buffer, then parse it
  k.getColumnQualifier(buff);
  return ReplicationTarget.from(buff);
}
protected Set<Integer> consumeWalPrefix(ReplicationTarget target, DataInputStream wal, Status status) throws IOException { Set<Integer> tids = new HashSet<>(); LogFileKey key = new LogFileKey(); LogFileValue value = new LogFileValue(); Set<Integer> desiredTids = new HashSet<>(); // Read through the stuff we've already processed in a previous replication attempt // We also need to track the tids that occurred earlier in the file as mutations // later on might use that tid for (long i = 0; i < status.getBegin(); i++) { key.readFields(wal); value.readFields(wal); switch (key.event) { case DEFINE_TABLET: if (target.getSourceTableId().equals(key.tablet.getTableId())) { desiredTids.add(key.tabletId); } break; default: break; } } return tids; }
// Fragment: tail of a call reconstructing a ReplicationTarget from a queue key.
// The key segments (peer, remoteId, sourceTableId) are delimited by separators
// located at index < secondIndex < thirdIndex — computed outside this view.
new ReplicationTarget(queueKey.substring(index + 1, secondIndex),
    queueKey.substring(secondIndex + 1, thirdIndex),
    Table.ID.of(queueKey.substring(thirdIndex + 1))));
// The remote identifier — presumably the id of the corresponding table on the
// peer system (TODO confirm against the peer's ReplicaSystem).
final String remoteTableId = target.getRemoteIdentifier();
/** * Convenience method to serialize a ReplicationTarget to {@link Text} using the {@link Writable} * methods without caring about performance penalties due to excessive object creation * * @return The serialized representation of the object */ public Text toText() { DataOutputBuffer buffer = new DataOutputBuffer(); try { this.write(buffer); } catch (IOException e) { throw new RuntimeException(e); } Text t = new Text(); // Throw it in a text for the mutation t.set(buffer.getData(), 0, buffer.getLength()); return t; } }
// NOTE(review): fragment of a test method — the setup of m, bw, stat1/stat2,
// filename1/filename2 and cluster1Work, plus the closing brace, are outside
// this view. Code kept byte-identical.
@Test
public void workAcrossTablesHappensConcurrently() throws Exception {
  // Two targets on the *same* peer but different source tables
  ReplicationTarget target1 = new ReplicationTarget("cluster1", "table1", "1");
  Text serializedTarget1 = target1.toText();
  ReplicationTarget target2 = new ReplicationTarget("cluster1", "table2", "2");
  Text serializedTarget2 = target2.toText();

  // Record replication order entries for both targets
  OrderSection.add(m, target1.getSourceTableId(), ProtobufUtil.toValue(stat1));
  bw.addMutation(m);
  OrderSection.add(m, target2.getSourceTableId(), ProtobufUtil.toValue(stat2));
  bw.addMutation(m);

  // Both tables should have queued work for the one peer (concurrent across tables)
  Assert.assertTrue(cluster1Work.containsKey(target1.getSourceTableId()));
  Assert.assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target1),
      cluster1Work.get(target1.getSourceTableId()));
  Assert.assertTrue(cluster1Work.containsKey(target2.getSourceTableId()));
  Assert.assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target2),
      cluster1Work.get(target2.getSourceTableId()));
/**
 * Collects the queued work entries destined for the given target by matching the
 * peer/remoteId/sourceTableId suffix of each queue key.
 */
@Override
protected Set<String> getQueuedWork(ReplicationTarget target) {
  // Queue keys end with <SEP>peer<SEP>remoteId<SEP>sourceTableId; match on that suffix
  StringBuilder suffix = new StringBuilder();
  suffix.append(DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR)
      .append(target.getPeerName());
  suffix.append(DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR)
      .append(target.getRemoteIdentifier());
  suffix.append(DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR)
      .append(target.getSourceTableId());
  String desiredSuffix = suffix.toString();

  Set<String> matches = new HashSet<>();
  for (String work : this.queuedWork) {
    if (work.endsWith(desiredSuffix)) {
      matches.add(work);
    }
  }
  return matches;
}