@Override protected boolean shouldQueueWork(ReplicationTarget target) { Map<Table.ID,String> queuedWorkForPeer = this.queuedWorkByPeerName.get(target.getPeerName()); if (queuedWorkForPeer == null) { return true; } String queuedWork = queuedWorkForPeer.get(target.getSourceTableId()); // If we have no work for the local table to the given peer, submit some! return queuedWork == null; }
@Override
protected Set<String> getQueuedWork(ReplicationTarget target) {
  // At most one unit of work is tracked per peer/source-table pair, so the
  // result is either empty or a singleton.
  Map<Table.ID,String> workForPeer = this.queuedWorkByPeerName.get(target.getPeerName());
  if (workForPeer != null) {
    String queueKey = workForPeer.get(target.getSourceTableId());
    if (queueKey != null) {
      return Collections.singleton(queueKey);
    }
  }
  return Collections.emptySet();
}
/**
 * Stop tracking the given unit of work for the target's peer and source table.
 * Logs a warning (instead of removing anything) when no work, or different work,
 * is tracked for that pair.
 */
@Override
protected void removeQueuedWork(ReplicationTarget target, String queueKey) {
  Map<Table.ID,String> queuedWorkForPeer = this.queuedWorkByPeerName.get(target.getPeerName());
  if (queuedWorkForPeer == null) {
    log.warn("removeQueuedWork called when no work was queued for {}", target.getPeerName());
    return;
  }
  String queuedWork = queuedWorkForPeer.get(target.getSourceTableId());
  if (queuedWork == null) {
    // BUG FIX: the peer may be known while this table has nothing queued; the
    // original called queuedWork.equals(...) here and threw an NPE.
    log.warn("removeQueuedWork called when no work was queued for {} to {}",
        target.getSourceTableId(), target.getPeerName());
    return;
  }
  if (queuedWork.equals(queueKey)) {
    queuedWorkForPeer.remove(target.getSourceTableId());
  } else {
    log.warn("removeQueuedWork called on {} with differing queueKeys, expected {} but was {}",
        target, queueKey, queuedWork);
    return;
  }
}
}
@Override
protected Set<String> getQueuedWork(ReplicationTarget target) {
  // Queue keys have the form filename|peer|remoteId|sourceTableId; everything
  // after the filename identifies the target, so collect keys by that suffix.
  String targetSuffix = DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target.getPeerName()
      + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target.getRemoteIdentifier()
      + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target.getSourceTableId();
  Set<String> matches = new HashSet<>();
  for (String candidate : this.queuedWork) {
    if (candidate.endsWith(targetSuffix)) {
      matches.add(candidate);
    }
  }
  return matches;
}
/**
 * Serialize a filename and a {@link ReplicationTarget} into the expected key format for use with
 * the {@link DistributedWorkQueue}: the filename, peer name, remote identifier, and source table
 * id joined by {@code KEY_SEPARATOR}.
 *
 * @param filename
 *          Filename for data to be replicated
 * @param replTarget
 *          Information about replication peer
 * @return Key for identifying work in queue
 */
public static String getQueueKey(String filename, ReplicationTarget replTarget) {
  StringBuilder key = new StringBuilder(filename);
  key.append(KEY_SEPARATOR).append(replTarget.getPeerName());
  key.append(KEY_SEPARATOR).append(replTarget.getRemoteIdentifier());
  key.append(KEY_SEPARATOR).append(replTarget.getSourceTableId());
  return key.toString();
}
/**
 * Queue replication of the given file to the given peer, unless work for the same
 * source table is already outstanding to that peer.
 *
 * @return true if new work was queued, false if work was already pending or queueing failed
 */
@Override
protected boolean queueWork(Path path, ReplicationTarget target) {
  String queueKey = DistributedWorkQueueWorkAssignerHelper.getQueueKey(path.getName(), target);
  // Lazily create the per-peer map of source table -> queued key
  Map<Table.ID,String> workForPeer =
      this.queuedWorkByPeerName.computeIfAbsent(target.getPeerName(), peer -> new HashMap<>());
  String queuedWork = workForPeer.get(target.getSourceTableId());
  if (queuedWork == null) {
    try {
      workQueue.addWork(queueKey, path.toString());
      workForPeer.put(target.getSourceTableId(), queueKey);
    } catch (KeeperException e) {
      log.warn("Could not queue work for {} to {}", path, target, e);
      return false;
    } catch (InterruptedException e) {
      // BUG FIX: restore the interrupt status so callers can observe the interruption
      Thread.currentThread().interrupt();
      log.warn("Could not queue work for {} to {}", path, target, e);
      return false;
    }
    return true;
  } else if (queuedWork.startsWith(path.getName())) {
    log.debug("Not re-queueing work for {} as it has already been queued for replication to {}",
        path, target);
    return false;
  } else {
    log.debug("Not queueing {} for work as {} must be replicated to {} first", path, queuedWork,
        target.getPeerName());
    return false;
  }
}
String filename = entry.getKey(); String peerName = entry.getValue().getPeerName(); Table.ID sourceTableId = entry.getValue().getSourceTableId();
} else if (WorkSection.NAME.equals(colf)) { ReplicationTarget target = ReplicationTarget.from(colq); tableId = target.getSourceTableId(); } else { throw new RuntimeException("Got unexpected column");
protected Set<Integer> consumeWalPrefix(ReplicationTarget target, DataInputStream wal, Status status) throws IOException { Set<Integer> tids = new HashSet<>(); LogFileKey key = new LogFileKey(); LogFileValue value = new LogFileValue(); Set<Integer> desiredTids = new HashSet<>(); // Read through the stuff we've already processed in a previous replication attempt // We also need to track the tids that occurred earlier in the file as mutations // later on might use that tid for (long i = 0; i < status.getBegin(); i++) { key.readFields(wal); value.readFields(wal); switch (key.event) { case DEFINE_TABLET: if (target.getSourceTableId().equals(key.tablet.getTableId())) { desiredTids.add(key.tabletId); } break; default: break; } } return tids; }
String tableName = tableIdToName.get(configuredTarget.getSourceTableId()); if (tableName == null) { log.trace("Could not determine table name from id {}", configuredTarget.getSourceTableId()); continue;
if (!tableIdToProgress.containsKey(target.getSourceTableId())) { tableIdToProgress.put(target.getSourceTableId(), Long.MAX_VALUE); tableIdToProgress.put(target.getSourceTableId(), Math.min(tableIdToProgress.get(target.getSourceTableId()), status.getBegin()));
case DEFINE_TABLET: if (target.getSourceTableId().equals(key.tablet.getTableId())) { desiredTids.add(key.tabletId);
@Override protected boolean shouldQueueWork(ReplicationTarget target) { Map<String,String> queuedWorkForPeer = this.queuedWorkByPeerName.get(target.getPeerName()); if (null == queuedWorkForPeer) { return true; } String queuedWork = queuedWorkForPeer.get(target.getSourceTableId()); // If we have no work for the local table to the given peer, submit some! return null == queuedWork; }
@Override
protected Set<String> getQueuedWork(ReplicationTarget target) {
  // At most one unit of work is tracked per peer/source-table pair, so the
  // result is either empty or a singleton.
  Map<String,String> workForPeer = this.queuedWorkByPeerName.get(target.getPeerName());
  if (workForPeer != null) {
    String queueKey = workForPeer.get(target.getSourceTableId());
    if (queueKey != null) {
      return Collections.singleton(queueKey);
    }
  }
  return Collections.emptySet();
}
/**
 * Stop tracking the given unit of work for the target's peer and source table.
 * Logs a warning (instead of removing anything) when no work, or different work,
 * is tracked for that pair.
 */
@Override
protected void removeQueuedWork(ReplicationTarget target, String queueKey) {
  Map<String,String> queuedWorkForPeer = this.queuedWorkByPeerName.get(target.getPeerName());
  if (null == queuedWorkForPeer) {
    log.warn("removeQueuedWork called when no work was queued for {}", target.getPeerName());
    return;
  }
  String queuedWork = queuedWorkForPeer.get(target.getSourceTableId());
  if (null == queuedWork) {
    // BUG FIX: the peer may be known while this table has nothing queued; the
    // original called queuedWork.equals(...) here and threw an NPE.
    log.warn("removeQueuedWork called when no work was queued for {} to {}",
        target.getSourceTableId(), target.getPeerName());
    return;
  }
  if (queuedWork.equals(queueKey)) {
    queuedWorkForPeer.remove(target.getSourceTableId());
  } else {
    log.warn("removeQueuedWork called on {} with differing queueKeys, expected {} but was {}",
        target, queueKey, queuedWork);
    return;
  }
}
}
@Override
protected Set<String> getQueuedWork(ReplicationTarget target) {
  // Queue keys have the form filename|peer|remoteId|sourceTableId; everything
  // after the filename identifies the target, so collect keys by that suffix.
  String targetSuffix = DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target.getPeerName()
      + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target.getRemoteIdentifier()
      + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target.getSourceTableId();
  Set<String> matches = new HashSet<>();
  for (String candidate : this.queuedWork) {
    if (candidate.endsWith(targetSuffix)) {
      matches.add(candidate);
    }
  }
  return matches;
}
/**
 * Serialize a filename and a {@link ReplicationTarget} into the expected key format for use with
 * the {@link DistributedWorkQueue}: the filename, peer name, remote identifier, and source table
 * id joined by {@code KEY_SEPARATOR}.
 *
 * @param filename
 *          Filename for data to be replicated
 * @param replTarget
 *          Information about replication peer
 * @return Key for identifying work in queue
 */
public static String getQueueKey(String filename, ReplicationTarget replTarget) {
  StringBuilder key = new StringBuilder(filename);
  key.append(KEY_SEPARATOR).append(replTarget.getPeerName());
  key.append(KEY_SEPARATOR).append(replTarget.getRemoteIdentifier());
  key.append(KEY_SEPARATOR).append(replTarget.getSourceTableId());
  return key.toString();
}
protected Set<Integer> consumeWalPrefix(ReplicationTarget target, DataInputStream wal, Path p, Status status, long sizeLimit) throws IOException { Set<Integer> tids = new HashSet<>(); LogFileKey key = new LogFileKey(); LogFileValue value = new LogFileValue(); Set<Integer> desiredTids = new HashSet<>(); // Read through the stuff we've already processed in a previous replication attempt // We also need to track the tids that occurred earlier in the file as mutations // later on might use that tid for (long i = 0; i < status.getBegin(); i++) { key.readFields(wal); value.readFields(wal); switch (key.event) { case DEFINE_TABLET: if (target.getSourceTableId().equals(key.tablet.getTableId())) { desiredTids.add(key.tabletId); } break; default: break; } } return tids; }
String serializedTarget = target.getPeerName() + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target.getRemoteIdentifier() + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target.getSourceTableId();
@Test public void recordsWithProgressUpdateBothTables() throws Exception { conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.READ); conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.WRITE); ReplicationTable.setOnline(conn); String file = "/accumulo/wals/tserver+port/" + UUID.randomUUID(); Status stat = Status.newBuilder().setBegin(100).setEnd(200).setClosed(true) .setInfiniteEnd(false).build(); ReplicationTarget target = new ReplicationTarget("peer", "table1", "1"); // Create a single work record for a file to some peer BatchWriter bw = ReplicationTable.getBatchWriter(conn); Mutation m = new Mutation(file); WorkSection.add(m, target.toText(), ProtobufUtil.toValue(stat)); bw.addMutation(m); bw.close(); updater.run(); Scanner s = ReplicationTable.getScanner(conn); s.setRange(Range.exact(file)); StatusSection.limit(s); Entry<Key,Value> entry = Iterables.getOnlyElement(s); Assert.assertEquals(entry.getKey().getColumnFamily(), StatusSection.NAME); Assert.assertEquals(entry.getKey().getColumnQualifier().toString(), target.getSourceTableId()); // We should only rely on the correct begin attribute being returned Status actual = Status.parseFrom(entry.getValue().get()); Assert.assertEquals(stat.getBegin(), actual.getBegin()); }