// Distribute requests across servers by consistent hashing, deriving the partition
// context from the partition-key annotations declared on QueueServiceAuthenticatorProxy.
servicePoolBuilder.withPartitionFilter(new ConsistentHashPartitionFilter())
        .withPartitionContextAnnotationsFrom(QueueServiceAuthenticatorProxy.class);
@Override public Iterable<ServiceEndPoint> filter(Iterable<ServiceEndPoint> endPoints, PartitionContext partitionContext) { HashCode partitionHash = getPartitionHash(partitionContext); if (partitionHash == null) { return endPoints; // No partition hash means any server can handle the request. } // The choose() method is synchronized. Do any prep work we can up front before calling into it. Map<String, ServiceEndPoint> endPointsById = indexById(endPoints); ServiceEndPoint endPoint = choose(endPointsById, partitionHash); return Collections.singleton(endPoint); }
/** * Returns a list of pseudo-random 32-bit values derived from the specified end point ID. */ private List<Integer> computeHashCodes(String endPointId) { // Use the libketama approach of using MD5 hashes to generate 32-bit random values. This assigns a set of // randomly generated ranges to each end point. The individual ranges may vary widely in size, but, with // sufficient # of entries per end point, the overall amount of data assigned to each server tends to even out // with minimal variation (256 entries per server yields roughly 5% variation in server load). List<Integer> list = Lists.newArrayListWithCapacity(_entriesPerEndPoint); for (int i = 0; list.size() < _entriesPerEndPoint; i++) { Hasher hasher = Hashing.md5().newHasher(); hasher.putInt(i); putUnencodedChars(hasher, endPointId); ByteBuffer buf = ByteBuffer.wrap(hasher.hash().asBytes()); while (buf.hasRemaining() && list.size() < _entriesPerEndPoint) { list.add(buf.getInt()); } } return list; }
/**
 * Picks the end point that owns {@code partitionHash} on the consistent-hash ring,
 * first reconciling the ring with the current set of active end points.
 * Synchronized: guards the shared {@code _ring} and {@code _endPointsById} state.
 */
private synchronized ServiceEndPoint choose(Map<String, ServiceEndPoint> endPointsById, HashCode partitionHash) {
    // Update the ring if the set of active end points has changed.
    // Remove ring entries for end points that have disappeared...
    for (String endPointId : Sets.difference(_endPointsById.keySet(), endPointsById.keySet())) {
        for (Integer hash : computeHashCodes(endPointId)) {
            _ring.remove(hash);
        }
    }
    // ...and add entries for end points that are newly present.
    for (String endPointId : Sets.difference(endPointsById.keySet(), _endPointsById.keySet())) {
        for (Integer hash : computeHashCodes(endPointId)) {
            _ring.put(hash, endPointId);
        }
    }
    // Keep the cached map in sync so removals above use the prior generation's IDs.
    if (!_endPointsById.equals(endPointsById)) {
        _endPointsById = endPointsById;
    }

    // For the given partition hash, find its location in the ring and return its
    // associated end point.  ceilingEntry() finds the first entry at or clockwise of
    // the hash; a null result means we ran off the end, so wrap to the first entry.
    // NOTE(review): if endPointsById is empty the ring is empty and firstEntry()
    // returns null, which would NPE below — presumably callers never pass an empty
    // map; confirm against the call site.
    Map.Entry<Integer, String> entry = _ring.ceilingEntry(partitionHash.asInt());
    if (entry == null) {
        entry = _ring.firstEntry();
    }
    return _endPointsById.get(entry.getValue());
}
private HashCode getPartitionHash(PartitionContext partitionContext) { // The precise implementation of this method isn't particularly important. There are lots of ways we can hash // the data in the PartitionContext. It just needs to be deterministic and to take into account the values in // the PartitionContext for the configured partition keys. Hasher hasher = Hashing.md5().newHasher(); boolean empty = true; if (_partitionKeys.isEmpty()) { // Use the default context. Object value = partitionContext.get(); if (value != null) { putUnencodedChars(hasher, value.toString()); empty = false; } } for (String partitionKey : _partitionKeys) { Object value = partitionContext.get(partitionKey); if (value != null) { // Include both the key and value in the hash so "reviewId" of 1 and "reviewerId" of 1 hash differently. putUnencodedChars(hasher, partitionKey); putUnencodedChars(hasher, value.toString()); empty = false; } } if (empty) { // When the partition context has no relevant values that means we should ignore the partition context and // don't filter the end points based on partition. Return null to indicate this. return null; } return hasher.hash(); }
/**
 * Returns the end point owning {@code partitionHash} on the ring, after syncing the
 * ring with the live end point set.  Synchronized to protect {@code _ring} and
 * {@code _endPointsById}, which this method both reads and mutates.
 */
private synchronized ServiceEndPoint choose(Map<String, ServiceEndPoint> endPointsById, HashCode partitionHash) {
    // Update the ring if the set of active end points has changed.
    // Departed end points: delete every ring entry they contributed.
    for (String endPointId : Sets.difference(_endPointsById.keySet(), endPointsById.keySet())) {
        for (Integer hash : computeHashCodes(endPointId)) {
            _ring.remove(hash);
        }
    }
    // Arrived end points: insert their computed ring positions.
    for (String endPointId : Sets.difference(endPointsById.keySet(), _endPointsById.keySet())) {
        for (Integer hash : computeHashCodes(endPointId)) {
            _ring.put(hash, endPointId);
        }
    }
    // Replace the cached snapshot only when it actually changed.
    if (!_endPointsById.equals(endPointsById)) {
        _endPointsById = endPointsById;
    }

    // For the given partition hash, find its location in the ring and return its
    // associated end point, wrapping around to the first entry when the hash is
    // beyond the highest ring position.
    Map.Entry<Integer, String> entry = _ring.ceilingEntry(partitionHash.asInt());
    if (entry == null) {
        entry = _ring.firstEntry();  // wrap around the ring
    }
    return _endPointsById.get(entry.getValue());
}
// Route requests via consistent hashing, using the partition-context annotations
// declared on DatabusAuthenticatorProxy to build each request's partition context.
servicePoolBuilder.withPartitionFilter(new ConsistentHashPartitionFilter())
        .withPartitionContextAnnotationsFrom(DatabusAuthenticatorProxy.class);
/**
 * Filters the candidate end points down to the one server that owns the partition
 * described by {@code partitionContext}.  If the context yields no partition hash,
 * the end points are returned unfiltered.
 */
@Override
public Iterable<ServiceEndPoint> filter(Iterable<ServiceEndPoint> endPoints, PartitionContext partitionContext) {
    HashCode partitionHash = getPartitionHash(partitionContext);
    if (partitionHash == null) {
        return endPoints;  // No partition hash means any server can handle the request.
    }
    // The choose() method is synchronized.  Do any prep work we can up front before
    // calling into it, so the lock is held as briefly as possible.
    Map<String, ServiceEndPoint> endPointsById = indexById(endPoints);
    ServiceEndPoint endPoint = choose(endPointsById, partitionHash);
    return Collections.singleton(endPoint);
}
/**
 * Hashes the partition context deterministically over the configured partition keys.
 * The precise implementation isn't particularly important — it just needs to be
 * deterministic and to take into account the values in the PartitionContext for the
 * configured partition keys.  Returns {@code null} when no relevant values exist,
 * signalling that partition filtering should be skipped.
 */
private HashCode getPartitionHash(PartitionContext partitionContext) {
    Hasher hasher = Hashing.md5().newHasher();
    boolean empty = true;
    if (_partitionKeys.isEmpty()) {
        // Use the default context value when no explicit keys are configured.
        Object value = partitionContext.get();
        if (value != null) {
            putUnencodedChars(hasher, value.toString());
            empty = false;
        }
    }
    for (String partitionKey : _partitionKeys) {
        Object value = partitionContext.get(partitionKey);
        if (value != null) {
            // Include both the key and value in the hash so "reviewId" of 1 and
            // "reviewerId" of 1 hash differently.
            putUnencodedChars(hasher, partitionKey);
            putUnencodedChars(hasher, value.toString());
            empty = false;
        }
    }
    if (empty) {
        // When the partition context has no relevant values that means we should
        // ignore the partition context and not filter the end points based on
        // partition.  Return null to indicate this.
        return null;
    }
    return hasher.hash();
}
// Spread requests over servers with consistent hashing; the partition context is
// derived from the annotations on DedupQueueServiceAuthenticatorProxy.
servicePoolBuilder.withPartitionFilter(new ConsistentHashPartitionFilter())
        .withPartitionContextAnnotationsFrom(DedupQueueServiceAuthenticatorProxy.class);
/**
 * Returns a list of pseudo-random 32-bit values derived from the specified end point ID.
 */
private List<Integer> computeHashCodes(String endPointId) {
    // Use the libketama approach of using MD5 hashes to generate 32-bit random values.
    // This assigns a set of randomly generated ranges to each end point.  The
    // individual ranges may vary widely in size, but, with sufficient # of entries per
    // end point, the overall amount of data assigned to each server tends to even out
    // with minimal variation (256 entries per server yields roughly 5% variation in
    // server load).
    List<Integer> list = Lists.newArrayListWithCapacity(_entriesPerEndPoint);
    for (int i = 0; list.size() < _entriesPerEndPoint; i++) {
        // Seed each MD5 round with the round counter so successive digests differ.
        Hasher hasher = Hashing.md5().newHasher();
        hasher.putInt(i);
        putUnencodedChars(hasher, endPointId);
        // Carve the 16-byte digest into 4-byte ints until we have enough entries.
        ByteBuffer buf = ByteBuffer.wrap(hasher.hash().asBytes());
        while (buf.hasRemaining() && list.size() < _entriesPerEndPoint) {
            list.add(buf.getInt());
        }
    }
    return list;
}
/**
 * Configures the pool to route each request via consistent hashing over the
 * partition context declared by annotations on QueueClient.
 */
@Override
public void configure(ServicePoolBuilder<AuthQueueService> servicePoolBuilder) {
    ConsistentHashPartitionFilter partitionFilter = new ConsistentHashPartitionFilter();
    servicePoolBuilder
            .withPartitionFilter(partitionFilter)
            .withPartitionContextAnnotationsFrom(QueueClient.class);
}
/**
 * Configures the pool with consistent-hash routing, sourcing the partition context
 * from the annotations on DatabusClient.
 */
@Override
public void configure(ServicePoolBuilder<AuthDatabus> servicePoolBuilder) {
    ConsistentHashPartitionFilter partitionFilter = new ConsistentHashPartitionFilter();
    servicePoolBuilder
            .withPartitionFilter(partitionFilter)
            .withPartitionContextAnnotationsFrom(DatabusClient.class);
}
/**
 * Configures the pool with consistent-hash routing, sourcing the partition context
 * from the annotations on DatabusAuthenticatorProxy.
 */
@Override
public void configure(ServicePoolBuilder<Databus> servicePoolBuilder) {
    ConsistentHashPartitionFilter partitionFilter = new ConsistentHashPartitionFilter();
    servicePoolBuilder
            .withPartitionFilter(partitionFilter)
            .withPartitionContextAnnotationsFrom(DatabusAuthenticatorProxy.class);
}
/**
 * Configures the pool with consistent-hash routing, sourcing the partition context
 * from the annotations on DedupQueueClient.
 */
@Override
public void configure(ServicePoolBuilder<AuthDedupQueueService> servicePoolBuilder) {
    ConsistentHashPartitionFilter partitionFilter = new ConsistentHashPartitionFilter();
    servicePoolBuilder
            .withPartitionFilter(partitionFilter)
            .withPartitionContextAnnotationsFrom(DedupQueueClient.class);
}
/**
 * Configures the pool with consistent-hash routing, sourcing the partition context
 * from the annotations on QueueServiceAuthenticatorProxy.
 */
@Override
public void configure(ServicePoolBuilder<QueueService> servicePoolBuilder) {
    ConsistentHashPartitionFilter partitionFilter = new ConsistentHashPartitionFilter();
    servicePoolBuilder
            .withPartitionFilter(partitionFilter)
            .withPartitionContextAnnotationsFrom(QueueServiceAuthenticatorProxy.class);
}
/**
 * Configures the pool with consistent-hash routing, sourcing the partition context
 * from the annotations on AbstractSubjectDatabus.
 */
@Override
public void configure(ServicePoolBuilder<SubjectDatabus> servicePoolBuilder) {
    ConsistentHashPartitionFilter partitionFilter = new ConsistentHashPartitionFilter();
    servicePoolBuilder
            .withPartitionFilter(partitionFilter)
            .withPartitionContextAnnotationsFrom(AbstractSubjectDatabus.class);
}
/**
 * Configures the pool with consistent-hash routing, sourcing the partition context
 * from the annotations on DedupQueueServiceAuthenticatorProxy.
 */
@Override
public void configure(ServicePoolBuilder<DedupQueueService> servicePoolBuilder) {
    ConsistentHashPartitionFilter partitionFilter = new ConsistentHashPartitionFilter();
    servicePoolBuilder
            .withPartitionFilter(partitionFilter)
            .withPartitionContextAnnotationsFrom(DedupQueueServiceAuthenticatorProxy.class);
}