private static long durationToUnits(Duration duration, TimeUnit units) { // TODO(user): Handle overflow. switch (units) { case NANOSECONDS: return Durations.toNanos(duration); case MICROSECONDS: return Durations.toMicros(duration); default: return units.convert(Durations.toMillis(duration), TimeUnit.MILLISECONDS); } } }
/**
 * Extracts the server-suggested retry delay, in milliseconds, from the RetryInfo
 * trailer attached to {@code cause}.
 *
 * @param cause the throwable carrying gRPC trailers; may be {@code null}
 * @return the delay in milliseconds, or {@code -1} when none is present
 */
static long extractRetryDelay(Throwable cause) {
  if (cause == null) {
    return -1L;
  }
  Metadata trailers = Status.trailersFromThrowable(cause);
  if (trailers == null || !trailers.containsKey(KEY_RETRY_INFO)) {
    return -1L;
  }
  RetryInfo retryInfo = trailers.get(KEY_RETRY_INFO);
  if (!retryInfo.hasRetryDelay()) {
    return -1L;
  }
  return Durations.toMillis(retryInfo.getRetryDelay());
}
}
/**
 * Converts a {@code Duration} to the number of milliseconds. The result is
 * rounded towards 0 to the nearest millisecond; e.g. a duration of
 * -1 nanosecond rounds to 0.
 *
 * @deprecated Use {@link Durations#toMillis} instead.
 */
@Deprecated
public static long toMillis(Duration duration) {
  long millis = Durations.toMillis(duration);
  return millis;
}
private static long durationToUnits(Duration duration, TimeUnit units) { // TODO(user): Handle overflow. switch (units) { case NANOSECONDS: return Durations.toNanos(duration); case MICROSECONDS: return Durations.toMicros(duration); default: return units.convert(Durations.toMillis(duration), TimeUnit.MILLISECONDS); } } }
/** Creates a publisher task that writes batches of messages to a Kafka topic. */
private KafkaPublisherTask(StartRequest request) {
  super(request, "kafka", MetricsHandler.MetricName.PUBLISH_ACK_LATENCY);
  this.topic = request.getTopic();
  this.payload = LoadTestRunner.createMessage(request.getMessageSize());
  this.batchSize = request.getPublishBatchSize();
  Properties producerConfig = new Properties();
  producerConfig.put("max.block.ms", "30000");
  producerConfig.put(
      "key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  producerConfig.put(
      "value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  producerConfig.put("acks", "all");
  producerConfig.put("bootstrap.servers", request.getKafkaOptions().getBroker());
  producerConfig.put("buffer.memory", Integer.toString(1000 * 1000 * 1000)); // 1 GB
  // 10M, high enough to allow for duration to control batching
  producerConfig.put("batch.size", Integer.toString(10 * 1000 * 1000));
  producerConfig.put(
      "linger.ms", Long.toString(Durations.toMillis(request.getPublishBatchDuration())));
  this.publisher = new KafkaProducer<>(producerConfig);
}
/** Creates a subscriber task that polls messages from a Kafka topic. */
private KafkaSubscriberTask(StartRequest request) {
  super(request, "kafka", MetricsHandler.MetricName.END_TO_END_LATENCY);
  this.pollLength = Durations.toMillis(request.getKafkaOptions().getPollDuration());
  Properties consumerConfig = new Properties();
  consumerConfig.put(
      "key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
  consumerConfig.put(
      "value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
  consumerConfig.put("group.id", "SUBSCRIBER_ID");
  consumerConfig.put("enable.auto.commit", "true");
  consumerConfig.put("session.timeout.ms", "30000");
  consumerConfig.put("bootstrap.servers", request.getKafkaOptions().getBroker());
  subscriber = new KafkaConsumer<>(consumerConfig);
  subscriber.subscribe(Collections.singletonList(request.getTopic()));
}
/**
 * Extracts the retry delay suggested by the server from the RetryInfo metadata
 * attached to {@code cause}.
 *
 * @param cause the throwable carrying gRPC trailers; may be {@code null}
 * @return the retry delay in milliseconds, or {@code -1} if none was supplied
 */
static long extractRetryDelay(Throwable cause) {
  if (cause != null) {
    // Retry information, if present, is carried in the status trailers.
    Metadata trailers = Status.trailersFromThrowable(cause);
    if (trailers != null && trailers.containsKey(KEY_RETRY_INFO)) {
      RetryInfo retryInfo = trailers.get(KEY_RETRY_INFO);
      if (retryInfo.hasRetryDelay()) {
        return Durations.toMillis(retryInfo.getRetryDelay());
      }
    }
  }
  // -1 signals "no server-provided delay".
  // NOTE(review): the -1 sentinel contract is inferred from this block only — confirm at call sites.
  return -1L;
}
}
Durations.toMillis(publishBatchDuration) >= 0,
    // The guard accepts 0, so the message must say "non-negative", not "positive"
    // (previously the text contradicted the >= 0 condition).
    "--publish_batch_duration must be non-negative.");
Preconditions.checkArgument(
/** Creates a publisher task that sends messages through the Cloud Pub/Sub client library. */
private CPSPublisherTask(StartRequest request) {
  super(request, "gcloud", MetricsHandler.MetricName.PUBLISH_ACK_LATENCY);
  try {
    // Thresholds chosen to stay just under the Pub/Sub per-request limits;
    // the delay threshold comes from the load test's configured batch duration.
    BatchingSettings batchingSettings =
        BatchingSettings.newBuilder()
            .setElementCountThreshold(950L)
            .setRequestByteThreshold(9500000L)
            .setDelayThreshold(
                Duration.ofMillis(Durations.toMillis(request.getPublishBatchDuration())))
            .build();
    this.publisher =
        Publisher.defaultBuilder(TopicName.create(request.getProject(), request.getTopic()))
            .setBatchingSettings(batchingSettings)
            .build();
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
  this.payload = ByteString.copyFromUtf8(LoadTestRunner.createMessage(request.getMessageSize()));
  this.batchSize = request.getPublishBatchSize();
  this.messageSize = request.getMessageSize();
  this.id = new Random().nextInt();
}
// First response on the stream: adopt the server-dictated interval for
// client load reporting, then kick off the reporting timer.
InitialLoadBalanceResponse initialResponse = response.getInitialResponse();
loadReportIntervalMillis = Durations.toMillis(initialResponse.getClientStatsReportInterval());
scheduleNextLoadReport();
return;