/**
 * Null-safe conversion of a {@link Duration} to whole milliseconds.
 *
 * @param duration the duration to convert, may be {@code null}
 * @return the duration in milliseconds, or {@code null} if {@code duration} is {@code null}
 */
private static Long toMillis(Duration duration)
{
    return (duration == null) ? null : duration.toMillis();
}
// Null-safe Duration -> milliseconds conversion; preserves null so callers can
// distinguish "not configured" from an explicit zero.
private static Long toMillis(Duration duration)
{
    if (duration != null) {
        return duration.toMillis();
    }
    return null;
}
/**
 * Creates a caching metastore whose entries expire after {@code cacheTtl} and are
 * asynchronously refreshed every {@code refreshInterval} (when that interval is
 * shorter than the TTL).
 */
public CachingHiveMetastore(ExtendedHiveMetastore delegate, ExecutorService executor, Duration cacheTtl, Duration refreshInterval, long maximumSize)
{
    this(
            delegate,
            executor,
            OptionalLong.of(cacheTtl.toMillis()),
            // a refresh interval at or beyond the TTL could never fire before the
            // entry expires, so background refresh is disabled in that case
            refreshInterval.toMillis() < cacheTtl.toMillis()
                    ? OptionalLong.of(refreshInterval.toMillis())
                    : OptionalLong.empty(),
            maximumSize);
}
/**
 * Creates a sink that discards pages, optionally simulating per-page latency.
 *
 * @param executorService executor used to schedule the artificial processing delay
 * @param pageProcessingDelay artificial delay applied to each appended page
 */
public BlackHolePageSink(ListeningScheduledExecutorService executorService, Duration pageProcessingDelay)
{
    this.executorService = requireNonNull(executorService, "executorService is null");
    requireNonNull(pageProcessingDelay, "pageProcessingDelay is null");
    this.pageProcessingDelayMillis = pageProcessingDelay.toMillis();
}
/**
 * Creates a factory for Thrift metastore clients.
 *
 * @param sslContext optional TLS context for secured metastore connections
 * @param socksProxy optional SOCKS proxy through which connections are made
 * @param timeout socket timeout applied to metastore connections
 * @param metastoreAuthentication authentication mechanism for the metastore
 */
public HiveMetastoreClientFactory(
        Optional<SSLContext> sslContext,
        Optional<HostAndPort> socksProxy,
        Duration timeout,
        HiveMetastoreAuthentication metastoreAuthentication)
{
    this.sslContext = requireNonNull(sslContext, "sslContext is null");
    this.socksProxy = requireNonNull(socksProxy, "socksProxy is null");
    // Thrift transports take an int timeout; toIntExact fails fast on overflow
    this.timeoutMillis = toIntExact(timeout.toMillis());
    this.metastoreAuthentication = requireNonNull(metastoreAuthentication, "metastoreAuthentication is null");
}
/**
 * Schedules a recurring sweep that cleans up expired transactions.
 * The first sweep runs after one interval and subsequent sweeps run with a
 * fixed delay of the same interval after each completion.
 */
private void scheduleIdleChecks(Duration idleCheckInterval, ScheduledExecutorService idleCheckExecutor)
{
    Runnable expirationSweep = () -> {
        try {
            cleanUpExpiredTransactions();
        }
        catch (Throwable t) {
            // deliberately swallow everything: an uncaught throwable would cancel
            // the periodic schedule and stop all future sweeps
            log.error(t, "Unexpected exception while cleaning up expired transactions");
        }
    };
    long intervalMillis = idleCheckInterval.toMillis();
    idleCheckExecutor.scheduleWithFixedDelay(expirationSweep, intervalMillis, intervalMillis, MILLISECONDS);
}
/**
 * Creates a source that returns {@code count} copies of {@code page}, optionally
 * simulating per-page latency.
 *
 * @param page the template page handed back to callers
 * @param count how many pages to produce; must be non-negative
 * @param executorService executor used to schedule the artificial delay
 * @param pageProcessingDelay artificial delay applied before each page is produced
 */
BlackHolePageSource(Page page, int count, ListeningScheduledExecutorService executorService, Duration pageProcessingDelay)
{
    this.page = requireNonNull(page, "page is null");
    checkArgument(count >= 0, "count is negative");
    this.pagesLeft = count;
    this.executorService = requireNonNull(executorService, "executorService is null");
    this.pageProcessingDelayInMillis = requireNonNull(pageProcessingDelay, "pageProcessingDelay is null").toMillis();
    // the single template page dominates this source's memory footprint
    this.memoryUsageBytes = page.getSizeInBytes();
}
private static Duration randomizeWaitTime(Duration waitTime) { // Randomize in [T/2, T], so wait is not near zero and the client-supplied max wait time is respected long halfWaitMillis = waitTime.toMillis() / 2; return new Duration(halfWaitMillis + ThreadLocalRandom.current().nextLong(halfWaitMillis), MILLISECONDS); } }
/**
 * Builds a recording cache sized by the client configuration.
 * In replay mode entries never expire (the recording is the source of truth);
 * otherwise entries expire after the configured recording duration.
 */
private static <K, V> Cache<K, V> createCache(HiveClientConfig hiveClientConfig)
{
    if (!hiveClientConfig.isReplay()) {
        return CacheBuilder.<K, V>newBuilder()
                .expireAfterWrite(hiveClientConfig.getRecordingDuration().toMillis(), MILLISECONDS)
                .build();
    }
    // replay serves previously recorded results, so they must never be evicted by time
    return CacheBuilder.<K, V>newBuilder()
            .build();
}
/**
 * Returns the total CPU time consumed by the query, summed across all stages.
 */
public Duration getTotalCpuTime()
{
    long totalMillis = stages.values().stream()
            .map(stage -> stage.getTotalCpuTime())
            .mapToLong(Duration::toMillis)
            .sum();
    return new Duration(totalMillis, MILLISECONDS);
}
/**
 * Creates a cache over the given network topology.
 * Successful lookups are refreshed in the background every 12 hours and expire
 * after a day; failed lookups are remembered briefly in a separate negative cache
 * so the topology provider is not hammered with repeated misses.
 */
public NetworkLocationCache(NetworkTopology networkTopology)
{
    this.networkTopology = requireNonNull(networkTopology, "networkTopology is null");
    // short-lived negative cache for hosts that could not be located
    this.negativeCache = CacheBuilder.newBuilder()
            .expireAfterWrite(NEGATIVE_CACHE_DURATION.toMillis(), MILLISECONDS)
            .build();
    // positive cache: async background refresh keeps reads non-blocking
    this.cache = CacheBuilder.newBuilder()
            .expireAfterWrite(1, DAYS)
            .refreshAfterWrite(12, HOURS)
            .build(asyncReloading(CacheLoader.from(this::locate), executor));
}
/**
 * Returns true when the client has not sent a heartbeat for this query within
 * the configured client timeout. A query that has never heartbeated is not
 * considered abandoned.
 */
private boolean isAbandoned(T query)
{
    DateTime lastHeartbeat = query.getLastHeartbeat();
    if (lastHeartbeat == null) {
        return false;
    }
    DateTime oldestAllowedHeartbeat = DateTime.now().minus(clientTimeout.toMillis());
    return lastHeartbeat.isBefore(oldestAllowedHeartbeat);
}
/**
 * Records the wall-clock round-trip time of the status request that started at
 * {@code currentRequestStartNanos} (a {@link System#nanoTime} reading).
 */
private void updateStats(long currentRequestStartNanos)
{
    long roundTripMillis = nanosSince(currentRequestStartNanos).toMillis();
    stats.statusRoundTripMillis(roundTripMillis);
}
}
/**
 * Records the wall-clock round-trip time of the request that started at
 * {@code currentRequestStartNanos} (a {@link System#nanoTime} reading).
 */
private void updateStats(long currentRequestStartNanos)
{
    stats.updateRoundTripMillis(Duration.nanosSince(currentRequestStartNanos).toMillis());
}
}
/**
 * Records the wall-clock round-trip time of the info request that started at
 * {@code currentRequestStartNanos} (a {@link System#nanoTime} reading).
 */
private void updateStats(long currentRequestStartNanos)
{
    long roundTripMillis = nanosSince(currentRequestStartNanos).toMillis();
    stats.infoRoundTripMillis(roundTripMillis);
}
/**
 * Marks the operator finished by entering a blocked state exactly once; a
 * scheduled task completes the future (unblocking it) after the configured delay.
 */
@Override
public void finish()
{
    if (this.isBlocked == NOT_BLOCKED) {
        SettableFuture<?> delayedFuture = SettableFuture.create();
        this.isBlocked = delayedFuture;
        // complete the future after the delay so downstream stops waiting
        executor.schedule(() -> delayedFuture.set(null), unblockAfter.toMillis(), TimeUnit.MILLISECONDS);
    }
}
/**
 * Returns the total CPU time consumed by this stage, summed over the stats of
 * every task. Synchronized to get a consistent snapshot of the task list.
 */
public synchronized Duration getTotalCpuTime()
{
    long cpuMillis = getAllTasks().stream()
            .map(task -> task.getTaskInfo().getStats())
            .mapToLong(taskStats -> taskStats.getTotalCpuTime().toMillis())
            .sum();
    return new Duration(cpuMillis, TimeUnit.MILLISECONDS);
}
/**
 * Waits up to {@code maxWait} for the future and returns its value.
 *
 * @throws IllegalArgumentException if the future did not complete within {@code maxWait}
 * @throws ArithmeticException if {@code maxWait} exceeds {@link Integer#MAX_VALUE} milliseconds
 */
static BufferResult getFuture(ListenableFuture<BufferResult> future, Duration maxWait)
{
    // Math.toIntExact instead of a raw (int) cast: a maxWait over ~24.8 days would
    // previously truncate silently (possibly to a negative timeout); now it fails fast
    Optional<BufferResult> bufferResult = tryGetFutureValue(future, Math.toIntExact(maxWait.toMillis()), MILLISECONDS);
    checkArgument(bufferResult.isPresent(), "bufferResult is empty");
    return bufferResult.get();
}
/**
 * Prepares a scroll continuation request for the given scroll id, keeping the
 * server-side scroll context alive for the configured timeout between fetches.
 */
public SearchScrollRequestBuilder prepareSearchScroll(String scrollId)
{
    TimeValue keepAlive = new TimeValue(scrollTimeout.toMillis());
    return client.prepareSearchScroll(scrollId).setScroll(keepAlive);
}
/**
 * Creates a new Jedis connection pool for the given Redis host using the
 * connector's configured timeout, password, and database index.
 */
private JedisPool createConsumer(HostAddress host)
{
    log.info("Creating new JedisPool for %s", host);
    // Jedis takes an int timeout; toIntExact fails fast rather than truncating
    int connectTimeoutMillis = toIntExact(redisConnectorConfig.getRedisConnectTimeout().toMillis());
    return new JedisPool(
            jedisPoolConfig,
            host.getHostText(),
            host.getPort(),
            connectTimeoutMillis,
            redisConnectorConfig.getRedisPassword(),
            redisConnectorConfig.getRedisDataBaseIndex());
}
}