@SuppressWarnings("unchecked") public <T>T getEvent(String eventName, Class eventClass, Predicate<T> predicate, long timeout){ long start = System.currentTimeMillis(); while (System.currentTimeMillis() - start < timeout){ //the reversing is for starting with the newest events List<T> events = Lists.reverse(getEventsOfType(eventName, eventClass)); for (T event: events) { if(predicate.test(event)){ return event; } } new Sleeper().sleepNoException(10); } logger.error("The event {} is not found after {} ms", eventName, timeout); return null; }
@Override
public RandomStringResponse handleRequest(GetRandomStringQuery request, OrangeContext ctx) throws RpcCallException {
    // Deliberately stall for five seconds before answering, then hand back
    // the default (empty) response instance.
    Sleeper sleeper = new Sleeper();
    sleeper.sleepNoException(5000);
    return RandomStringResponse.getDefaultInstance();
}
@Override
public void run() {
    // Retry service registration with exponential back-off until it succeeds.
    if (isRegistered()) {
        return;
    }
    long sleepDuration = 1000;
    while (!isRegistered.get()) {
        try {
            attemptRegistration();
            if (isRegistered.get()) {
                break;
            }
        } catch (Exception ex) {
            // Expected while dependencies come up; log and keep retrying.
            logger.warn("Caught exception attempting service registration", ex);
        } catch (Throwable t) {
            // Errors are not recoverable here; surface them to the thread's handler.
            logger.error("Caught throwable attempting service registration", t);
            throw t;
        }
        // FIX: sleep on every failed attempt, including ones that threw — the
        // previous version skipped the sleep after an exception and busy-spun.
        // Also cap the back-off so the delay cannot grow without bound.
        sleeper.sleepNoException(sleepDuration);
        sleepDuration = Math.min((long) (sleepDuration * 1.5), 60_000L);
    }
}
public int getExposedPortFromDocker(String serviceName, int internalPort) {
    // Resolve the host-side port docker mapped onto internalPort by inspecting
    // the container that exposes it. Returns -1 if no mapping can be found.
    Sleeper sleeper = new Sleeper();
    List<String> command = of("bash", "-c",
            "docker inspect `docker ps | grep -w '" + internalPort + "/tcp' | cut -f 1 -d ' '`");
    String dockerJson = null;
    // Retry for up to a minute: the container may not be up yet.
    for (int attempt = 0; attempt < 60; attempt++) {
        dockerJson = processUtil.runProcess(command);
        if (dockerJson.startsWith("[")) {
            break;
        }
        sleeper.sleepNoException(1000);
    }
    int hostPort = -1;
    if (dockerJson != null && dockerJson.startsWith("[")) {
        hostPort = parseExposedPort(dockerJson, internalPort);
    }
    if (hostPort == -1) {
        logger.error("Could not determine host port mapping for {}, is it configured " +
                "for port {} internally, and also exposed?", serviceName, internalPort);
    }
    return hostPort;
}
/**
 * Publish an event to kafka. We create one publisher per topic.
 *
 * @param topic The topic to publish under
 * @param key   The key for the event
 * @param event The event to publish
 */
public void publishEventWithKey(String topic, String key, Message event) {
    // FIX: computeIfAbsent replaces the previous check-then-put, which could
    // create two publishers for the same topic under concurrent callers.
    KafkaPublisher publisher =
            topicToPublisher.computeIfAbsent(topic, t -> factory.newBuilder(t).build());
    String jsonEvent = ProtobufUtil.protobufToJson(event).toString();
    boolean isPublished = publisher.publishSyncWithKey(key, jsonEvent);
    if (isPublished) {
        logger.info("Published event: {}", jsonEvent);
        // Pause after a successful publish — presumably to give downstream
        // consumers time to process; sleepAfterPublish is configured elsewhere.
        new Sleeper().sleepNoException(sleepAfterPublish);
    } else {
        logger.warn("Publishing event message {} to Kafka topic {} failed", jsonEvent, topic);
    }
}
@Override public void run() { if (serviceName == null) { throw new IllegalStateException("Service name was not set"); } if (StringUtils.isBlank(serviceProps.getRegistryServer())) { logger.error("registryServer was not specified"); return; } else { logger.info("Will use consul server at {}", serviceProps.getRegistryServer()); } //don't start polling until we are properly initialized, even if the initial result was empty List<ConsulHealthEntry> instances = loadCurrentHealthList(); while (instances.isEmpty()) { sleeper.sleepNoException(1000); instances = loadCurrentHealthList(); logger.debug("Received {} health entries for {}", instances.size(), serviceName); } reportInitialServicesList(instances); //TODO: detect if multiple AZs; if multiple AZs, determine AZ sort order //TODO: instances = sortByAvailabilityZone(instances); while (true) { watchForUpdates(); if (shutdownSemaphore.tryAcquire()) { logger.debug("Shutdown semaphore acquired"); break; } } }
private void initialize() throws Exception {
    // Wire up method handlers, register the service, then block until both the
    // registration and a load-balancer instance for this service are available.
    buildMethodHandlers(serviceName.replaceAll("-", "_"));
    registrationManager.setRegisteredHandlers(methodHandlers.getMethodHandlers());
    registrationManager.register();
    messageHandler.setServiceName(serviceName);
    messageHandler.start();
    // FIX: reuse one Sleeper instead of allocating a new one per poll iteration.
    Sleeper sleeper = new Sleeper();
    while (!registrationManager.isRegistered()) {
        logger.info("Waiting for service registration of {}", serviceName);
        sleeper.sleepNoException(100);
    }
    healthCheckManager.initialize();
    loadBalancerFactory = injector.getInstance(LoadBalancerFactory.class);
    loadBalancerFactory.getLoadBalancer(serviceName).waitForServiceInstance();
}
// Exponential back-off: wait for the current delay, then grow the next one by 50%.
// NOTE(review): fragment — enclosing loop is not visible here; no cap is applied at this point.
sleeper.sleepNoException(sleepDuration); sleepDuration = (long) (sleepDuration * 1.5);
// Verifies retry scheduling: record1 (offset 0) is added with two later records, and
// processingEnded is reported for offset 0 without the queue advancing. Expectation:
// record1 is executed once initially and once after each of the two retry windows —
// three times total — while offsets 1 and 2 are never executed.
// NOTE(review): the 100 passed to the queue is presumably the retry delay in ms, and the
// 120 ms sleeps are sized to let each retry fire — timing-sensitive; confirm against
// OffsetBlockingMessageQueue before changing either value.
@Test public void queue_processingEnded_retryScheduled() { messageQueue = new OffsetBlockingMessageQueue(messageExecutor, 100); ConsumerRecord record1 = new ConsumerRecord<>(topic, 0, 0, defaultKey, defaultValue); messageQueue.add(record1); messageQueue.add(new ConsumerRecord<>(topic, 0, 1, defaultKey, defaultValue)); messageQueue.add(new ConsumerRecord<>(topic, 0, 2, defaultKey, defaultValue)); messageQueue.processingEnded(new KafkaTopicInfo(topic, 0, 0, defaultKey)); new Sleeper().sleepNoException(120); messageQueue.processingEnded(new KafkaTopicInfo(topic, 0, 0, defaultKey)); new Sleeper().sleepNoException(120); ArgumentCaptor<ConsumerRecord> captor = ArgumentCaptor.forClass(ConsumerRecord.class); verify(messageExecutor, times(3)).execute(captor.capture()); assertThat(captor.getAllValues().get(0)).isEqualTo(record1); assertThat(captor.getAllValues().get(1)).isEqualTo(record1); assertThat(captor.getAllValues().get(2)).isEqualTo(record1); }
// Clear the one-shot warning flag, back off, and lengthen the next wait via the helper.
// NOTE(review): fragment — the enclosing retry loop is not visible here.
giveWarning = false; sleeper.sleepNoException(sleepTime); sleepTime = increaseSleepTime(sleepTime);
// Producer loop: every 250 ms, send a SayHelloToCmd keyed by a random 5-char ASCII
// string from `ping` to `pong`, counting sends in sentMessages, until produceMessages
// is cleared. Any Throwable stops the loop and is logged (deliberately not rethrown —
// this keeps a failed producer thread from killing the enclosing test harness).
// NOTE(review): the trailing "} });" closes an enclosing anonymous class/lambda whose
// start is outside this view.
@Override public void run() { OrangeContext context = new OrangeContext(); Sleeper sleeper = new Sleeper(); try { while (produceMessages.get()) { String key = RandomStringUtils.randomAscii(5); SayHelloToCmd payload = SayHelloToCmd.newBuilder().setName(key).build(); Message request = Messages.requestFor(ping, pong, key, payload, context); producer.send(request); sentMessages.incrementAndGet(); sleeper.sleepNoException(250); } } catch (Throwable t) { logger.error("Exception in producer loop", t); } } });