Refine search
checkEnded(); Context resultCtx = resultHandler != null ? stream.vertx.getOrCreateContext() : null; File file = stream.vertx.resolveFile(filename); if (!file.exists()) { if (resultHandler != null) { resultCtx.runOnContext((v) -> resultHandler.handle(Future.failedFuture(new FileNotFoundException()))); } else { log.error("File not found: " + filename); } catch (IOException e) { if (resultHandler != null) { resultCtx.runOnContext((v) -> resultHandler.handle(Future.failedFuture(e))); } else { log.error("Failed to send file", e); Future<Long> result = Future.future(); result.setHandler(ar -> { if (ar.succeeded()) { bytesWritten += ar.result(); end(); resultCtx.runOnContext(v -> { resultHandler.handle(Future.succeededFuture()); });
private void doDeployVerticle(final String verticleName, DeploymentOptions deploymentOptions, final Handler<AsyncResult<String>> doneHandler) { final Handler<AsyncResult<String>> wrappedHandler = ar1 -> { vertx.<String>executeBlocking(fut -> { if (ar1.succeeded()) { // Tell the other nodes of the cluster about the verticle for HA purposes String deploymentID = ar1.result(); addToHA(deploymentID, verticleName, deploymentOptions); fut.complete(deploymentID); } else { fut.fail(ar1.cause()); } }, false, ar2 -> { if (doneHandler != null) { doneHandler.handle(ar2); } else if (ar2.failed()) { log.error("Failed to deploy verticle", ar2.cause()); } }); }; deploymentManager.deployVerticle(verticleName, deploymentOptions, wrappedHandler); }
/**
 * Captures the current set of deployments on the Vert.x instance at {@code pos}.
 *
 * @param pos index into the {@code vertices} array
 * @return a concurrent set holding the {@link Deployment} for every current deployment ID
 */
protected Set<Deployment> takeDeploymentSnapshot(int pos) {
  VertxInternal target = (VertxInternal) vertices[pos];
  Set<Deployment> snapshot = new ConcurrentHashSet<>();
  for (String deploymentID : target.deploymentIDs()) {
    snapshot.add(target.getDeployment(deploymentID));
  }
  return snapshot;
}
@Override public NetSocket sendFile(String filename, long offset, long length, final Handler<AsyncResult<Void>> resultHandler) { File f = vertx.resolveFile(filename); if (f.isDirectory()) { throw new IllegalArgumentException("filename must point to a file and not to a directory"); final AsyncResult<Void> res; if (future.isSuccess()) { res = Future.succeededFuture(); } else { res = Future.failedFuture(future.cause()); vertx.runOnContext(v -> resultHandler.handle(res)); }); vertx.runOnContext(v -> resultHandler.handle(Future.failedFuture(e))); } else { log.error("Failed to send file", e);
/**
 * Simulates the death of the node at index {@code pos} by invoking
 * {@code simulateKill()} on a worker thread; fails the test if the kill itself throws.
 */
protected void kill(int pos) {
  VertxInternal victim = (VertxInternal) vertices[pos];
  victim.executeBlocking(fut -> {
    try {
      victim.simulateKill();
      fut.complete();
    } catch (Exception e) {
      fut.fail(e);
    }
  }, false, res -> {
    if (res.failed()) {
      fail(res.cause());
    }
  });
}
private void runOnContextAndWait(Runnable runnable) { CountDownLatch latch = new CountDownLatch(1); // The testsuite requires that this is called on a Vert.x thread vertx.runOnContext(v -> { try { runnable.run(); } finally { latch.countDown(); } }); try { latch.await(30, TimeUnit.SECONDS); } catch (InterruptedException ignore) { } }
// Verifies search-domain resolution with ndots=2: names containing fewer than two dots
// are first tried with the configured search domain ("foo.com") appended.
@Test public void testSearchDomainWithNdots2() throws Exception {
  // Fake zone data: fully-qualified hosts under sub.foo.com, plus a bare "host2.sub"
  // record that must NOT win while ndots=2 is in effect.
  Map<String, String> records = new HashMap<>();
  records.put("host1.sub.foo.com", "127.0.0.1");
  records.put("host2.sub.foo.com", "127.0.0.2");
  records.put("host2.sub", "127.0.0.3");
  dnsServer.testResolveA(records);
  // Resolver pointed at the test DNS server, OPT disabled, search domain foo.com, ndots=2.
  VertxInternal vertx = (VertxInternal) vertx(new VertxOptions().setAddressResolverOptions(
    new AddressResolverOptions().
      addServer(dnsServerAddress.getAddress().getHostAddress() + ":" + dnsServerAddress.getPort()).
      setOptResourceEnabled(false).
      addSearchDomain("foo.com").
      setNdots(2)
  ));
  CountDownLatch latch1 = new CountDownLatch(1);
  // "host1.sub" has one dot (< ndots), so it resolves as host1.sub.foo.com -> 127.0.0.1
  vertx.resolveAddress("host1.sub", onSuccess(resolved -> {
    assertEquals("127.0.0.1", resolved.getHostAddress());
    latch1.countDown();
  }));
  awaitLatch(latch1);
  // "host2.sub" is resolved with the foo.com search domain as ndots = 2
  CountDownLatch latch2 = new CountDownLatch(1);
  vertx.resolveAddress("host2.sub", onSuccess(resolved -> {
    assertEquals("127.0.0.2", resolved.getHostAddress());
    latch2.countDown();
  }));
  awaitLatch(latch2);
}
@Test public void testCloseRemovesFromCluster() throws Exception { vertx1 = startVertx(); vertx2 = startVertx(); vertx3 = startVertx(); CountDownLatch latch1 = new CountDownLatch(1); vertx3.deployVerticle("java:" + HAVerticle1.class.getName(), new DeploymentOptions().setHa(true), ar -> { assertTrue(ar.succeeded()); assertTrue(vertx3.deploymentIDs().contains(ar.result())); latch1.countDown(); }); awaitLatch(latch1); CountDownLatch latch2 = new CountDownLatch(1); // Close vertx2 - this should not then participate in failover vertx2.close(ar -> { ((VertxInternal) vertx1).failoverCompleteHandler((nodeID, haInfo, succeeded) -> { assertTrue(succeeded); latch2.countDown(); }); ((VertxInternal) vertx3).simulateKill(); }); awaitLatch(latch2); assertTrue(vertx1.deploymentIDs().size() == 1); String depID = vertx1.deploymentIDs().iterator().next(); assertTrue(((VertxInternal) vertx1).getDeployment(depID).verticleIdentifier().equals("java:" + HAVerticle1.class.getName())); }
// Deploys 3 instances of TestVerticle3 and checks the instance counts they report over
// the event bus. NOTE(review): presumably each instance publishes its own instance count
// (3) to "instanceCount", giving 3 messages summing to 9 — confirm against TestVerticle3.
@Test public void testGetInstanceCountMultipleVerticles() throws Exception {
  AtomicInteger messageCount = new AtomicInteger(0);
  AtomicInteger totalReportedInstances = new AtomicInteger(0);
  vertx.eventBus().consumer("instanceCount", event -> {
    messageCount.incrementAndGet();
    totalReportedInstances.addAndGet((int)event.body());
    // After all 3 instances have reported, the reported counts must sum to 3 * 3 = 9.
    if(messageCount.intValue() == 3) {
      assertEquals(9, totalReportedInstances.get());
      testComplete();
    }
  });
  vertx.deployVerticle(TestVerticle3.class.getCanonicalName(), new DeploymentOptions().setInstances(3), ar -> {
    assertTrue(ar.succeeded());
  });
  await();
  // Clean up: undeploy the deployment so later tests don't see the extra instances.
  Deployment deployment = ((VertxInternal) vertx).getDeployment(vertx.deploymentIDs().iterator().next());
  CountDownLatch latch = new CountDownLatch(1);
  vertx.undeploy(deployment.deploymentID(), ar -> latch.countDown());
  awaitLatch(latch);
}
/**
 * Test fixture setup: starts a single clustered node and synchronously obtains the
 * "mymap" async multi-map from its cluster manager before any test runs.
 */
@Override
public void setUp() throws Exception {
  super.setUp();
  startNodes(1);
  clusterManager = ((VertxInternal) vertices[0]).getClusterManager();
  CountDownLatch mapReady = new CountDownLatch(1);
  clusterManager.<String, ServerID>getAsyncMultiMap("mymap", onSuccess(multiMap -> {
    map = multiMap;
    mapReady.countDown();
  }));
  // Block until the map is available so tests never observe a null map
  awaitLatch(mapReady);
}
/**
 * Checks that event-bus subscriptions registered by node 1 are cleaned up after that
 * node leaves the cluster via its cluster manager.
 */
@Test
public void testSubsRemovedForKilledNode() throws Exception {
  testSubsRemoved(latch -> {
    VertxInternal departingNode = (VertxInternal) vertices[1];
    departingNode.getClusterManager().leave(onSuccess(v -> latch.countDown()));
  });
}
/**
 * Sends {@code packet} to {@code host:port}, resolving the host name first.
 * The optional {@code handler} is notified when the send (or the resolution) completes.
 *
 * @param packet  datagram payload, must not be null
 * @param port    destination port, 0..65535
 * @param host    destination host name, must not be null
 * @param handler completion handler, may be null for fire-and-forget sends
 * @return this socket, for fluent chaining
 * @throws IllegalArgumentException if the port is out of range
 */
@Override
@SuppressWarnings("unchecked")
public DatagramSocket send(Buffer packet, int port, String host, Handler<AsyncResult<DatagramSocket>> handler) {
  Objects.requireNonNull(packet, "no null packet accepted");
  Objects.requireNonNull(host, "no null host accepted");
  if (port < 0 || port > 65535) {
    throw new IllegalArgumentException("port out of range:" + port);
  }
  context.owner().resolveAddress(host, res -> {
    if (res.succeeded()) {
      doSend(packet, new InetSocketAddress(res.result(), port), handler);
    } else if (handler != null) {
      // Fix: guard against NPE — handler is optional (fire-and-forget), but the
      // original failure path dereferenced it unconditionally.
      handler.handle(Future.failedFuture(res.cause()));
    }
  });
  if (metrics != null) {
    // NOTE(review): bytes are recorded as written even though resolution/send is still
    // in flight (and may fail) — preserved from the original; confirm this is intended.
    metrics.bytesWritten(null, new SocketAddressImpl(port, host), packet.length());
  }
  return this;
}
if (iter.hasNext()) { VerticleFactory verticleFactory = iter.next(); Future<String> fut = Future.future(); if (verticleFactory.requiresResolve()) { try { } catch (Exception e) { try { fut.fail(e); } catch (Exception ignore) { fut.complete(identifier); } catch (Exception e) { if (completionHandler != null) { completionHandler.handle(Future.failedFuture(e)); } else { if (verticleFactory.blockingCreate()) { vertx.<Verticle[]>executeBlocking(createFut -> { try { Verticle[] verticles = createVerticles(verticleFactory, identifier, options.getInstances(), cl); if (res.succeeded()) { doDeploy(identifier, options, parentContext, callingContext, completionHandler, cl, res.result()); } else {
@Override public void start(Handler<AsyncResult<Void>> resultHandler) { HAManager haManager = vertx.haManager(); setClusterViewChangedHandler(haManager); clusterManager.<String, ClusterNodeInfo>getAsyncMultiMap(SUBS_MAP_NAME, ar1 -> { if (ar1.succeeded()) { subs = ar1.result(); server = vertx.createNetServer(getServerOptions()); if (asyncResult.succeeded()) { int serverPort = getClusterPublicPort(options, server.actualPort()); String serverHost = getClusterPublicHost(options); serverID = new ServerID(serverPort, serverHost); nodeInfo = new ClusterNodeInfo(clusterManager.getNodeID(), serverID); vertx.executeBlocking(fut -> { haManager.addDataToAHAInfo(SERVER_ID_HA_KEY, new JsonObject().put("host", serverID.host).put("port", serverID.port)); fut.complete(); }, false, ar2 -> { if (ar2.succeeded()) { started = true; resultHandler.handle(Future.succeededFuture()); } else { resultHandler.handle(Future.failedFuture(ar2.cause())); resultHandler.handle(Future.failedFuture(asyncResult.cause()));
listenContext = vertx.getOrCreateContext(); listening = true; String host = address.host() != null ? address.host() : "localhost"; synchronized (vertx.sharedHttpServers()) { this.actualPort = port; // Will be updated on bind for a wildcard port id = new ServerID(port, host); HttpServerImpl shared = vertx.sharedHttpServers().get(id); if (shared == null || port == 0) { serverChannelGroup = new DefaultChannelGroup("vertx-acceptor-channels", GlobalEventExecutor.INSTANCE); ServerBootstrap bootstrap = new ServerBootstrap(); bootstrap.group(vertx.getAcceptorEventLoopGroup(), availableWorkers); applyConnectionOptions(address.path() != null, bootstrap); sslHelper.validate(vertx); bindFuture = AsyncResolveConnectHelper.doBind(vertx, address, bootstrap); bindFuture.addListener(res -> { if (res.failed()) { vertx.sharedHttpServers().remove(id); } else { Channel serverChannel = res.result(); if (serverChannel.localAddress() instanceof InetSocketAddress) { HttpServerImpl.this.actualPort = ((InetSocketAddress)serverChannel.localAddress()).getPort(); VertxMetrics metrics = vertx.metricsSPI(); this.metrics = metrics != null ? metrics.createHttpServerMetrics(options, address) : null; vertx.runOnContext(v -> listenHandler.handle(Future.failedFuture(t))); } else {
listenContext = vertx.getOrCreateContext(); registeredHandler = handler; synchronized (vertx.sharedNetServers()) { this.actualPort = socketAddress.port(); // Will be updated on bind for a wildcard port String hostOrPath = socketAddress.host() != null ? socketAddress.host() : socketAddress.path(); id = new ServerID(actualPort, hostOrPath); NetServerImpl shared = vertx.sharedNetServers().get(id); if (shared == null || actualPort == 0) { // Wildcard port will imply a new actual server each time serverChannelGroup = new DefaultChannelGroup("vertx-acceptor-channels", GlobalEventExecutor.INSTANCE); vertx.sharedNetServers().put(id, NetServerImpl.this); VertxMetrics metrics = vertx.metricsSPI(); if (metrics != null) { this.metrics = metrics.createNetServerMetrics(options, new SocketAddressImpl(id.port, id.host)); vertx.sharedNetServers().remove(id); vertx.runOnContext(v -> listenHandler.handle(Future.failedFuture(t))); } else { vertx.sharedNetServers().put(id, this); VertxMetrics metrics = vertx.metricsSPI(); this.metrics = metrics != null ? metrics.createNetServerMetrics(options, new SocketAddressImpl(id.port, id.host)) : null; actualServer.handlerManager.addHandler(new Handlers(this, handler, exceptionHandler), listenContext); listenContext.runOnContext(v -> listenHandler.handle(ares));
public static AsyncResolveConnectHelper doBind(VertxInternal vertx, SocketAddress socketAddress, ServerBootstrap bootstrap) { AsyncResolveConnectHelper asyncResolveConnectHelper = new AsyncResolveConnectHelper(); bootstrap.channelFactory(vertx.transport().serverChannelFactory(socketAddress.path() != null)); if (socketAddress.path() != null) { java.net.SocketAddress converted = vertx.transport().convert(socketAddress, true); ChannelFuture future = bootstrap.bind(converted); future.addListener(f -> { if (f.isSuccess()) { asyncResolveConnectHelper.handle(future, Future.succeededFuture(future.channel())); } else { asyncResolveConnectHelper.handle(future, Future.failedFuture(f.cause())); vertx.resolveAddress(socketAddress.host(), res -> { if (res.succeeded()) { InetSocketAddress t = new InetSocketAddress(res.result(), socketAddress.port()); ChannelFuture future = bootstrap.bind(t); future.addListener(f -> { if (f.isSuccess()) { asyncResolveConnectHelper.handle(future, Future.succeededFuture(future.channel())); } else { asyncResolveConnectHelper.handle(future, Future.failedFuture(f.cause())); asyncResolveConnectHelper.handle(null, Future.failedFuture(res.cause()));
status = ST_UNDEPLOYING; doUndeployChildren(undeployingContext, ar -> { if (ar.failed()) { reportFailure(ar.cause(), undeployingContext, completionHandler); } else { doUndeploy(undeployingContext, completionHandler); ContextImpl context = verticleHolder.context; context.runOnContext(v -> { Future<Void> stopFuture = Future.future(); AtomicBoolean failureReported = new AtomicBoolean(); stopFuture.setHandler(ar -> { deployments.remove(deploymentID); VertxMetrics metrics = vertx.metricsSPI(); if (metrics != null) { metrics.verticleUndeployed(verticleHolder.verticle); if (ar2.failed()) { log.error("Failed to run close hook", ar2.cause()); verticleHolder.verticle.stop(stopFuture); } catch (Throwable t) { stopFuture.fail(t); } finally {
/**
 * Decrements the counter by one and delivers the new value to {@code resultHandler}
 * on this Vert.x context.
 *
 * @param resultHandler receives the decremented value; must not be null
 */
@Override
public void decrementAndGet(Handler<AsyncResult<Long>> resultHandler) {
  Objects.requireNonNull(resultHandler, "resultHandler");
  Context ctx = vertx.getOrCreateContext();
  ctx.runOnContext(v -> {
    long newValue = counter.decrementAndGet();
    resultHandler.handle(Future.succeededFuture(newValue));
  });
}
public void undeployAll(Handler<AsyncResult<Void>> completionHandler) { // TODO timeout if it takes too long - e.g. async stop verticle fails to call future // We only deploy the top level verticles as the children will be undeployed when the parent is Set<String> deploymentIDs = new HashSet<>(); for (Map.Entry<String, Deployment> entry: deployments.entrySet()) { if (!entry.getValue().isChild()) { deploymentIDs.add(entry.getKey()); } } if (!deploymentIDs.isEmpty()) { AtomicInteger count = new AtomicInteger(0); for (String deploymentID : deploymentIDs) { undeployVerticle(deploymentID, ar -> { if (ar.failed()) { // Log but carry on regardless log.error("Undeploy failed", ar.cause()); } if (count.incrementAndGet() == deploymentIDs.size()) { completionHandler.handle(Future.succeededFuture()); } }); } } else { Context context = vertx.getOrCreateContext(); context.runOnContext(v -> completionHandler.handle(Future.succeededFuture())); } }