@Override
public void execute(ClusterConfiguration config) {
  // A cluster cannot exist without at least one node; reject empty configurations.
  final int instanceCount = config.getInstanceConfigurationList().size();
  if (instanceCount >= 1) {
    return;
  }
  throw new ElasticsearchSetupException(String.format(
      "The number of instances should not be smaller than 1; you configured: %d",
      instanceCount));
}
@Override public void execute(ClusterConfiguration config) { String baseDir = config.getInstanceConfigurationList().get(0).getBaseDir(); try { Validate.notBlank(baseDir); new File(baseDir).getCanonicalPath(); // this should catch erroneous paths } catch (Exception e) { throw new ElasticsearchSetupException(String.format( "The value of the 'baseDir' parameter ('%1$s') is not a valid file path.", baseDir)); } }
@Override public void execute(ClusterConfiguration config) { List<Integer> ports = new ArrayList<>(); // Iterate twice, because I want to maintain the order: // HTTP ports first, then transport ports config.getInstanceConfigurationList().forEach(instanceConfig -> { ports.add(instanceConfig.getHttpPort()); }); config.getInstanceConfigurationList().forEach(instanceConfig -> { ports.add(instanceConfig.getTransportPort()); }); List<Integer> protectedPorts = ports.stream() .filter(port -> port < 1024) .collect(Collectors.toList()); if (protectedPorts.size() > 0) { throw new ElasticsearchSetupException(String.format( "The following provided or inferred ports are protected (below 1024): %s", StringUtils.join(protectedPorts, ','))); } }
@Override public void execute(ClusterConfiguration config) { List<Integer> httpPorts = new ArrayList<>(); List<Integer> transportPorts = new ArrayList<>(); config.getInstanceConfigurationList().forEach(instanceConfig -> { httpPorts.add(instanceConfig.getHttpPort()); transportPorts.add(instanceConfig.getTransportPort()); }); // create additional sets to verify that there are no duplicates within the source lists Set<Integer> httpPortsSet = new HashSet<>(httpPorts); Set<Integer> transportPortsSet = new HashSet<>(transportPorts); Set<Integer> intersection = new HashSet<>(httpPortsSet); intersection.retainAll(transportPortsSet); if (httpPortsSet.size() != httpPorts.size() || transportPortsSet.size() != transportPorts.size() || intersection.size() > 0) { throw new ElasticsearchSetupException( "We have conflicting ports in the list of HTTP ports [" + StringUtils.join(httpPorts, ',') + "] and the list of transport ports [" + StringUtils.join(transportPorts, ',') + "]"); } }
/**
 * Assembles a {@code ClusterConfiguration} from the values collected by this builder.
 * Constructor arguments carry the required collaborators; all remaining settings are
 * copied by direct field access (builder and configuration share visibility).
 *
 * @return the fully wired cluster configuration
 */
public ClusterConfiguration build() {
  ClusterConfiguration config = new ClusterConfiguration(
      instanceConfigurationList, artifactResolver, artifactInstaller, log);
  config.flavour = flavour;
  config.version = version;
  config.downloadUrl = downloadUrl;
  config.clusterName = clusterName;
  config.pathConf = pathConf;
  config.plugins = plugins;
  config.pathInitScript = pathInitScript;
  config.keepExistingData = keepExistingData;
  config.timeout = timeout;
  config.clientSocketTimeout = clientSocketTimeout;
  config.setAwait = setAwait;
  config.autoCreateIndex = autoCreateIndex;
  // Wire the back-reference: every instance must know the cluster it belongs to.
  config.getInstanceConfigurationList().forEach(c -> c.setClusterConfiguration(config));
  return config;
}
}
@Before
public void setup() {
  // Wire the mocks together before each test: the cluster config exposes the
  // log and a single-instance list, and the instance points back at the
  // cluster config (mirroring the wiring done by the real builder).
  when(config.getLog()).thenReturn(log);
  when(instanceConfig.getClusterConfiguration()).thenReturn(config);
  when(config.getInstanceConfigurationList()).thenReturn(Arrays.asList(instanceConfig));
}
@Override public void execute(ClusterConfiguration config) { // the instances have already started; // waiting just 10 seconds for them to form the cluster int timeout = 10; ElasticsearchClient client = new ElasticsearchClient.Builder() .withInstanceConfiguration(config.getInstanceConfigurationList().get(0)) .withHostname("localhost") .build(); Monitor monitor = new Monitor(client, config.getLog()); monitor.waitToStartCluster( config.getClusterName(), config.getInstanceConfigurationList().size(), timeout); } }
@Override
public void execute() throws MojoExecutionException, MojoFailureException {
  if (skip) {
    getLog().info("Skipping plugin execution");
    return;
  }
  ClusterConfiguration clusterConfig = buildClusterConfiguration();
  // Shut down every instance; a failure on one node must not prevent the
  // remaining nodes from being stopped, hence the per-instance catch-and-log.
  for (InstanceConfiguration instance : clusterConfig.getInstanceConfigurationList()) {
    try {
      getLog().info(String.format("Stopping Elasticsearch [%s]", instance));
      String instanceBaseDir = instance.getBaseDir();
      ProcessUtil.executeScript(instance, getShutdownScriptCommand(instanceBaseDir));
      getLog().info(String.format("Elasticsearch [%d] stopped", instance.getId()));
      // Remove the PID file so a later start does not pick up a stale process id.
      ProcessUtil.cleanupPid(instanceBaseDir);
    } catch (Exception e) {
      getLog().error("Exception while stopping Elasticsearch", e);
    }
  }
}
@Override public void execute(ClusterConfiguration config) { if (StringUtils.isBlank(config.getPathInitScript())) { // nothing to do; return return; } String filePath = config.getPathInitScript(); validateFile(filePath); // we'll run all commands against the first node in the cluster ElasticsearchClient client = new ElasticsearchClient.Builder() .withInstanceConfiguration(config.getInstanceConfigurationList().get(0)) .withHostname("localhost") .build(); Path path = Paths.get(filePath); if ("json".equalsIgnoreCase(FilenameUtils.getExtension(filePath))) { parseJson(client, config.getLog(), path); } else { parseScript(client, config.getLog(), path); } }
new PreStartClusterSequence().execute(clusterConfig); for (InstanceConfiguration config : clusterConfig.getInstanceConfigurationList())
List<String> hosts = config.getClusterConfiguration().getInstanceConfigurationList() .stream() .filter(config -> config != this.config)