/**
 * Verifies that every host being activated has both a cluster and a group assigned.
 *
 * @param hosts the host specs about to be activated
 * @throws IllegalArgumentException if any host lacks a cluster membership or a group
 */
private void validate(Collection<HostSpec> hosts) {
    for (HostSpec host : hosts) {
        ClusterMembership membership = host.membership().orElseThrow(
                () -> new IllegalArgumentException("Hosts must be assigned a cluster when activating, but got " + host));
        if ( ! membership.cluster().group().isPresent())
            throw new IllegalArgumentException("Hosts must be assigned a group when activating, but got " + host);
    }
}
/**
 * Returns the host resource for the given alias, allocating a host spec from the provisioner.
 * If a resource with the allocated hostname already exists, the spec's membership (if any) is
 * added to it and it is returned; otherwise a new host resource is created and registered.
 *
 * @param hostAlias the alias to allocate a host for
 * @return the existing or newly created host resource
 */
public HostResource getHost(String hostAlias) {
    HostSpec hostSpec = provisioner.allocateHost(hostAlias);
    for (HostResource candidate : hostname2host.values()) {
        if ( ! candidate.getHostname().equals(hostSpec.hostname())) continue;
        hostSpec.membership().ifPresent(candidate::addClusterMembership);
        return candidate;
    }
    return addNewHost(hostSpec);
}
public Map<HostResource, ClusterMembership> allocateHosts(ClusterSpec cluster, Capacity capacity, int groups, DeployLogger logger) { List<HostSpec> allocatedHosts = provisioner.prepare(cluster, capacity, groups, new ProvisionDeployLogger(logger)); // TODO: Even if HostResource owns a set of memberships, we need to return a map because the caller needs the current membership. Map<HostResource, ClusterMembership> retAllocatedHosts = new LinkedHashMap<>(); for (HostSpec spec : allocatedHosts) { // This is needed for single node host provisioner to work in unit tests for hosted vespa applications. HostResource host = getExistingHost(spec).orElseGet(() -> addNewHost(spec)); retAllocatedHosts.put(host, spec.membership().orElse(null)); if (! host.getFlavor().isPresent()) { host.setFlavor(spec.flavor()); log.log(DEBUG, () -> "Host resource " + host.getHostname() + " had no flavor, setting to " + spec.flavor()); } } retAllocatedHosts.keySet().forEach(host -> log.log(DEBUG, () -> "Allocated host " + host.getHostname() + " with flavor " + host.getFlavor())); return retAllocatedHosts; }
/** * Returns the input nodes with the changes resulting from applying the settings in hosts to the given list of nodes. */ private List<Node> updateFrom(Collection<HostSpec> hosts, List<Node> nodes) { List<Node> updated = new ArrayList<>(); for (Node node : nodes) { HostSpec hostSpec = getHost(node.hostname(), hosts); node = hostSpec.membership().get().retired() ? node.retire(nodeRepository.clock().instant()) : node.unretire(); node = node.with(node.allocation().get().with(hostSpec.membership().get())); if (hostSpec.flavor().isPresent()) // Docker nodes may change flavor node = node.with(hostSpec.flavor().get()); updated.add(node); } return updated; }
/**
 * Returns a copy of the given host spec with its cluster membership marked as retired.
 * Assumes the spec has a membership (callers must guarantee this).
 *
 * @param host the host spec to retire
 * @return a new host spec identical to the input except for the retired membership
 */
private HostSpec retire(HostSpec host) {
    ClusterMembership retiredMembership = host.membership().get().retire();
    return new HostSpec(host.hostname(), host.aliases(), host.flavor(),
                        Optional.of(retiredMembership), host.version());
}
/**
 * Creates a host resource from the given spec, registers it in the hostname index,
 * and returns it. The spec's flavor and (optional) cluster membership are applied
 * to the new resource.
 *
 * @param hostSpec the spec describing the host to add
 * @return the newly created and registered host resource
 */
private HostResource addNewHost(HostSpec hostSpec) {
    Host newHost = Host.createHost(this, hostSpec.hostname());
    HostResource resource = new HostResource(newHost, hostSpec.version());
    resource.setFlavor(hostSpec.flavor());
    hostSpec.membership().ifPresent(resource::addClusterMembership);
    hostname2host.put(newHost.getHostname(), resource);
    log.log(DEBUG, () -> "Added new host resource for " + newHost.getHostname() + " with flavor " + resource.getFlavor());
    return resource;
}
/**
 * Serializes the given host spec into the slime cursor: hostname always, and
 * membership (with its cluster's vespa version), flavor, and current vespa
 * version only when present.
 *
 * @param host   the host spec to serialize
 * @param cursor the slime cursor to write fields into
 */
private void toSlime(HostSpec host, Cursor cursor) {
    cursor.setString(hostSpecHostName, host.hostname());
    Optional<ClusterMembership> membership = host.membership();
    if (membership.isPresent()) {
        cursor.setString(hostSpecMembership, membership.get().stringValue());
        cursor.setString(hostSpecVespaVersion, membership.get().cluster().vespaVersion().toFullString());
    }
    host.flavor().ifPresent(f -> cursor.setString(hostSpecFlavor, f.name()));
    host.version().ifPresent(v -> cursor.setString(hostSpecCurrentVespaVersion, v.toFullString()));
}