/**
 * Compute the number of instances planned for a node template.
 *
 * @param nodeTemplateId the name of the node template
 * @param topology the topology holding the template
 * @return the scaling policy's initial instance count when the node has a scalable capability, 1 otherwise
 */
private int getPlannedInstancesCount(String nodeTemplateId, Topology topology) {
    // Default to a single instance when the node exposes no scalable capability.
    int planned = 1;
    Capability scalableCapability = TopologyUtils.getScalableCapability(topology, nodeTemplateId, false);
    if (scalableCapability != null) {
        planned = TopologyUtils.getScalingPolicy(scalableCapability).getInitialInstances();
    }
    return planned;
}
/**
 * Retrieve a {@link NodeTemplate} from a topology by its name.
 *
 * @param topology the topology to search in
 * @param nodeTemplateId the name of the node template to look up
 * @return the matching node template; throws NotFoundException when no template with that name exists
 */
public static NodeTemplate getNodeTemplate(Topology topology, String nodeTemplateId) {
    // Delegate to the map-based overload using the topology id for error reporting.
    return getNodeTemplate(topology.getId(), nodeTemplateId, getNodeTemplates(topology));
}
private static void refreshNodeTemplateNameInRelationships(String oldNodeTemplateName, String newNodeTemplateName, Map<String, RelationshipTemplate> relationshipTemplates) { Map<String, String> updatedKeys = Maps.newHashMap(); for (Map.Entry<String, RelationshipTemplate> relationshipTemplateEntry : relationshipTemplates.entrySet()) { String relationshipTemplateId = relationshipTemplateEntry.getKey(); RelationshipTemplate relationshipTemplate = relationshipTemplateEntry.getValue(); if (relationshipTemplate.getTarget().equals(oldNodeTemplateName)) { relationshipTemplate.setTarget(newNodeTemplateName); String formatedOldNodeName = getRelationShipName(relationshipTemplate.getType(), oldNodeTemplateName); // if the id/name of the relationship is auto-generated we should update it also as auto-generation is <typeName+targetId> if (relationshipTemplateId.equals(formatedOldNodeName)) { // check that the new name is not already used (so we won't override another relationship)... String validNewRelationshipTemplateId = getNexAvailableName(getRelationShipName(relationshipTemplate.getType(), newNodeTemplateName), "", relationshipTemplates.keySet()); updatedKeys.put(relationshipTemplateId, validNewRelationshipTemplateId); } } } // update the relationship keys if any has been impacted for (Map.Entry<String, String> updateKeyEntry : updatedKeys.entrySet()) { RelationshipTemplate relationshipTemplate = relationshipTemplates.remove(updateKeyEntry.getKey()); relationshipTemplates.put(updateKeyEntry.getValue(), relationshipTemplate); } }
/**
 * Rename a node template of a topology and propagate the new name to everything that references it
 * (relationships, name-dependent topology data, groups and policies).
 *
 * @param topology the topology that owns the node template
 * @param nodeTemplateName the current name of the node template
 * @param newNodeTemplateName the new name to assign
 */
public static void renameNodeTemplate(Topology topology, String nodeTemplateName, String newNodeTemplateName) {
    Map<String, NodeTemplate> nodeTemplates = getNodeTemplates(topology);
    NodeTemplate renamed = getNodeTemplate(topology.getId(), nodeTemplateName, nodeTemplates);
    // Re-key the template under its new name before refreshing the referencing structures.
    renamed.setName(newNodeTemplateName);
    nodeTemplates.put(newNodeTemplateName, renamed);
    nodeTemplates.remove(nodeTemplateName);
    refreshNodeTempNameInRelationships(nodeTemplateName, newNodeTemplateName, nodeTemplates);
    updateOnNodeTemplateNameChange(nodeTemplateName, newNodeTemplateName, topology);
    updateGroupMembers(topology, renamed, nodeTemplateName, newNodeTemplateName);
    updatePolicyMembers(topology, nodeTemplateName, newNodeTemplateName);
}
/**
 * Update the runtime (deployment) topology with the persistent resource properties (e.g. a volume id)
 * carried by the monitor event, then persist the topology in the monitor index.
 *
 * @param runtimeTopo the runtime deployment topology to update and save
 * @param persistentResourceEvent the event identifying the node template to update
 * @param persistentProperties the property values to write onto the node template
 */
private void updateRuntimeTopology(DeploymentTopology runtimeTopo, PaaSInstancePersistentResourceMonitorEvent persistentResourceEvent,
        Map<String, Object> persistentProperties) {
    NodeTemplate nodeTemplate = TopologyUtils.getNodeTemplate(runtimeTopo, persistentResourceEvent.getNodeTemplateId());
    log.info("Updating Runtime topology: Storage NodeTemplate <{}.{}> to add a new volumeId", runtimeTopo.getId(),
            persistentResourceEvent.getNodeTemplateId());
    // Iterate entries instead of keySet() + get() to avoid redundant map lookups per key.
    for (Map.Entry<String, Object> property : persistentProperties.entrySet()) {
        nodeTemplate.getProperties().put(property.getKey(), getPropertyValue(property.getValue()));
        log.debug("Property [ {} ] to update: [ {} ]. New value is [ {} ]", property.getKey(),
                persistentResourceEvent.getPersistentProperties().get(property.getKey()), property.getValue());
    }
    alienMonitorDao.save(runtimeTopo);
}
// NOTE(review): this snippet appears truncated — the statement feeding the orphaned
// "(relationshipId, relationshipTemplate) -> ..." lambda and the body of the trailing
// "if" are not visible here; confirm against the full source before editing.
@Override public void process(Csar csar, Topology topology, ReplaceNodeOperation operation) {
    // Resolve the node template being replaced from the topology.
    Map<String, NodeTemplate> nodeTemplates = TopologyUtils.getNodeTemplates(topology);
    NodeTemplate oldNodeTemplate = TopologyUtils.getNodeTemplate(topology.getId(), operation.getNodeName(), nodeTemplates);
    // Registers the new node's relationships into the workflows (enclosing call not visible here).
    (relationshipId, relationshipTemplate) -> workflowBuilderService.addRelationship(topologyContext, newNodeTemplate.getName(), relationshipId));
    // Re-register relationships that target the replaced node so workflows reference the new node.
    TopologyUtils.getTargetRelationships(oldNodeTemplate.getName(), nodeTemplates).forEach(relationshipEntry -> workflowBuilderService
        .addRelationship(topologyContext, relationshipEntry.getSource().getName(), relationshipEntry.getRelationshipId()));
    if(!operation.isSkipAutoCompletion()) {
/**
 * Scale a node by updating the scalable capability's default-instances property in the runtime
 * topology, persisting it, and delegating the actual scaling to the orchestrator plugin.
 * On orchestrator failure the property change is rolled back and re-persisted before notifying
 * the caller's callback.
 * NOTE(review): the trailing extra '}' closes the enclosing class, which is not fully visible here.
 *
 * @param nodeTemplateId the id of the node template to scale
 * @param instances the delta to apply (may be negative to scale down)
 * @param callback notified of the orchestrator's success or failure
 */
private void doScaleNode(final String nodeTemplateId, final int instances, final IPaaSCallback<Object> callback, final Deployment deployment,
        final DeploymentTopology topology, NodeTemplate nodeTemplate, SecretProviderConfigurationAndCredentials secretProviderConfigurationAndCredentials) {
    final Capability capability = NodeTemplateUtils.getCapabilityByTypeOrFail(nodeTemplate, NormativeCapabilityTypes.SCALABLE);
    final int previousInitialInstances = TopologyUtils.getScalingProperty(NormativeComputeConstants.SCALABLE_DEFAULT_INSTANCES, capability);
    final int newInitialInstances = previousInitialInstances + instances;
    log.info("Scaling [ {} ] node from [ {} ] to [ {} ]. Updating runtime topology...", nodeTemplateId, previousInitialInstances, newInitialInstances);
    // Optimistically persist the new value; it is rolled back in onFailure below.
    TopologyUtils.setScalingProperty(NormativeComputeConstants.SCALABLE_DEFAULT_INSTANCES, newInitialInstances, capability);
    alienMonitorDao.save(topology);
    IOrchestratorPlugin orchestratorPlugin = orchestratorPluginService.getOrFail(deployment.getOrchestratorId());
    PaaSDeploymentContext deploymentContext = new PaaSDeploymentContext(deployment, topology, secretProviderConfigurationAndCredentials);
    orchestratorPlugin.scale(deploymentContext, nodeTemplateId, instances, new IPaaSCallback() {
        @Override
        public void onFailure(Throwable throwable) {
            log.info("Failed to scale [ {} ] node from [ {} ] to [ {} ]. rolling back to {}...", nodeTemplateId, previousInitialInstances,
                    newInitialInstances, previousInitialInstances);
            // Restore the previous scaling property and persist the rollback.
            TopologyUtils.setScalingProperty(NormativeComputeConstants.SCALABLE_DEFAULT_INSTANCES, previousInitialInstances, capability);
            alienMonitorDao.save(topology);
            callback.onFailure(throwable);
        }

        @Override
        public void onSuccess(Object data) {
            callback.onSuccess(data);
        }
    });
}
}
@Override protected void processNodeOperation(Csar csar, Topology topology, DeleteNodeOperation operation, NodeTemplate template) { Map<String, NodeTemplate> nodeTemplates = TopologyUtils.getNodeTemplates(topology); // Prepare to cleanup files (store artifacts reference in the operation and process deletion on before commit operation). Map<String, DeploymentArtifact> artifacts = template.getArtifacts(); operation.setArtifacts(artifacts); List<String> typesTobeUnloaded = Lists.newArrayList(); // Clean up dependencies of the topology removeRelationShipReferences(operation.getNodeName(), csar, topology, typesTobeUnloaded); topologyService.unloadType(topology, typesTobeUnloaded.toArray(new String[typesTobeUnloaded.size()])); // Cleanup from policies removeNodeFromPolicies(operation.getNodeName(), topology); nodeTemplates.remove(operation.getNodeName()); removeOutputs(operation.getNodeName(), topology); if (topology.getSubstitutionMapping() != null) { removeNodeTemplateSubstitutionTargetMapEntry(operation.getNodeName(), topology.getSubstitutionMapping().getCapabilities()); removeNodeTemplateSubstitutionTargetMapEntry(operation.getNodeName(), topology.getSubstitutionMapping().getRequirements()); } // group members removal TopologyUtils.updateGroupMembers(topology, template, operation.getNodeName(), null); // update the workflows workflowBuilderService.removeNode(topology, csar, operation.getNodeName()); log.debug("Removed node template [ {} ] from the topology [ {} ] .", operation.getNodeName(), topology.getId()); }
// Rollback handler for a cluster-scale request (fragment of an anonymous callback —
// the enclosing method and the statement opening "new ...() {" are not visible here).
@Override
public void onFailure(Throwable throwable) {
    log.info("Failed to scale [ {} ] node from [ {} ] to [ {} ]. rolling back to {}...", nodeTemplateId, currentInstances, expectedInstances, currentInstances);
    // Restore the previous default-instances value in the runtime topology and persist it.
    TopologyUtils.setScalingProperty(NormativeComputeConstants.SCALABLE_DEFAULT_INSTANCES, currentInstances, clusterControllerCapability);
    alienMonitorDao.save(topology);
    // Propagate the failure to the original caller.
    callback.onFailure(throwable);
}
});
// Fragment: iterate all node templates of the topology; the loop and if bodies are not visible here.
Map<String, NodeTemplate> nodeTemplates = TopologyUtils.getNodeTemplates(topology);
for (NodeTemplate nodeTemplate : nodeTemplates.values()) {
    // Only templates that belong to at least one group are of interest past this point.
    if (nodeTemplate.getGroups() != null) {
// Fragment: rename the node template, then — when parsing errors are tracked — locate the
// YAML node for the original name (enclosing method not visible here).
renameNodeTemplate(topology, nodeName, newName);
if (parsingErrors != null) {
    // presumably used to re-anchor parsing errors onto the renamed template — TODO confirm with full source
    Node node = (Node) MapUtil.get(objectToNodeMap, nodeName);
/**
 * Build a unique name for a duplicated element: "&lt;name&gt;_copy", further suffixed as needed
 * so it does not collide with any of the existing names.
 *
 * @param name the name of the element being copied
 * @param existingSet the names already in use (may be null)
 * @return a free name derived from the original
 */
private String copyName(String name, Collection<String> existingSet) {
    String base = name + "_" + "copy";
    return TopologyUtils.getNexAvailableName(base, "", safe(existingSet));
}
/**
 * Read the scaling policy (min/max/default instance counts) from a scalable capability's properties.
 *
 * @param capability the scalable capability holding the scaling properties
 * @return the scaling policy built from the capability's properties
 */
public static ScalingPolicy getScalingPolicy(Capability capability) {
    // ScalingPolicy constructor order is (min, max, initial).
    return new ScalingPolicy(getScalingProperty(NormativeComputeConstants.SCALABLE_MIN_INSTANCES, capability),
            getScalingProperty(NormativeComputeConstants.SCALABLE_MAX_INSTANCES, capability),
            getScalingProperty(NormativeComputeConstants.SCALABLE_DEFAULT_INSTANCES, capability));
}
private void fillSubstituteAttributesFromTypeAtttributes(Topology topology, NodeType substituteNodeType) { Map<String, IValue> attributes = substituteNodeType.getAttributes(); Map<String, Set<String>> outputAttributes = topology.getOutputAttributes(); if (outputAttributes != null) { for (Map.Entry<String, Set<String>> oae : outputAttributes.entrySet()) { String nodeName = oae.getKey(); NodeTemplate nodeTemplate = TopologyUtils.getNodeTemplate(topology, nodeName); NodeType nodeTemplateType = ToscaContext.getOrFail(NodeType.class, nodeTemplate.getType()); for (String attributeName : oae.getValue()) { IValue ivalue = nodeTemplateType.getAttributes().get(attributeName); // FIXME we have an issue here : if several nodes have the same attribute name, or if an attribute and a property have the same name, there // is a conflict if (ivalue != null && !attributes.containsKey(attributeName)) { attributes.put(attributeName, ivalue); } } } } }
// Fragment: prepare and persist the scaling of a cluster-controlled node (enclosing method not visible here).
scaleOperationRequest.setOperationName(AlienInterfaceTypes.CLUSTER_CONTROL_OP_SCALE);
// Compute the target count from the current default-instances value plus the requested delta.
int currentInstances = TopologyUtils.getScalingProperty(NormativeComputeConstants.SCALABLE_DEFAULT_INSTANCES, clusterControllerCapability);
int expectedInstances = currentInstances + instances;
log.info("Scaling [ {} ] node from [ {} ] to [ {} ]. Updating runtime topology...", nodeTemplateId, currentInstances, expectedInstances);
// Optimistically write the new value; the failure callback elsewhere rolls it back.
TopologyUtils.setScalingProperty(NormativeComputeConstants.SCALABLE_DEFAULT_INSTANCES, expectedInstances, clusterControllerCapability);
alienMonitorDao.save(topology);
// Rollback handler for a failed scale request (fragment of an anonymous callback —
// the enclosing method is not visible here).
@Override
public void onFailure(Throwable throwable) {
    log.info("Failed to scale [ {} ] node from [ {} ] to [ {} ]. rolling back to {}...", nodeTemplateId, previousInitialInstances, newInitialInstances, previousInitialInstances);
    // Restore the previous default-instances value and persist the rollback.
    TopologyUtils.setScalingProperty(NormativeComputeConstants.SCALABLE_DEFAULT_INSTANCES, previousInitialInstances, capability);
    alienMonitorDao.save(topology);
    // Propagate the failure to the original caller.
    callback.onFailure(throwable);
}
/**
 * Rename a node template: validate the new name, fail when it collides with an existing template,
 * apply the rename in the topology and keep the workflows in sync.
 * (The trailing extra '}' closes the enclosing class of this snippet.)
 */
@Override
public void process(Csar csar, Topology topology, RenameNodeOperation operation) {
    NameValidationUtils.validateNodeName(operation.getNewName());
    // BUGFIX: the message must report the conflicting NEW name (the one being checked),
    // not the node's current name — previously getNodeName() was passed to the placeholder.
    AlienUtils.failIfExists(topology.getNodeTemplates(), operation.getNewName(),
            "A node template with the given name {} already exists in the topology {}.", operation.getNewName(), topology.getId());
    log.debug("Renaming the Node template [ {} ] with [ {} ] in the topology [ {} ] .", operation.getNodeName(), operation.getNewName(),
            topology.getId());
    TopologyUtils.renameNodeTemplate(topology, operation.getNodeName(), operation.getNewName());
    // Propagate the rename into workflow steps that reference the node.
    workflowBuilderService.renameNode(topology, csar, operation.getNodeName(), operation.getNewName());
}
}
// Fragment: generate collision-free names for a dangling requirement's node template and its
// relationship template (enclosing method not visible here).
String danglingTemplateName = TopologyUtils.getNexAvailableName(nodeTemplate.getName() + "_" + requirementDefinition.getId(), "_", topology.getNodeTemplates().keySet());
danglingTemplate.setName(danglingTemplateName);
// NOTE(review): assumes nodeTemplate.getRelationships() is non-null here — confirm the caller
// guarantees it, otherwise this line can throw an NPE.
String danglingRelationshipTemplateName = TopologyUtils.getNexAvailableName(nodeTemplate.getName() + "_" + requirementDefinition.getId(), "_", nodeTemplate.getRelationships().keySet());
public static int getDefaultInstanceCount(Topology topology, NodeTemplate template, int multiplicator) { Capability scalableCapability = TopologyUtils.getScalableCapability(topology, template.getName(), false); int defaultInstanceCount = 1; if (scalableCapability != null) { ScalingPolicy scalingPolicy = TopologyUtils.getScalingPolicy(scalableCapability); if (!ScalingPolicy.NOT_SCALABLE_POLICY.equals(scalingPolicy)) { defaultInstanceCount = scalingPolicy.getInitialInstances(); } } // now look for the host NodeTemplate host = getImmediateHostTemplate(topology, template); if (host != null) { return getDefaultInstanceCount(topology, host, multiplicator * defaultInstanceCount); } else { return multiplicator * defaultInstanceCount; } }
@Override public void process(Csar csar, Topology topology, DuplicateNodeOperation operation) { Map<String, NodeTemplate> nodeTemplates = TopologyUtils.getNodeTemplates(topology); // Retrieve existing node template NodeTemplate nodeTemplateToDuplicate = TopologyUtils.getNodeTemplate(topology.getId(), operation.getNodeName(), nodeTemplates); // map that will contains a mapping of the duplicated node and their new names Map<String, String> duplicatedNodesNameMappings = Maps.newHashMap(); // first duplicate the node templates duplicateNodeTemplate(nodeTemplateToDuplicate, duplicatedNodesNameMappings, nodeTemplates, topology, csar); // then clean the relationships, discarding all that targets a node not in hostedNodes processRelationships(duplicatedNodesNameMappings, nodeTemplates, topology, csar); }