Diffstat (limited to 'azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling')
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/__init__.py  750
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/artifacts.py  44
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/capabilities.py  220
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/constraints.py  144
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/copy.py  32
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/data_types.py  514
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/functions.py  681
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/interfaces.py  530
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/parameters.py  230
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/policies.py  79
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/requirements.py  364
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/substitution_mappings.py  167
12 files changed, 3755 insertions, 0 deletions
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/__init__.py
new file mode 100644
index 0000000..d960e05
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/__init__.py
@@ -0,0 +1,750 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Creates ARIA service template models based on the TOSCA presentation.
+
+Relies on many helper methods in the presentation classes.
+"""
+
+#pylint: disable=unsubscriptable-object
+
+import os
+import re
+from types import FunctionType
+from datetime import datetime
+
+from ruamel import yaml
+
+from aria.parser.validation import Issue
+from aria.utils.formatting import string_list_as_string
+from aria.utils.collections import (StrictDict, OrderedDict)
+from aria.orchestrator import WORKFLOW_DECORATOR_RESERVED_ARGUMENTS
+from aria.modeling.models import (Type, ServiceTemplate, NodeTemplate,
+ RequirementTemplate, RelationshipTemplate, CapabilityTemplate,
+ GroupTemplate, PolicyTemplate, SubstitutionTemplate,
+ SubstitutionTemplateMapping, InterfaceTemplate, OperationTemplate,
+ ArtifactTemplate, Metadata, Input, Output, Property,
+ Attribute, Configuration, PluginSpecification)
+
+from .parameters import coerce_parameter_value
+from .constraints import (Equal, GreaterThan, GreaterOrEqual, LessThan, LessOrEqual, InRange,
+ ValidValues, Length, MinLength, MaxLength, Pattern)
+from ..data_types import coerce_value
+
+
+# Matches the first un-escaped ">"
+# See: http://stackoverflow.com/a/11819111/849021
+IMPLEMENTATION_PREFIX_REGEX = re.compile(r'(?<!\\)(?:\\\\)*>')
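+# Editorial illustration (not part of the original module), using a hypothetical
+# implementation string: the regex is applied with ``split(string, 1)``, so
+# ``IMPLEMENTATION_PREFIX_REGEX.split('my_plugin > my_module.my_task', 1)`` yields
+# ``['my_plugin ', ' my_module.my_task']``, while an escaped '\>' does not split.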
+
+
+def create_service_template_model(context): # pylint: disable=too-many-locals,too-many-branches
+ model = ServiceTemplate(created_at=datetime.now(),
+ main_file_name=os.path.basename(str(context.presentation.location)))
+
+ model.description = context.presentation.get('service_template', 'description', 'value')
+
+ # Metadata
+ metadata = context.presentation.get('service_template', 'metadata')
+ if metadata is not None:
+ create_metadata_models(context, model, metadata)
+
+ # Types
+ model.node_types = Type(variant='node')
+ create_types(context,
+ model.node_types,
+ context.presentation.get('service_template', 'node_types'))
+ model.group_types = Type(variant='group')
+ create_types(context,
+ model.group_types,
+ context.presentation.get('service_template', 'group_types'))
+ model.policy_types = Type(variant='policy')
+ create_types(context,
+ model.policy_types,
+ context.presentation.get('service_template', 'policy_types'))
+ model.relationship_types = Type(variant='relationship')
+ create_types(context,
+ model.relationship_types,
+ context.presentation.get('service_template', 'relationship_types'))
+ model.capability_types = Type(variant='capability')
+ create_types(context,
+ model.capability_types,
+ context.presentation.get('service_template', 'capability_types'))
+ model.interface_types = Type(variant='interface')
+ create_types(context,
+ model.interface_types,
+ context.presentation.get('service_template', 'interface_types'))
+ model.artifact_types = Type(variant='artifact')
+ create_types(context,
+ model.artifact_types,
+ context.presentation.get('service_template', 'artifact_types'))
+
+ # Topology template
+ topology_template = context.presentation.get('service_template', 'topology_template')
+ if topology_template is not None:
+ model.inputs.update(
+ create_input_models_from_values(topology_template._get_input_values(context)))
+ model.outputs.update(
+ create_output_models_from_values(topology_template._get_output_values(context)))
+
+ # Plugin specifications
+ policies = context.presentation.get('service_template', 'topology_template', 'policies')
+ if policies:
+ for policy in policies.itervalues():
+ role = model.policy_types.get_descendant(policy.type).role
+ if role == 'plugin':
+ plugin_specification = create_plugin_specification_model(context, policy)
+ model.plugin_specifications[plugin_specification.name] = plugin_specification
+ elif role == 'workflow':
+ operation_template = create_workflow_operation_template_model(context,
+ model, policy)
+ model.workflow_templates[operation_template.name] = operation_template
+
+ # Node templates
+ node_templates = context.presentation.get('service_template', 'topology_template',
+ 'node_templates')
+ if node_templates:
+ for node_template in node_templates.itervalues():
+ node_template_model = create_node_template_model(context, model, node_template)
+ model.node_templates[node_template_model.name] = node_template_model
+ for node_template in node_templates.itervalues():
+ fix_node_template_model(context, model, node_template)
+
+ # Group templates
+ groups = context.presentation.get('service_template', 'topology_template', 'groups')
+ if groups:
+ for group in groups.itervalues():
+ group_template_model = create_group_template_model(context, model, group)
+ model.group_templates[group_template_model.name] = group_template_model
+
+ # Policy templates
+ policies = context.presentation.get('service_template', 'topology_template', 'policies')
+ if policies:
+ for policy in policies.itervalues():
+ policy_template_model = create_policy_template_model(context, model, policy)
+ model.policy_templates[policy_template_model.name] = policy_template_model
+
+ # Substitution template
+ substitution_mappings = context.presentation.get('service_template', 'topology_template',
+ 'substitution_mappings')
+ if substitution_mappings:
+ model.substitution_template = create_substitution_template_model(context, model,
+ substitution_mappings)
+
+ return model
+
+
+def create_metadata_models(context, service_template, metadata):
+ service_template.meta_data['template_name'] = Metadata(name='template_name',
+ value=metadata.template_name)
+ service_template.meta_data['template_author'] = Metadata(name='template_author',
+ value=metadata.template_author)
+ service_template.meta_data['template_version'] = Metadata(name='template_version',
+ value=metadata.template_version)
+ custom = metadata.custom
+ if custom:
+ for name, value in custom.iteritems():
+ service_template.meta_data[name] = Metadata(name=name,
+ value=value)
+
+
+def create_node_template_model(context, service_template, node_template):
+ node_type = node_template._get_type(context)
+ node_type = service_template.node_types.get_descendant(node_type._name)
+ model = NodeTemplate(name=node_template._name, type=node_type)
+
+ if node_template.description:
+ model.description = node_template.description.value
+
+ if node_template.directives:
+ model.directives = node_template.directives
+
+ model.properties.update(create_property_models_from_values(
+ template_properties=node_template._get_property_values(context)))
+ model.attributes.update(create_attribute_models_from_values(
+ template_attributes=node_template._get_attribute_default_values(context)))
+
+ create_interface_template_models(context, service_template, model.interface_templates,
+ node_template._get_interfaces(context))
+
+ artifacts = node_template._get_artifacts(context)
+ if artifacts:
+ for artifact_name, artifact in artifacts.iteritems():
+ model.artifact_templates[artifact_name] = \
+ create_artifact_template_model(context, service_template, artifact)
+
+ capabilities = node_template._get_capabilities(context)
+ if capabilities:
+ for capability_name, capability in capabilities.iteritems():
+ model.capability_templates[capability_name] = \
+ create_capability_template_model(context, service_template, capability)
+
+ if node_template.node_filter:
+ model.target_node_template_constraints = []
+ create_node_filter_constraints(context, node_template.node_filter,
+ model.target_node_template_constraints)
+
+ return model
+
+
+def fix_node_template_model(context, service_template, node_template):
+ # Requirements have to be created after all node templates have been created, because
+ # requirements might reference another node template
+ model = service_template.node_templates[node_template._name]
+ requirements = node_template._get_requirements(context)
+ if requirements:
+ for _, requirement in requirements:
+ model.requirement_templates.append(create_requirement_template_model(context,
+ service_template,
+ requirement))
+
+
+def create_group_template_model(context, service_template, group):
+ group_type = group._get_type(context)
+ group_type = service_template.group_types.get_descendant(group_type._name)
+ model = GroupTemplate(name=group._name,
+ type=group_type)
+
+ if group.description:
+ model.description = group.description.value
+
+ model.properties.update(create_property_models_from_values(group._get_property_values(context)))
+
+ create_interface_template_models(context, service_template, model.interface_templates,
+ group._get_interfaces(context))
+ members = group.members
+ if members:
+ for member in members:
+ node_template = service_template.node_templates[member]
+ assert node_template
+ model.node_templates.append(node_template)
+
+ return model
+
+
+def create_policy_template_model(context, service_template, policy):
+ policy_type = policy._get_type(context)
+ policy_type = service_template.policy_types.get_descendant(policy_type._name)
+ model = PolicyTemplate(name=policy._name,
+ type=policy_type)
+
+ if policy.description:
+ model.description = policy.description.value
+
+ model.properties.update(
+ create_property_models_from_values(policy._get_property_values(context)))
+
+ node_templates, groups = policy._get_targets(context)
+ if node_templates:
+ for target in node_templates:
+ node_template = service_template.node_templates[target._name]
+ assert node_template
+ model.node_templates.append(node_template)
+ if groups:
+ for target in groups:
+ group_template = service_template.group_templates[target._name]
+ assert group_template
+ model.group_templates.append(group_template)
+
+ return model
+
+
+def create_requirement_template_model(context, service_template, requirement):
+ model = {'name': requirement._name}
+
+ node, node_variant = requirement._get_node(context)
+ if node is not None:
+ if node_variant == 'node_type':
+ node_type = service_template.node_types.get_descendant(node._name)
+ model['target_node_type'] = node_type
+ else:
+ node_template = service_template.node_templates[node._name]
+ model['target_node_template'] = node_template
+
+ capability, capability_variant = requirement._get_capability(context)
+ if capability is not None:
+ if capability_variant == 'capability_type':
+ capability_type = \
+ service_template.capability_types.get_descendant(capability._name)
+ model['target_capability_type'] = capability_type
+ else:
+ model['target_capability_name'] = capability._name
+
+ model = RequirementTemplate(**model)
+
+ if requirement.node_filter:
+ model.target_node_template_constraints = []
+ create_node_filter_constraints(context, requirement.node_filter,
+ model.target_node_template_constraints)
+
+ relationship = requirement.relationship
+ if relationship is not None:
+ model.relationship_template = \
+ create_relationship_template_model(context, service_template, relationship)
+ model.relationship_template.name = requirement._name
+
+ return model
+
+
+def create_relationship_template_model(context, service_template, relationship):
+ relationship_type, relationship_type_variant = relationship._get_type(context)
+ if relationship_type_variant == 'relationship_type':
+ relationship_type = service_template.relationship_types.get_descendant(
+ relationship_type._name)
+ model = RelationshipTemplate(type=relationship_type)
+ else:
+ relationship_template = relationship_type
+ relationship_type = relationship_template._get_type(context)
+ relationship_type = service_template.relationship_types.get_descendant(
+ relationship_type._name)
+ model = RelationshipTemplate(type=relationship_type)
+ if relationship_template.description:
+ model.description = relationship_template.description.value
+
+ create_parameter_models_from_assignments(model.properties,
+ relationship.properties,
+ model_cls=Property)
+ create_interface_template_models(context, service_template, model.interface_templates,
+ relationship.interfaces)
+
+ return model
+
+
+def create_capability_template_model(context, service_template, capability):
+ capability_type = capability._get_type(context)
+ capability_type = service_template.capability_types.get_descendant(capability_type._name)
+ model = CapabilityTemplate(name=capability._name,
+ type=capability_type)
+
+ capability_definition = capability._get_definition(context)
+ if capability_definition.description:
+ model.description = capability_definition.description.value
+ occurrences = capability_definition.occurrences
+ if occurrences is not None:
+ model.min_occurrences = occurrences.value[0]
+ if occurrences.value[1] != 'UNBOUNDED':
+ model.max_occurrences = occurrences.value[1]
+
+ valid_source_types = capability_definition.valid_source_types
+ if valid_source_types:
+ for valid_source_type in valid_source_types:
+ # TODO: handle shortcut type names
+ node_type = service_template.node_types.get_descendant(valid_source_type)
+ model.valid_source_node_types.append(node_type)
+
+ create_parameter_models_from_assignments(model.properties,
+ capability.properties,
+ model_cls=Property)
+
+ return model
+
+
+def create_interface_template_model(context, service_template, interface):
+ interface_type = interface._get_type(context)
+ interface_type = service_template.interface_types.get_descendant(interface_type._name)
+ model = InterfaceTemplate(name=interface._name, type=interface_type)
+
+ if interface_type.description:
+ model.description = interface_type.description
+
+ create_parameter_models_from_assignments(model.inputs, interface.inputs, model_cls=Input)
+
+ operations = interface.operations
+ if operations:
+ for operation_name, operation in operations.iteritems():
+ model.operation_templates[operation_name] = \
+ create_operation_template_model(context, service_template, operation)
+
+ return model if model.operation_templates else None
+
+
+def create_operation_template_model(context, service_template, operation):
+ model = OperationTemplate(name=operation._name)
+
+ if operation.description:
+ model.description = operation.description.value
+
+ implementation = operation.implementation
+ if implementation is not None:
+ primary = implementation.primary
+ extract_implementation_primary(context, service_template, operation, model, primary)
+ relationship_edge = operation._get_extensions(context).get('relationship_edge')
+ if relationship_edge is not None:
+ if relationship_edge == 'source':
+ model.relationship_edge = False
+ elif relationship_edge == 'target':
+ model.relationship_edge = True
+
+ dependencies = implementation.dependencies
+ configuration = OrderedDict()
+ if dependencies:
+ for dependency in dependencies:
+ key, value = split_prefix(dependency)
+ if key is not None:
+ # Special ARIA prefix: signifies configuration parameters
+
+ # Parse as YAML
+ try:
+ value = yaml.load(value)
+ except yaml.parser.MarkedYAMLError as e:
+ context.validation.report(
+ 'YAML parser {0} in operation configuration: {1}'
+ .format(e.problem, value),
+ locator=implementation._locator,
+ level=Issue.FIELD)
+ continue
+
+ # Coerce to intrinsic functions, if there are any
+ value = coerce_parameter_value(context, implementation, None, value).value
+
+ # Support dot-notation nesting
+ set_nested(configuration, key.split('.'), value)
+ else:
+ if model.dependencies is None:
+ model.dependencies = []
+ model.dependencies.append(dependency)
+
+ # Convert configuration to Configuration models
+ for key, value in configuration.iteritems():
+ model.configurations[key] = Configuration.wrap(key, value,
+ description='Operation configuration.')
+
+ create_parameter_models_from_assignments(model.inputs, operation.inputs, model_cls=Input)
+ return model
+
+
+def create_artifact_template_model(context, service_template, artifact):
+ artifact_type = artifact._get_type(context)
+ artifact_type = service_template.artifact_types.get_descendant(artifact_type._name)
+ model = ArtifactTemplate(name=artifact._name,
+ type=artifact_type,
+ source_path=artifact.file)
+
+ if artifact.description:
+ model.description = artifact.description.value
+
+ model.target_path = artifact.deploy_path
+
+ repository = artifact._get_repository(context)
+ if repository is not None:
+ model.repository_url = repository.url
+ credential = repository._get_credential(context)
+ if credential:
+ model.repository_credential = {}
+ for k, v in credential.iteritems():
+ model.repository_credential[k] = v
+
+ model.properties.update(
+ create_property_models_from_values(artifact._get_property_values(context)))
+
+ return model
+
+
+def create_substitution_template_model(context, service_template, substitution_mappings):
+ node_type = service_template.node_types.get_descendant(substitution_mappings.node_type)
+ model = SubstitutionTemplate(node_type=node_type)
+
+ capabilities = substitution_mappings.capabilities
+ if capabilities:
+ for mapped_capability_name, capability in capabilities.iteritems():
+ name = 'capability.' + mapped_capability_name
+ node_template_model = service_template.node_templates[capability.node_template]
+ capability_template_model = \
+ node_template_model.capability_templates[capability.capability]
+ model.mappings[name] = \
+ SubstitutionTemplateMapping(name=name,
+ capability_template=capability_template_model)
+
+ requirements = substitution_mappings.requirements
+ if requirements:
+ for mapped_requirement_name, requirement in requirements.iteritems():
+ name = 'requirement.' + mapped_requirement_name
+ node_template_model = service_template.node_templates[requirement.node_template]
+ requirement_template_model = None
+ for a_model in node_template_model.requirement_templates:
+ if a_model.name == requirement.requirement:
+ requirement_template_model = a_model
+ break
+ model.mappings[name] = \
+ SubstitutionTemplateMapping(name=name,
+ requirement_template=requirement_template_model)
+
+ return model
+
+
+def create_plugin_specification_model(context, policy):
+ properties = policy.properties
+
+ def get(name, default=None):
+ prop = properties.get(name)
+ return prop.value if prop is not None else default
+
+ model = PluginSpecification(name=policy._name,
+ version=get('version'),
+ enabled=get('enabled', True))
+
+ return model
+
+
+def create_workflow_operation_template_model(context, service_template, policy):
+ model = OperationTemplate(name=policy._name)
+    # Because these fields are back-populated, they are only filled in upon commit; without the
+    # explicit assignment below, previous code exhibited a weird (temporary) behavior in which
+    # service_template.workflow_templates was a dict whose key for this model was None.
+ service_template.workflow_templates[model.name] = model
+
+ if policy.description:
+ model.description = policy.description.value
+
+ properties = policy._get_property_values(context)
+ for prop_name, prop in properties.iteritems():
+ if prop_name == 'implementation':
+ model.function = prop.value
+ else:
+ input_model = create_parameter_model_from_value(prop, prop_name, model_cls=Input)
+ input_model.required = prop.required
+ model.inputs[prop_name] = input_model
+
+ used_reserved_names = WORKFLOW_DECORATOR_RESERVED_ARGUMENTS.intersection(model.inputs.keys())
+ if used_reserved_names:
+ context.validation.report('using reserved arguments in workflow policy "{0}": {1}'
+ .format(
+ policy._name,
+ string_list_as_string(used_reserved_names)),
+ locator=policy._locator,
+ level=Issue.EXTERNAL)
+ return model
+
+
+#
+# Utils
+#
+
+def create_types(context, root, types):
+ if types is None:
+ return
+
+ def added_all():
+ for name in types:
+ if root.get_descendant(name) is None:
+ return False
+ return True
+
+ while not added_all():
+ for name, the_type in types.iteritems():
+ if root.get_descendant(name) is None:
+ parent_type = the_type._get_parent(context)
+ model = Type(name=the_type._name,
+ role=the_type._get_extension('role'))
+ if the_type.description:
+ model.description = the_type.description.value
+ if parent_type is None:
+ model.parent = root
+ model.variant = root.variant
+ root.children.append(model)
+ else:
+ container = root.get_descendant(parent_type._name)
+ if container is not None:
+ model.parent = container
+ model.variant = container.variant
+ container.children.append(model)
+
+
+def create_input_models_from_values(template_inputs):
+ model_inputs = {}
+ if template_inputs:
+ for template_input_name, template_input in template_inputs.iteritems():
+ model_input = create_parameter_model_from_value(template_input, template_input_name,
+ model_cls=Input)
+ model_input.required = template_input.required
+ model_inputs[model_input.name] = model_input
+ return model_inputs
+
+def create_output_models_from_values(template_outputs):
+ model_outputs = {}
+ for template_output_name, template_output in template_outputs.iteritems():
+ model_outputs[template_output_name] = \
+ create_parameter_model_from_value(template_output,
+ template_output_name,
+ model_cls=Output)
+ return model_outputs
+
+
+def create_property_models_from_values(template_properties):
+ model_properties = {}
+ for template_property_name, template_property in template_properties.iteritems():
+ model_properties[template_property_name] = \
+ create_parameter_model_from_value(template_property,
+ template_property_name,
+ model_cls=Property)
+ return model_properties
+
+def create_attribute_models_from_values(template_attributes):
+ model_attributes = {}
+ for template_attribute_name, template_attribute in template_attributes.iteritems():
+ model_attributes[template_attribute_name] = \
+ create_parameter_model_from_value(template_attribute,
+ template_attribute_name,
+ model_cls=Attribute)
+ return model_attributes
+
+
+def create_parameter_model_from_value(template_parameter, template_parameter_name, model_cls):
+ return model_cls(name=template_parameter_name,
+ type_name=template_parameter.type,
+ value=template_parameter.value,
+ description=template_parameter.description)
+
+
+def create_parameter_models_from_assignments(properties, source_properties, model_cls):
+ if source_properties:
+ for property_name, prop in source_properties.iteritems():
+ properties[property_name] = model_cls(name=property_name, # pylint: disable=unexpected-keyword-arg
+ type_name=prop.value.type,
+ value=prop.value.value,
+ description=prop.value.description)
+
+
+def create_interface_template_models(context, service_template, interfaces, source_interfaces):
+ if source_interfaces:
+ for interface_name, interface in source_interfaces.iteritems():
+ interface = create_interface_template_model(context, service_template, interface)
+ if interface is not None:
+ interfaces[interface_name] = interface
+
+
+def create_node_filter_constraints(context, node_filter, target_node_template_constraints):
+ properties = node_filter.properties
+ if properties is not None:
+ for property_name, constraint_clause in properties:
+ constraint = create_constraint(context, node_filter, constraint_clause, property_name,
+ None)
+ target_node_template_constraints.append(constraint)
+
+ capabilities = node_filter.capabilities
+ if capabilities is not None:
+ for capability_name, capability in capabilities:
+ properties = capability.properties
+ if properties is not None:
+ for property_name, constraint_clause in properties:
+ constraint = create_constraint(context, node_filter, constraint_clause,
+ property_name, capability_name)
+ target_node_template_constraints.append(constraint)
+
+
+def create_constraint(context, node_filter, constraint_clause, property_name, capability_name): # pylint: disable=too-many-return-statements
+ constraint_key = constraint_clause._raw.keys()[0]
+
+ the_type = constraint_clause._get_type(context)
+
+ def coerce_constraint(constraint):
+ if the_type is not None:
+ return coerce_value(context, node_filter, the_type, None, None, constraint,
+ constraint_key)
+ else:
+ return constraint
+
+ def coerce_constraints(constraints):
+ if the_type is not None:
+ return tuple(coerce_constraint(constraint) for constraint in constraints)
+ else:
+ return constraints
+
+ if constraint_key == 'equal':
+ return Equal(property_name, capability_name,
+ coerce_constraint(constraint_clause.equal))
+ elif constraint_key == 'greater_than':
+ return GreaterThan(property_name, capability_name,
+ coerce_constraint(constraint_clause.greater_than))
+ elif constraint_key == 'greater_or_equal':
+ return GreaterOrEqual(property_name, capability_name,
+ coerce_constraint(constraint_clause.greater_or_equal))
+ elif constraint_key == 'less_than':
+ return LessThan(property_name, capability_name,
+ coerce_constraint(constraint_clause.less_than))
+ elif constraint_key == 'less_or_equal':
+ return LessOrEqual(property_name, capability_name,
+ coerce_constraint(constraint_clause.less_or_equal))
+ elif constraint_key == 'in_range':
+ return InRange(property_name, capability_name,
+ coerce_constraints(constraint_clause.in_range))
+ elif constraint_key == 'valid_values':
+ return ValidValues(property_name, capability_name,
+ coerce_constraints(constraint_clause.valid_values))
+ elif constraint_key == 'length':
+ return Length(property_name, capability_name,
+ coerce_constraint(constraint_clause.length))
+ elif constraint_key == 'min_length':
+ return MinLength(property_name, capability_name,
+ coerce_constraint(constraint_clause.min_length))
+ elif constraint_key == 'max_length':
+ return MaxLength(property_name, capability_name,
+ coerce_constraint(constraint_clause.max_length))
+ elif constraint_key == 'pattern':
+ return Pattern(property_name, capability_name,
+ coerce_constraint(constraint_clause.pattern))
+ else:
+ raise ValueError('malformed node_filter: {0}'.format(constraint_key))
+
+
+def split_prefix(string):
+ """
+    Splits the string into a prefix and a postfix at the first non-escaped ">".
+ """
+
+ split = IMPLEMENTATION_PREFIX_REGEX.split(string, 1)
+ if len(split) < 2:
+ return None, None
+ return split[0].strip(), split[1].strip()
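+
+# Editorial illustration (not part of the original module), with hypothetical inputs:
+#     split_prefix('my_plugin > my_module.my_task')  ->  ('my_plugin', 'my_module.my_task')
+#     split_prefix('no unescaped marker here')       ->  (None, None)
+#     split_prefix('timeout \\> 30')                 ->  (None, None)   # the ">" is escaped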
+
+
+def set_nested(the_dict, keys, value):
+ """
+    If the ``keys`` list has just one item, puts the value in the dict. If there are more items,
+ puts the value in a sub-dict, creating sub-dicts as necessary for each key.
+
+ For example, if ``the_dict`` is an empty dict, keys is ``['first', 'second', 'third']`` and
+ value is ``'value'``, then the_dict will be: ``{'first':{'second':{'third':'value'}}}``.
+
+ :param the_dict: Dict to change
+ :type the_dict: {}
+ :param keys: Keys
+ :type keys: [basestring]
+ :param value: Value
+ """
+ key = keys.pop(0)
+ if len(keys) == 0:
+ the_dict[key] = value
+ else:
+ if key not in the_dict:
+ the_dict[key] = StrictDict(key_class=basestring)
+ set_nested(the_dict[key], keys, value)
+
+
+def extract_implementation_primary(context, service_template, presentation, model, primary):
+ prefix, postfix = split_prefix(primary)
+ if prefix:
+ # Special ARIA prefix
+ model.plugin_specification = service_template.plugin_specifications.get(prefix)
+ model.function = postfix
+ if model.plugin_specification is None:
+ context.validation.report(
+ 'no policy for plugin "{0}" specified in operation implementation: {1}'
+ .format(prefix, primary),
+ locator=presentation._get_child_locator('properties', 'implementation'),
+ level=Issue.BETWEEN_TYPES)
+ else:
+ # Standard TOSCA artifact with default plugin
+ model.implementation = primary
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/artifacts.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/artifacts.py
new file mode 100644
index 0000000..b45615a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/artifacts.py
@@ -0,0 +1,44 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.collections import OrderedDict
+
+
+#
+# NodeType, NodeTemplate
+#
+
+def get_inherited_artifact_definitions(context, presentation, for_presentation=None):
+ if for_presentation is None:
+ for_presentation = presentation
+
+ if hasattr(presentation, '_get_type'):
+ # In NodeTemplate
+ parent = presentation._get_type(context)
+ else:
+ # In NodeType
+ parent = presentation._get_parent(context)
+
+ # Get artifact definitions from parent
+ artifacts = get_inherited_artifact_definitions(context, parent, for_presentation) \
+ if parent is not None else OrderedDict()
+
+ # Add/override our artifact definitions
+ our_artifacts = presentation.artifacts
+ if our_artifacts:
+ for artifact_name, artifact in our_artifacts.iteritems():
+ artifacts[artifact_name] = artifact._clone(for_presentation)
+
+ return artifacts
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/capabilities.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/capabilities.py
new file mode 100644
index 0000000..1b95bec
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/capabilities.py
@@ -0,0 +1,220 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.collections import deepcopy_with_locators, OrderedDict
+from aria.parser.validation import Issue
+
+from .parameters import (convert_parameter_definitions_to_values, merge_raw_parameter_definitions,
+ get_assigned_and_defined_parameter_values)
+
+
+#
+# CapabilityType
+#
+
+def get_inherited_valid_source_types(context, presentation):
+ """
+    If we haven't set the ``valid_source_types`` field, returns our parent's value, if we have
+    one (recursively).
+ """
+
+ valid_source_types = presentation.valid_source_types
+
+ if valid_source_types is None:
+ parent = presentation._get_parent(context)
+ valid_source_types = get_inherited_valid_source_types(context, parent) \
+ if parent is not None else None
+
+ return valid_source_types
+
+
+#
+# NodeType
+#
+
+def get_inherited_capability_definitions(context, presentation, for_presentation=None):
+ """
+    Returns our capability definitions added on top of those of our parent, if we have
+ one (recursively).
+
+ Allows overriding all aspects of parent capability properties except data type.
+ """
+
+ if for_presentation is None:
+ for_presentation = presentation
+
+ # Get capability definitions from parent
+ parent = presentation._get_parent(context)
+ capability_definitions = get_inherited_capability_definitions(
+ context, parent, for_presentation) if parent is not None else OrderedDict()
+
+ # Add/merge our capability definitions
+ our_capability_definitions = presentation.capabilities
+ if our_capability_definitions:
+ for capability_name, our_capability_definition in our_capability_definitions.iteritems():
+ if capability_name in capability_definitions:
+ capability_definition = capability_definitions[capability_name]
+
+ # Check if we changed the type
+ type1 = capability_definition._get_type(context)
+ type2 = our_capability_definition._get_type(context)
+
+ if not type1._is_descendant(context, type2):
+ context.validation.report(
+ 'capability definition type "{0}" is not a descendant of overridden '
+ 'capability definition type "{1}"' \
+ .format(type1._name, type2._name),
+ locator=our_capability_definition._locator, level=Issue.BETWEEN_TYPES)
+
+ merge_capability_definition(context, presentation, capability_definition,
+ our_capability_definition)
+ else:
+ capability_definition = our_capability_definition._clone(for_presentation)
+ if isinstance(capability_definition._raw, basestring):
+ # Make sure we have a dict
+ the_type = capability_definition._raw
+ capability_definition._raw = OrderedDict()
+ capability_definition._raw['type'] = the_type
+ capability_definitions[capability_name] = capability_definition
+
+ merge_capability_definition_from_type(context, presentation, capability_definition)
+
+ for capability_definition in capability_definitions.itervalues():
+ capability_definition._reset_method_cache()
+
+ return capability_definitions
+
+
+#
+# NodeTemplate
+#
+
+def get_template_capabilities(context, presentation):
+ """
+ Returns the node type's capabilities with our assignments to properties and attributes merged
+ in.
+
+ Capability properties' default values, if available, will be used if we did not assign them.
+
+ Makes sure that required properties indeed end up with a value.
+ """
+
+ capability_assignments = OrderedDict()
+
+ the_type = presentation._get_type(context) # NodeType
+ capability_definitions = the_type._get_capabilities(context) if the_type is not None else None
+
+ # Copy over capability definitions from the type (will initialize properties with default
+ # values)
+ if capability_definitions:
+ for capability_name, capability_definition in capability_definitions.iteritems():
+ capability_assignments[capability_name] = \
+ convert_capability_from_definition_to_assignment(context, capability_definition,
+ presentation)
+
+ # Fill in our capability assignments
+ our_capability_assignments = presentation.capabilities
+ if our_capability_assignments:
+ for capability_name, our_capability_assignment in our_capability_assignments.iteritems():
+ if capability_name in capability_assignments:
+ capability_assignment = capability_assignments[capability_name]
+
+ # Assign properties
+ values = get_assigned_and_defined_parameter_values(context,
+ our_capability_assignment,
+ 'property')
+
+ if values:
+ capability_assignment._raw['properties'] = values
+ capability_assignment._reset_method_cache()
+ else:
+ context.validation.report(
+ 'capability "{0}" not declared at node type "{1}" in "{2}"'
+ .format(capability_name, presentation.type, presentation._fullname),
+ locator=our_capability_assignment._locator, level=Issue.BETWEEN_TYPES)
+
+ return capability_assignments
+
+
+#
+# Utils
+#
+
+def convert_capability_from_definition_to_assignment(context, presentation, container):
+ from ..assignments import CapabilityAssignment
+
+ raw = OrderedDict()
+
+ properties = presentation.properties
+ if properties is not None:
+ raw['properties'] = convert_parameter_definitions_to_values(context, properties)
+
+ # TODO attributes
+
+ return CapabilityAssignment(name=presentation._name, raw=raw, container=container)
+
+
+def merge_capability_definition(context, presentation, capability_definition,
+ from_capability_definition):
+ raw_properties = OrderedDict()
+
+ capability_definition._raw['type'] = from_capability_definition.type
+
+ # Merge properties from type
+    from_property_definitions = from_capability_definition.properties
+    merge_raw_parameter_definitions(context, presentation, raw_properties, from_property_definitions,
+                                    'properties')
+
+ # Merge our properties
+ merge_raw_parameter_definitions(context, presentation, raw_properties,
+ capability_definition.properties, 'properties')
+
+ if raw_properties:
+ capability_definition._raw['properties'] = raw_properties
+ capability_definition._reset_method_cache()
+
+ # Merge occurrences
+ occurrences = from_capability_definition._raw.get('occurrences')
+ if (occurrences is not None) and (capability_definition._raw.get('occurrences') is None):
+ capability_definition._raw['occurrences'] = \
+ deepcopy_with_locators(occurrences)
+
+
+def merge_capability_definition_from_type(context, presentation, capability_definition):
+ """
+ Merge ``properties`` and ``valid_source_types`` from the node type's capability definition
+ over those taken from the parent node type.
+ """
+ raw_properties = OrderedDict()
+
+ # Merge properties from parent
+ the_type = capability_definition._get_type(context)
+    type_property_definitions = the_type._get_properties(context)
+    merge_raw_parameter_definitions(context, presentation, raw_properties, type_property_definitions,
+                                    'properties')
+
+ # Merge our properties (might override definitions in parent)
+ merge_raw_parameter_definitions(context, presentation, raw_properties,
+ capability_definition.properties, 'properties')
+
+ if raw_properties:
+ capability_definition._raw['properties'] = raw_properties
+
+ # Override valid_source_types
+ if capability_definition._raw.get('valid_source_types') is None:
+ valid_source_types = the_type._get_valid_source_types(context)
+ if valid_source_types is not None:
+ capability_definition._raw['valid_source_types'] = \
+ deepcopy_with_locators(valid_source_types)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/constraints.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/constraints.py
new file mode 100644
index 0000000..9a30cc1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/constraints.py
@@ -0,0 +1,144 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+
+from aria.modeling.constraints import NodeTemplateConstraint
+from aria.modeling.utils import NodeTemplateContainerHolder
+from aria.modeling.functions import evaluate
+from aria.parser import implements_specification
+
+
+@implements_specification('3.5.2-2', 'tosca-simple-1.0')
+class EvaluatingNodeTemplateConstraint(NodeTemplateConstraint):
+ """
+ A version of :class:`NodeTemplateConstraint` with boilerplate initialization for TOSCA
+ constraints.
+ """
+
+ def __init__(self, property_name, capability_name, constraint, as_list=False):
+ self.property_name = property_name
+ self.capability_name = capability_name
+ self.constraint = constraint
+ self.as_list = as_list
+
+ def matches(self, source_node_template, target_node_template):
+ # TOSCA node template constraints can refer to either capability properties or node
+ # template properties
+ if self.capability_name is not None:
+ # Capability property
+ capability = target_node_template.capability_templates.get(self.capability_name)
+ value = capability.properties.get(self.property_name) \
+ if capability is not None else None # Parameter
+ else:
+ # Node template property
+ value = target_node_template.properties.get(self.property_name) # Parameter
+
+ value = value.value if value is not None else None
+
+ container_holder = NodeTemplateContainerHolder(source_node_template)
+
+ if self.as_list:
+ constraints = []
+ for constraint in self.constraint:
+ evaluation = evaluate(constraint, container_holder)
+ if evaluation is not None:
+ constraints.append(evaluation.value)
+ else:
+ constraints.append(constraint)
+ constraint = constraints
+ else:
+ evaluation = evaluate(self.constraint, container_holder)
+ if evaluation is not None:
+ constraint = evaluation.value
+ else:
+ constraint = self.constraint
+
+ return self.matches_evaluated(value, constraint)
+
+ def matches_evaluated(self, value, constraint):
+ raise NotImplementedError
+
+
+class Equal(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ return value == constraint
+
+
+class GreaterThan(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ return value > constraint
+
+
+class GreaterOrEqual(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ return value >= constraint
+
+
+class LessThan(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ return value < constraint
+
+
+class LessOrEqual(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ return value <= constraint
+
+
+class InRange(EvaluatingNodeTemplateConstraint):
+ def __init__(self, property_name, capability_name, constraint):
+ super(InRange, self).__init__(property_name, capability_name, constraint, as_list=True)
+
+ def matches_evaluated(self, value, constraints):
+ lower, upper = constraints
+ if value < lower:
+ return False
+ if (upper != 'UNBOUNDED') and (value > upper):
+ return False
+ return True
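+
+# Editorial illustration (not part of the original module), with a hypothetical property
+# name: for InRange('num_cpus', None, [2, 'UNBOUNDED']), matches_evaluated(4, [2, 'UNBOUNDED'])
+# is True, matches_evaluated(1, [2, 'UNBOUNDED']) is False, and a concrete upper bound such
+# as [2, 8] rejects 16.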
+
+
+class ValidValues(EvaluatingNodeTemplateConstraint):
+ def __init__(self, property_name, capability_name, constraint):
+ super(ValidValues, self).__init__(property_name, capability_name, constraint, as_list=True)
+
+ def matches_evaluated(self, value, constraints):
+ return value in constraints
+
+
+class Length(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ return len(value) == constraint
+
+
+class MinLength(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ return len(value) >= constraint
+
+
+class MaxLength(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ return len(value) <= constraint
+
+
+class Pattern(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ # From TOSCA 1.0 3.5.2.1:
+ #
+ # "Note: Future drafts of this specification will detail the use of regular expressions and
+ # reference an appropriate standardized grammar."
+ #
+ # So we will just use Python's.
+ return re.match(constraint, unicode(value)) is not None
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/copy.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/copy.py
new file mode 100644
index 0000000..bd9037f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/copy.py
@@ -0,0 +1,32 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# NodeTemplate, RelationshipTemplate
+#
+
+def get_default_raw_from_copy(presentation, field_name):
+ """
+ Used for the ``_get_default_raw`` field hook.
+ """
+
+ copy = presentation._raw.get('copy')
+ if copy is not None:
+ templates = getattr(presentation._container, field_name)
+ if templates is not None:
+ template = templates.get(copy)
+ if template is not None:
+ return template._raw
+ return None
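+
+# Editorial illustration (not part of the original module): in a hypothetical service
+# template with
+#
+#     node_templates:
+#       web_server_1:
+#         type: tosca.nodes.WebServer
+#       web_server_2:
+#         copy: web_server_1
+#
+# calling this hook for web_server_2 with a field name such as 'node_templates' returns
+# the raw data of web_server_1, which then serves as web_server_2's default raw.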
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/data_types.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/data_types.py
new file mode 100644
index 0000000..13ce9a3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/data_types.py
@@ -0,0 +1,514 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+
+from aria.utils.collections import OrderedDict
+from aria.utils.formatting import safe_repr
+from aria.utils.type import full_type_name
+from aria.utils.imports import import_fullname
+from aria.parser import implements_specification
+from aria.parser.presentation import (get_locator, validate_primitive)
+from aria.parser.validation import Issue
+
+from .functions import get_function
+from ..presentation.types import get_type_by_name
+
+
+#
+# DataType
+#
+
+def get_inherited_constraints(context, presentation):
+ """
+ If we don't have constraints, will return our parent's constraints (if we have one),
+ recursively.
+
+ Implication: if we define even one constraint, the parent's constraints will not be inherited.
+ """
+
+ constraints = presentation.constraints
+
+ if constraints is None:
+ # If we don't have any, use our parent's
+ parent = presentation._get_parent(context)
+ parent_constraints = get_inherited_constraints(context, parent) \
+ if parent is not None else None
+ if parent_constraints is not None:
+ constraints = parent_constraints
+
+ return constraints
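+
+# Editorial illustration (not part of the original module), with hypothetical type names:
+# if data type my.base declares constraints: [ {min_length: 2} ] and my.derived declares
+# constraints: [ {max_length: 8} ], then for my.derived this returns only the max_length
+# constraint -- declaring any constraint suppresses inheritance of the parent's list.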
+
+
+def coerce_data_type_value(context, presentation, data_type, entry_schema, constraints, value, # pylint: disable=unused-argument
+ aspect):
+ """
+ Handles the ``_coerce_data()`` hook for complex data types.
+
+ There are two kinds of handling:
+
+    1. If we have a primitive type as our great ancestor, then we do primitive type coercion, and
+ just check for constraints.
+
+ 2. Otherwise, for normal complex data types we return the assigned property values while making
+ sure they are defined in our type. The property definition's default value, if available,
+ will be used if we did not assign it. We also make sure that required definitions indeed end
+ up with a value.
+ """
+
+ primitive_type = data_type._get_primitive_ancestor(context)
+ if primitive_type is not None:
+ # Must be coercible to primitive ancestor
+ value = coerce_to_primitive(context, presentation, primitive_type, constraints, value,
+ aspect)
+ else:
+ definitions = data_type._get_properties(context)
+ if isinstance(value, dict):
+ temp = OrderedDict()
+
+ # Fill in our values, but make sure they are defined
+ for name, v in value.iteritems():
+ if name in definitions:
+ definition = definitions[name]
+ definition_type = definition._get_type(context)
+ definition_entry_schema = definition.entry_schema
+ definition_constraints = definition._get_constraints(context)
+ temp[name] = coerce_value(context, presentation, definition_type,
+ definition_entry_schema, definition_constraints, v,
+ aspect)
+ else:
+ context.validation.report(
+ 'assignment to undefined property "%s" in type "%s" in "%s"'
+ % (name, data_type._fullname, presentation._fullname),
+ locator=get_locator(v, value, presentation), level=Issue.BETWEEN_TYPES)
+
+ # Fill in defaults from the definitions, and check if required definitions have not been
+ # assigned
+ for name, definition in definitions.iteritems():
+ if (temp.get(name) is None) and hasattr(definition, 'default') \
+ and (definition.default is not None):
+ definition_type = definition._get_type(context)
+ definition_entry_schema = definition.entry_schema
+ definition_constraints = definition._get_constraints(context)
+ temp[name] = coerce_value(context, presentation, definition_type,
+ definition_entry_schema, definition_constraints,
+ definition.default, 'default')
+
+ if getattr(definition, 'required', False) and (temp.get(name) is None):
+ context.validation.report(
+ 'required property "%s" in type "%s" is not assigned a value in "%s"'
+ % (name, data_type._fullname, presentation._fullname),
+ locator=presentation._get_child_locator('definitions'),
+ level=Issue.BETWEEN_TYPES)
+
+ value = temp
+ elif value is not None:
+ context.validation.report('value of type "%s" is not a dict in "%s"'
+ % (data_type._fullname, presentation._fullname),
+ locator=get_locator(value, presentation),
+ level=Issue.BETWEEN_TYPES)
+ value = None
+
+ return value
+
+
+def validate_data_type_name(context, presentation):
+ """
+ Makes sure the complex data type's name is not that of a built-in type.
+ """
+
+ name = presentation._name
+ if get_primitive_data_type(name) is not None:
+ context.validation.report('data type name is that of a built-in type: %s'
+ % safe_repr(name),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+
+
+#
+# PropertyDefinition, AttributeDefinition, EntrySchema, DataType
+#
+
+def get_data_type(context, presentation, field_name, allow_none=False):
+ """
+ Returns the type, whether it's a complex data type (a DataType instance) or a primitive (a
+ Python primitive type class).
+
+ If the type is not specified, defaults to :class:`str`, per note in section 3.2.1.1 of the
+ `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #_Toc379455072>`__
+ """
+
+ type_name = getattr(presentation, field_name)
+
+ if type_name is None:
+ if allow_none:
+ return None
+ else:
+ return str
+
+ # Avoid circular definitions
+ container_data_type = get_container_data_type(presentation)
+ if (container_data_type is not None) and (container_data_type._name == type_name):
+ return None
+
+ # Try complex data type
+ data_type = get_type_by_name(context, type_name, 'data_types')
+ if data_type is not None:
+ return data_type
+
+ # Try primitive data type
+ return get_primitive_data_type(type_name)
+
+
+#
+# PropertyDefinition, EntrySchema
+#
+
+def get_property_constraints(context, presentation):
+ """
+ If we don't have constraints, will return our type's constraints (if we have one), recursively.
+
+ Implication: if we define even one constraint, the type's constraints will not be inherited.
+ """
+
+ constraints = presentation.constraints
+
+ if constraints is None:
+ # If we don't have any, use our type's
+ the_type = presentation._get_type(context)
+ type_constraints = the_type._get_constraints(context) \
+ if hasattr(the_type, '_get_constraints') else None
+ if type_constraints is not None:
+ constraints = type_constraints
+
+ return constraints
+
+
+#
+# ConstraintClause
+#
+
+def apply_constraint_to_value(context, presentation, constraint_clause, value): # pylint: disable=too-many-statements,too-many-return-statements,too-many-branches
+ """
+ Returns false if the value does not conform to the constraint.
+ """
+
+ constraint_key = constraint_clause._raw.keys()[0]
+ the_type = constraint_clause._get_type(context)
+ # PropertyAssignment does not have this:
+ entry_schema = getattr(presentation, 'entry_schema', None)
+
+ def coerce_constraint(constraint):
+ return coerce_value(context, presentation, the_type, entry_schema, None, constraint,
+ constraint_key)
+
+ def report(message, constraint):
+ context.validation.report('value %s %s per constraint in "%s": %s'
+ % (message, safe_repr(constraint),
+ presentation._name or presentation._container._name,
+ safe_repr(value)),
+ locator=presentation._locator, level=Issue.BETWEEN_FIELDS)
+
+ if constraint_key == 'equal':
+ constraint = coerce_constraint(constraint_clause.equal)
+ if value != constraint:
+ report('is not equal to', constraint)
+ return False
+
+ elif constraint_key == 'greater_than':
+ constraint = coerce_constraint(constraint_clause.greater_than)
+ if value <= constraint:
+ report('is not greater than', constraint)
+ return False
+
+ elif constraint_key == 'greater_or_equal':
+ constraint = coerce_constraint(constraint_clause.greater_or_equal)
+ if value < constraint:
+ report('is not greater than or equal to', constraint)
+ return False
+
+ elif constraint_key == 'less_than':
+ constraint = coerce_constraint(constraint_clause.less_than)
+ if value >= constraint:
+ report('is not less than', constraint)
+ return False
+
+ elif constraint_key == 'less_or_equal':
+ constraint = coerce_constraint(constraint_clause.less_or_equal)
+ if value > constraint:
+ report('is not less than or equal to', constraint)
+ return False
+
+ elif constraint_key == 'in_range':
+ lower, upper = constraint_clause.in_range
+ lower, upper = coerce_constraint(lower), coerce_constraint(upper)
+ if value < lower:
+ report('is not greater than or equal to lower bound', lower)
+ return False
+ if (upper != 'UNBOUNDED') and (value > upper):
+            report('is not less than or equal to upper bound', upper)
+ return False
+
+ elif constraint_key == 'valid_values':
+ constraint = tuple(coerce_constraint(v) for v in constraint_clause.valid_values)
+ if value not in constraint:
+ report('is not one of', constraint)
+ return False
+
+ elif constraint_key == 'length':
+ constraint = constraint_clause.length
+ try:
+ if len(value) != constraint:
+ report('is not of length', constraint)
+ return False
+ except TypeError:
+ pass # should be validated elsewhere
+
+ elif constraint_key == 'min_length':
+ constraint = constraint_clause.min_length
+ try:
+ if len(value) < constraint:
+                report('has a length less than', constraint)
+ return False
+ except TypeError:
+ pass # should be validated elsewhere
+
+ elif constraint_key == 'max_length':
+ constraint = constraint_clause.max_length
+ try:
+ if len(value) > constraint:
+ report('has a length greater than', constraint)
+ return False
+ except TypeError:
+ pass # should be validated elsewhere
+
+ elif constraint_key == 'pattern':
+ constraint = constraint_clause.pattern
+ try:
+ # From TOSCA 1.0 3.5.2.1:
+ #
+ # "Note: Future drafts of this specification will detail the use of regular expressions
+ # and reference an appropriate standardized grammar."
+ #
+ # So we will just use Python's.
+ if re.match(constraint, str(value)) is None:
+ report('does not match regular expression', constraint)
+ return False
+ except re.error:
+ pass # should be validated elsewhere
+
+ return True
+
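+# Illustrative sketch (not part of the original module): a property defined as
+#
+#     port:
+#       type: integer
+#       constraints:
+#         - in_range: [ 1, 65535 ]
+#
+# reaches this function once per constraint clause; here constraint_key would be 'in_range', the
+# bounds are coerced to the property's type, and an out-of-range assignment such as 70000 is
+# reported through context.validation and makes the function return False.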
+
+#
+# Repository
+#
+
+def get_data_type_value(context, presentation, field_name, type_name):
+ the_type = get_type_by_name(context, type_name, 'data_types')
+ if the_type is not None:
+ value = getattr(presentation, field_name)
+ if value is not None:
+ return coerce_data_type_value(context, presentation, the_type, None, None, value, None)
+ else:
+ context.validation.report('field "%s" in "%s" refers to unknown data type "%s"'
+ % (field_name, presentation._fullname, type_name),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ return None
+
+
+#
+# Utils
+#
+
+PRIMITIVE_DATA_TYPES = {
+ # YAML 1.2:
+ 'tag:yaml.org,2002:str': unicode,
+ 'tag:yaml.org,2002:integer': int,
+ 'tag:yaml.org,2002:float': float,
+ 'tag:yaml.org,2002:bool': bool,
+ 'tag:yaml.org,2002:null': None.__class__,
+
+ # TOSCA aliases:
+ 'string': unicode,
+ 'integer': int,
+ 'float': float,
+ 'boolean': bool,
+ 'null': None.__class__}
+
+
+@implements_specification('3.2.1-3', 'tosca-simple-1.0')
+def get_primitive_data_type(type_name):
+ """
+ Many of the types we use in this profile are built-in types from the YAML 1.2 specification
+ (i.e., those identified by the "tag:yaml.org,2002" version tag) [YAML-1.2].
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #_Toc373867862>`__
+ """
+
+ return PRIMITIVE_DATA_TYPES.get(type_name)
+
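+# A minimal usage sketch (assumed inputs, not part of the original module):
+#
+#     get_primitive_data_type('string')                  # -> unicode
+#     get_primitive_data_type('tag:yaml.org,2002:bool')  # -> bool
+#     get_primitive_data_type('no.such.type')            # -> None (callers report or fall back)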
+
+def get_data_type_name(the_type):
+ """
+ Returns the name of the type, whether it's a DataType, a primitive type, or another class.
+ """
+
+ return the_type._name if hasattr(the_type, '_name') else full_type_name(the_type)
+
+
+def coerce_value(context, presentation, the_type, entry_schema, constraints, value, aspect=None): # pylint: disable=too-many-return-statements
+ """
+ Returns the value after it's coerced to its type, reporting validation errors if it cannot be
+ coerced.
+
+ Supports both complex data types and primitives.
+
+ Data types can use the ``coerce_value`` extension to hook their own specialized function.
+ If the extension is present, we will delegate to that hook.
+ """
+
+ # TODO: should support models as well as presentations
+
+ is_function, func = get_function(context, presentation, value)
+ if is_function:
+ return func
+
+ if the_type is None:
+ return value
+
+ if the_type == None.__class__:
+ if value is not None:
+ context.validation.report('field "%s" is of type "null" but has a non-null value: %s'
+ % (presentation._name, safe_repr(value)),
+ locator=presentation._locator, level=Issue.BETWEEN_FIELDS)
+ return None
+
+ # Delegate to 'coerce_value' extension
+ if hasattr(the_type, '_get_extension'):
+ coerce_value_fn_name = the_type._get_extension('coerce_value')
+ if coerce_value_fn_name is not None:
+ if value is None:
+ return None
+ coerce_value_fn = import_fullname(coerce_value_fn_name)
+ return coerce_value_fn(context, presentation, the_type, entry_schema, constraints,
+ value, aspect)
+
+ if hasattr(the_type, '_coerce_value'):
+ # Delegate to '_coerce_value' (likely a DataType instance)
+ return the_type._coerce_value(context, presentation, entry_schema, constraints, value,
+ aspect)
+
+ # Coerce to primitive type
+ return coerce_to_primitive(context, presentation, the_type, constraints, value, aspect)
+
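+# A sketch of the delegation order above (assumed names, not part of the original module):
+#
+#     coerce_value(context, presentation, str, None, None, 8080)
+#     # -> primitive coercion, e.g. '8080' when primitive coercion is allowed
+#     coerce_value(context, presentation, a_data_type, None, None, raw_dict)
+#     # -> delegates to the data type's 'coerce_value' extension or its _coerce_value method
+#
+# Intrinsic functions such as {'get_input': 'port'} are detected first by get_function and are
+# returned unevaluated as Function objects, to be evaluated later against a service instance.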
+
+def coerce_to_primitive(context, presentation, primitive_type, constraints, value, aspect=None):
+ """
+ Returns the value after it's coerced to a primitive type, translating exceptions to validation
+ errors if it cannot be coerced.
+ """
+
+ if value is None:
+ return None
+
+ try:
+ # Coerce
+ value = validate_primitive(value, primitive_type,
+ context.validation.allow_primitive_coersion)
+
+ # Check constraints
+ apply_constraints_to_value(context, presentation, constraints, value)
+ except (ValueError, TypeError) as e:
+ report_issue_for_bad_format(context, presentation, primitive_type, value, aspect, e)
+ value = None
+
+ return value
+
+
+def coerce_to_data_type_class(context, presentation, cls, entry_schema, constraints, value,
+ aspect=None):
+ """
+ Returns the value after it's coerced to a data type class, reporting validation errors if it
+    cannot be coerced. Constraints will be applied after coercion.
+
+ Will either call a ``_create`` static function in the class, or instantiate it using a
+ constructor if ``_create`` is not available.
+
+ This will usually be called by a ``coerce_value`` extension hook in a :class:`DataType`.
+ """
+
+ try:
+ if hasattr(cls, '_create'):
+ # Instantiate using creator function
+ value = cls._create(context, presentation, entry_schema, constraints, value, aspect)
+ else:
+ # Normal instantiation
+ value = cls(entry_schema, constraints, value, aspect)
+ except ValueError as e:
+ report_issue_for_bad_format(context, presentation, cls, value, aspect, e)
+ value = None
+
+ # Check constraints
+ value = apply_constraints_to_value(context, presentation, constraints, value)
+
+ return value
+
+
+def apply_constraints_to_value(context, presentation, constraints, value):
+ """
+ Applies all constraints to the value. If the value conforms, returns the value. If it does not
+ conform, returns None.
+ """
+
+ if (value is not None) and (constraints is not None):
+ valid = True
+ for constraint in constraints:
+ if not constraint._apply_to_value(context, presentation, value):
+ valid = False
+ if not valid:
+ value = None
+ return value
+
+
+def get_container_data_type(presentation):
+ if presentation is None:
+ return None
+ if type(presentation).__name__ == 'DataType':
+ return presentation
+ return get_container_data_type(presentation._container)
+
+
+def report_issue_for_bad_format(context, presentation, the_type, value, aspect, e):
+ if aspect == 'default':
+ aspect = '"default" value'
+ elif aspect is not None:
+ aspect = '"%s" aspect' % aspect
+
+ if aspect is not None:
+ context.validation.report('%s for field "%s" is not a valid "%s": %s'
+ % (aspect, presentation._name or presentation._container._name,
+ get_data_type_name(the_type), safe_repr(value)),
+ locator=presentation._locator, level=Issue.BETWEEN_FIELDS,
+ exception=e)
+ else:
+ context.validation.report('field "%s" is not a valid "%s": %s'
+ % (presentation._name or presentation._container._name,
+ get_data_type_name(the_type), safe_repr(value)),
+ locator=presentation._locator, level=Issue.BETWEEN_FIELDS,
+ exception=e)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/functions.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/functions.py
new file mode 100644
index 0000000..ecbfde9
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/functions.py
@@ -0,0 +1,681 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from StringIO import StringIO # Note: cStringIO does not support Unicode
+import re
+
+from aria.utils.collections import FrozenList
+from aria.utils.formatting import (as_raw, safe_repr)
+from aria.utils.type import full_type_name
+from aria.parser import implements_specification
+from aria.parser.exceptions import InvalidValueError
+from aria.parser.validation import Issue
+from aria.modeling.exceptions import CannotEvaluateFunctionException
+from aria.modeling.models import (Node, NodeTemplate, Relationship, RelationshipTemplate)
+from aria.modeling.functions import (Function, Evaluation)
+
+
+#
+# Intrinsic
+#
+
+@implements_specification('4.3.1', 'tosca-simple-1.0')
+class Concat(Function):
+ """
+ The ``concat`` function is used to concatenate two or more string values within a TOSCA
+ service template.
+ """
+
+ def __init__(self, context, presentation, argument):
+ self.locator = presentation._locator
+
+ if not isinstance(argument, list):
+ raise InvalidValueError(
+ 'function "concat" argument must be a list of string expressions: {0}'
+ .format(safe_repr(argument)),
+ locator=self.locator)
+
+ string_expressions = []
+ for index, an_argument in enumerate(argument):
+ string_expressions.append(parse_string_expression(context, presentation, 'concat',
+ index, None, an_argument))
+ self.string_expressions = FrozenList(string_expressions)
+
+ @property
+ def as_raw(self):
+ string_expressions = []
+ for string_expression in self.string_expressions:
+ if hasattr(string_expression, 'as_raw'):
+ string_expression = as_raw(string_expression)
+ string_expressions.append(string_expression)
+ return {'concat': string_expressions}
+
+ def __evaluate__(self, container_holder):
+ final = True
+ value = StringIO()
+ for e in self.string_expressions:
+ e, final = evaluate(e, final, container_holder)
+ if e is not None:
+ value.write(unicode(e))
+ value = value.getvalue() or u''
+ return Evaluation(value, final)
+
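+# For example (illustrative, not part of the original module), the template fragment
+#
+#     url: { concat: [ 'http://', { get_attribute: [ HOST, ip_address ] }, ':8080' ] }
+#
+# is parsed into a Concat instance whose string_expressions hold two plain strings and a
+# GetAttribute function; __evaluate__ joins their evaluated values into a single string.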
+
+@implements_specification('4.3.2', 'tosca-simple-1.0')
+class Token(Function):
+ """
+ The ``token`` function is used within a TOSCA service template on a string to parse out
+ (tokenize) substrings separated by one or more token characters within a larger string.
+ """
+
+ def __init__(self, context, presentation, argument):
+ self.locator = presentation._locator
+
+ if (not isinstance(argument, list)) or (len(argument) != 3):
+ raise InvalidValueError('function "token" argument must be a list of 3 parameters: {0}'
+ .format(safe_repr(argument)),
+ locator=self.locator)
+
+ self.string_with_tokens = parse_string_expression(context, presentation, 'token', 0,
+ 'the string to tokenize', argument[0])
+ self.string_of_token_chars = parse_string_expression(context, presentation, 'token', 1,
+ 'the token separator characters',
+ argument[1])
+ self.substring_index = parse_int(context, presentation, 'token', 2,
+ 'the 0-based index of the token to return', argument[2])
+
+ @property
+ def as_raw(self):
+ string_with_tokens = self.string_with_tokens
+ if hasattr(string_with_tokens, 'as_raw'):
+ string_with_tokens = as_raw(string_with_tokens)
+ string_of_token_chars = self.string_of_token_chars
+ if hasattr(string_of_token_chars, 'as_raw'):
+ string_of_token_chars = as_raw(string_of_token_chars)
+ return {'token': [string_with_tokens, string_of_token_chars, self.substring_index]}
+
+ def __evaluate__(self, container_holder):
+ final = True
+ string_with_tokens, final = evaluate(self.string_with_tokens, final, container_holder)
+ string_of_token_chars, final = evaluate(self.string_of_token_chars, final, container_holder)
+
+ if string_of_token_chars:
+ regex = '[' + ''.join(re.escape(c) for c in string_of_token_chars) + ']'
+ split = re.split(regex, string_with_tokens)
+ if self.substring_index < len(split):
+ return Evaluation(split[self.substring_index], final)
+
+ raise CannotEvaluateFunctionException()
+
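+# For example (illustrative, not part of the original module):
+#
+#     { token: [ 'one,two;three', ',;', 1 ] }
+#
+# splits the first argument on ',' and ';' and evaluates to 'two' (the token at 0-based index 1).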
+
+#
+# Property
+#
+
+@implements_specification('4.4.1', 'tosca-simple-1.0')
+class GetInput(Function):
+ """
+ The ``get_input`` function is used to retrieve the values of properties declared within the
+ inputs section of a TOSCA Service Template.
+ """
+
+ def __init__(self, context, presentation, argument):
+ self.locator = presentation._locator
+
+ self.input_property_name = parse_string_expression(context, presentation, 'get_input',
+ None, 'the input property name',
+ argument)
+
+ if isinstance(self.input_property_name, basestring):
+ the_input = context.presentation.get_from_dict('service_template', 'topology_template',
+ 'inputs', self.input_property_name)
+ if the_input is None:
+ raise InvalidValueError(
+ 'function "get_input" argument is not a valid input name: {0}'
+ .format(safe_repr(argument)),
+ locator=self.locator)
+
+ @property
+ def as_raw(self):
+ return {'get_input': as_raw(self.input_property_name)}
+
+ def __evaluate__(self, container_holder):
+ service = container_holder.service
+ if service is None:
+ raise CannotEvaluateFunctionException()
+
+ value = service.inputs.get(self.input_property_name)
+ if value is not None:
+ value = value.value
+ return Evaluation(value, False) # We never return final evaluations!
+
+ raise InvalidValueError(
+ 'function "get_input" argument is not a valid input name: {0}'
+ .format(safe_repr(self.input_property_name)),
+ locator=self.locator)
+
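+# For example (illustrative, not part of the original module), given a topology_template input
+# named 'port':
+#
+#     port: { get_input: port }
+#
+# An unknown input name is reported at parse time; at evaluation time the value is read from the
+# service's inputs and is never marked final, since inputs may differ between deployments.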
+
+@implements_specification('4.4.2', 'tosca-simple-1.0')
+class GetProperty(Function):
+ """
+ The ``get_property`` function is used to retrieve property values between modelable entities
+ defined in the same service template.
+ """
+
+ def __init__(self, context, presentation, argument):
+ self.locator = presentation._locator
+
+ if (not isinstance(argument, list)) or (len(argument) < 2):
+ raise InvalidValueError(
+ 'function "get_property" argument must be a list of at least 2 string expressions: '
+ '{0}'.format(safe_repr(argument)),
+ locator=self.locator)
+
+ self.modelable_entity_name = parse_modelable_entity_name(context, presentation,
+ 'get_property', 0, argument[0])
+ # The first of these will be tried as a req-or-cap name:
+ self.nested_property_name_or_index = argument[1:]
+
+ @property
+ def as_raw(self):
+ return {'get_property': [self.modelable_entity_name] + self.nested_property_name_or_index}
+
+ def __evaluate__(self, container_holder):
+ modelable_entities = get_modelable_entities(container_holder, 'get_property', self.locator,
+ self.modelable_entity_name)
+ req_or_cap_name = self.nested_property_name_or_index[0]
+
+ for modelable_entity in modelable_entities:
+ properties = None
+
+ # First argument refers to a requirement template?
+ if hasattr(modelable_entity, 'requirement_templates') \
+ and modelable_entity.requirement_templates \
+ and (req_or_cap_name in [v.name for v in modelable_entity.requirement_templates]):
+ for requirement in modelable_entity.requirement_templates:
+ if requirement.name == req_or_cap_name:
+ # TODO
+ raise CannotEvaluateFunctionException()
+ # First argument refers to a capability?
+ elif hasattr(modelable_entity, 'capabilities') \
+ and modelable_entity.capabilities \
+ and (req_or_cap_name in modelable_entity.capabilities):
+ properties = modelable_entity.capabilities[req_or_cap_name].properties
+ nested_property_name_or_index = self.nested_property_name_or_index[1:]
+ # First argument refers to a capability template?
+ elif hasattr(modelable_entity, 'capability_templates') \
+ and modelable_entity.capability_templates \
+ and (req_or_cap_name in modelable_entity.capability_templates):
+ properties = modelable_entity.capability_templates[req_or_cap_name].properties
+ nested_property_name_or_index = self.nested_property_name_or_index[1:]
+ else:
+ properties = modelable_entity.properties
+ nested_property_name_or_index = self.nested_property_name_or_index
+
+ evaluation = get_modelable_entity_parameter(modelable_entity, properties,
+ nested_property_name_or_index)
+ if evaluation is not None:
+ return evaluation
+
+ raise InvalidValueError(
+ 'function "get_property" could not find "{0}" in modelable entity "{1}"'
+ .format('.'.join(self.nested_property_name_or_index), self.modelable_entity_name),
+ locator=self.locator)
+
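+# For example (illustrative, not part of the original module):
+#
+#     { get_property: [ SELF, database_endpoint, port ] }
+#
+# 'database_endpoint' is first tried as a requirement or capability name of the resolved entity;
+# if it matches neither, the whole remaining path is looked up in the entity's own properties.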
+
+#
+# Attribute
+#
+
+@implements_specification('4.5.1', 'tosca-simple-1.0')
+class GetAttribute(Function):
+ """
+ The ``get_attribute`` function is used to retrieve the values of named attributes declared
+ by the referenced node or relationship template name.
+ """
+
+ def __init__(self, context, presentation, argument):
+ self.locator = presentation._locator
+
+ if (not isinstance(argument, list)) or (len(argument) < 2):
+ raise InvalidValueError(
+ 'function "get_attribute" argument must be a list of at least 2 string expressions:'
+ ' {0}'.format(safe_repr(argument)),
+ locator=self.locator)
+
+ self.modelable_entity_name = parse_modelable_entity_name(context, presentation,
+ 'get_attribute', 0, argument[0])
+ # The first of these will be tried as a req-or-cap name:
+ self.nested_attribute_name_or_index = argument[1:]
+
+ @property
+ def as_raw(self):
+ return {'get_attribute': [self.modelable_entity_name] + self.nested_attribute_name_or_index}
+
+ def __evaluate__(self, container_holder):
+ modelable_entities = get_modelable_entities(container_holder, 'get_attribute', self.locator,
+ self.modelable_entity_name)
+ for modelable_entity in modelable_entities:
+ attributes = modelable_entity.attributes
+ nested_attribute_name_or_index = self.nested_attribute_name_or_index
+ evaluation = get_modelable_entity_parameter(modelable_entity, attributes,
+ nested_attribute_name_or_index)
+ if evaluation is not None:
+ evaluation.final = False # We never return final evaluations!
+ return evaluation
+
+ raise InvalidValueError(
+ 'function "get_attribute" could not find "{0}" in modelable entity "{1}"'
+ .format('.'.join(self.nested_attribute_name_or_index), self.modelable_entity_name),
+ locator=self.locator)
+
+
+#
+# Operation
+#
+
+@implements_specification('4.6.1', 'tosca-simple-1.0') # pylint: disable=abstract-method
+class GetOperationOutput(Function):
+ """
+ The ``get_operation_output`` function is used to retrieve the values of variables exposed /
+ exported from an interface operation.
+ """
+
+ def __init__(self, context, presentation, argument):
+ self.locator = presentation._locator
+
+ if (not isinstance(argument, list)) or (len(argument) != 4):
+ raise InvalidValueError(
+ 'function "get_operation_output" argument must be a list of 4 parameters: {0}'
+ .format(safe_repr(argument)),
+ locator=self.locator)
+
+ self.modelable_entity_name = parse_string_expression(context, presentation,
+ 'get_operation_output', 0,
+ 'modelable entity name', argument[0])
+ self.interface_name = parse_string_expression(context, presentation, 'get_operation_output',
+ 1, 'the interface name', argument[1])
+ self.operation_name = parse_string_expression(context, presentation, 'get_operation_output',
+ 2, 'the operation name', argument[2])
+ self.output_variable_name = parse_string_expression(context, presentation,
+ 'get_operation_output', 3,
+ 'the output name', argument[3])
+
+ @property
+ def as_raw(self):
+ interface_name = self.interface_name
+ if hasattr(interface_name, 'as_raw'):
+ interface_name = as_raw(interface_name)
+ operation_name = self.operation_name
+ if hasattr(operation_name, 'as_raw'):
+ operation_name = as_raw(operation_name)
+ output_variable_name = self.output_variable_name
+ if hasattr(output_variable_name, 'as_raw'):
+ output_variable_name = as_raw(output_variable_name)
+ return {'get_operation_output': [self.modelable_entity_name, interface_name, operation_name,
+ output_variable_name]}
+
+
+#
+# Navigation
+#
+
+@implements_specification('4.7.1', 'tosca-simple-1.0')
+class GetNodesOfType(Function):
+ """
+ The ``get_nodes_of_type`` function can be used to retrieve a list of all known instances of
+ nodes of the declared Node Type.
+ """
+
+ def __init__(self, context, presentation, argument):
+ self.locator = presentation._locator
+
+ self.node_type_name = parse_string_expression(context, presentation, 'get_nodes_of_type',
+ None, 'the node type name', argument)
+
+ if isinstance(self.node_type_name, basestring):
+ node_types = context.presentation.get('service_template', 'node_types')
+ if (node_types is None) or (self.node_type_name not in node_types):
+ raise InvalidValueError(
+ 'function "get_nodes_of_type" argument is not a valid node type name: {0}'
+ .format(safe_repr(argument)),
+ locator=self.locator)
+
+ @property
+ def as_raw(self):
+ node_type_name = self.node_type_name
+ if hasattr(node_type_name, 'as_raw'):
+ node_type_name = as_raw(node_type_name)
+ return {'get_nodes_of_type': node_type_name}
+
+    def __evaluate__(self, container):
+        # Not implemented: evaluation of "get_nodes_of_type" is currently a no-op
+        pass
+
+
+#
+# Artifact
+#
+
+@implements_specification('4.8.1', 'tosca-simple-1.0') # pylint: disable=abstract-method
+class GetArtifact(Function):
+ """
+ The ``get_artifact`` function is used to retrieve artifact location between modelable
+ entities defined in the same service template.
+ """
+
+ def __init__(self, context, presentation, argument):
+ self.locator = presentation._locator
+
+ if (not isinstance(argument, list)) or (len(argument) < 2) or (len(argument) > 4):
+ raise InvalidValueError(
+ 'function "get_artifact" argument must be a list of 2 to 4 parameters: {0}'
+ .format(safe_repr(argument)),
+ locator=self.locator)
+
+ self.modelable_entity_name = parse_string_expression(context, presentation, 'get_artifact',
+ 0, 'modelable entity name',
+ argument[0])
+ self.artifact_name = parse_string_expression(context, presentation, 'get_artifact', 1,
+ 'the artifact name', argument[1])
+        # The location and removal flag are optional (2 to 4 arguments are accepted)
+        self.location = parse_string_expression(context, presentation, 'get_artifact', 2,
+                                                 'the location or "LOCAL_FILE"',
+                                                 argument[2]) if len(argument) > 2 else None
+        self.remove = parse_bool(context, presentation, 'get_artifact', 3, 'the removal flag',
+                                 argument[3]) if len(argument) > 3 else None
+
+ @property
+ def as_raw(self):
+ artifact_name = self.artifact_name
+ if hasattr(artifact_name, 'as_raw'):
+ artifact_name = as_raw(artifact_name)
+ location = self.location
+ if hasattr(location, 'as_raw'):
+ location = as_raw(location)
+        return {'get_artifact': [self.modelable_entity_name, artifact_name, location,
+                                 self.remove]}
+
+
+#
+# Utils
+#
+
+def get_function(context, presentation, value):
+ functions = context.presentation.presenter.functions
+ if isinstance(value, dict) and (len(value) == 1):
+ key = value.keys()[0]
+ if key in functions:
+ try:
+ return True, functions[key](context, presentation, value[key])
+ except InvalidValueError as e:
+ context.validation.report(issue=e.issue)
+ return True, None
+ return False, None
+
+
+def parse_string_expression(context, presentation, name, index, explanation, value): # pylint: disable=unused-argument
+ is_function, func = get_function(context, presentation, value)
+ if is_function:
+ return func
+ else:
+ value = str(value)
+ return value
+
+
+def parse_int(context, presentation, name, index, explanation, value): # pylint: disable=unused-argument
+ if not isinstance(value, int):
+ try:
+ value = int(value)
+ except ValueError:
+ raise invalid_value(name, index, 'an integer', explanation, value,
+ presentation._locator)
+ return value
+
+
+def parse_bool(context, presentation, name, index, explanation, value): # pylint: disable=unused-argument
+ if not isinstance(value, bool):
+ raise invalid_value(name, index, 'a boolean', explanation, value, presentation._locator)
+ return value
+
+
+def parse_modelable_entity_name(context, presentation, name, index, value):
+ value = parse_string_expression(context, presentation, name, index, 'the modelable entity name',
+ value)
+ if value == 'SELF':
+ the_self, _ = parse_self(presentation)
+ if the_self is None:
+ raise invalid_modelable_entity_name(name, index, value, presentation._locator,
+ 'a node template or a relationship template')
+ elif value == 'HOST':
+ _, self_variant = parse_self(presentation)
+ if self_variant != 'node_template':
+ raise invalid_modelable_entity_name(name, index, value, presentation._locator,
+ 'a node template')
+ elif (value == 'SOURCE') or (value == 'TARGET'):
+ _, self_variant = parse_self(presentation)
+ if self_variant != 'relationship_template':
+ raise invalid_modelable_entity_name(name, index, value, presentation._locator,
+ 'a relationship template')
+ elif isinstance(value, basestring):
+ node_templates = \
+ context.presentation.get('service_template', 'topology_template', 'node_templates') \
+ or {}
+ relationship_templates = \
+ context.presentation.get('service_template', 'topology_template',
+ 'relationship_templates') \
+ or {}
+ if (value not in node_templates) and (value not in relationship_templates):
+ raise InvalidValueError(
+ 'function "{0}" parameter {1:d} is not a valid modelable entity name: {2}'
+ .format(name, index + 1, safe_repr(value)),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ return value
+
+
+def parse_self(presentation):
+ from ..types import (NodeType, RelationshipType)
+ from ..templates import (
+ NodeTemplate as NodeTemplatePresentation,
+ RelationshipTemplate as RelationshipTemplatePresentation
+ )
+
+ if presentation is None:
+ return None, None
+ elif isinstance(presentation, NodeTemplatePresentation) or isinstance(presentation, NodeType):
+ return presentation, 'node_template'
+ elif isinstance(presentation, RelationshipTemplatePresentation) \
+ or isinstance(presentation, RelationshipType):
+ return presentation, 'relationship_template'
+ else:
+ return parse_self(presentation._container)
+
+
+def evaluate(value, final, container_holder):
+ """
+ Calls ``__evaluate__`` and passes on ``final`` state.
+ """
+
+ if hasattr(value, '__evaluate__'):
+ value = value.__evaluate__(container_holder)
+ if not value.final:
+ final = False
+ return value.value, final
+ else:
+ return value, final
+
+
+@implements_specification('4.1', 'tosca-simple-1.0')
+def get_modelable_entities(container_holder, name, locator, modelable_entity_name):
+ """
+ The following keywords MAY be used in some TOSCA function in place of a TOSCA Node or
+ Relationship Template name.
+ """
+
+ if modelable_entity_name == 'SELF':
+ return get_self(container_holder, name, locator)
+ elif modelable_entity_name == 'HOST':
+ return get_hosts(container_holder, name, locator)
+ elif modelable_entity_name == 'SOURCE':
+ return get_source(container_holder, name, locator)
+ elif modelable_entity_name == 'TARGET':
+ return get_target(container_holder, name, locator)
+ elif isinstance(modelable_entity_name, basestring):
+ modelable_entities = []
+
+ service = container_holder.service
+ if service is not None:
+ for node in service.nodes.itervalues():
+ if node.node_template.name == modelable_entity_name:
+ modelable_entities.append(node)
+ else:
+ service_template = container_holder.service_template
+ if service_template is not None:
+ for node_template in service_template.node_templates.itervalues():
+ if node_template.name == modelable_entity_name:
+ modelable_entities.append(node_template)
+
+ if not modelable_entities:
+ raise CannotEvaluateFunctionException()
+
+ return modelable_entities
+
+ raise InvalidValueError('function "{0}" could not find modelable entity "{1}"'
+ .format(name, modelable_entity_name),
+ locator=locator)
+
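+# Resolution summary (illustrative): 'SELF' yields the containing node or relationship, 'HOST'
+# yields the containing node's host, 'SOURCE'/'TARGET' yield the ends of the containing
+# relationship, and any other string is matched against node (template) names, e.g.:
+#
+#     get_modelable_entities(container_holder, 'get_attribute', locator, 'db_server')
+#     # -> all Node instances of the 'db_server' node template (hypothetical name), or the
+#     #    matching NodeTemplate(s) before instantiation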
+
+def get_self(container_holder, name, locator):
+ """
+ A TOSCA orchestrator will interpret this keyword as the Node or Relationship Template instance
+ that contains the function at the time the function is evaluated.
+ """
+
+ container = container_holder.container
+ if (not isinstance(container, Node)) and \
+ (not isinstance(container, NodeTemplate)) and \
+ (not isinstance(container, Relationship)) and \
+ (not isinstance(container, RelationshipTemplate)):
+ raise InvalidValueError('function "{0}" refers to "SELF" but it is not contained in '
+ 'a node or a relationship: {1}'.format(name,
+ full_type_name(container)),
+ locator=locator)
+
+ return [container]
+
+
+def get_hosts(container_holder, name, locator):
+ """
+    A TOSCA orchestrator will interpret this keyword to refer to all of the nodes that "host" the
+    node using this reference (i.e., as identified by its HostedOn relationship).
+
+    Specifically, TOSCA orchestrators that encounter this keyword when evaluating the
+    ``get_attribute`` or ``get_property`` functions SHALL search each node along the "HostedOn"
+    relationship chain starting at the immediate node that hosts the node where the function was
+    evaluated (and then that node's host node, and so forth) until a match is found or the
+    "HostedOn" relationship chain ends.
+ """
+
+ container = container_holder.container
+ if (not isinstance(container, Node)) and (not isinstance(container, NodeTemplate)):
+ raise InvalidValueError('function "{0}" refers to "HOST" but it is not contained in '
+ 'a node: {1}'.format(name, full_type_name(container)),
+ locator=locator)
+
+ if not isinstance(container, Node):
+ # NodeTemplate does not have "host"; we'll wait until instantiation
+ raise CannotEvaluateFunctionException()
+
+ host = container.host
+ if host is None:
+ # We might have a host later
+ raise CannotEvaluateFunctionException()
+
+ return [host]
+
+
+def get_source(container_holder, name, locator):
+ """
+ A TOSCA orchestrator will interpret this keyword as the Node Template instance that is at the
+ source end of the relationship that contains the referencing function.
+ """
+
+ container = container_holder.container
+ if (not isinstance(container, Relationship)) and \
+ (not isinstance(container, RelationshipTemplate)):
+ raise InvalidValueError('function "{0}" refers to "SOURCE" but it is not contained in '
+ 'a relationship: {1}'.format(name, full_type_name(container)),
+ locator=locator)
+
+ if not isinstance(container, RelationshipTemplate):
+ # RelationshipTemplate does not have "source_node"; we'll wait until instantiation
+ raise CannotEvaluateFunctionException()
+
+ return [container.source_node]
+
+
+def get_target(container_holder, name, locator):
+ """
+ A TOSCA orchestrator will interpret this keyword as the Node Template instance that is at the
+ target end of the relationship that contains the referencing function.
+ """
+
+ container = container_holder.container
+ if (not isinstance(container, Relationship)) and \
+ (not isinstance(container, RelationshipTemplate)):
+ raise InvalidValueError('function "{0}" refers to "TARGET" but it is not contained in '
+ 'a relationship: {1}'.format(name, full_type_name(container)),
+ locator=locator)
+
+ if not isinstance(container, RelationshipTemplate):
+ # RelationshipTemplate does not have "target_node"; we'll wait until instantiation
+ raise CannotEvaluateFunctionException()
+
+ return [container.target_node]
+
+
+def get_modelable_entity_parameter(modelable_entity, parameters, nested_parameter_name_or_index):
+ if not parameters:
+ return Evaluation(None, True)
+
+ found = True
+ final = True
+ value = parameters
+
+ for name_or_index in nested_parameter_name_or_index:
+ if (isinstance(value, dict) and (name_or_index in value)) \
+ or ((isinstance(value, list) and (name_or_index < len(value)))):
+ value = value[name_or_index] # Parameter
+ # We are not using Parameter.value, but rather Parameter._value, because we want to make
+ # sure to get "final" (it is swallowed by Parameter.value)
+ value, final = evaluate(value._value, final, value)
+ else:
+ found = False
+ break
+
+ return Evaluation(value, final) if found else None
+
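+# A sketch of the lookup above (assumed shapes, not part of the original module): for parameters
+# {'port': <Parameter _value=8080>} and path ['port'], the loop digs through dicts by key (and
+# lists by index), evaluates nested functions via Parameter._value so that "final" is preserved,
+# and returns Evaluation(8080, True); a missing step returns None so the caller can try the next
+# modelable entity.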
+
+def invalid_modelable_entity_name(name, index, value, locator, contexts):
+ return InvalidValueError('function "{0}" parameter {1:d} can be "{2}" only in {3}'
+ .format(name, index + 1, value, contexts),
+ locator=locator, level=Issue.FIELD)
+
+
+def invalid_value(name, index, the_type, explanation, value, locator):
+ return InvalidValueError(
+ 'function "{0}" {1} is not {2}{3}: {4}'
+ .format(name,
+ 'parameter {0:d}'.format(index + 1) if index is not None else 'argument',
+ the_type,
+ ', {0}'.format(explanation) if explanation is not None else '',
+ safe_repr(value)),
+ locator=locator, level=Issue.FIELD)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/interfaces.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/interfaces.py
new file mode 100644
index 0000000..23a03b7
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/interfaces.py
@@ -0,0 +1,530 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.collections import (merge, deepcopy_with_locators, OrderedDict)
+from aria.parser.presentation import get_locator
+from aria.parser.validation import Issue
+
+from .parameters import (coerce_parameter_value, convert_parameter_definitions_to_values)
+
+
+#
+# InterfaceType
+#
+
+def get_inherited_operations(context, presentation):
+ """
+ Returns our operation definitions added on top of those of our parent, if we have one
+ (recursively).
+
+ Allows overriding all aspects of parent operations except input data types.
+ """
+
+ # Get operations from parent
+ parent = presentation._get_parent(context)
+ operations = get_inherited_operations(context, parent) if parent is not None else OrderedDict()
+
+ # Add/merge our operations
+ our_operations = presentation.operations # OperationDefinition
+ merge_operation_definitions(context, operations, our_operations, presentation._name,
+ presentation, 'type')
+
+ for operation in operations.itervalues():
+ operation._reset_method_cache()
+
+ return operations
+
+
+#
+# InterfaceDefinition
+#
+
+def get_and_override_input_definitions_from_type(context, presentation):
+ """
+ Returns our input definitions added on top of those of the interface type, if specified.
+
+ Allows overriding all aspects of parent interface type inputs except data types.
+ """
+
+ inputs = OrderedDict()
+
+ # Get inputs from type
+ the_type = presentation._get_type(context) # InterfaceType
+ type_inputs = the_type._get_inputs(context) if the_type is not None else None
+ if type_inputs:
+ for input_name, type_input in type_inputs.iteritems():
+ inputs[input_name] = type_input._clone(presentation)
+
+ # Add/merge our inputs
+ our_inputs = presentation.inputs # PropertyDefinition
+ if our_inputs:
+ merge_input_definitions(context, inputs, our_inputs, presentation._name, None, presentation,
+ 'definition')
+
+ return inputs
+
+
+def get_and_override_operation_definitions_from_type(context, presentation):
+ """
+ Returns our operation definitions added on top of those of the interface type, if specified.
+
+    Allows overriding all aspects of parent interface type operations except input data types.
+ """
+
+ operations = OrderedDict()
+
+ # Get operations from type
+ the_type = presentation._get_type(context) # InterfaceType
+ type_operations = the_type._get_operations(context) if the_type is not None else None
+ if type_operations:
+ for operations_name, type_operation in type_operations.iteritems():
+ operations[operations_name] = type_operation._clone(presentation)
+
+ # Add/merge our operations
+ our_operations = presentation.operations # OperationDefinition
+ merge_operation_definitions(context, operations, our_operations, presentation._name,
+ presentation, 'definition')
+
+ return operations
+
+
+#
+# NodeType, RelationshipType, GroupType
+#
+
+def get_inherited_interface_definitions(context, presentation, type_name, for_presentation=None):
+ """
+ Returns our interface definitions added on top of those of our parent, if we have one
+ (recursively).
+
+ Allows overriding all aspects of parent interfaces except interface and operation input data
+ types.
+ """
+
+ if for_presentation is None:
+ for_presentation = presentation
+
+ # Get interfaces from parent
+ parent = presentation._get_parent(context)
+ interfaces = get_inherited_interface_definitions(context, parent, type_name, for_presentation) \
+ if parent is not None else OrderedDict()
+
+ # Add/merge interfaces from their types
+ merge_interface_definitions_from_their_types(context, interfaces, presentation)
+
+ # Add/merge our interfaces
+ our_interfaces = presentation.interfaces
+ merge_interface_definitions(context, interfaces, our_interfaces, presentation, for_presentation)
+
+ return interfaces
+
+
+#
+# NodeTemplate, RelationshipTemplate, GroupTemplate
+#
+
+def get_template_interfaces(context, presentation, type_name):
+ """
+ Returns the assigned interface_template values while making sure they are defined in the type.
+ This includes the interfaces themselves, their operations, and inputs for interfaces and
+ operations.
+
+ Interface and operation inputs' default values, if available, will be used if we did not assign
+ them.
+
+ Makes sure that required inputs indeed end up with a value.
+
+ This code is especially complex due to the many levels of nesting involved.
+ """
+
+ template_interfaces = OrderedDict()
+
+ the_type = presentation._get_type(context) # NodeType, RelationshipType, GroupType
+ # InterfaceDefinition (or InterfaceAssignment in the case of RelationshipTemplate):
+ interface_definitions = the_type._get_interfaces(context) if the_type is not None else None
+
+ # Copy over interfaces from the type (will initialize inputs with default values)
+ if interface_definitions is not None:
+ for interface_name, interface_definition in interface_definitions.iteritems():
+ # Note that in the case of a RelationshipTemplate, we will already have the values as
+ # InterfaceAssignment. It will not be converted, just cloned.
+ template_interfaces[interface_name] = \
+ convert_interface_definition_from_type_to_template(context, interface_definition,
+ presentation)
+
+ # Fill in our interfaces
+ our_interface_assignments = presentation.interfaces
+ if our_interface_assignments:
+ # InterfaceAssignment:
+ for interface_name, our_interface_assignment in our_interface_assignments.iteritems():
+ if interface_name in template_interfaces:
+ interface_assignment = template_interfaces[interface_name] # InterfaceAssignment
+ # InterfaceDefinition (or InterfaceAssignment in the case of RelationshipTemplate):
+ interface_definition = interface_definitions[interface_name]
+ merge_interface(context, presentation, interface_assignment,
+ our_interface_assignment, interface_definition, interface_name)
+ else:
+ context.validation.report(
+ 'interface definition "%s" not declared at %s "%s" in "%s"'
+ % (interface_name, type_name, presentation.type, presentation._fullname),
+ locator=our_interface_assignment._locator, level=Issue.BETWEEN_TYPES)
+
+ # Check that there are no required inputs that we haven't assigned
+ for interface_name, interface_template in template_interfaces.iteritems():
+ if interface_name in interface_definitions:
+ # InterfaceDefinition (or InterfaceAssignment in the case of RelationshipTemplate):
+ interface_definition = interface_definitions[interface_name]
+ our_interface_assignment = our_interface_assignments.get(interface_name) \
+ if our_interface_assignments is not None else None
+ validate_required_inputs(context, presentation, interface_template,
+ interface_definition, our_interface_assignment, interface_name)
+
+ return template_interfaces
+
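+# Illustrative flow (not part of the original module): for a node template whose type declares
+#
+#     interfaces:
+#       Standard:
+#         create:
+#           inputs:
+#             port: { type: integer, default: 8080 }
+#
+# the template starts from a clone of 'Standard' with port defaulted to 8080, values assigned in
+# the template's own 'interfaces' section are merged over it, and any required input still left
+# without a value is reported as a validation issue.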
+
+#
+# Utils
+#
+
+def convert_interface_definition_from_type_to_template(context, presentation, container):
+ from ..assignments import InterfaceAssignment
+
+ if isinstance(presentation, InterfaceAssignment):
+ # Nothing to convert, so just clone
+ return presentation._clone(container)
+
+ raw = convert_interface_definition_from_type_to_raw_template(context, presentation)
+ return InterfaceAssignment(name=presentation._name, raw=raw, container=container)
+
+
+def convert_interface_definition_from_type_to_raw_template(context, presentation): # pylint: disable=invalid-name
+ raw = OrderedDict()
+
+ # Copy default values for inputs
+ interface_inputs = presentation._get_inputs(context)
+ if interface_inputs is not None:
+ raw['inputs'] = convert_parameter_definitions_to_values(context, interface_inputs)
+
+ # Copy operations
+ operations = presentation._get_operations(context)
+ if operations:
+ for operation_name, operation in operations.iteritems():
+ raw[operation_name] = OrderedDict()
+ description = operation.description
+ if description is not None:
+ raw[operation_name]['description'] = deepcopy_with_locators(description._raw)
+ implementation = operation.implementation
+ if implementation is not None:
+ raw[operation_name]['implementation'] = deepcopy_with_locators(implementation._raw)
+ inputs = operation.inputs
+ if inputs is not None:
+ raw[operation_name]['inputs'] = convert_parameter_definitions_to_values(context,
+ inputs)
+
+ return raw
+
+
+def convert_requirement_interface_definitions_from_type_to_raw_template(context, raw_requirement, # pylint: disable=invalid-name
+ interface_definitions):
+ if not interface_definitions:
+ return
+ if 'interfaces' not in raw_requirement:
+ raw_requirement['interfaces'] = OrderedDict()
+ for interface_name, interface_definition in interface_definitions.iteritems():
+ raw_interface = convert_interface_definition_from_type_to_raw_template(context,
+ interface_definition)
+ if interface_name in raw_requirement['interfaces']:
+ merge(raw_requirement['interfaces'][interface_name], raw_interface)
+ else:
+ raw_requirement['interfaces'][interface_name] = raw_interface
+
+
+def merge_interface(context, presentation, interface_assignment, our_interface_assignment,
+ interface_definition, interface_name):
+ # Assign/merge interface inputs
+ assign_raw_inputs(context, interface_assignment._raw, our_interface_assignment.inputs,
+ interface_definition._get_inputs(context), interface_name, None, presentation)
+
+ # Assign operation implementations and inputs
+ our_operation_templates = our_interface_assignment.operations # OperationAssignment
+ # OperationDefinition or OperationAssignment:
+ operation_definitions = interface_definition._get_operations(context) \
+ if hasattr(interface_definition, '_get_operations') else interface_definition.operations
+ if our_operation_templates:
+ # OperationAssignment:
+ for operation_name, our_operation_template in our_operation_templates.iteritems():
+ operation_definition = operation_definitions.get(operation_name) # OperationDefinition
+
+ our_input_assignments = our_operation_template.inputs
+ our_implementation = our_operation_template.implementation
+
+ if operation_definition is None:
+ context.validation.report(
+ 'interface definition "%s" refers to an unknown operation "%s" in "%s"'
+ % (interface_name, operation_name, presentation._fullname),
+ locator=our_operation_template._locator, level=Issue.BETWEEN_TYPES)
+
+ if (our_input_assignments is not None) or (our_implementation is not None):
+ # Make sure we have the dict
+ if (operation_name not in interface_assignment._raw) \
+ or (interface_assignment._raw[operation_name] is None):
+ interface_assignment._raw[operation_name] = OrderedDict()
+
+ if our_implementation is not None:
+ interface_assignment._raw[operation_name]['implementation'] = \
+ deepcopy_with_locators(our_implementation._raw)
+
+ # Assign/merge operation inputs
+ input_definitions = operation_definition.inputs \
+ if operation_definition is not None else None
+ assign_raw_inputs(context, interface_assignment._raw[operation_name],
+ our_input_assignments, input_definitions, interface_name,
+ operation_name, presentation)
+
+
+def merge_raw_input_definition(context, the_raw_input, our_input, interface_name, operation_name,
+ presentation, type_name):
+ # Check if we changed the type
+ # TODO: allow a sub-type?
+ input_type1 = the_raw_input.get('type')
+ input_type2 = our_input.type
+ if input_type1 != input_type2:
+ if operation_name is not None:
+ context.validation.report(
+ 'interface %s "%s" changes operation input "%s.%s" type from "%s" to "%s" in "%s"'
+ % (type_name, interface_name, operation_name, our_input._name, input_type1,
+ input_type2, presentation._fullname),
+ locator=input_type2._locator, level=Issue.BETWEEN_TYPES)
+ else:
+ context.validation.report(
+ 'interface %s "%s" changes input "%s" type from "%s" to "%s" in "%s"'
+ % (type_name, interface_name, our_input._name, input_type1, input_type2,
+ presentation._fullname),
+ locator=input_type2._locator, level=Issue.BETWEEN_TYPES)
+
+ # Merge
+ merge(the_raw_input, our_input._raw)
+
+
+def merge_input_definitions(context, inputs, our_inputs, interface_name, operation_name,
+ presentation, type_name):
+ for input_name, our_input in our_inputs.iteritems():
+ if input_name in inputs:
+ merge_raw_input_definition(context, inputs[input_name]._raw, our_input, interface_name,
+ operation_name, presentation, type_name)
+ else:
+ inputs[input_name] = our_input._clone(presentation)
+
+
+def merge_raw_input_definitions(context, raw_inputs, our_inputs, interface_name, operation_name,
+ presentation, type_name):
+ for input_name, our_input in our_inputs.iteritems():
+ if input_name in raw_inputs:
+ merge_raw_input_definition(context, raw_inputs[input_name], our_input, interface_name,
+ operation_name, presentation, type_name)
+ else:
+ raw_inputs[input_name] = deepcopy_with_locators(our_input._raw)
+
+
+def merge_raw_operation_definition(context, raw_operation, our_operation, interface_name,
+ presentation, type_name):
+ if not isinstance(our_operation._raw, dict):
+ # Convert short form to long form
+ raw_operation['implementation'] = deepcopy_with_locators(our_operation._raw)
+ return
+
+ # Add/merge inputs
+ our_operation_inputs = our_operation.inputs
+ if our_operation_inputs:
+ # Make sure we have the dict
+ if ('inputs' not in raw_operation) or (raw_operation.get('inputs') is None):
+ raw_operation['inputs'] = OrderedDict()
+
+ merge_raw_input_definitions(context, raw_operation['inputs'], our_operation_inputs,
+ interface_name, our_operation._name, presentation, type_name)
+
+ # Override the description
+ if our_operation._raw.get('description') is not None:
+ raw_operation['description'] = deepcopy_with_locators(our_operation._raw['description'])
+
+ # Add/merge implementation
+ if our_operation._raw.get('implementation') is not None:
+ if raw_operation.get('implementation') is not None:
+ merge(raw_operation['implementation'],
+ deepcopy_with_locators(our_operation._raw['implementation']))
+ else:
+ raw_operation['implementation'] = \
+ deepcopy_with_locators(our_operation._raw['implementation'])
+
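+# For example (illustrative, not part of the original module), the short-form operation
+#
+#     create: scripts/create.sh
+#
+# is normalized to the long form
+#
+#     create:
+#       implementation: scripts/create.sh
+#
+# so that description, implementation, and inputs can then be merged over inherited definitions.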
+
+def merge_operation_definitions(context, operations, our_operations, interface_name, presentation,
+ type_name):
+ if not our_operations:
+ return
+ for operation_name, our_operation in our_operations.iteritems():
+ if operation_name in operations:
+ merge_raw_operation_definition(context, operations[operation_name]._raw, our_operation,
+ interface_name, presentation, type_name)
+ else:
+ operations[operation_name] = our_operation._clone(presentation)
+
+
+def merge_raw_operation_definitions(context, raw_operations, our_operations, interface_name,
+ presentation, type_name):
+ for operation_name, our_operation in our_operations.iteritems():
+ if operation_name in raw_operations:
+ raw_operation = raw_operations[operation_name]
+ if isinstance(raw_operation, basestring):
+ # Convert short form to long form
+ raw_operations[operation_name] = OrderedDict((('implementation', raw_operation),))
+ raw_operation = raw_operations[operation_name]
+ merge_raw_operation_definition(context, raw_operation, our_operation, interface_name,
+ presentation, type_name)
+ else:
+ raw_operations[operation_name] = deepcopy_with_locators(our_operation._raw)
+
+
+# From either an InterfaceType or an InterfaceDefinition:
+def merge_interface_definition(context, interface, our_source, presentation, type_name):
+ if hasattr(our_source, 'type'):
+ # Check if we changed the interface type
+ type1 = interface._get_type(context)
+ type2 = our_source._get_type(context)
+
+ if (type2 is not None) and not type1._is_descendant(context, type2):
+ context.validation.report(
+ 'interface definition type "{0}" is not a descendant of overridden '
+ 'interface definition type "{1}"' \
+ .format(type1._name, type2._name),
+ locator=our_source._locator, level=Issue.BETWEEN_TYPES)
+
+ # Add/merge inputs
+ our_interface_inputs = our_source._get_inputs(context) \
+ if hasattr(our_source, '_get_inputs') else our_source.inputs
+ if our_interface_inputs:
+ # Make sure we have the dict
+ if ('inputs' not in interface._raw) or (interface._raw.get('inputs') is None):
+ interface._raw['inputs'] = OrderedDict()
+
+ merge_raw_input_definitions(context, interface._raw['inputs'], our_interface_inputs,
+ our_source._name, None, presentation, type_name)
+
+ # Add/merge operations
+ our_operations = our_source._get_operations(context) \
+ if hasattr(our_source, '_get_operations') else our_source.operations
+ if our_operations is not None:
+ merge_raw_operation_definitions(context, interface._raw, our_operations, our_source._name,
+ presentation, type_name)
+
+
+def merge_interface_definitions(context, interfaces, our_interfaces, presentation,
+ for_presentation=None):
+ if not our_interfaces:
+ return
+ for name, our_interface in our_interfaces.iteritems():
+ if name in interfaces:
+ merge_interface_definition(context, interfaces[name], our_interface, presentation,
+ 'definition')
+ else:
+ interfaces[name] = our_interface._clone(for_presentation)
+
+
+def merge_interface_definitions_from_their_types(context, interfaces, presentation):
+ for interface in interfaces.itervalues():
+ the_type = interface._get_type(context) # InterfaceType
+ if the_type is not None:
+ merge_interface_definition(context, interface, the_type, presentation, 'type')
+
+
+def assign_raw_inputs(context, values, assignments, definitions, interface_name, operation_name,
+ presentation):
+ if not assignments:
+ return
+
+ # Make sure we have the dict
+ if ('inputs' not in values) or (values['inputs'] is None):
+ values['inputs'] = OrderedDict()
+
+ # Assign inputs
+ for input_name, assignment in assignments.iteritems():
+ if (definitions is not None) and (input_name not in definitions):
+ if operation_name is not None:
+ context.validation.report(
+ 'interface definition "%s" assigns a value to an unknown operation input'
+ ' "%s.%s" in "%s"'
+ % (interface_name, operation_name, input_name, presentation._fullname),
+ locator=assignment._locator, level=Issue.BETWEEN_TYPES)
+ else:
+ context.validation.report(
+ 'interface definition "%s" assigns a value to an unknown input "%s" in "%s"'
+ % (interface_name, input_name, presentation._fullname),
+ locator=assignment._locator, level=Issue.BETWEEN_TYPES)
+
+ definition = definitions.get(input_name) if definitions is not None else None
+
+ # Note: default value has already been assigned
+
+ # Coerce value
+ values['inputs'][input_name] = coerce_parameter_value(context, assignment, definition,
+ assignment.value)
+
+
+def validate_required_inputs(context, presentation, assignment, definition, original_assignment,
+ interface_name, operation_name=None):
+ # The validation of the `required` field of inputs that belong to operations and interfaces
+ # (as opposed to topology template and workflow inputs) is done only in the parsing stage.
+ # This reasoning follows the TOSCA spirit, where anything that is declared as required in the
+    # type must be assigned in the corresponding template.
+ input_definitions = definition.inputs
+ if input_definitions:
+ for input_name, input_definition in input_definitions.iteritems():
+ if input_definition.required:
+ prop = assignment.inputs.get(input_name) \
+ if ((assignment is not None) and (assignment.inputs is not None)) else None
+ value = prop.value if prop is not None else None
+ value = value.value if value is not None else None
+ if value is None:
+ if operation_name is not None:
+ context.validation.report(
+ 'interface definition "%s" does not assign a value to a required'
+ ' operation input "%s.%s" in "%s"'
+ % (interface_name, operation_name, input_name, presentation._fullname),
+ locator=get_locator(original_assignment, presentation._locator),
+ level=Issue.BETWEEN_TYPES)
+ else:
+ context.validation.report(
+ 'interface definition "%s" does not assign a value to a required input'
+ ' "%s" in "%s"'
+ % (interface_name, input_name, presentation._fullname),
+ locator=get_locator(original_assignment, presentation._locator),
+ level=Issue.BETWEEN_TYPES)
+
+ if operation_name is not None:
+ return
+
+ assignment_operations = assignment.operations
+ operation_definitions = definition._get_operations(context)
+ if operation_definitions:
+ for operation_name, operation_definition in operation_definitions.iteritems():
+ assignment_operation = assignment_operations.get(operation_name) \
+ if assignment_operations is not None else None
+ original_operation = \
+ original_assignment.operations.get(operation_name, original_assignment) \
+ if (original_assignment is not None) \
+ and (original_assignment.operations is not None) \
+ else original_assignment
+ validate_required_inputs(context, presentation, assignment_operation,
+ operation_definition, original_operation, interface_name,
+ operation_name)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/parameters.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/parameters.py
new file mode 100644
index 0000000..9bafeec
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/parameters.py
@@ -0,0 +1,230 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.collections import (merge, deepcopy_with_locators, OrderedDict)
+from aria.utils.formatting import pluralize
+from aria.parser.presentation import Value
+from aria.parser.validation import Issue
+
+from .data_types import (coerce_value, get_primitive_data_type)
+from ..presentation.types import get_type_by_name
+
+
+#
+# ArtifactType, DataType, CapabilityType, RelationshipType, NodeType, GroupType, PolicyType
+#
+
+def get_inherited_parameter_definitions(context, presentation, field_name, for_presentation=None):
+ """
+    Returns our parameter definitions merged on top of those of our parent, if we have one
+    (recursively).
+
+    All aspects of a parent parameter definition may be overridden, except that the data type
+    must remain within the original type's hierarchy.
+ """
+
+ if for_presentation is None:
+ for_presentation = presentation
+
+ # Get definitions from parent
+ # If we inherit from a primitive, it does not have a parent:
+ parent = presentation._get_parent(context) if hasattr(presentation, '_get_parent') else None
+ definitions = get_inherited_parameter_definitions(context, parent, field_name,
+ for_presentation) \
+ if parent is not None else OrderedDict()
+
+ # Add/merge our definitions
+ # If we inherit from a primitive, it does not have our field
+ our_definitions = getattr(presentation, field_name, None)
+ if our_definitions:
+ our_definitions_clone = OrderedDict()
+ for name, our_definition in our_definitions.iteritems():
+ our_definitions_clone[name] = our_definition._clone(for_presentation)
+ our_definitions = our_definitions_clone
+ merge_parameter_definitions(context, presentation, definitions, our_definitions, field_name)
+
+ for definition in definitions.itervalues():
+ definition._reset_method_cache()
+
+ return definitions
+
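+
+# A short, self-contained sketch of the inheritance order used above (illustrative only: plain
+# dicts stand in for the parameter definition presentations, and the real code performs a deep
+# merge of the raw definitions rather than a plain replacement):
+def _example_inherited_definitions():
+    parent_definitions = OrderedDict([('port', {'type': 'integer', 'required': True})])
+    our_definitions = OrderedDict([('port', {'type': 'integer', 'default': 8080}),
+                                   ('host', {'type': 'string'})])
+    definitions = OrderedDict(parent_definitions)    # start from the parent (recursively)
+    definitions.update(our_definitions)              # our definitions override by name
+    return definitions                               # 'port' (overridden) and 'host'
+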
+
+#
+# NodeTemplate, RelationshipTemplate, GroupTemplate, PolicyTemplate
+#
+
+def get_assigned_and_defined_parameter_values(context, presentation, field_name):
+ """
+ Returns the assigned parameter values while making sure they are defined in our type.
+
+ The parameter definition's default value, if available, will be used if we did not assign it.
+
+ Makes sure that required parameters indeed end up with a value.
+ """
+
+ values = OrderedDict()
+
+ the_type = presentation._get_type(context)
+ field_name_plural = pluralize(field_name)
+ assignments = getattr(presentation, field_name_plural)
+ get_fn_name = '_get_{0}'.format(field_name_plural)
+ definitions = getattr(the_type, get_fn_name)(context) if the_type is not None else None
+
+ # Fill in our assignments, but make sure they are defined
+ if assignments:
+ for name, value in assignments.iteritems():
+ if (definitions is not None) and (name in definitions):
+ definition = definitions[name]
+ values[name] = coerce_parameter_value(context, value, definition, value.value)
+ else:
+ context.validation.report('assignment to undefined {0} "{1}" in "{2}"'
+ .format(field_name, name, presentation._fullname),
+ locator=value._locator, level=Issue.BETWEEN_TYPES)
+
+ # Fill in defaults from the definitions
+ if definitions:
+ for name, definition in definitions.iteritems():
+ # Note: attributes will always have a default value, even if it's None
+ if (name not in values) and \
+ (('default' in definition._raw) or (field_name == 'attribute')):
+ values[name] = coerce_parameter_value(context, presentation, definition,
+ definition.default)
+
+ validate_required_values(context, presentation, values, definitions)
+
+ # Fill in nulls for missing values that are *not* required
+ if definitions:
+ for name, definition in definitions.iteritems():
+ if (name not in values) and not getattr(definition, 'required', False):
+ values[name] = coerce_parameter_value(context, presentation, definition, None)
+
+ return values
+
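+
+# A condensed, self-contained sketch of the assignment/default/required flow above (illustrative
+# only: plain dicts stand in for the definitions and assignments, and reporting is omitted):
+def _example_assigned_and_defined_values(definitions, assignments):
+    values = OrderedDict()
+    for name, value in (assignments or {}).items():     # 1. explicit assignments win
+        if name in definitions:
+            values[name] = value
+    for name, definition in definitions.items():        # 2. fall back to declared defaults
+        if (name not in values) and ('default' in definition):
+            values[name] = definition['default']
+    missing = [name for name, definition in definitions.items()
+               if definition.get('required') and (values.get(name) is None)]
+    return values, missing                              # 3. 'missing' must end up empty
+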
+
+#
+# TopologyTemplate
+#
+
+def get_parameter_values(context, presentation, field_name):
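+    """
+    Returns the parameter values of a topology template field (such as inputs or outputs), using
+    an explicitly assigned value when one is present and falling back to the declared default
+    otherwise.
+    """
+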
+ values = OrderedDict()
+
+ parameters = getattr(presentation, field_name)
+
+ # Fill in defaults and values
+ if parameters:
+ for name, parameter in parameters.iteritems():
+ if values.get(name) is None:
+ if hasattr(parameter, 'value') and (parameter.value is not None):
+ # For parameters only:
+ values[name] = coerce_parameter_value(context, presentation, parameter,
+ parameter.value)
+ else:
+ default = parameter.default if hasattr(parameter, 'default') else None
+ values[name] = coerce_parameter_value(context, presentation, parameter, default)
+
+ return values
+
+
+#
+# Utils
+#
+
+def validate_required_values(context, presentation, values, definitions):
+ """
+    Reports a validation issue for each required property that has not been assigned a value.
+ """
+
+ if not definitions:
+ return
+ for name, definition in definitions.iteritems():
+ if getattr(definition, 'required', False) and \
+ ((values is None) or (values.get(name) is None)):
+ context.validation.report('required property "%s" is not assigned a value in "%s"'
+ % (name, presentation._fullname),
+ locator=presentation._get_child_locator('properties'),
+ level=Issue.BETWEEN_TYPES)
+
+
+def merge_raw_parameter_definition(context, presentation, raw_property_definition,
+ our_property_definition, field_name, property_name):
+ # Check if we changed the parameter type
+ type1_name = raw_property_definition.get('type')
+ type1 = get_type_by_name(context, type1_name, 'data_types')
+ if type1 is None:
+ type1 = get_primitive_data_type(type1_name)
+ our_property_definition._reset_method_cache()
+ type2 = our_property_definition._get_type(context)
+
+ if type1 != type2:
+ if not hasattr(type1, '_is_descendant') or not type1._is_descendant(context, type2):
+ context.validation.report(
+ 'property definition type "{0}" is not a descendant of overridden '
+ 'property definition type "{1}"' \
+            .format(type2._name, type1_name),
+ locator=presentation._get_child_locator(field_name, property_name),
+ level=Issue.BETWEEN_TYPES)
+
+ merge(raw_property_definition, our_property_definition._raw)
+
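+
+# The override rule enforced above, shown on plain dicts (illustrative only: the real code uses
+# aria.utils.collections.merge, which merges nested dicts recursively):
+def _example_parameter_override():
+    overridden = {'type': 'integer', 'description': 'listening port'}
+    overriding = {'type': 'integer', 'default': 8080}    # may only change 'type' to a descendant
+    merged = dict(overridden)
+    merged.update(overriding)
+    return merged    # {'type': 'integer', 'description': 'listening port', 'default': 8080}
+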
+
+def merge_raw_parameter_definitions(context, presentation, raw_property_definitions,
+ our_property_definitions, field_name):
+ if not our_property_definitions:
+ return
+ for property_name, our_property_definition in our_property_definitions.iteritems():
+ if property_name in raw_property_definitions:
+ raw_property_definition = raw_property_definitions[property_name]
+ merge_raw_parameter_definition(context, presentation, raw_property_definition,
+ our_property_definition, field_name, property_name)
+ else:
+ raw_property_definitions[property_name] = \
+ deepcopy_with_locators(our_property_definition._raw)
+
+
+def merge_parameter_definitions(context, presentation, property_definitions,
+ our_property_definitions, field_name):
+ if not our_property_definitions:
+ return
+ for property_name, our_property_definition in our_property_definitions.iteritems():
+ if property_name in property_definitions:
+ property_definition = property_definitions[property_name]
+ merge_raw_parameter_definition(context, presentation, property_definition._raw,
+ our_property_definition, field_name, property_name)
+ else:
+ property_definitions[property_name] = our_property_definition
+
+
+# Works on properties, inputs, and parameters
+def coerce_parameter_value(context, presentation, definition, value, aspect=None):
+ the_type = definition._get_type(context) if definition is not None else None
+ entry_schema = definition.entry_schema if definition is not None else None
+ constraints = definition._get_constraints(context) \
+ if ((definition is not None) and hasattr(definition, '_get_constraints')) else None
+ value = coerce_value(context, presentation, the_type, entry_schema, constraints, value, aspect)
+ if (the_type is not None) and hasattr(the_type, '_name'):
+ type_name = the_type._name
+ else:
+ type_name = getattr(definition, 'type', None)
+ description = getattr(definition, 'description', None)
+ description = description.value if description is not None else None
+ required = getattr(definition, 'required', None)
+ return Value(type_name, value, description, required)
+
+
+def convert_parameter_definitions_to_values(context, definitions):
+ values = OrderedDict()
+ for name, definition in definitions.iteritems():
+ default = definition.default
+ values[name] = coerce_parameter_value(context, definition, definition, default)
+ return values
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/policies.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/policies.py
new file mode 100644
index 0000000..0376798
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/policies.py
@@ -0,0 +1,79 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ..presentation.types import convert_name_to_full_type_name
+
+
+#
+# PolicyType
+#
+
+def get_inherited_targets(context, presentation):
+ """
+    Returns our target node types and group types if we declare any; otherwise those of our
+    parent, if we have one (recursively).
+ """
+
+ parent = presentation._get_parent(context)
+
+ node_types, group_types = get_inherited_targets(context, parent) \
+ if parent is not None else ([], [])
+
+ our_targets = presentation.targets
+ if our_targets:
+ all_node_types = context.presentation.get('service_template', 'node_types') or {}
+ all_group_types = context.presentation.get('service_template', 'group_types') or {}
+ node_types = []
+ group_types = []
+
+ for our_target in our_targets:
+ if our_target in all_node_types:
+ our_target = convert_name_to_full_type_name(context, our_target, all_node_types)
+ node_types.append(all_node_types[our_target])
+ elif our_target in all_group_types:
+ our_target = convert_name_to_full_type_name(context, our_target, all_group_types)
+ group_types.append(all_group_types[our_target])
+
+ return node_types, group_types
+
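+
+# A self-contained sketch of the target inheritance rule above (illustrative only): a type that
+# declares its own targets replaces its parent's targets entirely, and each target name is
+# resolved against either the known node types or the known group types.
+def _example_inherited_targets(parent_targets, our_targets, all_node_types, all_group_types):
+    node_types, group_types = parent_targets
+    if our_targets:
+        node_types, group_types = [], []
+        for target in our_targets:
+            if target in all_node_types:
+                node_types.append(all_node_types[target])
+            elif target in all_group_types:
+                group_types.append(all_group_types[target])
+    return node_types, group_types
+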
+
+#
+# PolicyTemplate
+#
+
+def get_policy_targets(context, presentation):
+ """
+ Returns our target node templates and groups if we have them.
+ """
+
+ node_templates = []
+ groups = []
+
+ our_targets = presentation.targets
+ if our_targets:
+ all_node_templates = \
+ context.presentation.get('service_template', 'topology_template', 'node_templates') \
+ or {}
+ all_groups = \
+ context.presentation.get('service_template', 'topology_template', 'groups') \
+ or {}
+
+ for our_target in our_targets:
+ if our_target in all_node_templates:
+ node_templates.append(all_node_templates[our_target])
+ elif our_target in all_groups:
+ groups.append(all_groups[our_target])
+
+ return node_templates, groups
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/requirements.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/requirements.py
new file mode 100644
index 0000000..6bdb5b1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/requirements.py
@@ -0,0 +1,364 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.parser.validation import Issue
+from aria.utils.collections import (deepcopy_with_locators, OrderedDict)
+
+from .parameters import (convert_parameter_definitions_to_values, validate_required_values,
+ coerce_parameter_value)
+from .interfaces import (convert_requirement_interface_definitions_from_type_to_raw_template,
+ merge_interface_definitions, merge_interface, validate_required_inputs)
+
+
+#
+# NodeType
+#
+
+def get_inherited_requirement_definitions(context, presentation):
+ """
+ Returns our requirement definitions added on top of those of our parent, if we have one
+ (recursively).
+
+ Allows overriding requirement definitions if they have the same name.
+ """
+
+ parent = presentation._get_parent(context)
+ requirement_definitions = get_inherited_requirement_definitions(context, parent) \
+ if parent is not None else []
+
+ our_requirement_definitions = presentation.requirements
+ if our_requirement_definitions:
+ for requirement_name, our_requirement_definition in our_requirement_definitions:
+            # Remove any existing requirement definition of this name (rebuild the list instead
+            # of removing items from it while iterating over it)
+            requirement_definitions[:] = [(name, definition)
+                                          for name, definition in requirement_definitions
+                                          if name != requirement_name]
+
+ requirement_definitions.append((requirement_name, our_requirement_definition))
+
+ return requirement_definitions
+
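+
+# Requirement definitions are kept as a list of (name, definition) tuples rather than a dict,
+# since requirement names may legally repeat in assignments. A self-contained sketch of the
+# override rule above (illustrative only):
+def _example_override_requirement_definition(requirement_definitions, name, definition):
+    filtered = [(n, d) for n, d in requirement_definitions if n != name]    # drop the parent's
+    filtered.append((name, definition))                                     # ours replaces it
+    return filtered
+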
+
+#
+# NodeTemplate
+#
+
+def get_template_requirements(context, presentation):
+ """
+    Returns our requirement assignments, validated and filled in against the requirement
+    definitions of the node type, if any exist there.
+
+ If the requirement has a relationship, the relationship properties and interfaces are assigned.
+
+ Returns the assigned property, interface input, and interface operation input values while
+ making sure they are defined in our type. Default values, if available, will be used if we did
+ not assign them. Also makes sure that required properties and inputs indeed end up with a value.
+ """
+
+ requirement_assignments = []
+
+ the_type = presentation._get_type(context) # NodeType
+ requirement_definitions = the_type._get_requirements(context) if the_type is not None else None
+
+ # Add our requirement assignments
+ our_requirement_assignments = presentation.requirements
+ if our_requirement_assignments:
+ add_requirement_assignments(context, presentation, requirement_assignments,
+ requirement_definitions, our_requirement_assignments)
+
+ # Validate occurrences
+ if requirement_definitions:
+ for requirement_name, requirement_definition in requirement_definitions:
+            # Allowed occurrences (None if not specified)
+            allowed_occurrences = requirement_definition.occurrences
+
+ # Count actual occurrences
+ actual_occurrences = 0
+ for name, _ in requirement_assignments:
+ if name == requirement_name:
+ actual_occurrences += 1
+
+ if allowed_occurrences is None:
+ # If not specified, we interpret this to mean that exactly 1 occurrence is required
+ if actual_occurrences == 0:
+ # If it's not there, we will automatically add it (this behavior is not in the
+ # TOSCA spec, but seems implied)
+ requirement_assignment, \
+ relationship_property_definitions, \
+ relationship_interface_definitions = \
+ convert_requirement_from_definition_to_assignment(context,
+ requirement_definition,
+ None, presentation)
+ validate_requirement_assignment(context, presentation, requirement_assignment,
+ relationship_property_definitions,
+ relationship_interface_definitions)
+ requirement_assignments.append((requirement_name, requirement_assignment))
+ elif actual_occurrences > 1:
+ context.validation.report(
+                    'requirement "%s" is allowed only one occurrence in "%s": has %d'
+ % (requirement_name, presentation._fullname, actual_occurrences),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ else:
+ if not allowed_occurrences.is_in(actual_occurrences):
+ if allowed_occurrences.value[1] == 'UNBOUNDED':
+ context.validation.report(
+ 'requirement "%s" does not have at least %d occurrences in "%s": has %d'
+ % (requirement_name, allowed_occurrences.value[0],
+ presentation._fullname, actual_occurrences),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ else:
+ context.validation.report(
+ 'requirement "%s" is allowed between %d and %d occurrences in "%s":'
+ ' has %d'
+ % (requirement_name, allowed_occurrences.value[0],
+ allowed_occurrences.value[1], presentation._fullname,
+ actual_occurrences),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+
+ return requirement_assignments
+
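+
+# A self-contained sketch of the occurrence check above (illustrative only): count how many times
+# a requirement name was assigned and compare the count to the declared [min, max] range, where
+# max may be the string 'UNBOUNDED'.
+def _example_occurrences_in_range(requirement_assignments, name, occurrences):
+    actual = sum(1 for n, _ in requirement_assignments if n == name)
+    if occurrences is None:
+        return actual == 1    # unspecified means exactly one (auto-added above when missing)
+    minimum, maximum = occurrences
+    if actual < minimum:
+        return False
+    return (maximum == 'UNBOUNDED') or (actual <= maximum)
+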
+
+#
+# Utils
+#
+
+def convert_requirement_from_definition_to_assignment(context, requirement_definition, # pylint: disable=too-many-branches
+ our_requirement_assignment, container):
+ from ..assignments import RequirementAssignment
+
+ raw = OrderedDict()
+
+ # Capability type name:
+ raw['capability'] = deepcopy_with_locators(requirement_definition.capability)
+
+ node_type = requirement_definition._get_node_type(context)
+ if node_type is not None:
+ raw['node'] = deepcopy_with_locators(node_type._name)
+
+ relationship_type = None
+ relationship_template = None
+ relationship_property_definitions = None
+ relationship_interface_definitions = None
+
+ # First try to find the relationship if we declared it
+ # RelationshipAssignment:
+ our_relationship = our_requirement_assignment.relationship \
+ if our_requirement_assignment is not None else None
+ if our_relationship is not None:
+ relationship_type, relationship_type_variant = our_relationship._get_type(context)
+ if relationship_type_variant == 'relationship_template':
+ relationship_template = relationship_type
+ relationship_type = relationship_template._get_type(context)
+
+ definition_relationship_type = None
+ relationship_definition = requirement_definition.relationship # RelationshipDefinition
+ if relationship_definition is not None:
+ definition_relationship_type = relationship_definition._get_type(context)
+
+    # If we did not assign a relationship type, fall back to the one declared at the requirement
+    # definition (on the node type)
+ if relationship_type is None:
+ relationship_type = definition_relationship_type
+ else:
+        # Make sure the assigned type is derived from the declared type
+ if not definition_relationship_type._is_descendant(context, relationship_type):
+ context.validation.report(
+ 'assigned relationship type "%s" is not a descendant of declared relationship type'
+ ' "%s"' \
+ % (relationship_type._name, definition_relationship_type._name),
+ locator=container._locator, level=Issue.BETWEEN_TYPES)
+
+ if relationship_type is not None:
+ raw['relationship'] = OrderedDict()
+
+ type_name = our_relationship.type if our_relationship is not None else None
+ if type_name is None:
+ type_name = relationship_type._name
+
+ raw['relationship']['type'] = deepcopy_with_locators(type_name)
+
+ # These are our property definitions
+ relationship_property_definitions = relationship_type._get_properties(context)
+
+ if relationship_template is not None:
+ # Property values from template
+ raw['relationship']['properties'] = relationship_template._get_property_values(context)
+ else:
+ if relationship_property_definitions:
+ # Convert property definitions to values
+ raw['relationship']['properties'] = \
+ convert_parameter_definitions_to_values(context,
+ relationship_property_definitions)
+
+ # These are our interface definitions
+ # InterfaceDefinition:
+ relationship_interface_definitions = OrderedDict(relationship_type._get_interfaces(context))
+
+ # Convert interface definitions to templates
+ convert_requirement_interface_definitions_from_type_to_raw_template(
+ context,
+ raw['relationship'],
+ relationship_interface_definitions)
+
+ if relationship_definition:
+ # Merge extra interface definitions
+ # InterfaceDefinition:
+ definition_interface_definitions = relationship_definition.interfaces
+ merge_interface_definitions(context, relationship_interface_definitions,
+ definition_interface_definitions, requirement_definition,
+ container)
+
+ if relationship_template is not None:
+ # Interfaces from template
+ interfaces = relationship_template._get_interfaces(context)
+ if interfaces:
+ raw['relationship']['interfaces'] = OrderedDict()
+ for interface_name, interface in interfaces.iteritems():
+ raw['relationship']['interfaces'][interface_name] = interface._raw
+
+ return \
+ RequirementAssignment(name=requirement_definition._name, raw=raw, container=container), \
+ relationship_property_definitions, \
+ relationship_interface_definitions
+
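+
+# The raw assignment built above has roughly this shape (illustrative only: the type names below
+# are hypothetical and the real values come from the requirement definition and, when declared,
+# the relationship template):
+_EXAMPLE_RAW_REQUIREMENT_ASSIGNMENT = OrderedDict([
+    ('capability', 'tosca.capabilities.Endpoint'),
+    ('node', 'tosca.nodes.WebServer'),
+    ('relationship', OrderedDict([
+        ('type', 'tosca.relationships.ConnectsTo'),
+        ('properties', OrderedDict()),      # converted from the relationship type's definitions
+        ('interfaces', OrderedDict())]))])  # converted and merged interface definitions
+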
+
+def add_requirement_assignments(context, presentation, requirement_assignments,
+ requirement_definitions, our_requirement_assignments):
+ for requirement_name, our_requirement_assignment in our_requirement_assignments:
+ requirement_definition = get_first_requirement(requirement_definitions, requirement_name)
+ if requirement_definition is not None:
+ requirement_assignment, \
+ relationship_property_definitions, \
+ relationship_interface_definitions = \
+ convert_requirement_from_definition_to_assignment(context, requirement_definition,
+ our_requirement_assignment,
+ presentation)
+ merge_requirement_assignment(context,
+ relationship_property_definitions,
+ relationship_interface_definitions,
+ requirement_assignment, our_requirement_assignment)
+ validate_requirement_assignment(context,
+ our_requirement_assignment.relationship \
+ or our_requirement_assignment,
+ requirement_assignment,
+ relationship_property_definitions,
+ relationship_interface_definitions)
+ requirement_assignments.append((requirement_name, requirement_assignment))
+ else:
+ context.validation.report('requirement "%s" not declared at node type "%s" in "%s"'
+ % (requirement_name, presentation.type,
+ presentation._fullname),
+ locator=our_requirement_assignment._locator,
+ level=Issue.BETWEEN_TYPES)
+
+
+def merge_requirement_assignment(context, relationship_property_definitions,
+ relationship_interface_definitions, requirement, our_requirement):
+ our_capability = our_requirement.capability
+ if our_capability is not None:
+ requirement._raw['capability'] = deepcopy_with_locators(our_capability)
+
+ our_node = our_requirement.node
+ if our_node is not None:
+ requirement._raw['node'] = deepcopy_with_locators(our_node)
+
+ our_node_filter = our_requirement.node_filter
+ if our_node_filter is not None:
+ requirement._raw['node_filter'] = deepcopy_with_locators(our_node_filter._raw)
+
+ our_relationship = our_requirement.relationship # RelationshipAssignment
+ if (our_relationship is not None) and (our_relationship.type is None):
+ # Make sure we have a dict
+ if 'relationship' not in requirement._raw:
+ requirement._raw['relationship'] = OrderedDict()
+
+ merge_requirement_assignment_relationship(context, our_relationship,
+ relationship_property_definitions,
+ relationship_interface_definitions,
+ requirement, our_relationship)
+
+
+def merge_requirement_assignment_relationship(context, presentation, property_definitions,
+ interface_definitions, requirement, our_relationship):
+ our_relationship_properties = our_relationship._raw.get('properties')
+ if our_relationship_properties:
+ # Make sure we have a dict
+ if 'properties' not in requirement._raw['relationship']:
+ requirement._raw['relationship']['properties'] = OrderedDict()
+
+ # Merge our properties
+ for property_name, prop in our_relationship_properties.iteritems():
+ if property_name in property_definitions:
+ definition = property_definitions[property_name]
+ requirement._raw['relationship']['properties'][property_name] = \
+ coerce_parameter_value(context, presentation, definition, prop)
+ else:
+ context.validation.report(
+ 'relationship property "%s" not declared at definition of requirement "%s"'
+ ' in "%s"'
+ % (property_name, requirement._fullname,
+ presentation._container._container._fullname),
+ locator=our_relationship._get_child_locator('properties', property_name),
+ level=Issue.BETWEEN_TYPES)
+
+ our_interfaces = our_relationship.interfaces
+ if our_interfaces:
+ # Make sure we have a dict
+ if 'interfaces' not in requirement._raw['relationship']:
+ requirement._raw['relationship']['interfaces'] = OrderedDict()
+
+ # Merge interfaces
+ for interface_name, our_interface in our_interfaces.iteritems():
+ if interface_name not in requirement._raw['relationship']['interfaces']:
+ requirement._raw['relationship']['interfaces'][interface_name] = OrderedDict()
+
+ if (interface_definitions is not None) and (interface_name in interface_definitions):
+ interface_definition = interface_definitions[interface_name]
+ interface_assignment = requirement.relationship.interfaces[interface_name]
+ merge_interface(context, presentation, interface_assignment, our_interface,
+ interface_definition, interface_name)
+ else:
+ context.validation.report(
+ 'relationship interface "%s" not declared at definition of requirement "%s"'
+ ' in "%s"'
+ % (interface_name, requirement._fullname,
+ presentation._container._container._fullname),
+ locator=our_relationship._locator, level=Issue.BETWEEN_TYPES)
+
+
+def validate_requirement_assignment(context, presentation, requirement_assignment,
+ relationship_property_definitions,
+ relationship_interface_definitions):
+ relationship = requirement_assignment.relationship
+ if relationship is None:
+ return
+
+ validate_required_values(context, presentation, relationship.properties,
+ relationship_property_definitions)
+
+ if relationship_interface_definitions:
+ for interface_name, relationship_interface_definition \
+ in relationship_interface_definitions.iteritems():
+ interface_assignment = relationship.interfaces.get(interface_name) \
+ if relationship.interfaces is not None else None
+ validate_required_inputs(context, presentation, interface_assignment,
+ relationship_interface_definition, None, interface_name)
+
+
+def get_first_requirement(requirement_definitions, name):
+ if requirement_definitions is not None:
+ for requirement_name, requirement_definition in requirement_definitions:
+ if requirement_name == name:
+ return requirement_definition
+ return None
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/substitution_mappings.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/substitution_mappings.py
new file mode 100644
index 0000000..e2af4b8
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/substitution_mappings.py
@@ -0,0 +1,167 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.formatting import safe_repr
+from aria.parser.validation import Issue
+
+
+def validate_substitution_mappings_requirement(context, presentation):
+
+ # validate that the requirement in substitution_mapping is defined in the substitution node type
+ substitution_node_type = presentation._container._get_type(context)
+ if substitution_node_type is None:
+ return
+ for req_name, req in substitution_node_type._get_requirements(context):
+ if req_name == presentation._name:
+ substitution_type_requirement = req
+ break
+ else:
+ context.validation.report(
+ 'substitution mapping requirement "{0}" is not declared in node type "{1}"'.format(
+ presentation._name, substitution_node_type._name),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ return
+
+ if not _validate_mapping_format(presentation):
+ _report_invalid_mapping_format(context, presentation, field='requirement')
+ return
+
+ # validate that the mapped requirement is defined in the corresponding node template
+ node_template = _get_node_template(context, presentation)
+ if node_template is None:
+ _report_missing_node_template(context, presentation, field='requirement')
+ return
+ mapped_requirement_name = presentation._raw[1]
+ for req_name, req in node_template._get_requirements(context):
+ if req_name == mapped_requirement_name:
+ node_template_requirement = req
+ break
+ else:
+ context.validation.report(
+ 'substitution mapping requirement "{0}" refers to an unknown requirement of node '
+ 'template "{1}": {mapped_requirement_name}'.format(
+ presentation._name, node_template._name,
+ mapped_requirement_name=safe_repr(mapped_requirement_name)),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ return
+
+ # validate that the requirement's capability type in substitution_mapping is derived from the
+ # requirement's capability type in the corresponding node template
+ substitution_type_requirement_capability_type = \
+ substitution_type_requirement._get_capability_type(context)
+ node_template_requirement_capability_type = \
+ node_template_requirement._get_capability(context)[0]
+ if not node_template_requirement_capability_type._is_descendant(
+ context, substitution_type_requirement_capability_type):
+ context.validation.report(
+ 'substitution mapping requirement "{0}" of capability type "{1}" is not a descendant '
+ 'of the mapped node template capability type "{2}"'.format(
+ presentation._name,
+ substitution_type_requirement_capability_type._name,
+ node_template_requirement_capability_type._name),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+
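+
+# The raw substitution_mappings section that these validators walk has roughly this shape
+# (illustrative only; all names here are hypothetical). Each requirement and capability is mapped
+# to a [node_template_name, requirement_or_capability_name] pair, which is what presentation._raw
+# holds for the presentations validated above:
+_EXAMPLE_RAW_SUBSTITUTION_MAPPINGS = {
+    'node_type': 'example.nodes.Database',
+    'requirements': {'host': ['db_node_template', 'host']},
+    'capabilities': {'database_endpoint': ['db_node_template', 'database_endpoint']}
+}
+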
+
+def validate_substitution_mappings_capability(context, presentation):
+
+ # validate that the capability in substitution_mapping is defined in the substitution node type
+ substitution_node_type = presentation._container._get_type(context)
+ if substitution_node_type is None:
+ return
+ substitution_type_capabilities = substitution_node_type._get_capabilities(context)
+ substitution_type_capability = substitution_type_capabilities.get(presentation._name)
+ if substitution_type_capability is None:
+ context.validation.report(
+ 'substitution mapping capability "{0}" '
+ 'is not declared in node type "{substitution_type}"'.format(
+ presentation._name, substitution_type=substitution_node_type._name),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ return
+
+ if not _validate_mapping_format(presentation):
+ _report_invalid_mapping_format(context, presentation, field='capability')
+ return
+
+ # validate that the capability in substitution_mapping is declared in the corresponding
+ # node template
+ node_template = _get_node_template(context, presentation)
+ if node_template is None:
+ _report_missing_node_template(context, presentation, field='capability')
+ return
+ mapped_capability_name = presentation._raw[1]
+ node_template_capability = node_template._get_capabilities(context).get(mapped_capability_name)
+
+ if node_template_capability is None:
+ context.validation.report(
+ 'substitution mapping capability "{0}" refers to an unknown '
+ 'capability of node template "{1}": {mapped_capability_name}'.format(
+ presentation._name, node_template._name,
+ mapped_capability_name=safe_repr(mapped_capability_name)),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ return
+
+    # validate that the capability type in the corresponding node template is derived from the
+    # capability type declared in the substitution node type
+ substitution_type_capability_type = substitution_type_capability._get_type(context)
+ node_template_capability_type = node_template_capability._get_type(context)
+
+ if not substitution_type_capability_type._is_descendant(context, node_template_capability_type):
+ context.validation.report(
+ 'node template capability type "{0}" is not a descendant of substitution mapping '
+ 'capability "{1}" of type "{2}"'.format(
+ node_template_capability_type._name,
+ presentation._name,
+ substitution_type_capability_type._name),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+
+
+#
+# Utils
+#
+
+def _validate_mapping_format(presentation):
+ """Validate that the mapping is a list of 2 strings"""
+ if not isinstance(presentation._raw, list) or \
+ len(presentation._raw) != 2 or \
+ not isinstance(presentation._raw[0], basestring) or \
+ not isinstance(presentation._raw[1], basestring):
+ return False
+ return True
+
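+
+# A self-contained check of the format rule above (illustrative only: the stand-in class mimics
+# the single attribute the helper reads):
+def _example_mapping_format_check():
+    class _Mapping(object):    # hypothetical stand-in for a presentation
+        def __init__(self, raw):
+            self._raw = raw
+    valid = _validate_mapping_format(_Mapping(['node_template_1', 'capability_1']))
+    invalid = not _validate_mapping_format(_Mapping('node_template_1'))
+    return valid and invalid    # True
+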
+
+def _get_node_template(context, presentation):
+ node_template_name = presentation._raw[0]
+ node_template = context.presentation.get_from_dict('service_template', 'topology_template',
+ 'node_templates', node_template_name)
+ return node_template
+
+
+def _report_missing_node_template(context, presentation, field):
+ context.validation.report(
+ 'substitution mappings {field} "{node_template_mapping}" '
+ 'refers to an unknown node template: {node_template_name}'.format(
+ field=field,
+ node_template_mapping=presentation._name,
+ node_template_name=safe_repr(presentation._raw[0])),
+ locator=presentation._locator, level=Issue.FIELD)
+
+
+def _report_invalid_mapping_format(context, presentation, field):
+ context.validation.report(
+ 'substitution mapping {field} "{field_name}" is not a list of 2 strings: {value}'.format(
+ field=field,
+ field_name=presentation._name,
+ value=safe_repr(presentation._raw)),
+ locator=presentation._locator, level=Issue.FIELD)