-rw-r--r--  azure/aria/aria-extension-cloudify/.DS_Store | bin 0 -> 8196 bytes
-rw-r--r--  azure/aria/aria-extension-cloudify/.flake8 | 2
-rw-r--r--  azure/aria/aria-extension-cloudify/.gitignore | 64
-rw-r--r--  azure/aria/aria-extension-cloudify/.travis.yml | 54
-rw-r--r--  azure/aria/aria-extension-cloudify/LICENCE | 201
-rw-r--r--  azure/aria/aria-extension-cloudify/README.md | 47
-rw-r--r--  azure/aria/aria-extension-cloudify/adapters/__init__.py | 15
-rw-r--r--  azure/aria/aria-extension-cloudify/adapters/context_adapter.py | 461
-rw-r--r--  azure/aria/aria-extension-cloudify/adapters/extension.py | 92
-rw-r--r--  azure/aria/aria-extension-cloudify/appveyor.yml | 40
-rw-r--r--  azure/aria/aria-extension-cloudify/aria_extension_tests/__init__.py | 15
-rw-r--r--  azure/aria/aria-extension-cloudify/aria_extension_tests/adapters/__init__.py | 15
-rw-r--r--  azure/aria/aria-extension-cloudify/aria_extension_tests/adapters/test_context_adapter.py | 541
-rw-r--r--  azure/aria/aria-extension-cloudify/aria_extension_tests/requirements.txt | 9
-rw-r--r--  azure/aria/aria-extension-cloudify/examples/aws-hello-world/aws-helloworld.yaml | 101
-rw-r--r--  azure/aria/aria-extension-cloudify/examples/aws-hello-world/images/aria-logo.png | bin 0 -> 23601 bytes
-rw-r--r--  azure/aria/aria-extension-cloudify/examples/aws-hello-world/index.html | 14
-rw-r--r--  azure/aria/aria-extension-cloudify/examples/aws-hello-world/scripts/configure.sh | 22
-rw-r--r--  azure/aria/aria-extension-cloudify/examples/aws-hello-world/scripts/start.sh | 50
-rw-r--r--  azure/aria/aria-extension-cloudify/examples/aws-hello-world/scripts/stop.sh | 14
-rw-r--r--  azure/aria/aria-extension-cloudify/examples/openstack-hello-world/images/aria-logo.png | bin 0 -> 23601 bytes
-rw-r--r--  azure/aria/aria-extension-cloudify/examples/openstack-hello-world/index.html | 14
-rw-r--r--  azure/aria/aria-extension-cloudify/examples/openstack-hello-world/openstack-helloworld.yaml | 144
-rw-r--r--  azure/aria/aria-extension-cloudify/examples/openstack-hello-world/scripts/configure.sh | 20
-rw-r--r--  azure/aria/aria-extension-cloudify/examples/openstack-hello-world/scripts/start.sh | 50
-rw-r--r--  azure/aria/aria-extension-cloudify/examples/openstack-hello-world/scripts/stop.sh | 14
-rw-r--r--  azure/aria/aria-extension-cloudify/plugins/aws/plugin.yaml | 1754
-rw-r--r--  azure/aria/aria-extension-cloudify/plugins/openstack/plugin.yaml | 1174
-rw-r--r--  azure/aria/aria-extension-cloudify/requirements.txt | 1
-rw-r--r--  azure/aria/aria-extension-cloudify/setup.py | 44
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/.gitignore | 64
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/.rat-excludes | 24
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/.travis.yml | 64
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/=35.0.0 | 0
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/=35.0.0, | 0
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/CHANGELOG.rst | 16
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/CONTRIBUTING | 3
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/DISCLAIMER | 10
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/LICENSE | 191
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/MANIFEST.in | 12
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/Makefile | 63
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/NOTICE | 5
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/README.rst | 167
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/VERSION | 1
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/appveyor.yml | 41
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/.pylintrc | 423
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/__init__.py | 89
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/__init__.py | 18
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/ascii_art.py | 24
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/color.py | 108
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/__init__.py | 30
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/executions.py | 246
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/logs.py | 72
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/node_templates.py | 100
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/nodes.py | 94
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/plugins.py | 111
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/reset.py | 45
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/service_templates.py | 244
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/services.py | 238
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/workflows.py | 111
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/config/__init__.py | 18
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/config/config.py | 93
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/config/config_template.yaml | 42
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/core/__init__.py | 18
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/core/aria.py | 507
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/csar.py | 187
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/defaults.py | 30
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/env.py | 127
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/exceptions.py | 24
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/execution_logging.py | 243
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/helptexts.py | 62
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/inputs.py | 124
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/logger.py | 134
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/main.py | 65
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/service_template_utils.py | 129
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/table.py | 125
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/cli/utils.py | 117
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/core.py | 133
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/exceptions.py | 73
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/extension.py | 154
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/logger.py | 186
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/modeling/__init__.py | 54
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/modeling/constraints.py | 31
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/modeling/exceptions.py | 63
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/modeling/functions.py | 140
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/modeling/mixins.py | 333
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/modeling/models.py | 427
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/modeling/orchestration.py | 715
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/modeling/relationship.py | 395
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/modeling/service_changes.py | 253
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/modeling/service_common.py | 601
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/modeling/service_instance.py | 1695
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/modeling/service_template.py | 1758
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/modeling/types.py | 318
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/modeling/utils.py | 185
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/__init__.py | 32
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/__init__.py | 21
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/common.py | 217
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/exceptions.py | 27
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/operation.py | 174
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/toolbelt.py | 59
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/workflow.py | 135
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/decorators.py | 85
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/events.py | 34
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/exceptions.py | 85
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/__init__.py | 39
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/common.py | 154
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/constants.py | 57
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ctx_proxy/__init__.py | 20
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ctx_proxy/client.py | 114
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ctx_proxy/server.py | 244
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/environment_globals.py | 57
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/exceptions.py | 47
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/instantiation.py | 217
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/local.py | 128
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/operations.py | 75
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ssh/__init__.py | 18
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ssh/operations.py | 195
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ssh/tunnel.py | 107
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/plugin.py | 171
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/__init__.py | 16
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/common.py | 69
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/instance_handler.py | 671
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/template_handler.py | 609
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/topology.py | 223
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/utils.py | 48
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflow_runner.py | 194
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/__init__.py | 21
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/api/__init__.py | 20
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/api/task.py | 272
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/api/task_graph.py | 295
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/__init__.py | 36
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/execute_operation.py | 101
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/heal.py | 179
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/install.py | 34
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/start.py | 31
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/stop.py | 31
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/uninstall.py | 34
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/workflows.py | 149
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/core/__init__.py | 20
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/core/engine.py | 185
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/core/events_handler.py | 170
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/core/graph_compiler.py | 118
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/events_logging.py | 85
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/exceptions.py | 91
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/__init__.py | 22
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/base.py | 75
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/celery.py | 97
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/dry.py | 54
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/process.py | 350
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/thread.py | 79
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/__init__.py | 34
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/__init__.py | 84
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/consumer.py | 93
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/context.py | 106
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/exceptions.py | 23
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/inputs.py | 53
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/modeling.py | 198
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/presentation.py | 137
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/validation.py | 30
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/exceptions.py | 33
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/__init__.py | 80
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/context.py | 33
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/exceptions.py | 35
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/file.py | 64
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/literal.py | 31
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/loader.py | 34
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/location.py | 82
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/request.py | 88
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/source.py | 44
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/uri.py | 97
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/modeling/__init__.py | 26
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/modeling/context.py | 107
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/__init__.py | 158
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/context.py | 65
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/exceptions.py | 29
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/field_validators.py | 164
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/fields.py | 757
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/null.py | 67
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/presentation.py | 248
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/presenter.py | 70
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/source.py | 55
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/utils.py | 187
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/__init__.py | 60
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/context.py | 31
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/exceptions.py | 44
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/jinja.py | 55
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/json.py | 33
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/locator.py | 119
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/raw.py | 24
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/reader.py | 44
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/source.py | 59
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/yaml.py | 113
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/specification.py | 69
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/validation/__init__.py | 25
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/validation/context.py | 36
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/parser/validation/issue.py | 190
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/storage/__init__.py | 41
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/storage/api.py | 186
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/storage/collection_instrumentation.py | 314
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/storage/core.py | 160
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/storage/exceptions.py | 31
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/storage/filesystem_rapi.py | 165
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/storage/sql_mapi.py | 439
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/__init__.py | 65
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/archive.py | 66
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/argparse.py | 118
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/caching.py | 137
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/collections.py | 303
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/console.py | 132
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/exceptions.py | 120
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/file.py | 46
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/formatting.py | 235
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/http.py | 66
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/imports.py | 96
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/openclose.py | 36
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/plugin.py | 24
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/process.py | 51
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/specification.py | 57
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/threading.py | 286
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/type.py | 156
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/uris.py | 48
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/uuid.py | 70
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/validation.py | 97
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/aria/utils/versions.py | 163
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/.gitignore | 1
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/_static/.gitkeep | 0
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.cli.rst | 100
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.modeling.models.rst | 21
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.modeling.rst | 56
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.context.rst | 46
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.execution_plugin.ctx_proxy.rst | 31
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.execution_plugin.rst | 56
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.execution_plugin.ssh.rst | 31
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.rst | 46
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.workflows.api.rst | 31
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.workflows.builtin.rst | 57
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.workflows.executor.rst | 46
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.workflows.rst | 51
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.consumption.rst | 21
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.loading.rst | 21
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.modeling.rst | 21
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.presentation.rst | 21
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.reading.rst | 21
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.rst | 31
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.validation.rst | 21
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.rst | 40
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.storage.rst | 51
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria.utils.rst | 121
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria_extension_tosca.simple_nfv_v1_0.rst | 20
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria_extension_tosca.simple_v1_0.modeling.rst | 75
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria_extension_tosca.simple_v1_0.presentation.rst | 40
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/aria_extension_tosca.simple_v1_0.rst | 20
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/cli.rst | 57
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/conf.py | 441
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/index.rst | 86
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/requirements.txt | 15
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/docs/rest.rst | 20
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/clearwater-live-test-existing.yaml | 54
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/clearwater-single-existing.yaml | 147
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/bono/create.sh | 20
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/bono/delete.sh | 17
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/dime/create.sh | 21
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/dime/delete.sh | 17
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ellis/configure.sh | 29
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ellis/create.sh | 19
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ellis/delete.sh | 17
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/homer/create.sh | 27
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/homer/delete.sh | 17
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/homestead/create.sh | 25
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/homestead/delete.sh | 17
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/host-base/configure.sh | 23
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/host/configure.sh | 183
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/live-test/create.sh | 69
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/live-test/delete.sh | 23
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/memento/create.sh | 20
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/memento/delete.sh | 17
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ralf/create.sh | 15
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ralf/delete.sh | 17
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/sprout/create.sh | 19
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/sprout/delete.sh | 17
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/vellum/create.sh | 23
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/vellum/delete.sh | 17
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/types/cassandra.yaml | 30
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/types/clearwater.yaml | 728
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/types/ims.yaml | 446
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/types/smtp.yaml | 35
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/hello-world.yaml | 38
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/images/aria-logo.png | bin 0 -> 23601 bytes
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/index.html | 14
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/scripts/configure.sh | 36
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/scripts/start.sh | 64
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/scripts/stop.sh | 28
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-1/block-storage-1.yaml | 68
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-1/inputs.yaml | 3
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-2/block-storage-2.yaml | 75
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-2/inputs.yaml | 3
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-3/block-storage-3.yaml | 68
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-3/inputs.yaml | 2
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-4/block-storage-4.yaml | 96
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-4/inputs.yaml | 2
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-5/block-storage-5.yaml | 109
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-5/inputs.yaml | 3
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-6/block-storage-6.yaml | 102
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-6/inputs.yaml | 3
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/compute-1/compute-1.yaml | 42
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/compute-1/inputs.yaml | 1
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/container-1/container-1.yaml | 68
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/collectd.yaml | 10
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/elasticsearch.yaml | 8
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/kibana.yaml | 12
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/logstash.yaml | 12
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/rsyslog.yaml | 10
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/inputs.yaml | 1
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/multi-tier-1.yaml | 237
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-1/inputs.yaml | 1
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-1/network-1.yaml | 49
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-2/inputs.yaml | 1
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-2/network-2.yaml | 46
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-3/inputs.yaml | 1
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-3/network-3.yaml | 81
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-4/network-4.yaml | 70
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/non-normative-types.yaml | 177
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/object-storage-1/inputs.yaml | 1
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/object-storage-1/object-storage-1.yaml | 24
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/software-component-1/inputs.yaml | 1
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/software-component-1/software-component-1.yaml | 54
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/webserver-dbms-1/webserver-dbms-1.yaml | 122
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/webserver-dbms-2/custom_types/paypalpizzastore_nodejs_app.yaml | 15
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/webserver-dbms-2/webserver-dbms-2.yaml | 115
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/__init__.py | 56
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/aria-1.0/aria-1.0.yaml | 97
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/azure-plugin/azureplugin.yaml | 1981
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/artifacts.yaml | 121
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/capabilities.yaml | 322
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/data.yaml | 268
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/groups.yaml | 28
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/interfaces.yaml | 107
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/nodes.yaml | 525
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/policies.yaml | 71
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/relationships.yaml | 158
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/tosca-simple-1.0.yaml | 24
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/artifacts.yaml | 84
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/capabilities.yaml | 70
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/data.yaml | 318
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/nodes.yaml | 260
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/relationships.yaml | 43
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/tosca-simple-nfv-1.0.yaml | 21
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_nfv_v1_0/__init__.py | 19
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_nfv_v1_0/presenter.py | 43
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/__init__.py | 199
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/assignments.py | 453
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/data_types.py | 561
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/definitions.py | 518
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/filters.py | 107
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/misc.py | 444
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/__init__.py | 750
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/artifacts.py | 44
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/capabilities.py | 220
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/constraints.py | 144
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/copy.py | 32
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/data_types.py | 514
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/functions.py | 681
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/interfaces.py | 530
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/parameters.py | 230
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/policies.py | 79
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/requirements.py | 364
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/substitution_mappings.py | 167
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/__init__.py | 14
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/extensible.py | 33
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/field_getters.py | 37
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/field_validators.py | 588
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/types.py | 63
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presenter.py | 83
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/templates.py | 736
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/types.py | 892
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/release/asf-release.sh | 283
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/requirements.in | 41
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/requirements.txt | 43
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/setup.py | 174
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/test_ssh.py | 528
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/.pylintrc | 422
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/__init__.py | 20
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/cli/__init__.py | 14
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/cli/base_test.py | 77
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/cli/runner.py | 27
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/cli/test_node_templates.py | 133
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/cli/test_nodes.py | 101
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/cli/test_service_templates.py | 273
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/cli/test_services.py | 227
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/cli/utils.py | 101
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/conftest.py | 47
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/end2end/__init__.py | 14
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/end2end/test_hello_world.py | 61
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/end2end/test_nodecellar.py | 42
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/end2end/testenv.py | 102
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/fixtures.py | 70
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/helpers.py | 82
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/instantiation/__init__.py | 14
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/instantiation/test_configuration.py | 172
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/mock/__init__.py | 16
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/mock/context.py | 57
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/mock/models.py | 358
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/mock/operations.py | 59
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/mock/topology.py | 96
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/mock/workflow.py | 26
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/modeling/__init__.py | 34
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/modeling/test_mixins.py | 215
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/modeling/test_models.py | 872
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/__init__.py | 14
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/__init__.py | 32
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_context_instrumentation.py | 108
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_operation.py | 498
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_resource_render.py | 72
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_serialize.py | 104
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_toolbelt.py | 164
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_workflow.py | 126
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/__init__.py | 14
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_common.py | 193
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_ctx_proxy_server.py | 285
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_global_ctx.py | 28
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_local.py | 598
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_ssh.py | 523
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/test_workflow_runner.py | 726
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/__init__.py | 16
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/api/__init__.py | 14
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/api/test_task.py | 223
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/api/test_task_graph.py | 745
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/__init__.py | 70
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/test_execute_operation.py | 64
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/test_heal.py | 100
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/test_install.py | 46
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/test_uninstall.py | 47
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/__init__.py | 14
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/test_engine.py | 564
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/test_events.py | 171
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/test_task.py | 153
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/test_task_graph_into_execution_graph.py | 172
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/__init__.py | 98
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_executor.py | 149
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_process_executor.py | 172
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_process_executor_concurrent_modifications.py | 167
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_process_executor_extension.py | 99
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_process_executor_tracked_changes.py | 168
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/helpers.py | 37
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/parser/__init__.py | 14
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/parser/service_templates.py | 86
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_reqs_caps.py | 29
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/__init__.py | 14
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/presentation/__init__.py | 0
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/presentation/test_types.py | 23
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/test_end2end.py | 112
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/parser/utils.py | 67
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/requirements.txt | 22
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/resources/__init__.py | 19
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/resources/plugins/mock-plugin1/mock_plugin1.py | 27
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/resources/plugins/mock-plugin1/setup.py | 28
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/resources/scripts/test_ssh.sh | 96
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/inputs.yaml | 3
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/node-cellar.yaml | 357
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/mongodb.yaml | 72
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/nginx.yaml | 29
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/nodejs.yaml | 69
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/openstack.yaml | 201
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/os.yaml | 74
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/workflows.py | 40
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/reqs_caps/reqs_caps1.yaml | 40
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/types/shorthand-1/shorthand-1.yaml | 23
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/types/typequalified-1/typequalified-1.yaml | 23
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/storage/__init__.py | 53
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/storage/test_collection_instrumentation.py | 257
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/storage/test_model_storage.py | 213
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/storage/test_resource_storage.py | 280
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/test_extension.py | 156
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/test_logger.py | 129
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/utils/__init__.py | 14
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_exceptions.py | 73
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_plugin.py | 58
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_threading.py | 33
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_validation.py | 35
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_versions.py | 85
-rw-r--r--  azure/aria/aria-extension-cloudify/src/aria/tox.ini.bkp | 116
-rw-r--r--  azure/aria/aria-extension-cloudify/tox.ini.bkp | 56
-rw-r--r--  azure/assembly.xml | 1
-rw-r--r--  azure/docker/Dockerfile | 17
-rw-r--r--  azure/docker/build_image.sh | 4
-rw-r--r--  azure/docker/cloudify_azure_plugin-1.4.2-py27-none-linux_x86_64.wgn | bin 0 -> 4357947 bytes
-rw-r--r--  azure/multicloud_azure/pub/aria/__init__.py | 11
-rw-r--r--  azure/multicloud_azure/pub/aria/service.py | 159
-rw-r--r--  azure/multicloud_azure/pub/aria/util.py | 40
-rw-r--r--  azure/multicloud_azure/settings.py | 3
-rw-r--r--  azure/multicloud_azure/swagger/urls.py | 14
-rw-r--r--  azure/multicloud_azure/swagger/views/infra_workload/__init__.py | 11
-rw-r--r--  azure/multicloud_azure/swagger/views/infra_workload/views.py | 82
-rw-r--r--  azure/multicloud_azure/tests/test_aria_view.py | 171
-rw-r--r--  pom.xml | 2
498 files changed, 65876 insertions, 11 deletions
diff --git a/azure/aria/aria-extension-cloudify/.DS_Store b/azure/aria/aria-extension-cloudify/.DS_Store
new file mode 100644
index 0000000..d3c7e50
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/.DS_Store
Binary files differ
diff --git a/azure/aria/aria-extension-cloudify/.flake8 b/azure/aria/aria-extension-cloudify/.flake8
new file mode 100644
index 0000000..7da1f96
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/.flake8
@@ -0,0 +1,2 @@
+[flake8]
+max-line-length = 100
diff --git a/azure/aria/aria-extension-cloudify/.gitignore b/azure/aria/aria-extension-cloudify/.gitignore
new file mode 100644
index 0000000..29c4e9c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/.gitignore
@@ -0,0 +1,64 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+bin/
+build/
+develop-eggs/
+dist/
+eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.cache
+nosetests.xml
+coverage.xml
+
+# Translations
+*.mo
+
+# Mr Developer
+.mr.developer.cfg
+.project
+.pydevproject
+
+# Rope
+.ropeproject
+
+# Django stuff:
+*.log
+*.pot
+
+# Sphinx documentation
+docs/_build/
+
+*.iml
+
+*COMMIT_MSG
+
+*.noseids
+
+# QuickBuild
+.qbcache/
+
+.idea/
diff --git a/azure/aria/aria-extension-cloudify/.travis.yml b/azure/aria/aria-extension-cloudify/.travis.yml
new file mode 100644
index 0000000..dc57eae
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/.travis.yml
@@ -0,0 +1,54 @@
+# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+sudo: false
+
+language: python
+
+python:
+ - '2.7'
+ - '2.6'
+
+env:
+ - TOX_ENV=flake8
+ - TOX_ENV=py27
+ - TOX_ENV=py26
+
+matrix:
+ # Since Python 2.6 no longer comes with the default Travis machine, we need to specify it
+ # explicitly. And since it doesn't make sense to require a machine with Python 2.6 to run
+ # Python 2.7 tests, we remove the redundant combinations via the exclude matrix.
+ exclude:
+ - python: '2.6'
+ env: TOX_ENV=flake8
+ - python: '2.6'
+ env: TOX_ENV=py27
+ - python: '2.7'
+ env: TOX_ENV=py26
+
+
+install:
+ - pip install --upgrade pip
+ - pip install tox
+
+script:
+ - pip --version
+ - tox --version
+ - PYTEST_PROCESSES=1 tox -e $TOX_ENV
+
+# The PYTEST_PROCESSES environment variable is used in tox.ini to override the --numprocesses
+# argument for PyTest's xdist plugin. This is necessary because conventional Travis environments
+# may report a large number of available CPUs, but they are greatly restricted. Through trial
+# and error we found that more than 1 process may result in failures.
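+#
+# A minimal sketch of how tox.ini could consume the variable through tox's
+# {env:KEY:default} substitution (the actual tox.ini is not part of this file,
+# so the section name and command below are assumptions, not the real config):
+#
+#   [testenv]
+#   commands = pytest aria_extension_tests --numprocesses={env:PYTEST_PROCESSES:auto}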
\ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/LICENCE b/azure/aria/aria-extension-cloudify/LICENCE
new file mode 100644
index 0000000..404713c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/LICENCE
@@ -0,0 +1,201 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+limitations under the License. \ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/README.md b/azure/aria/aria-extension-cloudify/README.md
new file mode 100644
index 0000000..9506472
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/README.md
@@ -0,0 +1,47 @@
+## Cloudify ARIA Extensions
+
+This repository provides ARIA with support for running Cloudify-based plugins.
+Using an adapter that serves as a translation layer between the Cloudify and ARIA APIs, ARIA is able to make use of plugins that were designed to work with Cloudify.
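+
+As an illustrative sketch (not the adapter's actual code), the translation-layer idea looks roughly like this; the class and attribute names here are simplified assumptions:
+
+```python
+class ContextAdapterSketch(object):
+    """Wraps an ARIA operation context and exposes a Cloudify-style API."""
+
+    def __init__(self, aria_ctx):
+        self._ctx = aria_ctx
+
+    @property
+    def runtime_properties(self):
+        # Cloudify plugins read ctx.instance.runtime_properties; ARIA keeps
+        # the equivalent data in the node's attributes.
+        return self._ctx.node.attributes
+
+    def __getattr__(self, item):
+        # Anything not explicitly adapted is delegated to the ARIA context.
+        return getattr(self._ctx, item)
+```
+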
+### Installation
+
+1. Clone the repository:
+
+`git clone https://github.com/cloudify-cosmo/aria-extension-cloudify.git`
+
+2. Install the requirements:
+
+`pip install -r requirements.txt`
+
+3. Install the Cloudify ARIA extensions. This installs the adapter, and should be done in the same environment in which ARIA is installed:
+
+`pip install .`
+
+
+4. (optional, for development purposes) Install the test requirements:
+
+`pip install -r aria_extension_tests/requirements.txt`
+
+Using the adapter, ARIA is expected to support any Cloudify plugin. However, depending on their implementation, some plugins may not work out-of-the-box with ARIA, and small adapter modifications may be needed.
+
+Specifically, the [Cloudify AWS Plugin](https://github.com/cloudify-cosmo/cloudify-aws-plugin) 1.4.10 and the [Cloudify Openstack Plugin](https://github.com/cloudify-cosmo/cloudify-openstack-plugin) 2.0.1 were explicitly translated and tested using the adapter. Newer versions are expected to work as well.
+
+#### Installing a plugin
+In order to use any Cloudify plugin, you'll need to install it using a `.wgn` ([wagon](https://github.com/cloudify-cosmo/wagon)) file. For CentOS or RHEL, you can obtain the plugin `.wgn` from the [Cloudify plugin downloads page](http://cloudify.co/downloads/plugin-packages.html).
+
+After obtaining the `.wgn`, you can install the plugin:
+
+`aria plugins install <path to .wgn>`
+
+Another, more generic way of obtaining a plugin `.wgn` is to create it from source. Here's an example, using the AWS plugin:
+
+1. Clone or download the Cloudify AWS Plugin:
+
+`git clone https://github.com/cloudify-cosmo/cloudify-aws-plugin.git`
+
+2. (optional) If you want to install a specific version of the plugin, check out the corresponding tag:
+
+`git checkout <version number>`
+
+3. Create a `.wgn` file from the repository:
+
+`wagon create <path to plugin repository>`
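+
+4. Install the created `.wgn` (as described above):
+
+`aria plugins install <path to .wgn>`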
diff --git a/azure/aria/aria-extension-cloudify/adapters/__init__.py b/azure/aria/aria-extension-cloudify/adapters/__init__.py
new file mode 100644
index 0000000..eb15bc2
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/adapters/__init__.py
@@ -0,0 +1,15 @@
+#
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
diff --git a/azure/aria/aria-extension-cloudify/adapters/context_adapter.py b/azure/aria/aria-extension-cloudify/adapters/context_adapter.py
new file mode 100644
index 0000000..9e540ae
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/adapters/context_adapter.py
@@ -0,0 +1,461 @@
+#
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import os
+import tempfile
+
+from aria.orchestrator.context import operation
+
+
+DEPLOYMENT = 'deployment'
+NODE_INSTANCE = 'node-instance'
+RELATIONSHIP_INSTANCE = 'relationship-instance'
+
+
+class CloudifyContextAdapter(object):
+
+ def __init__(self, ctx):
+ self._ctx = ctx
+ self._blueprint = BlueprintAdapter(ctx)
+ self._deployment = DeploymentAdapter(ctx)
+ self._operation = OperationAdapter(ctx)
+ self._bootstrap_context = BootstrapAdapter(ctx)
+ self._plugin = PluginAdapter(ctx)
+ self._agent = CloudifyAgentAdapter()
+ self._node = None
+        self._instance = None
+ self._source = None
+ self._target = None
+ if isinstance(ctx, operation.NodeOperationContext):
+ self._node = NodeAdapter(ctx, ctx.node_template, ctx.node)
+ self._instance = NodeInstanceAdapter(ctx, ctx.node, True)
+ elif isinstance(ctx, operation.RelationshipOperationContext):
+ self._source = RelationshipTargetAdapter(
+ ctx,
+ ctx.source_node_template,
+ ctx.source_node,
+ True
+ )
+ self._target = RelationshipTargetAdapter(
+ ctx,
+ ctx.target_node_template,
+ ctx.target_node,
+ True
+ )
+
+ def __getattr__(self, item):
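+        # Any attribute not explicitly adapted here is delegated to the
+        # wrapped ARIA context.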
+ try:
+ return getattr(self._ctx, item)
+ except AttributeError:
+ return super(CloudifyContextAdapter, self).__getattribute__(item)
+
+ @property
+ def blueprint(self):
+ return self._blueprint
+
+ @property
+ def deployment(self):
+ return self._deployment
+
+ @property
+ def operation(self):
+ return self._operation
+
+ @property
+ def bootstrap_context(self):
+ return self._bootstrap_context
+
+ @property
+ def plugin(self):
+ return self._plugin
+
+ @property
+ def agent(self):
+ return self._agent
+
+ @property
+ def type(self):
+ if self._source:
+ return RELATIONSHIP_INSTANCE
+ elif self._instance:
+ return NODE_INSTANCE
+ else:
+ return DEPLOYMENT
+
+ @property
+ def instance(self):
+ self._verify_in_node_operation()
+ return self._instance
+
+ @property
+ def node(self):
+ self._verify_in_node_operation()
+ return self._node
+
+ @property
+ def source(self):
+ self._verify_in_relationship_operation()
+ return self._source
+
+ @property
+ def target(self):
+ self._verify_in_relationship_operation()
+ return self._target
+
+ @property
+ def execution_id(self):
+ return self._ctx.task.execution.id
+
+ @property
+ def workflow_id(self):
+ return self._ctx.task.execution.workflow_name
+
+ @property
+ def rest_token(self):
+ return None
+
+ @property
+ def task_id(self):
+ return self._ctx.task.id
+
+ @property
+ def task_name(self):
+ return self._ctx.task.function
+
+ @property
+ def task_target(self):
+ return None
+
+ @property
+ def task_queue(self):
+ return None
+
+ @property
+    def logger(self):
+        def getChild(self, name):
+            # Wrap the child logger in the same adapter type as its parent, so
+            # that loggers obtained via getChild() keep the task id context.
+            child_logger = self._logger.getChild(name)
+            return type(self)(child_logger, self._task_id)
+
+        # Monkey-patch getChild onto the logger's type, unless getChild
+        # already returns an instance of that same type.
+        logger_type = type(self._ctx.logger)
+        child_logger_type = type(self._ctx.logger.getChild('test'))
+        if logger_type != child_logger_type:
+            logger_type.getChild = getChild
+
+        return self._ctx.logger
+
+ def send_event(self, event):
+ self.logger.info(event)
+
+ @property
+ def provider_context(self):
+ return {}
+
+ def get_resource(self, resource_path):
+ return self._ctx.get_resource(resource_path)
+
+ def get_resource_and_render(self, resource_path, template_variables=None):
+ return self._ctx.get_resource_and_render(resource_path, variables=template_variables)
+
+ def download_resource(self, resource_path, target_path=None):
+ target_path = self._get_target_path(target_path, resource_path)
+ self._ctx.download_resource(
+ destination=target_path,
+ path=resource_path
+ )
+ return target_path
+
+ def download_resource_and_render(self,
+ resource_path,
+ target_path=None,
+ template_variables=None):
+ target_path = self._get_target_path(target_path, resource_path)
+ self._ctx.download_resource_and_render(
+ destination=target_path,
+ path=resource_path,
+ variables=template_variables
+ )
+ return target_path
+
+ @staticmethod
+ def _get_target_path(target_path, resource_path):
+ if target_path:
+ return target_path
+ fd, target_path = tempfile.mkstemp(suffix=os.path.basename(resource_path))
+ os.close(fd)
+ return target_path
+
+ def _verify_in_node_operation(self):
+ if self.type != NODE_INSTANCE:
+ self._ctx.task.abort(
+ 'ctx.node/ctx.instance can only be used in a {0} context but '
+ 'used in a {1} context.'.format(NODE_INSTANCE, self.type)
+ )
+
+ def _verify_in_relationship_operation(self):
+ if self.type != RELATIONSHIP_INSTANCE:
+ self._ctx.task.abort(
+ 'ctx.source/ctx.target can only be used in a {0} context but '
+ 'used in a {1} context.'.format(RELATIONSHIP_INSTANCE,
+ self.type)
+ )
+
+
+class BlueprintAdapter(object):
+
+ def __init__(self, ctx):
+ self._ctx = ctx
+
+ @property
+ def id(self):
+ return self._ctx.service_template.id
+
+
+class DeploymentAdapter(object):
+
+ def __init__(self, ctx):
+ self._ctx = ctx
+
+ @property
+ def id(self):
+ return self._ctx.service.id
+
+
+class NodeAdapter(object):
+
+ def __init__(self, ctx, node_template, node):
+ self._ctx = ctx
+ self._node_template = node_template
+ self._node = node
+
+ @property
+ def id(self):
+ return self._node_template.id
+
+ @property
+ def name(self):
+ return self._node_template.name
+
+ @property
+ def properties(self):
+        # The Cloudify Azure plugin requests the resource_config and merges it with
+        # new configuration. This creates a problem when resource_config is None, so
+        # we replace an empty resource_config with an empty dict.
+        if 'resource_config' in self._node.properties \
+                and self._node.properties['resource_config'] is None:
+            self._node.properties['resource_config'] = {}
+ return self._node.properties
+
+ @property
+ def type(self):
+ return self._node_template.type.name
+
+ @property
+ def type_hierarchy(self):
+        # The type hierarchy is converted to a list of strings in which 'aria' is
+        # replaced with 'cloudify'. The Cloudify AWS plugin, which we currently wish
+        # to support, requires type_hierarchy to be a list of strings only; attaching
+        # an ElasticIP to a node additionally requires the list to contain either
+        # 'cloudify.aws.nodes.Instance' or 'cloudify.aws.nodes.Interface'.
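+        # For example (illustrative values only): ['aria.nodes.Compute',
+        # 'aria.aws.nodes.Instance'] becomes ['cloudify.nodes.Compute',
+        # 'cloudify.aws.nodes.Instance'].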
+ type_hierarchy_names = [type_.name for type_ in self._node_template.type.hierarchy
+ if type_.name is not None]
+ return [type_name.replace('aria', 'cloudify') for type_name in type_hierarchy_names]
+
+
+class NodeInstanceAdapter(object):
+
+ def __init__(self, ctx, node, modifiable):
+ self._ctx = ctx
+ self._node = node
+ self._modifiable = modifiable
+
+ @property
+ def id(self):
+ return self._node.id
+
+ @property
+ def runtime_properties(self):
+ return self._node.attributes
+
+ @runtime_properties.setter
+ def runtime_properties(self, value):
+ self._node.attributes = value
+
+ def update(self, on_conflict=None):
+ self._ctx.model.node.update(self._node)
+
+ def refresh(self, force=False):
+ self._ctx.model.node.refresh(self._node)
+
+ @property
+ def host_ip(self):
+ return self._node.host_address
+
+ @property
+ def relationships(self):
+ return [RelationshipAdapter(self._ctx, relationship=relationship) for
+ relationship in self._node.outbound_relationships]
+
+
+class RelationshipAdapter(object):
+
+ def __init__(self, ctx, relationship):
+ self._ctx = ctx
+ self._relationship = relationship
+ node = relationship.target_node
+ node_template = node.node_template
+ self.target = RelationshipTargetAdapter(ctx, node_template, node, False)
+
+ @property
+ def type(self):
+ return self._relationship.type.name
+
+ @property
+ def type_hierarchy(self):
+        # As with NodeAdapter.type_hierarchy above, the hierarchy is converted to a
+        # list of strings with 'aria' replaced by 'cloudify', as required by the
+        # Cloudify AWS plugin.
+ type_hierarchy_names = [type_.name for type_ in self._relationship.type.hierarchy
+ if type_.name is not None]
+ return [type_name.replace('aria', 'cloudify') for type_name in type_hierarchy_names]
+
+
+class RelationshipTargetAdapter(object):
+
+ def __init__(self, ctx, node_template, node, modifiable):
+ self._ctx = ctx
+ self.node = NodeAdapter(ctx, node_template=node_template, node=node)
+ self.instance = NodeInstanceAdapter(ctx, node=node, modifiable=modifiable)
+
+
+class OperationAdapter(object):
+
+ def __init__(self, ctx):
+ self._ctx = ctx
+
+ @property
+ def name(self):
+        # The operation 'name' property is modified to support the Cloudify AWS
+        # plugin, which cannot use ARIA's operation naming convention: any operation
+        # run by the plugin must be named in the format
+        # '<something>.<operation_name>'.
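+        # For example, an ARIA task name such as 'interface:op@node_1'
+        # (illustrative) becomes 'interface.op'.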
+ aria_name = self._ctx.task.name
+ return aria_name.split('@')[0].replace(':', '.')
+
+ @property
+ def retry_number(self):
+ return self._ctx.task.attempts_count - 1
+
+ @property
+ def max_retries(self):
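+        # ARIA's max_attempts counts the initial attempt, while Cloudify's
+        # max_retries counts only retries, so we subtract one (the
+        # infinite-retries sentinel is passed through unchanged).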
+ task = self._ctx.task
+ if task.max_attempts == task.INFINITE_RETRIES:
+ return task.INFINITE_RETRIES
+ else:
+ return task.max_attempts - 1 if task.max_attempts > 0 else 0
+
+ def retry(self, message=None, retry_after=None):
+ self._ctx.task.retry(message, retry_after)
+
+
+class BootstrapAdapter(object):
+
+ def __init__(self, ctx):
+ self._ctx = ctx
+ self.cloudify_agent = _Stub()
+ self.resources_prefix = ''
+
+ def broker_config(self, *args, **kwargs):
+ return {}
+
+
+class CloudifyAgentAdapter(object):
+
+ def init_script(self, *args, **kwargs):
+ return None
+
+
+class PluginAdapter(object):
+
+ def __init__(self, ctx):
+ self._ctx = ctx
+ self._plugin = None
+
+ @property
+ def name(self):
+ return self._ctx.task.plugin.name
+
+ @property
+ def package_name(self):
+ return self._plugin_attr('package_name')
+
+ @property
+ def package_version(self):
+ return self._plugin_attr('package_version')
+
+ @property
+ def prefix(self):
+ # TODO
+ return self._plugin_attr('prefix')
+
+ @property
+ def workdir(self):
+ return self._ctx.plugin_workdir
+
+ def _plugin_attr(self, attr):
+ if not self._plugin:
+ self._plugin = self._ctx.task.plugin
+ if not self._plugin:
+ return None
+ return getattr(self._plugin, attr, None)
+
+
+class _Stub(object):
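+    # A placeholder whose every attribute lookup returns None; used to stub
+    # out bootstrap-context attributes such as cloudify_agent.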
+ def __getattr__(self, _):
+ return None
diff --git a/azure/aria/aria-extension-cloudify/adapters/extension.py b/azure/aria/aria-extension-cloudify/adapters/extension.py
new file mode 100644
index 0000000..bab472e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/adapters/extension.py
@@ -0,0 +1,92 @@
+#
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from functools import wraps
+from contextlib import contextmanager
+
+from aria import extension as aria_extension
+
+from .context_adapter import CloudifyContextAdapter
+
+
+@aria_extension.process_executor
+class CloudifyExecutorExtension(object):
+
+ def decorate(self):
+ def decorator(function):
+ @wraps(function)
+ def wrapper(ctx, **operation_inputs):
+                # We assume that any Cloudify-based plugin depends on
+                # cloudify-plugins-common, so two different execution paths
+                # are created accordingly
+ is_cloudify_dependent = ctx.task.plugin and any(
+ 'cloudify_plugins_common' in w for w in ctx.task.plugin.wheels)
+
+ if is_cloudify_dependent:
+ from cloudify import context
+ from cloudify.exceptions import (NonRecoverableError, RecoverableError)
+
+ with ctx.model.instrument(*ctx.INSTRUMENTATION_FIELDS):
+                    # The combined adapter class is created dynamically, since
+                    # cloudify.context.CloudifyContext can only be imported when
+                    # the plugin actually depends on Cloudify
+ ctx_adapter = type('_CloudifyContextAdapter',
+ (CloudifyContextAdapter, context.CloudifyContext),
+ {}, )(ctx)
+
+ exception = None
+ with _push_cfy_ctx(ctx_adapter, operation_inputs):
+ try:
+ function(ctx=ctx_adapter, **operation_inputs)
+ except NonRecoverableError as e:
+ ctx.task.abort(str(e))
+ except RecoverableError as e:
+ ctx.task.retry(str(e), retry_interval=e.retry_after)
+ except BaseException as e:
+                            # Keep the exception and re-raise it outside the
+                            # "with" block, so the pushed context is popped
+                            # cleanly before the exception propagates
+ exception = e
+ if exception is not None:
+ raise exception
+ else:
+ function(ctx=ctx, **operation_inputs)
+ return wrapper
+ return decorator
+
+
+@contextmanager
+def _push_cfy_ctx(ctx, params):
+ from cloudify import state
+
+ try:
+ # Support for Cloudify > 4.0
+ with state.current_ctx.push(ctx, params) as current_ctx:
+ yield current_ctx
+
+ except AttributeError:
+ # Support for Cloudify < 4.0
+ try:
+ original_ctx = state.current_ctx.get_ctx()
+ except RuntimeError:
+ original_ctx = None
+ try:
+ original_params = state.current_ctx.get_parameters()
+ except RuntimeError:
+ original_params = None
+
+ state.current_ctx.set(ctx, params)
+ try:
+ yield state.current_ctx.get_ctx()
+ finally:
+ state.current_ctx.set(original_ctx, original_params)
diff --git a/azure/aria/aria-extension-cloudify/appveyor.yml b/azure/aria/aria-extension-cloudify/appveyor.yml
new file mode 100644
index 0000000..f9812d9
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/appveyor.yml
@@ -0,0 +1,40 @@
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+environment:
+
+ TOX_ENV: pywin
+
+ matrix:
+ - PYTHON: "C:\\Python27"
+ PYTHON_VERSION: 2.7.8
+ PYTHON_ARCH: 32
+
+build: false
+
+install:
+ - SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%
+ - ps: (new-object System.Net.WebClient).Downloadfile('https://bootstrap.pypa.io/get-pip.py', 'C:\Users\appveyor\get-pip.py')
+ - ps: Start-Process -FilePath "C:\Python27\python.exe" -ArgumentList "C:\Users\appveyor\get-pip.py" -Wait -Passthru
+
+before_test:
+ - pip install virtualenv --upgrade
+ - virtualenv env
+ - 'env\Scripts\activate.bat'
+ - pip install tox
+
+test_script:
+ - pip --version
+ - tox --version
+ - tox -e %TOX_ENV% \ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/aria_extension_tests/__init__.py b/azure/aria/aria-extension-cloudify/aria_extension_tests/__init__.py
new file mode 100644
index 0000000..eb15bc2
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/aria_extension_tests/__init__.py
@@ -0,0 +1,15 @@
+#
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
diff --git a/azure/aria/aria-extension-cloudify/aria_extension_tests/adapters/__init__.py b/azure/aria/aria-extension-cloudify/aria_extension_tests/adapters/__init__.py
new file mode 100644
index 0000000..eb15bc2
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/aria_extension_tests/adapters/__init__.py
@@ -0,0 +1,15 @@
+#
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
diff --git a/azure/aria/aria-extension-cloudify/aria_extension_tests/adapters/test_context_adapter.py b/azure/aria/aria-extension-cloudify/aria_extension_tests/adapters/test_context_adapter.py
new file mode 100644
index 0000000..267f211
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/aria_extension_tests/adapters/test_context_adapter.py
@@ -0,0 +1,541 @@
+#
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import os
+import copy
+import datetime
+import contextlib
+
+import pytest
+
+from aria import (workflow, operation)
+from aria.modeling import models
+from aria.orchestrator import events
+from aria.orchestrator.workflows import api
+from aria.orchestrator.workflows.exceptions import ExecutorException
+from aria.orchestrator.workflows.executor import process
+from aria.orchestrator.workflows.core import (engine, graph_compiler)
+from aria.orchestrator.exceptions import (TaskAbortException, TaskRetryException)
+from aria.utils import type as type_
+
+import tests
+from tests import (mock, storage, conftest)
+from tests.orchestrator.workflows.helpers import events_collector
+
+from adapters import context_adapter
+
+
+@pytest.fixture(autouse=True)
+def cleanup_logger(request):
+ conftest.logging_handler_cleanup(request)
+
+
+class TestCloudifyContextAdapter(object):
+
+ def test_node_instance_operation(self, executor, workflow_context):
+ node_template = self._get_node_template(workflow_context)
+ node_type = 'aria.plugin.nodes.App'
+ node_instance_property = models.Property.wrap('hello', 'world')
+ node_template.type = models.Type(variant='variant', name=node_type)
+ node = self._get_node(workflow_context)
+ node_instance_attribute = models.Attribute.wrap('hello2', 'world2')
+ node.attributes[node_instance_attribute.name] = node_instance_attribute
+ node.properties[node_instance_property.name] = node_instance_property
+ workflow_context.model.node.update(node)
+ workflow_context.model.node_template.update(node_template)
+
+ out = self._run(executor, workflow_context, _test_node_instance_operation)
+
+ node_template = self._get_node_template(workflow_context)
+ node = self._get_node(workflow_context)
+ assert out['type'] == context_adapter.NODE_INSTANCE
+ assert out['node']['id'] == node_template.id
+ assert out['node']['name'] == node_template.name
+ assert out['node']['properties'] == \
+ {node_instance_property.name: node_instance_property.value}
+ assert out['node']['type'] == node_type
+ assert out['node']['type_hierarchy'] == ['cloudify.plugin.nodes.App']
+ assert out['instance']['id'] == node.id
+ assert out['instance']['runtime_properties'] == \
+ {node_instance_attribute.name: node_instance_attribute.value}
+ assert not out['source']
+ assert not out['target']
+
+ def test_node_instance_relationships(self, executor, workflow_context):
+ relationship_node_template = self._get_dependency_node_template(workflow_context)
+ relationship_node_instance = self._get_dependency_node(workflow_context)
+ relationship = relationship_node_instance.inbound_relationships[0]
+ relationship_type = models.Type(variant='variant', name='test.relationships.Relationship')
+ relationship.type = relationship_type
+ workflow_context.model.relationship.update(relationship)
+
+ out = self._run(executor, workflow_context, _test_node_instance_relationships)
+
+ assert len(out['instance']['relationships']) == 1
+ relationship = out['instance']['relationships'][0]
+ assert relationship['type'] == relationship_type.name
+ assert relationship['type_hierarchy'] == [relationship_type.name]
+ assert relationship['target']['node']['id'] == relationship_node_template.id
+ assert relationship['target']['instance']['id'] == relationship_node_instance.id
+
+ def test_source_operation(self, executor, workflow_context):
+ self._test_relationship_operation(executor, workflow_context, operation_end='source')
+
+ def test_target_operation(self, executor, workflow_context):
+ self._test_relationship_operation(executor, workflow_context, operation_end='target')
+
+ def _test_relationship_operation(self, executor, workflow_context, operation_end):
+ out = self._run(
+ executor, workflow_context, _test_relationship_operation, operation_end=operation_end)
+
+ source_node = self._get_node_template(workflow_context)
+ source_node_instance = self._get_node(workflow_context)
+ target_node = self._get_dependency_node_template(workflow_context)
+ target_node_instance = self._get_dependency_node(workflow_context)
+ assert out['type'] == context_adapter.RELATIONSHIP_INSTANCE
+ assert out['source']['node']['id'] == source_node.id
+ assert out['source']['instance']['id'] == source_node_instance.id
+ assert out['target']['node']['id'] == target_node.id
+ assert out['target']['instance']['id'] == target_node_instance.id
+ assert not out['node']
+ assert not out['instance']
+
+ def test_host_ip(self, executor, workflow_context):
+ node = self._get_node_template(workflow_context)
+ node.type_hierarchy = ['aria.nodes.Compute']
+ node_instance = self._get_node(workflow_context)
+ node_instance.host_fk = node_instance.id
+ node_instance_ip = '120.120.120.120'
+ node_instance.attributes['ip'] = models.Attribute.wrap('ip', node_instance_ip)
+ workflow_context.model.node_template.update(node)
+ workflow_context.model.node.update(node_instance)
+
+ out = self._run(executor, workflow_context, _test_host_ip)
+
+ assert out['instance']['host_ip'] == node_instance_ip
+
+ def test_get_and_download_resource_and_render(self, tmpdir, executor, workflow_context):
+ resource_path = 'resource'
+ variable = 'VALUE'
+ content = '{{ctx.service.name}}-{{variable}}'
+ rendered = '{0}-{1}'.format(workflow_context.service.name, variable)
+ source = tmpdir.join(resource_path)
+ source.write(content)
+ workflow_context.resource.service.upload(
+ entry_id=str(workflow_context.service.id),
+ source=str(source),
+ path=resource_path)
+
+ out = self._run(executor, workflow_context, _test_get_and_download_resource_and_render,
+ inputs={'resource': resource_path,
+ 'variable': variable})
+
+ assert out['get_resource'] == content
+ assert out['get_resource_and_render'] == rendered
+ with open(out['download_resource'], 'rb') as f:
+ assert f.read() == content
+ with open(out['download_resource_and_render'], 'rb') as f:
+ assert f.read() == rendered
+
+ os.remove(out['download_resource'])
+ os.remove(out['download_resource_and_render'])
+
+ def test_retry(self, executor, workflow_context):
+ message = 'retry-message'
+ retry_interval = 0.01
+
+ exception = self._run_and_get_task_exceptions(
+ executor, workflow_context, _test_retry,
+ inputs={'message': message, 'retry_interval': retry_interval},
+ max_attempts=2
+ )[-1]
+
+ assert isinstance(exception, TaskRetryException)
+ assert exception.message == message
+ assert exception.retry_interval == retry_interval
+
+ out = self._get_node(workflow_context).attributes['out'].value
+ assert out['operation']['retry_number'] == 1
+ assert out['operation']['max_retries'] == 1
+
+ def test_logger_and_send_event(self, executor, workflow_context):
+ # TODO: add assertions of output once process executor output can be captured
+ message = 'logger-message'
+ event = 'event-message'
+ self._run(executor, workflow_context, _test_logger_and_send_event,
+ inputs={'message': message, 'event': event})
+
+ def test_plugin(self, executor, workflow_context, tmpdir):
+ plugin = self._put_plugin(workflow_context)
+ out = self._run(executor, workflow_context, _test_plugin, plugin=plugin)
+
+ expected_workdir = tmpdir.join(
+ 'workdir', 'plugins', str(workflow_context.service.id), plugin.name)
+ assert out['plugin']['name'] == plugin.name
+ assert out['plugin']['package_name'] == plugin.package_name
+ assert out['plugin']['package_version'] == plugin.package_version
+ assert out['plugin']['workdir'] == str(expected_workdir)
+
+ def test_importable_ctx_and_inputs(self, executor, workflow_context):
+ test_inputs = {'input1': 1, 'input2': 2}
+ plugin = self._put_plugin(workflow_context, mock_cfy_plugin=True)
+
+ out = self._run(executor, workflow_context, _test_importable_ctx_and_inputs,
+ inputs=test_inputs,
+ skip_common_assert=True,
+ plugin=plugin)
+ assert out['inputs'] == test_inputs
+
+ def test_non_recoverable_error(self, executor, workflow_context):
+ message = 'NON_RECOVERABLE_MESSAGE'
+ plugin = self._put_plugin(workflow_context, mock_cfy_plugin=True)
+
+ exception = self._run_and_get_task_exceptions(
+ executor, workflow_context, _test_non_recoverable_error,
+ inputs={'message': message},
+ skip_common_assert=True,
+ plugin=plugin
+ )[0]
+ assert isinstance(exception, TaskAbortException)
+ assert exception.message == message
+
+ def test_recoverable_error(self, executor, workflow_context):
+ message = 'RECOVERABLE_MESSAGE'
+ plugin = self._put_plugin(workflow_context, mock_cfy_plugin=True)
+
+ retry_interval = 0.01
+ exception = self._run_and_get_task_exceptions(
+ executor, workflow_context, _test_recoverable_error,
+ inputs={'message': message, 'retry_interval': retry_interval},
+ skip_common_assert=True,
+ plugin=plugin
+ )[0]
+ assert isinstance(exception, TaskRetryException)
+ assert message in exception.message
+ assert exception.retry_interval == retry_interval
+
+ def _test_common(self, out, workflow_context):
+ assert out['execution_id'] == workflow_context.execution.id
+ assert out['workflow_id'] == workflow_context.execution.workflow_name
+ assert out['rest_token'] is None
+ assert out['task_id'][0] == out['task_id'][1]
+ assert out['task_name'][0] == out['task_name'][1]
+ assert out['task_target'] is None
+ assert out['task_queue'] is None
+ assert out['provider_context'] == {}
+ assert out['blueprint']['id'] == workflow_context.service_template.id
+ assert out['deployment']['id'] == workflow_context.service.id
+ assert out['operation']['name'][0] == out['operation']['name'][1]
+ assert out['operation']['retry_number'][0] == out['operation']['retry_number'][1]
+ assert out['operation']['max_retries'][0] == out['operation']['max_retries'][1] - 1
+ assert out['bootstrap_context']['resources_prefix'] == ''
+ assert out['bootstrap_context']['broker_config'] == {}
+ assert out['bootstrap_context']['cloudify_agent']['any'] is None
+ assert out['agent']['init_script'] is None
+
+ def _run(self,
+ executor,
+ workflow_context,
+ func,
+ inputs=None,
+ max_attempts=None,
+ skip_common_assert=False,
+ operation_end=None,
+ plugin=None):
+ interface_name = 'test'
+ operation_name = 'op'
+ op_dict = {'function': '{0}.{1}'.format(__name__, func.__name__),
+ 'plugin': plugin,
+ 'arguments': inputs or {}}
+ node = self._get_node(workflow_context)
+
+ if operation_end:
+ actor = relationship = node.outbound_relationships[0]
+ relationship.interfaces[interface_name] = mock.models.create_interface(
+ relationship.source_node.service,
+ interface_name,
+ operation_name,
+ operation_kwargs=op_dict
+ )
+ workflow_context.model.relationship.update(relationship)
+
+ else:
+ actor = node
+ node.interfaces[interface_name] = mock.models.create_interface(
+ node.service,
+ interface_name,
+ operation_name,
+ operation_kwargs=op_dict
+ )
+ workflow_context.model.node.update(node)
+
+ if inputs:
+ operation_inputs = \
+ actor.interfaces[interface_name].operations[operation_name].inputs
+            for input_name, input_value in inputs.iteritems():
+                operation_inputs[input_name] = models.Input(
+                    name=input_name, type_name=type_.full_type_name(input_value))
+
+ @workflow
+ def mock_workflow(graph, **kwargs):
+ task = api.task.OperationTask(
+ actor,
+ interface_name,
+ operation_name,
+ arguments=inputs or {},
+ max_attempts=max_attempts
+ )
+ graph.add_tasks(task)
+
+ tasks_graph = mock_workflow(ctx=workflow_context)
+ graph_compiler.GraphCompiler(workflow_context, executor.__class__).compile(tasks_graph)
+ eng = engine.Engine(executors={executor.__class__: executor})
+ eng.execute(workflow_context)
+ out = self._get_node(workflow_context).attributes['out'].value
+ if not skip_common_assert:
+ self._test_common(out, workflow_context)
+ return out
+
+ def _get_dependency_node_template(self, workflow_context):
+ return workflow_context.model.node_template.get_by_name(
+ mock.models.DEPENDENCY_NODE_TEMPLATE_NAME)
+
+ def _get_dependency_node(self, workflow_context):
+ return workflow_context.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+
+ def _get_node_template(self, workflow_context):
+ return workflow_context.model.node_template.get_by_name(
+ mock.models.DEPENDENT_NODE_TEMPLATE_NAME)
+
+ def _get_node(self, workflow_context):
+ return workflow_context.model.node.get_by_name(mock.models.DEPENDENT_NODE_NAME)
+
+ def _run_and_get_task_exceptions(self, *args, **kwargs):
+ signal = events.on_failure_task_signal
+ with events_collector(signal) as collected:
+ with pytest.raises(ExecutorException):
+ self._run(*args, **kwargs)
+ return [event['kwargs']['exception'] for event in collected[signal]]
+
+ @pytest.fixture
+ def executor(self):
+ result = process.ProcessExecutor(python_path=[tests.ROOT_DIR])
+ yield result
+ result.close()
+
+ @pytest.fixture
+ def workflow_context(self, tmpdir):
+ result = mock.context.simple(
+ str(tmpdir),
+ context_kwargs=dict(workdir=str(tmpdir.join('workdir')))
+ )
+ yield result
+ storage.release_sqlite_storage(result.model)
+
+ def _put_plugin(self, workflow_context, mock_cfy_plugin=False):
+ name = 'PLUGIN'
+ archive_name = 'ARCHIVE'
+ package_name = 'PACKAGE'
+ package_version = '0.1.1'
+
+ plugin = models.Plugin(
+ name=name,
+ archive_name=archive_name,
+ package_name=package_name,
+ package_version=package_version,
+ uploaded_at=datetime.datetime.now(),
+ wheels=['cloudify_plugins_common'] if mock_cfy_plugin else []
+ )
+
+ workflow_context.model.plugin.put(plugin)
+
+ return plugin
+
+
+@operation
+def _test_node_instance_operation(ctx):
+ with _adapter(ctx) as (adapter, out):
+ node = adapter.node
+ instance = adapter.instance
+ out.update({
+ 'node': {
+ 'id': node.id,
+ 'name': node.name,
+ 'properties': copy.deepcopy(node.properties),
+ 'type': node.type,
+ 'type_hierarchy': node.type_hierarchy
+ },
+ 'instance': {
+ 'id': instance.id,
+ 'runtime_properties': copy.deepcopy(instance.runtime_properties)
+ }
+ })
+ try:
+ assert adapter.source
+ out['source'] = True
+ except TaskAbortException:
+ out['source'] = False
+ try:
+ assert adapter.target
+ out['target'] = True
+ except TaskAbortException:
+ out['target'] = False
+
+
+@operation
+def _test_node_instance_relationships(ctx):
+ with _adapter(ctx) as (adapter, out):
+ relationships = [{'type': r.type,
+ 'type_hierarchy': [t.name for t in r.type_hierarchy],
+ 'target': {'node': {'id': r.target.node.id},
+ 'instance': {'id': r.target.instance.id}}}
+ for r in adapter.instance.relationships]
+ out['instance'] = {'relationships': relationships}
+
+
+@operation
+def _test_relationship_operation(ctx):
+ with _adapter(ctx) as (adapter, out):
+ out.update({
+ 'source': {'node': {'id': adapter.source.node.id},
+ 'instance': {'id': adapter.source.instance.id}},
+ 'target': {'node': {'id': adapter.target.node.id},
+ 'instance': {'id': adapter.target.instance.id}}
+ })
+ try:
+ assert adapter.node
+ out['node'] = True
+ except TaskAbortException:
+ out['node'] = False
+ try:
+ assert adapter.instance
+ out['instance'] = True
+ except TaskAbortException:
+ out['instance'] = False
+
+
+@operation
+def _test_host_ip(ctx):
+ with _adapter(ctx) as (adapter, out):
+ out['instance'] = {'host_ip': adapter.instance.host_ip}
+
+
+@operation
+def _test_get_and_download_resource_and_render(ctx, resource, variable):
+ with _adapter(ctx) as (adapter, out):
+ out.update({
+ 'get_resource': adapter.get_resource(resource),
+ 'get_resource_and_render': adapter.get_resource_and_render(
+ resource, template_variables={'variable': variable}
+ ),
+ 'download_resource': adapter.download_resource(resource),
+ 'download_resource_and_render': adapter.download_resource_and_render(
+ resource, template_variables={'variable': variable}
+ )
+ })
+
+
+@operation
+def _test_retry(ctx, message, retry_interval):
+ with _adapter(ctx) as (adapter, out):
+ op = adapter.operation
+ out['operation'] = {'retry_number': op.retry_number, 'max_retries': op.max_retries}
+ op.retry(message, retry_after=retry_interval)
+
+
+@operation
+def _test_logger_and_send_event(ctx, message, event):
+ with _adapter(ctx) as (adapter, _):
+ adapter.logger.info(message)
+ adapter.send_event(event)
+
+
+@operation
+def _test_plugin(ctx):
+ with _adapter(ctx) as (adapter, out):
+ plugin = adapter.plugin
+ out['plugin'] = {
+ 'name': plugin.name,
+ 'package_name': plugin.package_name,
+ 'package_version': plugin.package_version,
+ 'workdir': plugin.workdir
+ }
+
+
+@operation
+def _test_importable_ctx_and_inputs(**_):
+ from cloudify import ctx
+ from cloudify.state import ctx_parameters
+ ctx.instance.runtime_properties['out'] = {'inputs': dict(ctx_parameters)}
+
+
+@operation
+def _test_non_recoverable_error(message, **_):
+ from cloudify.exceptions import NonRecoverableError
+ raise NonRecoverableError(message)
+
+
+@operation
+def _test_recoverable_error(message, retry_interval, **_):
+ from cloudify.exceptions import RecoverableError
+ raise RecoverableError(message, retry_interval)
+
+
+def _test_common(out, ctx, adapter):
+ op = adapter.operation
+ bootstrap_context = adapter.bootstrap_context
+ out.update({
+ 'type': adapter.type,
+ 'execution_id': adapter.execution_id,
+ 'workflow_id': adapter.workflow_id,
+ 'rest_token': adapter.rest_token,
+ 'task_id': (adapter.task_id, ctx.task.id),
+ 'task_name': (adapter.task_name, ctx.task.function),
+ 'task_target': adapter.task_target,
+ 'task_queue': adapter.task_queue,
+ 'provider_context': adapter.provider_context,
+ 'blueprint': {'id': adapter.blueprint.id},
+ 'deployment': {'id': adapter.deployment.id},
+ 'operation': {
+ 'name': [op.name, ctx.name.split('@')[0].replace(':', '.')],
+ 'retry_number': [op.retry_number, ctx.task.attempts_count - 1],
+ 'max_retries': [op.max_retries, ctx.task.max_attempts]
+ },
+ 'bootstrap_context': {
+ 'broker_config': bootstrap_context.broker_config('arg1', 'arg2', arg3='arg3'),
+ # All attribute access of cloudify_agent returns none
+ 'cloudify_agent': {'any': bootstrap_context.cloudify_agent.any},
+ 'resources_prefix': bootstrap_context.resources_prefix
+ },
+ 'agent': {
+ 'init_script': adapter.agent.init_script('arg1', 'arg2', arg3='arg3')
+ }
+ })
+
+
+@contextlib.contextmanager
+def _adapter(ctx):
+ out = {}
+ adapter = context_adapter.CloudifyContextAdapter(ctx)
+ _test_common(out, ctx, adapter)
+ try:
+ yield adapter, out
+ finally:
+ try:
+ instance = adapter.instance
+ except TaskAbortException:
+ instance = adapter.source.instance
+ instance.runtime_properties['out'] = out
diff --git a/azure/aria/aria-extension-cloudify/aria_extension_tests/requirements.txt b/azure/aria/aria-extension-cloudify/aria_extension_tests/requirements.txt
new file mode 100644
index 0000000..bfb7074
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/aria_extension_tests/requirements.txt
@@ -0,0 +1,9 @@
+https://github.com/cloudify-cosmo/cloudify-dsl-parser/archive/master.zip
+https://github.com/cloudify-cosmo/cloudify-rest-client/archive/master.zip
+https://github.com/cloudify-cosmo/cloudify-plugins-common/archive/master.zip
+
+flake8==3.4.1
+pytest==3.2.0
+pytest-cov==2.5.1
+pytest-mock==1.6.2
+pytest-xdist==1.18.2
diff --git a/azure/aria/aria-extension-cloudify/examples/aws-hello-world/aws-helloworld.yaml b/azure/aria/aria-extension-cloudify/examples/aws-hello-world/aws-helloworld.yaml
new file mode 100644
index 0000000..52fd458
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/examples/aws-hello-world/aws-helloworld.yaml
@@ -0,0 +1,101 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+imports:
+ - https://raw.githubusercontent.com/cloudify-cosmo/aria-extension-cloudify/master/plugins/aws/plugin.yaml
+ - aria-1.0
+
+node_types:
+ http_web_server:
+ derived_from: tosca.nodes.WebApplication
+ properties:
+ port:
+ type: integer
+
+topology_template:
+ inputs:
+ webserver_port:
+ description: The HTTP web server port
+ type: integer
+ default: 8080
+ image_id:
+ description: AWS EC2 image id to use for the server
+ type: string
+ instance_type:
+ description: AWS EC2 instance type to use for the server
+ type: string
+ default: m3.medium
+ ssh_username:
+ type: string
+ default: ubuntu
+ ssh_port:
+ type: integer
+ default: 22
+ private_key_path:
+ description: Path to the private key used to authenticate into the instance
+ type: string
+
+ node_templates:
+ elastic_ip:
+ type: aria.aws.nodes.ElasticIP
+
+ security_group:
+ type: aria.aws.nodes.SecurityGroup
+ properties:
+ description: Security group for Hello World VM
+ rules:
+ - ip_protocol: tcp
+ cidr_ip: 0.0.0.0/0
+ from_port: { get_property: [ http_web_server, port ] }
+ to_port: { get_property: [ http_web_server, port ] }
+ - ip_protocol: tcp
+ cidr_ip: 0.0.0.0/0
+ from_port: { get_input: ssh_port }
+ to_port: { get_input: ssh_port }
+
+ vm:
+ type: aria.aws.nodes.Instance
+ properties:
+ image_id: { get_input: image_id }
+ instance_type: { get_input: instance_type }
+ name: aria-aws-hello-world-instance
+ parameters:
+ key_name: { get_attribute: [ keypair, aws_resource_id ] }
+ requirements:
+ - elastic_ip: elastic_ip
+ - security_group: security_group
+ - keypair: keypair
+
+ keypair:
+ type: aria.aws.nodes.KeyPair
+ properties:
+ private_key_path: { get_input: private_key_path }
+
+ http_web_server:
+ type: http_web_server
+ properties:
+ port: { get_input: webserver_port }
+ requirements:
+ - host: vm
+ interfaces:
+ Standard:
+ configure:
+ implementation:
+ primary: scripts/configure.sh
+ dependencies:
+ - "ssh.user > { get_input: ssh_username }"
+ - "ssh.key_filename > { get_input: private_key_path }"
+ - "ssh.address > { get_attribute: [ vm, public_ip_address ] }"
+ start:
+ implementation:
+ primary: scripts/start.sh
+ dependencies:
+ - "ssh.user > { get_input: ssh_username }"
+ - "ssh.key_filename > { get_input: private_key_path }"
+ - "ssh.address > { get_attribute: [ vm, public_ip_address ] }"
+ stop:
+ implementation:
+ primary: scripts/stop.sh
+ dependencies:
+ - "ssh.user > { get_input: ssh_username }"
+ - "ssh.key_filename > { get_input: private_key_path }"
+ - "ssh.address > { get_attribute: [ vm, public_ip_address ] }" \ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/examples/aws-hello-world/images/aria-logo.png b/azure/aria/aria-extension-cloudify/examples/aws-hello-world/images/aria-logo.png
new file mode 100644
index 0000000..3505844
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/examples/aws-hello-world/images/aria-logo.png
Binary files differ
diff --git a/azure/aria/aria-extension-cloudify/examples/aws-hello-world/index.html b/azure/aria/aria-extension-cloudify/examples/aws-hello-world/index.html
new file mode 100644
index 0000000..597632b
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/examples/aws-hello-world/index.html
@@ -0,0 +1,14 @@
+<html>
+ <header>
+ <title>ARIA Hello World</title>
+ </header>
+<body>
+ <h1>Hello, World!</h1>
+ <p>
+ service_template_name = {{ ctx.service_template.name }}<br/>
+ service_name = {{ ctx.service.name }}<br/>
+ node_name = {{ ctx.node.name }}
+ </p>
+ <img src="aria-logo.png">
+</body>
+</html> \ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/examples/aws-hello-world/scripts/configure.sh b/azure/aria/aria-extension-cloudify/examples/aws-hello-world/scripts/configure.sh
new file mode 100644
index 0000000..aa3ea5f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/examples/aws-hello-world/scripts/configure.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+set -e
+
+TEMP_DIR=/tmp
+PYTHON_FILE_SERVER_ROOT="$TEMP_DIR/python-simple-http-webserver"
+INDEX_PATH=index.html
+IMAGE_PATH=images/aria-logo.png
+
+if [ -d "$PYTHON_FILE_SERVER_ROOT" ]; then
+ ctx logger info [ "Removing old web server root folder: $PYTHON_FILE_SERVER_ROOT." ]
+ rm -rf "$PYTHON_FILE_SERVER_ROOT"
+fi
+
+ctx logger info [ "Creating web server root folder: $PYTHON_FILE_SERVER_ROOT." ]
+
+mkdir -p "$PYTHON_FILE_SERVER_ROOT"
+cd "$PYTHON_FILE_SERVER_ROOT"
+
+ctx logger info [ "Downloading service template resources..." ]
+ctx download-resource-and-render [ "$PYTHON_FILE_SERVER_ROOT/index.html" "$INDEX_PATH" ]
+ctx download-resource [ "$PYTHON_FILE_SERVER_ROOT/aria-logo.png" "$IMAGE_PATH" ]
diff --git a/azure/aria/aria-extension-cloudify/examples/aws-hello-world/scripts/start.sh b/azure/aria/aria-extension-cloudify/examples/aws-hello-world/scripts/start.sh
new file mode 100644
index 0000000..7471cfc
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/examples/aws-hello-world/scripts/start.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+set -e
+
+TEMP_DIR=/tmp
+PYTHON_FILE_SERVER_ROOT="$TEMP_DIR/python-simple-http-webserver"
+PID_FILE=server.pid
+PORT=$(ctx node properties port)
+URL="http://localhost:$PORT"
+
+ctx logger info [ "Starting web server at: $PYTHON_FILE_SERVER_ROOT." ]
+
+cd "$PYTHON_FILE_SERVER_ROOT"
+nohup python -m SimpleHTTPServer "$PORT" > /dev/null 2>&1 &
+echo $! > "$PID_FILE"
+
+server_is_up() {
+ if which wget >/dev/null; then
+ if wget "$URL" >/dev/null; then
+ return 0
+ fi
+ elif which curl >/dev/null; then
+ if curl "$URL" >/dev/null; then
+ return 0
+ fi
+ else
+        ctx logger error [ "Neither wget nor curl was found in PATH." ]
+ exit 1
+ fi
+ return 1
+}
+
+ctx logger info [ "Waiting for web server to launch on port $PORT..." ]
+STARTED=false
+for i in $(seq 1 15)
+do
+ if server_is_up; then
+ ctx logger info [ "Web server is up." ]
+ STARTED=true
+ break
+ else
+ ctx logger info [ "Web server not up. waiting 1 second." ]
+ sleep 1
+ fi
+done
+
+if [ "$STARTED" = false ]; then
+ ctx logger error [ "Web server did not start within 15 seconds." ]
+ exit 1
+fi
diff --git a/azure/aria/aria-extension-cloudify/examples/aws-hello-world/scripts/stop.sh b/azure/aria/aria-extension-cloudify/examples/aws-hello-world/scripts/stop.sh
new file mode 100644
index 0000000..9f3bfc5
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/examples/aws-hello-world/scripts/stop.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -e
+
+TEMP_DIR=/tmp
+PYTHON_FILE_SERVER_ROOT="${TEMP_DIR}/python-simple-http-webserver"
+PID_FILE=server.pid
+PID=$(cat "$PYTHON_FILE_SERVER_ROOT/$PID_FILE")
+
+ctx logger info [ "Shutting down web server, pid = ${PID}." ]
+kill -9 "$PID" || exit $?
+
+ctx logger info [ "Removing web server root folder: $PYTHON_FILE_SERVER_ROOT." ]
+rm -rf "$PYTHON_FILE_SERVER_ROOT"
diff --git a/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/images/aria-logo.png b/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/images/aria-logo.png
new file mode 100644
index 0000000..3505844
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/images/aria-logo.png
Binary files differ
diff --git a/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/index.html b/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/index.html
new file mode 100644
index 0000000..597632b
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/index.html
@@ -0,0 +1,14 @@
+<html>
+ <header>
+ <title>ARIA Hello World</title>
+ </header>
+<body>
+ <h1>Hello, World!</h1>
+ <p>
+ service_template_name = {{ ctx.service_template.name }}<br/>
+ service_name = {{ ctx.service.name }}<br/>
+ node_name = {{ ctx.node.name }}
+ </p>
+ <img src="aria-logo.png">
+</body>
+</html> \ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/openstack-helloworld.yaml b/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/openstack-helloworld.yaml
new file mode 100644
index 0000000..1fb031c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/openstack-helloworld.yaml
@@ -0,0 +1,144 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+imports:
+ - https://raw.githubusercontent.com/cloudify-cosmo/aria-extension-cloudify/master/plugins/openstack/plugin.yaml
+ - aria-1.0
+
+node_types:
+ web_app:
+ derived_from: tosca.nodes.WebApplication
+ properties:
+ port:
+ type: integer
+ default:
+
+topology_template:
+
+ inputs:
+ ssh_username:
+ type: string
+ default: ubuntu
+ external_network_name:
+ type: string
+ webserver_port:
+ type: integer
+ private_key_path:
+ type: string
+ image:
+ type: string
+ flavor:
+ type: string
+ openstack_config:
+ type: map
+ entry_schema: string
+
+ node_templates:
+ network:
+ type: aria.openstack.nodes.Network
+ properties:
+ resource_id: aria_helloworld_network
+ create_if_missing: true
+ openstack_config: { get_input: openstack_config }
+
+ router:
+ type: aria.openstack.nodes.Router
+ properties:
+ external_network: { get_input: external_network_name }
+ create_if_missing: true
+ resource_id: aria_helloworld_rtr
+ openstack_config: { get_input: openstack_config }
+
+ subnet:
+ type: aria.openstack.nodes.Subnet
+ properties:
+ resource_id: aria_helloworld_subnet
+ create_if_missing: true
+ openstack_config: { get_input: openstack_config }
+ requirements:
+ - router: router
+ - network: network
+
+ port:
+ type: aria.openstack.nodes.Port
+ properties:
+ create_if_missing: true
+ resource_id: aria_helloworld_port
+ openstack_config: { get_input: openstack_config }
+ requirements:
+ - security_group: security_group
+ - subnet: subnet
+ - network: network
+
+ virtual_ip:
+ type: aria.openstack.nodes.FloatingIP
+ properties:
+ resource_id: aria_helloworld_floatingip
+ create_if_missing: true
+ openstack_config: { get_input: openstack_config }
+ floatingip:
+ floating_network_name: { get_input: external_network_name }
+
+ security_group:
+ type: aria.openstack.nodes.SecurityGroup
+ properties:
+ create_if_missing: true
+ resource_id: aria_helloworld_sg
+ openstack_config: { get_input: openstack_config }
+ rules:
+ - remote_ip_prefix: 0.0.0.0/0
+ port: { get_input: webserver_port }
+ - port: 22
+ remote_ip_prefix: 0.0.0.0/0
+
+ keypair:
+ type: aria.openstack.nodes.KeyPair
+ properties:
+ create_if_missing: true
+ resource_id: aria_helloworld_kp
+ private_key_path: { get_input: private_key_path }
+ openstack_config: { get_input: openstack_config }
+
+ vm:
+ type: aria.openstack.nodes.Server
+ properties:
+ image: { get_input: image }
+ flavor: { get_input: flavor }
+ create_if_missing: true
+ resource_id: aria_helloworld_vm
+ management_network_name: aria_helloworld_network
+ openstack_config: { get_input: openstack_config }
+ requirements:
+ - floating_ip: virtual_ip
+ - security_group: security_group
+ - key_pair: keypair
+ - port: port
+
+ web_app:
+ type: web_app
+ properties:
+ port: { get_input: webserver_port }
+ requirements:
+ - host: vm
+ interfaces:
+ Standard:
+ configure:
+ implementation:
+ primary: scripts/configure.sh
+ dependencies:
+ - "ssh.user > { get_input: ssh_username }"
+ - "ssh.key_filename > { get_input: private_key_path }"
+ - "ssh.address > { get_attribute: [ virtual_ip, floating_ip_address ] }"
+ start:
+ implementation:
+ primary: scripts/start.sh
+ dependencies:
+ - "ssh.user > { get_input: ssh_username }"
+ - "ssh.key_filename > { get_input: private_key_path }"
+ - "ssh.address > { get_attribute: [ virtual_ip, floating_ip_address ] }"
+ stop:
+ implementation:
+ primary: scripts/stop.sh
+ dependencies:
+ - "ssh.user > { get_input: ssh_username }"
+ - "ssh.key_filename > { get_input: private_key_path }"
+ - "ssh.address > { get_attribute: [ virtual_ip, floating_ip_address ] }"
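
For reference, the inputs declared in the template above can be supplied from a small inputs file. A minimal sketch, where every value is a placeholder for your OpenStack environment (ssh_username defaults to ubuntu and may be omitted):

    # inputs.yaml -- placeholder values only
    external_network_name: ext-net             # a pre-existing external network
    webserver_port: 8080
    private_key_path: ~/.ssh/aria-helloworld.pem
    image: <image-id>                          # placeholder image ID or name
    flavor: <flavor-id>                        # placeholder flavor ID or name
    openstack_config:
      username: <username>
      password: <password>
      tenant_name: <tenant>
      auth_url: <keystone-auth-url>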
diff --git a/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/scripts/configure.sh b/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/scripts/configure.sh
new file mode 100644
index 0000000..400ae71
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/scripts/configure.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+TEMP_DIR=/tmp
+PYTHON_FILE_SERVER_ROOT="$TEMP_DIR/python-simple-http-webserver"
+INDEX_PATH=index.html
+IMAGE_PATH=images/aria-logo.png
+
+if [ -d "$PYTHON_FILE_SERVER_ROOT" ]; then
+ ctx logger info [ "Removing old web server root folder: $PYTHON_FILE_SERVER_ROOT." ]
+ rm -rf "$PYTHON_FILE_SERVER_ROOT"
+fi
+
+ctx logger info [ "Creating web server root folder: $PYTHON_FILE_SERVER_ROOT." ]
+
+mkdir -p "$PYTHON_FILE_SERVER_ROOT"
+cd "$PYTHON_FILE_SERVER_ROOT"
+
+ctx logger info [ "Downloading service template resources..." ]
+ctx download-resource-and-render [ "$PYTHON_FILE_SERVER_ROOT/index.html" "$INDEX_PATH" ]
+ctx download-resource [ "$PYTHON_FILE_SERVER_ROOT/aria-logo.png" "$IMAGE_PATH" ]
diff --git a/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/scripts/start.sh b/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/scripts/start.sh
new file mode 100644
index 0000000..7471cfc
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/scripts/start.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+set -e
+
+TEMP_DIR=/tmp
+PYTHON_FILE_SERVER_ROOT="$TEMP_DIR/python-simple-http-webserver"
+PID_FILE=server.pid
+PORT=$(ctx node properties port)
+URL="http://localhost:$PORT"
+
+ctx logger info [ "Starting web server at: $PYTHON_FILE_SERVER_ROOT." ]
+
+cd "$PYTHON_FILE_SERVER_ROOT"
+nohup python -m SimpleHTTPServer "$PORT" > /dev/null 2>&1 &
+echo $! > "$PID_FILE"
+
+server_is_up() {
+ if which wget >/dev/null; then
+ if wget "$URL" >/dev/null; then
+ return 0
+ fi
+ elif which curl >/dev/null; then
+ if curl "$URL" >/dev/null; then
+ return 0
+ fi
+ else
+    ctx logger error [ "Neither wget nor curl was found in PATH." ]
+ exit 1
+ fi
+ return 1
+}
+
+ctx logger info [ "Waiting for web server to launch on port $PORT..." ]
+STARTED=false
+for i in $(seq 1 15)
+do
+ if server_is_up; then
+ ctx logger info [ "Web server is up." ]
+ STARTED=true
+ break
+ else
+    ctx logger info [ "Web server not up yet; waiting 1 second." ]
+ sleep 1
+ fi
+done
+
+if [ "$STARTED" = false ]; then
+ ctx logger error [ "Web server did not start within 15 seconds." ]
+ exit 1
+fi
diff --git a/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/scripts/stop.sh b/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/scripts/stop.sh
new file mode 100644
index 0000000..9f3bfc5
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/examples/openstack-hello-world/scripts/stop.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -e
+
+TEMP_DIR=/tmp
+PYTHON_FILE_SERVER_ROOT="${TEMP_DIR}/python-simple-http-webserver"
+PID_FILE=server.pid
+PID=$(cat "$PYTHON_FILE_SERVER_ROOT/$PID_FILE")
+
+ctx logger info [ "Shutting down web server, pid = ${PID}." ]
+kill -9 "$PID" || exit $?
+
+ctx logger info [ "Removing web server root folder: $PYTHON_FILE_SERVER_ROOT." ]
+rm -rf "$PYTHON_FILE_SERVER_ROOT"
diff --git a/azure/aria/aria-extension-cloudify/plugins/aws/plugin.yaml b/azure/aria/aria-extension-cloudify/plugins/aws/plugin.yaml
new file mode 100644
index 0000000..5912d23
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/plugins/aws/plugin.yaml
@@ -0,0 +1,1754 @@
+#
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+topology_template:
+ policies:
+ cloudify-aws-plugin:
+ description: >-
+ aws plugin executes operations.
+ type: aria.Plugin
+ properties:
+ version: 1.4.10
+
+
+data_types:
+ aria.aws.datatypes.Config:
+ properties:
+ # Partially based on: http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
+ aws_access_key_id:
+ description: >
+ The ID of your AWS ACCESS KEY.
+ type: string
+ required: false
+ aws_secret_access_key:
+ description: >
+ The ID of your AWS SECRET KEY.
+ type: string
+ required: false
+ region:
+ description: >
+ This is for backward compatibility with version 1.2.
+ type: string
+ required: false
+ ec2_region_name:
+ description: >
+ The EC2 Region RegionName, such as us-east-1.
+ (Not us-east-1b, which is an availability zone, or US East, which is a Region.)
+ type: string
+ required: false
+ ec2_region_endpoint:
+ description: >
+ The endpoint for the given region.
+ type: string
+ required: false
+ elb_region_name:
+ description: >
+ The ELB Region RegionName, such as us-east-1.
+ (Not us-east-1b, which is an availability zone, or US East, which is a Region.)
+ Required for aws_config for node type aria.aws.nodes.ElasticLoadBalancer.
+ type: string
+ required: false
+ elb_region_endpoint:
+ description: >
+ The endpoint for the given ELB region.
+ type: string
+ required: false
+
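To illustrate how this datatype is consumed, here is a hedged sketch of supplying aws_config on a node template (the node name and all credential values are placeholders; the keys come from aria.aws.datatypes.Config above):

    node_templates:
      my_ip:
        type: aria.aws.nodes.ElasticIP          # defined later in this file
        properties:
          aws_config:
            aws_access_key_id: <access-key-id>
            aws_secret_access_key: <secret-access-key>
            ec2_region_name: us-east-1          # example region
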
+ aria.aws.datatypes.Route:
+ properties:
+ # Based on: http://docs.aws.amazon.com/cli/latest/reference/ec2/create-route.html
+ route_table_id:
+ description: >
+ In most cases, leave this blank, because the route table is implicit from the node or
+ relationship that is creating the route.
+ type: string
+ required: false
+ destination_cidr_block:
+ description: >
+          The CIDR block whose traffic you want routed to the device.
+ type: string
+ gateway_id:
+ description: >
+ The id of the gateway (either internet gateway, customer gateway, or vpn gateway).
+ type: string
+ required: false
+ instance_id:
+ description: >
+ The id of the instance (if you are routing to a NAT instance).
+ type: string
+ required: false
+ interface_id:
+ description: >
+ The id of an attached network interface.
+ type: string
+ required: false
+ vpc_peering_connection_id:
+ description: >
+ The id of a VPC peering connection.
+ type: string
+ required: false
+
+ aria.aws.datatypes.NetworkAclEntry:
+ # Based on: http://docs.aws.amazon.com/cli/latest/reference/ec2/create-network-acl-entry.html
+ properties:
+ rule_number:
+ description: >
+ Some number to identify this rule. Cannot duplicate an existing rule number.
+ type: integer
+ protocol:
+ description: >
+ The Assigned Internet Protocol Number for the protocol (e.g. 1 is ICMP, 6 is TCP, and 17 is UDP).
+ type: integer
+ rule_action:
+ description: Either ALLOW or DENY.
+ type: string
+ constraints:
+ - valid_values: [ ALLOW, DENY ]
+ cidr_block:
+ description: >
+ The cidr_block.
+ type: string
+ egress:
+ description: >
+ Whether the rule applies to egress traffic from the subnet.
+ type: boolean
+ default: false
+ required: false
+ icmp_type:
+ description: >
+ If in protocol you chose 1 for ICMP, the ICMP type, -1 for all ICMP types.
+ type: integer
+ required: false
+ icmp_code:
+ description: >
+ If in protocol you chose 1 for ICMP, the ICMP code, -1 for all ICMP codes.
+ type: integer
+ required: false
+ port_range_from:
+ description: >
+ The first port in the range.
+ type: integer
+ constraints:
+ - in_range: [ 1, 65535 ]
+ port_range_to:
+ description: >
+ The last port in the range.
+ type: integer
+ constraints:
+ - in_range: [ 1, 65535 ]
+
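As a sketch, a single entry of this datatype that allows inbound HTTP might look as follows (the rule number and CIDR are arbitrary; protocol 6 is TCP, per the Assigned Internet Protocol Numbers):

    acl_network_entries:          # property of aria.aws.nodes.ACL, below
      - rule_number: 100
        protocol: 6               # TCP
        rule_action: ALLOW
        cidr_block: 0.0.0.0/0
        egress: false
        port_range_from: 80
        port_range_to: 80
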
+ aria.aws.datatypes.SecurityGroupRule:
+ # Based on: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group-rule.html
+ properties:
+ egress:
+ description: >
+ Whether the rule applies to egress traffic.
+ type: boolean
+ default: false
+ required: false
+ ip_protocol:
+ description: >
+ The Assigned Internet Protocol Number for the protocol.
+ type: string
+ required: false
+ from_port:
+ description: >
+ The first port in the range.
+ type: integer
+ required: false
+ constraints:
+ - in_range: [ 1, 65535 ]
+ to_port:
+ description: >
+ The last port in the range.
+ type: integer
+ required: false
+ constraints:
+ - in_range: [ 1, 65535 ]
+ cidr_ip:
+ description: >
+ The cidr_block.
+ type: string
+ required: false
+ src_group_id:
+ description: >
+ The security group ID.
+ type: string
+ required: false
+
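Per the rules description on aria.aws.nodes.SecurityGroup below (either src_group_id or cidr_ip, plus ip_protocol, from_port and to_port), a sketch of two rules; the security group ID is a placeholder:

    rules:
      - ip_protocol: tcp          # SSH, open to the world
        from_port: 22
        to_port: 22
        cidr_ip: 0.0.0.0/0
      - ip_protocol: tcp          # app port, open to another group
        from_port: 8080
        to_port: 8080
        src_group_id: sg-xxxxxxxx
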
+ aria.aws.datatypes.BlockDeviceMapping:
+ derived_from: tosca.datatypes.Root
+ properties:
+ # Based on: http://docs.aws.amazon.com/cli/latest/reference/ec2/run-instances.html#options
+ virtual_name:
+ type: string
+ required: false
+ device_name:
+ type: string
+ required: false
+ ebs:
+ type: aria.aws.datatypes.Ebs
+ required: false
+ no_device:
+ type: string
+ required: false
+
+ aria.aws.datatypes.Ebs:
+ derived_from: tosca.datatypes.Root
+ properties:
+ # Based on: http://docs.aws.amazon.com/cli/latest/reference/ec2/run-instances.html#options
+ snapshot_id:
+ type: string
+ required: false
+ volume_size:
+ type: integer
+ required: false
+ delete_on_termination:
+ type: boolean
+ required: false
+      volume_type:
+        type: string
+        required: false
+        constraints:
+          - valid_values: [ standard, io1, gp2, sc1, st1 ]
+ iops:
+ type: integer
+ required: false
+ encrypted:
+ type: boolean
+ required: false
+
+ aria.aws.datatypes.NetworkInterfacePrivateIPAddress:
+ # Based on: http://docs.aws.amazon.com/cli/latest/reference/ec2/create-network-interface.html
+ # Specifically, look under --private-ip-addresses, and notice the differences from
+ # --private-ip-address.
+ derived_from: tosca.datatypes.Root
+ properties:
+ private_ip_address:
+ type: string
+ required: false
+ primary:
+ type: boolean
+ required: false
+
+ aria.aws.datatypes.NetworkInterface:
+ # Based on: http://docs.aws.amazon.com/cli/latest/reference/ec2/create-network-interface.html
+ derived_from: tosca.datatypes.Root
+ properties:
+ description:
+ type: string
+ required: false
+ dry_run:
+ type: boolean
+ required: false
+ groups:
+ type: list
+ entry_schema:
+ type: string
+ required: false
+ ipv6_address_count:
+ type: integer
+ required: false
+ ipv6_addresses:
+ type: map
+ entry_schema:
+ type: string
+ required: false
+ private_ip_address:
+ type: string
+ required: false
+ private_ip_addresses:
+ type: map
+ entry_schema:
+ type: aria.aws.datatypes.NetworkInterfacePrivateIPAddress
+ required: false
+ secondary_private_ip_address_count:
+ type: integer
+ required: false
+ subnet_id:
+ type: string
+ required: false
+ cli_input_json:
+ type: string
+ required: false
+ generate_cli_skeleton:
+ type: string
+ required: false
+
+ aria.aws.datatypes.RunInstancesParameters:
+ derived_from: tosca.datatypes.Root
+ properties:
+    # These properties were derived from the parameters of boto.ec2.connection.run_instances:
+    # https://github.com/boto/boto/blob/master/boto/ec2/connection.py#L738
+    # In the corresponding AWS documentation,
+    # http://docs.aws.amazon.com/cli/latest/reference/ec2/run-instances.html,
+    # the properties 'placement', 'placement_group' and 'tenancy' of the boto API are part of a
+    # structure called 'placement', together with 'affinity' and 'host_id', which do not exist
+    # in the boto API.
+ image_id:
+ type: string
+ required: false
+ min_count:
+ type: integer
+ required: false
+ max_count:
+ type: integer
+ required: false
+ key_name:
+ type: string
+ required: false
+ security_groups:
+ type: list
+ entry_schema:
+ type: string
+ required: false
+ user_data:
+ type: string
+ required: false
+ addressing_type:
+ type: string
+ required: false
+ instance_type:
+ type: string
+ required: false
+ placement:
+ type: string
+ required: false
+ kernel_id:
+ type: string
+ required: false
+ ramdisk_id:
+ type: string
+ required: false
+ monitoring_enabled:
+ type: boolean
+ required: false
+ subnet_id:
+ type: string
+ required: false
+ block_device_map:
+ type: list
+ entry_schema:
+ type: aria.aws.datatypes.BlockDeviceMapping
+ required: false
+ disable_api_termination:
+ type: boolean
+ required: false
+ instance_initiated_shutdown_behavior:
+ type: string
+ constraints:
+ - valid_values: [ stop, terminate ]
+ required: false
+ private_id_address:
+ type: string
+ required: false
+ placement_group:
+ type: string
+ required: false
+ client_token:
+ type: string
+ required: false
+ security_group_ids:
+ type: list
+ entry_schema:
+ type: string
+ required: false
+ additional_info:
+ type: string
+ required: false
+ instance_profile_name:
+ type: string
+ required: false
+ instance_profile_arn:
+ type: string
+ required: false
+ tenancy:
+ type: string
+ required: false
+ constraints:
+          - valid_values: [ default, dedicated ]
+ ebs_optimized:
+ type: boolean
+ required: false
+ network_interfaces:
+ type: list
+ entry_schema:
+ type: aria.aws.datatypes.NetworkInterface
+ required: false
+ dry_run:
+ type: boolean
+ required: false
+
+ aria.aws.datatypes.LoadBalancerListener:
+ # According to the description of the 'listeners' property of aria.aws.node.LoadBalancer
+ derived_from: tosca.datatypes.Root
+ properties:
+ LoadBalancerPortNumber:
+ type: integer
+ constraints:
+ - in_range: [ 1, 65535 ]
+ InstancePortNumber:
+ type: integer
+ constraints:
+ - in_range: [ 1, 65535 ]
+ protocol:
+ type: string
+ constraints:
+ - valid_values: [ tcp, ssl, http, https ]
+ SSLCertificateID:
+ type: string
+ required: false
+
+ aria.aws.datatypes.LoadBalancerComplexListener:
+ # According to the description of the 'complex_listeners' property of aria.aws.node.LoadBalancer
+ derived_from: aria.aws.datatypes.LoadBalancerListener
+ properties:
+ InstanceProtocol:
+ type: integer
+ constraints:
+ - in_range: [ 1, 65535 ]
+
+ aria.aws.datatypes.LoadBalancerHealthCheck:
+ # Based on: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html
+ derived_from: tosca.datatypes.Root
+ properties:
+ healthy_threshold:
+ type: string
+ required: false
+ interval:
+ type: string
+ required: false
+ target:
+ type: string
+ required: false
+ timeout:
+ type: string
+ required: false
+ unhealthy_threshold:
+ type: string
+ required: false
+
+ aria.aws.datatypes.NetworkInterfaceCreateParameters:
+    # These properties were derived from the parameters of boto.ec2.connection.create_network_interface:
+    # https://github.com/boto/boto/blob/master/boto/ec2/connection.py#L4286, which is based on:
+ # http://docs.aws.amazon.com/cli/latest/reference/ec2/create-network-interface.html
+ derived_from: tosca.datatypes.Root
+ properties:
+ subnet_id:
+ type: string
+ required: false
+ private_ip_address:
+ type: string
+ required: false
+ description:
+ type: string
+ required: false
+ groups:
+ type: list
+ entry_schema:
+ type: string
+ required: false
+ dry_run:
+ type: boolean
+ required: false
+
+ aria.aws.datatypes.VolumeCreateParameters:
+ # Based on http://docs.aws.amazon.com/cli/latest/reference/ec2/create-volume.html#synopsis
+ derived_from: tosca.datatypes.Root
+ properties:
+ size:
+ type: integer
+ required: false
+ zone:
+ type: string
+ required: false
+ snapshot:
+ type: string
+ required: false
+ volume_type:
+ type: string
+ required: false
+ iops:
+ type: integer
+ required: false
+ encrypted:
+ type: boolean
+ required: false
+ kms_key_id:
+ type: string
+ required: false
+ dry_run:
+ type: boolean
+ required: false
+
+ aria.aws.datatypes.VolumeDeleteParameters:
+ # Based on: http://docs.aws.amazon.com/cli/latest/reference/ec2/delete-volume.html
+ derived_from: tosca.datatypes.Root
+ properties:
+ volume_id:
+ type: string
+ required: false
+ dry_run:
+ type: boolean
+ required: false
+
+interface_types:
+ aria.aws.interfaces.Validation:
+ derived_from: tosca.interfaces.Root
+ creation:
+ description: >
+ creation operation for the aws validation interface
+ aria.aws.interfaces.Snapshot:
+ derived_from: tosca.interfaces.Root
+ create:
+ description: >
+ creation operation for the aws snapshot interface
+
+
+node_types:
+ aria.aws.nodes.Instance:
+ derived_from: tosca.nodes.Compute
+ properties:
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or it should be created,
+ true if you are bringing an existing resource, false if you want to create it.
+ type: boolean
+ default: false
+ resource_id:
+ description: >
+ The AWS resource ID of the external resource, if use_external_resource is true.
+ Otherwise it is an empty string.
+ type: string
+ default: ''
+ tags:
+ description: >
+ A dictionary of key/value pairs of tags you want to add.
+ type: map
+ default: {}
+ entry_schema:
+ type: string # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html
+ name:
+ description: >
+ Optional field if you want to add a specific name to the instance.
+ type: string
+ default: ''
+ required: false
+ image_id:
+ description: >
+ The ID of the AMI image in your Amazon account.
+ type: string
+ instance_type:
+ description: >
+ The instance's size.
+ type: string
+ use_password:
+ type: boolean
+ default: false
+ parameters:
+ description: >
+          The key/value pair parameters that the Amazon API accepts for the
+          ec2.connection.EC2Connection.run_instances command. Note that although this
+          field is listed as optional, any non-trivial use case requires that both the
+          key_name parameter and the security_groups parameter be specified.
+ type: aria.aws.datatypes.RunInstancesParameters
+ default: {}
+ required: false
+ aws_config:
+ description: >
+ A dictionary of values to pass to authenticate with the AWS API.
+ type: aria.aws.datatypes.Config
+ required: false
+ attributes:
+ public_ip_address:
+ type: string
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.instance.create
+ start:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.instance.start
+ inputs:
+ start_retry_interval:
+              description: Polling interval, in seconds, until the server is active
+ type: integer
+ default: 30
+ private_key_path:
+ description: >
+ Path to private key which matches the server's
+ public key. Will be used to decrypt password in case
+ the "use_password" property is set to "true"
+ type: string
+ default: ''
+ stop:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.instance.stop
+ delete:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.instance.delete
+ Validation:
+ type: aria.aws.interfaces.Validation
+ creation:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.instance.creation_validation
+ requirements:
+ - elastic_ip:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.ElasticIP
+ relationship: aria.aws.relationships.InstanceConnectedToElasticIP
+ occurrences: [ 0, UNBOUNDED ]
+ - keypair:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.KeyPair
+ relationship: aria.aws.relationships.InstanceConnectedToKeypair
+ occurrences: [ 0, UNBOUNDED ]
+ - security_group:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.SecurityGroup
+ relationship: aria.aws.relationships.instance_connected_to_security_group
+ occurrences: [ 0, UNBOUNDED ]
+ - load_balancer:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.ElasticLoadBalancer
+ relationship: aria.aws.relationships.InstanceConnectedToLoadBalancer
+ occurrences: [ 0, UNBOUNDED ]
+ - subnet_to_be_contained_in:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.Subnet
+ relationship: aria.aws.relationships.InstanceContainedInSubnet
+ occurrences: [ 0, UNBOUNDED ]
+ - subnet_to_connect_to:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.Subnet
+ relationship: aria.aws.relationships.InstanceConnectedToSubnet
+ occurrences: [ 0, UNBOUNDED ]
+ - eni:
+ capability: tosca.capabilities.Root
+ node: aria.aws.nodes.Interface
+ relationship: aria.aws.relationships.InstanceConnectedToENI
+ occurrences: [ 0, UNBOUNDED ]
+
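Following the note on the parameters property above, a minimal sketch of an Instance node template for a non-trivial case, with key_name and security_groups set (all IDs and names are placeholders):

    node_templates:
      vm:
        type: aria.aws.nodes.Instance
        properties:
          image_id: ami-xxxxxxxx              # placeholder AMI ID
          instance_type: t2.micro             # placeholder instance size
          parameters:
            key_name: my-keypair
            security_groups: [ my-security-group ]
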
+ aria.aws.nodes.WindowsInstance:
+ derived_from: aria.aws.nodes.Instance
+ properties:
+ use_password:
+ type: boolean
+ default: true
+ os_family:
+ type: string
+ default: windows
+
+ aria.aws.nodes.ElasticIP:
+ derived_from: tosca.nodes.Root
+ properties:
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or it should be created,
+ true if you are bringing an existing resource, false if you want to create it.
+ type: boolean
+ default: false
+ resource_id:
+ description: >
+ The AWS resource ID of the external resource, if use_external_resource is true.
+ Otherwise it is an empty string.
+ type: string
+ default: ''
+ tags:
+ description: >
+ A dictionary of key/value pairs of tags you want to add.
+ type: map
+ default: {}
+ entry_schema:
+ type: string # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html
+ domain:
+ description: >
+ Set this to 'vpc' if you want to use VPC.
+ type: string
+ required: false
+ aws_config:
+ description: >
+ A dictionary of values to pass to authenticate with the AWS API.
+ type: aria.aws.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.elasticip.create
+ delete:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.elasticip.delete
+ Validation:
+ type: aria.aws.interfaces.Validation
+ creation:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.elasticip.creation_validation
+
+ aria.aws.nodes.SecurityGroup:
+ derived_from: tosca.nodes.Root
+ properties:
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or it should be created,
+ true if you are bringing an existing resource, false if you want to create it.
+ type: boolean
+ default: false
+ resource_id:
+ description: >
+ The AWS resource ID of the external resource, if use_external_resource is true.
+ Otherwise it is an empty string.
+ type: string
+ default: ''
+ tags:
+ description: >
+ A dictionary of key/value pairs of tags you want to add.
+ type: map
+ default: {}
+ entry_schema:
+ type: string # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html
+ description:
+ description: >
+ The description field that is required for every security group that you create
+ in Amazon.
+ type: string
+ rules:
+ description: >
+ You need to pass in either src_group_id (security group ID) OR cidr_ip,
+ and then the following three: ip_protocol, from_port and to_port.
+ type: list
+ entry_schema:
+ type: aria.aws.datatypes.SecurityGroupRule
+ default: []
+ aws_config:
+ description: >
+ A dictionary of values to pass to authenticate with the AWS API.
+ type: aria.aws.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.securitygroup.create
+ start:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.securitygroup.start
+ delete:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.securitygroup.delete
+ Validation:
+ type: aria.aws.interfaces.Validation
+ creation:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.securitygroup.creation_validation
+ requirements:
+ - vpc:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.VPC
+ relationship: aria.aws.relationships.SecurityGroupContainedInVPC
+ occurrences: [ 0, UNBOUNDED ]
+ - security_group_rule:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.SecurityGroupRule
+ relationship: aria.aws.relationships.SecurityGroupUsesRule
+ occurrences: [ 0, UNBOUNDED ]
+
+ aria.aws.nodes.Volume:
+ derived_from: tosca.nodes.Root
+ properties:
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or it should be created,
+ true if you are bringing an existing resource, false if you want to create it.
+ type: boolean
+ default: false
+ resource_id:
+ description: >
+ The AWS resource ID of the external resource, if use_external_resource is true.
+ Otherwise it is an empty string.
+ type: string
+ default: ''
+ tags:
+ description: >
+ A dictionary of key/value pairs of tags you want to add.
+ type: map
+ default: {}
+ entry_schema:
+ type: string # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html
+ size:
+ description: >
+ The size of the volume in GB.
+ type: string
+ zone:
+ description: >
+ A string representing the AWS availability zone.
+ type: string
+ device:
+ description: >
+ The device on the instance
+ type: string
+ aws_config:
+ description: >
+ A dictionary of values to pass to authenticate with the AWS API.
+ type: aria.aws.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.ebs.create
+ inputs:
+ args:
+ type: map
+ entry_schema:
+ type: aria.aws.datatypes.VolumeCreateParameters
+ default: {}
+ start:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.ebs.start
+ delete:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.ebs.delete
+ inputs:
+ args:
+ type: map
+ entry_schema:
+ type: aria.aws.datatypes.VolumeDeleteParameters
+ default: {}
+ Validation:
+ type: aria.aws.interfaces.Validation
+ creation:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.ebs.creation_validation
+ Snapshot:
+ type: aria.aws.interfaces.Snapshot
+ create:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.ebs.create_snapshot
+ inputs:
+ args:
+ type: map
+ entry_schema:
+ type: string
+ default: {}
+ requirements:
+ - instance:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.Instance
+ relationship: aria.aws.relationships.VolumeConnectedToInstance
+ occurrences: [ 0, UNBOUNDED ]
+
+ aria.aws.nodes.KeyPair:
+ derived_from: tosca.nodes.Root
+ properties:
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if the resource should be created.
+ type: boolean
+ default: false
+ resource_id:
+ description: >
+ The AWS resource ID of the external resource, if use_external_resource is true.
+          If use_external_resource is false, this will be the key's name and ID in AWS.
+          If left blank, the plugin will set a name for you.
+ type: string
+ default: ''
+ tags:
+ description: >
+ A dictionary of key/value pairs of tags you want to add.
+ type: map
+ default: {}
+ entry_schema:
+ type: string # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html
+ private_key_path:
+ description: >
+ The path where the key should be saved on the machine. If this is a bootstrap
+ process, this refers to the local computer. If this will run on the manager,
+ this will be saved on the manager.
+ type: string
+ aws_config:
+ description: >
+ A dictionary of values to pass to authenticate with the AWS API.
+ type: aria.aws.datatypes.Config
+ required: false
+ attributes:
+ aws_resource_id:
+ type: string
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.keypair.create
+ delete:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.keypair.delete
+ Validation:
+ type: aria.aws.interfaces.Validation
+ creation:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.keypair.creation_validation
+
+ aria.aws.nodes.ElasticLoadBalancer:
+ derived_from: tosca.nodes.LoadBalancer
+ properties:
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or it should be created,
+ true if you are bringing an existing resource, false if you want to create it.
+ type: boolean
+ default: false
+ resource_id:
+ description: >
+ The AWS resource ID of the external resource, if use_external_resource is true.
+ Otherwise it is an empty string.
+ type: string
+ default: ''
+ tags:
+ description: >
+ A dictionary of key/value pairs of tags you want to add.
+ type: map
+ default: {}
+ entry_schema:
+ type: string # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html
+ elb_name:
+ description: >
+ The mnemonic name associated with the new load balancer
+ type: string
+ zones:
+ description: >
+ zones (List of strings) - The names of the availability zone(s) to add.
+          example: ['us-east-1a','us-east-1b']
+ type: string
+ security_groups:
+ description: >
+ security_groups (list of strings) - The security groups assigned to your LoadBalancer
+ within your VPC.
+ example: ['sg-123456','sg-7891011']
+ FYI: security groups only supported with vpc
+ type: list
+ entry_schema:
+ type: string
+ default: []
+ required: false
+ listeners:
+ description: >
+          listeners (list of tuples) - Each tuple contains three or four values: (LoadBalancerPortNumber,
+          InstancePortNumber, Protocol, [SSLCertificateID]), where LoadBalancerPortNumber and
+          InstancePortNumber are integer values between 1 and 65535, Protocol is a string containing
+          either 'TCP', 'SSL', 'HTTP', or 'HTTPS', and SSLCertificateID is the ARN of an AWS IAM
+          certificate, which must be specified when using HTTPS.
+          example: [[80, 8080, 'http'], [443, 8443, 'tcp']]
+ type: list
+ entry_schema:
+ type: aria.aws.datatypes.LoadBalancerListener
+ health_checks:
+ description: >
+          A list of health checks (dicts) to use as criteria for instance health.
+ example: [{'target': 'HTTP:8080/health'}, {'target': 'HTTP:80/alive'}]
+ type: list
+ entry_schema:
+ type: aria.aws.datatypes.LoadBalancerHealthCheck
+ default: []
+ required: false
+ scheme:
+ description: >
+ The type of a LoadBalancer. By default, Elastic Load Balancing creates an internet-facing
+ LoadBalancer with a publicly resolvable DNS name, which resolves to public IP addresses.
+ Specify the value internal for this option to create an internal LoadBalancer with a DNS
+ name that resolves to private IP addresses.
+ This option is only available for LoadBalancers attached to an Amazon VPC.
+ type: string
+ default: ''
+ required: false
+ subnets:
+ description: >
+ list of strings - A list of subnet IDs in your VPC to attach to your LoadBalancer.
+ example:
+ type: list
+ entry_schema:
+ type: string
+ default: []
+ required: false
+ complex_listeners:
+ description: >
+          List of tuples - Each tuple contains four or five values: (LoadBalancerPortNumber,
+          InstancePortNumber, Protocol, InstanceProtocol, SSLCertificateId),
+          where LoadBalancerPortNumber and InstancePortNumber are integer values between 1 and 65535,
+          Protocol and InstanceProtocol are strings containing either 'TCP', 'SSL', 'HTTP', or 'HTTPS',
+          and SSLCertificateId is the ARN of an SSL certificate loaded into AWS IAM.
+ type: list
+ entry_schema:
+ type: aria.aws.datatypes.LoadBalancerComplexListener
+ default: []
+ required: false
+ aws_config:
+ description: >
+ A dictionary of values to pass to authenticate with the AWS API.
+ type: aria.aws.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.elasticloadbalancer.create
+ start:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.elasticloadbalancer.start
+ delete:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.elasticloadbalancer.delete
+ Validation:
+ type: aria.aws.interfaces.Validation
+ creation:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.elasticloadbalancer.creation_validation
+
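Restating the tuple example above against aria.aws.datatypes.LoadBalancerListener, a sketch of the same listeners property in mapping form (field names follow the datatype defined earlier in this file):

    listeners:
      - LoadBalancerPortNumber: 80
        InstancePortNumber: 8080
        protocol: http
      - LoadBalancerPortNumber: 443
        InstancePortNumber: 8443
        protocol: tcp
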
+ aria.aws.nodes.VPC:
+ derived_from: tosca.nodes.network.Network
+ properties:
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or it should be created,
+ true if you are bringing an existing resource, false if you want to create it.
+ type: boolean
+ default: false
+ resource_id:
+ description: >
+ The AWS resource ID of the external resource, if use_external_resource is true.
+ Otherwise it is an empty string.
+ type: string
+ default: ''
+ tags:
+ description: >
+ A dictionary of key/value pairs of tags you want to add.
+ type: map
+ default: {}
+ entry_schema:
+ type: string # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html
+ cidr_block:
+ description: >
+          The CIDR block that you will split this VPC's subnets across.
+ type: string
+ instance_tenancy:
+ description: >
+ Default or dedicated.
+ type: string
+ default: default
+ required: false
+ aws_config:
+ description: >
+ A dictionary of values to pass to authenticate with the AWS API.
+ type: aria.aws.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.vpc.create_vpc
+ start:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.vpc.start
+ delete:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.vpc.delete
+ Validation:
+ type: aria.aws.interfaces.Validation
+ creation:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.vpc.creation_validation
+ requirements:
+ - vpc:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.VPC
+ relationship: aria.aws.relationships.RouteTableOfSourceVPCConnectedToTargetPeerVPC
+ occurrences: [ 0, UNBOUNDED ]
+
+ aria.aws.nodes.Subnet:
+ derived_from: tosca.nodes.Root
+ properties:
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or it should be created,
+ true if you are bringing an existing resource, false if you want to create it.
+ type: boolean
+ default: false
+ resource_id:
+ description: >
+ The AWS resource ID of the external resource, if use_external_resource is true.
+ Otherwise it is an empty string.
+ type: string
+ default: ''
+ tags:
+ description: >
+ A dictionary of key/value pairs of tags you want to add.
+ type: map
+ default: {}
+ entry_schema:
+ type: string # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html
+ cidr_block:
+ description: >
+ The CIDR Block that instances will be on.
+ type: string
+ availability_zone:
+ description: >
+ The availability zone that you want your subnet in.
+ type: string
+ default: ''
+ required: false
+ aws_config:
+ description: >
+ A dictionary of values to pass to authenticate with the AWS API.
+ type: aria.aws.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.subnet.create_subnet
+ start:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.subnet.start_subnet
+ delete:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.subnet.delete_subnet
+ Validation:
+ type: aria.aws.interfaces.Validation
+ creation:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.subnet.creation_validation
+ requirements:
+ - vpc:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.VPC
+ relationship: aria.aws.relationships.SubnetContainedInVPC
+ occurrences: [ 0, UNBOUNDED ]
+
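A sketch pairing the two node types above; the subnet's vpc requirement resolves to the SubnetContainedInVPC relationship, and the CIDR blocks are placeholders carved from the VPC range:

    node_templates:
      my_vpc:
        type: aria.aws.nodes.VPC
        properties:
          cidr_block: 10.0.0.0/16
      my_subnet:
        type: aria.aws.nodes.Subnet
        properties:
          cidr_block: 10.0.0.0/24
        requirements:
          - vpc: my_vpc
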
+ aria.aws.nodes.Gateway:
+ derived_from: tosca.nodes.Root
+ properties:
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or it should be created,
+ true if you are bringing an existing resource, false if you want to create it.
+ type: boolean
+ default: false
+ resource_id:
+ description: >
+ The AWS resource ID of the external resource, if use_external_resource is true.
+ Otherwise it is an empty string.
+ type: string
+ default: ''
+ tags:
+ description: >
+ A dictionary of key/value pairs of tags you want to add.
+ type: map
+ default: {}
+ entry_schema:
+ type: string # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html
+ aws_config:
+ description: >
+ A dictionary of values to pass to authenticate with the AWS API.
+ type: aria.aws.datatypes.Config
+ required: false
+ interfaces:
+ Validation:
+ type: aria.aws.interfaces.Validation
+ creation:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.gateway.creation_validation
+ requirements:
+ - vpc:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.VPC
+ relationship: aria.aws.relationships.GatewayConnectedToVPC
+ occurrences: [ 0, UNBOUNDED ]
+
+ aria.aws.nodes.InternetGateway:
+ derived_from: aria.aws.nodes.Gateway
+ properties:
+ cidr_block:
+ description: >
+ The cidr_block that you want this internet gateway to service. Default is for all internet
+ traffic.
+ type: string
+ default: '0.0.0.0/0'
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.gateway.create_internet_gateway
+ start:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.gateway.start_internet_gateway
+ delete:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.gateway.delete_internet_gateway
+
+ aria.aws.nodes.VPNGateway:
+ derived_from: aria.aws.nodes.Gateway
+ properties:
+ type:
+ description: >
+          Type of VPN connection. The only currently valid value is ipsec.1
+ type: string
+ default: ipsec.1
+ availability_zone:
+ description: >
+ The Availability Zone where you want the VPN gateway.
+ type: string
+ default: ''
+ required: false
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.gateway.create_vpn_gateway
+ start:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.gateway.start_vpn_gateway
+ delete:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.gateway.delete_vpn_gateway
+
+ aria.aws.nodes.CustomerGateway:
+ derived_from: aria.aws.nodes.Gateway
+ properties:
+ type:
+ description: >
+          Type of VPN connection. The only currently valid value is ipsec.1
+ type: string
+ default: ipsec.1
+ ip_address:
+ description: >
+          Internet-routable IP address of the customer's gateway. Must be a static address.
+ type: string
+ bgp_asn:
+ description: >
+          The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).
+ type: integer
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.gateway.create_customer_gateway
+ start:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.gateway.start_customer_gateway
+ delete:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.gateway.delete_customer_gateway
+ requirements:
+ - vpn_gateway:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.VPNGateway
+ relationship: aria.aws.relationships.CustomerGatewayConnectedToVPNGateway
+ occurrences: [ 0, UNBOUNDED ]
+
+ aria.aws.nodes.ACL:
+ derived_from: tosca.nodes.Root
+ properties:
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or it should be created,
+ true if you are bringing an existing resource, false if you want to create it.
+ type: boolean
+ default: false
+ resource_id:
+ description: >
+ The AWS resource ID of the external resource, if use_external_resource is true.
+ Otherwise it is an empty string.
+ type: string
+ default: ''
+ tags:
+ description: >
+ A dictionary of key/value pairs of tags you want to add.
+ type: map
+ default: {}
+ entry_schema:
+ type: string # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html
+ acl_network_entries:
+ description: >
+          A list of rules of data type aria.aws.datatypes.NetworkAclEntry (see above).
+ type: list
+ entry_schema:
+ type: aria.aws.datatypes.NetworkAclEntry
+ default: []
+ required: false
+ aws_config:
+ description: >
+ A dictionary of values to pass to authenticate with the AWS API.
+ type: aria.aws.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.networkacl.create_network_acl
+ start:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.networkacl.start_network_acl
+ delete:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.networkacl.delete_network_acl
+ Validation:
+ type: aria.aws.interfaces.Validation
+ creation:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.networkacl.creation_validation
+ requirements:
+ - vpc:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.VPC
+ relationship: aria.aws.relationships.NetworkACLContainedInVPC
+ occurrences: [ 0, UNBOUNDED ]
+ - subnet:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.Subnet
+ relationship: aria.aws.relationships.NetworkACLAssociatedWithSubnet
+ occurrences: [ 0, UNBOUNDED ]
+
+ aria.aws.nodes.DHCPOptions:
+ derived_from: tosca.nodes.Root
+ properties:
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or it should be created,
+ true if you are bringing an existing resource, false if you want to create it.
+ type: boolean
+ default: false
+ resource_id:
+ description: >
+ The AWS resource ID of the external resource, if use_external_resource is true.
+ Otherwise it is an empty string.
+ type: string
+ default: ''
+ tags:
+ description: >
+ A dictionary of key/value pairs of tags you want to add.
+ type: map
+ default: {}
+ entry_schema:
+ type: string # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html
+ domain_name:
+ description: >
+ A domain name.
+ type: string
+ required: false
+ domain_name_servers:
+ description: >
+ A list of up to four DNS servers.
+ type: list
+ entry_schema:
+ type: string
+ default: []
+ required: false
+ ntp_servers:
+ description: >
+ A list of up to four NTP servers.
+ type: list
+ entry_schema:
+ type: string
+ default: []
+ required: false
+ netbios_name_servers:
+ description: >
+ A list of up to four netbios servers.
+ type: list
+ entry_schema:
+ type: string
+ default: []
+ required: false
+ netbios_node_type:
+ description: >
+          The NetBIOS node type; 2 is recommended.
+ type: string
+ default: ''
+ required: false
+ aws_config:
+ description: >
+ A dictionary of values to pass to authenticate with the AWS API.
+ type: aria.aws.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.dhcp.create_dhcp_options
+ start:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.dhcp.start_dhcp_options
+ delete:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.dhcp.delete_dhcp_options
+ Validation:
+ type: aria.aws.interfaces.Validation
+ creation:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.dhcp.creation_validation
+ requirements:
+ - vpc:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.VPC
+ relationship: aria.aws.relationships.DHCPOptionsAssociatedWithVPC
+ occurrences: [ 0, UNBOUNDED ]
+
+ aria.aws.nodes.RouteTable:
+ derived_from: tosca.nodes.Root
+ properties:
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or it should be created,
+ true if you are bringing an existing resource, false if you want to create it.
+ type: boolean
+ default: false
+ required: true
+ resource_id:
+ description: >
+ The AWS resource ID of the external resource, if use_external_resource is true.
+ Otherwise it is an empty string.
+ type: string
+ default: ''
+ tags:
+ description: >
+ A dictionary of key/value pairs of tags you want to add.
+ type: map
+ default: {}
+ entry_schema:
+ type: string # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html
+ aws_config:
+ description: >
+ A dictionary of values to pass to authenticate with the AWS API.
+ type: aria.aws.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create:
+          implementation: cloudify-aws-plugin > cloudify_aws.vpc.routetable.create_route_table
+ inputs:
+ routes:
+ description: >
+ A list of aria.aws.datatypes.Route.
+ type: list
+ entry_schema:
+ type: aria.aws.datatypes.Route
+ default: []
+ start:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.routetable.start_route_table
+ delete:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.routetable.delete_route_table
+ Validation:
+ type: aria.aws.interfaces.Validation
+ creation:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.routetable.creation_validation
+ requirements:
+ - vpc:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.VPC
+        relationship: aria.aws.relationships.RoutetableContainedInVPC
+ occurrences: [ 0, UNBOUNDED ]
+ - subnet:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.Subnet
+ relationship: aria.aws.relationships.RoutetableAssociatedWithSubnet
+ occurrences: [ 0, UNBOUNDED ]
+ - gateway:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.Gateway
+ relationship: aria.aws.relationships.RouteTableToGateway
+ occurrences: [ 0, UNBOUNDED ]
+
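A sketch of the create operation's routes input, assuming a default route through an internet gateway (the gateway ID is a placeholder; as noted for aria.aws.datatypes.Route above, route_table_id is left implicit):

    my_route_table:
      type: aria.aws.nodes.RouteTable
      requirements:
        - vpc: my_vpc
        - subnet: my_subnet
      interfaces:
        Standard:
          create:
            inputs:
              routes:
                - destination_cidr_block: 0.0.0.0/0
                  gateway_id: igw-xxxxxxxx
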
+ aria.aws.nodes.Interface:
+ derived_from: tosca.nodes.network.Port
+ properties:
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or it should be created,
+ true if you are bringing an existing resource, false if you want to create it.
+ type: boolean
+ default: false
+ resource_id:
+ description: >
+ The AWS resource ID of the external resource, if use_external_resource is true.
+ Otherwise it is an empty string.
+ type: string
+ default: ''
+ tags:
+ description: >
+ A dictionary of key/value pairs of tags you want to add.
+ type: map
+ default: {}
+ entry_schema:
+ type: string # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html
+ parameters:
+ description: >
+ Any parameters accepted by the create_network_interface operation.
+ type: aria.aws.datatypes.NetworkInterfaceCreateParameters
+ required: false
+ aws_config:
+ description: >
+ A dictionary of values to pass to authenticate with the AWS API.
+ type: aria.aws.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.eni.create
+ inputs:
+ args:
+ type: aria.aws.datatypes.NetworkInterfaceCreateParameters
+ default: {}
+ start:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.eni.start
+ delete:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.eni.delete
+ inputs:
+ args:
+ type: map
+ entry_schema:
+ type: string
+ default: {}
+ requirements:
+ - instance:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.Instance
+ relationship: aria.aws.relationships.ENIConnectedToInstance
+ occurrences: [ 0, UNBOUNDED ]
+
+ aria.aws.nodes.SecurityGroupRule:
+ derived_from: tosca.nodes.Root
+ properties:
+ use_external_resource:
+ type: boolean
+        default: false
+ resource_id:
+ description: >
+ The resource ID.
+ type: string
+ default: ''
+ required: false
+ rule:
+ description: >
+ A list of rules of data type aria.aws.datatypes.SecurityGroupRule (see above).
+ type: list
+ entry_schema:
+ type: aria.aws.datatypes.SecurityGroupRule
+ default: []
+ required: false
+ aws_config:
+ description: >
+ A dictionary of values to pass to authenticate with the AWS API.
+        type: aria.aws.datatypes.Config
+        required: false
+ requirements:
+ - security_group_to_depend_on:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.SecurityGroup
+ relationship: aria.aws.relationships.RuleDependsOnSecurityGroup
+ occurrences: [ 0, UNBOUNDED ]
+ - security_group_to_be_contained_in:
+ capability: tosca.capabilities.Node
+ node: aria.aws.nodes.SecurityGroup
+ relationship: aria.aws.relationships.RuleContainedInSecurityGroup
+ occurrences: [ 0, UNBOUNDED ]
+
+ aria.aws.nodes.SecurityGroupRule.Multi:
+ derived_from: aria.aws.nodes.SecurityGroupRule
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.securitygroup.create_rule
+ inputs:
+ args:
+ type: map
+ entry_schema:
+ type: aria.aws.datatypes.SecurityGroupRule
+ default: {}
+ delete:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.securitygroup.delete_rule
+ inputs:
+ args:
+ type: map
+ entry_schema:
+ type: aria.aws.datatypes.SecurityGroupRule
+ default: {}
+
+
+relationship_types:
+ aria.aws.relationships.ConnectedToElasticIP:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ add_source:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.elasticip.associate
+ remove_source:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.elasticip.disassociate
+
+ aria.aws.relationships.InstanceConnectedToElasticIP:
+ derived_from: aria.aws.relationships.ConnectedToElasticIP
+
+ aria.aws.relationships.InstanceConnectedToKeypair:
+ derived_from: tosca.relationships.ConnectsTo
+
+ aria.aws.relationships.ConnectedToSecurityGroup:
+ derived_from: tosca.relationships.ConnectsTo
+
+  # The name of this relationship is not in CamelCase because, in order to attach a security
+  # group to an instance using the Cloudify AWS plugin, the relationship between the instance
+  # and the security group must include the string 'instance_connected_to_security_group' in
+  # its name.
+ aria.aws.relationships.instance_connected_to_security_group:
+ derived_from: aria.aws.relationships.ConnectedToSecurityGroup
+
+ aria.aws.relationships.InstanceConnectedToLoadBalancer:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ add_source:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.elasticloadbalancer.associate
+ remove_source:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.elasticloadbalancer.disassociate
+
+ aria.aws.relationships.VolumeConnectedToInstance:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ add_source:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.ebs.associate
+ inputs:
+ args:
+ type: map
+ entry_schema:
+ type: string
+ default: {}
+ force:
+ type: boolean
+              default: false
+ remove_source:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.ebs.disassociate
+ inputs:
+ args:
+ type: map
+ entry_schema:
+ type: string
+ default: {}
+ force:
+ type: boolean
+              default: false
+
+ aria.aws.relationships.SubnetContainedInVPC:
+ derived_from: tosca.relationships.HostedOn
+
+ aria.aws.relationships.RoutetableContainedInVPC:
+ derived_from: tosca.relationships.HostedOn
+
+ aria.aws.relationships.RoutetableAssociatedWithSubnet:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ add_target:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.routetable.associate_route_table
+ remove_target:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.routetable.disassociate_route_table
+
+ aria.aws.relationships.RouteTableToGateway:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ add_target:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.routetable.create_route_to_gateway
+ inputs:
+ destination_cidr_block:
+ description: >
+ Provide a specific value for the destination cidr block.
+ If the target is an internet gateway, then this is not necessary.
+ It will resolve to the cidr_block node property.
+ Otherwise, you need to provide this value.
+ type: string
+ default: ''
+ remove_target:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.routetable.delete_route_from_gateway
+
+ aria.aws.relationships.GatewayConnectedToVPC:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ add_target:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.gateway.attach_gateway
+ remove_target:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.gateway.detach_gateway
+
+ aria.aws.relationships.NetworkACLContainedInVPC:
+ derived_from: tosca.relationships.HostedOn
+
+ aria.aws.relationships.NetworkACLAssociatedWithSubnet:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ add_target:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.networkacl.associate_network_acl
+ remove_target:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.networkacl.disassociate_network_acl
+
+ aria.aws.relationships.RouteTableOfSourceVPCConnectedToTargetPeerVPC:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ pre_configure_target:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.vpc.create_vpc_peering_connection
+ inputs:
+ target_account_id:
+ description: >
+ The 12 digit account ID that the target VPC belongs to.
+ type: string
+ default: ''
+ routes:
+ description: >
+ A list of aria.aws.datatypes.Route for assignment to the source Route Table.
+ type: list
+ entry_schema:
+ type: aria.aws.datatypes.Route
+ default: []
+ post_configure_target:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.vpc.accept_vpc_peering_connection
+ remove_target:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.vpc.delete_vpc_peering_connection
+
+ aria.aws.relationships.DHCPOptionsAssociatedWithVPC:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ add_target:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.dhcp.associate_dhcp_options
+ remove_target:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.dhcp.restore_dhcp_options
+
+ aria.aws.relationships.CustomerGatewayConnectedToVPNGateway:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ add_target:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.gateway.create_vpn_connection
+ inputs:
+ routes:
+ description: >
+ A list of static routes to add to this vpn_connection.
+ The routes will be of type aria.aws.datatypes.Route.
+ However, you can only provide the destination_cidr_block and a vpn_connection_id.
+ type: list
+ entry_schema:
+ type: aria.aws.datatypes.Route
+ default: []
+ remove_target:
+ implementation: cloudify-aws-plugin > cloudify_aws.vpc.gateway.delete_vpn_connection
+
+ aria.aws.relationships.InstanceContainedInSubnet:
+ derived_from: tosca.relationships.HostedOn
+
+ aria.aws.relationships.InstanceConnectedToSubnet:
+ derived_from: tosca.relationships.ConnectsTo
+
+ aria.aws.relationships.SecurityGroupContainedInVPC:
+ derived_from: tosca.relationships.HostedOn
+
+ aria.aws.relationships.ConnectedToSubnet: # ARIA NOTE: I don't see a use for this relationship
+ derived_from: tosca.relationships.ConnectsTo
+
+ aria.aws.relationships.ENIConnectedToInstance:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ add_source:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.eni.associate
+ inputs:
+ args:
+ type: map
+ entry_schema:
+ type: string
+ default: {}
+ remove_source:
+ implementation: cloudify-aws-plugin > cloudify_aws.ec2.eni.disassociate
+ inputs:
+ args:
+ type: map
+ entry_schema:
+ type: string
+ default: {}
+
+ aria.aws.relationships.InstanceConnectedToENI:
+ derived_from: tosca.relationships.ConnectsTo
+
+ aria.aws.relationships.SecurityGroupUsesRule:
+ derived_from: tosca.relationships.DependsOn
+
+ aria.aws.relationships.RuleDependsOnSecurityGroup:
+ derived_from: tosca.relationships.DependsOn
+
+ aria.aws.relationships.RuleContainedInSecurityGroup:
+ derived_from: tosca.relationships.HostedOn
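
Tying the relationship types back to the node types, a sketch of how the requirements declared on aria.aws.nodes.Instance select these relationships (node names are placeholders; the lowercase relationship name is the one the plugin matches on, per the comment above):

    node_templates:
      vm:
        type: aria.aws.nodes.Instance
        properties:
          image_id: ami-xxxxxxxx
          instance_type: t2.micro
        requirements:
          - elastic_ip: my_ip          # InstanceConnectedToElasticIP
          - keypair: my_keypair        # InstanceConnectedToKeypair
          - security_group: my_sg      # instance_connected_to_security_group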
diff --git a/azure/aria/aria-extension-cloudify/plugins/openstack/plugin.yaml b/azure/aria/aria-extension-cloudify/plugins/openstack/plugin.yaml
new file mode 100644
index 0000000..8f3e6e6
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/plugins/openstack/plugin.yaml
@@ -0,0 +1,1174 @@
+#
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+
+topology_template:
+ policies:
+ cloudify-openstack-plugin:
+ description: >-
+ the openstack plugin through which operations are executed.
+ type: aria.Plugin
+ properties:
+ version: 2.0.1
+
+
+data_types:
+
+ aria.openstack.datatypes.Config:
+ description: >-
+ openstack configuration
+ properties:
+ username:
+ type: string
+ password:
+ type: string
+ tenant_name:
+ type: string
+ auth_url:
+ type: string
+ region:
+ type: string
+ required: false
+ nova_url:
+ type: string
+ required: false
+ neutron_url:
+ type: string
+ required: false
+
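+ # Illustrative sketch (not part of the original type definitions): a service
+ # template would typically supply this configuration inline or via an input;
+ # all values below are placeholders.
+ #
+ #   openstack_config:
+ #     username: admin                         # placeholder
+ #     password: secret                        # placeholder
+ #     tenant_name: demo                       # placeholder
+ #     auth_url: http://controller:5000/v2.0   # placeholder
+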
+ aria.openstack.datatypes.Rules:
+ description: >-
+ openstack security group rules
+ properties:
+ remote_ip_prefix:
+ type: string
+ default: 0.0.0.0/0
+ port:
+ type: integer
+ default:
+
+ # source: https://developer.openstack.org/api-ref/compute/
+
+ aria.openstack.datatypes.Server:
+ description: >-
+ openstack Server args.
+ properties:
+ security_groups:
+ type: list
+ entry_schema: string
+ required: false
+ availability_zone:
+ type: string
+ required: false
+ userdata:
+ type: string
+ required: false
+
+ aria.openstack.datatypes.Keypair:
+ description: >-
+ openstack keypair args.
+ properties:
+ public_key:
+ type: string
+ required: false
+ type:
+ type: string
+ required: false
+ user_id:
+ type: string
+ required: false
+
+ # source: https://developer.openstack.org/api-ref/block-storage/v2/index.html
+
+ aria.openstack.datatypes.Volume:
+ description: >-
+ openstack volume args.
+ properties:
+ size:
+ type: integer
+ required: false
+ description:
+ type: string
+ required: false
+ availability_zone:
+ type: string
+ required: false
+ consistencygroup_id:
+ type: string
+ required: false
+ volume_type:
+ type: string
+ required: false
+ snapshot_id:
+ type: string
+ required: false
+ source_replica:
+ type: string
+ required: false
+ tenant_id:
+ type: string
+ required: false
+
+ # source: https://developer.openstack.org/api-ref/image/
+
+ aria.openstack.datatypes.Image:
+ description: >-
+ openstack image args.
+ properties:
+ id:
+ type: string
+ required: false
+ min_disk:
+ type: integer
+ required: false
+ min_ram:
+ type: integer
+ required: false
+ name:
+ type: string
+ required: false
+ protected:
+ type: boolean
+ required: false
+ tags:
+ type: list
+ entry_schema: string
+ required: false
+ visibility:
+ type: string
+ required: false
+
+ # source: https://developer.openstack.org/api-ref/identity/v3/
+
+ aria.openstack.datatypes.Project:
+ description: >-
+ openstack project args.
+ properties:
+ is_domain:
+ type: boolean
+ required: false
+ description:
+ type: string
+ required: false
+ domain_id:
+ type: string
+ required: false
+ name:
+ type: string
+ required: false
+ enabled:
+ type: boolean
+ required: false
+ parent_id:
+ type: string
+ required: false
+
+ # source: https://developer.openstack.org/api-ref/networking/v2/index.html
+
+ aria.openstack.datatypes.Subnet:
+ description: >-
+ openstack subnet args.
+ properties:
+ network_id:
+ type: string
+ required: false
+ ip_version:
+ type: integer
+ required: false
+ default: 4
+ cidr:
+ type: string
+ required: false
+ gateway_ip:
+ type: string
+ required: false
+ dns_nameservers:
+ type: list
+ entry_schema: string
+ required: false
+ enable_dhcp:
+ type: boolean
+ required: false
+ tenant_id:
+ type: string
+ required: false
+
+ aria.openstack.datatypes.Port:
+ description: >-
+ openstack port args
+ properties:
+ network_id:
+ type: string
+ required: false
+ admin_state_up:
+ type: boolean
+ required: false
+ status:
+ type: string
+ required: false
+ mac_address:
+ type: string
+ required: false
+ device_id:
+ type: string
+ required: false
+ device_owner:
+ type: string
+ required: false
+ tenant_id:
+ type: string
+ required: false
+
+ aria.openstack.datatypes.Network:
+ description: >-
+ openstack network args
+ properties:
+ admin_state_up:
+ type: boolean
+ required: false
+ status:
+ type: string
+ required: false
+ subnets:
+ type: list
+ entry_schema: string
+ required: false
+ shared:
+ type: boolean
+ required: false
+ tenant_id:
+ type: string
+ required: false
+
+ aria.openstack.datatypes.SecurityGroup:
+ description: >-
+ openstack security group args
+ properties:
+ admin_state_up:
+ type: boolean
+ required: false
+ port_security_enabled:
+ type: boolean
+ required: false
+ project_id:
+ type: string
+ required: false
+ qos_policy_id:
+ type: string
+ required: false
+ segments:
+ type: list
+ entry_schema: string
+ required: false
+ shared:
+ type: boolean
+ required: false
+ vlan_transparent:
+ type: boolean
+ required: false
+ tenant_id:
+ type: string
+ required: false
+
+ aria.openstack.datatypes.Router:
+ description: >-
+ openstack router args
+ properties:
+ bgpvpn_id:
+ type: string
+ required: false
+ router_id:
+ type: string
+ required: false
+
+ aria.openstack.datatypes.FloatingIP:
+ description: >-
+ openstack floating IP args
+ properties:
+ tenant_id:
+ type: string
+ required: false
+ project_id:
+ type: string
+ required: false
+ floating_network_id:
+ type: string
+ required: false
+ floating_network_name:
+ type: string
+ required: false
+ fixed_ip_address:
+ type: string
+ required: false
+ floating_ip_address:
+ type: string
+ required: false
+ port_id:
+ type: string
+ required: false
+ subnet_id:
+ type: string
+ required: false
+
+
+interface_types:
+
+ aria.openstack.interfaces.validation:
+ derived_from: tosca.interfaces.Root
+ creation:
+ description: >-
+ creation operation for the openstack validation interface
+ deletion:
+ description: >-
+ deletion operation for the openstack validation interface
+
+
+node_types:
+
+ aria.openstack.nodes.Server:
+ derived_from: tosca.nodes.Compute
+ properties:
+ server:
+ default: {}
+ type: aria.openstack.datatypes.Server
+ required: false
+ ip:
+ default:
+ type: string
+ os_family:
+ description: >-
+ Property specifying which operating system family
+ this compute node will run.
+ default: linux
+ type: string
+ use_external_resource:
+ type: boolean
+ default: false
+ description: >-
+ a boolean for setting whether to create the resource or use an existing one.
+ See the using existing resources section.
+ create_if_missing:
+ default: false
+ type: boolean
+ description: >-
+ If use_external_resource is ``true`` and the resource is missing,
+ create it instead of failing.
+ resource_id:
+ default: ''
+ type: string
+ description: >-
+ name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
+ image:
+ default: ''
+ type: string
+ description: >-
+ The image for the server.
+ May receive either the ID or the name of the image.
+ note: This property is currently optional for backwards compatibility,
+ but will become required in future versions
+ (Default: '').
+ flavor:
+ default: ''
+ type: string
+ description: >-
+ The flavor for the server.
+ May receive either the ID or the name of the flavor.
+ note: This property is currently optional for backwards compatibility,
+ but will become required in future versions
+ (Default: '').
+ use_password:
+ default: false
+ type: boolean
+ description: >-
+ A boolean describing whether this server image supports user-password authentication.
+ Images that do should post the administrator user's password to the Openstack metadata service (e.g. via cloudbase);
+ the password would then be retrieved by the plugin,
+ decrypted using the server's keypair and then saved in the server's runtime properties.
+ management_network_name:
+ type: string
+ description: >-
+ The current implementation of the openstack plugin requires this field. The value of
+ this field should be set to the openstack name of a network this server is attached to.
+ openstack_config:
+ type: aria.openstack.datatypes.Config
+ required: false
+ description: >-
+ see OpenStack Configuration
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-openstack-plugin > nova_plugin.server.create
+ inputs:
+ args:
+ required: false
+ default: {}
+ type: aria.openstack.datatypes.Server
+ start:
+ implementation: cloudify-openstack-plugin > nova_plugin.server.start
+ inputs:
+ start_retry_interval:
+ default: 30
+ type: integer
+ private_key_path:
+ type: string
+ default: ''
+ required: true
+ stop: cloudify-openstack-plugin > nova_plugin.server.stop
+ delete: cloudify-openstack-plugin > nova_plugin.server.delete
+ Validation:
+ type: aria.openstack.interfaces.validation
+ creation:
+ implementation: cloudify-openstack-plugin > nova_plugin.server.creation_validation
+ inputs:
+ args:
+ required: false
+ default: {}
+ type: aria.openstack.datatypes.Server
+
+ requirements:
+ - floating_ip:
+ capability: tosca.capabilities.Node
+ node: aria.openstack.nodes.FloatingIP
+ relationship: aria.openstack.server_connected_to_floating_ip
+ occurrences: [ 0, UNBOUNDED ]
+ - security_group:
+ capability: tosca.capabilities.Node
+ node: aria.openstack.nodes.SecurityGroup
+ relationship: aria.openstack.server_connected_to_security_group
+ occurrences: [ 0, UNBOUNDED ]
+ - port:
+ capability: tosca.capabilities.Node
+ node: aria.openstack.nodes.Port
+ relationship: aria.openstack.server_connected_to_port
+ occurrences: [ 0, UNBOUNDED ]
+ - key_pair:
+ capability: tosca.capabilities.Node
+ node: aria.openstack.nodes.KeyPair
+ relationship: aria.openstack.server_connected_to_keypair
+ occurrences: [ 0, UNBOUNDED ]
+ capabilities:
+ openstack_container:
+ type: Node
+
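+ # Illustrative sketch only (not part of the original plugin): a minimal node
+ # template using this type. The image, flavor and network names below are
+ # placeholders for resources that must already exist in your OpenStack project.
+ #
+ #   node_templates:
+ #     my_server:
+ #       type: aria.openstack.nodes.Server
+ #       properties:
+ #         image: ubuntu-trusty                  # placeholder image name or ID
+ #         flavor: m1.small                      # placeholder flavor name or ID
+ #         management_network_name: my_network   # placeholder network name
+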
+ aria.openstack.nodes.WindowsServer:
+ derived_from: aria.openstack.nodes.Server
+ properties:
+ use_password:
+ default: true
+ type: boolean
+ description: >-
+ Default changed for derived type
+ because Windows instances need a password for agent installation
+ os_family:
+ default: windows
+ type: string
+ description: >-
+ (updates the os_family default as a convenience)
+
+ aria.openstack.nodes.KeyPair:
+ derived_from: tosca.nodes.Root
+ properties:
+ keypair:
+ default: {}
+ type: aria.openstack.datatypes.Keypair
+ required: false
+ description: >-
+ key-value keypair configuration.
+ private_key_path:
+ description: >
+ the path (on the machine the plugin is running on) to
+ where the private key should be stored. If
+ use_external_resource is set to "true", the existing
+ private key is expected to be at this path.
+ type: string
+ use_external_resource:
+ type: boolean
+ default: false
+ description: >-
+ a boolean describing whether this resource should be
+ created or rather that it already exists on Openstack
+ and should be used as-is.
+ create_if_missing:
+ default: false
+ type: boolean
+ description: >-
+ If use_external_resource is ``true`` and the resource is missing,
+ create it instead of failing.
+ resource_id:
+ default: ''
+ type: string
+ description: >-
+ the name that will be given to the resource on Openstack (excluding optional prefix).
+ If not provided, a default name will be given instead.
+ If use_external_resource is set to "true", this exact
+ value (without any prefixes applied) will be looked for
+ as either the name or id of an existing keypair to be used.
+ openstack_config:
+ type: aria.openstack.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-openstack-plugin > nova_plugin.keypair.create
+ inputs:
+ args:
+ required: false
+ default: {}
+ type: aria.openstack.datatypes.Keypair
+
+ delete: cloudify-openstack-plugin > nova_plugin.keypair.delete
+
+ Validation:
+ type: aria.openstack.interfaces.validation
+ creation: cloudify-openstack-plugin > nova_plugin.keypair.creation_validation
+
+ capabilities:
+ keypair:
+ type: tosca.capabilities.Node
+
+ aria.openstack.nodes.Subnet:
+ derived_from: tosca.nodes.Root
+ properties:
+ subnet:
+ type: aria.openstack.datatypes.Subnet
+ required: false
+ default:
+ cidr: 172.16.0.0/16
+ use_external_resource:
+ type: boolean
+ default: false
+ description: >-
+ a boolean for setting whether to create the resource or use an existing one.
+ See the using existing resources section.
+ create_if_missing:
+ default: false
+ type: boolean
+ description: >-
+ If use_external_resource is ``true`` and the resource is missing,
+ create it instead of failing.
+ resource_id:
+ default: ''
+ type: string
+ description: >-
+ name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
+ openstack_config:
+ type: aria.openstack.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-openstack-plugin > neutron_plugin.subnet.create
+ inputs:
+ args:
+ required: false
+ type: aria.openstack.datatypes.Subnet
+ default:
+ cidr: 172.16.0.0/16
+ delete: cloudify-openstack-plugin > neutron_plugin.subnet.delete
+ Validation:
+ type: aria.openstack.interfaces.validation
+ creation:
+ implementation: cloudify-openstack-plugin > neutron_plugin.subnet.creation_validation
+ inputs:
+ args:
+ type: aria.openstack.datatypes.Subnet
+ required: false
+ default:
+ cidr: 172.16.0.0/16
+
+ requirements:
+ - router:
+ capability: tosca.capabilities.Node
+ node: aria.openstack.nodes.Router
+ relationship: aria.openstack.subnet_connected_to_router
+ occurrences: [ 0, UNBOUNDED ]
+ - network:
+ capability: tosca.capabilities.Node
+ node: aria.openstack.nodes.Network
+ capabilities:
+ subnet:
+ type: tosca.capabilities.Node
+
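+ # Illustrative sketch only: a subnet template overriding the default CIDR and
+ # attached to a network node; all names are placeholders.
+ #
+ #   my_subnet:
+ #     type: aria.openstack.nodes.Subnet
+ #     properties:
+ #       subnet:
+ #         cidr: 10.10.0.0/24
+ #         ip_version: 4
+ #     requirements:
+ #       - network: my_network
+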
+ aria.openstack.nodes.SecurityGroup:
+ derived_from: tosca.nodes.Root
+ properties:
+ security_group:
+ type: aria.openstack.datatypes.SecurityGroup
+ required: false
+ default: {}
+ description:
+ type: string
+ default: ''
+ description: >-
+ SecurityGroup description.
+ create_if_missing:
+ default: false
+ type: boolean
+ description: >-
+ If use_external_resource is ``true`` and the resource is missing,
+ create it instead of failing.
+ use_external_resource:
+ type: boolean
+ default: false
+ description: >-
+ a boolean for setting whether to create the resource or use an existing one.
+ See the using existing resources section.
+ resource_id:
+ default: ''
+ type: string
+ description: >-
+ name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
+ openstack_config:
+ type: aria.openstack.datatypes.Config
+ required: false
+ rules:
+ default: []
+ type: list
+ entry_schema: aria.openstack.datatypes.Rules
+ disable_default_egress_rules:
+ default: false
+ type: boolean
+ description: >-
+ a flag for removing the default rules described at https://wiki.openstack.org/wiki/Neutron/SecurityGroups#Behavior. If not set to `true`, these rules will remain, and exist alongside any additional rules passed using the `rules` property.
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-openstack-plugin > neutron_plugin.security_group.create
+ inputs:
+ args:
+ type: aria.openstack.datatypes.SecurityGroup
+ required: false
+ default: {}
+ delete: cloudify-openstack-plugin > neutron_plugin.security_group.delete
+
+ Validation:
+ type: aria.openstack.interfaces.validation
+ creation: cloudify-openstack-plugin > neutron_plugin.security_group.creation_validation
+
+ capabilities:
+ security:
+ type: tosca.capabilities.Node
+
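+ # Illustrative sketch only: a security group allowing inbound SSH via the
+ # rules property (entries are of type aria.openstack.datatypes.Rules); the
+ # values are placeholders.
+ #
+ #   my_security_group:
+ #     type: aria.openstack.nodes.SecurityGroup
+ #     properties:
+ #       rules:
+ #         - remote_ip_prefix: 0.0.0.0/0
+ #           port: 22
+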
+ aria.openstack.nodes.Router:
+ derived_from: tosca.nodes.Root
+ properties:
+ router:
+ type: aria.openstack.datatypes.Router
+ required: false
+ default: {}
+ external_network:
+ default: ''
+ type: string
+ description: >-
+ An external network name or ID.
+ If given, the router will use this external network as a gateway.
+ use_external_resource:
+ type: boolean
+ default: false
+ description: >-
+ a boolean for setting whether to create the resource or use an existing one.
+ See the using existing resources section.
+ create_if_missing:
+ default: false
+ type: boolean
+ description: >-
+ If use_external_resource is ``true`` and the resource is missing,
+ create it instead of failing.
+ resource_id:
+ default: ''
+ description: >-
+ name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
+ type: string
+ openstack_config:
+ type: aria.openstack.datatypes.Config
+ required: false
+
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-openstack-plugin > neutron_plugin.router.create
+ inputs:
+ args:
+ default: {}
+ type: aria.openstack.datatypes.Router
+ required: false
+ delete: cloudify-openstack-plugin > neutron_plugin.router.delete
+ Validation:
+ type: aria.openstack.interfaces.validation
+ creation: cloudify-openstack-plugin > neutron_plugin.router.creation_validation
+
+ capabilities:
+ gateway:
+ type: tosca.capabilities.Node
+
+ aria.openstack.nodes.Port:
+ derived_from: tosca.nodes.Root
+ properties:
+ port:
+ type: aria.openstack.datatypes.Port
+ required: false
+ default: {}
+ fixed_ip:
+ default: ''
+ type: string
+ description: >-
+ may be used to request a specific fixed IP for the port.
+ If the IP is unavailable
+ (either already taken or does not belong to a subnet the port is on)
+ an error will be raised.
+ use_external_resource:
+ type: boolean
+ default: false
+ description: >-
+ a boolean for setting whether to create the resource or use an existing one.
+ See the using existing resources section.
+ create_if_missing:
+ default: false
+ type: boolean
+ description: >-
+ If use_external_resource is ``true`` and the resource is missing,
+ create it instead of failing.
+ resource_id:
+ default: ''
+ type: string
+ description: >-
+ name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
+ openstack_config:
+ type: aria.openstack.datatypes.Config
+ required: false
+
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-openstack-plugin > neutron_plugin.port.create
+ inputs:
+ args:
+ default: {}
+ type: aria.openstack.datatypes.Port
+ required: false
+
+ delete: cloudify-openstack-plugin > neutron_plugin.port.delete
+
+ Validation:
+ type: aria.openstack.interfaces.validation
+ creation: cloudify-openstack-plugin > neutron_plugin.port.creation_validation
+
+ requirements:
+ - security_group:
+ capability: tosca.capabilities.Node
+ node: aria.openstack.nodes.SecurityGroup
+ relationship: aria.openstack.port_connected_to_security_group
+ occurrences: [ 0, UNBOUNDED ]
+ - floating_ip:
+ capability: tosca.capabilities.Node
+ node: aria.openstack.nodes.FloatingIP
+ relationship: aria.openstack.port_connected_to_floating_ip
+ occurrences: [ 0, UNBOUNDED ]
+ - subnet:
+ capability: tosca.capabilities.Node
+ node: aria.openstack.nodes.Subnet
+ relationship: aria.openstack.port_connected_to_subnet
+ - network:
+ capability: tosca.capabilities.Node
+ node: aria.openstack.nodes.Network
+ occurrences: [ 0, UNBOUNDED ]
+ capabilities:
+ entry_point:
+ type: tosca.capabilities.Node
+
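+ # Illustrative sketch only: a port on a network, guarded by a security group.
+ # The requirement names match this type's requirements; the node names are
+ # placeholders.
+ #
+ #   my_port:
+ #     type: aria.openstack.nodes.Port
+ #     requirements:
+ #       - network: my_network
+ #       - security_group: my_security_group
+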
+ aria.openstack.nodes.Network:
+ derived_from: tosca.nodes.Root
+ properties:
+ network:
+ type: aria.openstack.datatypes.Network
+ required: false
+ default: {}
+ use_external_resource:
+ type: boolean
+ default: false
+ description: >-
+ a boolean for setting whether to create the resource or use an existing one.
+ See the using existing resources section.
+ create_if_missing:
+ default: false
+ type: boolean
+ description: >-
+ If use_external_resource is ``true`` and the resource is missing,
+ create it instead of failing.
+ resource_id:
+ default: ''
+ type: string
+ description: >-
+ name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
+ openstack_config:
+ type: aria.openstack.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-openstack-plugin > neutron_plugin.network.create
+ inputs:
+ args:
+ default: {}
+ type: aria.openstack.datatypes.Network
+ required: false
+
+ delete: cloudify-openstack-plugin > neutron_plugin.network.delete
+
+ Validation:
+ type: aria.openstack.interfaces.validation
+ creation: cloudify-openstack-plugin > neutron_plugin.network.creation_validation
+
+ capabilities:
+ address_space:
+ type: tosca.capabilities.Node
+
+ aria.openstack.nodes.FloatingIP:
+ derived_from: tosca.nodes.Root
+ attributes:
+ floating_ip_address:
+ type: string
+ properties:
+ floatingip:
+ type: aria.openstack.datatypes.FloatingIP
+ required: false
+ default: {}
+ use_external_resource:
+ type: boolean
+ default: false
+ description: >-
+ a boolean for setting whether to create the resource or use an existing one.
+ See the using existing resources section.
+ create_if_missing:
+ default: false
+ type: boolean
+ description: >-
+ If use_external_resource is ``true`` and the resource is missing,
+ create it instead of failing.
+ resource_id:
+ default: ''
+ type: string
+ description: >-
+ IP address of the floating IP to use, or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
+ openstack_config:
+ type: aria.openstack.datatypes.Config
+ required: false
+
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-openstack-plugin > neutron_plugin.floatingip.create
+ inputs:
+ args:
+ default: {}
+ type: aria.openstack.datatypes.FloatingIP
+ required: false
+
+ delete: cloudify-openstack-plugin > neutron_plugin.floatingip.delete
+
+ Validation:
+ type: aria.openstack.interfaces.validation
+ creation: cloudify-openstack-plugin > neutron_plugin.floatingip.creation_validation
+
+ capabilities:
+ address:
+ type: tosca.capabilities.Node
+
+ aria.openstack.nodes.Volume:
+ derived_from: tosca.nodes.Root
+ properties:
+ volume:
+ default: {}
+ type: aria.openstack.datatypes.Volume
+ description: >-
+ key-value volume configuration as described in http://developer.openstack.org/api-ref-blockstorage-v1.html#volumes-v1. (**DEPRECATED - Use the `args` input in create operation instead**)
+ use_external_resource:
+ type: boolean
+ default: false
+ description: >-
+ a boolean for setting whether to create the resource or use an existing one.
+ See the using existing resources section.
+ create_if_missing:
+ default: false
+ type: boolean
+ description: >-
+ If use_external_resource is ``true`` and the resource is missing,
+ create it instead of failing.
+ resource_id:
+ default: ''
+ type: string
+ description: >-
+ name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
+ device_name:
+ default: auto
+ type: string
+ description: >-
+ The device name this volume will be attached to.
+ Default value is *auto*,
+ which means openstack will auto-assign a device.
+ Note that if you do explicitly set a value,
+ this value may not be the actual device name assigned.
+ Sometimes the requested device will not be available, and openstack will assign the volume to a different device;
+ this is why we recommend using *auto*.
+ openstack_config:
+ type: aria.openstack.datatypes.Config
+ required: false
+ boot:
+ type: boolean
+ default: false
+ description: >-
+ If a Server instance is connected to this Volume by a relationship,
+ this volume will be used as the boot volume for that Server.
+ interfaces:
+ Standard:
+ create:
+ implementation: cloudify-openstack-plugin > cinder_plugin.volume.create
+ inputs:
+ args:
+ default: {}
+ type: aria.openstack.datatypes.Volume
+ required: false
+
+ status_attempts:
+ description: >-
+ Number of times to check for the creation's status before failing
+ type: integer
+ default: 20
+ status_timeout:
+ description: >-
+ Interval (in seconds) between subsequent inquiries of the creation's
+ status
+ type: integer
+ default: 15
+ delete: cloudify-openstack-plugin > cinder_plugin.volume.delete
+
+ Validation:
+ type: aria.openstack.interfaces.validation
+ creation: cloudify-openstack-plugin > cinder_plugin.volume.creation_validation
+
+ requirements:
+ - server:
+ capability: tosca.capabilities.Node
+ node: aria.openstack.nodes.Server
+ relationship: aria.openstack.volume_attached_to_server
+
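+ # Illustrative sketch only: a 10 GB volume attached to a server through the
+ # volume_attached_to_server relationship; node names are placeholders.
+ #
+ #   my_volume:
+ #     type: aria.openstack.nodes.Volume
+ #     properties:
+ #       volume:
+ #         size: 10
+ #     requirements:
+ #       - server: my_server
+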
+ aria.openstack.nodes.Image:
+ derived_from: tosca.nodes.Root
+ properties:
+ image:
+ description: >-
+ Required parameters are (container_format, disk_format). Accepted
+ types are listed at
+ http://docs.openstack.org/developer/glance/formats.html.
+ To create an image from a local file, add its path in the data
+ parameter.
+ default: {}
+ type: map
+ entry_schema: string
+ image_url:
+ default: ''
+ type: string
+ description: >-
+ The openstack resource URL for the image.
+ use_external_resource:
+ default: false
+ type: boolean
+ description: >-
+ a boolean for setting whether to create the resource or use an existing one.
+ See the using existing resources section.
+ create_if_missing:
+ default: false
+ type: boolean
+ description: >-
+ If use_external_resource is ``true`` and the resource is missing,
+ create it instead of failing.
+ resource_id:
+ default: ''
+ type: string
+ description: >-
+ name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
+ openstack_config:
+ type: aria.openstack.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-openstack-plugin > glance_plugin.image.create
+
+ start:
+ implementation: cloudify-openstack-plugin > glance_plugin.image.start
+ inputs:
+ start_retry_interval:
+ default: 30
+ type: integer
+
+ delete: cloudify-openstack-plugin > glance_plugin.image.delete
+
+ Validation:
+ type: aria.openstack.interfaces.validation
+ creation: cloudify-openstack-plugin > glance_plugin.image.creation_validation
+
+ aria.openstack.nodes.Project:
+ derived_from: tosca.nodes.Root
+ properties:
+ project:
+ default: {}
+ type: aria.openstack.datatypes.Project
+ description: >-
+ key-value project configuration.
+ users:
+ default: []
+ type: list
+ entry_schema: string
+ description: >-
+ List of users assigned to this project in the following format:
+ { name: string, roles: [string] }
+ quota:
+ default: {}
+ type: map
+ entry_schema: string
+ description: |
+ A dictionary mapping service names to quota definitions for a project
+
+ e.g::
+
+ quota:
+ neutron: <quota>
+ nova: <quota>
+ use_external_resource:
+ default: false
+ type: boolean
+ description: >-
+ a boolean for setting whether to create the resource or use an existing one.
+ See the using existing resources section.
+ create_if_missing:
+ default: false
+ type: boolean
+ description: >-
+ If use_external_resource is ``true`` and the resource is missing,
+ create it instead of failing.
+ resource_id:
+ default: ''
+ type: string
+ description: >-
+ name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
+ openstack_config:
+ type: aria.openstack.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-openstack-plugin > keystone_plugin.project.create
+ start: cloudify-openstack-plugin > keystone_plugin.project.start
+ delete: cloudify-openstack-plugin > keystone_plugin.project.delete
+ Validation:
+ type: aria.openstack.interfaces.validation
+ creation: cloudify-openstack-plugin > keystone_plugin.project.creation_validation
+
+
+relationship_types:
+
+ aria.openstack.port_connected_to_security_group:
+ derived_from: ConnectsTo
+ interfaces:
+ Configure:
+ add_source: cloudify-openstack-plugin > neutron_plugin.port.connect_security_group
+
+ aria.openstack.subnet_connected_to_router:
+ derived_from: ConnectsTo
+ interfaces:
+ Configure:
+ add_target: cloudify-openstack-plugin > neutron_plugin.router.connect_subnet
+ remove_target: cloudify-openstack-plugin > neutron_plugin.router.disconnect_subnet
+
+ aria.openstack.server_connected_to_floating_ip:
+ derived_from: ConnectsTo
+ interfaces:
+ Configure:
+ add_source:
+ implementation: cloudify-openstack-plugin > nova_plugin.server.connect_floatingip
+ inputs:
+ fixed_ip:
+ description: >
+ The fixed IP to be associated with the floating IP.
+ If omitted, Openstack will choose which port to associate.
+ type: string
+ default: ''
+ remove_source: cloudify-openstack-plugin > nova_plugin.server.disconnect_floatingip
+
+ aria.openstack.port_connected_to_floating_ip:
+ derived_from: ConnectsTo
+ interfaces:
+ Configure:
+ add_source: cloudify-openstack-plugin > neutron_plugin.floatingip.connect_port
+ remove_source: cloudify-openstack-plugin > neutron_plugin.floatingip.disconnect_port
+
+ aria.openstack.server_connected_to_security_group:
+ derived_from: ConnectsTo
+ interfaces:
+ Configure:
+ add_source: cloudify-openstack-plugin > nova_plugin.server.connect_security_group
+ remove_source: cloudify-openstack-plugin > nova_plugin.server.disconnect_security_group
+
+ aria.openstack.server_connected_to_port:
+ derived_from: ConnectsTo
+ interfaces:
+ Configure:
+ remove_source: cloudify-openstack-plugin > neutron_plugin.port.detach
+
+ aria.openstack.server_connected_to_keypair:
+ derived_from: ConnectsTo
+
+ aria.openstack.port_connected_to_subnet:
+ derived_from: ConnectsTo
+
+ aria.openstack.volume_attached_to_server:
+ derived_from: ConnectsTo
+ interfaces:
+ Configure:
+ add_target:
+ implementation: cloudify-openstack-plugin > nova_plugin.server.attach_volume
+ inputs:
+
+ status_attempts:
+ description: >
+ Number of times to check for the attachment's status before failing
+ type: integer
+ default: 10
+ status_timeout:
+ description: >
+ Interval (in seconds) between subsequent inquiries of the attachment's
+ status
+ type: integer
+ default: 2
+ remove_target:
+ implementation: cloudify-openstack-plugin > nova_plugin.server.detach_volume
+ inputs:
+
+ status_attempts:
+ description: >
+ Number of times to check for the detachment's status before failing
+ type: integer
+ default: 10
+ status_timeout:
+ description: >
+ Interval (in seconds) between subsequent inquiries of the detachment's
+ status
+ type: integer
+ default: 2
diff --git a/azure/aria/aria-extension-cloudify/requirements.txt b/azure/aria/aria-extension-cloudify/requirements.txt
new file mode 100644
index 0000000..ca46872
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/requirements.txt
@@ -0,0 +1 @@
+-e git+https://github.com/apache/incubator-ariatosca@master#egg=aria
diff --git a/azure/aria/aria-extension-cloudify/setup.py b/azure/aria/aria-extension-cloudify/setup.py
new file mode 100644
index 0000000..9a8d891
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/setup.py
@@ -0,0 +1,44 @@
+#
+# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import sys
+
+from setuptools import setup, find_packages
+
+_PACKAGE_NAME = 'aria-extension-cloudify'
+_PYTHON_SUPPORTED_VERSIONS = [(2, 6), (2, 7)]
+
+if (sys.version_info[0], sys.version_info[1]) not in _PYTHON_SUPPORTED_VERSIONS:
+ raise NotImplementedError('{0} supports Python 2.6 and 2.7 only'
+ .format(_PACKAGE_NAME))
+
+setup(
+ name=_PACKAGE_NAME,
+ version='4.1',
+ description="Enable ARIA to utilize some of Cloudify's abilities, such as interfacing with AWS "
+ "and Openstack.",
+ author='Gigaspaces',
+ author_email='cosmo-admin@gigaspaces.com',
+ license='LICENSE',
+
+ packages=find_packages(include=['adapters*']),
+ install_requires=['apache-ariatosca'],
+ entry_points={
+ 'aria_extension': [
+ 'adapter = adapters.extension'
+ ]
+ }
+)
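+
+# Illustrative note (not part of the original file): the 'aria_extension' entry
+# point declared above is how ARIA discovers this adapter at runtime, roughly:
+#
+#   from pkg_resources import iter_entry_points
+#   for entry_point in iter_entry_points('aria_extension'):
+#       entry_point.load()  # imports adapters.extension, registering the adapter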
diff --git a/azure/aria/aria-extension-cloudify/src/aria/.gitignore b/azure/aria/aria-extension-cloudify/src/aria/.gitignore
new file mode 100644
index 0000000..482383a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/.gitignore
@@ -0,0 +1,64 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+bin/
+build/
+develop-eggs/
+dist/
+eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage*
+.cache
+nosetests.xml
+coverage.xml
+
+# Translations
+*.mo
+
+# Mr Developer
+.mr.developer.cfg
+.project
+.pydevproject
+
+# Rope
+.ropeproject
+
+# Django stuff:
+*.log
+*.pot
+
+# Sphinx documentation
+docs/_build/
+
+*.iml
+
+*COMMIT_MSG
+
+*.noseids
+
+# QuickBuild
+.qbcache/
+
+.idea/
diff --git a/azure/aria/aria-extension-cloudify/src/aria/.rat-excludes b/azure/aria/aria-extension-cloudify/src/aria/.rat-excludes
new file mode 100644
index 0000000..aa0e3b8
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/.rat-excludes
@@ -0,0 +1,24 @@
+CONTRIBUTING
+DISCLAIMER
+VERSION
+NOTICE
+MANIFEST.in
+requirements.txt
+requirements.in
+docs
+dist
+build
+apache_ariatosca.egg-info
+.git
+.gitignore
+.gitattributes
+.coverage
+.coveragerc
+.rat-excludes
+.*.yaml
+.*.html
+.*.pyc
+.*.md
+.*.rst
+.*.iml
+.idea
diff --git a/azure/aria/aria-extension-cloudify/src/aria/.travis.yml b/azure/aria/aria-extension-cloudify/src/aria/.travis.yml
new file mode 100644
index 0000000..958be80
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/.travis.yml
@@ -0,0 +1,64 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# We need to set "sudo: true" in order to use a virtual machine instead of a container, because
+# SSH tests fail in the container. See:
+# https://docs.travis-ci.com/user/reference/overview/#Virtualization-environments
+
+dist: trusty
+sudo: true
+
+language: python
+
+addons:
+ apt:
+ sources:
+ - sourceline: 'ppa:fkrull/deadsnakes'
+ packages:
+ # Ubuntu 14.04 (trusty) does not come with Python 2.6, so we will install it from Felix
+ # Krull's PPA
+ - python2.6
+ - python2.6-dev
+
+python:
+ # We handle Python 2.6 testing from within tox (see tox.ini); note that this means that we run
+ # tox itself always from Python 2.7
+ - '2.7'
+
+env:
+ # The PYTEST_PROCESSES environment var is used in tox.ini to override the --numprocesses argument
+ # for PyTest's xdist plugin. This is necessary because conventional Travis environments
+ # may report a large number of available CPUs, but they are greatly restricted. Through
+ # trial and error we found that more than one process may result in failures.
+ - PYTEST_PROCESSES=1 TOX_ENV=pylint_code
+ - PYTEST_PROCESSES=1 TOX_ENV=pylint_tests
+ - PYTEST_PROCESSES=1 TOX_ENV=py27
+ - PYTEST_PROCESSES=1 TOX_ENV=py26
+ - PYTEST_PROCESSES=1 TOX_ENV=py27e2e
+ - PYTEST_PROCESSES=1 TOX_ENV=py26e2e
+ - PYTEST_PROCESSES=1 TOX_ENV=py27ssh
+ - PYTEST_PROCESSES=1 TOX_ENV=py26ssh
+ - PYTEST_PROCESSES=1 TOX_ENV=docs
+
+before_install:
+ # Create SSH keys for SSH tests
+ - ssh-keygen -f $HOME/.ssh/id_rsa -t rsa -N ''
+ - cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
+
+ # Python dependencies
+ - pip install --upgrade pip
+ - pip install --upgrade setuptools
+ - pip install tox
+ - tox --version
+
+script:
+ - tox -e $TOX_ENV
diff --git a/azure/aria/aria-extension-cloudify/src/aria/=35.0.0 b/azure/aria/aria-extension-cloudify/src/aria/=35.0.0
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/=35.0.0
diff --git a/azure/aria/aria-extension-cloudify/src/aria/=35.0.0, b/azure/aria/aria-extension-cloudify/src/aria/=35.0.0,
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/=35.0.0,
diff --git a/azure/aria/aria-extension-cloudify/src/aria/CHANGELOG.rst b/azure/aria/aria-extension-cloudify/src/aria/CHANGELOG.rst
new file mode 100644
index 0000000..a0ca089
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/CHANGELOG.rst
@@ -0,0 +1,16 @@
+0.1.1
+-----
+
+[ARIA-312] Validation of workflow and operation kwargs raise false alarms
+[ARIA-301] Environment-marked dependencies are installed regardless of environment when installing from wheel
+[ARIA-299] Resuming canceled execution with non-finished tasks fails
+[ARIA-298] Test suite sometimes fails or freezes despite all tests passing
+[ARIA-296] Process termination test fails on windows
+[ARIA-287] New tox suite to make sure that Sphinx documentation generation isn't broken
+[ARIA-202] Execution plugin assumes '/tmp' for temp directory on the local/remote machine
+
+
+0.1.0
+-----
+
+ * Initial release. \ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/CONTRIBUTING b/azure/aria/aria-extension-cloudify/src/aria/CONTRIBUTING
new file mode 100644
index 0000000..4124003
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/CONTRIBUTING
@@ -0,0 +1,3 @@
+Contribution guide is available on our Confluence:
+
+https://cwiki.apache.org/confluence/display/ARIATOSCA/Contributing+to+ARIA \ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/DISCLAIMER b/azure/aria/aria-extension-cloudify/src/aria/DISCLAIMER
new file mode 100644
index 0000000..358d8e1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/DISCLAIMER
@@ -0,0 +1,10 @@
+Apache AriaTosca is an effort undergoing incubation at the Apache Software
+Foundation (ASF), sponsored by the Apache Incubator.
+
+Incubation is required of all newly accepted projects until a further review
+indicates that the infrastructure, communications, and decision making process
+have stabilized in a manner consistent with other successful ASF projects.
+
+While incubation status is not necessarily a reflection of the completeness
+or stability of the code, it does indicate that the project has yet to be
+fully endorsed by the ASF. \ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/LICENSE b/azure/aria/aria-extension-cloudify/src/aria/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/azure/aria/aria-extension-cloudify/src/aria/MANIFEST.in b/azure/aria/aria-extension-cloudify/src/aria/MANIFEST.in
new file mode 100644
index 0000000..6b67894
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/MANIFEST.in
@@ -0,0 +1,12 @@
+include CONTRIBUTING
+include DISCLAIMER
+include LICENSE
+include NOTICE
+include VERSION
+include CHANGELOG.rst
+include README.rst
+include requirements.in
+include requirements.txt
+recursive-include docs/html *
+recursive-include examples *
+prune docs/html/.doctrees
diff --git a/azure/aria/aria-extension-cloudify/src/aria/Makefile b/azure/aria/aria-extension-cloudify/src/aria/Makefile
new file mode 100644
index 0000000..9fef3ab
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/Makefile
@@ -0,0 +1,63 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+EXTENSIONS = ./extensions
+DIST = ./dist
+DOCS = ./docs
+HTML = ./docs/html
+EASY_INSTALL_PTH = $(VIRTUAL_ENV)/lib/python2.7/site-packages/easy-install.pth
+PYTHON_VERSION = $$(python -V 2>&1 | cut -f2 -d' ' | cut -f1,2 -d'.' --output-delimiter='')
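+# (illustrative: "Python 2.7.12" yields "27", so the test target below selects the py27* tox environments)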
+
+.DEFAULT_GOAL = default
+.PHONY: clean install install-virtual docs test dist deploy
+
+default:
+ @echo "Please choose one of the following targets: clean, install, install-virtual, docs, test, dist, requirements.txt"
+
+clean:
+ rm -rf "$(DIST)" "$(HTML)" build .tox .coverage*
+ -find . -maxdepth 1 -type f -name '.coverage' -delete
+ -find . -type f -name '*.pyc' -delete
+ -find . -type d -name '__pycache__' -prune -exec rm -rf {} \; 2>/dev/null
+
+install:
+ pip install .[ssh]
+
+install-virtual:
+ pip install --editable .[ssh]
+
+ # "pip install --editable" will not add our extensions to the path, so we will patch the virtualenv
+ EXTENSIONS_PATH="$$(head -n 1 "$(EASY_INSTALL_PTH)")/extensions" && \
+ if ! grep -Fxq "$$EXTENSIONS_PATH" "$(EASY_INSTALL_PTH)"; then \
+ echo "$$EXTENSIONS_PATH" >> "$(EASY_INSTALL_PTH)"; \
+ fi
+
+docs:
+ pip install --requirement "$(DOCS)/requirements.txt"
+ rm -rf "$(HTML)"
+ sphinx-build -W -T -b html "$(DOCS)" "$(HTML)"
+
+test:
+ pip install --upgrade "tox>=2.7.0"
+ tox -e pylint_code \
+ -e pylint_tests \
+ -e py$(PYTHON_VERSION) \
+ -e py$(PYTHON_VERSION)e2e \
+ -e py$(PYTHON_VERSION)ssh \
+ -e docs
+
+./requirements.txt: ./requirements.in
+ pip install --upgrade "pip-tools>=1.9.0"
+ pip-compile --output-file ./requirements.txt ./requirements.in
diff --git a/azure/aria/aria-extension-cloudify/src/aria/NOTICE b/azure/aria/aria-extension-cloudify/src/aria/NOTICE
new file mode 100644
index 0000000..bf03ab5
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/NOTICE
@@ -0,0 +1,5 @@
+Apache AriaTosca
+Copyright 2016-2017 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/README.rst b/azure/aria/aria-extension-cloudify/src/aria/README.rst
new file mode 100644
index 0000000..6f8885c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/README.rst
@@ -0,0 +1,167 @@
+ARIA
+====
+
+|Build Status| |Appveyor Build Status| |License| |PyPI release| |Python Versions| |Wheel|
+|Contributors| |Open Pull Requests| |Closed Pull Requests|
+
+
+What is ARIA?
+-------------
+
+`ARIA <http://ariatosca.incubator.apache.org/>`__ is an open-source,
+`TOSCA <https://www.oasis-open.org/committees/tosca/>`__-based, lightweight library and CLI for
+orchestration, intended for consumption by projects building TOSCA-based solutions for resource
+and service orchestration.
+
+ARIA can be utilized by any organization that wants to implement TOSCA-based orchestration in its
+solutions, whether a multi-cloud enterprise application, or an NFV or SDN solution for multiple
+virtual infrastructure managers.
+
+With ARIA, you can utilize TOSCA's cloud portability out-of-the-box, to develop, test and run your
+applications, from template to deployment.
+
+ARIA is an incubation project under the `Apache Software Foundation <https://www.apache.org/>`__.
+
+
+Installation
+------------
+
+ARIA is `available on PyPI <https://pypi.python.org/pypi/apache-ariatosca>`__.
+
+ARIA requires Python 2.6/2.7. Python 3 is currently not supported.
+
+To install ARIA directly from PyPI (using a ``wheel``), use::
+
+ pip install --upgrade pip setuptools
+ pip install apache-ariatosca
+
+To install ARIA from source, download the source tarball from
+`PyPI <https://pypi.python.org/pypi/apache-ariatosca>`__, extract and ``cd`` into the extract dir,
+and run::
+
+ pip install --upgrade pip setuptools
+ pip install .
+
+| The source package comes along with relevant examples, documentation, ``requirements.txt`` (for
+| installing specifically the frozen dependencies' versions with which ARIA was tested) and more.
+|
+| ARIA has additional optional dependencies. These are required for running operations over SSH.
+| Below are instructions on how to install these dependencies, including required system
+| dependencies per OS.
+|
+| Note: These dependencies may have varying licenses which may not be compatible with Apache license
+| 2.0.
+
+**Ubuntu/Debian** (tested on Ubuntu 14.04, Ubuntu 16.04)::
+
+ apt-get install -y python-dev gcc libffi-dev libssl-dev
+ pip install apache-ariatosca[ssh]
+
+**CentOS/Fedora** (tested on CentOS 6.6, CentOS 7)::
+
+ yum install -y python-devel gcc libffi-devel openssl-devel
+ pip install apache-ariatosca[ssh]
+
+**Archlinux**::
+
+ pacman -Syu --noconfirm python2 gcc libffi openssl
+ pip2 install apache-ariatosca[ssh]
+
+**Windows** (tested on Windows 10)::
+
+ # no additional system requirements are needed
+ pip install apache-ariatosca[ssh]
+
+**MacOS**::
+
+ # TODO
+
+
+
+To install ``pip``, either use your operating system's package management system, or run::
+
+ wget http://bootstrap.pypa.io/get-pip.py
+ python get-pip.py
+
+
+
+Getting Started
+---------------
+
+This section will describe how to run a simple "Hello World" example.
+
+First, store the bundled "hello world" service template under a name of your choice (e.g.
+``my-service-template``)::
+
+ aria service-templates store examples/hello-world/hello-world.yaml my-service-template
+
+Now create a service based on this service template and name it (e.g. ``my-service``)::
+
+ aria services create my-service -t my-service-template
+
+Finally, start an ``install`` workflow execution on ``my-service`` like so::
+
+ aria executions start install -s my-service
+
+You should now have a simple web server running on your local machine. Visit
+``http://localhost:9090`` to view your deployed application.
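+
+To check from a terminal instead (a quick sketch, assuming the example's default port)::
+
+    curl http://localhost:9090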
+
+To uninstall and clean your environment, follow these steps::
+
+ aria executions start uninstall -s my-service
+ aria services delete my-service
+ aria service-templates delete my-service-template
+
+
+Contribution
+------------
+
+You are welcome and encouraged to participate and contribute to the ARIA project.
+
+Please see our guide to
+`Contributing to ARIA
+<https://cwiki.apache.org/confluence/display/ARIATOSCA/Contributing+to+ARIA>`__.
+
+Feel free to also provide feedback on the mailing lists (see `Resources <#user-content-resources>`__
+section).
+
+
+Resources
+---------
+
+- `ARIA homepage <http://ariatosca.incubator.apache.org/>`__
+- `ARIA wiki <https://cwiki.apache.org/confluence/display/AriaTosca>`__
+- `Issue tracker <https://issues.apache.org/jira/browse/ARIA>`__
+
+- Dev mailing list: dev@ariatosca.incubator.apache.org
+- User mailing list: user@ariatosca.incubator.apache.org
+
+Subscribe by sending a mail to ``<group>-subscribe@ariatosca.incubator.apache.org`` (e.g.
+``dev-subscribe@ariatosca.incubator.apache.org``). See information on how to subscribe to mailing
+lists `here <https://www.apache.org/foundation/mailinglists.html>`__.
+
+For past correspondence, see the
+`dev mailing list archive <https://lists.apache.org/list.html?dev@ariatosca.apache.org>`__.
+
+
+License
+-------
+
+ARIA is licensed under the
+`Apache License 2.0 <https://github.com/apache/incubator-ariatosca/blob/master/LICENSE>`__.
+
+.. |Build Status| image:: https://img.shields.io/travis/apache/incubator-ariatosca/master.svg
+ :target: https://travis-ci.org/apache/incubator-ariatosca
+.. |Appveyor Build Status| image:: https://img.shields.io/appveyor/ci/ApacheSoftwareFoundation/incubator-ariatosca/master.svg
+ :target: https://ci.appveyor.com/project/ApacheSoftwareFoundation/incubator-ariatosca/history
+.. |License| image:: https://img.shields.io/github/license/apache/incubator-ariatosca.svg
+ :target: http://www.apache.org/licenses/LICENSE-2.0
+.. |PyPI release| image:: https://img.shields.io/pypi/v/apache-ariatosca.svg
+ :target: https://pypi.python.org/pypi/apache-ariatosca
+.. |Python Versions| image:: https://img.shields.io/pypi/pyversions/apache-ariatosca.svg
+.. |Wheel| image:: https://img.shields.io/pypi/wheel/apache-ariatosca.svg
+.. |Contributors| image:: https://img.shields.io/github/contributors/apache/incubator-ariatosca.svg
+.. |Open Pull Requests| image:: https://img.shields.io/github/issues-pr/apache/incubator-ariatosca.svg
+ :target: https://github.com/apache/incubator-ariatosca/pulls
+.. |Closed Pull Requests| image:: https://img.shields.io/github/issues-pr-closed-raw/apache/incubator-ariatosca.svg
+ :target: https://github.com/apache/incubator-ariatosca/pulls?q=is%3Apr+is%3Aclosed
diff --git a/azure/aria/aria-extension-cloudify/src/aria/VERSION b/azure/aria/aria-extension-cloudify/src/aria/VERSION
new file mode 100644
index 0000000..341cf11
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/VERSION
@@ -0,0 +1 @@
+0.2.0 \ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/appveyor.yml b/azure/aria/aria-extension-cloudify/src/aria/appveyor.yml
new file mode 100644
index 0000000..a03b180
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/appveyor.yml
@@ -0,0 +1,41 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+environment:
+
+ TOX_ENV: pywin
+
+ matrix:
+ - PYTHON: "C:\\Python27"
+ PYTHON_VERSION: 2.7.8
+ PYTHON_ARCH: 32
+
+build: false
+
+install:
+ - SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%
+ - ps: (new-object System.Net.WebClient).Downloadfile('https://bootstrap.pypa.io/get-pip.py', 'C:\Users\appveyor\get-pip.py')
+ - ps: Start-Process -FilePath "C:\Python27\python.exe" -ArgumentList "C:\Users\appveyor\get-pip.py" -Wait -Passthru
+
+before_test:
+ - pip install virtualenv --upgrade
+ - virtualenv env
+ - 'env\Scripts\activate.bat'
+ - pip install tox
+
+test_script:
+ - pip --version
+ - tox --version
+ - tox -e %TOX_ENV%
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/.pylintrc b/azure/aria/aria-extension-cloudify/src/aria/aria/.pylintrc
new file mode 100644
index 0000000..4d77556
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/.pylintrc
@@ -0,0 +1,423 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[MASTER]
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=.git
+
+# Add files or directories matching the regex patterns to the blacklist. The
+# regex matches against base names, not paths.
+ignore-patterns=
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Use multiple processes to speed up Pylint.
+jobs=4
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loaded into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=
+
+# Allow optimization of some AST trees. This will activate a peephole AST
+# optimizer, which will apply various small optimizations. For instance, it can
+# be used to obtain the result of joining multiple strings with the addition
+# operator. Joining a lot of strings can lead to a maximum recursion error in
+# Pylint and this flag can prevent that. It has one side effect, the resulting
+# AST will be different than the one from reality. This option is deprecated
+# and it will be removed in Pylint 2.0.
+optimize-ast=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time (only on the command line, not in the configuration file where
+# it should appear only once). See also the "--disable" option for examples.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once). You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use "--disable=all --enable=classes
+# --disable=W"
+disable=import-star-module-level,old-octal-literal,oct-method,print-statement,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating,redefined-builtin,logging-format-interpolation,import-error,redefined-variable-type,broad-except,protected-access,global-statement,no-member,unused-argument
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html. You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=colorized
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]". This option is deprecated
+# and it will be removed in Pylint 2.0.
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=yes
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables error, warning, refactor, convention
+# and statement, which respectively contain the number of messages in each
+# category and the total number of statements analyzed. This is used by the
+# global evaluation report (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=(_+[a-zA-Z0-9]*?$)|dummy|args|kwargs
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+# List of qualified module names which can have objects that can redefine
+# builtins.
+redefining-builtins-modules=six.moves,future.builtins
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take into consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[BASIC]
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,v,f,ex,e,_,id,ip
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# List of decorators that produce properties, such as abc.abstractproperty. Add
+# to this list to register other decorators that produce valid properties.
+property-classes=abc.abstractproperty
+
+# Regular expression matching correct function names
+function-rgx=[a-z_][a-z0-9_]{2,50}$
+
+# Naming hint for function names
+function-name-hint=[a-z_][a-z0-9_]{2,50}$
+
+# Regular expression matching correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,50}$
+
+# Naming hint for variable names
+variable-name-hint=[a-z_][a-z0-9_]{2,50}$
+
+# Regular expression matching correct constant names
+const-rgx=(([A-Za-z_][A-Za-z0-9_]*)|(__.*__))$
+
+# Naming hint for constant names
+const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,50}$
+
+# Naming hint for attribute names
+attr-name-hint=[a-z_][a-z0-9_]{2,50}$
+
+# Regular expression matching correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,50}$
+
+# Naming hint for argument names
+argument-name-hint=[a-z_][a-z0-9_]{2,50}$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,50}|(__.*__))$
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,50}|(__.*__))$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression matching correct method names
+method-rgx=[a-z_][a-z0-9_]{2,50}$
+
+# Naming hint for method names
+method-name-hint=[a-z_][a-z0-9_]{2,50}$
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=^_
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+
+[ELIF]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=7
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis).
+# It supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes=optparse.Values,thread._local,_thread._local
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+generated-members=
+
+# List of decorators that produce context managers, such as
+# contextlib.contextmanager. Add to this list to register other decorators that
+# produce valid context managers.
+contextmanager-decorators=contextlib.contextmanager
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
+no-space-check=trailing-comma,dict-separator
+
+# Maximum number of lines in a module
+max-module-lines=1500
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=20
+
+# Argument names that match this expression will be ignored. Defaults to names
+# with a leading underscore.
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=20
+
+# Maximum number of return / yield for function / method body
+max-returns=10
+
+# Maximum number of branch for function / method body
+max-branches=15
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=20
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=0
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=50
+
+# Maximum number of boolean expressions in a if statement
+max-bool-expr=5
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,TERMIOS,Bastion,rexec
+
+# Create a graph of every (i.e. internal and external) dependency in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+# Force import order to recognize a module as part of the standard
+# compatibility libraries.
+known-standard-library=
+
+# Force import order to recognize a module as part of a third party library.
+known-third-party=enchant
+
+# Analyse import fallback blocks. This can be used to support both Python 2 and
+# 3 compatible code, which means that the block might have code that exists
+# only in one or another interpreter, leading to false positives when analysed.
+analyse-fallback-blocks=no
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
+
+
+[pre-commit-hook]
+params=--reports=no
+limit=9.5
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/__init__.py
new file mode 100644
index 0000000..76a62ce
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/__init__.py
@@ -0,0 +1,89 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+The ARIA root package provides entry points for extension and storage initialization.
+"""
+
+import sys
+
+import pkg_resources
+aria_package_name = 'apache-ariatosca'
+__version__ = pkg_resources.get_distribution(aria_package_name).version
+
+from .orchestrator.decorators import workflow, operation # pylint: disable=wrong-import-position
+from . import ( # pylint: disable=wrong-import-position
+ extension,
+ utils,
+ parser,
+ storage,
+ modeling,
+ orchestrator,
+ cli
+)
+
+if sys.version_info < (2, 7):
+    # pkgutil in Python 2.6 has a bug where it fails to import from protected modules, which
+    # causes the entire process to fail. To overcome this issue we use our custom iter_modules.
+ from .utils.imports import iter_modules
+else:
+ from pkgutil import iter_modules
+
+__all__ = (
+ '__version__',
+ 'workflow',
+ 'operation',
+ 'install_aria_extensions',
+ 'application_model_storage',
+ 'application_resource_storage'
+)
+
+
+def install_aria_extensions():
+ """
+ Iterates all Python packages with names beginning with ``aria_extension_`` and all
+ ``aria_extension`` entry points and loads them.
+
+ It then invokes all registered extension functions.
+ """
+ for loader, module_name, _ in iter_modules():
+ if module_name.startswith('aria_extension_'):
+ loader.find_module(module_name).load_module(module_name)
+ for entry_point in pkg_resources.iter_entry_points(group='aria_extension'):
+ entry_point.load()
+ extension.init()
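+
+
+# Illustrative bootstrap order (an assumption, not mandated by this package): load extensions
+# before initializing storage, so that models registered by extensions are included, e.g.:
+#
+#   install_aria_extensions()
+#   model_storage = application_model_storage(sql_mapi.SQLAlchemyModelAPI)
+#
+# where ``sql_mapi`` stands for ``aria.storage.sql_mapi``.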
+
+
+def application_model_storage(api, api_kwargs=None, initiator=None, initiator_kwargs=None):
+ """
+ Initiate model storage.
+ """
+ return storage.ModelStorage(api_cls=api,
+ api_kwargs=api_kwargs,
+ items=modeling.models.models_to_register,
+ initiator=initiator,
+ initiator_kwargs=initiator_kwargs or {})
+
+
+def application_resource_storage(api, api_kwargs=None, initiator=None, initiator_kwargs=None):
+ """
+    Initialize application resource storage.
+ """
+
+ return storage.ResourceStorage(api_cls=api,
+ api_kwargs=api_kwargs,
+ items=['service_template', 'service', 'plugin'],
+ initiator=initiator,
+ initiator_kwargs=initiator_kwargs)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/__init__.py
new file mode 100644
index 0000000..c0ef46f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/__init__.py
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+CLI package.
+"""
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/ascii_art.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/ascii_art.py
new file mode 100644
index 0000000..8a8b79f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/ascii_art.py
@@ -0,0 +1,24 @@
+# -*- coding: utf8 -*-
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+ARIA_ASCII_ART = r"""
+ █████╗ ██████╗ ██╗ █████╗
+ ██╔══██╗██╔══██╗██║██╔══██╗
+ ███████║██████╔╝██║███████║
+ ██╔══██║██╔══██╗██║██╔══██║
+ ██║ ██║██║ ██║██║██║ ██║
+ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═╝"""
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/color.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/color.py
new file mode 100644
index 0000000..d6a4cd6
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/color.py
@@ -0,0 +1,108 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Terminal colorization utilities.
+"""
+
+from StringIO import StringIO
+import atexit
+import re
+
+import colorama
+
+from ..utils.formatting import safe_str
+
+
+def _restore_terminal():
+ colorama.deinit()
+
+
+colorama.init()
+atexit.register(_restore_terminal)
+
+
+class StringStylizer(object):
+ def __init__(self, str_, color_spec=None):
+ self._str = str_
+ self._color_spec = color_spec
+
+ def __repr__(self):
+ if self._color_spec:
+ return '{schema}{str}{reset}'.format(
+ schema=self._color_spec, str=safe_str(self._str), reset=Colors.Style.RESET_ALL)
+ return self._str
+
+ def __add__(self, other):
+ return safe_str(self) + other
+
+ def __radd__(self, other):
+ return other + safe_str(self)
+
+ def color(self, color_spec):
+ self._color_spec = color_spec
+
+ def replace(self, old, new, **kwargs):
+ self._str = self._str.replace(safe_str(old), safe_str(new), **kwargs)
+
+ def format(self, *args, **kwargs):
+ self._str = self._str.format(*args, **kwargs)
+
+ def highlight(self, pattern, schema):
+ if pattern is None:
+ return
+ for match in set(re.findall(re.compile(pattern), self._str)):
+ self.replace(match, schema + match + Colors.Style.RESET_ALL + self._color_spec)
+
+
+def _get_colors(color_type):
+ for name in dir(color_type):
+ if not name.startswith('_'):
+ yield (name.lower(), getattr(color_type, name))
+
+
+class Colors(object):
+ Fore = colorama.Fore
+ Back = colorama.Back
+ Style = colorama.Style
+
+ _colors = {
+ 'fore': dict(_get_colors(Fore)),
+ 'back': dict(_get_colors(Back)),
+ 'style': dict(_get_colors(Style))
+ }
+
+
+class ColorSpec(object):
+ def __init__(self, fore=None, back=None, style=None):
+ """
+        fore, back and style arguments may each be given either as a lowercase color name
+        (e.g. 'red') or as the corresponding Colorama constant.
+ """
+ self._kwargs = dict(fore=fore, back=back, style=style)
+ self._str = StringIO()
+ for type_, colors in Colors._colors.items():
+ value = self._kwargs.get(type_, None)
+            # the former case handles a lowercase name string; the latter, a Colorama constant.
+ self._str.write(colors.get(value) or value)
+
+ def __str__(self):
+ return self._str.getvalue()
+
+ def __add__(self, other):
+ return str(self) + str(other)
+
+ def __radd__(self, other):
+ return str(other) + str(self)
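+
+
+# Illustrative usage (an example, not part of the original module; assumes an ANSI-capable
+# terminal):
+#
+#   spec = ColorSpec(fore='red', style='bright')
+#   print(StringStylizer('alert!', spec))  # prints "alert!" in bright red, then resets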
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/__init__.py
new file mode 100644
index 0000000..ba34a43
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/__init__.py
@@ -0,0 +1,30 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+CLI commands package.
+"""
+
+from . import (
+ executions,
+ logs,
+ node_templates,
+ nodes,
+ plugins,
+ reset,
+ service_templates,
+ services,
+ workflows
+)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/executions.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/executions.py
new file mode 100644
index 0000000..cecbbc5
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/executions.py
@@ -0,0 +1,246 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+CLI ``executions`` sub-commands.
+"""
+
+import os
+
+from .. import helptexts
+from .. import table
+from .. import utils
+from .. import logger as cli_logger
+from .. import execution_logging
+from ..core import aria
+from ...modeling.models import Execution
+from ...orchestrator.workflow_runner import WorkflowRunner
+from ...orchestrator.workflows.executor.dry import DryExecutor
+from ...utils import formatting
+from ...utils import threading
+
+EXECUTION_COLUMNS = ('id', 'workflow_name', 'status', 'service_name',
+ 'created_at', 'error')
+
+
+@aria.group(name='executions')
+@aria.options.verbose()
+def executions():
+ """
+ Manage executions
+ """
+ pass
+
+
+@executions.command(name='show',
+ short_help='Show information for an execution')
+@aria.argument('execution-id')
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_logger
+def show(execution_id, model_storage, logger):
+ """
+ Show information for an execution
+
+ EXECUTION_ID is the unique ID of the execution.
+ """
+ logger.info('Showing execution {0}'.format(execution_id))
+ execution = model_storage.execution.get(execution_id)
+
+ table.print_data(EXECUTION_COLUMNS, execution, 'Execution:', col_max_width=50)
+
+    # print execution inputs
+ logger.info('Execution Inputs:')
+ if execution.inputs:
+        # TODO: check this section; it hasn't been tested
+ execution_inputs = [ei.to_dict() for ei in execution.inputs]
+ for input_name, input_value in formatting.decode_dict(
+ execution_inputs).iteritems():
+ logger.info('\t{0}: \t{1}'.format(input_name, input_value))
+ else:
+ logger.info('\tNo inputs')
+
+
+@executions.command(name='list',
+ short_help='List executions')
+@aria.options.service_name(required=False)
+@aria.options.sort_by()
+@aria.options.descending
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_logger
+def list(service_name,
+ sort_by,
+ descending,
+ model_storage,
+ logger):
+ """
+ List executions
+
+ If SERVICE_NAME is provided, list executions on that service. Otherwise, list executions on all
+ services.
+ """
+ if service_name:
+ logger.info('Listing executions for service {0}...'.format(
+ service_name))
+ service = model_storage.service.get_by_name(service_name)
+ filters = dict(service=service)
+ else:
+ logger.info('Listing all executions...')
+ filters = {}
+
+ executions_list = model_storage.execution.list(
+ filters=filters,
+ sort=utils.storage_sort_param(sort_by, descending)).items
+
+ table.print_data(EXECUTION_COLUMNS, executions_list, 'Executions:')
+
+
+@executions.command(name='start',
+ short_help='Start a workflow on a service')
+@aria.argument('workflow-name')
+@aria.options.service_name(required=True)
+@aria.options.inputs(help=helptexts.EXECUTION_INPUTS)
+@aria.options.dry_execution
+@aria.options.task_max_attempts()
+@aria.options.task_retry_interval()
+@aria.options.mark_pattern()
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_resource_storage
+@aria.pass_plugin_manager
+@aria.pass_logger
+def start(workflow_name,
+ service_name,
+ inputs,
+ dry,
+ task_max_attempts,
+ task_retry_interval,
+ mark_pattern,
+ model_storage,
+ resource_storage,
+ plugin_manager,
+ logger):
+ """
+ Start a workflow on a service
+
+ SERVICE_NAME is the unique name of the service.
+
+ WORKFLOW_NAME is the unique name of the workflow within the service (e.g. "uninstall").
+ """
+ service = model_storage.service.get_by_name(service_name)
+ executor = DryExecutor() if dry else None # use WorkflowRunner's default executor
+
+ workflow_runner = \
+ WorkflowRunner(
+ model_storage, resource_storage, plugin_manager,
+ service_id=service.id, workflow_name=workflow_name, inputs=inputs, executor=executor,
+ task_max_attempts=task_max_attempts, task_retry_interval=task_retry_interval
+ )
+    logger.info('Starting {0}execution. Press Ctrl+C to cancel'.format('dry ' if dry else ''))
+
+ _run_execution(workflow_runner, logger, model_storage, dry, mark_pattern)
+
+
+@executions.command(name='resume',
+ short_help='Resume a stopped execution')
+@aria.argument('execution-id')
+@aria.options.dry_execution
+@aria.options.retry_failed_tasks
+@aria.options.mark_pattern()
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_resource_storage
+@aria.pass_plugin_manager
+@aria.pass_logger
+def resume(execution_id,
+ retry_failed_tasks,
+ dry,
+ mark_pattern,
+ model_storage,
+ resource_storage,
+ plugin_manager,
+ logger):
+ """
+ Resume a stopped execution
+
+ EXECUTION_ID is the unique ID of the execution.
+ """
+ executor = DryExecutor() if dry else None # use WorkflowRunner's default executor
+
+ execution = model_storage.execution.get(execution_id)
+ if execution.status != execution.CANCELLED:
+ logger.info("Can't resume execution {execution.id} - "
+ "execution is in status {execution.status}. "
+ "Can only resume executions in status {valid_status}"
+ .format(execution=execution, valid_status=execution.CANCELLED))
+ return
+
+ workflow_runner = \
+ WorkflowRunner(
+ model_storage, resource_storage, plugin_manager,
+ execution_id=execution_id, retry_failed_tasks=retry_failed_tasks, executor=executor,
+ )
+
+    logger.info('Resuming {0}execution. Press Ctrl+C to cancel'.format('dry ' if dry else ''))
+ _run_execution(workflow_runner, logger, model_storage, dry, mark_pattern)
+
+
+def _run_execution(workflow_runner, logger, model_storage, dry, mark_pattern):
+ execution_thread_name = '{0}_{1}'.format(workflow_runner.service.name,
+ workflow_runner.execution.workflow_name)
+ execution_thread = threading.ExceptionThread(target=workflow_runner.execute,
+ name=execution_thread_name)
+
+ execution_thread.start()
+
+ last_task_id = workflow_runner.execution.logs[-1].id if workflow_runner.execution.logs else 0
+ log_iterator = cli_logger.ModelLogIterator(model_storage,
+ workflow_runner.execution_id,
+ offset=last_task_id)
+ try:
+ while execution_thread.is_alive():
+ execution_logging.log_list(log_iterator, mark_pattern=mark_pattern)
+ execution_thread.join(1)
+
+ except KeyboardInterrupt:
+ _cancel_execution(workflow_runner, execution_thread, logger, log_iterator)
+
+    # Some logs might have been written just before the execution terminated, so we need to
+    # drain the remaining logs.
+ execution_logging.log_list(log_iterator, mark_pattern=mark_pattern)
+
+ # raise any errors from the execution thread (note these are not workflow execution errors)
+ execution_thread.raise_error_if_exists()
+
+ execution = workflow_runner.execution
+ logger.info('Execution has ended with "{0}" status'.format(execution.status))
+ if execution.status == Execution.FAILED and execution.error:
+ logger.info('Execution error:{0}{1}'.format(os.linesep, execution.error))
+
+ if dry:
+ # remove traces of the dry execution (including tasks, logs, inputs..)
+ model_storage.execution.delete(execution)
+
+
+def _cancel_execution(workflow_runner, execution_thread, logger, log_iterator):
+ logger.info('Cancelling execution. Press Ctrl+C again to force-cancel.')
+ workflow_runner.cancel()
+ while execution_thread.is_alive():
+ try:
+ execution_logging.log_list(log_iterator)
+ execution_thread.join(1)
+ except KeyboardInterrupt:
+ pass
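+
+
+# Illustrative CLI session for this module (service and workflow names are examples; the ``-s``
+# service flag follows the usage shown in the README):
+#
+#   aria executions start install -s my-service
+#   aria executions show <execution-id>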
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/logs.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/logs.py
new file mode 100644
index 0000000..b751b97
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/logs.py
@@ -0,0 +1,72 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+CLI ``logs`` sub-commands.
+"""
+
+from .. import execution_logging
+from ..logger import ModelLogIterator
+from ..core import aria
+
+
+@aria.group(name='logs')
+@aria.options.verbose()
+def logs():
+ """
+ Manage logs of workflow executions
+ """
+ pass
+
+
+@logs.command(name='list',
+ short_help='List logs for an execution')
+@aria.argument('execution-id')
+@aria.options.verbose()
+@aria.options.mark_pattern()
+@aria.pass_model_storage
+@aria.pass_logger
+def list(execution_id, mark_pattern, model_storage, logger):
+ """
+ List logs for an execution
+
+ EXECUTION_ID is the unique ID of the execution.
+ """
+ logger.info('Listing logs for execution id {0}'.format(execution_id))
+ log_iterator = ModelLogIterator(model_storage, execution_id)
+
+ any_logs = execution_logging.log_list(log_iterator, mark_pattern=mark_pattern)
+
+ if not any_logs:
+ logger.info('\tNo logs')
+
+
+@logs.command(name='delete',
+ short_help='Delete logs of an execution')
+@aria.argument('execution-id')
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_logger
+def delete(execution_id, model_storage, logger):
+ """
+ Delete logs of an execution
+
+ EXECUTION_ID is the unique ID of the execution.
+ """
+ logger.info('Deleting logs for execution id {0}'.format(execution_id))
+ logs_list = model_storage.log.list(filters=dict(execution_fk=execution_id))
+ for log in logs_list:
+ model_storage.log.delete(log)
+ logger.info('Deleted logs for execution id {0}'.format(execution_id))
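+
+
+# Illustrative CLI session for this module:
+#
+#   aria logs list <execution-id>
+#   aria logs delete <execution-id>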
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/node_templates.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/node_templates.py
new file mode 100644
index 0000000..ec160d2
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/node_templates.py
@@ -0,0 +1,100 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+CLI ``node-templates`` sub-commands.
+"""
+
+from .. import table
+from .. import utils
+from ..core import aria
+
+
+NODE_TEMPLATE_COLUMNS = ['id', 'name', 'description', 'service_template_name', 'type_name']
+
+
+@aria.group(name='node-templates')
+@aria.options.verbose()
+def node_templates():
+ """
+    Manage stored service templates' node templates
+ """
+ pass
+
+
+@node_templates.command(name='show',
+ short_help='Show information for a stored node template')
+@aria.argument('node-template-id')
+# @aria.options.service_template_name(required=True)
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_logger
+def show(node_template_id, model_storage, logger):
+ """
+ Show information for a stored node template
+
+ NODE_TEMPLATE_ID is the unique node template ID.
+ """
+ logger.info('Showing node template {0}'.format(node_template_id))
+ node_template = model_storage.node_template.get(node_template_id)
+
+ table.print_data(NODE_TEMPLATE_COLUMNS, node_template, 'Node template:', col_max_width=50)
+
+ # print node template properties
+ logger.info('Node template properties:')
+ if node_template.properties:
+ logger.info(utils.get_parameter_templates_as_string(node_template.properties))
+ else:
+ logger.info('\tNo properties')
+
+ # print node IDs
+ nodes = node_template.nodes
+ logger.info('Nodes:')
+ if nodes:
+ for node in nodes:
+ logger.info('\t{0}'.format(node.name))
+ else:
+ logger.info('\tNo nodes')
+
+
+@node_templates.command(name='list',
+ short_help='List stored node templates')
+@aria.options.service_template_name()
+@aria.options.sort_by('service_template_name')
+@aria.options.descending
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_logger
+def list(service_template_name, sort_by, descending, model_storage, logger):
+ """
+ List stored node templates
+
+ If SERVICE_TEMPLATE_NAME is provided, list node templates for that stored service template.
+ Otherwise, list node templates for all service templates.
+ """
+ if service_template_name:
+ logger.info('Listing node templates for service template {0}...'.format(
+ service_template_name))
+ service_template = model_storage.service_template.get_by_name(service_template_name)
+ filters = dict(service_template=service_template)
+ else:
+ logger.info('Listing all node templates...')
+ filters = {}
+
+ node_templates_list = model_storage.node_template.list(
+ filters=filters,
+ sort=utils.storage_sort_param(sort_by, descending))
+
+ table.print_data(NODE_TEMPLATE_COLUMNS, node_templates_list, 'Node templates:')
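+
+
+# Illustrative CLI session for this module:
+#
+#   aria node-templates show <node-template-id>
+#   aria node-templates list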
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/nodes.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/nodes.py
new file mode 100644
index 0000000..30f1dd4
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/nodes.py
@@ -0,0 +1,94 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+CLI ``nodes`` sub-commands.
+"""
+
+from .. import table
+from .. import utils
+from ..core import aria
+
+
+NODE_COLUMNS = ['id', 'name', 'service_name', 'node_template_name', 'state']
+
+
+@aria.group(name='nodes')
+@aria.options.verbose()
+def nodes():
+ """
+ Manage services' nodes
+ """
+ pass
+
+
+@nodes.command(name='show',
+ short_help='Show information for a node')
+@aria.argument('node_id')
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_logger
+def show(node_id, model_storage, logger):
+ """
+ Show information for a node
+
+ NODE_ID is the unique node ID.
+ """
+ logger.info('Showing node {0}'.format(node_id))
+ node = model_storage.node.get(node_id)
+
+ table.print_data(NODE_COLUMNS, node, 'Node:', col_max_width=50)
+
+ # print node attributes
+ logger.info('Node attributes:')
+ if node.attributes:
+ for param_name, param in node.attributes.iteritems():
+ logger.info('\t{0}: {1}'.format(param_name, param.value))
+ else:
+ logger.info('\tNo attributes')
+
+
+@nodes.command(name='list',
+               short_help='List nodes')
+@aria.options.service_name(required=False)
+@aria.options.sort_by('service_name')
+@aria.options.descending
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_logger
+def list(service_name,
+ sort_by,
+ descending,
+ model_storage,
+ logger):
+ """
+ List nodes
+
+ If SERVICE_NAME is provided, list nodes for that service. Otherwise, list nodes for all
+ services.
+ """
+ if service_name:
+ logger.info('Listing nodes for service {0}...'.format(service_name))
+ service = model_storage.service.get_by_name(service_name)
+ filters = dict(service=service)
+ else:
+ logger.info('Listing all nodes...')
+ filters = {}
+
+ nodes_list = model_storage.node.list(
+ filters=filters,
+ sort=utils.storage_sort_param(sort_by, descending))
+
+ table.print_data(NODE_COLUMNS, nodes_list, 'Nodes:')
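+
+
+# Illustrative CLI session for this module:
+#
+#   aria nodes show <node-id>
+#   aria nodes list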
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/plugins.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/plugins.py
new file mode 100644
index 0000000..b5d68a2
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/plugins.py
@@ -0,0 +1,111 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+CLI ``plugins`` sub-commands.
+"""
+
+from .. import table
+from .. import utils
+from ..core import aria
+
+
+PLUGIN_COLUMNS = ['id', 'package_name', 'package_version', 'supported_platform',
+ 'distribution', 'distribution_release', 'uploaded_at']
+
+
+@aria.group(name='plugins')
+@aria.options.verbose()
+def plugins():
+ """
+ Manage plugins
+ """
+ pass
+
+
+@plugins.command(name='validate',
+ short_help='Validate a plugin archive')
+@aria.argument('plugin-path')
+@aria.options.verbose()
+@aria.pass_plugin_manager
+@aria.pass_logger
+def validate(plugin_path, plugin_manager, logger):
+ """
+ Validate a plugin archive
+
+ A valid plugin is a wagon (`http://github.com/cloudify-cosmo/wagon`) in the ZIP format (suffix
+ may also be `.wgn`).
+
+ PLUGIN_PATH is the path to the wagon archive.
+ """
+ logger.info('Validating plugin {0}...'.format(plugin_path))
+ plugin_manager.validate_plugin(plugin_path)
+ logger.info('Plugin validated successfully')
+
+
+@plugins.command(name='install',
+ short_help='Install a plugin')
+@aria.argument('plugin-path')
+@aria.options.verbose()
+@aria.pass_context
+@aria.pass_plugin_manager
+@aria.pass_logger
+def install(ctx, plugin_path, plugin_manager, logger):
+ """
+ Install a plugin
+
+ A valid plugin is a wagon (`http://github.com/cloudify-cosmo/wagon`) in the ZIP format (suffix
+ may also be `.wgn`).
+
+ PLUGIN_PATH is the path to the wagon archive.
+ """
+ ctx.invoke(validate, plugin_path=plugin_path)
+ logger.info('Installing plugin {0}...'.format(plugin_path))
+ plugin = plugin_manager.install(plugin_path)
+ logger.info("Plugin installed. The plugin's id is {0}".format(plugin.id))
+
+
+@plugins.command(name='show',
+ short_help='Show information for an installed plugin')
+@aria.argument('plugin-id')
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_logger
+def show(plugin_id, model_storage, logger):
+ """
+ Show information for an installed plugin
+
+ PLUGIN_ID is the unique installed plugin ID in this ARIA instance.
+ """
+ logger.info('Showing plugin {0}...'.format(plugin_id))
+ plugin = model_storage.plugin.get(plugin_id)
+ table.print_data(PLUGIN_COLUMNS, plugin, 'Plugin:')
+
+
+@plugins.command(name='list',
+ short_help='List all installed plugins')
+@aria.options.sort_by('uploaded_at')
+@aria.options.descending
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_logger
+def list(sort_by, descending, model_storage, logger):
+ """
+ List all installed plugins
+ """
+ logger.info('Listing all plugins...')
+ plugins_list = model_storage.plugin.list(
+ sort=utils.storage_sort_param(sort_by, descending)).items
+ table.print_data(PLUGIN_COLUMNS, plugins_list, 'Plugins:')
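+
+
+# Illustrative CLI session for this module (the .wgn path is an example):
+#
+#   aria plugins validate ./my-plugin.wgn
+#   aria plugins install ./my-plugin.wgn
+#   aria plugins list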
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/reset.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/reset.py
new file mode 100644
index 0000000..c82c707
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/reset.py
@@ -0,0 +1,45 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+CLI ``reset`` command.
+"""
+
+from .. import helptexts
+from ..core import aria
+from ..env import env
+from ..exceptions import AriaCliError
+
+
+@aria.command(name='reset',
+ short_help="Reset ARIA working directory")
+@aria.options.force(help=helptexts.FORCE_RESET)
+@aria.options.reset_config
+@aria.pass_logger
+@aria.options.verbose()
+def reset(force, reset_config, logger):
+ """
+ Reset ARIA working directory
+
+    Deletes installed plugins, service templates, services, executions, and logs. The user
+    configuration remains intact unless the `--reset_config` flag is set as well, in which case
+    the entire ARIA working directory is removed.
+ """
+ if not force:
+ raise AriaCliError("To reset the ARIA's working directory, you must also provide the force"
+ " flag ('-f'/'--force').")
+
+ env.reset(reset_config=reset_config)
+ logger.info("ARIA's working directory has been reset")
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/service_templates.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/service_templates.py
new file mode 100644
index 0000000..5a7039c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/service_templates.py
@@ -0,0 +1,244 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+CLI ``service-templates`` sub-commands.
+"""
+
+import os
+
+from .. import csar
+from .. import service_template_utils
+from .. import table
+from .. import utils
+from ..core import aria
+from ...core import Core
+from ...storage import exceptions as storage_exceptions
+from ...parser import consumption
+from ...utils import (formatting, collections, console)
+from ...orchestrator import topology
+
+DESCRIPTION_FIELD_LENGTH_LIMIT = 20
+SERVICE_TEMPLATE_COLUMNS = \
+ ('id', 'name', 'description', 'main_file_name', 'created_at', 'updated_at')
+
+
+@aria.group(name='service-templates')
+@aria.options.verbose()
+def service_templates():
+ """
+ Manage service templates
+ """
+ pass
+
+
+@service_templates.command(name='show',
+ short_help='Show information for a stored service template')
+@aria.argument('service-template-name')
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.options.service_template_mode_full
+@aria.options.mode_types
+@aria.options.format_json
+@aria.options.format_yaml
+@aria.pass_logger
+def show(service_template_name, model_storage, mode_full, mode_types, format_json, format_yaml,
+ logger):
+ """
+ Show information for a stored service template
+
+ SERVICE_TEMPLATE_NAME is the unique name of the stored service template.
+ """
+ service_template = model_storage.service_template.get_by_name(service_template_name)
+
+ if format_json or format_yaml:
+ mode_full = True
+
+ if mode_full:
+ consumption.ConsumptionContext()
+ if format_json:
+ console.puts(formatting.json_dumps(collections.prune(service_template.as_raw)))
+ elif format_yaml:
+ console.puts(formatting.yaml_dumps(collections.prune(service_template.as_raw)))
+ else:
+ console.puts(topology.Topology().dump(service_template))
+ elif mode_types:
+ console.puts(topology.Topology().dump_types(service_template=service_template))
+ else:
+ logger.info('Showing service template {0}...'.format(service_template_name))
+ service_template_dict = service_template.to_dict()
+ service_template_dict['#services'] = len(service_template.services)
+ columns = SERVICE_TEMPLATE_COLUMNS + ('#services',)
+ column_formatters = \
+ dict(description=table.trim_formatter_generator(DESCRIPTION_FIELD_LENGTH_LIMIT))
+ table.print_data(columns, service_template_dict, 'Service-template:',
+ column_formatters=column_formatters, col_max_width=50)
+
+ if service_template_dict['description'] is not None:
+ logger.info('Description:')
+ logger.info('{0}{1}'.format(service_template_dict['description'].encode('UTF-8') or '',
+ os.linesep))
+
+ if service_template.services:
+ logger.info('Existing services:')
+ for service_name in service_template.services:
+ logger.info('\t{0}'.format(service_name))
+
+
+@service_templates.command(name='list',
+ short_help='List all stored service templates')
+@aria.options.sort_by()
+@aria.options.descending
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_logger
+def list(sort_by, descending, model_storage, logger):
+ """
+ List all stored service templates
+ """
+
+ logger.info('Listing all service templates...')
+ service_templates_list = model_storage.service_template.list(
+ sort=utils.storage_sort_param(sort_by, descending))
+
+ column_formatters = \
+ dict(description=table.trim_formatter_generator(DESCRIPTION_FIELD_LENGTH_LIMIT))
+ table.print_data(SERVICE_TEMPLATE_COLUMNS, service_templates_list, 'Service templates:',
+ column_formatters=column_formatters)
+
+
+@service_templates.command(name='store',
+ short_help='Parse and store a service template archive')
+@aria.argument('service-template-path')
+@aria.argument('service-template-name')
+@aria.options.service_template_filename
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_resource_storage
+@aria.pass_plugin_manager
+@aria.pass_logger
+def store(service_template_path, service_template_name, service_template_filename,
+ model_storage, resource_storage, plugin_manager, logger):
+ """
+ Parse and store a service template archive
+
+ SERVICE_TEMPLATE_PATH is the path to the service template archive.
+
+ SERVICE_TEMPLATE_NAME is the unique name to give to the service template in storage.
+ """
+ logger.info('Storing service template {0}...'.format(service_template_name))
+
+ service_template_path = service_template_utils.get(service_template_path,
+ service_template_filename)
+ core = Core(model_storage, resource_storage, plugin_manager)
+ try:
+ core.create_service_template(service_template_path,
+ os.path.dirname(service_template_path),
+ service_template_name)
+ except storage_exceptions.StorageError as e:
+ utils.check_overriding_storage_exceptions(e, 'service template', service_template_name)
+ raise
+ logger.info('Service template {0} stored'.format(service_template_name))
+
+
+@service_templates.command(name='delete',
+ short_help='Delete a stored service template')
+@aria.argument('service-template-name')
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_resource_storage
+@aria.pass_plugin_manager
+@aria.pass_logger
+def delete(service_template_name, model_storage, resource_storage, plugin_manager, logger):
+ """
+ Delete a stored service template
+
+ SERVICE_TEMPLATE_NAME is the unique name of the stored service template.
+ """
+ logger.info('Deleting service template {0}...'.format(service_template_name))
+ service_template = model_storage.service_template.get_by_name(service_template_name)
+ core = Core(model_storage, resource_storage, plugin_manager)
+ core.delete_service_template(service_template.id)
+ logger.info('Service template {0} deleted'.format(service_template_name))
+
+
+@service_templates.command(name='inputs',
+ short_help='Show stored service template inputs')
+@aria.argument('service-template-name')
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_logger
+def inputs(service_template_name, model_storage, logger):
+ """
+ Show stored service template inputs
+
+ SERVICE_TEMPLATE_NAME is the unique name of the stored service template.
+ """
+ logger.info('Showing inputs for service template {0}...'.format(service_template_name))
+ print_service_template_inputs(model_storage, service_template_name, logger)
+
+
+@service_templates.command(name='validate',
+ short_help='Validate a service template archive')
+@aria.argument('service-template')
+@aria.options.service_template_filename
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_resource_storage
+@aria.pass_plugin_manager
+@aria.pass_logger
+def validate(service_template, service_template_filename,
+ model_storage, resource_storage, plugin_manager, logger):
+ """
+ Validate a service template archive
+
+ SERVICE_TEMPLATE is the path to the service template archive.
+ """
+ logger.info('Validating service template: {0}'.format(service_template))
+ service_template_path = service_template_utils.get(service_template, service_template_filename)
+ core = Core(model_storage, resource_storage, plugin_manager)
+ core.validate_service_template(service_template_path)
+ logger.info('Service template validated successfully')
+
+
+@service_templates.command(name='create-archive',
+ short_help='Create a CSAR archive from a service template source')
+@aria.argument('service-template-path')
+@aria.argument('destination')
+@aria.options.verbose()
+@aria.pass_logger
+def create_archive(service_template_path, destination, logger):
+ """
+ Create a CSAR archive from a service template source
+
+ SERVICE_TEMPLATE_PATH is the path to the service template source.
+
+ DESTINATION is the path to the created CSAR archive.
+ """
+ logger.info('Creating a CSAR archive')
+ if not destination.endswith(csar.CSAR_FILE_EXTENSION):
+ destination += csar.CSAR_FILE_EXTENSION
+ csar.write(service_template_path, destination, logger)
+ logger.info('CSAR archive created at {0}'.format(destination))
+
+
+def print_service_template_inputs(model_storage, service_template_name, logger):
+ service_template = model_storage.service_template.get_by_name(service_template_name)
+
+ logger.info('Service template inputs:')
+ if service_template.inputs:
+ logger.info(utils.get_parameter_templates_as_string(service_template.inputs))
+ else:
+ logger.info('\tNo inputs')
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/services.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/services.py
new file mode 100644
index 0000000..6752899
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/services.py
@@ -0,0 +1,238 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+CLI ``services`` sub-commands.
+"""
+
+import os
+from StringIO import StringIO
+
+from . import service_templates
+from .. import helptexts
+from .. import table
+from .. import utils
+from ..core import aria
+from ...core import Core
+from ...modeling import exceptions as modeling_exceptions
+from ...storage import exceptions as storage_exceptions
+from ...parser import consumption
+from ...utils import (formatting, collections, console)
+from ...orchestrator import topology
+
+
+DESCRIPTION_FIELD_LENGTH_LIMIT = 20
+SERVICE_COLUMNS = ('id', 'name', 'description', 'service_template_name', 'created_at', 'updated_at')
+
+
+@aria.group(name='services')
+@aria.options.verbose()
+def services():
+ """
+ Manage services
+ """
+ pass
+
+
+@services.command(name='show',
+ short_help='Show information for a service')
+@aria.argument('service-name')
+@aria.options.verbose()
+@aria.options.service_mode_full
+@aria.options.mode_graph
+@aria.options.format_json
+@aria.options.format_yaml
+@aria.pass_model_storage
+@aria.pass_logger
+def show(service_name, model_storage, mode_full, mode_graph, format_json, format_yaml, logger):
+ """
+ Show information for a service
+
+ SERVICE_NAME is the unique name of the service.
+ """
+ service = model_storage.service.get_by_name(service_name)
+
+ if format_json or format_yaml:
+ mode_full = True
+
+ if mode_full:
+ consumption.ConsumptionContext()
+ if format_json:
+ console.puts(formatting.json_dumps(collections.prune(service.as_raw)))
+ elif format_yaml:
+ console.puts(formatting.yaml_dumps(collections.prune(service.as_raw)))
+ else:
+ console.puts(topology.Topology().dump(service))
+ elif mode_graph:
+ console.puts(topology.Topology().dump_graph(service))
+ else:
+ logger.info('Showing service {0}...'.format(service_name))
+ service_dict = service.to_dict()
+ columns = SERVICE_COLUMNS
+ column_formatters = \
+ dict(description=table.trim_formatter_generator(DESCRIPTION_FIELD_LENGTH_LIMIT))
+ table.print_data(columns, service_dict, 'Service:',
+ column_formatters=column_formatters, col_max_width=50)
+
+ if service_dict['description'] is not None:
+ logger.info('Description:')
+ logger.info('{0}{1}'.format(service_dict['description'].encode('UTF-8') or '',
+ os.linesep))
+
+
+@services.command(name='list', short_help='List services')
+@aria.options.service_template_name()
+@aria.options.sort_by()
+@aria.options.descending
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_logger
+def list(service_template_name,
+ sort_by,
+ descending,
+ model_storage,
+ logger):
+ """
+ List services
+
+ If `--service-template-name` is provided, list services based on that service template.
+ Otherwise, list all services.
+ """
+ if service_template_name:
+ logger.info('Listing services for service template {0}...'.format(
+ service_template_name))
+ service_template = model_storage.service_template.get_by_name(service_template_name)
+ filters = dict(service_template=service_template)
+ else:
+ logger.info('Listing all services...')
+ filters = {}
+
+ services_list = model_storage.service.list(
+ sort=utils.storage_sort_param(sort_by=sort_by, descending=descending),
+ filters=filters)
+ table.print_data(SERVICE_COLUMNS, services_list, 'Services:')
+
+
+@services.command(name='create',
+ short_help='Create a service')
+@aria.argument('service-name', required=False)
+@aria.options.service_template_name(required=True)
+@aria.options.inputs(help=helptexts.SERVICE_INPUTS)
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_resource_storage
+@aria.pass_plugin_manager
+@aria.pass_logger
+def create(service_template_name,
+ service_name,
+ inputs, # pylint: disable=redefined-outer-name
+ model_storage,
+ resource_storage,
+ plugin_manager,
+ logger):
+ """
+ Create a service
+
+ SERVICE_NAME is the unique name to give to the service.
+ """
+ logger.info('Creating new service from service template {0}...'.format(
+ service_template_name))
+ core = Core(model_storage, resource_storage, plugin_manager)
+ service_template = model_storage.service_template.get_by_name(service_template_name)
+
+ try:
+ service = core.create_service(service_template.id, inputs, service_name)
+ except storage_exceptions.StorageError as e:
+ utils.check_overriding_storage_exceptions(e, 'service', service_name)
+ raise
+ except modeling_exceptions.ParameterException:
+ service_templates.print_service_template_inputs(model_storage, service_template_name,
+ logger)
+ raise
+ logger.info("Service created. The service's name is {0}".format(service.name))
+
+
+@services.command(name='delete',
+ short_help='Delete a service')
+@aria.argument('service-name')
+@aria.options.force(help=helptexts.IGNORE_AVAILABLE_NODES)
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_resource_storage
+@aria.pass_plugin_manager
+@aria.pass_logger
+def delete(service_name, force, model_storage, resource_storage, plugin_manager, logger):
+ """
+ Delete a service
+
+ SERVICE_NAME is the unique name of the service.
+ """
+ logger.info('Deleting service {0}...'.format(service_name))
+ service = model_storage.service.get_by_name(service_name)
+ core = Core(model_storage, resource_storage, plugin_manager)
+ core.delete_service(service.id, force=force)
+ logger.info('Service {0} deleted'.format(service_name))
+
+
+@services.command(name='outputs',
+ short_help='Show service outputs')
+@aria.argument('service-name')
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_logger
+def outputs(service_name, model_storage, logger):
+ """
+ Show service outputs
+
+ SERVICE_NAME is the unique name of the service.
+ """
+ logger.info('Showing outputs for service {0}...'.format(service_name))
+ service = model_storage.service.get_by_name(service_name)
+
+ if service.outputs:
+ outputs_string = StringIO()
+ for output_name, output in service.outputs.iteritems():
+ outputs_string.write(' - "{0}":{1}'.format(output_name, os.linesep))
+ outputs_string.write(' Description: {0}{1}'.format(output.description, os.linesep))
+ outputs_string.write(' Value: {0}{1}'.format(output.value, os.linesep))
+ logger.info(outputs_string.getvalue())
+ else:
+ logger.info('\tNo outputs')
+
+
+@services.command(name='inputs',
+ short_help='Show service inputs')
+@aria.argument('service-name')
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_logger
+def inputs(service_name, model_storage, logger):
+ """
+ Show service inputs
+
+ SERVICE_NAME is the unique name of the service.
+ """
+ logger.info('Showing inputs for service {0}...'.format(service_name))
+ service = model_storage.service.get_by_name(service_name)
+
+ if service.inputs:
+ inputs_string = StringIO()
+ for input_name, input_ in service.inputs.iteritems():
+ inputs_string.write(' - "{0}":{1}'.format(input_name, os.linesep))
+ inputs_string.write(' Description: {0}{1}'.format(input_.description, os.linesep))
+ inputs_string.write(' Value: {0}{1}'.format(input_.value, os.linesep))
+ logger.info(inputs_string.getvalue())
+ else:
+ logger.info('\tNo inputs')
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/workflows.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/workflows.py
new file mode 100644
index 0000000..ca191aa
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/commands/workflows.py
@@ -0,0 +1,111 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+CLI ``workflows`` sub-commands.
+"""
+
+from .. import table
+from ..core import aria
+from ..exceptions import AriaCliError
+
+WORKFLOW_COLUMNS = ['name', 'service_template_name', 'service_name']
+
+
+@aria.group(name='workflows')
+def workflows():
+ """
+ Manage service workflows
+ """
+ pass
+
+
+@workflows.command(name='show',
+ short_help='Show information for a service workflow')
+@aria.argument('workflow-name')
+@aria.options.service_name(required=True)
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_logger
+def show(workflow_name, service_name, model_storage, logger):
+ """
+ Show information for a service workflow
+
+ SERVICE_NAME is the unique name of the service.
+
+ WORKFLOW_NAME is the unique name of the workflow within the service (e.g. "uninstall").
+ """
+ logger.info('Retrieving workflow {0} for service {1}'.format(
+ workflow_name, service_name))
+ service = model_storage.service.get_by_name(service_name)
+ workflow = next((wf for wf in service.workflows.itervalues() if
+ wf.name == workflow_name), None)
+ if not workflow:
+ raise AriaCliError(
+ 'Workflow {0} not found for service {1}'.format(workflow_name, service_name))
+
+ defaults = {
+ 'service_template_name': service.service_template_name,
+ 'service_name': service.name
+ }
+ table.print_data(WORKFLOW_COLUMNS, workflow, 'Workflows:', defaults=defaults)
+
+ # print workflow inputs
+ required_inputs = dict()
+ optional_inputs = dict()
+ for input_name, input_ in workflow.inputs.iteritems():
+ inputs_group = optional_inputs if input_.value is not None else required_inputs
+ inputs_group[input_name] = input_
+
+ logger.info('Workflow Inputs:')
+ logger.info('\tMandatory Inputs:')
+ for input_name, input_ in required_inputs.iteritems():
+ if input_.description is not None:
+ logger.info('\t\t{0}\t({1})'.format(input_name,
+ input_.description))
+ else:
+ logger.info('\t\t{0}'.format(input_name))
+
+ logger.info('\tOptional Inputs:')
+ for input_name, input_ in optional_inputs.iteritems():
+ if input_.description is not None:
+ logger.info('\t\t{0}: \t{1}\t({2})'.format(
+ input_name, input_.value, input_.description))
+ else:
+ logger.info('\t\t{0}: \t{1}'.format(input_name,
+ input_.value))
+
+
+@workflows.command(name='list',
+ short_help='List service workflows')
+@aria.options.service_name(required=True)
+@aria.options.verbose()
+@aria.pass_model_storage
+@aria.pass_logger
+def list(service_name, model_storage, logger):
+ """
+ List service workflows
+
+ SERVICE_NAME is the unique name of the service.
+ """
+ logger.info('Listing workflows for service {0}...'.format(service_name))
+ service = model_storage.service.get_by_name(service_name)
+ workflows_list = sorted(service.workflows.itervalues(), key=lambda w: w.name)
+
+ defaults = {
+ 'service_template_name': service.service_template_name,
+ 'service_name': service.name
+ }
+ table.print_data(WORKFLOW_COLUMNS, workflows_list, 'Workflows:', defaults=defaults)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/config/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/config/__init__.py
new file mode 100644
index 0000000..738e8ed
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/config/__init__.py
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+CLI configuration package.
+"""
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/config/config.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/config/config.py
new file mode 100644
index 0000000..bbece80
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/config/config.py
@@ -0,0 +1,93 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+CLI configuration mechanism.
+"""
+
+import os
+import pkg_resources
+from ruamel import yaml
+
+from jinja2.environment import Template
+
+
+CONFIG_FILE_NAME = 'config.yaml'
+
+
+class CliConfig(object):
+
+ def __init__(self, config_path):
+ with open(config_path) as f:
+ self._config = yaml.safe_load(f.read())
+
+ @classmethod
+ def create_config(cls, workdir):
+ config_path = os.path.join(workdir, CONFIG_FILE_NAME)
+ if not os.path.isfile(config_path):
+ config_template = pkg_resources.resource_string(
+ __package__,
+ 'config_template.yaml')
+
+ default_values = {
+ 'log_path': os.path.join(workdir, 'cli.log'),
+ 'enable_colors': True
+ }
+
+ template = Template(config_template)
+ rendered = template.render(**default_values)
+ with open(config_path, 'w') as f:
+ f.write(rendered)
+ f.write(os.linesep)
+
+ return cls(config_path)
+
+ @property
+ def logging(self):
+ return self.Logging(self._config.get('logging'))
+
+ class Logging(object):
+
+ def __init__(self, logging):
+ self._logging = logging or {}
+
+ @property
+ def filename(self):
+ return self._logging.get('filename')
+
+ @property
+ def loggers(self):
+ return self._logging.get('loggers', {})
+
+ @property
+ def execution(self):
+ return self.Execution(self._logging.get('execution'))
+
+ class Execution(object):
+
+ def __init__(self, execution_logging):
+ self._execution_logging = execution_logging or {}
+
+ @property
+ def colors_enabled(self):
+ return self.colors.get('enabled', False)
+
+ @property
+ def colors(self):
+ return self._execution_logging.get('colors', {})
+
+ @property
+ def formats(self):
+ return self._execution_logging.get('formats', {})
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/config/config_template.yaml b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/config/config_template.yaml
new file mode 100644
index 0000000..94fcac3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/config/config_template.yaml
@@ -0,0 +1,42 @@
+
+logging:
+
+ # Path to the file where CLI logs will be saved.
+ filename: {{ log_path }}
+
+ # Configure the logging level per logger.
+ loggers:
+
+ # Main logger of the CLI; provides basic descriptions of executed operations.
+ aria.cli.main: info
+
+ execution:
+ formats:
+ # Log format per verbosity level: 0 - no verbosity, 3 - high verbosity.
+ 0: '{message}'
+ 1: '{timestamp:%H:%M:%S} | {level[0]} | {message}'
+ 2: '{timestamp:%H:%M:%S} | {level[0]} | {implementation} | {message}'
+ 3: '{timestamp:%H:%M:%S} | {level[0]} | {implementation} | {inputs} | {message}'
+
+ colors:
+ enabled: true
+
+ level:
+ default: {'fore': 'lightmagenta_ex'}
+ error: {'fore': 'red', 'style': 'bright'}
+ timestamp:
+ default: {'fore': 'lightmagenta_ex'}
+ error: {'fore': 'red', 'style': 'bright'}
+ message:
+ default: {'fore': 'lightblue_ex'}
+ error: {'fore': 'red', 'style': 'bright'}
+ implementation:
+ default: {'fore': 'lightblack_ex'}
+ error: {'fore': 'red', 'style': 'bright'}
+ inputs:
+ default: {'fore': 'blue'}
+ error: {'fore': 'red', 'style': 'bright'}
+ traceback:
+ default: {'fore': 'red'}
+
+ marker: 'lightyellow_ex'
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/core/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/core/__init__.py
new file mode 100644
index 0000000..88a9801
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/core/__init__.py
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+CLI core package.
+"""
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/core/aria.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/core/aria.py
new file mode 100644
index 0000000..b84507c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/core/aria.py
@@ -0,0 +1,507 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Enhancements and ARIA-specific conveniences for `Click <http://click.pocoo.org>`__.
+"""
+
+import os
+import sys
+import difflib
+import traceback
+import inspect
+from functools import wraps
+
+import click
+
+from ..env import (
+ env,
+ logger
+)
+from .. import defaults
+from .. import helptexts
+from ..ascii_art import ARIA_ASCII_ART
+from ..inputs import inputs_to_dict
+from ... import __version__
+from ...utils.exceptions import get_exception_as_string
+
+
+CLICK_CONTEXT_SETTINGS = dict(
+ help_option_names=['-h', '--help'],
+ token_normalize_func=lambda param: param.lower())
+
+
+class MutuallyExclusiveOption(click.Option):
+ def __init__(self, *args, **kwargs):
+ self.mutually_exclusive = set(kwargs.pop('mutually_exclusive', tuple()))
+ self.mutuality_description = kwargs.pop('mutuality_description',
+ ', '.join(self.mutually_exclusive))
+ self.mutuality_error = kwargs.pop('mutuality_error',
+ helptexts.DEFAULT_MUTUALITY_ERROR_MESSAGE)
+ if self.mutually_exclusive:
+ help = kwargs.get('help', '')
+ kwargs['help'] = '{0}. {1}'.format(help, self._message)
+ super(MutuallyExclusiveOption, self).__init__(*args, **kwargs)
+
+ def handle_parse_result(self, ctx, opts, args):
+ if (self.name in opts) and self.mutually_exclusive.intersection(opts):
+ raise click.UsageError('Illegal usage: {0}'.format(self._message))
+ return super(MutuallyExclusiveOption, self).handle_parse_result(ctx, opts, args)
+
+ @property
+ def _message(self):
+ return '{0} be used together with {1} ({2}).'.format(
+ '{0} cannot'.format(', '.join(self.opts)) if hasattr(self, 'opts') else 'Cannot',
+ self.mutuality_description,
+ self.mutuality_error)
+
+
+def mutually_exclusive_option(*param_decls, **attrs):
+ """
+ Decorator for mutually exclusive options.
+
+ This decorator works similarly to `click.option`, but supports an extra ``mutually_exclusive``
+ argument, which is a list of argument names with which the option is mutually exclusive.
+
+ You can optionally also supply ``mutuality_description`` and ``mutuality_error`` to override the
+ default messages.
+
+ NOTE: All mutually exclusive options must use this. It's not enough to use it in just one of the
+ options.
+ """
+
+ # NOTE: This code is copied and slightly modified from click.decorators.option and
+ # click.decorators._param_memo. Unfortunately, using click's ``cls`` parameter support does not
+ # work as is with extra decorator arguments.
+
+ def decorator(func):
+ if 'help' in attrs:
+ attrs['help'] = inspect.cleandoc(attrs['help'])
+ param = MutuallyExclusiveOption(param_decls, **attrs)
+ if not hasattr(func, '__click_params__'):
+ func.__click_params__ = []
+ func.__click_params__.append(param)
+ return func
+ return decorator
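+
+# A minimal usage sketch (command and option names here are hypothetical):
+#
+# @command(name='dump')
+# @mutually_exclusive_option('--json', 'as_json', is_flag=True,
+# mutually_exclusive=('as_yaml',),
+# mutuality_description='--yaml')
+# @mutually_exclusive_option('--yaml', 'as_yaml', is_flag=True,
+# mutually_exclusive=('as_json',),
+# mutuality_description='--json')
+# def dump(as_json, as_yaml):
+# ...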
+
+
+def show_version(ctx, param, value):
+ if not value:
+ return
+
+ logger.info('{0} v{1}'.format(ARIA_ASCII_ART, __version__))
+ ctx.exit()
+
+
+def inputs_callback(ctx, param, value):
+ """
+ Allow any inputs provided to a command to arrive already processed, instead of having to
+ call ``inputs_to_dict`` inside the command.
+
+ ``@aria.options.inputs`` already calls this callback so that every time you use the option it
+ returns the inputs as a dictionary.
+ """
+ if not value:
+ return {}
+
+ return inputs_to_dict(value)
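+
+# For example, a command decorated with ``@options.inputs`` and invoked with repeated
+# ``-i`` options (``key=value`` pairs or paths to YAML files) receives one merged dict;
+# see ``inputs_to_dict`` for the exact accepted forms.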
+
+
+def set_verbosity_level(ctx, param, value):
+ if not value:
+ return
+
+ env.logging.verbosity_level = value
+
+
+def set_cli_except_hook():
+ def recommend(possible_solutions):
+ logger.info('Possible solutions:')
+ for solution in possible_solutions:
+ logger.info(' - {0}'.format(solution))
+
+ def new_excepthook(tpe, value, trace):
+ if env.logging.is_high_verbose_level():
+ # log error including traceback
+ logger.error(get_exception_as_string(tpe, value, trace))
+ else:
+ # write the full error to the log file
+ with open(env.logging.log_file, 'a') as log_file:
+ traceback.print_exception(
+ etype=tpe,
+ value=value,
+ tb=trace,
+ file=log_file)
+ # print only the error message
+ print value
+
+ if hasattr(value, 'possible_solutions'):
+ recommend(getattr(value, 'possible_solutions'))
+
+ sys.excepthook = new_excepthook
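+
+# In effect: at high verbosity the full traceback is logged to the console; otherwise
+# the traceback is appended to the log file and only the error message is printed,
+# followed by any 'possible_solutions' attached to the exception.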
+
+
+def pass_logger(func):
+ """
+ Simply passes the logger to a command.
+ """
+ # Wraps here makes sure the original docstring propagates to click
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ return func(logger=logger, *args, **kwargs)
+
+ return wrapper
+
+
+def pass_plugin_manager(func):
+ """
+ Simply passes the plugin manager to a command.
+ """
+ # Wraps here makes sure the original docstring propagates to click
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ return func(plugin_manager=env.plugin_manager, *args, **kwargs)
+
+ return wrapper
+
+
+def pass_model_storage(func):
+ """
+ Simply passes the model storage to a command.
+ """
+ # Wraps here makes sure the original docstring propagates to click
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ return func(model_storage=env.model_storage, *args, **kwargs)
+
+ return wrapper
+
+
+def pass_resource_storage(func):
+ """
+ Simply passes the resource storage to a command.
+ """
+ # Wraps here makes sure the original docstring propagates to click
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ return func(resource_storage=env.resource_storage, *args, **kwargs)
+
+ return wrapper
+
+
+def pass_context(func):
+ """
+ Make click context ARIA specific.
+
+ This exists purely for aesthetic reasons, otherwise some decorators are called
+ ``@click.something`` instead of ``@aria.something``.
+ """
+ return click.pass_context(func)
+
+
+class AliasedGroup(click.Group):
+ def __init__(self, *args, **kwargs):
+ self.max_suggestions = kwargs.pop("max_suggestions", 3)
+ self.cutoff = kwargs.pop("cutoff", 0.5)
+ super(AliasedGroup, self).__init__(*args, **kwargs)
+
+ def get_command(self, ctx, cmd_name):
+ cmd = click.Group.get_command(self, ctx, cmd_name)
+ if cmd is not None:
+ return cmd
+ matches = \
+ [x for x in self.list_commands(ctx) if x.startswith(cmd_name)]
+ if not matches:
+ return None
+ elif len(matches) == 1:
+ return click.Group.get_command(self, ctx, matches[0])
+ ctx.fail('Too many matches: {0}'.format(', '.join(sorted(matches))))
+
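+ # Prefix matching above means, for example, that 'aria service-t list' resolves
+ # to 'service-templates' as the only command with that prefix, while an ambiguous
+ # prefix fails with the sorted list of matches.
+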
+ def resolve_command(self, ctx, args):
+ """
+ Override Click's ``resolve_command`` method and append *Did you mean ...* suggestions to
+ the raised exception message.
+ """
+ try:
+ return super(AliasedGroup, self).resolve_command(ctx, args)
+ except click.exceptions.UsageError as error:
+ error_msg = str(error)
+ original_cmd_name = click.utils.make_str(args[0])
+ matches = difflib.get_close_matches(
+ original_cmd_name,
+ self.list_commands(ctx),
+ self.max_suggestions,
+ self.cutoff)
+ if matches:
+ error_msg += '{0}{0}Did you mean one of these?{0} {1}'.format(
+ os.linesep,
+ '{0} '.format(os.linesep).join(matches))
+ raise click.exceptions.UsageError(error_msg, error.ctx)
+
+
+def group(name):
+ """
+ Allow creating a group with a default Click context and a class for Click's *did-you-mean*
+ behavior without having to repeat them for every group.
+ """
+ return click.group(
+ name=name,
+ context_settings=CLICK_CONTEXT_SETTINGS,
+ cls=AliasedGroup)
+
+
+def command(*args, **kwargs):
+ """
+ Make Click commands ARIA specific.
+
+ This exists purely for aesthetic reasons, otherwise some decorators are called
+ ``@click.something`` instead of ``@aria.something``.
+ """
+ return click.command(*args, **kwargs)
+
+
+def argument(*args, **kwargs):
+ """
+ Make Click arguments specific to ARIA.
+
+ This exists purely for aesthetic reasons, otherwise some decorators are called
+ ``@click.something`` instead of ``@aria.something``.
+ """
+ return click.argument(*args, **kwargs)
+
+
+class Options(object):
+ def __init__(self):
+ """
+ The options API is nicer when you use each option by calling ``@aria.options.some_option``
+ instead of ``@aria.some_option``.
+
+ Note that some options are attributes and some are static methods. The reason for that is
+ that we want to be explicit regarding how a developer sees an option. If it can receive
+ arguments, it's a method - if not, it's an attribute.
+ """
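+ # For example (a hypothetical command):
+ #
+ # @aria.command(name='example')
+ # @aria.options.verbose() # method: may receive arguments
+ # @aria.options.json_output # attribute: used as-is
+ # def example(json_output):
+ # ...
+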
+ self.version = click.option(
+ '--version',
+ is_flag=True,
+ callback=show_version,
+ expose_value=False,
+ is_eager=True,
+ help=helptexts.VERSION)
+
+ self.json_output = click.option(
+ '--json-output',
+ is_flag=True,
+ help=helptexts.JSON_OUTPUT)
+
+ self.dry_execution = click.option(
+ '--dry',
+ is_flag=True,
+ help=helptexts.DRY_EXECUTION)
+
+ self.retry_failed_tasks = click.option(
+ '--retry-failed-tasks',
+ is_flag=True,
+ help=helptexts.RETRY_FAILED_TASK
+ )
+
+ self.reset_config = click.option(
+ '--reset-config',
+ is_flag=True,
+ help=helptexts.RESET_CONFIG)
+
+ self.descending = click.option(
+ '--descending',
+ required=False,
+ is_flag=True,
+ default=defaults.SORT_DESCENDING,
+ help=helptexts.DESCENDING)
+
+ self.service_template_filename = click.option(
+ '-n',
+ '--service-template-filename',
+ default=defaults.SERVICE_TEMPLATE_FILENAME,
+ help=helptexts.SERVICE_TEMPLATE_FILENAME)
+
+ self.service_template_mode_full = mutually_exclusive_option(
+ '-f',
+ '--full',
+ 'mode_full',
+ mutually_exclusive=('mode_types',),
+ is_flag=True,
+ help=helptexts.SHOW_FULL,
+ mutuality_description='-t, --types',
+ mutuality_error=helptexts.MODE_MUTUALITY_ERROR_MESSAGE)
+
+ self.service_mode_full = mutually_exclusive_option(
+ '-f',
+ '--full',
+ 'mode_full',
+ mutually_exclusive=('mode_graph',),
+ is_flag=True,
+ help=helptexts.SHOW_FULL,
+ mutuality_description='-g, --graph',
+ mutuality_error=helptexts.MODE_MUTUALITY_ERROR_MESSAGE)
+
+ self.mode_types = mutually_exclusive_option(
+ '-t',
+ '--types',
+ 'mode_types',
+ mutually_exclusive=('mode_full',),
+ is_flag=True,
+ help=helptexts.SHOW_TYPES,
+ mutuality_description='-f, --full',
+ mutuality_error=helptexts.MODE_MUTUALITY_ERROR_MESSAGE)
+
+ self.mode_graph = mutually_exclusive_option(
+ '-g',
+ '--graph',
+ 'mode_graph',
+ mutually_exclusive=('mode_full',),
+ is_flag=True,
+ help=helptexts.SHOW_GRAPH,
+ mutuality_description='-f, --full',
+ mutuality_error=helptexts.MODE_MUTUALITY_ERROR_MESSAGE)
+
+ self.format_json = mutually_exclusive_option(
+ '-j',
+ '--json',
+ 'format_json',
+ mutually_exclusive=('format_yaml',),
+ is_flag=True,
+ help=helptexts.SHOW_JSON,
+ mutuality_description='-y, --yaml',
+ mutuality_error=helptexts.FORMAT_MUTUALITY_ERROR_MESSAGE)
+
+ self.format_yaml = mutually_exclusive_option(
+ '-y',
+ '--yaml',
+ 'format_yaml',
+ mutually_exclusive=('format_json',),
+ is_flag=True,
+ help=helptexts.SHOW_YAML,
+ mutuality_description='-j, --json',
+ mutuality_error=helptexts.FORMAT_MUTUALITY_ERROR_MESSAGE)
+
+ @staticmethod
+ def verbose(expose_value=False):
+ return click.option(
+ '-v',
+ '--verbose',
+ count=True,
+ callback=set_verbosity_level,
+ expose_value=expose_value,
+ is_eager=True,
+ help=helptexts.VERBOSE)
+
+ @staticmethod
+ def inputs(help):
+ return click.option(
+ '-i',
+ '--inputs',
+ multiple=True,
+ callback=inputs_callback,
+ help=help)
+
+ @staticmethod
+ def force(help):
+ return click.option(
+ '-f',
+ '--force',
+ is_flag=True,
+ help=help)
+
+ @staticmethod
+ def task_max_attempts(default=defaults.TASK_MAX_ATTEMPTS):
+ return click.option(
+ '--task-max-attempts',
+ type=int,
+ default=default,
+ help=helptexts.TASK_MAX_ATTEMPTS.format(default))
+
+ @staticmethod
+ def sort_by(default='created_at'):
+ return click.option(
+ '--sort-by',
+ required=False,
+ default=default,
+ help=helptexts.SORT_BY)
+
+ @staticmethod
+ def task_retry_interval(default=defaults.TASK_RETRY_INTERVAL):
+ return click.option(
+ '--task-retry-interval',
+ type=int,
+ default=default,
+ help=helptexts.TASK_RETRY_INTERVAL.format(default))
+
+ @staticmethod
+ def service_id(required=False):
+ return click.option(
+ '-s',
+ '--service-id',
+ required=required,
+ help=helptexts.SERVICE_ID)
+
+ @staticmethod
+ def execution_id(required=False):
+ return click.option(
+ '-e',
+ '--execution-id',
+ required=required,
+ help=helptexts.EXECUTION_ID)
+
+ @staticmethod
+ def service_template_id(required=False):
+ return click.option(
+ '-t',
+ '--service-template-id',
+ required=required,
+ help=helptexts.SERVICE_TEMPLATE_ID)
+
+ @staticmethod
+ def service_template_path(required=False):
+ return click.option(
+ '-p',
+ '--service-template-path',
+ required=required,
+ type=click.Path(exists=True))
+
+ @staticmethod
+ def service_name(required=False):
+ return click.option(
+ '-s',
+ '--service-name',
+ required=required,
+ help=helptexts.SERVICE_ID)
+
+ @staticmethod
+ def service_template_name(required=False):
+ return click.option(
+ '-t',
+ '--service-template-name',
+ required=required,
+ help=helptexts.SERVICE_ID)
+
+ @staticmethod
+ def mark_pattern():
+ return click.option(
+ '-m',
+ '--mark-pattern',
+ help=helptexts.MARK_PATTERN,
+ type=str,
+ required=False
+ )
+
+options = Options()
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/csar.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/csar.py
new file mode 100644
index 0000000..40b1699
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/csar.py
@@ -0,0 +1,187 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Support for the CSAR (Cloud Service ARchive) packaging specification.
+
+See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#_Toc461787381>`__
+"""
+
+import os
+import logging
+import pprint
+import tempfile
+import zipfile
+
+import requests
+from ruamel import yaml
+
+CSAR_FILE_EXTENSION = '.csar'
+META_FILE = 'TOSCA-Metadata/TOSCA.meta'
+META_FILE_VERSION_KEY = 'TOSCA-Meta-File-Version'
+META_FILE_VERSION_VALUE = '1.0'
+META_CSAR_VERSION_KEY = 'CSAR-Version'
+META_CSAR_VERSION_VALUE = '1.1'
+META_CREATED_BY_KEY = 'Created-By'
+META_CREATED_BY_VALUE = 'ARIA'
+META_ENTRY_DEFINITIONS_KEY = 'Entry-Definitions'
+BASE_METADATA = {
+ META_FILE_VERSION_KEY: META_FILE_VERSION_VALUE,
+ META_CSAR_VERSION_KEY: META_CSAR_VERSION_VALUE,
+ META_CREATED_BY_KEY: META_CREATED_BY_VALUE,
+}
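+
+# A generated TOSCA-Metadata/TOSCA.meta therefore looks roughly like this (the entry
+# point name is illustrative):
+#
+# TOSCA-Meta-File-Version: 1.0
+# CSAR-Version: 1.1
+# Created-By: ARIA
+# Entry-Definitions: service_template.yaml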
+
+
+def write(service_template_path, destination, logger):
+
+ service_template_path = os.path.abspath(os.path.expanduser(service_template_path))
+ source = os.path.dirname(service_template_path)
+ entry = os.path.basename(service_template_path)
+
+ meta_file = os.path.join(source, META_FILE)
+ if not os.path.isdir(source):
+ raise ValueError('{0} is not a directory. Please specify the service template '
+ 'directory.'.format(source))
+ if not os.path.isfile(service_template_path):
+ raise ValueError('{0} does not exist. Please specify a valid entry point.'
+ .format(service_template_path))
+ if os.path.exists(destination):
+ raise ValueError('{0} already exists. Please provide a path to where the CSAR should be '
+ 'created.'.format(destination))
+ if os.path.exists(meta_file):
+ raise ValueError('{0} already exists. This command generates a meta file for you. Please '
+ 'remove the existing meta file.'.format(meta_file))
+ metadata = BASE_METADATA.copy()
+ metadata[META_ENTRY_DEFINITIONS_KEY] = entry
+ logger.debug('Compressing root directory to ZIP')
+ with zipfile.ZipFile(destination, 'w', zipfile.ZIP_DEFLATED) as f:
+ for root, _, files in os.walk(source):
+ for file in files:
+ file_full_path = os.path.join(root, file)
+ file_relative_path = os.path.relpath(file_full_path, source)
+ logger.debug('Writing to archive: {0}'.format(file_relative_path))
+ f.write(file_full_path, file_relative_path)
+ logger.debug('Writing new metadata file to {0}'.format(META_FILE))
+ f.writestr(META_FILE, yaml.dump(metadata, default_flow_style=False))
+
+
+class _CSARReader(object):
+
+ def __init__(self, source, destination, logger):
+ self.logger = logger
+ if os.path.isdir(destination) and os.listdir(destination):
+ raise ValueError('{0} already exists and is not empty. '
+ 'Please specify the location where the CSAR '
+ 'should be extracted.'.format(destination))
+ downloaded_csar = '://' in source
+ if downloaded_csar:
+ file_descriptor, download_target = tempfile.mkstemp()
+ os.close(file_descriptor)
+ self._download(source, download_target)
+ source = download_target
+ self.source = os.path.expanduser(source)
+ self.destination = os.path.expanduser(destination)
+ self.metadata = {}
+ try:
+ if not os.path.exists(self.source):
+ raise ValueError('{0} does not exist. Please specify a valid CSAR path.'
+ .format(self.source))
+ if not zipfile.is_zipfile(self.source):
+ raise ValueError('{0} is not a valid CSAR.'.format(self.source))
+ self._extract()
+ self._read_metadata()
+ self._validate()
+ finally:
+ if downloaded_csar:
+ os.remove(self.source)
+
+ @property
+ def created_by(self):
+ return self.metadata.get(META_CREATED_BY_KEY)
+
+ @property
+ def csar_version(self):
+ return self.metadata.get(META_CSAR_VERSION_KEY)
+
+ @property
+ def meta_file_version(self):
+ return self.metadata.get(META_FILE_VERSION_KEY)
+
+ @property
+ def entry_definitions(self):
+ return self.metadata.get(META_ENTRY_DEFINITIONS_KEY)
+
+ @property
+ def entry_definitions_yaml(self):
+ with open(os.path.join(self.destination, self.entry_definitions)) as f:
+ return yaml.load(f)
+
+ def _extract(self):
+ self.logger.debug('Extracting CSAR contents')
+ if not os.path.exists(self.destination):
+ os.mkdir(self.destination)
+ with zipfile.ZipFile(self.source) as f:
+ f.extractall(self.destination)
+ self.logger.debug('CSAR contents successfully extracted')
+
+ def _read_metadata(self):
+ csar_metafile = os.path.join(self.destination, META_FILE)
+ if not os.path.exists(csar_metafile):
+ raise ValueError('Metadata file {0} is missing from the CSAR'.format(csar_metafile))
+ self.logger.debug('CSAR metadata file: {0}'.format(csar_metafile))
+ self.logger.debug('Attempting to parse CSAR metadata YAML')
+ with open(csar_metafile) as f:
+ self.metadata.update(yaml.load(f))
+ self.logger.debug('CSAR metadata:{0}{1}'.format(os.linesep, pprint.pformat(self.metadata)))
+
+ def _validate(self):
+ def validate_key(key, expected=None):
+ if not self.metadata.get(key):
+ raise ValueError('{0} is missing from the metadata file.'.format(key))
+ actual = str(self.metadata[key])
+ if expected and actual != expected:
+ raise ValueError('{0} is expected to be {1} in the metadata file while it is in '
+ 'fact {2}.'.format(key, expected, actual))
+ validate_key(META_FILE_VERSION_KEY, expected=META_FILE_VERSION_VALUE)
+ validate_key(META_CSAR_VERSION_KEY, expected=META_CSAR_VERSION_VALUE)
+ validate_key(META_CREATED_BY_KEY)
+ validate_key(META_ENTRY_DEFINITIONS_KEY)
+ self.logger.debug('CSAR entry definitions: {0}'.format(self.entry_definitions))
+ entry_definitions_path = os.path.join(self.destination, self.entry_definitions)
+ if not os.path.isfile(entry_definitions_path):
+ raise ValueError('The entry definitions {0} referenced by the metadata file does not '
+ 'exist.'.format(entry_definitions_path))
+
+ def _download(self, url, target):
+ response = requests.get(url, stream=True)
+ if response.status_code != 200:
+ raise ValueError('Server at {0} returned a {1} status code'
+ .format(url, response.status_code))
+ self.logger.info('Downloading {0} to {1}'.format(url, target))
+ with open(target, 'wb') as f:
+ for chunk in response.iter_content(chunk_size=8192):
+ if chunk:
+ f.write(chunk)
+
+
+def read(source, destination=None, logger=None):
+ destination = destination or tempfile.mkdtemp()
+ logger = logger or logging.getLogger('dummy')
+ return _CSARReader(source=source, destination=destination, logger=logger)
+
+
+def is_csar_archive(source):
+ return source.endswith(CSAR_FILE_EXTENSION)
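+
+# Minimal usage sketch (paths and logger are illustrative):
+#
+# write('/path/to/template/service_template.yaml', '/tmp/my-app.csar', logger)
+# reader = read('/tmp/my-app.csar') # extracts to a temp dir and validates
+# print reader.entry_definitions # 'service_template.yaml'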
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/defaults.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/defaults.py
new file mode 100644
index 0000000..e84abc0
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/defaults.py
@@ -0,0 +1,30 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Various CLI default values.
+"""
+
+#: Default service template filename
+SERVICE_TEMPLATE_FILENAME = 'service_template.yaml'
+
+#: Default task max attempts
+TASK_MAX_ATTEMPTS = 30
+
+#: Default task retry interval
+TASK_RETRY_INTERVAL = 30
+
+#: Default sort descending
+SORT_DESCENDING = False
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/env.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/env.py
new file mode 100644
index 0000000..84bdebe
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/env.py
@@ -0,0 +1,127 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Environment (private)
+"""
+
+import os
+import shutil
+
+from .config import config
+from .logger import Logging
+from .. import (application_model_storage, application_resource_storage)
+from ..orchestrator.plugin import PluginManager
+from ..storage.sql_mapi import SQLAlchemyModelAPI
+from ..storage.filesystem_rapi import FileSystemResourceAPI
+
+
+ARIA_DEFAULT_WORKDIR_NAME = '.aria'
+
+
+class _Environment(object):
+
+ def __init__(self, workdir):
+
+ self._workdir = workdir
+ self._init_workdir()
+
+ self._config = config.CliConfig.create_config(workdir)
+ self._logging = Logging(self._config)
+
+ self._model_storage_dir = os.path.join(workdir, 'models')
+ self._resource_storage_dir = os.path.join(workdir, 'resources')
+ self._plugins_dir = os.path.join(workdir, 'plugins')
+
+ # initialized lazily
+ self._model_storage = None
+ self._resource_storage = None
+ self._plugin_manager = None
+
+ @property
+ def workdir(self):
+ return self._workdir
+
+ @property
+ def config(self):
+ return self._config
+
+ @property
+ def logging(self):
+ return self._logging
+
+ @property
+ def model_storage(self):
+ if not self._model_storage:
+ self._model_storage = self._init_sqlite_model_storage()
+ return self._model_storage
+
+ @property
+ def resource_storage(self):
+ if not self._resource_storage:
+ self._resource_storage = self._init_fs_resource_storage()
+ return self._resource_storage
+
+ @property
+ def plugin_manager(self):
+ if not self._plugin_manager:
+ self._plugin_manager = self._init_plugin_manager()
+ return self._plugin_manager
+
+ def reset(self, reset_config):
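+ # With reset_config the entire workdir is removed; otherwise everything under
+ # it except the configuration file is deleted.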
+ if reset_config:
+ shutil.rmtree(self._workdir)
+ else:
+ _, dirs, files = next(os.walk(self._workdir))
+ files.remove(config.CONFIG_FILE_NAME)
+
+ for dir_ in dirs:
+ shutil.rmtree(os.path.join(self._workdir, dir_))
+ for file_ in files:
+ os.remove(os.path.join(self._workdir, file_))
+
+ def _init_workdir(self):
+ if not os.path.exists(self._workdir):
+ os.makedirs(self._workdir)
+
+ def _init_sqlite_model_storage(self):
+ if not os.path.exists(self._model_storage_dir):
+ os.makedirs(self._model_storage_dir)
+
+ initiator_kwargs = dict(base_dir=self._model_storage_dir)
+ return application_model_storage(
+ SQLAlchemyModelAPI,
+ initiator_kwargs=initiator_kwargs)
+
+ def _init_fs_resource_storage(self):
+ if not os.path.exists(self._resource_storage_dir):
+ os.makedirs(self._resource_storage_dir)
+
+ fs_kwargs = dict(directory=self._resource_storage_dir)
+ return application_resource_storage(
+ FileSystemResourceAPI,
+ api_kwargs=fs_kwargs)
+
+ def _init_plugin_manager(self):
+ if not os.path.exists(self._plugins_dir):
+ os.makedirs(self._plugins_dir)
+
+ return PluginManager(self.model_storage, self._plugins_dir)
+
+
+env = _Environment(os.path.join(
+ os.environ.get('ARIA_WORKDIR', os.path.expanduser('~')), ARIA_DEFAULT_WORKDIR_NAME))
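+# For example, with ARIA_WORKDIR unset the working directory is ~/.aria; with
+# ARIA_WORKDIR=/opt/aria it would be /opt/aria/.aria.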
+
+logger = env.logging.logger
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/exceptions.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/exceptions.py
new file mode 100644
index 0000000..7da9836
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/exceptions.py
@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+CLI exceptions.
+"""
+
+from ..exceptions import AriaError
+
+
+class AriaCliError(AriaError):
+ pass
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/execution_logging.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/execution_logging.py
new file mode 100644
index 0000000..915038b
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/execution_logging.py
@@ -0,0 +1,243 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Formatting for ``executions`` sub-commands.
+"""
+
+import os
+import re
+from StringIO import StringIO
+from functools import partial
+
+from . import (
+ logger,
+ color
+)
+from .env import env
+
+
+FIELD_TYPE = 'field_type'
+LEVEL = 'level'
+TIMESTAMP = 'timestamp'
+MESSAGE = 'message'
+IMPLEMENTATION = 'implementation'
+INPUTS = 'inputs'
+TRACEBACK = 'traceback'
+MARKER = 'marker'
+
+FINAL_STATES = 'final_states'
+SUCCESS_STATE = 'succeeded'
+CANCEL_STATE = 'canceled'
+FAIL_STATE = 'failed'
+
+_EXECUTION_PATTERN = "\'.*\' workflow execution {0}".format
+# To build this regex we need literal curly brackets around the field name, which would
+# have to be escaped by doubling them inside a format string. To avoid that awkwardness,
+# the brackets are injected through format arguments instead.
+_FIELD_TYPE_PATTERN = partial('.*({starting}{0}{closing}).*'.format, starting='{', closing='.*?}')
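+# For example, _FIELD_TYPE_PATTERN(MESSAGE) yields '.*({message.*?}).*', which captures
+# the '{message}' placeholder (including any format spec) inside a format string.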
+
+_PATTERNS = {
+ FINAL_STATES: {
+ SUCCESS_STATE: re.compile(_EXECUTION_PATTERN(SUCCESS_STATE)),
+ CANCEL_STATE: re.compile(_EXECUTION_PATTERN(CANCEL_STATE)),
+ FAIL_STATE: re.compile(_EXECUTION_PATTERN(FAIL_STATE)),
+ },
+ FIELD_TYPE: {
+ IMPLEMENTATION: re.compile(_FIELD_TYPE_PATTERN(IMPLEMENTATION)),
+ LEVEL: re.compile(_FIELD_TYPE_PATTERN(LEVEL)),
+ MESSAGE: re.compile(_FIELD_TYPE_PATTERN(MESSAGE)),
+ INPUTS: re.compile(_FIELD_TYPE_PATTERN(INPUTS)),
+ TIMESTAMP: re.compile(_FIELD_TYPE_PATTERN(TIMESTAMP))
+ }
+}
+
+_FINAL_STATES = {
+ SUCCESS_STATE: color.Colors.Fore.GREEN,
+ CANCEL_STATE: color.Colors.Fore.YELLOW,
+ FAIL_STATE: color.Colors.Fore.RED
+}
+
+_DEFAULT_COLORS = {
+ LEVEL: {
+ 'default': {'fore': 'lightmagenta_ex'},
+ 'error': {'fore': 'red', 'style': 'bright'},
+ },
+ TIMESTAMP: {
+ 'default': {'fore': 'lightmagenta_ex'},
+ 'error': {'fore': 'red', 'style': 'bright'},
+ },
+ MESSAGE: {
+ 'default': {'fore': 'lightblue_ex'},
+ 'error': {'fore': 'red', 'style': 'bright'},
+ },
+ IMPLEMENTATION: {
+ 'default': {'fore': 'lightblack_ex'},
+ 'error': {'fore': 'red', 'style': 'bright'},
+ },
+ INPUTS: {
+ 'default': {'fore': 'blue'},
+ 'error': {'fore': 'red', 'style': 'bright'},
+ },
+ TRACEBACK: {'default': {'fore': 'red'}},
+
+ MARKER: 'lightyellow_ex'
+}
+
+_DEFAULT_FORMATS = {
+ logger.NO_VERBOSE: '{message}',
+ logger.LOW_VERBOSE: '{timestamp:%H:%M:%S} | {level[0]} | {message}',
+ logger.MEDIUM_VERBOSE: '{timestamp:%H:%M:%S} | {level[0]} | {implementation} | {message}',
+ logger.HIGH_VERBOSE:
+ '{timestamp:%H:%M:%S} | {level[0]} | {implementation} | {inputs} | {message}'
+}
+
+
+def stylize_log(item, mark_pattern):
+
+ # implementation
+ if item.task:
+ # operation task
+ implementation = item.task.function
+ inputs = dict(arg.unwrapped for arg in item.task.arguments.itervalues())
+ else:
+ # execution task
+ implementation = item.execution.workflow_name
+ inputs = dict(inp.unwrapped for inp in item.execution.inputs.itervalues())
+
+ stylized_str = color.StringStylizer(_get_format())
+ _populate_level(stylized_str, item)
+ _populate_timestamp(stylized_str, item)
+ _populate_message(stylized_str, item, mark_pattern)
+ _populate_inputs(stylized_str, inputs, item, mark_pattern)
+ _populate_implementation(stylized_str, implementation, item, mark_pattern)
+
+ msg = StringIO()
+ msg.write(str(stylized_str))
+ # Add the exception and the error msg.
+ if item.traceback and env.logging.verbosity_level >= logger.MEDIUM_VERBOSE:
+ msg.write(os.linesep)
+ msg.writelines(_color_traceback('\t' + '|' + line, item, mark_pattern)
+ for line in item.traceback.splitlines(True))
+
+ return msg.getvalue()
+
+
+def log(item, mark_pattern=None, *args, **kwargs):
+ leveled_log = getattr(env.logging.logger, item.level.lower())
+ return leveled_log(stylize_log(item, mark_pattern), *args, **kwargs)
+
+
+def log_list(iterator, mark_pattern=None):
+ any_logs = False
+ for item in iterator:
+ log(item, mark_pattern)
+ any_logs = True
+ return any_logs
+
+
+def _get_format():
+ return (env.config.logging.execution.formats.get(env.logging.verbosity_level) or
+ _DEFAULT_FORMATS.get(env.logging.verbosity_level))
+
+
+def _get_styles(field_type):
+ return env.config.logging.execution.colors[field_type]
+
+
+def _is_color_enabled():
+ # Color is applied only when enabled in the user configuration
+ return env.config.logging.execution.colors_enabled
+
+
+def _get_marker_schema():
+ return color.ColorSpec(back=_get_styles(MARKER))
+
+
+def _populate_implementation(str_, implementation, log_item, mark_pattern=None):
+ _stylize(str_, implementation, log_item, IMPLEMENTATION, mark_pattern)
+
+
+def _populate_inputs(str_, inputs, log_item, mark_pattern=None):
+ _stylize(str_, inputs, log_item, INPUTS, mark_pattern)
+
+
+def _populate_timestamp(str_, log_item):
+ _stylize(str_, log_item.created_at, log_item, TIMESTAMP)
+
+
+def _populate_message(str_, log_item, mark_pattern=None):
+ _stylize(str_, log_item.msg, log_item, MESSAGE, mark_pattern)
+
+
+def _populate_level(str_, log_item):
+ _stylize(str_, log_item.level[0], log_item, LEVEL)
+
+
+def _stylize(stylized_str, msg, log_item, msg_type, mark_pattern=None):
+ match = re.match(_PATTERNS[FIELD_TYPE][msg_type], stylized_str._str)
+ if not match:
+ return
+ matched_substr = match.group(1)
+
+ substring = color.StringStylizer(matched_substr)
+
+ # handle format
+ substring.format(**{msg_type: msg})
+
+ if _is_color_enabled():
+ # handle color
+ substring.color(_resolve_schema(msg_type, log_item))
+ if not _is_end_execution_log(log_item):
+ # handle highlighting
+ substring.highlight(mark_pattern, _get_marker_schema())
+
+ stylized_str.replace(matched_substr, substring)
+
+
+def _color_traceback(traceback, log_item, mark_pattern):
+ if _is_color_enabled():
+ stylized_string = color.StringStylizer(traceback, _resolve_schema(TRACEBACK, log_item))
+ stylized_string.highlight(mark_pattern, _get_marker_schema())
+ return stylized_string
+ return traceback
+
+
+def _is_end_execution_log(log_item):
+ return not log_item.task and bool(_end_execution_schema(log_item))
+
+
+def _end_execution_schema(log_item):
+ for state, pattern in _PATTERNS[FINAL_STATES].items():
+ if re.match(pattern, log_item.msg):
+ return _FINAL_STATES[state]
+
+
+def _resolve_schema(msg_type, log_item):
+ if _is_end_execution_log(log_item):
+ return _end_execution_schema(log_item)
+ else:
+ return color.ColorSpec(
+ **(
+ # retrieve the schema from the user config according to the level
+ _get_styles(msg_type).get(log_item.level.lower()) or
+ # retrieve the default schema from the user config
+ _get_styles(msg_type).get('default') or
+ # retrieve the schema from the aria default config according to the level
+ _DEFAULT_COLORS[msg_type].get(log_item.level.lower()) or
+ # retrieve the default schema from the aria default config
+ _DEFAULT_COLORS[msg_type].get('default')
+ )
+ )
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/helptexts.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/helptexts.py
new file mode 100644
index 0000000..5ab353a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/helptexts.py
@@ -0,0 +1,62 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Gathers all CLI command help texts in one place.
+"""
+
+DEFAULT_MUTUALITY_ERROR_MESSAGE = 'mutually exclusive'
+VERBOSE = \
+ "Show verbose output; you can supply this up to three times (i.e. -vvv)"
+
+VERSION = "Display the version and exit"
+FORCE_RESET = "Confirmation for resetting ARIA's working directory"
+RESET_CONFIG = "Reset ARIA's user configuration"
+
+SERVICE_TEMPLATE_ID = "The unique identifier for the service template"
+SERVICE_ID = "The unique identifier for the service"
+EXECUTION_ID = "The unique identifier for the execution"
+
+SERVICE_TEMPLATE_PATH = "The path to the application's service template file"
+SERVICE_TEMPLATE_FILENAME = (
+ "The name of the archive's main service template file "
+ "(only relevant if uploading a non-CSAR archive)")
+INPUTS_PARAMS_USAGE = (
+ '(can be provided as wildcard based paths '
+ '("inp?.yaml", "/my_inputs/", etc.) to YAML files, a JSON string or as '
+ '"key1=value1;key2=value2"); this argument can be used multiple times')
+SERVICE_INPUTS = "Inputs for the service {0}".format(INPUTS_PARAMS_USAGE)
+EXECUTION_INPUTS = "Inputs for the execution {0}".format(INPUTS_PARAMS_USAGE)
+
+TASK_RETRY_INTERVAL = \
+ "How long of a minimal interval should occur between task retry attempts [default: {0}]"
+TASK_MAX_ATTEMPTS = \
+ "How many times should a task be attempted in case of failures [default: {0}]"
+DRY_EXECUTION = "Execute a workflow dry run (prints operations information without causing side " \
+ "effects)"
+RETRY_FAILED_TASK = "Retry tasks that failed in the previous execution attempt"
+IGNORE_AVAILABLE_NODES = "Delete the service even if it has available nodes"
+SORT_BY = "Key for sorting the list"
+DESCENDING = "Sort list in descending order [default: False]"
+JSON_OUTPUT = "Output logs in JSON format"
+MARK_PATTERN = "Mark a regular expression pattern in the logs"
+
+SHOW_FULL = "Show full information"
+SHOW_JSON = "Show in JSON format (implies --full)"
+SHOW_YAML = "Show in YAML format (implies --full)"
+SHOW_TYPES = "Show only the type hierarchies"
+SHOW_GRAPH = "Show only the node graph"
+MODE_MUTUALITY_ERROR_MESSAGE = 'only one mode is possible'
+FORMAT_MUTUALITY_ERROR_MESSAGE = 'only one format is possible'
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/inputs.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/inputs.py
new file mode 100644
index 0000000..bea3e1a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/inputs.py
@@ -0,0 +1,124 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Helpers for validating and coercing service template inputs.
+"""
+
+import os
+import glob
+from ruamel import yaml
+
+from .env import logger
+from .exceptions import AriaCliError
+
+
+def inputs_to_dict(resources):
+ """
+ Returns a dictionary of inputs
+
+ :param resources: can be:
+
+ * list of files
+ * single file
+ * directory containing multiple input files
+ * ``key1=value1;key2=value2`` pairs string.
+ * string formatted as JSON/YAML
+ * wildcard based string (e.g. ``*-inputs.yaml``)
+ """
+ if not resources:
+ return dict()
+
+ parsed_dict = {}
+
+ for resource in resources:
+ logger.debug('Processing inputs source: {0}'.format(resource))
+ # Workflow parameters may arrive as an empty dictionary rather than a string; skip those
+ if isinstance(resource, basestring):
+ try:
+ parsed_dict.update(_parse_single_input(resource))
+ except AriaCliError:
+ raise AriaCliError(
+ "Invalid input: {0}. It must represent a dictionary. "
+ "Valid values can be one of:{1} "
+ "- A path to a YAML file{1} "
+ "- A path to a directory containing YAML files{1} "
+ "- A single quoted wildcard based path "
+ "(e.g. '*-inputs.yaml'){1} "
+ "- A string formatted as JSON/YAML{1} "
+ "- A string formatted as key1=value1;key2=value2".format(
+ resource, os.linesep))
+ return parsed_dict
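+
+# A hedged usage sketch (the paths below are hypothetical):
+#
+#     inputs_to_dict(['key1=value1;key2=value2'])    # -> {'key1': 'value1', 'key2': 'value2'}
+#     inputs_to_dict(['/my_inputs/', 'extra.yaml'])  # merges every parsed YAML dictionary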
+
+
+def _parse_single_input(resource):
+ try:
+ # parse resource as string representation of a dictionary
+ return _plain_string_to_dict(resource)
+ except AriaCliError:
+ input_files = glob.glob(resource)
+ parsed_dict = dict()
+ if os.path.isdir(resource):
+ for input_file in os.listdir(resource):
+ parsed_dict.update(
+ _parse_yaml_path(os.path.join(resource, input_file)))
+ elif input_files:
+ for input_file in input_files:
+ parsed_dict.update(_parse_yaml_path(input_file))
+ else:
+ parsed_dict.update(_parse_yaml_path(resource))
+ return parsed_dict
+
+
+def _parse_yaml_path(resource):
+
+ try:
+ # if resource is a path - parse as a yaml file
+ if os.path.isfile(resource):
+ with open(resource) as f:
+ content = yaml.load(f.read())
+ else:
+ # parse resource content as yaml
+ content = yaml.load(resource)
+ except yaml.error.YAMLError as e:
+ raise AriaCliError("'{0}' is not a valid YAML. {1}".format(
+ resource, str(e)))
+
+ # Empty files return None
+ content = content or dict()
+ if not isinstance(content, dict):
+ raise AriaCliError()
+
+ return content
+
+
+def _plain_string_to_dict(input_string):
+ input_string = input_string.strip()
+ input_dict = {}
+ mapped_inputs = input_string.split(';')
+ for mapped_input in mapped_inputs:
+ mapped_input = mapped_input.strip()
+ if not mapped_input:
+ continue
+ split_mapping = mapped_input.split('=')
+ try:
+ key = split_mapping[0].strip()
+ value = split_mapping[1].strip()
+ except IndexError:
+ raise AriaCliError(
+ "Invalid input format: {0}, the expected format is: "
+ "key1=value1;key2=value2".format(input_string))
+ input_dict[key] = value
+ return input_dict
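+
+# For example (illustrative): _plain_string_to_dict(' a=1 ; b=2 ') returns {'a': '1', 'b': '2'}.
+# Values are always strings, and anything past a second '=' in a pair is dropped.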
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/logger.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/logger.py
new file mode 100644
index 0000000..14baae0
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/logger.py
@@ -0,0 +1,134 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Centralized logging configuration and formatting.
+"""
+
+import os
+import copy
+import logging
+from logutils import dictconfig
+
+HIGH_VERBOSE = 3
+MEDIUM_VERBOSE = 2
+LOW_VERBOSE = 1
+NO_VERBOSE = 0
+
+LOGGER_CONFIG_TEMPLATE = {
+ "version": 1,
+ "formatters": {
+ "file": {
+ "format": "%(asctime)s [%(levelname)s] %(message)s"
+ },
+ "console": {
+ "format": "%(message)s"
+ }
+ },
+ "handlers": {
+ "file": {
+ "class": "logging.handlers.RotatingFileHandler",
+ "formatter": "file",
+ "maxBytes": "5000000",
+ "backupCount": "20"
+ },
+ "console": {
+ "class": "logging.StreamHandler",
+ "stream": "ext://sys.stdout",
+ "formatter": "console"
+ }
+ },
+ "disable_existing_loggers": False
+}
+
+
+class Logging(object):
+
+ def __init__(self, config):
+ self._log_file = None
+ self._verbosity_level = NO_VERBOSE
+ self._all_loggers_names = []
+ self._configure_loggers(config)
+ self._lgr = logging.getLogger('aria.cli.main')
+
+ @property
+ def logger(self):
+ return self._lgr
+
+ @property
+ def log_file(self):
+ return self._log_file
+
+ @property
+ def verbosity_level(self):
+ return self._verbosity_level
+
+ @verbosity_level.setter
+ def verbosity_level(self, level):
+ self._verbosity_level = level
+ if self.is_high_verbose_level():
+ for logger_name in self._all_loggers_names:
+ logging.getLogger(logger_name).setLevel(logging.DEBUG)
+
+ def is_high_verbose_level(self):
+ return self.verbosity_level == HIGH_VERBOSE
+
+ def _configure_loggers(self, config):
+ loggers_config = config.logging.loggers
+ logfile = config.logging.filename
+
+ logger_dict = copy.deepcopy(LOGGER_CONFIG_TEMPLATE)
+ if logfile:
+ # set filename on file handler
+ logger_dict['handlers']['file']['filename'] = logfile
+ logfile_dir = os.path.dirname(logfile)
+ if not os.path.exists(logfile_dir):
+ os.makedirs(logfile_dir)
+ self._log_file = logfile
+ else:
+ del logger_dict['handlers']['file']
+
+ # add handlers to all loggers
+ loggers = {}
+ for logger_name in loggers_config:
+ loggers[logger_name] = dict(handlers=list(logger_dict['handlers'].keys()))
+ self._all_loggers_names.append(logger_name)
+ logger_dict['loggers'] = loggers
+
+ # set level for all loggers
+ for logger_name, logging_level in loggers_config.iteritems():
+ log = logging.getLogger(logger_name)
+ level = logging._levelNames[logging_level.upper()]
+ log.setLevel(level)
+
+ dictconfig.dictConfig(logger_dict)
+
+
+class ModelLogIterator(object):
+
+ def __init__(self, model_storage, execution_id, filters=None, sort=None, offset=0):
+ self._last_visited_id = offset
+ self._model_storage = model_storage
+ self._execution_id = execution_id
+ self._additional_filters = filters or {}
+ self._sort = sort or {}
+
+ def __iter__(self):
+ filters = dict(execution_fk=self._execution_id, id=dict(gt=self._last_visited_id))
+ filters.update(self._additional_filters)
+
+ for log in self._model_storage.log.iter(filters=filters, sort=self._sort):
+ self._last_visited_id = log.id
+ yield log
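+
+# A hedged sketch of tailing an execution's logs with this iterator (the loop condition is
+# hypothetical; ``log_list`` lives in the sibling ``execution_logging`` module):
+#
+#     iterator = ModelLogIterator(model_storage, execution_id)
+#     while execution_is_active():
+#         execution_logging.log_list(iterator)  # prints only items newer than the last seen id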
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/main.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/main.py
new file mode 100644
index 0000000..640360b
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/main.py
@@ -0,0 +1,65 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Executable entry point into the CLI.
+"""
+
+from aria import install_aria_extensions
+from aria.cli import commands
+from aria.cli.core import aria
+
+
+@aria.group(name='aria')
+@aria.options.verbose()
+@aria.options.version
+def _aria():
+ """
+ ARIA's Command Line Interface.
+
+ To activate bash-completion run::
+
+ eval "$(_ARIA_COMPLETE=source aria)"
+
+ ARIA's working directory resides by default in "~/.aria". To change it, set the environment
+ variable ARIA_WORKDIR to something else (e.g. "/tmp/").
+ """
+ aria.set_cli_except_hook()
+
+
+def _register_commands():
+ """
+ Register the CLI's commands.
+ """
+
+ _aria.add_command(commands.service_templates.service_templates)
+ _aria.add_command(commands.node_templates.node_templates)
+ _aria.add_command(commands.services.services)
+ _aria.add_command(commands.nodes.nodes)
+ _aria.add_command(commands.workflows.workflows)
+ _aria.add_command(commands.executions.executions)
+ _aria.add_command(commands.plugins.plugins)
+ _aria.add_command(commands.logs.logs)
+ _aria.add_command(commands.reset.reset)
+
+
+def main():
+ install_aria_extensions()
+ _register_commands()
+ _aria()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/service_template_utils.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/service_template_utils.py
new file mode 100644
index 0000000..2af72a0
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/service_template_utils.py
@@ -0,0 +1,129 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Loading mechanism for service templates.
+"""
+
+import os
+from urlparse import urlparse
+
+from . import csar
+from . import utils
+from .exceptions import AriaCliError
+from ..utils import archive as archive_utils
+
+
+def get(source, service_template_filename):
+ """
+ Resolve a source into a path to the main service template file.
+
+ The behavior depends on the content of the ``source`` argument:
+
+ * local ``.yaml`` file: return the file
+ * local archive (``.csar``, ``.zip``, ``.tar``, ``.tar.gz``, and ``.tar.bz2``): extract it
+ locally and return the path to the service template file
+ * URL: download and get service template from downloaded archive
+ * GitHub repo: download and get service template from downloaded archive
+
+ :param source: path/URL/GitHub repo to archive/service-template file
+ :type source: basestring
+ :param service_template_filename: path to the service template file if source is a non-CSAR
+ archive (with CSAR archives, this is read from the metadata file)
+ :type service_template_filename: basestring
+ :return: path to main service template file
+ :rtype: basestring
+ """
+ if urlparse(source).scheme:
+ downloaded_file = utils.download_file(source)
+ return _get_service_template_file_from_archive(
+ downloaded_file, service_template_filename)
+ elif os.path.isfile(source):
+ if _is_archive(source):
+ return _get_service_template_file_from_archive(source, service_template_filename)
+ else:
+ # TODO: consider verifying that this is actually a YAML file
+ return os.path.abspath(source)
+ elif len(source.split('/')) == 2:
+ url = _map_to_github_url(source)
+ downloaded_file = utils.download_file(url)
+ return _get_service_template_file_from_archive(
+ downloaded_file, service_template_filename)
+ else:
+ raise AriaCliError(
+ 'You must provide either a path to a local file, a remote URL '
+ 'or a GitHub `organization/repository[:tag/branch]`')
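+
+# Hedged examples of how ``get`` resolves different sources (all paths and names illustrative):
+#
+#     get('/tmp/template.yaml', None)                    # local YAML file: returned as-is
+#     get('/tmp/app.csar', None)                         # CSAR: main template from its metadata
+#     get('http://example.org/app.tar.gz', 'main.yaml')  # URL: downloaded, then extracted
+#     get('my-org/my-repo:1.0', 'main.yaml')             # GitHub: archive tarball downloaded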
+
+
+def _get_service_template_file_from_archive(archive, service_template_filename):
+ """
+ Extract archive to temporary location and get path to service template file.
+
+ :param archive: path to archive file
+ :type archive: basestring
+ :param service_template_filename: path to service template file relative to archive
+ :type service_template_filename: basestring
+ :return: absolute path to service template file
+ :rtype: basestring
+
+ """
+ if csar.is_csar_archive(archive):
+ service_template_file = _extract_csar_archive(archive)
+ else:
+ extract_directory = archive_utils.extract_archive(archive)
+ service_template_dir = os.path.join(
+ extract_directory,
+ os.listdir(extract_directory)[0],
+ )
+ service_template_file = os.path.join(service_template_dir, service_template_filename)
+
+ if not os.path.isfile(service_template_file):
+ raise AriaCliError(
+ 'Could not find `{0}`. Please provide the name of the main '
+ 'service template file by using the `-n/--service-template-filename` flag'
+ .format(service_template_filename))
+ return service_template_file
+
+
+def _map_to_github_url(source):
+ """
+ Returns the URL of the downloadable archive for a GitHub repository.
+
+ :param source: GitHub repo: ``org/repo[:tag/branch]``
+ :type source: basestring
+ :return: URL to the archive file for the given repo in GitHub
+ :rtype: basestring
+
+ """
+ source_parts = source.split(':', 1)
+ repo = source_parts[0]
+ tag = source_parts[1] if len(source_parts) == 2 else 'master'
+ url = 'https://github.com/{0}/archive/{1}.tar.gz'.format(repo, tag)
+ return url
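+
+# For example (illustrative): _map_to_github_url('my-org/my-repo:1.0') returns
+# 'https://github.com/my-org/my-repo/archive/1.0.tar.gz'; with no tag, 'master' is assumed.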
+
+
+def _is_archive(source):
+ return archive_utils.is_archive(source) or csar.is_csar_archive(source)
+
+
+def _extract_csar_archive(archive):
+ reader = csar.read(source=archive)
+ main_service_template_file_name = os.path.basename(reader.entry_definitions)
+ return os.path.join(reader.destination,
+ main_service_template_file_name)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/table.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/table.py
new file mode 100644
index 0000000..74487ae
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/table.py
@@ -0,0 +1,125 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Tabular formatting utilities.
+"""
+
+import os
+from datetime import datetime
+
+from prettytable import PrettyTable
+
+from .env import logger
+
+
+def print_data(columns, items, header_text,
+ column_formatters=None, col_max_width=None, defaults=None):
+ """
+ Prints data in a tabular form.
+
+ :param columns: columns of the table, e.g. ``['id','name']``
+ :type columns: iterable of basestring
+ :param items: each element must have keys or attributes corresponding to the ``columns`` items,
+ e.g. ``[{'id':'123', 'name':'Pete'}]``
+ :type data: [{:obj:`basestring`: :obj:`basestring`}]
+ :param column_formatters: maps column name to formatter, a function that may manipulate the
+ string values printed for this column, e.g. ``{'created_at': timestamp_formatter}``
+ :type column_formatters: {:obj:`basestring`: :obj:`function`}
+ :param col_max_width: maximum width of table
+ :type col_max_width: int
+ :param defaults: default values for keys that don't exist in the data itself, e.g.
+ ``{'serviceId':'123'}``
+ :type defaults: {:obj:`basestring`: :obj:`basestring`}
+ """
+ if items is None:
+ items = []
+ elif not isinstance(items, list):
+ items = [items]
+
+ pretty_table = _generate(columns, data=items, column_formatters=column_formatters,
+ defaults=defaults)
+ if col_max_width:
+ pretty_table.max_width = col_max_width
+ _log(header_text, pretty_table)
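+
+# A hedged usage sketch (column names and data are illustrative):
+#
+#     print_data(['id', 'name'],
+#                [{'id': '1', 'name': 'service-1'}, {'id': '2', 'name': 'service-2'}],
+#                'Services:',
+#                column_formatters={'name': trim_formatter_generator(20)},
+#                col_max_width=40)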
+
+
+def _log(title, table):
+ logger.info('{0}{1}{0}{2}{0}'.format(os.linesep, title, table))
+
+
+def _generate(cols, data, column_formatters=None, defaults=None):
+ """
+ Return a new PrettyTable instance representing the list.
+
+ :param cols: columns of the table, e.g. ``['id','name']``
+ :type cols: iterable of :obj:`basestring`
+ :param data: each element must have keys or attributes corresponding to the ``cols`` items,
+ e.g. ``[{'id':'123', 'name':'Pete'}]``
+ :type data: [{:obj:`basestring`: :obj:`basestring`}]
+ :param column_formatters: maps column name to formatter, a function that may manipulate the
+ string values printed for this column, e.g. ``{'created_at': timestamp_formatter}``
+ :type column_formatters: {:obj:`basestring`: :obj:`function`}
+ :param defaults: default values for keys that don't exist in the data itself, e.g.
+ ``{'serviceId':'123'}``
+ :type defaults: {:obj:`basestring`: :obj:`basestring`}
+ """
+ def get_values_per_column(column, row_data):
+ if hasattr(row_data, column) or (isinstance(row_data, dict) and column in row_data):
+ val = row_data[column] if isinstance(row_data, dict) else getattr(row_data, column)
+
+ if val and isinstance(val, list):
+ val = [str(element) for element in val]
+ val = ','.join(val)
+ elif val is None or isinstance(val, list):
+ # don't print `[]` or `None` (but do print `0`, `False`, etc.)
+ val = ''
+
+ if column in column_formatters:
+ # calling the user's column formatter to manipulate the value
+ val = column_formatters[column](val)
+
+ return val
+ else:
+ return defaults.get(column)
+
+ column_formatters = column_formatters or dict()
+ defaults = defaults or dict()
+ pretty_table = PrettyTable(list(cols))
+
+ for datum in data:
+ values_row = []
+ for col in cols:
+ values_row.append(get_values_per_column(col, datum))
+ pretty_table.add_row(values_row)
+
+ return pretty_table
+
+
+def timestamp_formatter(value):
+ try:
+ datetime.strptime(value[:10], '%Y-%m-%d')
+ return value.replace('T', ' ').replace('Z', ' ')
+ except ValueError:
+ # not a timestamp
+ return value
+
+
+def trim_formatter_generator(max_length):
+ def trim_formatter(value):
+ if len(value) >= max_length:
+ value = '{0}..'.format(value[:max_length - 2])
+ return value
+ return trim_formatter
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/cli/utils.py b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/utils.py
new file mode 100644
index 0000000..697ff37
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/cli/utils.py
@@ -0,0 +1,117 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Miscellaneous CLI utilities.
+"""
+
+import os
+import sys
+from StringIO import StringIO
+
+from backports.shutil_get_terminal_size import get_terminal_size
+
+from .env import logger
+from .exceptions import AriaCliError
+from ..utils import http
+
+
+def storage_sort_param(sort_by, descending):
+ return {sort_by: 'desc' if descending else 'asc'}
+
+
+def get_parameter_templates_as_string(parameter_templates):
+ params_string = StringIO()
+
+ for param_name, param_template in parameter_templates.iteritems():
+ params_string.write('\t{0}:{1}'.format(param_name, os.linesep))
+ param_dict = param_template.to_dict()
+ del param_dict['id'] # not interested in printing the id
+ for k, v in param_dict.iteritems():
+ params_string.write('\t\t{0}: {1}{2}'.format(k, v, os.linesep))
+
+ params_string.write(os.linesep)
+ return params_string.getvalue()
+
+
+def check_overriding_storage_exceptions(e, model_class, name):
+ """
+ Checks whether the storage exception is of a known type whose message we would like to
+ override; if so, raises a new error, otherwise simply returns.
+ """
+ assert isinstance(e, BaseException)
+ if 'UNIQUE constraint failed' in e.message:
+ new_message = \
+ 'Could not store {model_class} `{name}`{linesep}' \
+ 'There already exists a {model_class} with the same name' \
+ .format(model_class=model_class, name=name, linesep=os.linesep)
+ trace = sys.exc_info()[2]
+ raise type(e), type(e)(new_message), trace # pylint: disable=raising-non-exception
+
+
+def download_file(url):
+ progress_bar = generate_progress_handler(url, 'Downloading')
+ try:
+ destination = http.download_file(url, logger=logger, progress_handler=progress_bar)
+ except Exception as e:
+ raise AriaCliError(
+ 'Failed to download {0}. ({1})'.format(url, str(e)))
+ return destination
+
+
+def generate_progress_handler(file_path, action='', max_bar_length=80):
+ """
+ Returns a function that prints a progress bar in the terminal.
+
+ :param file_path: the name of the file being transferred
+ :param action: uploading/downloading
+ :param max_bar_length: maximum allowed length of the bar
+ :return: configured ``print_progress`` function
+ """
+ # Limit the maximum line length to 80 while allowing for a smaller terminal size, also
+ # accounting for the action string and some extra characters.
+ terminal_width = get_terminal_size().columns
+
+ # This takes care of the case where there is no terminal (e.g. unittest)
+ terminal_width = terminal_width or max_bar_length
+ bar_length = min(max_bar_length, terminal_width) - len(action) - 12
+
+ # Shorten the file name if it's too long
+ file_name = os.path.basename(file_path)
+ if len(file_name) > (bar_length / 4) + 3:
+ file_name = file_name[:bar_length / 4] + '...'
+
+ bar_length -= len(file_name)
+
+ def print_progress(read_bytes, total_bytes):
+ """
+ Print upload/download progress on a single line.
+
+ Call this function in a loop to create a progress bar in the terminal.
+
+ :param read_bytes: number of bytes already processed
+ :param total_bytes: total number of bytes in the file
+ """
+
+ filled_length = min(bar_length, int(round(bar_length * read_bytes / float(total_bytes))))
+ percents = min(100.00, round(100.00 * (read_bytes / float(total_bytes)), 2))
+ bar = '#' * filled_length + '-' * (bar_length - filled_length) # pylint: disable=blacklisted-name
+
+ # The \r caret makes sure the cursor moves back to the beginning of the line
+ sys.stdout.write('\r{0} {1} |{2}| {3}%'.format(action, file_name, bar, percents))
+ if read_bytes >= total_bytes:
+ sys.stdout.write(os.linesep)
+
+ return print_progress
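+
+# A hedged usage sketch (the file name and sizes are illustrative):
+#
+#     progress = generate_progress_handler('/tmp/app.tar.gz', action='Downloading')
+#     total = 1000000
+#     for read in (250000, 500000, 750000, 1000000):
+#         progress(read, total)  # redraws the bar in place; writes a newline when complete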
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/core.py b/azure/aria/aria-extension-cloudify/src/aria/aria/core.py
new file mode 100644
index 0000000..e364f48
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/core.py
@@ -0,0 +1,133 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ARIA core module.
+"""
+
+from . import exceptions
+from .parser import consumption
+from .parser.loading.location import UriLocation
+from .orchestrator import topology
+
+
+class Core(object):
+
+ def __init__(self,
+ model_storage,
+ resource_storage,
+ plugin_manager):
+ self._model_storage = model_storage
+ self._resource_storage = resource_storage
+ self._plugin_manager = plugin_manager
+
+ @property
+ def model_storage(self):
+ return self._model_storage
+
+ @property
+ def resource_storage(self):
+ return self._resource_storage
+
+ @property
+ def plugin_manager(self):
+ return self._plugin_manager
+
+ def validate_service_template(self, service_template_path):
+ self._parse_service_template(service_template_path)
+
+ def create_service_template(self, service_template_path, service_template_dir,
+ service_template_name):
+ context = self._parse_service_template(service_template_path)
+ service_template = context.modeling.template
+ service_template.name = service_template_name
+ self.model_storage.service_template.put(service_template)
+ self.resource_storage.service_template.upload(
+ entry_id=str(service_template.id), source=service_template_dir)
+ return service_template.id
+
+ def delete_service_template(self, service_template_id):
+ service_template = self.model_storage.service_template.get(service_template_id)
+ if service_template.services:
+ raise exceptions.DependentServicesError(
+ 'Can\'t delete service template `{0}` - service template has existing services'
+ .format(service_template.name))
+
+ self.model_storage.service_template.delete(service_template)
+ self.resource_storage.service_template.delete(entry_id=str(service_template.id))
+
+ def create_service(self, service_template_id, inputs, service_name=None):
+ service_template = self.model_storage.service_template.get(service_template_id)
+
+ storage_session = self.model_storage._all_api_kwargs['session']
+ # setting no autoflush for the duration of instantiation - this helps avoid dependency
+ # constraints as they're being set up
+ with storage_session.no_autoflush:
+ topology_ = topology.Topology()
+ service = topology_.instantiate(
+ service_template, inputs=inputs, plugins=self.model_storage.plugin.list())
+ topology_.coerce(service, report_issues=True)
+
+ topology_.validate(service)
+ topology_.satisfy_requirements(service)
+ topology_.coerce(service, report_issues=True)
+
+ topology_.validate_capabilities(service)
+ topology_.assign_hosts(service)
+ topology_.configure_operations(service)
+ topology_.coerce(service, report_issues=True)
+ if topology_.dump_issues():
+ raise exceptions.InstantiationError('Failed to instantiate service template `{0}`'
+ .format(service_template.name))
+
+ storage_session.flush() # flushing so service.id would auto-populate
+ service.name = service_name or '{0}_{1}'.format(service_template.name, service.id)
+ self.model_storage.service.put(service)
+ return service
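+
+ # A hedged end-to-end sketch (storage and plugin-manager setup omitted; names illustrative):
+ #
+ #     core = Core(model_storage, resource_storage, plugin_manager)
+ #     core.validate_service_template('/tmp/template.yaml')
+ #     template_id = core.create_service_template('/tmp/template.yaml', '/tmp', 'my-template')
+ #     service = core.create_service(template_id, inputs={})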
+
+ def delete_service(self, service_id, force=False):
+ service = self.model_storage.service.get(service_id)
+
+ active_executions = [e for e in service.executions if e.is_active()]
+ if active_executions:
+ raise exceptions.DependentActiveExecutionsError(
+ 'Can\'t delete service `{0}` - there is an active execution for this service. '
+ 'Active execution ID: {1}'.format(service.name, active_executions[0].id))
+
+ if not force:
+ available_nodes = [str(n.id) for n in service.nodes.itervalues() if n.is_available()]
+ if available_nodes:
+ raise exceptions.DependentAvailableNodesError(
+ 'Can\'t delete service `{0}` - there are available nodes for this service. '
+ 'Available node IDs: {1}'.format(service.name, ', '.join(available_nodes)))
+
+ self.model_storage.service.delete(service)
+
+ @staticmethod
+ def _parse_service_template(service_template_path):
+ context = consumption.ConsumptionContext()
+ context.presentation.location = UriLocation(service_template_path)
+ # Most of ARIA manipulates the models via the topology package; here we still use the
+ # Consumer mechanism, though this should change in the future.
+ consumption.ConsumerChain(
+ context,
+ (
+ consumption.Read,
+ consumption.Validate,
+ consumption.ServiceTemplate
+ )).consume()
+ if context.validation.dump_issues():
+ raise exceptions.ParsingError('Failed to parse service template')
+ return context
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/exceptions.py b/azure/aria/aria-extension-cloudify/src/aria/aria/exceptions.py
new file mode 100644
index 0000000..5d3e21d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/exceptions.py
@@ -0,0 +1,73 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Base exception classes and other common exceptions used throughout ARIA.
+"""
+
+import sys
+
+
+class AriaError(Exception):
+ """
+ Base class for ARIA errors.
+ """
+ pass
+
+
+class AriaException(Exception):
+ """
+ Base class for ARIA exceptions.
+ """
+
+ def __init__(self, message=None, cause=None, cause_traceback=None):
+ super(AriaException, self).__init__(message)
+ self.cause = cause
+ self.issue = None
+ if cause_traceback is None:
+ _, e, traceback = sys.exc_info()
+ if cause == e:
+ # Make sure it's our traceback
+ cause_traceback = traceback
+ self.cause_traceback = cause_traceback
+
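+# A hedged example of wrapping a lower-level failure while keeping its traceback
+# (``risky()`` is hypothetical):
+#
+#     try:
+#         risky()
+#     except Exception as e:
+#         raise AriaException('operation failed', cause=e)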
+
+class DependentServicesError(AriaError):
+ """
+ Raised when attempting to delete a service template which has existing services.
+ """
+ pass
+
+
+class DependentActiveExecutionsError(AriaError):
+ """
+ Raised when attempting to delete a service which has active executions.
+ """
+ pass
+
+
+class DependentAvailableNodesError(AriaError):
+ """
+ Raised when attempting to delete a service which has available nodes.
+ """
+ pass
+
+
+class ParsingError(AriaError):
+ pass
+
+
+class InstantiationError(AriaError):
+ pass
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/extension.py b/azure/aria/aria-extension-cloudify/src/aria/aria/extension.py
new file mode 100644
index 0000000..e90750d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/extension.py
@@ -0,0 +1,154 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Mechanism for registering and loading ARIA extensions.
+"""
+
+# pylint: disable=no-self-use
+
+from .utils import collections
+
+
+class _Registrar(object):
+
+ def __init__(self, registry):
+ if not isinstance(registry, (dict, list)):
+ raise RuntimeError('Unsupported registry type')
+ self._registry = registry
+
+ def register(self, function):
+ result = function()
+ if isinstance(self._registry, dict):
+ for key in result:
+ if key in self._registry:
+ raise RuntimeError('Re-definition of {0} in {1}'.format(key, function.__name__))
+ self._registry.update(result)
+ elif isinstance(self._registry, list):
+ if not isinstance(result, (list, tuple, set)):
+ result = [result]
+ self._registry += list(result)
+ else:
+ raise RuntimeError('Illegal state')
+
+ def __call__(self):
+ return self._registry
+
+
+def _registrar(function):
+ function._registrar_function = True
+ return function
+
+
+class _ExtensionRegistration(object):
+ """
+ Base class for extension class decorators.
+ """
+
+ def __init__(self):
+ self._registrars = {}
+ self._registered_classes = []
+ for attr, value in vars(self.__class__).items():
+ try:
+ is_registrar_function = value._registrar_function
+ except AttributeError:
+ is_registrar_function = False
+ if is_registrar_function:
+ registrar = _Registrar(registry=getattr(self, attr)())
+ setattr(self, attr, registrar)
+ self._registrars[attr] = registrar
+
+ def __call__(self, cls):
+ self._registered_classes.append(cls)
+ return cls
+
+ def init(self):
+ """
+ Initialize all registrars by calling all registered functions.
+ """
+ registered_instances = [cls() for cls in self._registered_classes]
+ for name, registrar in self._registrars.items():
+ for instance in registered_instances:
+ registering_function = getattr(instance, name, None)
+ if registering_function:
+ registrar.register(registering_function)
+
+
+class _ParserExtensionRegistration(_ExtensionRegistration):
+ """
+ Parser extensions class decorator.
+ """
+
+ @_registrar
+ def presenter_class(self):
+ """
+ Presenter class registration.
+
+ Implementing functions can return a single class or a list/tuple of classes.
+ """
+ return []
+
+ @_registrar
+ def specification_package(self):
+ """
+ Specification package registration.
+
+ Implementing functions can return a package name or a list/tuple of names.
+ """
+ return []
+
+ @_registrar
+ def specification_url(self):
+ """
+ Specification URL registration.
+
+ Implementing functions should return a dictionary from names to URLs.
+ """
+ return {}
+
+ @_registrar
+ def uri_loader_prefix(self):
+ """
+ URI loader prefix registration.
+
+ Implementing functions can return a single prefix or a list/tuple of prefixes.
+ """
+ return collections.StrictList(value_class=basestring)
+
+parser = _ParserExtensionRegistration()
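+
+# A hedged illustration of the decorator contract (the class and return values are hypothetical):
+#
+#     @parser
+#     class MyParserExtension(object):
+#         def presenter_class(self):
+#             return MyPresenter  # a single class, or a list/tuple of classes
+#
+# After ``extension.init()`` runs, ``parser.presenter_class()`` returns a registry that
+# includes ``MyPresenter``.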
+
+
+class _ProcessExecutorExtensionRegistration(_ExtensionRegistration):
+ """
+ Process executor extension class decorator.
+ """
+
+ @_registrar
+ def decorate(self):
+ """
+ The operation function executed by the process executor will be decorated with the function
+ returned from ``decorate()``.
+ """
+ return []
+
+process_executor = _ProcessExecutorExtensionRegistration()
+
+
+def init():
+ """
+ Initialize all registrars by calling all registered functions.
+ """
+ parser.init()
+ process_executor.init()
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/logger.py b/azure/aria/aria-extension-cloudify/src/aria/aria/logger.py
new file mode 100644
index 0000000..f4f6ec9
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/logger.py
@@ -0,0 +1,186 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Mix-ins and functions for logging, supporting multiple backends (such as SQL) and consistent
+formatting.
+"""
+
+import logging
+from logging import handlers as logging_handlers
+# NullHandler doesn't exist in Python < 2.7; this workaround is from
+# http://docs.python.org/release/2.6/library/logging.html#configuring-logging-for-a-library
+try:
+ from logging import NullHandler # pylint: disable=unused-import
+except ImportError:
+ class NullHandler(logging.Handler):
+ def emit(self, record):
+ pass
+from datetime import datetime
+
+
+TASK_LOGGER_NAME = 'aria.executions.task'
+
+
+_base_logger = logging.getLogger('aria')
+
+
+class LoggerMixin(object):
+ """
+ Provides logging functionality to a class.
+
+ :ivar logger_name: logger name; defaults to the class name
+ :ivar logger_level: logger level; defaults to ``logging.DEBUG``
+ :ivar base_logger: child loggers are created from this; defaults to the root logger
+ """
+ logger_name = None
+ logger_level = logging.DEBUG
+
+ def __init__(self, *args, **kwargs):
+ self.logger_name = self.logger_name or self.__class__.__name__
+ self.logger = logging.getLogger('{0}.{1}'.format(_base_logger.name, self.logger_name))
+ # Add a NullHandler to the logger of any object derived from LoggerMixin; without a
+ # handler, using the CLI would print the warning:
+ # `No handlers could be found for logger "..."`.
+ self.logger.addHandler(NullHandler())
+ self.logger.setLevel(self.logger_level)
+ super(LoggerMixin, self).__init__(*args, **kwargs)
+
+ @classmethod
+ def with_logger(
+ cls,
+ logger_name=None,
+ logger_level=logging.DEBUG,
+ base_logger=logging.getLogger(),
+ **kwargs):
+ """
+ Set the logger used by the consuming class.
+ """
+ cls.logger_name = logger_name
+ cls.logger_level = logger_level
+ cls.base_logger = base_logger
+ return cls(**kwargs)
+
+ def __getstate__(self):
+ obj_dict = vars(self).copy()
+ del obj_dict['logger']
+ return obj_dict
+
+ def __setstate__(self, obj_dict):
+ vars(self).update(
+ logger=logging.getLogger('{0}.{1}'.format(_base_logger.name, obj_dict['logger_name'])),
+ **obj_dict)
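+
+# Typical usage is via inheritance (a hedged sketch; the class name is hypothetical):
+#
+#     class Worker(LoggerMixin):
+#         def work(self):
+#             self.logger.debug('working...')  # logs under the 'aria.Worker' logger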
+
+
+def create_logger(logger=_base_logger, handlers=(), **configs):
+ """
+ :param logger: logger to configure; defaults to the ARIA logger
+ :type logger: logging.Logger
+ :param handlers: logger handlers
+ :type handlers: []
+ :param configs: logger configurations
+ :type configs: []
+ :return: logger
+ """
+ logger.handlers = []
+ for handler in handlers:
+ logger.addHandler(handler)
+
+ logger.setLevel(configs.get('level', logging.DEBUG))
+ logger.debug('Logger {0} configured'.format(logger.name))
+ return logger
+
+
+def create_console_log_handler(level=logging.DEBUG, formatter=None):
+ """
+ :param level:
+ :param formatter:
+ """
+ console = logging.StreamHandler()
+ console.setLevel(level)
+ console.formatter = formatter or _DefaultConsoleFormat()
+ return console
+
+
+def create_sqla_log_handler(model, log_cls, execution_id, level=logging.DEBUG):
+
+ # Since the engine and session are entirely new, the DB schema of the logging model must be
+ # reflected into them.
+ return _SQLAlchemyHandler(model=model, log_cls=log_cls, execution_id=execution_id, level=level)
+
+
+class _DefaultConsoleFormat(logging.Formatter):
+ """
+ Info level log format: ``%(message)s``.
+
+ Every other log level is formatted: ``%(levelname)s: %(message)s``.
+ """
+ def format(self, record):
+ try:
+ if hasattr(record, 'prefix'):
+ self._fmt = '<%(asctime)s: [%(levelname)s] @%(prefix)s> %(message)s'
+ else:
+ self._fmt = '<%(asctime)s: [%(levelname)s]> %(message)s'
+
+ except AttributeError:
+ return record.message
+ return logging.Formatter.format(self, record)
+
+
+def create_file_log_handler(
+ file_path,
+ level=logging.DEBUG,
+ max_bytes=5 * 1000 * 1024,
+ backup_count=10,
+ formatter=None):
+ """
+ Create a :class:`logging.handlers.RotatingFileHandler`.
+ """
+ rotating_file = logging_handlers.RotatingFileHandler(
+ filename=file_path,
+ maxBytes=max_bytes,
+ backupCount=backup_count,
+ delay=True,
+ )
+ rotating_file.setLevel(level)
+ rotating_file.formatter = formatter or _default_file_formatter
+ return rotating_file
+
+
+class _SQLAlchemyHandler(logging.Handler):
+ def __init__(self, model, log_cls, execution_id, **kwargs):
+ logging.Handler.__init__(self, **kwargs)
+ self._model = model
+ self._cls = log_cls
+ self._execution_id = execution_id
+
+ def emit(self, record):
+ created_at = datetime.strptime(logging.Formatter('%(asctime)s').formatTime(record),
+ '%Y-%m-%d %H:%M:%S,%f')
+ log = self._cls(
+ execution_fk=self._execution_id,
+ task_fk=record.task_id,
+ level=record.levelname,
+ msg=str(record.msg),
+ created_at=created_at,
+
+ # Not mandatory.
+ traceback=getattr(record, 'traceback', None)
+ )
+ self._model.log.put(log)
+
+
+_default_file_formatter = logging.Formatter(
+ '%(asctime)s [%(name)s:%(levelname)s] %(message)s <%(pathname)s:%(lineno)d>')
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/__init__.py
new file mode 100644
index 0000000..57bc188
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/__init__.py
@@ -0,0 +1,54 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This package provides an API for modeling ARIA's state and serializing it to storage.
+"""
+
+from collections import namedtuple
+
+from . import (
+ mixins,
+ types,
+ models,
+ utils,
+ service_template as _service_template_bases,
+ service_instance as _service_instance_bases,
+ service_changes as _service_changes_bases,
+ service_common as _service_common_bases,
+ orchestration as _orchestration_bases
+)
+
+
+_ModelBasesCls = namedtuple('ModelBase', 'service_template,'
+ 'service_instance,'
+ 'service_changes,'
+ 'service_common,'
+ 'orchestration')
+
+model_bases = _ModelBasesCls(service_template=_service_template_bases,
+ service_instance=_service_instance_bases,
+ service_changes=_service_changes_bases,
+ service_common=_service_common_bases,
+ orchestration=_orchestration_bases)
+
+
+__all__ = (
+ 'mixins',
+ 'types',
+ 'models',
+ 'model_bases',
+ 'utils'
+)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/constraints.py b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/constraints.py
new file mode 100644
index 0000000..8ed33d5
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/constraints.py
@@ -0,0 +1,31 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Constraints for the requirements-and-capabilities matching mechanism.
+"""
+
+class NodeTemplateConstraint(object):
+ """
+ Used to constrain requirements for node templates.
+
+ Must be serializable.
+ """
+
+ def matches(self, source_node_template, target_node_template):
+ """
+ Returns ``True`` if the target matches the constraint for the source.
+ """
+ raise NotImplementedError
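+
+# A minimal hypothetical subclass (the matching rule is illustrative only):
+#
+#     class TargetNamePrefixConstraint(NodeTemplateConstraint):
+#         def __init__(self, prefix):
+#             self.prefix = prefix
+#
+#         def matches(self, source_node_template, target_node_template):
+#             return target_node_template.name.startswith(self.prefix)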
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/exceptions.py b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/exceptions.py
new file mode 100644
index 0000000..cddc049
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/exceptions.py
@@ -0,0 +1,63 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Modeling exceptions.
+"""
+
+from ..exceptions import AriaException
+
+
+class ModelingException(AriaException):
+ """
+ ARIA modeling exception.
+ """
+
+
+class ParameterException(ModelingException):
+ """
+ ARIA parameter exception.
+ """
+ pass
+
+
+class ValueFormatException(ModelingException):
+ """
+ ARIA modeling exception: the value is in the wrong format.
+ """
+
+
+class CannotEvaluateFunctionException(ModelingException):
+ """
+ ARIA modeling exception: cannot evaluate the function at this time.
+ """
+
+
+class MissingRequiredInputsException(ParameterException):
+ """
+ ARIA modeling exception: Required parameters have been omitted.
+ """
+
+
+class ParametersOfWrongTypeException(ParameterException):
+ """
+ ARIA modeling exception: Parameters of the wrong types have been provided.
+ """
+
+
+class UndeclaredInputsException(ParameterException):
+ """
+ ARIA modeling exception: Undeclared parameters have been provided.
+ """
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/functions.py b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/functions.py
new file mode 100644
index 0000000..554bbfb
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/functions.py
@@ -0,0 +1,140 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Mechanism for evaluating intrinsic functions.
+"""
+from ..parser.exceptions import InvalidValueError
+from ..parser.consumption import ConsumptionContext
+from ..utils.collections import OrderedDict
+from ..utils.type import full_type_name
+from . import exceptions
+
+
+class Function(object):
+ """
+ Base class for intrinsic functions. Serves as a placeholder for a value that should eventually
+ be derived by "evaluating" (calling) the function.
+
+ Note that this base class is provided as a convenience and you do not have to inherit it: any
+ object with an ``__evaluate__`` method would be treated similarly.
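+
+    A minimal illustrative subclass (the name and behavior here are assumed, for demonstration
+    only)::
+
+        class Echo(Function):
+            def __init__(self, payload):
+                self.payload = payload
+
+            @property
+            def as_raw(self):
+                return {'echo': self.payload}
+
+            def __evaluate__(self, container_holder):
+                # Trivially evaluates to the stored payload, marked as final
+                return Evaluation(self.payload, final=True)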
+ """
+
+ @property
+ def as_raw(self):
+ raise NotImplementedError
+
+ def __evaluate__(self, container_holder):
+ """
+ Evaluates the function if possible.
+
+ :rtype: :class:`Evaluation` (or any object with ``value`` and ``final`` properties)
+        :raises CannotEvaluateFunctionException: if the function cannot be evaluated at this time
+        (do *not* just return ``None``)
+ """
+
+ raise NotImplementedError
+
+ def __deepcopy__(self, memo):
+ # Circumvent cloning in order to maintain our state
+ return self
+
+
+class Evaluation(object):
+ """
+ An evaluated :class:`Function` return value.
+
+ :ivar value: evaluated value
+ :ivar final: whether the value is final
+ :vartype final: boolean
+ """
+
+ def __init__(self, value, final=False):
+ self.value = value
+ self.final = final
+
+
+def evaluate(value, container_holder, report_issues=False): # pylint: disable=too-many-branches
+ """
+    Recursively attempts to call ``__evaluate__``. If any evaluation occurred, returns an
+    :class:`Evaluation`; otherwise returns ``None``. If any nested evaluation is non-final, the
+    entire evaluation is also non-final.
+
+ The ``container_holder`` argument should have three properties: ``container`` should return
+ the model that contains the value, ``service`` should return the containing
+    :class:`~aria.modeling.models.Service` model or ``None``, and ``service_template`` should return the
+ containing :class:`~aria.modeling.models.ServiceTemplate` model or ``None``.
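+
+    For example (illustrative; ``raw_value`` is any value that may contain intrinsic functions,
+    and ``parameter`` is a :class:`ParameterMixin` model serving as the container holder)::
+
+        evaluation = evaluate(raw_value, parameter)
+        if evaluation is not None:
+            raw_value = evaluation.value  # may still be non-final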
+ """
+
+ evaluated = False
+ final = True
+
+ if hasattr(value, '__evaluate__'):
+ try:
+ evaluation = value.__evaluate__(container_holder)
+
+ # Verify evaluation structure
+ if (evaluation is None) \
+ or (not hasattr(evaluation, 'value')) \
+ or (not hasattr(evaluation, 'final')):
+ raise InvalidValueError('bad __evaluate__ implementation: {0}'
+ .format(full_type_name(value)))
+
+ evaluated = True
+ value = evaluation.value
+ final = evaluation.final
+
+ # The evaluated value might itself be evaluable
+ evaluation = evaluate(value, container_holder, report_issues)
+ if evaluation is not None:
+ value = evaluation.value
+ if not evaluation.final:
+ final = False
+ except exceptions.CannotEvaluateFunctionException:
+ pass
+ except InvalidValueError as e:
+ if report_issues:
+ context = ConsumptionContext.get_thread_local()
+ context.validation.report(e.issue)
+
+ elif isinstance(value, list):
+ evaluated_list = []
+ for v in value:
+ evaluation = evaluate(v, container_holder, report_issues)
+ if evaluation is not None:
+ evaluated_list.append(evaluation.value)
+ evaluated = True
+ if not evaluation.final:
+ final = False
+ else:
+ evaluated_list.append(v)
+ if evaluated:
+ value = evaluated_list
+
+ elif isinstance(value, dict):
+ evaluated_dict = OrderedDict()
+ for k, v in value.iteritems():
+ evaluation = evaluate(v, container_holder, report_issues)
+ if evaluation is not None:
+ evaluated_dict[k] = evaluation.value
+ evaluated = True
+ if not evaluation.final:
+ final = False
+ else:
+ evaluated_dict[k] = v
+ if evaluated:
+ value = evaluated_dict
+
+ return Evaluation(value, final) if evaluated else None
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/mixins.py b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/mixins.py
new file mode 100644
index 0000000..d58c25a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/mixins.py
@@ -0,0 +1,333 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ARIA modeling mix-ins module
+"""
+
+from sqlalchemy.ext import associationproxy
+from sqlalchemy import (
+ Column,
+ Integer,
+ Text,
+ PickleType
+)
+
+from ..utils import collections, caching
+from ..utils.type import canonical_type_name, full_type_name
+from . import utils, functions
+
+
+class ModelMixin(object):
+
+ @utils.classproperty
+ def __modelname__(cls): # pylint: disable=no-self-argument
+ return getattr(cls, '__mapiname__', cls.__tablename__)
+
+ @classmethod
+ def id_column_name(cls):
+ raise NotImplementedError
+
+ @classmethod
+ def name_column_name(cls):
+ raise NotImplementedError
+
+ def to_dict(self, fields=None, suppress_error=False):
+ """
+ Create a dict representation of the model.
+
+        :param fields: iterable of field names to include; defaults to all public fields
+        :param suppress_error: if set to ``True``, attributes that cannot be retrieved are set to
+        ``None`` (e.g., if a relationship hasn't been established yet, so a property cannot be
+        accessed through it)
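+
+        For example (illustrative)::
+
+            node_dict = node.to_dict(fields=('name',), suppress_error=True)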
+ """
+
+ res = dict()
+ fields = fields or self.fields()
+ for field in fields:
+ try:
+ field_value = getattr(self, field)
+ except AttributeError:
+ if suppress_error:
+ field_value = None
+ else:
+ raise
+ if isinstance(field_value, list):
+ field_value = list(field_value)
+ elif isinstance(field_value, dict):
+ field_value = dict(field_value)
+ elif isinstance(field_value, ModelMixin):
+ field_value = field_value.to_dict()
+ res[field] = field_value
+
+ return res
+
+ @classmethod
+ def fields(cls):
+ """
+ List of field names for this table.
+
+        Mostly kept for backwards compatibility with code that uses ``fields``.
+ """
+
+ fields = set(cls._iter_association_proxies())
+ fields.update(cls.__table__.columns.keys())
+ return fields - set(getattr(cls, '__private_fields__', ()))
+
+ @classmethod
+ def _iter_association_proxies(cls):
+ for col, value in vars(cls).items():
+ if isinstance(value, associationproxy.AssociationProxy):
+ yield col
+
+ def __repr__(self):
+ return '<{cls} id=`{id}`>'.format(
+ cls=self.__class__.__name__,
+ id=getattr(self, self.name_column_name()))
+
+
+class ModelIDMixin(object):
+ id = Column(Integer, primary_key=True, autoincrement=True, doc="""
+ Unique ID.
+
+ :type: :obj:`int`
+ """)
+
+ name = Column(Text, index=True, doc="""
+ Model name.
+
+ :type: :obj:`basestring`
+ """)
+
+ @classmethod
+ def id_column_name(cls):
+ return 'id'
+
+ @classmethod
+ def name_column_name(cls):
+ return 'name'
+
+
+class InstanceModelMixin(ModelMixin):
+ """
+ Mix-in for service instance models.
+
+ All models support validation, diagnostic dumping, and representation as raw data (which can be
+ translated into JSON or YAML) via :meth:`as_raw`.
+ """
+
+ @property
+ def as_raw(self):
+ raise NotImplementedError
+
+ def coerce_values(self, report_issues):
+ pass
+
+
+class TemplateModelMixin(InstanceModelMixin): # pylint: disable=abstract-method
+ """
+ Mix-in for service template models.
+
+    All template models can be instantiated into service instance models.
+ """
+
+
+class ParameterMixin(TemplateModelMixin, caching.HasCachedMethods): #pylint: disable=abstract-method
+ """
+ Mix-in for typed values. The value can contain nested intrinsic functions.
+
+ This model can be used as the ``container_holder`` argument for
+ :func:`~aria.modeling.functions.evaluate`.
+ """
+
+ type_name = Column(Text, doc="""
+ Type name.
+
+ :type: :obj:`basestring`
+ """)
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ _value = Column(PickleType)
+
+ @property
+ def value(self):
+ value = self._value
+ if value is not None:
+ evaluation = functions.evaluate(value, self)
+ if evaluation is not None:
+ value = evaluation.value
+ return value
+
+ @value.setter
+ def value(self, value):
+ self._value = value
+
+ @property
+ @caching.cachedmethod
+ def owner(self):
+ """
+ The sole owner of this parameter, which is another model that relates to it.
+
+ *All* parameters should have an owner model.
+
+ :raises ~exceptions.ValueError: if failed to find an owner, which signifies an abnormal,
+ orphaned parameter
+ """
+
+ # Find first non-null relationship
+ for the_relationship in self.__mapper__.relationships:
+ v = getattr(self, the_relationship.key)
+ if v:
+ return v
+
+ raise ValueError('orphaned {class_name}: does not have an owner: {name}'.format(
+ class_name=type(self).__name__, name=self.name))
+
+ @property
+ @caching.cachedmethod
+ def container(self): # pylint: disable=too-many-return-statements,too-many-branches
+ """
+ The logical container for this parameter, which would be another model: service, node,
+ group, or policy (or their templates).
+
+ The logical container is equivalent to the ``SELF`` keyword used by intrinsic functions in
+ TOSCA.
+
+ *All* parameters should have a container model.
+
+ :raises ~exceptions.ValueError: if failed to find a container model, which signifies an
+ abnormal, orphaned parameter
+ """
+
+ from . import models
+
+ container = self.owner
+
+ # Extract interface from operation
+ if isinstance(container, models.Operation):
+ container = container.interface
+ elif isinstance(container, models.OperationTemplate):
+ container = container.interface_template
+
+ # Extract from other models
+ if isinstance(container, models.Interface):
+ container = container.node or container.group or container.relationship
+ elif isinstance(container, models.InterfaceTemplate):
+ container = container.node_template or container.group_template \
+ or container.relationship_template
+ elif isinstance(container, models.Capability) or isinstance(container, models.Artifact):
+ container = container.node
+ elif isinstance(container, models.CapabilityTemplate) \
+ or isinstance(container, models.ArtifactTemplate):
+ container = container.node_template
+ elif isinstance(container, models.Task):
+ container = container.actor
+
+ # Extract node from relationship
+ if isinstance(container, models.Relationship):
+ container = container.source_node
+ elif isinstance(container, models.RelationshipTemplate):
+ container = container.requirement_template.node_template
+
+ if container is not None:
+ return container
+
+ raise ValueError('orphaned parameter: does not have a container: {0}'.format(self.name))
+
+ @property
+ @caching.cachedmethod
+ def service(self):
+ """
+ The :class:`~aria.modeling.models.Service` model containing this parameter, or ``None`` if
+ not contained in a service.
+
+ :raises ~exceptions.ValueError: if failed to find a container model, which signifies an
+ abnormal, orphaned parameter
+ """
+
+ from . import models
+ container = self.container
+ if isinstance(container, models.Service):
+ return container
+ elif hasattr(container, 'service'):
+ return container.service
+ return None
+
+ @property
+ @caching.cachedmethod
+ def service_template(self):
+ """
+ The :class:`~aria.modeling.models.ServiceTemplate` model containing this parameter, or
+ ``None`` if not contained in a service template.
+
+ :raises ~exceptions.ValueError: if failed to find a container model, which signifies an
+ abnormal, orphaned parameter
+ """
+
+ from . import models
+ container = self.container
+ if isinstance(container, models.ServiceTemplate):
+ return container
+ elif hasattr(container, 'service_template'):
+ return container.service_template
+ return None
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('type_name', self.type_name),
+ ('value', self.value),
+ ('description', self.description)))
+
+ @property
+ def unwrapped(self):
+ return self.name, self.value
+
+ @classmethod
+ def wrap(cls, name, value, description=None):
+ """
+ Wraps an arbitrary value as a parameter. The type will be guessed via introspection.
+
+ For primitive types, we will prefer their TOSCA aliases. See the `TOSCA Simple Profile v1.0
+ cos01 specification <http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01
+ /TOSCA-Simple-Profile-YAML-v1.0-cos01.html#_Toc373867862>`__
+
+ :param name: parameter name
+ :type name: basestring
+ :param value: parameter value
+ :param description: human-readable description (optional)
+ :type description: basestring
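+
+        For example (illustrative; ``Property`` is one of the concrete parameter models)::
+
+            prop = Property.wrap('port', 8080)  # type_name is guessed via introspection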
+ """
+
+ type_name = canonical_type_name(value)
+ if type_name is None:
+ type_name = full_type_name(value)
+ return cls(name=name, # pylint: disable=unexpected-keyword-arg
+ type_name=type_name,
+ value=value,
+ description=description)
+
+ def as_other_parameter_model(self, other_model_cls):
+ name, value = self.unwrapped
+ return other_model_cls.wrap(name, value)
+
+ def as_argument(self):
+ from . import models
+ return self.as_other_parameter_model(models.Argument)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/models.py b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/models.py
new file mode 100644
index 0000000..cf84fdb
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/models.py
@@ -0,0 +1,427 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Data models.
+
+Service template models
+-----------------------
+
+.. autosummary::
+ :nosignatures:
+
+ aria.modeling.models.ServiceTemplate
+ aria.modeling.models.NodeTemplate
+ aria.modeling.models.GroupTemplate
+ aria.modeling.models.PolicyTemplate
+ aria.modeling.models.SubstitutionTemplate
+ aria.modeling.models.SubstitutionTemplateMapping
+ aria.modeling.models.RequirementTemplate
+ aria.modeling.models.RelationshipTemplate
+ aria.modeling.models.CapabilityTemplate
+ aria.modeling.models.InterfaceTemplate
+ aria.modeling.models.OperationTemplate
+ aria.modeling.models.ArtifactTemplate
+ aria.modeling.models.PluginSpecification
+
+Service instance models
+-----------------------
+
+.. autosummary::
+ :nosignatures:
+
+ aria.modeling.models.Service
+ aria.modeling.models.Node
+ aria.modeling.models.Group
+ aria.modeling.models.Policy
+ aria.modeling.models.Substitution
+ aria.modeling.models.SubstitutionMapping
+ aria.modeling.models.Relationship
+ aria.modeling.models.Capability
+ aria.modeling.models.Interface
+ aria.modeling.models.Operation
+ aria.modeling.models.Artifact
+
+Common models
+-------------
+
+.. autosummary::
+ :nosignatures:
+
+ aria.modeling.models.Output
+ aria.modeling.models.Input
+ aria.modeling.models.Configuration
+ aria.modeling.models.Property
+ aria.modeling.models.Attribute
+ aria.modeling.models.Type
+ aria.modeling.models.Metadata
+
+Orchestration models
+--------------------
+
+.. autosummary::
+ :nosignatures:
+
+ aria.modeling.models.Execution
+ aria.modeling.models.Task
+ aria.modeling.models.Log
+ aria.modeling.models.Plugin
+ aria.modeling.models.Argument
+"""
+
+# pylint: disable=abstract-method
+
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy import (
+ Column,
+ Text
+)
+
+from . import (
+ service_template,
+ service_instance,
+ service_changes,
+ service_common,
+ orchestration,
+ mixins,
+ utils
+)
+
+
+aria_declarative_base = declarative_base(cls=mixins.ModelIDMixin)
+
+
+# See also models_to_register at the bottom of this file
+__all__ = (
+ 'models_to_register',
+
+ # Service template models
+ 'ServiceTemplate',
+ 'NodeTemplate',
+ 'GroupTemplate',
+ 'PolicyTemplate',
+ 'SubstitutionTemplate',
+ 'SubstitutionTemplateMapping',
+ 'RequirementTemplate',
+ 'RelationshipTemplate',
+ 'CapabilityTemplate',
+ 'InterfaceTemplate',
+ 'OperationTemplate',
+ 'ArtifactTemplate',
+ 'PluginSpecification',
+
+ # Service instance models
+ 'Service',
+ 'Node',
+ 'Group',
+ 'Policy',
+ 'Substitution',
+ 'SubstitutionMapping',
+ 'Relationship',
+ 'Capability',
+ 'Interface',
+ 'Operation',
+ 'Artifact',
+
+ # Service changes models
+ 'ServiceUpdate',
+ 'ServiceUpdateStep',
+ 'ServiceModification',
+
+ # Common service models
+ 'Input',
+ 'Configuration',
+ 'Output',
+ 'Property',
+ 'Attribute',
+ 'Type',
+ 'Metadata',
+
+ # Orchestration models
+ 'Execution',
+ 'Plugin',
+ 'Task',
+ 'Log',
+ 'Argument'
+)
+
+
+# region service template models
+
+@utils.fix_doc
+class ServiceTemplate(aria_declarative_base, service_template.ServiceTemplateBase):
+ name = Column(Text, index=True, unique=True)
+
+
+@utils.fix_doc
+class NodeTemplate(aria_declarative_base, service_template.NodeTemplateBase):
+ pass
+
+
+@utils.fix_doc
+class GroupTemplate(aria_declarative_base, service_template.GroupTemplateBase):
+ pass
+
+
+@utils.fix_doc
+class PolicyTemplate(aria_declarative_base, service_template.PolicyTemplateBase):
+ pass
+
+
+@utils.fix_doc
+class SubstitutionTemplate(aria_declarative_base, service_template.SubstitutionTemplateBase):
+ pass
+
+
+@utils.fix_doc
+class SubstitutionTemplateMapping(aria_declarative_base,
+ service_template.SubstitutionTemplateMappingBase):
+ pass
+
+
+@utils.fix_doc
+class RequirementTemplate(aria_declarative_base, service_template.RequirementTemplateBase):
+ pass
+
+
+@utils.fix_doc
+class RelationshipTemplate(aria_declarative_base, service_template.RelationshipTemplateBase):
+ pass
+
+
+@utils.fix_doc
+class CapabilityTemplate(aria_declarative_base, service_template.CapabilityTemplateBase):
+ pass
+
+
+@utils.fix_doc
+class InterfaceTemplate(aria_declarative_base, service_template.InterfaceTemplateBase):
+ pass
+
+
+@utils.fix_doc
+class OperationTemplate(aria_declarative_base, service_template.OperationTemplateBase):
+ pass
+
+
+@utils.fix_doc
+class ArtifactTemplate(aria_declarative_base, service_template.ArtifactTemplateBase):
+ pass
+
+
+@utils.fix_doc
+class PluginSpecification(aria_declarative_base, service_template.PluginSpecificationBase):
+ pass
+
+# endregion
+
+
+# region service instance models
+
+@utils.fix_doc
+class Service(aria_declarative_base, service_instance.ServiceBase):
+ name = Column(Text, index=True, unique=True)
+
+
+@utils.fix_doc
+class Node(aria_declarative_base, service_instance.NodeBase):
+ pass
+
+
+@utils.fix_doc
+class Group(aria_declarative_base, service_instance.GroupBase):
+ pass
+
+
+@utils.fix_doc
+class Policy(aria_declarative_base, service_instance.PolicyBase):
+ pass
+
+
+@utils.fix_doc
+class Substitution(aria_declarative_base, service_instance.SubstitutionBase):
+ pass
+
+
+@utils.fix_doc
+class SubstitutionMapping(aria_declarative_base, service_instance.SubstitutionMappingBase):
+ pass
+
+
+@utils.fix_doc
+class Relationship(aria_declarative_base, service_instance.RelationshipBase):
+ pass
+
+
+@utils.fix_doc
+class Capability(aria_declarative_base, service_instance.CapabilityBase):
+ pass
+
+
+@utils.fix_doc
+class Interface(aria_declarative_base, service_instance.InterfaceBase):
+ pass
+
+
+@utils.fix_doc
+class Operation(aria_declarative_base, service_instance.OperationBase):
+ pass
+
+
+@utils.fix_doc
+class Artifact(aria_declarative_base, service_instance.ArtifactBase):
+ pass
+
+# endregion
+
+
+# region service changes models
+
+@utils.fix_doc
+class ServiceUpdate(aria_declarative_base, service_changes.ServiceUpdateBase):
+ pass
+
+
+@utils.fix_doc
+class ServiceUpdateStep(aria_declarative_base, service_changes.ServiceUpdateStepBase):
+ pass
+
+
+@utils.fix_doc
+class ServiceModification(aria_declarative_base, service_changes.ServiceModificationBase):
+ pass
+
+# endregion
+
+
+# region common service models
+
+@utils.fix_doc
+class Input(aria_declarative_base, service_common.InputBase):
+ pass
+
+
+@utils.fix_doc
+class Configuration(aria_declarative_base, service_common.ConfigurationBase):
+ pass
+
+
+@utils.fix_doc
+class Output(aria_declarative_base, service_common.OutputBase):
+ pass
+
+
+@utils.fix_doc
+class Property(aria_declarative_base, service_common.PropertyBase):
+ pass
+
+
+@utils.fix_doc
+class Attribute(aria_declarative_base, service_common.AttributeBase):
+ pass
+
+
+@utils.fix_doc
+class Type(aria_declarative_base, service_common.TypeBase):
+ pass
+
+
+@utils.fix_doc
+class Metadata(aria_declarative_base, service_common.MetadataBase):
+ pass
+
+# endregion
+
+
+# region orchestration models
+
+@utils.fix_doc
+class Execution(aria_declarative_base, orchestration.ExecutionBase):
+ pass
+
+
+@utils.fix_doc
+class Plugin(aria_declarative_base, orchestration.PluginBase):
+ pass
+
+
+@utils.fix_doc
+class Task(aria_declarative_base, orchestration.TaskBase):
+ pass
+
+
+@utils.fix_doc
+class Log(aria_declarative_base, orchestration.LogBase):
+ pass
+
+
+@utils.fix_doc
+class Argument(aria_declarative_base, orchestration.ArgumentBase):
+ pass
+
+# endregion
+
+
+# See also __all__ at the top of this file
+models_to_register = (
+ # Service template models
+ ServiceTemplate,
+ NodeTemplate,
+ GroupTemplate,
+ PolicyTemplate,
+ SubstitutionTemplate,
+ SubstitutionTemplateMapping,
+ RequirementTemplate,
+ RelationshipTemplate,
+ CapabilityTemplate,
+ InterfaceTemplate,
+ OperationTemplate,
+ ArtifactTemplate,
+ PluginSpecification,
+
+ # Service instance models
+ Service,
+ Node,
+ Group,
+ Policy,
+    Substitution,
+    SubstitutionMapping,
+ Relationship,
+ Capability,
+ Interface,
+ Operation,
+ Artifact,
+
+ # Service changes models
+ ServiceUpdate,
+ ServiceUpdateStep,
+ ServiceModification,
+
+ # Common service models
+ Input,
+ Configuration,
+ Output,
+ Property,
+ Attribute,
+ Type,
+ Metadata,
+
+ # Orchestration models
+ Execution,
+ Plugin,
+ Task,
+ Log,
+ Argument
+)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/orchestration.py b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/orchestration.py
new file mode 100644
index 0000000..4d4f0fe
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/orchestration.py
@@ -0,0 +1,715 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ARIA modeling orchestration module
+"""
+
+# pylint: disable=no-self-argument, no-member, abstract-method
+from datetime import datetime
+
+from sqlalchemy import (
+ Column,
+ Integer,
+ Text,
+ DateTime,
+ Boolean,
+ Enum,
+ String,
+ Float,
+ orm,
+ PickleType)
+from sqlalchemy.ext.declarative import declared_attr
+
+from ..orchestrator.exceptions import (TaskAbortException, TaskRetryException)
+from . import mixins
+from . import (
+ relationship,
+ types as modeling_types
+)
+
+
+class ExecutionBase(mixins.ModelMixin):
+ """
+ Workflow execution.
+ """
+
+ __tablename__ = 'execution'
+
+ __private_fields__ = ('service_fk',
+ 'service_template')
+
+ SUCCEEDED = 'succeeded'
+ FAILED = 'failed'
+ CANCELLED = 'cancelled'
+ PENDING = 'pending'
+ STARTED = 'started'
+ CANCELLING = 'cancelling'
+
+ STATES = (SUCCEEDED, FAILED, CANCELLED, PENDING, STARTED, CANCELLING)
+ END_STATES = (SUCCEEDED, FAILED, CANCELLED)
+
+ VALID_TRANSITIONS = {
+ PENDING: (STARTED, CANCELLED),
+ STARTED: END_STATES + (CANCELLING,),
+ CANCELLING: END_STATES,
+ # Retrying
+        CANCELLED: (PENDING,),
+        FAILED: (PENDING,)
+ }
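+    # For example, PENDING -> STARTED is a valid transition, while PENDING -> SUCCEEDED is not
+    # and would be rejected by validate_status (below)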
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def inputs(cls):
+ """
+ Execution parameters.
+
+ :type: {:obj:`basestring`: :class:`Input`}
+ """
+ return relationship.one_to_many(cls, 'input', dict_key='name')
+
+ @declared_attr
+ def tasks(cls):
+ """
+ Tasks.
+
+ :type: [:class:`Task`]
+ """
+ return relationship.one_to_many(cls, 'task')
+
+ @declared_attr
+ def logs(cls):
+ """
+ Log messages for the execution (including log messages for its tasks).
+
+ :type: [:class:`Log`]
+ """
+ return relationship.one_to_many(cls, 'log')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def service(cls):
+ """
+ Associated service.
+
+ :type: :class:`Service`
+ """
+ return relationship.many_to_one(cls, 'service')
+
+ # endregion
+
+ # region association proxies
+
+ @declared_attr
+ def service_name(cls):
+ return relationship.association_proxy('service', cls.name_column_name())
+
+ @declared_attr
+ def service_template(cls):
+ return relationship.association_proxy('service', 'service_template')
+
+ @declared_attr
+ def service_template_name(cls):
+ return relationship.association_proxy('service', 'service_template_name')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def service_fk(cls):
+ return relationship.foreign_key('service')
+
+ # endregion
+
+ created_at = Column(DateTime, index=True, doc="""
+ Creation timestamp.
+
+ :type: :class:`~datetime.datetime`
+ """)
+
+ started_at = Column(DateTime, nullable=True, index=True, doc="""
+ Started timestamp.
+
+ :type: :class:`~datetime.datetime`
+ """)
+
+ ended_at = Column(DateTime, nullable=True, index=True, doc="""
+ Ended timestamp.
+
+ :type: :class:`~datetime.datetime`
+ """)
+
+ error = Column(Text, nullable=True, doc="""
+ Error message.
+
+ :type: :obj:`basestring`
+ """)
+
+ status = Column(Enum(*STATES, name='execution_status'), default=PENDING, doc="""
+ Status.
+
+ :type: :obj:`basestring`
+ """)
+
+ workflow_name = Column(Text, doc="""
+ Workflow name.
+
+ :type: :obj:`basestring`
+ """)
+
+ @orm.validates('status')
+ def validate_status(self, key, value):
+ """Validation function that verifies execution status transitions are OK"""
+ try:
+ current_status = getattr(self, key)
+ except AttributeError:
+ return
+ valid_transitions = self.VALID_TRANSITIONS.get(current_status, [])
+ if all([current_status is not None,
+ current_status != value,
+ value not in valid_transitions]):
+ raise ValueError('Cannot change execution status from {current} to {new}'.format(
+ current=current_status,
+ new=value))
+ return value
+
+ def has_ended(self):
+ return self.status in self.END_STATES
+
+ def is_active(self):
+ return not self.has_ended() and self.status != self.PENDING
+
+ def __str__(self):
+ return '<{0} id=`{1}` (status={2})>'.format(
+ self.__class__.__name__,
+ getattr(self, self.name_column_name()),
+ self.status
+ )
+
+
+class TaskBase(mixins.ModelMixin):
+ """
+    Represents the smallest unit of stateful execution in ARIA. The task state includes inputs
+    and outputs, as well as an atomic status that ensures the task can only be running once at
+    any given time.
+
+ The Python :attr:`function` is usually provided by an associated :class:`Plugin`. The
+ :attr:`arguments` of the function should be set according to the specific signature of the
+ function.
+
+    Tasks may be "one shot" or may be configured to retry in case of failure.
+
+    Tasks are often based on :class:`Operation`, and thus act on either a :class:`Node` or a
+    :class:`Relationship`; however, this is not required.
+ """
+
+ __tablename__ = 'task'
+
+ __private_fields__ = ('node_fk',
+ 'relationship_fk',
+ 'plugin_fk',
+ 'execution_fk')
+
+ START_WORKFLOW = 'start_workflow'
+ END_WORKFLOW = 'end_workflow'
+ START_SUBWROFKLOW = 'start_subworkflow'
+ END_SUBWORKFLOW = 'end_subworkflow'
+ STUB = 'stub'
+ CONDITIONAL = 'conditional'
+
+ STUB_TYPES = (
+ START_WORKFLOW,
+ START_SUBWROFKLOW,
+ END_WORKFLOW,
+ END_SUBWORKFLOW,
+ STUB,
+ CONDITIONAL,
+ )
+
+ PENDING = 'pending'
+ RETRYING = 'retrying'
+ SENT = 'sent'
+ STARTED = 'started'
+ SUCCESS = 'success'
+ FAILED = 'failed'
+ STATES = (
+ PENDING,
+ RETRYING,
+ SENT,
+ STARTED,
+ SUCCESS,
+ FAILED,
+ )
+ INFINITE_RETRIES = -1
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def logs(cls):
+ """
+ Log messages.
+
+ :type: [:class:`Log`]
+ """
+ return relationship.one_to_many(cls, 'log')
+
+ @declared_attr
+ def arguments(cls):
+ """
+        Arguments sent to the Python :attr:`function`.
+
+ :type: {:obj:`basestring`: :class:`Argument`}
+ """
+ return relationship.one_to_many(cls, 'argument', dict_key='name')
+
+ # endregion
+
+    # region many_to_one relationships
+
+ @declared_attr
+ def execution(cls):
+ """
+ Containing execution.
+
+ :type: :class:`Execution`
+ """
+ return relationship.many_to_one(cls, 'execution')
+
+ @declared_attr
+ def node(cls):
+ """
+ Node actor (can be ``None``).
+
+ :type: :class:`Node`
+ """
+ return relationship.many_to_one(cls, 'node')
+
+ @declared_attr
+ def relationship(cls):
+ """
+ Relationship actor (can be ``None``).
+
+ :type: :class:`Relationship`
+ """
+ return relationship.many_to_one(cls, 'relationship')
+
+ @declared_attr
+ def plugin(cls):
+ """
+ Associated plugin.
+
+ :type: :class:`Plugin`
+ """
+ return relationship.many_to_one(cls, 'plugin')
+
+ # endregion
+
+ # region association proxies
+
+ @declared_attr
+ def node_name(cls):
+ return relationship.association_proxy('node', cls.name_column_name())
+
+ @declared_attr
+ def relationship_name(cls):
+ return relationship.association_proxy('relationship', cls.name_column_name())
+
+ @declared_attr
+ def execution_name(cls):
+ return relationship.association_proxy('execution', cls.name_column_name())
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def execution_fk(cls):
+ return relationship.foreign_key('execution', nullable=True)
+
+ @declared_attr
+ def node_fk(cls):
+ return relationship.foreign_key('node', nullable=True)
+
+ @declared_attr
+ def relationship_fk(cls):
+ return relationship.foreign_key('relationship', nullable=True)
+
+ @declared_attr
+ def plugin_fk(cls):
+ return relationship.foreign_key('plugin', nullable=True)
+
+ # endregion
+
+ status = Column(Enum(*STATES, name='status'), default=PENDING, doc="""
+ Current atomic status ('pending', 'retrying', 'sent', 'started', 'success', 'failed').
+
+ :type: :obj:`basestring`
+ """)
+
+    due_at = Column(DateTime, nullable=False, index=True, default=datetime.utcnow, doc="""
+ Timestamp to start the task.
+
+ :type: :class:`~datetime.datetime`
+ """)
+
+ started_at = Column(DateTime, default=None, doc="""
+ Started timestamp.
+
+ :type: :class:`~datetime.datetime`
+ """)
+
+ ended_at = Column(DateTime, default=None, doc="""
+ Ended timestamp.
+
+ :type: :class:`~datetime.datetime`
+ """)
+
+ attempts_count = Column(Integer, default=1, doc="""
+ How many attempts occurred.
+
+    :type: :obj:`int`
+ """)
+
+ function = Column(String, doc="""
+ Full path to Python function.
+
+ :type: :obj:`basestring`
+ """)
+
+ max_attempts = Column(Integer, default=1, doc="""
+ Maximum number of attempts allowed in case of task failure.
+
+ :type: :obj:`int`
+ """)
+
+ retry_interval = Column(Float, default=0, doc="""
+    Interval between task retry attempts (in seconds).
+
+ :type: :obj:`float`
+ """)
+
+ ignore_failure = Column(Boolean, default=False, doc="""
+ Set to ``True`` to ignore failures.
+
+ :type: :obj:`bool`
+ """)
+
+ interface_name = Column(String, doc="""
+ Name of interface on node or relationship.
+
+ :type: :obj:`basestring`
+ """)
+
+ operation_name = Column(String, doc="""
+ Name of operation in interface on node or relationship.
+
+ :type: :obj:`basestring`
+ """)
+
+ _api_id = Column(String)
+ _executor = Column(PickleType)
+ _context_cls = Column(PickleType)
+ _stub_type = Column(Enum(*STUB_TYPES))
+
+ @property
+ def actor(self):
+ """
+ Actor of the task (node or relationship).
+ """
+ return self.node or self.relationship
+
+ @orm.validates('max_attempts')
+ def validate_max_attempts(self, _, value): # pylint: disable=no-self-use
+ """
+ Validates that max attempts is either -1 or a positive number.
+ """
+ if value < 1 and value != TaskBase.INFINITE_RETRIES:
+ raise ValueError('Max attempts can be either -1 (infinite) or any positive number. '
+ 'Got {value}'.format(value=value))
+ return value
+
+ @staticmethod
+ def abort(message=None):
+ raise TaskAbortException(message)
+
+ @staticmethod
+ def retry(message=None, retry_interval=None):
+ raise TaskRetryException(message, retry_interval=retry_interval)
+
+ @declared_attr
+ def dependencies(cls):
+ return relationship.many_to_many(cls, self=True)
+
+ def has_ended(self):
+ return self.status in (self.SUCCESS, self.FAILED)
+
+ def is_waiting(self):
+ if self._stub_type:
+ return not self.has_ended()
+ else:
+ return self.status in (self.PENDING, self.RETRYING)
+
+ @classmethod
+ def from_api_task(cls, api_task, executor, **kwargs):
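+        """
+        Constructs a task model from an in-memory API task; the actor keyword argument (node or
+        relationship) is derived by inspecting the API task's actor.
+        """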
+ instantiation_kwargs = {}
+
+ if hasattr(api_task.actor, 'outbound_relationships'):
+ instantiation_kwargs['node'] = api_task.actor
+ elif hasattr(api_task.actor, 'source_node'):
+ instantiation_kwargs['relationship'] = api_task.actor
+ else:
+ raise RuntimeError('No operation context could be created for {actor.model_cls}'
+ .format(actor=api_task.actor))
+
+ instantiation_kwargs.update(
+ {
+ 'name': api_task.name,
+ 'status': cls.PENDING,
+ 'max_attempts': api_task.max_attempts,
+ 'retry_interval': api_task.retry_interval,
+ 'ignore_failure': api_task.ignore_failure,
+ 'execution': api_task._workflow_context.execution,
+ 'interface_name': api_task.interface_name,
+ 'operation_name': api_task.operation_name,
+
+ # Only non-stub tasks have these fields
+ 'plugin': api_task.plugin,
+ 'function': api_task.function,
+ 'arguments': api_task.arguments,
+ '_context_cls': api_task._context_cls,
+ '_executor': executor,
+ }
+ )
+
+ instantiation_kwargs.update(**kwargs)
+
+ return cls(**instantiation_kwargs)
+
+
+class LogBase(mixins.ModelMixin):
+ """
+ Single log message.
+ """
+
+ __tablename__ = 'log'
+
+ __private_fields__ = ('execution_fk',
+ 'task_fk')
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def execution(cls):
+ """
+ Containing execution.
+
+ :type: :class:`Execution`
+ """
+ return relationship.many_to_one(cls, 'execution')
+
+ @declared_attr
+ def task(cls):
+ """
+ Containing task (can be ``None``).
+
+ :type: :class:`Task`
+ """
+ return relationship.many_to_one(cls, 'task')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def execution_fk(cls):
+ return relationship.foreign_key('execution')
+
+ @declared_attr
+ def task_fk(cls):
+ return relationship.foreign_key('task', nullable=True)
+
+ # endregion
+
+ level = Column(String, doc="""
+ Log level.
+
+ :type: :obj:`basestring`
+ """)
+
+ msg = Column(String, doc="""
+ Log message.
+
+ :type: :obj:`basestring`
+ """)
+
+ created_at = Column(DateTime, index=True, doc="""
+ Creation timestamp.
+
+ :type: :class:`~datetime.datetime`
+ """)
+
+ traceback = Column(Text, doc="""
+ Error traceback in case of failure.
+
+    :type: :obj:`basestring`
+ """)
+
+ def __str__(self):
+ return self.msg
+
+ def __repr__(self):
+ name = (self.task.actor if self.task else self.execution).name
+ return '{name}: {self.msg}'.format(name=name, self=self)
+
+
+class PluginBase(mixins.ModelMixin):
+ """
+ Installed plugin.
+
+ Plugins are usually packaged as `wagons <https://github.com/cloudify-cosmo/wagon>`__, which
+ are archives of one or more `wheels <https://packaging.python.org/distributing/#wheels>`__.
+ Most of these fields are indeed extracted from the installed wagon's metadata.
+ """
+
+ __tablename__ = 'plugin'
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def tasks(cls):
+ """
+ Associated Tasks.
+
+ :type: [:class:`Task`]
+ """
+ return relationship.one_to_many(cls, 'task')
+
+ # endregion
+
+ archive_name = Column(Text, nullable=False, index=True, doc="""
+ Filename (not the full path) of the wagon's archive, often with a ``.wgn`` extension.
+
+ :type: :obj:`basestring`
+ """)
+
+ distribution = Column(Text, doc="""
+ Name of the operating system on which the wagon was installed (e.g. ``ubuntu``).
+
+ :type: :obj:`basestring`
+ """)
+
+ distribution_release = Column(Text, doc="""
+ Release of the operating system on which the wagon was installed (e.g. ``trusty``).
+
+ :type: :obj:`basestring`
+ """)
+
+ distribution_version = Column(Text, doc="""
+ Version of the operating system on which the wagon was installed (e.g. ``14.04``).
+
+ :type: :obj:`basestring`
+ """)
+
+ package_name = Column(Text, nullable=False, index=True, doc="""
+ Primary Python package name used when the wagon was installed, which is one of the wheels in the
+ wagon (e.g. ``cloudify-script-plugin``).
+
+ :type: :obj:`basestring`
+ """)
+
+ package_source = Column(Text, doc="""
+ Full install string for the primary Python package name used when the wagon was installed (e.g.
+ ``cloudify-script-plugin==1.2``).
+
+ :type: :obj:`basestring`
+ """)
+
+ package_version = Column(Text, doc="""
+ Version for the primary Python package name used when the wagon was installed (e.g. ``1.2``).
+
+ :type: :obj:`basestring`
+ """)
+
+ supported_platform = Column(Text, doc="""
+    If the wheels are *all* pure Python then this would be ``any``, otherwise it would be the
+ installed platform name (e.g. ``linux_x86_64``).
+
+ :type: :obj:`basestring`
+ """)
+
+ supported_py_versions = Column(modeling_types.StrictList(basestring), doc="""
+ Python versions supported by all the wheels (e.g. ``["py26", "py27"]``)
+
+ :type: [:obj:`basestring`]
+ """)
+
+ wheels = Column(modeling_types.StrictList(basestring), nullable=False, doc="""
+ Filenames of the wheels archived in the wagon, often with a ``.whl`` extension.
+
+ :type: [:obj:`basestring`]
+ """)
+
+ uploaded_at = Column(DateTime, nullable=False, index=True, doc="""
+ Timestamp for when the wagon was installed.
+
+ :type: :class:`~datetime.datetime`
+ """)
+
+
+class ArgumentBase(mixins.ParameterMixin):
+ """
+ Python function argument parameter.
+ """
+
+ __tablename__ = 'argument'
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def task(cls):
+ """
+        Containing task (can be ``None``).
+
+ :type: :class:`Task`
+ """
+ return relationship.many_to_one(cls, 'task')
+
+ @declared_attr
+ def operation(cls):
+ """
+        Containing operation (can be ``None``).
+
+ :type: :class:`Operation`
+ """
+ return relationship.many_to_one(cls, 'operation')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def task_fk(cls):
+ return relationship.foreign_key('task', nullable=True)
+
+ @declared_attr
+ def operation_fk(cls):
+ return relationship.foreign_key('operation', nullable=True)
+
+ # endregion
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/relationship.py b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/relationship.py
new file mode 100644
index 0000000..0d906de
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/relationship.py
@@ -0,0 +1,395 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ARIA modeling relationship module
+"""
+
+# pylint: disable=invalid-name, redefined-outer-name
+
+from sqlalchemy.orm import relationship, backref
+from sqlalchemy.orm.collections import attribute_mapped_collection
+from sqlalchemy.ext.associationproxy import association_proxy as original_association_proxy
+from sqlalchemy import (
+ Column,
+ ForeignKey,
+ Integer,
+ Table
+)
+
+from ..utils import formatting
+
+NO_BACK_POP = 'NO_BACK_POP'
+
+
+def foreign_key(other_table, nullable=False):
+ """
+ Declare a foreign key property, which will also create a foreign key column in the table with
+ the name of the property. By convention the property name should end in "_fk".
+
+ You are required to explicitly create foreign keys in order to allow for one-to-one,
+ one-to-many, and many-to-one relationships (but not for many-to-many relationships). If you do
+ not do so, SQLAlchemy will fail to create the relationship property and raise an exception with
+ a clear error message.
+
+ You should normally not have to access this property directly, but instead use the associated
+ relationship properties.
+
+ *This utility method should only be used during class creation.*
+
+ :param other_table: other table name
+ :type other_table: basestring
+ :param nullable: ``True`` to allow null values (meaning that there is no relationship)
+ :type nullable: bool
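+
+    For example (mirroring usage elsewhere in this codebase)::
+
+        @declared_attr
+        def node_fk(cls):
+            return relationship.foreign_key('node', nullable=True)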
+ """
+
+ return Column(Integer,
+ ForeignKey('{table}.id'.format(table=other_table), ondelete='CASCADE'),
+ nullable=nullable)
+
+
+def one_to_one_self(model_class, fk):
+ """
+ Declare a one-to-one relationship property. The property value would be an instance of the same
+ model.
+
+ You will need an associated foreign key to our own table.
+
+ *This utility method should only be used during class creation.*
+
+ :param model_class: class in which this relationship will be declared
+ :type model_class: type
+ :param fk: foreign key name
+ :type fk: basestring
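+
+    An illustrative sketch (the ``host_fk`` column name is an assumption)::
+
+        @declared_attr
+        def host(cls):
+            return relationship.one_to_one_self(cls, 'host_fk')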
+ """
+
+ remote_side = '{model_class}.{remote_column}'.format(
+ model_class=model_class.__name__,
+ remote_column=model_class.id_column_name()
+ )
+
+ primaryjoin = '{remote_side} == {model_class}.{column}'.format(
+ remote_side=remote_side,
+ model_class=model_class.__name__,
+ column=fk
+ )
+ return _relationship(
+ model_class,
+ model_class.__tablename__,
+ relationship_kwargs={
+ 'primaryjoin': primaryjoin,
+ 'remote_side': remote_side,
+ 'post_update': True
+ }
+ )
+
+
+def one_to_one(model_class,
+ other_table,
+ fk=None,
+ other_fk=None,
+ back_populates=None):
+ """
+ Declare a one-to-one relationship property. The property value would be an instance of the other
+ table's model.
+
+    You have two options for the foreign key. Either this table can have an associated foreign
+    key to the other table (use the ``fk`` argument) or the other table can have an associated
+    foreign key to our table (use the ``other_fk`` argument).
+
+ *This utility method should only be used during class creation.*
+
+ :param model_class: class in which this relationship will be declared
+ :type model_class: type
+ :param other_table: other table name
+ :type other_table: basestring
+    :param fk: foreign key name at our table (no need to specify if there's no ambiguity)
+    :type fk: basestring
+    :param other_fk: foreign key name at the other table (no need to specify if there's no ambiguity)
+ :type other_fk: basestring
+    :param back_populates: override name of matching one-to-one property at other table; set to
+ ``False`` to disable
+ :type back_populates: basestring or bool
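+
+    For example (mirroring ``ServiceUpdateBase.execution`` in this package)::
+
+        @declared_attr
+        def execution(cls):
+            return relationship.one_to_one(cls, 'execution',
+                                           back_populates=relationship.NO_BACK_POP)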
+ """
+ backref_kwargs = None
+ if back_populates is not NO_BACK_POP:
+ if back_populates is None:
+ back_populates = model_class.__tablename__
+ backref_kwargs = {'name': back_populates, 'uselist': False}
+ back_populates = None
+
+ return _relationship(model_class,
+ other_table,
+ fk=fk,
+ back_populates=back_populates,
+ backref_kwargs=backref_kwargs,
+ other_fk=other_fk)
+
+
+def one_to_many(model_class,
+ other_table=None,
+ other_fk=None,
+ dict_key=None,
+ back_populates=None,
+ rel_kwargs=None,
+ self=False):
+ """
+ Declare a one-to-many relationship property. The property value would be a list or dict of
+ instances of the child table's model.
+
+ The child table will need an associated foreign key to our table.
+
+ The declaration will automatically create a matching many-to-one property at the child model,
+    named after our table name. Use the ``back_populates`` argument to override this name.
+
+ *This utility method should only be used during class creation.*
+
+ :param model_class: class in which this relationship will be declared
+ :type model_class: type
+ :param other_table: other table name
+ :type other_table: basestring
+    :param other_fk: foreign key name at the other table (no need to specify if there's no ambiguity)
+ :type other_fk: basestring
+ :param dict_key: if set the value will be a dict with this key as the dict key; otherwise will
+ be a list
+ :type dict_key: basestring
+ :param back_populates: override name of matching many-to-one property at other table; set to
+ ``False`` to disable
+ :type back_populates: basestring or bool
+ :param rel_kwargs: additional relationship kwargs to be used by SQLAlchemy
+ :type rel_kwargs: dict
+    :param self: used for relationships between a table and itself; if set, ``other_table`` will
+    become the same as the source table
+ :type self: bool
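+
+    For example (mirroring ``ExecutionBase.tasks`` and ``ExecutionBase.inputs`` in this
+    package)::
+
+        @declared_attr
+        def tasks(cls):
+            return relationship.one_to_many(cls, 'task')
+
+        @declared_attr
+        def inputs(cls):
+            return relationship.one_to_many(cls, 'input', dict_key='name')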
+ """
+ relationship_kwargs = rel_kwargs or {}
+ if self:
+ assert other_fk
+ other_table_name = model_class.__tablename__
+ back_populates = False
+ relationship_kwargs['remote_side'] = '{model}.{column}'.format(model=model_class.__name__,
+ column=other_fk)
+
+ else:
+ assert other_table
+ other_table_name = other_table
+ if back_populates is None:
+ back_populates = model_class.__tablename__
+ relationship_kwargs.setdefault('cascade', 'all')
+
+ return _relationship(
+ model_class,
+ other_table_name,
+ back_populates=back_populates,
+ other_fk=other_fk,
+ dict_key=dict_key,
+ relationship_kwargs=relationship_kwargs)
+
+
+def many_to_one(model_class,
+ parent_table,
+ fk=None,
+ parent_fk=None,
+ back_populates=None):
+ """
+ Declare a many-to-one relationship property. The property value would be an instance of the
+ parent table's model.
+
+ You will need an associated foreign key to the parent table.
+
+    The declaration will automatically create a matching one-to-many property at the parent model,
+    named after the plural form of our table name. Use the ``back_populates`` argument to override
+    this name. Note: the automatic property will always be a SQLAlchemy query object; if you need a
+ Python collection then use :func:`one_to_many` at that model.
+
+ *This utility method should only be used during class creation.*
+
+ :param model_class: class in which this relationship will be declared
+ :type model_class: type
+ :param parent_table: parent table name
+ :type parent_table: basestring
+    :param fk: foreign key name at our table (no need to specify if there's no ambiguity)
+    :type fk: basestring
+    :param parent_fk: foreign key name at the parent table (no need to specify if there's no
+    ambiguity)
+    :type parent_fk: basestring
+ :param back_populates: override name of matching one-to-many property at parent table; set to
+ ``False`` to disable
+ :type back_populates: basestring or bool
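+
+    For example (mirroring ``TaskBase.plugin`` in this package)::
+
+        @declared_attr
+        def plugin(cls):
+            return relationship.many_to_one(cls, 'plugin')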
+ """
+ if back_populates is None:
+ back_populates = formatting.pluralize(model_class.__tablename__)
+
+ return _relationship(model_class,
+ parent_table,
+ back_populates=back_populates,
+ fk=fk,
+ other_fk=parent_fk)
+
+
+def many_to_many(model_class,
+ other_table=None,
+ prefix=None,
+ dict_key=None,
+ other_property=None,
+ self=False):
+ """
+ Declare a many-to-many relationship property. The property value would be a list or dict of
+ instances of the other table's model.
+
+ You do not need associated foreign keys for this relationship. Instead, an extra table will be
+ created for you.
+
+ The declaration will automatically create a matching many-to-many property at the other model,
+ named after the plural form of our table name. Use the ``other_property`` argument to override
+ this name. Note: the automatic property will always be a SQLAlchemy query object; if you need a
+ Python collection then use :func:`many_to_many` again at that model.
+
+ *This utility method should only be used during class creation.*
+
+ :param model_class: class in which this relationship will be declared
+ :type model_class: type
+    :param other_table: other table name
+ :type other_table: basestring
+ :param prefix: optional prefix for extra table name as well as for ``other_property``
+ :type prefix: basestring
+ :param dict_key: if set the value will be a dict with this key as the dict key; otherwise will
+ be a list
+ :type dict_key: basestring
+ :param other_property: override name of matching many-to-many property at other table; set to
+ ``False`` to disable
+ :type other_property: basestring or bool
+    :param self: used for relationships between a table and itself; if set, ``other_table`` will
+    become the same as the source table
+ :type self: bool
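+
+    For example (mirroring ``TaskBase.dependencies`` in this package, a self-referencing use)::
+
+        @declared_attr
+        def dependencies(cls):
+            return relationship.many_to_many(cls, self=True)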
+ """
+
+ this_table = model_class.__tablename__
+ this_column_name = '{0}_id'.format(this_table)
+ this_foreign_key = '{0}.id'.format(this_table)
+
+ if self:
+ other_table = this_table
+
+ other_column_name = '{0}_{1}'.format(other_table, 'self_ref_id' if self else 'id')
+ other_foreign_key = '{0}.{1}'.format(other_table, 'id')
+
+ secondary_table_name = '{0}_{1}'.format(this_table, other_table)
+
+ if prefix is not None:
+ secondary_table_name = '{0}_{1}'.format(prefix, secondary_table_name)
+ if other_property is None:
+ other_property = '{0}_{1}'.format(prefix, formatting.pluralize(this_table))
+
+ secondary_table = _get_secondary_table(
+ model_class.metadata,
+ secondary_table_name,
+ this_column_name,
+ other_column_name,
+ this_foreign_key,
+ other_foreign_key
+ )
+
+ kwargs = {'relationship_kwargs': {'secondary': secondary_table}}
+
+ if self:
+ kwargs['back_populates'] = NO_BACK_POP
+ kwargs['relationship_kwargs']['primaryjoin'] = \
+ getattr(model_class, 'id') == getattr(secondary_table.c, this_column_name)
+ kwargs['relationship_kwargs']['secondaryjoin'] = \
+ getattr(model_class, 'id') == getattr(secondary_table.c, other_column_name)
+ else:
+ kwargs['backref_kwargs'] = \
+ {'name': other_property, 'uselist': True} if other_property else None
+ kwargs['dict_key'] = dict_key
+
+ return _relationship(model_class, other_table, **kwargs)
+
+
+def association_proxy(*args, **kwargs):
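+    """
+    Wraps SQLAlchemy's ``association_proxy`` to attach a documentation string noting the proxied
+    type; pass a ``type`` keyword argument to override the default of ``basestring``.
+    """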
+ if 'type' in kwargs:
+ type_ = kwargs.get('type')
+ del kwargs['type']
+ else:
+ type_ = ':obj:`basestring`'
+ proxy = original_association_proxy(*args, **kwargs)
+ proxy.__doc__ = """
+ Internal. For use in SQLAlchemy queries.
+
+ :type: {0}
+ """.format(type_)
+ return proxy
+
+
+def _relationship(model_class,
+ other_table_name,
+ back_populates=None,
+ backref_kwargs=None,
+ relationship_kwargs=None,
+ fk=None,
+ other_fk=None,
+ dict_key=None):
+ relationship_kwargs = relationship_kwargs or {}
+
+ if fk:
+ relationship_kwargs.setdefault(
+ 'foreign_keys',
+ lambda: getattr(_get_class_for_table(model_class, model_class.__tablename__), fk)
+ )
+
+ elif other_fk:
+ relationship_kwargs.setdefault(
+ 'foreign_keys',
+ lambda: getattr(_get_class_for_table(model_class, other_table_name), other_fk)
+ )
+
+ if dict_key:
+ relationship_kwargs.setdefault('collection_class',
+ attribute_mapped_collection(dict_key))
+
+ if backref_kwargs:
+ assert back_populates is None
+ return relationship(
+ lambda: _get_class_for_table(model_class, other_table_name),
+ backref=backref(**backref_kwargs),
+ **relationship_kwargs
+ )
+ else:
+ if back_populates is not NO_BACK_POP:
+ relationship_kwargs['back_populates'] = back_populates
+ return relationship(lambda: _get_class_for_table(model_class, other_table_name),
+ **relationship_kwargs)
+
+
+def _get_class_for_table(model_class, tablename):
+ if tablename in (model_class.__name__, model_class.__tablename__):
+ return model_class
+
+ for table_cls in model_class._decl_class_registry.itervalues():
+ if tablename == getattr(table_cls, '__tablename__', None):
+ return table_cls
+
+ raise ValueError('unknown table: {0}'.format(tablename))
+
+
+def _get_secondary_table(metadata,
+ name,
+ first_column,
+ second_column,
+ first_foreign_key,
+ second_foreign_key):
+ return Table(
+ name,
+ metadata,
+ Column(first_column, Integer, ForeignKey(first_foreign_key)),
+ Column(second_column, Integer, ForeignKey(second_foreign_key))
+ )
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/service_changes.py b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/service_changes.py
new file mode 100644
index 0000000..061262a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/service_changes.py
@@ -0,0 +1,253 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ARIA modeling service changes module
+"""
+
+# pylint: disable=no-self-argument, no-member, abstract-method
+
+from collections import namedtuple
+
+from sqlalchemy import (
+ Column,
+ Text,
+ DateTime,
+ Enum,
+)
+from sqlalchemy.ext.declarative import declared_attr
+
+from .types import (List, Dict)
+from .mixins import ModelMixin
+from . import relationship
+
+
+class ServiceUpdateBase(ModelMixin):
+ """
+ Deployment update model representation.
+ """
+ __tablename__ = 'service_update'
+
+ __private_fields__ = ('service_fk',
+ 'execution_fk')
+
+ created_at = Column(DateTime, nullable=False, index=True)
+ service_plan = Column(Dict, nullable=False)
+ service_update_nodes = Column(Dict)
+ service_update_service = Column(Dict)
+ service_update_node_templates = Column(List)
+ modified_entity_ids = Column(Dict)
+ state = Column(Text)
+
+ # region association proxies
+
+ @declared_attr
+ def execution_name(cls):
+ return relationship.association_proxy('execution', cls.name_column_name())
+
+ @declared_attr
+ def service_name(cls):
+ return relationship.association_proxy('service', cls.name_column_name())
+
+ # endregion
+
+ # region one_to_one relationships
+
+ # endregion
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def steps(cls):
+ return relationship.one_to_many(cls, 'service_update_step')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def execution(cls):
+ return relationship.one_to_one(cls, 'execution', back_populates=relationship.NO_BACK_POP)
+
+ @declared_attr
+ def service(cls):
+ return relationship.many_to_one(cls, 'service', back_populates='updates')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def execution_fk(cls):
+ return relationship.foreign_key('execution', nullable=True)
+
+ @declared_attr
+ def service_fk(cls):
+ return relationship.foreign_key('service')
+
+ # endregion
+
+ def to_dict(self, suppress_error=False, **kwargs):
+        dep_update_dict = super(ServiceUpdateBase, self).to_dict(suppress_error=suppress_error)  # pylint: disable=no-member
+        # The steps are model objects themselves, so serialize each of them explicitly
+ dep_update_dict['steps'] = [step.to_dict() for step in self.steps]
+ return dep_update_dict
+
+
+class ServiceUpdateStepBase(ModelMixin):
+ """
+ Deployment update step model representation.
+ """
+
+ __tablename__ = 'service_update_step'
+
+ __private_fields__ = ('service_update_fk',)
+
+ _action_types = namedtuple('ACTION_TYPES', 'ADD, REMOVE, MODIFY')
+ ACTION_TYPES = _action_types(ADD='add', REMOVE='remove', MODIFY='modify')
+
+ _entity_types = namedtuple(
+ 'ENTITY_TYPES',
+ 'NODE, RELATIONSHIP, PROPERTY, OPERATION, WORKFLOW, OUTPUT, DESCRIPTION, GROUP, PLUGIN')
+ ENTITY_TYPES = _entity_types(
+ NODE='node',
+ RELATIONSHIP='relationship',
+ PROPERTY='property',
+ OPERATION='operation',
+ WORKFLOW='workflow',
+ OUTPUT='output',
+ DESCRIPTION='description',
+ GROUP='group',
+ PLUGIN='plugin'
+ )
+
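+    # Unpacking the namedtuples (``*ACTION_TYPES``, ``*ENTITY_TYPES``) below passes
+    # their string values to SQLAlchemy's Enum, so the columns accept only those
+    # values. Illustrative usage (assuming the concrete ServiceUpdateStep model):
+    #
+    #     step.action = ServiceUpdateStep.ACTION_TYPES.MODIFY   # == 'modify'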
+ action = Column(Enum(*ACTION_TYPES, name='action_type'), nullable=False)
+ entity_id = Column(Text, nullable=False)
+ entity_type = Column(Enum(*ENTITY_TYPES, name='entity_type'), nullable=False)
+
+ # region association proxies
+
+ @declared_attr
+ def service_update_name(cls):
+ return relationship.association_proxy('service_update', cls.name_column_name())
+
+ # endregion
+
+ # region one_to_one relationships
+
+ # endregion
+
+ # region one_to_many relationships
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def service_update(cls):
+ return relationship.many_to_one(cls, 'service_update', back_populates='steps')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def service_update_fk(cls):
+ return relationship.foreign_key('service_update')
+
+ # endregion
+
+ def __hash__(self):
+ return hash((getattr(self, self.id_column_name()), self.entity_id))
+
+ def __lt__(self, other):
+ """
+        Sorts steps in execution order: 'remove' < 'modify' < 'add'.
+
+        Within the same action, relationship removals sort before node removals, and node
+        additions sort before relationship additions.
+ """
+ if not isinstance(other, self.__class__):
+ return not self >= other
+
+ if self.action != other.action:
+ if self.action == 'remove':
+ return_value = True
+ elif self.action == 'add':
+ return_value = False
+ else:
+ return_value = other.action == 'add'
+ return return_value
+
+ if self.action == 'add':
+ return self.entity_type == 'node' and other.entity_type == 'relationship'
+ if self.action == 'remove':
+ return self.entity_type == 'relationship' and other.entity_type == 'node'
+ return False
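+
+    # Illustrative sketch (hypothetical step instances): sorting yields relationship
+    # removals, then node removals, then modifications, then node additions, then
+    # relationship additions:
+    #
+    #     sorted([add_rel, add_node, modify_prop, remove_node, remove_rel])
+    #     # -> [remove_rel, remove_node, modify_prop, add_node, add_rel]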
+
+
+class ServiceModificationBase(ModelMixin):
+ """
+    Service modification model representation.
+ """
+
+ __tablename__ = 'service_modification'
+
+ __private_fields__ = ('service_fk',)
+
+ STARTED = 'started'
+ FINISHED = 'finished'
+ ROLLEDBACK = 'rolledback'
+
+ STATES = [STARTED, FINISHED, ROLLEDBACK]
+ END_STATES = [FINISHED, ROLLEDBACK]
+
+ context = Column(Dict)
+ created_at = Column(DateTime, nullable=False, index=True)
+ ended_at = Column(DateTime, index=True)
+ modified_node_templates = Column(Dict)
+ nodes = Column(Dict)
+ status = Column(Enum(*STATES, name='service_modification_status'))
+
+ # region association proxies
+
+ @declared_attr
+ def service_name(cls):
+ return relationship.association_proxy('service', cls.name_column_name())
+
+ # endregion
+
+ # region one_to_one relationships
+
+ # endregion
+
+ # region one_to_many relationships
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def service(cls):
+ return relationship.many_to_one(cls, 'service', back_populates='modifications')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def service_fk(cls):
+ return relationship.foreign_key('service')
+
+ # endregion
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/service_common.py b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/service_common.py
new file mode 100644
index 0000000..d1f6b00
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/service_common.py
@@ -0,0 +1,601 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ARIA modeling service common module
+"""
+
+# pylint: disable=no-self-argument, no-member, abstract-method
+
+from sqlalchemy import (
+ Column,
+ Text,
+ Boolean
+)
+from sqlalchemy.ext.declarative import declared_attr
+
+from ..utils import (
+ collections,
+ formatting
+)
+from .mixins import InstanceModelMixin, TemplateModelMixin, ParameterMixin
+from . import relationship
+
+
+class OutputBase(ParameterMixin):
+ """
+ Output parameter or declaration for an output parameter.
+ """
+
+ __tablename__ = 'output'
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def service_template(cls):
+ """
+ Containing service template (can be ``None``).
+
+ :type: :class:`ServiceTemplate`
+ """
+ return relationship.many_to_one(cls, 'service_template')
+
+ @declared_attr
+ def service(cls):
+ """
+ Containing service (can be ``None``).
+
+        :type: :class:`Service`
+ """
+ return relationship.many_to_one(cls, 'service')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def service_template_fk(cls):
+ return relationship.foreign_key('service_template', nullable=True)
+
+ @declared_attr
+ def service_fk(cls):
+ return relationship.foreign_key('service', nullable=True)
+
+ # endregion
+
+
+class InputBase(ParameterMixin):
+ """
+ Input parameter or declaration for an input parameter.
+ """
+
+ __tablename__ = 'input'
+
+ required = Column(Boolean, doc="""
+    Whether the input is mandatory.
+
+ :type: :obj:`bool`
+ """)
+
+ @classmethod
+ def wrap(cls, name, value, description=None, required=True): # pylint: disable=arguments-differ
+ input = super(InputBase, cls).wrap(name, value, description)
+ input.required = required
+ return input
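+
+    # Illustrative usage (assuming the concrete Input model generated from this
+    # base class):
+    #
+    #     port = Input.wrap('port', 8080, description='listening port', required=False)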
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def service_template(cls):
+ """
+ Containing service template (can be ``None``).
+
+ :type: :class:`ServiceTemplate`
+ """
+ return relationship.many_to_one(cls, 'service_template')
+
+ @declared_attr
+ def service(cls):
+ """
+ Containing service (can be ``None``).
+
+ :type: :class:`Service`
+ """
+ return relationship.many_to_one(cls, 'service')
+
+ @declared_attr
+ def interface(cls):
+ """
+ Containing interface (can be ``None``).
+
+ :type: :class:`Interface`
+ """
+ return relationship.many_to_one(cls, 'interface')
+
+ @declared_attr
+ def operation(cls):
+ """
+ Containing operation (can be ``None``).
+
+ :type: :class:`Operation`
+ """
+ return relationship.many_to_one(cls, 'operation')
+
+ @declared_attr
+ def interface_template(cls):
+ """
+ Containing interface template (can be ``None``).
+
+ :type: :class:`InterfaceTemplate`
+ """
+ return relationship.many_to_one(cls, 'interface_template')
+
+ @declared_attr
+ def operation_template(cls):
+ """
+ Containing operation template (can be ``None``).
+
+ :type: :class:`OperationTemplate`
+ """
+ return relationship.many_to_one(cls, 'operation_template')
+
+ @declared_attr
+ def execution(cls):
+ """
+ Containing execution (can be ``None``).
+
+ :type: :class:`Execution`
+ """
+ return relationship.many_to_one(cls, 'execution')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def service_template_fk(cls):
+ return relationship.foreign_key('service_template', nullable=True)
+
+ @declared_attr
+ def service_fk(cls):
+ return relationship.foreign_key('service', nullable=True)
+
+ @declared_attr
+ def interface_fk(cls):
+ return relationship.foreign_key('interface', nullable=True)
+
+ @declared_attr
+ def operation_fk(cls):
+ return relationship.foreign_key('operation', nullable=True)
+
+ @declared_attr
+ def interface_template_fk(cls):
+ return relationship.foreign_key('interface_template', nullable=True)
+
+ @declared_attr
+ def operation_template_fk(cls):
+ return relationship.foreign_key('operation_template', nullable=True)
+
+ @declared_attr
+ def execution_fk(cls):
+ return relationship.foreign_key('execution', nullable=True)
+
+ @declared_attr
+ def task_fk(cls):
+ return relationship.foreign_key('task', nullable=True)
+
+ # endregion
+
+
+class ConfigurationBase(ParameterMixin):
+ """
+ Configuration parameter.
+ """
+
+ __tablename__ = 'configuration'
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def operation_template(cls):
+ """
+ Containing operation template (can be ``None``).
+
+ :type: :class:`OperationTemplate`
+ """
+ return relationship.many_to_one(cls, 'operation_template')
+
+ @declared_attr
+ def operation(cls):
+ """
+ Containing operation (can be ``None``).
+
+ :type: :class:`Operation`
+ """
+ return relationship.many_to_one(cls, 'operation')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def operation_template_fk(cls):
+ return relationship.foreign_key('operation_template', nullable=True)
+
+ @declared_attr
+ def operation_fk(cls):
+ return relationship.foreign_key('operation', nullable=True)
+
+ # endregion
+
+
+class PropertyBase(ParameterMixin):
+ """
+ Property parameter or declaration for a property parameter.
+ """
+
+ __tablename__ = 'property'
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def node_template(cls):
+ """
+ Containing node template (can be ``None``).
+
+ :type: :class:`NodeTemplate`
+ """
+ return relationship.many_to_one(cls, 'node_template')
+
+ @declared_attr
+ def group_template(cls):
+ """
+ Containing group template (can be ``None``).
+
+ :type: :class:`GroupTemplate`
+ """
+ return relationship.many_to_one(cls, 'group_template')
+
+ @declared_attr
+ def policy_template(cls):
+ """
+ Containing policy template (can be ``None``).
+
+ :type: :class:`PolicyTemplate`
+ """
+ return relationship.many_to_one(cls, 'policy_template')
+
+ @declared_attr
+ def relationship_template(cls):
+ """
+ Containing relationship template (can be ``None``).
+
+ :type: :class:`RelationshipTemplate`
+ """
+ return relationship.many_to_one(cls, 'relationship_template')
+
+ @declared_attr
+ def capability_template(cls):
+ """
+ Containing capability template (can be ``None``).
+
+ :type: :class:`CapabilityTemplate`
+ """
+ return relationship.many_to_one(cls, 'capability_template')
+
+ @declared_attr
+ def artifact_template(cls):
+ """
+ Containing artifact template (can be ``None``).
+
+ :type: :class:`ArtifactTemplate`
+ """
+ return relationship.many_to_one(cls, 'artifact_template')
+
+ @declared_attr
+ def node(cls):
+ """
+ Containing node (can be ``None``).
+
+ :type: :class:`Node`
+ """
+ return relationship.many_to_one(cls, 'node')
+
+ @declared_attr
+ def group(cls):
+ """
+ Containing group (can be ``None``).
+
+ :type: :class:`Group`
+ """
+ return relationship.many_to_one(cls, 'group')
+
+ @declared_attr
+ def policy(cls):
+ """
+ Containing policy (can be ``None``).
+
+ :type: :class:`Policy`
+ """
+ return relationship.many_to_one(cls, 'policy')
+
+ @declared_attr
+ def relationship(cls):
+ """
+ Containing relationship (can be ``None``).
+
+ :type: :class:`Relationship`
+ """
+ return relationship.many_to_one(cls, 'relationship')
+
+ @declared_attr
+ def capability(cls):
+ """
+ Containing capability (can be ``None``).
+
+ :type: :class:`Capability`
+ """
+ return relationship.many_to_one(cls, 'capability')
+
+ @declared_attr
+ def artifact(cls):
+ """
+ Containing artifact (can be ``None``).
+
+ :type: :class:`Artifact`
+ """
+ return relationship.many_to_one(cls, 'artifact')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def node_template_fk(cls):
+ return relationship.foreign_key('node_template', nullable=True)
+
+ @declared_attr
+ def group_template_fk(cls):
+ return relationship.foreign_key('group_template', nullable=True)
+
+ @declared_attr
+ def policy_template_fk(cls):
+ return relationship.foreign_key('policy_template', nullable=True)
+
+ @declared_attr
+ def relationship_template_fk(cls):
+ return relationship.foreign_key('relationship_template', nullable=True)
+
+ @declared_attr
+ def capability_template_fk(cls):
+ return relationship.foreign_key('capability_template', nullable=True)
+
+ @declared_attr
+ def artifact_template_fk(cls):
+ return relationship.foreign_key('artifact_template', nullable=True)
+
+ @declared_attr
+ def node_fk(cls):
+ return relationship.foreign_key('node', nullable=True)
+
+ @declared_attr
+ def group_fk(cls):
+ return relationship.foreign_key('group', nullable=True)
+
+ @declared_attr
+ def policy_fk(cls):
+ return relationship.foreign_key('policy', nullable=True)
+
+ @declared_attr
+ def relationship_fk(cls):
+ return relationship.foreign_key('relationship', nullable=True)
+
+ @declared_attr
+ def capability_fk(cls):
+ return relationship.foreign_key('capability', nullable=True)
+
+ @declared_attr
+ def artifact_fk(cls):
+ return relationship.foreign_key('artifact', nullable=True)
+
+ # endregion
+
+
+class AttributeBase(ParameterMixin):
+ """
+ Attribute parameter or declaration for an attribute parameter.
+ """
+
+ __tablename__ = 'attribute'
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def node_template(cls):
+ """
+ Containing node template (can be ``None``).
+
+ :type: :class:`NodeTemplate`
+ """
+ return relationship.many_to_one(cls, 'node_template')
+
+ @declared_attr
+ def node(cls):
+ """
+ Containing node (can be ``None``).
+
+ :type: :class:`Node`
+ """
+ return relationship.many_to_one(cls, 'node')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def node_template_fk(cls):
+ """For Attribute many-to-one to NodeTemplate"""
+ return relationship.foreign_key('node_template', nullable=True)
+
+ @declared_attr
+ def node_fk(cls):
+ """For Attribute many-to-one to Node"""
+ return relationship.foreign_key('node', nullable=True)
+
+ # endregion
+
+
+class TypeBase(InstanceModelMixin):
+ """
+    Type in a type hierarchy, with links to its parent and children. Can serve as the root of the
+    hierarchy.
+ """
+
+ __tablename__ = 'type'
+
+ __private_fields__ = ('parent_type_fk',)
+
+ variant = Column(Text, nullable=False)
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ _role = Column(Text, name='role')
+
+ # region one_to_one relationships
+
+ @declared_attr
+ def parent(cls):
+ """
+ Parent type (will be ``None`` for the root of a type hierarchy).
+
+ :type: :class:`Type`
+ """
+ return relationship.one_to_one_self(cls, 'parent_type_fk')
+
+ # endregion
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def children(cls):
+ """
+ Children.
+
+ :type: [:class:`Type`]
+ """
+ return relationship.one_to_many(cls, other_fk='parent_type_fk', self=True)
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def parent_type_fk(cls):
+ """For Type one-to-many to Type"""
+ return relationship.foreign_key('type', nullable=True)
+
+ # endregion
+
+ @property
+ def role(self):
+ def get_role(the_type):
+ if the_type is None:
+ return None
+ elif the_type._role is None:
+ return get_role(the_type.parent)
+ return the_type._role
+
+ return get_role(self)
+
+ @role.setter
+ def role(self, value):
+ self._role = value
+
+ def is_descendant(self, base_name, name):
+ base = self.get_descendant(base_name)
+ if base is not None:
+ if base.get_descendant(name) is not None:
+ return True
+ return False
+
+ def get_descendant(self, name):
+ if self.name == name:
+ return self
+ for child in self.children:
+ found = child.get_descendant(name)
+ if found is not None:
+ return found
+ return None
+
+ def iter_descendants(self):
+ for child in self.children:
+ yield child
+ for descendant in child.iter_descendants():
+ yield descendant
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('description', self.description),
+ ('role', self.role)))
+
+ @property
+ def as_raw_all(self):
+ types = []
+ self._append_raw_children(types)
+ return types
+
+ def _append_raw_children(self, types):
+ for child in self.children:
+ raw_child = formatting.as_raw(child)
+ raw_child['parent'] = self.name
+ types.append(raw_child)
+ child._append_raw_children(types)
+
+ @property
+ def hierarchy(self):
+ """
+ Type hierarchy as a list beginning with this type and ending in the root.
+
+ :type: [:class:`Type`]
+ """
+ return [self] + (self.parent.hierarchy if self.parent else [])
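+
+    # Illustrative sketch (hypothetical hierarchy root -> compute -> vm, where only
+    # root has an explicit role):
+    #
+    #     vm.hierarchy                          # -> [vm, compute, root]
+    #     vm.role                               # -> root's role (walks up the parents)
+    #     root.is_descendant('compute', 'vm')   # -> True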
+
+
+class MetadataBase(TemplateModelMixin):
+ """
+ Custom values associated with the service.
+
+ This model is used by both service template and service instance elements.
+
+ :ivar name: name
+ :vartype name: basestring
+ :ivar value: value
+ :vartype value: basestring
+ """
+
+ __tablename__ = 'metadata'
+
+ value = Column(Text)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('value', self.value)))
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/service_instance.py b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/service_instance.py
new file mode 100644
index 0000000..01c4da9
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/service_instance.py
@@ -0,0 +1,1695 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ARIA modeling service instance module
+"""
+
+# pylint: disable=too-many-lines, no-self-argument, no-member, abstract-method
+
+from sqlalchemy import (
+ Column,
+ Text,
+ Integer,
+ Enum,
+ Boolean
+)
+from sqlalchemy import DateTime
+from sqlalchemy.ext.declarative import declared_attr
+from sqlalchemy.ext.orderinglist import ordering_list
+
+from . import (
+ relationship,
+ types as modeling_types
+)
+from .mixins import InstanceModelMixin
+
+from ..utils import (
+ collections,
+ formatting
+)
+
+
+class ServiceBase(InstanceModelMixin):
+ """
+ Usually an instance of a :class:`ServiceTemplate` and its many associated templates (node
+ templates, group templates, policy templates, etc.). However, it can also be created
+ programmatically.
+ """
+
+ __tablename__ = 'service'
+
+ __private_fields__ = ('substitution_fk',
+ 'service_template_fk')
+
+ # region one_to_one relationships
+
+ @declared_attr
+ def substitution(cls):
+ """
+ Exposes the entire service as a single node.
+
+ :type: :class:`Substitution`
+ """
+ return relationship.one_to_one(cls, 'substitution', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def outputs(cls):
+ """
+ Output parameters.
+
+ :type: {:obj:`basestring`: :class:`Output`}
+ """
+ return relationship.one_to_many(cls, 'output', dict_key='name')
+
+ @declared_attr
+ def inputs(cls):
+ """
+ Externally provided parameters.
+
+ :type: {:obj:`basestring`: :class:`Input`}
+ """
+ return relationship.one_to_many(cls, 'input', dict_key='name')
+
+ @declared_attr
+ def updates(cls):
+ """
+ Service updates.
+
+ :type: [:class:`ServiceUpdate`]
+ """
+ return relationship.one_to_many(cls, 'service_update')
+
+ @declared_attr
+ def modifications(cls):
+ """
+ Service modifications.
+
+ :type: [:class:`ServiceModification`]
+ """
+ return relationship.one_to_many(cls, 'service_modification')
+
+ @declared_attr
+ def executions(cls):
+ """
+ Executions.
+
+ :type: [:class:`Execution`]
+ """
+ return relationship.one_to_many(cls, 'execution')
+
+ @declared_attr
+ def nodes(cls):
+ """
+ Nodes.
+
+        :type: {:obj:`basestring`: :class:`Node`}
+ """
+ return relationship.one_to_many(cls, 'node', dict_key='name')
+
+ @declared_attr
+ def groups(cls):
+ """
+ Groups.
+
+        :type: {:obj:`basestring`: :class:`Group`}
+ """
+ return relationship.one_to_many(cls, 'group', dict_key='name')
+
+ @declared_attr
+ def policies(cls):
+ """
+ Policies.
+
+        :type: {:obj:`basestring`: :class:`Policy`}
+ """
+ return relationship.one_to_many(cls, 'policy', dict_key='name')
+
+ @declared_attr
+ def workflows(cls):
+ """
+ Workflows.
+
+        :type: {:obj:`basestring`: :class:`Operation`}
+ """
+ return relationship.one_to_many(cls, 'operation', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def service_template(cls):
+ """
+ Source service template (can be ``None``).
+
+ :type: :class:`ServiceTemplate`
+ """
+ return relationship.many_to_one(cls, 'service_template')
+
+ # endregion
+
+ # region many_to_many relationships
+
+ @declared_attr
+ def meta_data(cls):
+ """
+ Associated metadata.
+
+        :type: {:obj:`basestring`: :class:`Metadata`}
+ """
+ # Warning! We cannot use the attr name "metadata" because it's used by SQLAlchemy!
+ return relationship.many_to_many(cls, 'metadata', dict_key='name')
+
+ @declared_attr
+ def plugins(cls):
+ """
+ Associated plugins.
+
+        :type: {:obj:`basestring`: :class:`Plugin`}
+ """
+ return relationship.many_to_many(cls, 'plugin', dict_key='name')
+
+ # endregion
+
+ # region association proxies
+
+ @declared_attr
+ def service_template_name(cls):
+ return relationship.association_proxy('service_template', 'name', type=':obj:`basestring`')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def substitution_fk(cls):
+ """Service one-to-one to Substitution"""
+ return relationship.foreign_key('substitution', nullable=True)
+
+ @declared_attr
+ def service_template_fk(cls):
+ """For Service many-to-one to ServiceTemplate"""
+ return relationship.foreign_key('service_template', nullable=True)
+
+ # endregion
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ created_at = Column(DateTime, nullable=False, index=True, doc="""
+ Creation timestamp.
+
+ :type: :class:`~datetime.datetime`
+ """)
+
+ updated_at = Column(DateTime, doc="""
+ Update timestamp.
+
+ :type: :class:`~datetime.datetime`
+ """)
+
+ def get_node_by_type(self, type_name):
+ """
+        Finds the first node of the named type (or of a descendant type).
+ """
+ service_template = self.service_template
+
+ if service_template is not None:
+ node_types = service_template.node_types
+ if node_types is not None:
+ for node in self.nodes.itervalues():
+ if node_types.is_descendant(type_name, node.type.name):
+ return node
+
+ return None
+
+ def get_policy_by_type(self, type_name):
+ """
+        Finds the first policy of the named type (or of a descendant type).
+ """
+ service_template = self.service_template
+
+ if service_template is not None:
+ policy_types = service_template.policy_types
+ if policy_types is not None:
+ for policy in self.policies.itervalues():
+ if policy_types.is_descendant(type_name, policy.type.name):
+ return policy
+
+ return None
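+
+    # Illustrative usage (assuming a TOSCA-style node type hierarchy on the source
+    # service template):
+    #
+    #     node = service.get_node_by_type('tosca.nodes.WebServer')
+    #     # -> first node whose type is WebServer or one of its descendants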
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('description', self.description),
+ ('metadata', formatting.as_raw_dict(self.meta_data)),
+ ('nodes', formatting.as_raw_list(self.nodes)),
+ ('groups', formatting.as_raw_list(self.groups)),
+ ('policies', formatting.as_raw_list(self.policies)),
+ ('substitution', formatting.as_raw(self.substitution)),
+ ('inputs', formatting.as_raw_dict(self.inputs)),
+ ('outputs', formatting.as_raw_dict(self.outputs)),
+ ('workflows', formatting.as_raw_list(self.workflows))))
+
+
+class NodeBase(InstanceModelMixin):
+ """
+ Typed vertex in the service topology.
+
+ Nodes may have zero or more :class:`Relationship` instances to other nodes, together forming
+ a many-to-many node graph.
+
+ Usually an instance of a :class:`NodeTemplate`.
+ """
+
+ __tablename__ = 'node'
+
+ __private_fields__ = ('type_fk',
+ 'host_fk',
+ 'service_fk',
+ 'node_template_fk')
+
+ INITIAL = 'initial'
+ CREATING = 'creating'
+ CREATED = 'created'
+ CONFIGURING = 'configuring'
+ CONFIGURED = 'configured'
+ STARTING = 'starting'
+ STARTED = 'started'
+ STOPPING = 'stopping'
+ DELETING = 'deleting'
+ DELETED = 'deleted'
+ ERROR = 'error'
+
+    # Note: 'deleted' isn't actually part of the TOSCA spec; according to the description of the
+    # 'deleting' state, "Node is transitioning from its current state to one where it is deleted and
+    # its state is no longer tracked by the instance model." However, we prefer to be able to
+    # retrieve information about deleted nodes, so we chose to add a 'deleted' state to enable us
+    # to do so.
+
+ STATES = (INITIAL, CREATING, CREATED, CONFIGURING, CONFIGURED, STARTING, STARTED, STOPPING,
+ DELETING, DELETED, ERROR)
+
+ _OP_TO_STATE = {'create': {'transitional': CREATING, 'finished': CREATED},
+ 'configure': {'transitional': CONFIGURING, 'finished': CONFIGURED},
+ 'start': {'transitional': STARTING, 'finished': STARTED},
+ 'stop': {'transitional': STOPPING, 'finished': CONFIGURED},
+ 'delete': {'transitional': DELETING, 'finished': DELETED}}
+
+ # region one_to_one relationships
+
+ @declared_attr
+ def host(cls): # pylint: disable=method-hidden
+ """
+ Node in which we are hosted (can be ``None``).
+
+ Normally the host node is found by following the relationship graph (relationships with
+ ``host`` roles) to final nodes (with ``host`` roles).
+
+ :type: :class:`Node`
+ """
+ return relationship.one_to_one_self(cls, 'host_fk')
+
+ # endregion
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def tasks(cls):
+ """
+ Associated tasks.
+
+ :type: [:class:`Task`]
+ """
+ return relationship.one_to_many(cls, 'task')
+
+ @declared_attr
+ def interfaces(cls):
+ """
+ Associated interfaces.
+
+ :type: {:obj:`basestring`: :class:`Interface`}
+ """
+ return relationship.one_to_many(cls, 'interface', dict_key='name')
+
+ @declared_attr
+ def properties(cls):
+ """
+ Associated immutable parameters.
+
+ :type: {:obj:`basestring`: :class:`Property`}
+ """
+ return relationship.one_to_many(cls, 'property', dict_key='name')
+
+ @declared_attr
+ def attributes(cls):
+ """
+ Associated mutable parameters.
+
+ :type: {:obj:`basestring`: :class:`Attribute`}
+ """
+ return relationship.one_to_many(cls, 'attribute', dict_key='name')
+
+ @declared_attr
+ def artifacts(cls):
+ """
+ Associated artifacts.
+
+ :type: {:obj:`basestring`: :class:`Artifact`}
+ """
+ return relationship.one_to_many(cls, 'artifact', dict_key='name')
+
+ @declared_attr
+ def capabilities(cls):
+ """
+ Associated exposed capabilities.
+
+ :type: {:obj:`basestring`: :class:`Capability`}
+ """
+ return relationship.one_to_many(cls, 'capability', dict_key='name')
+
+ @declared_attr
+ def outbound_relationships(cls):
+ """
+ Relationships to other nodes.
+
+ :type: [:class:`Relationship`]
+ """
+ return relationship.one_to_many(
+ cls, 'relationship', other_fk='source_node_fk', back_populates='source_node',
+ rel_kwargs=dict(
+ order_by='Relationship.source_position',
+ collection_class=ordering_list('source_position', count_from=0)
+ )
+ )
+
+ @declared_attr
+ def inbound_relationships(cls):
+ """
+ Relationships from other nodes.
+
+ :type: [:class:`Relationship`]
+ """
+ return relationship.one_to_many(
+ cls, 'relationship', other_fk='target_node_fk', back_populates='target_node',
+ rel_kwargs=dict(
+ order_by='Relationship.target_position',
+ collection_class=ordering_list('target_position', count_from=0)
+ )
+ )
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def service(cls):
+ """
+ Containing service.
+
+ :type: :class:`Service`
+ """
+ return relationship.many_to_one(cls, 'service')
+
+ @declared_attr
+ def node_template(cls):
+ """
+ Source node template (can be ``None``).
+
+ :type: :class:`NodeTemplate`
+ """
+ return relationship.many_to_one(cls, 'node_template')
+
+ @declared_attr
+ def type(cls):
+ """
+ Node type.
+
+ :type: :class:`Type`
+ """
+ return relationship.many_to_one(cls, 'type', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region association proxies
+
+ @declared_attr
+ def service_name(cls):
+ return relationship.association_proxy('service', 'name', type=':obj:`basestring`')
+
+ @declared_attr
+ def node_template_name(cls):
+ return relationship.association_proxy('node_template', 'name', type=':obj:`basestring`')
+
+ # endregion
+
+ # region foreign_keys
+
+ @declared_attr
+ def type_fk(cls):
+ """For Node many-to-one to Type"""
+ return relationship.foreign_key('type')
+
+ @declared_attr
+ def host_fk(cls):
+ """For Node one-to-one to Node"""
+ return relationship.foreign_key('node', nullable=True)
+
+ @declared_attr
+ def service_fk(cls):
+ """For Service one-to-many to Node"""
+ return relationship.foreign_key('service')
+
+ @declared_attr
+ def node_template_fk(cls):
+ """For Node many-to-one to NodeTemplate"""
+ return relationship.foreign_key('node_template')
+
+ # endregion
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ state = Column(Enum(*STATES, name='node_state'), nullable=False, default=INITIAL, doc="""
+ TOSCA state.
+
+ :type: :obj:`basestring`
+ """)
+
+ version = Column(Integer, default=1, doc="""
+ Used by :mod:`aria.storage.instrumentation`.
+
+ :type: :obj:`int`
+ """)
+
+ __mapper_args__ = {'version_id_col': version} # Enable SQLAlchemy automatic version counting
+
+ @classmethod
+ def determine_state(cls, op_name, is_transitional):
+ """
+        :returns: the state the node should be in as a result of running the operation on it.
+
+        For example, if we are running ``tosca.interfaces.node.lifecycle.Standard.create``, the
+        resulting state should be either 'creating' (if the task just started) or 'created'
+        (if the task ended).
+
+        If the operation is not a standard TOSCA lifecycle operation, ``None`` is returned.
+ """
+
+ state_type = 'transitional' if is_transitional else 'finished'
+ try:
+ return cls._OP_TO_STATE[op_name][state_type]
+ except KeyError:
+ return None
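+
+    # Illustrative usage (assuming the concrete Node model):
+    #
+    #     Node.determine_state('create', is_transitional=True)    # -> 'creating'
+    #     Node.determine_state('create', is_transitional=False)   # -> 'created'
+    #     Node.determine_state('my_custom_op', False)             # -> None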
+
+ def is_available(self):
+ return self.state not in (self.INITIAL, self.DELETED, self.ERROR)
+
+ def get_outbound_relationship_by_name(self, name):
+ for the_relationship in self.outbound_relationships:
+ if the_relationship.name == name:
+ return the_relationship
+ return None
+
+ def get_inbound_relationship_by_name(self, name):
+ for the_relationship in self.inbound_relationships:
+ if the_relationship.name == name:
+ return the_relationship
+ return None
+
+ @property
+ def host_address(self):
+ if self.host and self.host.attributes:
+ attribute = self.host.attributes.get('ip')
+ if attribute is not None:
+ return attribute.value
+ return None
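+
+    # Illustrative sketch: if this node is hosted on a node whose 'ip' attribute has
+    # been set (e.g. by an IaaS plugin), then:
+    #
+    #     node.host_address   # -> node.host.attributes['ip'].value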
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('type_name', self.type.name),
+ ('properties', formatting.as_raw_dict(self.properties)),
+            ('attributes', formatting.as_raw_dict(self.attributes)),
+ ('interfaces', formatting.as_raw_list(self.interfaces)),
+ ('artifacts', formatting.as_raw_list(self.artifacts)),
+ ('capabilities', formatting.as_raw_list(self.capabilities)),
+ ('relationships', formatting.as_raw_list(self.outbound_relationships))))
+
+
+class GroupBase(InstanceModelMixin):
+ """
+ Typed logical container for zero or more :class:`Node` instances.
+
+ Usually an instance of a :class:`GroupTemplate`.
+ """
+
+ __tablename__ = 'group'
+
+ __private_fields__ = ('type_fk',
+ 'service_fk',
+ 'group_template_fk')
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def properties(cls):
+ """
+ Associated immutable parameters.
+
+ :type: {:obj:`basestring`: :class:`Property`}
+ """
+ return relationship.one_to_many(cls, 'property', dict_key='name')
+
+ @declared_attr
+ def interfaces(cls):
+ """
+ Associated interfaces.
+
+ :type: {:obj:`basestring`: :class:`Interface`}
+ """
+ return relationship.one_to_many(cls, 'interface', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def service(cls):
+ """
+ Containing service.
+
+ :type: :class:`Service`
+ """
+ return relationship.many_to_one(cls, 'service')
+
+ @declared_attr
+ def group_template(cls):
+ """
+ Source group template (can be ``None``).
+
+ :type: :class:`GroupTemplate`
+ """
+ return relationship.many_to_one(cls, 'group_template')
+
+ @declared_attr
+ def type(cls):
+ """
+ Group type.
+
+ :type: :class:`Type`
+ """
+ return relationship.many_to_one(cls, 'type', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region many_to_many relationships
+
+ @declared_attr
+ def nodes(cls):
+ """
+ Member nodes.
+
+ :type: [:class:`Node`]
+ """
+ return relationship.many_to_many(cls, 'node')
+
+ # endregion
+
+ # region foreign_keys
+
+ @declared_attr
+ def type_fk(cls):
+ """For Group many-to-one to Type"""
+ return relationship.foreign_key('type')
+
+ @declared_attr
+ def service_fk(cls):
+ """For Service one-to-many to Group"""
+ return relationship.foreign_key('service')
+
+ @declared_attr
+ def group_template_fk(cls):
+ """For Group many-to-one to GroupTemplate"""
+ return relationship.foreign_key('group_template', nullable=True)
+
+ # endregion
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('properties', formatting.as_raw_dict(self.properties)),
+ ('interfaces', formatting.as_raw_list(self.interfaces))))
+
+
+class PolicyBase(InstanceModelMixin):
+ """
+ Typed set of orchestration hints applied to zero or more :class:`Node` or :class:`Group`
+ instances.
+
+ Usually an instance of a :class:`PolicyTemplate`.
+ """
+
+ __tablename__ = 'policy'
+
+ __private_fields__ = ('type_fk',
+ 'service_fk',
+ 'policy_template_fk')
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def properties(cls):
+ """
+ Associated immutable parameters.
+
+ :type: {:obj:`basestring`: :class:`Property`}
+ """
+ return relationship.one_to_many(cls, 'property', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def service(cls):
+ """
+ Containing service.
+
+ :type: :class:`Service`
+ """
+ return relationship.many_to_one(cls, 'service')
+
+ @declared_attr
+ def policy_template(cls):
+ """
+ Source policy template (can be ``None``).
+
+ :type: :class:`PolicyTemplate`
+ """
+ return relationship.many_to_one(cls, 'policy_template')
+
+ @declared_attr
+ def type(cls):
+ """
+        Policy type.
+
+ :type: :class:`Type`
+ """
+ return relationship.many_to_one(cls, 'type', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region many_to_many relationships
+
+ @declared_attr
+ def nodes(cls):
+ """
+ Policy is enacted on these nodes.
+
+ :type: {:obj:`basestring`: :class:`Node`}
+ """
+ return relationship.many_to_many(cls, 'node')
+
+ @declared_attr
+ def groups(cls):
+ """
+ Policy is enacted on nodes in these groups.
+
+ :type: {:obj:`basestring`: :class:`Group`}
+ """
+ return relationship.many_to_many(cls, 'group')
+
+ # endregion
+
+ # region foreign_keys
+
+ @declared_attr
+ def type_fk(cls):
+ """For Policy many-to-one to Type"""
+ return relationship.foreign_key('type')
+
+ @declared_attr
+ def service_fk(cls):
+ """For Service one-to-many to Policy"""
+ return relationship.foreign_key('service')
+
+ @declared_attr
+ def policy_template_fk(cls):
+ """For Policy many-to-one to PolicyTemplate"""
+ return relationship.foreign_key('policy_template', nullable=True)
+
+ # endregion
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('type_name', self.type.name),
+ ('properties', formatting.as_raw_dict(self.properties))))
+
+
+class SubstitutionBase(InstanceModelMixin):
+ """
+ Exposes the entire service as a single node.
+
+ Usually an instance of a :class:`SubstitutionTemplate`.
+ """
+
+ __tablename__ = 'substitution'
+
+ __private_fields__ = ('node_type_fk',
+ 'substitution_template_fk')
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def mappings(cls):
+ """
+ Map requirement and capabilities to exposed node.
+
+ :type: {:obj:`basestring`: :class:`SubstitutionMapping`}
+ """
+ return relationship.one_to_many(cls, 'substitution_mapping', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def service(cls):
+ """
+ Containing service.
+
+ :type: :class:`Service`
+ """
+ return relationship.one_to_one(cls, 'service', back_populates=relationship.NO_BACK_POP)
+
+ @declared_attr
+ def substitution_template(cls):
+ """
+ Source substitution template (can be ``None``).
+
+ :type: :class:`SubstitutionTemplate`
+ """
+ return relationship.many_to_one(cls, 'substitution_template')
+
+ @declared_attr
+ def node_type(cls):
+ """
+ Exposed node type.
+
+ :type: :class:`Type`
+ """
+ return relationship.many_to_one(cls, 'type', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region foreign_keys
+
+ @declared_attr
+ def node_type_fk(cls):
+ """For Substitution many-to-one to Type"""
+ return relationship.foreign_key('type')
+
+ @declared_attr
+ def substitution_template_fk(cls):
+ """For Substitution many-to-one to SubstitutionTemplate"""
+ return relationship.foreign_key('substitution_template', nullable=True)
+
+ # endregion
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('node_type_name', self.node_type.name),
+ ('mappings', formatting.as_raw_dict(self.mappings))))
+
+
+class SubstitutionMappingBase(InstanceModelMixin):
+ """
+ Used by :class:`Substitution` to map a capability or a requirement to the exposed node.
+
+ The :attr:`name` field should match the capability or requirement template name on the exposed
+ node's type.
+
+ Only one of :attr:`capability` and :attr:`requirement_template` can be set. If the latter is
+ set, then :attr:`node` must also be set.
+
+ Usually an instance of a :class:`SubstitutionMappingTemplate`.
+ """
+
+ __tablename__ = 'substitution_mapping'
+
+ __private_fields__ = ('substitution_fk',
+ 'node_fk',
+ 'capability_fk',
+ 'requirement_template_fk')
+
+ # region one_to_one relationships
+
+ @declared_attr
+ def capability(cls):
+ """
+ Capability to expose (can be ``None``).
+
+ :type: :class:`Capability`
+ """
+ return relationship.one_to_one(cls, 'capability', back_populates=relationship.NO_BACK_POP)
+
+ @declared_attr
+ def requirement_template(cls):
+ """
+ Requirement template to expose (can be ``None``).
+
+ :type: :class:`RequirementTemplate`
+ """
+ return relationship.one_to_one(cls, 'requirement_template',
+ back_populates=relationship.NO_BACK_POP)
+
+ @declared_attr
+ def node(cls):
+ """
+ Node for which to expose :attr:`requirement_template` (can be ``None``).
+
+ :type: :class:`Node`
+ """
+ return relationship.one_to_one(cls, 'node', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def substitution(cls):
+ """
+ Containing substitution.
+
+ :type: :class:`Substitution`
+ """
+ return relationship.many_to_one(cls, 'substitution', back_populates='mappings')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def substitution_fk(cls):
+ """For Substitution one-to-many to SubstitutionMapping"""
+ return relationship.foreign_key('substitution')
+
+ @declared_attr
+ def capability_fk(cls):
+ """For Substitution one-to-one to Capability"""
+ return relationship.foreign_key('capability', nullable=True)
+
+ @declared_attr
+ def node_fk(cls):
+ """For Substitution one-to-one to Node"""
+ return relationship.foreign_key('node', nullable=True)
+
+ @declared_attr
+ def requirement_template_fk(cls):
+ """For Substitution one-to-one to RequirementTemplate"""
+ return relationship.foreign_key('requirement_template', nullable=True)
+
+ # endregion
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),))
+
+
+class RelationshipBase(InstanceModelMixin):
+ """
+ Optionally-typed edge in the service topology, connecting a :class:`Node` to a
+ :class:`Capability` of another node.
+
+ Might be an instance of :class:`RelationshipTemplate` and/or :class:`RequirementTemplate`.
+ """
+
+ __tablename__ = 'relationship'
+
+ __private_fields__ = ('type_fk',
+ 'source_node_fk',
+ 'target_node_fk',
+ 'target_capability_fk',
+ 'requirement_template_fk',
+ 'relationship_template_fk',
+ 'target_position',
+ 'source_position')
+
+ # region one_to_one relationships
+
+ @declared_attr
+ def target_capability(cls):
+ """
+ Target capability.
+
+ :type: :class:`Capability`
+ """
+ return relationship.one_to_one(cls, 'capability', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def tasks(cls):
+ """
+ Associated tasks.
+
+ :type: [:class:`Task`]
+ """
+ return relationship.one_to_many(cls, 'task')
+
+ @declared_attr
+ def interfaces(cls):
+ """
+ Associated interfaces.
+
+ :type: {:obj:`basestring`: :class:`Interface`}
+ """
+ return relationship.one_to_many(cls, 'interface', dict_key='name')
+
+ @declared_attr
+ def properties(cls):
+ """
+ Associated immutable parameters.
+
+ :type: {:obj:`basestring`: :class:`Property`}
+ """
+ return relationship.one_to_many(cls, 'property', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def source_node(cls):
+ """
+ Source node.
+
+ :type: :class:`Node`
+ """
+ return relationship.many_to_one(
+ cls, 'node', fk='source_node_fk', back_populates='outbound_relationships')
+
+ @declared_attr
+ def target_node(cls):
+ """
+ Target node.
+
+ :type: :class:`Node`
+ """
+ return relationship.many_to_one(
+ cls, 'node', fk='target_node_fk', back_populates='inbound_relationships')
+
+ @declared_attr
+ def relationship_template(cls):
+ """
+ Source relationship template (can be ``None``).
+
+ :type: :class:`RelationshipTemplate`
+ """
+ return relationship.many_to_one(cls, 'relationship_template')
+
+ @declared_attr
+ def requirement_template(cls):
+ """
+ Source requirement template (can be ``None``).
+
+ :type: :class:`RequirementTemplate`
+ """
+ return relationship.many_to_one(cls, 'requirement_template')
+
+ @declared_attr
+ def type(cls):
+ """
+ Relationship type.
+
+ :type: :class:`Type`
+ """
+ return relationship.many_to_one(cls, 'type', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region association proxies
+
+ @declared_attr
+ def source_node_name(cls):
+ return relationship.association_proxy('source_node', 'name')
+
+ @declared_attr
+ def target_node_name(cls):
+ return relationship.association_proxy('target_node', 'name')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def type_fk(cls):
+ """For Relationship many-to-one to Type"""
+ return relationship.foreign_key('type', nullable=True)
+
+ @declared_attr
+ def source_node_fk(cls):
+ """For Node one-to-many to Relationship"""
+ return relationship.foreign_key('node')
+
+ @declared_attr
+ def target_node_fk(cls):
+ """For Node one-to-many to Relationship"""
+ return relationship.foreign_key('node')
+
+ @declared_attr
+ def target_capability_fk(cls):
+ """For Relationship one-to-one to Capability"""
+ return relationship.foreign_key('capability', nullable=True)
+
+ @declared_attr
+ def requirement_template_fk(cls):
+ """For Relationship many-to-one to RequirementTemplate"""
+ return relationship.foreign_key('requirement_template', nullable=True)
+
+ @declared_attr
+ def relationship_template_fk(cls):
+ """For Relationship many-to-one to RelationshipTemplate"""
+ return relationship.foreign_key('relationship_template', nullable=True)
+
+ # endregion
+
+ source_position = Column(Integer, doc="""
+ Position at source.
+
+ :type: :obj:`int`
+ """)
+
+ target_position = Column(Integer, doc="""
+ Position at target.
+
+ :type: :obj:`int`
+ """)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('target_node_id', self.target_node.name),
+ ('type_name', self.type.name
+ if self.type is not None else None),
+ ('template_name', self.relationship_template.name
+ if self.relationship_template is not None else None),
+ ('properties', formatting.as_raw_dict(self.properties)),
+ ('interfaces', formatting.as_raw_list(self.interfaces))))
+
+
+class CapabilityBase(InstanceModelMixin):
+ """
+ Typed attachment serving two purposes: to provide extra properties and attributes to a
+ :class:`Node`, and to expose targets for :class:`Relationship` instances from other nodes.
+
+ Usually an instance of a :class:`CapabilityTemplate`.
+ """
+
+ __tablename__ = 'capability'
+
+ __private_fields__ = ('capability_fk',
+ 'node_fk',
+ 'capability_template_fk')
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def properties(cls):
+ """
+ Associated immutable parameters.
+
+ :type: {:obj:`basestring`: :class:`Property`}
+ """
+ return relationship.one_to_many(cls, 'property', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def node(cls):
+ """
+ Containing node.
+
+ :type: :class:`Node`
+ """
+ return relationship.many_to_one(cls, 'node')
+
+ @declared_attr
+ def capability_template(cls):
+ """
+ Source capability template (can be ``None``).
+
+ :type: :class:`CapabilityTemplate`
+ """
+ return relationship.many_to_one(cls, 'capability_template')
+
+ @declared_attr
+ def type(cls):
+ """
+ Capability type.
+
+ :type: :class:`Type`
+ """
+ return relationship.many_to_one(cls, 'type', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region foreign_keys
+
+ @declared_attr
+ def type_fk(cls):
+ """For Capability many-to-one to Type"""
+ return relationship.foreign_key('type')
+
+ @declared_attr
+ def node_fk(cls):
+ """For Node one-to-many to Capability"""
+ return relationship.foreign_key('node')
+
+ @declared_attr
+ def capability_template_fk(cls):
+ """For Capability many-to-one to CapabilityTemplate"""
+ return relationship.foreign_key('capability_template', nullable=True)
+
+ # endregion
+
+ min_occurrences = Column(Integer, default=None, doc="""
+ Minimum number of requirement matches required.
+
+ :type: :obj:`int`
+ """)
+
+ max_occurrences = Column(Integer, default=None, doc="""
+ Maximum number of requirement matches allowed.
+
+ :type: :obj:`int`
+ """)
+
+ occurrences = Column(Integer, default=0, doc="""
+ Number of requirement matches.
+
+ :type: :obj:`int`
+ """)
+
+ @property
+ def has_enough_relationships(self):
+ if self.min_occurrences is not None:
+ return self.occurrences >= self.min_occurrences
+ return True
+
+ def relate(self):
+ if self.max_occurrences is not None:
+ if self.occurrences == self.max_occurrences:
+ return False
+ self.occurrences += 1
+ return True
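+
+    # Illustrative sketch: with min_occurrences=1 and max_occurrences=1, the first
+    # relate() call satisfies the capability and a second call is rejected:
+    #
+    #     capability.relate()                   # -> True  (occurrences becomes 1)
+    #     capability.has_enough_relationships   # -> True
+    #     capability.relate()                   # -> False (max reached)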
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('type_name', self.type.name),
+ ('properties', formatting.as_raw_dict(self.properties))))
+
+
+class InterfaceBase(InstanceModelMixin):
+ """
+ Typed bundle of :class:`Operation` instances.
+
+ Can be associated with a :class:`Node`, a :class:`Group`, or a :class:`Relationship`.
+
+ Usually an instance of a :class:`InterfaceTemplate`.
+ """
+
+ __tablename__ = 'interface'
+
+ __private_fields__ = ('type_fk',
+ 'node_fk',
+ 'group_fk',
+ 'relationship_fk',
+ 'interface_template_fk')
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def inputs(cls):
+ """
+ Parameters for all operations of the interface.
+
+ :type: {:obj:`basestring`: :class:`Input`}
+ """
+ return relationship.one_to_many(cls, 'input', dict_key='name')
+
+ @declared_attr
+ def operations(cls):
+ """
+ Associated operations.
+
+ :type: {:obj:`basestring`: :class:`Operation`}
+ """
+ return relationship.one_to_many(cls, 'operation', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def node(cls):
+ """
+ Containing node (can be ``None``).
+
+ :type: :class:`Node`
+ """
+ return relationship.many_to_one(cls, 'node')
+
+ @declared_attr
+ def group(cls):
+ """
+ Containing group (can be ``None``).
+
+ :type: :class:`Group`
+ """
+ return relationship.many_to_one(cls, 'group')
+
+ @declared_attr
+ def relationship(cls):
+ """
+ Containing relationship (can be ``None``).
+
+ :type: :class:`Relationship`
+ """
+ return relationship.many_to_one(cls, 'relationship')
+
+ @declared_attr
+ def interface_template(cls):
+ """
+ Source interface template (can be ``None``).
+
+ :type: :class:`InterfaceTemplate`
+ """
+ return relationship.many_to_one(cls, 'interface_template')
+
+ @declared_attr
+ def type(cls):
+ """
+ Interface type.
+
+ :type: :class:`Type`
+ """
+ return relationship.many_to_one(cls, 'type', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region foreign_keys
+
+ @declared_attr
+ def type_fk(cls):
+ """For Interface many-to-one to Type"""
+ return relationship.foreign_key('type')
+
+ @declared_attr
+ def node_fk(cls):
+ """For Node one-to-many to Interface"""
+ return relationship.foreign_key('node', nullable=True)
+
+ @declared_attr
+ def group_fk(cls):
+ """For Group one-to-many to Interface"""
+ return relationship.foreign_key('group', nullable=True)
+
+ @declared_attr
+ def relationship_fk(cls):
+ """For Relationship one-to-many to Interface"""
+ return relationship.foreign_key('relationship', nullable=True)
+
+ @declared_attr
+ def interface_template_fk(cls):
+ """For Interface many-to-one to InterfaceTemplate"""
+ return relationship.foreign_key('interface_template', nullable=True)
+
+ # endregion
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('description', self.description),
+ ('type_name', self.type.name),
+ ('inputs', formatting.as_raw_dict(self.inputs)),
+ ('operations', formatting.as_raw_list(self.operations))))
+
+
+class OperationBase(InstanceModelMixin):
+ """
+ Entry points to Python functions called as part of a workflow execution.
+
+    The operation signature (its :attr:`name` and the names and types of its :attr:`inputs`) is
+    declared by the type of the :class:`Interface`; however, each operation can provide its own
+    :attr:`implementation` as well as additional inputs.
+
+ The Python :attr:`function` is usually provided by an associated :class:`Plugin`. Its purpose is
+ to execute the implementation, providing it with both the operation's and interface's inputs.
+ The :attr:`arguments` of the function should be set according to the specific signature of the
+ function.
+
+ Additionally, :attr:`configuration` parameters can be provided as hints to configure the
+ function's behavior. For example, they can be used to configure remote execution credentials.
+
+ Might be an instance of :class:`OperationTemplate`.
+ """
+
+ __tablename__ = 'operation'
+
+ __private_fields__ = ('service_fk',
+ 'interface_fk',
+ 'plugin_fk',
+ 'operation_template_fk')
+
+ # region one_to_one relationships
+
+ @declared_attr
+ def plugin(cls):
+ """
+ Associated plugin.
+
+ :type: :class:`Plugin`
+ """
+ return relationship.one_to_one(cls, 'plugin', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def inputs(cls):
+ """
+ Parameters provided to the :attr:`implementation`.
+
+ :type: {:obj:`basestring`: :class:`Input`}
+ """
+ return relationship.one_to_many(cls, 'input', dict_key='name')
+
+ @declared_attr
+ def arguments(cls):
+ """
+ Arguments sent to the Python :attr:`function`.
+
+ :type: {:obj:`basestring`: :class:`Argument`}
+ """
+ return relationship.one_to_many(cls, 'argument', dict_key='name')
+
+ @declared_attr
+ def configurations(cls):
+ """
+ Configuration parameters for the Python :attr:`function`.
+
+ :type: {:obj:`basestring`: :class:`Configuration`}
+ """
+ return relationship.one_to_many(cls, 'configuration', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def service(cls):
+ """
+ Containing service (can be ``None``). For workflow operations.
+
+ :type: :class:`Service`
+ """
+ return relationship.many_to_one(cls, 'service', back_populates='workflows')
+
+ @declared_attr
+ def interface(cls):
+ """
+ Containing interface (can be ``None``).
+
+ :type: :class:`Interface`
+ """
+ return relationship.many_to_one(cls, 'interface')
+
+ @declared_attr
+ def operation_template(cls):
+ """
+ Source operation template (can be ``None``).
+
+ :type: :class:`OperationTemplate`
+ """
+ return relationship.many_to_one(cls, 'operation_template')
+
+ # endregion
+
+ # region foreign_keys
+
+ @declared_attr
+ def service_fk(cls):
+ """For Service one-to-many to Operation"""
+ return relationship.foreign_key('service', nullable=True)
+
+ @declared_attr
+ def interface_fk(cls):
+ """For Interface one-to-many to Operation"""
+ return relationship.foreign_key('interface', nullable=True)
+
+ @declared_attr
+ def plugin_fk(cls):
+ """For Operation one-to-one to Plugin"""
+ return relationship.foreign_key('plugin', nullable=True)
+
+ @declared_attr
+ def operation_template_fk(cls):
+ """For Operation many-to-one to OperationTemplate"""
+ return relationship.foreign_key('operation_template', nullable=True)
+
+ # endregion
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ relationship_edge = Column(Boolean, doc="""
+    When ``True``, the operation is on the relationship's target edge; when ``False``, on the
+    source edge (only used by operations on relationships).
+
+ :type: :obj:`bool`
+ """)
+
+ implementation = Column(Text, doc="""
+ Implementation (usually the name of an artifact).
+
+ :type: :obj:`basestring`
+ """)
+
+ dependencies = Column(modeling_types.StrictList(item_cls=basestring), doc="""
+ Dependencies (usually names of artifacts).
+
+ :type: [:obj:`basestring`]
+ """)
+
+ function = Column(Text, doc="""
+ Full path to Python function.
+
+ :type: :obj:`basestring`
+ """)
+
+ executor = Column(Text, doc="""
+ Name of executor.
+
+ :type: :obj:`basestring`
+ """)
+
+ max_attempts = Column(Integer, doc="""
+ Maximum number of attempts allowed in case of task failure.
+
+ :type: :obj:`int`
+ """)
+
+ retry_interval = Column(Integer, doc="""
+ Interval between task retry attempts (in seconds).
+
+    :type: :obj:`int`
+ """)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('description', self.description),
+ ('implementation', self.implementation),
+ ('dependencies', self.dependencies),
+ ('inputs', formatting.as_raw_dict(self.inputs))))
+
+
+class ArtifactBase(InstanceModelMixin):
+ """
+ Typed file, either provided in a CSAR or downloaded from a repository.
+
+ Usually an instance of :class:`ArtifactTemplate`.
+ """
+
+ __tablename__ = 'artifact'
+
+ __private_fields__ = ('type_fk',
+ 'node_fk',
+ 'artifact_template_fk')
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def properties(cls):
+ """
+ Associated immutable parameters.
+
+ :type: {:obj:`basestring`: :class:`Property`}
+ """
+ return relationship.one_to_many(cls, 'property', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def node(cls):
+ """
+ Containing node.
+
+ :type: :class:`Node`
+ """
+ return relationship.many_to_one(cls, 'node')
+
+ @declared_attr
+ def artifact_template(cls):
+ """
+ Source artifact template (can be ``None``).
+
+ :type: :class:`ArtifactTemplate`
+ """
+ return relationship.many_to_one(cls, 'artifact_template')
+
+ @declared_attr
+ def type(cls):
+ """
+ Artifact type.
+
+ :type: :class:`Type`
+ """
+ return relationship.many_to_one(cls, 'type', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region foreign_keys
+
+ @declared_attr
+ def type_fk(cls):
+ """For Artifact many-to-one to Type"""
+ return relationship.foreign_key('type')
+
+ @declared_attr
+ def node_fk(cls):
+ """For Node one-to-many to Artifact"""
+ return relationship.foreign_key('node')
+
+ @declared_attr
+ def artifact_template_fk(cls):
+ """For Artifact many-to-one to ArtifactTemplate"""
+ return relationship.foreign_key('artifact_template', nullable=True)
+
+ # endregion
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ source_path = Column(Text, doc="""
+ Source path (in CSAR or repository).
+
+ :type: :obj:`basestring`
+ """)
+
+ target_path = Column(Text, doc="""
+    Path at which to install the artifact at the destination.
+
+ :type: :obj:`basestring`
+ """)
+
+ repository_url = Column(Text, doc="""
+ Repository URL.
+
+ :type: :obj:`basestring`
+ """)
+
+ repository_credential = Column(modeling_types.StrictDict(basestring, basestring), doc="""
+ Credentials for accessing the repository.
+
+ :type: {:obj:`basestring`, :obj:`basestring`}
+ """)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('description', self.description),
+ ('type_name', self.type.name),
+ ('source_path', self.source_path),
+ ('target_path', self.target_path),
+ ('repository_url', self.repository_url),
+ ('repository_credential', formatting.as_agnostic(self.repository_credential)),
+ ('properties', formatting.as_raw_dict(self.properties))))
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/service_template.py b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/service_template.py
new file mode 100644
index 0000000..cd0adb4
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/service_template.py
@@ -0,0 +1,1758 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ARIA modeling service template module
+"""
+
+# pylint: disable=too-many-lines, no-self-argument, no-member, abstract-method
+
+from __future__ import absolute_import # so we can import standard 'types'
+
+from sqlalchemy import (
+ Column,
+ Text,
+ Integer,
+ Boolean,
+ DateTime,
+ PickleType
+)
+from sqlalchemy.ext.declarative import declared_attr
+
+from ..utils import (collections, formatting)
+from .mixins import TemplateModelMixin
+from . import (
+ relationship,
+ types as modeling_types
+)
+
+
+class ServiceTemplateBase(TemplateModelMixin):
+ """
+ Template for creating :class:`Service` instances.
+
+ Usually created by various DSL parsers, such as ARIA's TOSCA extension. However, it can also be
+ created programmatically.
+ """
+
+ __tablename__ = 'service_template'
+
+ __private_fields__ = ('substitution_template_fk',
+ 'node_type_fk',
+ 'group_type_fk',
+ 'policy_type_fk',
+ 'relationship_type_fk',
+ 'capability_type_fk',
+ 'interface_type_fk',
+ 'artifact_type_fk')
+
+ # region one_to_one relationships
+
+ @declared_attr
+ def substitution_template(cls):
+ """
+ Exposes an entire service as a single node.
+
+ :type: :class:`SubstitutionTemplate`
+ """
+ return relationship.one_to_one(
+ cls, 'substitution_template', back_populates=relationship.NO_BACK_POP)
+
+ @declared_attr
+ def node_types(cls):
+ """
+ Base for the node type hierarchy.
+
+ :type: :class:`Type`
+ """
+ return relationship.one_to_one(
+ cls, 'type', fk='node_type_fk', back_populates=relationship.NO_BACK_POP)
+
+ @declared_attr
+ def group_types(cls):
+ """
+ Base for the group type hierarchy.
+
+ :type: :class:`Type`
+ """
+ return relationship.one_to_one(
+ cls, 'type', fk='group_type_fk', back_populates=relationship.NO_BACK_POP)
+
+ @declared_attr
+ def policy_types(cls):
+ """
+ Base for the policy type hierarchy.
+
+ :type: :class:`Type`
+ """
+ return relationship.one_to_one(
+ cls, 'type', fk='policy_type_fk', back_populates=relationship.NO_BACK_POP)
+
+ @declared_attr
+ def relationship_types(cls):
+ """
+ Base for the relationship type hierarchy.
+
+ :type: :class:`Type`
+ """
+ return relationship.one_to_one(
+ cls, 'type', fk='relationship_type_fk', back_populates=relationship.NO_BACK_POP)
+
+ @declared_attr
+ def capability_types(cls):
+ """
+ Base for the capability type hierarchy.
+
+ :type: :class:`Type`
+ """
+ return relationship.one_to_one(
+ cls, 'type', fk='capability_type_fk', back_populates=relationship.NO_BACK_POP)
+
+ @declared_attr
+ def interface_types(cls):
+ """
+ Base for the interface type hierarchy.
+
+ :type: :class:`Type`
+ """
+ return relationship.one_to_one(
+ cls, 'type', fk='interface_type_fk', back_populates=relationship.NO_BACK_POP)
+
+ @declared_attr
+ def artifact_types(cls):
+ """
+ Base for the artifact type hierarchy.
+
+ :type: :class:`Type`
+ """
+ return relationship.one_to_one(
+ cls, 'type', fk='artifact_type_fk', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def services(cls):
+ """
+ Instantiated services.
+
+ :type: {:obj:`basestring`: :class:`Service`}
+ """
+ return relationship.one_to_many(cls, 'service', dict_key='name')
+
+ @declared_attr
+ def node_templates(cls):
+ """
+ Templates for creating nodes.
+
+ :type: {:obj:`basestring`: :class:`NodeTemplate`}
+ """
+ return relationship.one_to_many(cls, 'node_template', dict_key='name')
+
+ @declared_attr
+ def group_templates(cls):
+ """
+ Templates for creating groups.
+
+ :type: {:obj:`basestring`: :class:`GroupTemplate`}
+ """
+ return relationship.one_to_many(cls, 'group_template', dict_key='name')
+
+ @declared_attr
+ def policy_templates(cls):
+ """
+ Templates for creating policies.
+
+ :type: {:obj:`basestring`: :class:`PolicyTemplate`}
+ """
+ return relationship.one_to_many(cls, 'policy_template', dict_key='name')
+
+ @declared_attr
+ def workflow_templates(cls):
+ """
+ Templates for creating workflows.
+
+ :type: {:obj:`basestring`: :class:`OperationTemplate`}
+ """
+ return relationship.one_to_many(cls, 'operation_template', dict_key='name')
+
+ @declared_attr
+ def outputs(cls):
+ """
+ Declarations for output parameters that are filled in after service installation.
+
+ :type: {:obj:`basestring`: :class:`Output`}
+ """
+ return relationship.one_to_many(cls, 'output', dict_key='name')
+
+ @declared_attr
+ def inputs(cls):
+ """
+ Declarations for externally provided parameters.
+
+ :type: {:obj:`basestring`: :class:`Input`}
+ """
+ return relationship.one_to_many(cls, 'input', dict_key='name')
+
+ @declared_attr
+ def plugin_specifications(cls):
+ """
+ Required plugins for instantiated services.
+
+ :type: {:obj:`basestring`: :class:`PluginSpecification`}
+ """
+ return relationship.one_to_many(cls, 'plugin_specification', dict_key='name')
+
+ # endregion
+
+ # region many_to_many relationships
+
+ @declared_attr
+ def meta_data(cls):
+ """
+ Associated metadata.
+
+ :type: {:obj:`basestring`: :class:`Metadata`}
+ """
+ # Warning! We cannot use the attr name "metadata" because it's used by SQLAlchemy!
+ return relationship.many_to_many(cls, 'metadata', dict_key='name')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def substitution_template_fk(cls):
+ """For ServiceTemplate one-to-one to SubstitutionTemplate"""
+ return relationship.foreign_key('substitution_template', nullable=True)
+
+ @declared_attr
+ def node_type_fk(cls):
+ """For ServiceTemplate one-to-one to Type"""
+ return relationship.foreign_key('type', nullable=True)
+
+ @declared_attr
+ def group_type_fk(cls):
+ """For ServiceTemplate one-to-one to Type"""
+ return relationship.foreign_key('type', nullable=True)
+
+ @declared_attr
+ def policy_type_fk(cls):
+ """For ServiceTemplate one-to-one to Type"""
+ return relationship.foreign_key('type', nullable=True)
+
+ @declared_attr
+ def relationship_type_fk(cls):
+ """For ServiceTemplate one-to-one to Type"""
+ return relationship.foreign_key('type', nullable=True)
+
+ @declared_attr
+ def capability_type_fk(cls):
+ """For ServiceTemplate one-to-one to Type"""
+ return relationship.foreign_key('type', nullable=True)
+
+ @declared_attr
+ def interface_type_fk(cls):
+ """For ServiceTemplate one-to-one to Type"""
+ return relationship.foreign_key('type', nullable=True)
+
+ @declared_attr
+ def artifact_type_fk(cls):
+ """For ServiceTemplate one-to-one to Type"""
+ return relationship.foreign_key('type', nullable=True)
+
+ # endregion
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ main_file_name = Column(Text, doc="""
+ Filename of CSAR or YAML file from which this service template was parsed.
+
+ :type: :obj:`basestring`
+ """)
+
+ created_at = Column(DateTime, nullable=False, index=True, doc="""
+ Creation timestamp.
+
+ :type: :class:`~datetime.datetime`
+ """)
+
+ updated_at = Column(DateTime, doc="""
+ Update timestamp.
+
+ :type: :class:`~datetime.datetime`
+ """)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('description', self.description),
+ ('metadata', formatting.as_raw_dict(self.meta_data)),
+ ('node_templates', formatting.as_raw_list(self.node_templates)),
+ ('group_templates', formatting.as_raw_list(self.group_templates)),
+ ('policy_templates', formatting.as_raw_list(self.policy_templates)),
+ ('substitution_template', formatting.as_raw(self.substitution_template)),
+ ('inputs', formatting.as_raw_dict(self.inputs)),
+ ('outputs', formatting.as_raw_dict(self.outputs)),
+ ('workflow_templates', formatting.as_raw_list(self.workflow_templates))))
+
+ @property
+ def types_as_raw(self):
+ return collections.OrderedDict((
+ ('node_types', formatting.as_raw(self.node_types)),
+ ('group_types', formatting.as_raw(self.group_types)),
+ ('policy_types', formatting.as_raw(self.policy_types)),
+ ('relationship_types', formatting.as_raw(self.relationship_types)),
+ ('capability_types', formatting.as_raw(self.capability_types)),
+ ('interface_types', formatting.as_raw(self.interface_types)),
+ ('artifact_types', formatting.as_raw(self.artifact_types))))
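+
+ # Illustrative usage (not part of the module): since ``as_raw`` and
+ # ``types_as_raw`` return plain ordered dicts, a stored template can be
+ # dumped for inspection; assuming a ``service_template`` model instance:
+ #
+ # import json
+ # from aria.modeling.utils import ModelJSONEncoder
+ #
+ # print(json.dumps(service_template.as_raw, cls=ModelJSONEncoder, indent=2))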
+
+
+class NodeTemplateBase(TemplateModelMixin):
+ """
+ Template for creating zero or more :class:`Node` instances, which are typed vertices in the
+ service topology.
+ """
+
+ __tablename__ = 'node_template'
+
+ __private_fields__ = ('type_fk',
+ 'service_template_fk')
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def nodes(cls):
+ """
+ Instantiated nodes.
+
+ :type: [:class:`Node`]
+ """
+ return relationship.one_to_many(cls, 'node')
+
+ @declared_attr
+ def interface_templates(cls):
+ """
+ Associated interface templates.
+
+ :type: {:obj:`basestring`: :class:`InterfaceTemplate`}
+ """
+ return relationship.one_to_many(cls, 'interface_template', dict_key='name')
+
+ @declared_attr
+ def artifact_templates(cls):
+ """
+ Associated artifacts.
+
+ :type: {:obj:`basestring`: :class:`ArtifactTemplate`}
+ """
+ return relationship.one_to_many(cls, 'artifact_template', dict_key='name')
+
+ @declared_attr
+ def capability_templates(cls):
+ """
+ Associated exposed capability templates.
+
+ :type: {:obj:`basestring`: :class:`CapabilityTemplate`}
+ """
+ return relationship.one_to_many(cls, 'capability_template', dict_key='name')
+
+ @declared_attr
+ def requirement_templates(cls):
+ """
+ Associated potential relationships with other nodes.
+
+ :type: [:class:`RequirementTemplate`]
+ """
+ return relationship.one_to_many(cls, 'requirement_template', other_fk='node_template_fk')
+
+ @declared_attr
+ def properties(cls):
+ """
+ Declarations for associated immutable parameters.
+
+ :type: {:obj:`basestring`: :class:`Property`}
+ """
+ return relationship.one_to_many(cls, 'property', dict_key='name')
+
+ @declared_attr
+ def attributes(cls):
+ """
+ Declarations for associated mutable parameters.
+
+ :type: {:obj:`basestring`: :class:`Attribute`}
+ """
+ return relationship.one_to_many(cls, 'attribute', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def type(cls):
+ """
+ Node type.
+
+ :type: :class:`Type`
+ """
+ return relationship.many_to_one(cls, 'type', back_populates=relationship.NO_BACK_POP)
+
+ @declared_attr
+ def service_template(cls):
+ """
+ Containing service template.
+
+ :type: :class:`ServiceTemplate`
+ """
+ return relationship.many_to_one(cls, 'service_template')
+
+ # endregion
+
+ # region association proxies
+
+ @declared_attr
+ def service_template_name(cls):
+ return relationship.association_proxy('service_template', 'name')
+
+ @declared_attr
+ def type_name(cls):
+ return relationship.association_proxy('type', 'name')
+
+ # endregion
+
+ # region foreign_keys
+
+ @declared_attr
+ def type_fk(cls):
+ """For NodeTemplate many-to-one to Type"""
+ return relationship.foreign_key('type')
+
+ @declared_attr
+ def service_template_fk(cls):
+ """For ServiceTemplate one-to-many to NodeTemplate"""
+ return relationship.foreign_key('service_template')
+
+ # endregion
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ directives = Column(PickleType, doc="""
+ Directives that apply to this node template.
+
+ :type: [:obj:`basestring`]
+ """)
+
+ default_instances = Column(Integer, default=1, doc="""
+ Default number of nodes that will appear in the service.
+
+ :type: :obj:`int`
+ """)
+
+ min_instances = Column(Integer, default=0, doc="""
+ Minimum number of nodes that will appear in the service.
+
+ :type: :obj:`int`
+ """)
+
+ max_instances = Column(Integer, default=None, doc="""
+ Maximum number of nodes that will appear in the service.
+
+ :type: :obj:`int`
+ """)
+
+ target_node_template_constraints = Column(PickleType, doc="""
+ Constraints for filtering relationship targets.
+
+ :type: [:class:`NodeTemplateConstraint`]
+ """)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('description', self.description),
+ ('type_name', self.type.name),
+ ('properties', formatting.as_raw_dict(self.properties)),
+ ('attributes', formatting.as_raw_dict(self.attributes)),
+ ('interface_templates', formatting.as_raw_list(self.interface_templates)),
+ ('artifact_templates', formatting.as_raw_list(self.artifact_templates)),
+ ('capability_templates', formatting.as_raw_list(self.capability_templates)),
+ ('requirement_templates', formatting.as_raw_list(self.requirement_templates))))
+
+ def is_target_node_template_valid(self, target_node_template):
+ """
+ Checks if ``target_node_template`` matches all our ``target_node_template_constraints``.
+ """
+
+ if self.target_node_template_constraints:
+ for node_template_constraint in self.target_node_template_constraints:
+ if not node_template_constraint.matches(self, target_node_template):
+ return False
+ return True
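+
+ # Illustrative sketch (hypothetical class, not part of the module): items in
+ # ``target_node_template_constraints`` only need a ``matches`` method, e.g.:
+ #
+ # class SameServiceTemplateConstraint(object):
+ #     def matches(self, source_node_template, target_node_template):
+ #         return (source_node_template.service_template is
+ #                 target_node_template.service_template)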
+
+ @property
+ def _next_index(self):
+ """
+ Next available node index.
+
+ :returns: node index
+ :rtype: int
+ """
+
+ max_index = 0
+ if self.nodes:
+ max_index = max(int(n.name.rsplit('_', 1)[-1]) for n in self.nodes)
+ return max_index + 1
+
+ @property
+ def _next_name(self):
+ """
+ Next available node name.
+
+ :returns: node name
+ :rtype: basestring
+ """
+
+ return '{name}_{index}'.format(name=self.name, index=self._next_index)
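+
+ # For example (illustrative values): a node template named 'web_server' whose
+ # existing nodes are 'web_server_1' and 'web_server_2' yields
+ # ``_next_index == 3`` and ``_next_name == 'web_server_3'``.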
+
+ @property
+ def scaling(self):
+ scaling = {}
+
+ def extract_property(properties, name):
+ if name in scaling:
+ return
+ prop = properties.get(name)
+ if (prop is not None) and (prop.type_name == 'integer') and (prop.value is not None):
+ scaling[name] = prop.value
+
+ def extract_properties(properties):
+ extract_property(properties, 'min_instances')
+ extract_property(properties, 'max_instances')
+ extract_property(properties, 'default_instances')
+
+ # From our scaling capabilities
+ for capability_template in self.capability_templates.itervalues():
+ if capability_template.type.role == 'scaling':
+ extract_properties(capability_template.properties)
+
+ # From service scaling policies
+ for policy_template in self.service_template.policy_templates.itervalues():
+ if policy_template.type.role == 'scaling':
+ if policy_template.is_for_node_template(self.name):
+ extract_properties(policy_template.properties)
+
+ # Defaults
+ scaling.setdefault('min_instances', 0)
+ scaling.setdefault('max_instances', 1)
+ scaling.setdefault('default_instances', 1)
+
+ return scaling
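+
+ # Illustrative result (hypothetical values): for a node template with no
+ # scaling capability or policy, ``scaling`` returns the defaults, e.g.
+ # ``{'min_instances': 0, 'max_instances': 1, 'default_instances': 1}``; any
+ # integer-typed property of the same name on a 'scaling'-role capability or
+ # policy overrides the corresponding default.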
+
+
+class GroupTemplateBase(TemplateModelMixin):
+ """
+ Template for creating a :class:`Group` instance, which is a typed logical container for zero or
+ more :class:`Node` instances.
+ """
+
+ __tablename__ = 'group_template'
+
+ __private_fields__ = ('type_fk',
+ 'service_template_fk')
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def groups(cls):
+ """
+ Instantiated groups.
+
+ :type: [:class:`Group`]
+ """
+ return relationship.one_to_many(cls, 'group')
+
+ @declared_attr
+ def interface_templates(cls):
+ """
+ Associated interface templates.
+
+ :type: {:obj:`basestring`: :class:`InterfaceTemplate`}
+ """
+ return relationship.one_to_many(cls, 'interface_template', dict_key='name')
+
+ @declared_attr
+ def properties(cls):
+ """
+ Declarations for associated immutable parameters.
+
+ :type: {:obj:`basestring`: :class:`Property`}
+ """
+ return relationship.one_to_many(cls, 'property', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def service_template(cls):
+ """
+ Containing service template.
+
+ :type: :class:`ServiceTemplate`
+ """
+ return relationship.many_to_one(cls, 'service_template')
+
+ @declared_attr
+ def type(cls):
+ """
+ Group type.
+
+ :type: :class:`Type`
+ """
+ return relationship.many_to_one(cls, 'type', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region many_to_many relationships
+
+ @declared_attr
+ def node_templates(cls):
+ """
+ Nodes instantiated by these templates will be members of the group.
+
+ :type: [:class:`NodeTemplate`]
+ """
+ return relationship.many_to_many(cls, 'node_template')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def type_fk(cls):
+ """For GroupTemplate many-to-one to Type"""
+ return relationship.foreign_key('type')
+
+ @declared_attr
+ def service_template_fk(cls):
+ """For ServiceTemplate one-to-many to GroupTemplate"""
+ return relationship.foreign_key('service_template')
+
+ # endregion
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('description', self.description),
+ ('type_name', self.type.name),
+ ('properties', formatting.as_raw_dict(self.properties)),
+ ('interface_templates', formatting.as_raw_list(self.interface_templates))))
+
+ def contains_node_template(self, name):
+ for node_template in self.node_templates:
+ if node_template.name == name:
+ return True
+ return False
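+
+ # Illustrative usage (hypothetical name): membership is checked by node
+ # template name, e.g.:
+ #
+ # if group_template.contains_node_template('web_server'):
+ #     print('web_server belongs to', group_template.name)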
+
+
+class PolicyTemplateBase(TemplateModelMixin):
+ """
+ Template for creating a :class:`Policy` instance, which is a typed set of orchestration hints
+ applied to zero or more :class:`Node` or :class:`Group` instances.
+ """
+
+ __tablename__ = 'policy_template'
+
+ __private_fields__ = ('type_fk',
+ 'service_template_fk')
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def policies(cls):
+ """
+ Instantiated policies.
+
+ :type: [:class:`Policy`]
+ """
+ return relationship.one_to_many(cls, 'policy')
+
+ @declared_attr
+ def properties(cls):
+ """
+ Declarations for associated immutable parameters.
+
+ :type: {:obj:`basestring`: :class:`Property`}
+ """
+ return relationship.one_to_many(cls, 'property', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def service_template(cls):
+ """
+ Containing service template.
+
+ :type: :class:`ServiceTemplate`
+ """
+ return relationship.many_to_one(cls, 'service_template')
+
+ @declared_attr
+ def type(cls):
+ """
+ Policy type.
+
+ :type: :class:`Type`
+ """
+ return relationship.many_to_one(cls, 'type', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region many_to_many relationships
+
+ @declared_attr
+ def node_templates(cls):
+ """
+ Policy will be enacted on all nodes instantiated by these templates.
+
+ :type: {:obj:`basestring`: :class:`NodeTemplate`}
+ """
+ return relationship.many_to_many(cls, 'node_template')
+
+ @declared_attr
+ def group_templates(cls):
+ """
+ Policy will be enacted on all nodes in all groups instantiated by these templates.
+
+ :type: {:obj:`basestring`: :class:`GroupTemplate`}
+ """
+ return relationship.many_to_many(cls, 'group_template')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def type_fk(cls):
+ """For PolicyTemplate many-to-one to Type"""
+ return relationship.foreign_key('type')
+
+ @declared_attr
+ def service_template_fk(cls):
+ """For ServiceTemplate one-to-many to PolicyTemplate"""
+ return relationship.foreign_key('service_template')
+
+ # endregion
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('description', self.description),
+ ('type_name', self.type.name),
+ ('properties', formatting.as_raw_dict(self.properties))))
+
+ def is_for_node_template(self, name):
+ for node_template in self.node_templates:
+ if node_template.name == name:
+ return True
+ for group_template in self.group_templates:
+ if group_template.contains_node_template(name):
+ return True
+ return False
+
+ def is_for_group_template(self, name):
+ for group_template in self.group_templates:
+ if group_template.name == name:
+ return True
+ return False
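+
+ # Illustrative usage (hypothetical name, not part of the module): this is how
+ # ``NodeTemplateBase.scaling`` above finds applicable scaling policies:
+ #
+ # for policy_template in service_template.policy_templates.itervalues():
+ #     if (policy_template.type.role == 'scaling'
+ #             and policy_template.is_for_node_template('web_server')):
+ #         print('web_server is scaled by', policy_template.name)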
+
+
+class SubstitutionTemplateBase(TemplateModelMixin):
+ """
+ Template for creating a :class:`Substitution` instance, which exposes an entire instantiated
+ service as a single node.
+ """
+
+ __tablename__ = 'substitution_template'
+
+ __private_fields__ = ('node_type_fk',)
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def substitutions(cls):
+ """
+ Instantiated substitutions.
+
+ :type: [:class:`Substitution`]
+ """
+ return relationship.one_to_many(cls, 'substitution')
+
+ @declared_attr
+ def mappings(cls):
+ """
+ Maps requirement and capability templates to the exposed node.
+
+ :type: {:obj:`basestring`: :class:`SubstitutionTemplateMapping`}
+ """
+ return relationship.one_to_many(cls, 'substitution_template_mapping', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def node_type(cls):
+ """
+ Exposed node type.
+
+ :type: :class:`Type`
+ """
+ return relationship.many_to_one(cls, 'type', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def node_type_fk(cls):
+ """For SubstitutionTemplate many-to-one to Type"""
+ return relationship.foreign_key('type')
+
+ # endregion
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('node_type_name', self.node_type.name),
+ ('mappings', formatting.as_raw_dict(self.mappings))))
+
+
+class SubstitutionTemplateMappingBase(TemplateModelMixin):
+ """
+ Used by :class:`SubstitutionTemplate` to map a capability template or a requirement template to
+ the exposed node.
+
+ The :attr:`name` field should match the capability or requirement name on the exposed node's
+ type.
+
+ Only one of :attr:`capability_template` and :attr:`requirement_template` can be set.
+ """
+
+ __tablename__ = 'substitution_template_mapping'
+
+ __private_fields__ = ('substitution_template_fk',
+ 'capability_template_fk',
+ 'requirement_template_fk')
+
+ # region one_to_one relationships
+
+ @declared_attr
+ def capability_template(cls):
+ """
+ Capability template to expose (can be ``None``).
+
+ :type: :class:`CapabilityTemplate`
+ """
+ return relationship.one_to_one(
+ cls, 'capability_template', back_populates=relationship.NO_BACK_POP)
+
+ @declared_attr
+ def requirement_template(cls):
+ """
+ Requirement template to expose (can be ``None``).
+
+ :type: :class:`RequirementTemplate`
+ """
+ return relationship.one_to_one(
+ cls, 'requirement_template', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def substitution_template(cls):
+ """
+ Containing substitution template.
+
+ :type: :class:`SubstitutionTemplate`
+ """
+ return relationship.many_to_one(cls, 'substitution_template', back_populates='mappings')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def substitution_template_fk(cls):
+ """For SubstitutionTemplate one-to-many to SubstitutionTemplateMapping"""
+ return relationship.foreign_key('substitution_template')
+
+ @declared_attr
+ def capability_template_fk(cls):
+ """For SubstitutionTemplate one-to-one to CapabilityTemplate"""
+ return relationship.foreign_key('capability_template', nullable=True)
+
+ @declared_attr
+ def requirement_template_fk(cls):
+ """For SubstitutionTemplate one-to-one to RequirementTemplate"""
+ return relationship.foreign_key('requirement_template', nullable=True)
+
+ # endregion
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),))
+
+
+class RequirementTemplateBase(TemplateModelMixin):
+ """
+ Template for creating :class:`Relationship` instances, which are optionally-typed edges in the
+ service topology, connecting a :class:`Node` to a :class:`Capability` of another node.
+
+ Note that there is no equivalent "Requirement" instance model. Instead, during instantiation a
+ requirement template is matched with a capability and a :class:`Relationship` is instantiated.
+
+ A requirement template *must* target a :class:`CapabilityType` or a capability name. It can
+ optionally target a specific :class:`NodeType` or :class:`NodeTemplate`.
+
+ Requirement templates may optionally contain a :class:`RelationshipTemplate`. If they do not,
+ a :class:`Relationship` will be instantiated with default values.
+ """
+
+ __tablename__ = 'requirement_template'
+
+ __private_fields__ = ('target_capability_type_fk',
+ 'target_node_template_fk',
+ 'target_node_type_fk',
+ 'relationship_template_fk',
+ 'node_template_fk')
+
+ # region one_to_one relationships
+
+ @declared_attr
+ def target_capability_type(cls):
+ """
+ Target capability type.
+
+ :type: :class:`CapabilityType`
+ """
+ return relationship.one_to_one(cls,
+ 'type',
+ fk='target_capability_type_fk',
+ back_populates=relationship.NO_BACK_POP)
+
+ @declared_attr
+ def target_node_template(cls):
+ """
+ Target node template (can be ``None``).
+
+ :type: :class:`NodeTemplate`
+ """
+ return relationship.one_to_one(cls,
+ 'node_template',
+ fk='target_node_template_fk',
+ back_populates=relationship.NO_BACK_POP)
+
+ @declared_attr
+ def relationship_template(cls):
+ """
+ Associated relationship template (can be ``None``).
+
+ :type: :class:`RelationshipTemplate`
+ """
+ return relationship.one_to_one(cls, 'relationship_template')
+
+ # endregion
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def relationships(cls):
+ """
+ Instantiated relationships.
+
+ :type: [:class:`Relationship`]
+ """
+ return relationship.one_to_many(cls, 'relationship')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def node_template(cls):
+ """
+ Containing node template.
+
+ :type: :class:`NodeTemplate`
+ """
+ return relationship.many_to_one(cls, 'node_template', fk='node_template_fk')
+
+ @declared_attr
+ def target_node_type(cls):
+ """
+ Target node type (can be ``None``).
+
+ :type: :class:`Type`
+ """
+ return relationship.many_to_one(
+ cls, 'type', fk='target_node_type_fk', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def target_node_type_fk(cls):
+ """For RequirementTemplate many-to-one to Type"""
+ return relationship.foreign_key('type', nullable=True)
+
+ @declared_attr
+ def target_node_template_fk(cls):
+ """For RequirementTemplate one-to-one to NodeTemplate"""
+ return relationship.foreign_key('node_template', nullable=True)
+
+ @declared_attr
+ def target_capability_type_fk(cls):
+ """For RequirementTemplate one-to-one to Type"""
+ return relationship.foreign_key('type', nullable=True)
+
+ @declared_attr
+ def node_template_fk(cls):
+ """For NodeTemplate one-to-many to RequirementTemplate"""
+ return relationship.foreign_key('node_template')
+
+ @declared_attr
+ def relationship_template_fk(cls):
+ """For RequirementTemplate one-to-one to RelationshipTemplate"""
+ return relationship.foreign_key('relationship_template', nullable=True)
+
+ # endregion
+
+ target_capability_name = Column(Text, doc="""
+ Target capability name in node template or node type (can be ``None``).
+
+ :type: :obj:`basestring`
+ """)
+
+ target_node_template_constraints = Column(PickleType, doc="""
+ Constraints for filtering relationship targets.
+
+ :type: [:class:`NodeTemplateConstraint`]
+ """)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('target_node_type_name', self.target_node_type.name
+ if self.target_node_type is not None else None),
+ ('target_node_template_name', self.target_node_template.name
+ if self.target_node_template is not None else None),
+ ('target_capability_type_name', self.target_capability_type.name
+ if self.target_capability_type is not None else None),
+ ('target_capability_name', self.target_capability_name),
+ ('relationship_template', formatting.as_raw(self.relationship_template))))
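+
+ # Illustrative output (hypothetical values): because every ``target_*`` field
+ # may be ``None``, ``as_raw`` guards each name lookup; an untargeted
+ # requirement might serialize as:
+ #
+ # {'name': 'host', 'target_node_type_name': None,
+ #  'target_node_template_name': None, 'target_capability_type_name': None,
+ #  'target_capability_name': None, 'relationship_template': None}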
+
+
+class RelationshipTemplateBase(TemplateModelMixin):
+ """
+ Optional addition to a :class:`RequirementTemplate`.
+
+ Note that a relationship template here is not exactly equivalent to a relationship template
+ entity in TOSCA. For example, a TOSCA requirement specifying a relationship type rather than a
+ relationship template would still be represented here as a relationship template.
+ """
+
+ __tablename__ = 'relationship_template'
+
+ __private_fields__ = ('type_fk',)
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def relationships(cls):
+ """
+ Instantiated relationships.
+
+ :type: [:class:`Relationship`]
+ """
+ return relationship.one_to_many(cls, 'relationship')
+
+ @declared_attr
+ def interface_templates(cls):
+ """
+ Associated interface templates.
+
+ :type: {:obj:`basestring`: :class:`InterfaceTemplate`}
+ """
+ return relationship.one_to_many(cls, 'interface_template', dict_key='name')
+
+ @declared_attr
+ def properties(cls):
+ """
+ Declarations for associated immutable parameters.
+
+ :type: {:obj:`basestring`: :class:`Property`}
+ """
+ return relationship.one_to_many(cls, 'property', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def type(cls):
+ """
+ Relationship type.
+
+ :type: :class:`Type`
+ """
+ return relationship.many_to_one(cls, 'type', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def type_fk(cls):
+ """For RelationshipTemplate many-to-one to Type"""
+ return relationship.foreign_key('type', nullable=True)
+
+ # endregion
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('type_name', self.type.name if self.type is not None else None),
+ ('name', self.name),
+ ('description', self.description),
+ ('properties', formatting.as_raw_dict(self.properties)),
+ ('interface_templates', formatting.as_raw_list(self.interface_templates))))
+
+
+class CapabilityTemplateBase(TemplateModelMixin):
+ """
+ Template for creating :class:`Capability` instances, typed attachments which serve two purposes:
+ to provide extra properties and attributes to :class:`Node` instances, and to expose targets for
+ :class:`Relationship` instances from other nodes.
+ """
+
+ __tablename__ = 'capability_template'
+
+ __private_fields__ = ('type_fk',
+ 'node_template_fk')
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def capabilities(cls):
+ """
+ Instantiated capabilities.
+
+ :type: [:class:`Capability`]
+ """
+ return relationship.one_to_many(cls, 'capability')
+
+ @declared_attr
+ def properties(cls):
+ """
+ Declarations for associated immutable parameters.
+
+ :type: {:obj:`basestring`: :class:`Property`}
+ """
+ return relationship.one_to_many(cls, 'property', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def node_template(cls):
+ """
+ Containing node template.
+
+ :type: :class:`NodeTemplate`
+ """
+ return relationship.many_to_one(cls, 'node_template')
+
+ @declared_attr
+ def type(cls):
+ """
+ Capability type.
+
+ :type: :class:`Type`
+ """
+ return relationship.many_to_one(cls, 'type', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region many_to_many relationships
+
+ @declared_attr
+ def valid_source_node_types(cls):
+ """
+ Reject requirements that are not from these node types.
+
+ :type: [:class:`Type`]
+ """
+ return relationship.many_to_many(cls, 'type', prefix='valid_sources')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def type_fk(cls):
+ """For CapabilityTemplate many-to-one to Type"""
+ return relationship.foreign_key('type')
+
+ @declared_attr
+ def node_template_fk(cls):
+ """For NodeTemplate one-to-many to CapabilityTemplate"""
+ return relationship.foreign_key('node_template')
+
+ # endregion
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ min_occurrences = Column(Integer, default=None, doc="""
+ Minimum number of requirement matches required.
+
+ :type: :obj:`int`
+ """)
+
+ max_occurrences = Column(Integer, default=None, doc="""
+ Maximum number of requirement matches allowed.
+
+ :type: :obj:`int`
+ """)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('description', self.description),
+ ('type_name', self.type.name),
+ ('min_occurrences', self.min_occurrences),
+ ('max_occurrences', self.max_occurrences),
+ ('valid_source_node_types', [v.name for v in self.valid_source_node_types]),
+ ('properties', formatting.as_raw_dict(self.properties))))
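+
+ # Illustrative sketch (hypothetical helper, not part of the module): a
+ # matcher could use the occurrence bounds to validate how many relationships
+ # target a capability (``None`` means unbounded):
+ #
+ # def occurrences_ok(occurrences, capability_template):
+ #     minimum = capability_template.min_occurrences or 0
+ #     maximum = capability_template.max_occurrences
+ #     return occurrences >= minimum and (maximum is None or occurrences <= maximum)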
+
+
+class InterfaceTemplateBase(TemplateModelMixin):
+ """
+ Template for creating :class:`Interface` instances, which are typed bundles of
+ :class:`Operation` instances.
+
+ Can be associated with a :class:`NodeTemplate`, a :class:`GroupTemplate`, or a
+ :class:`RelationshipTemplate`.
+ """
+
+ __tablename__ = 'interface_template'
+
+ __private_fields__ = ('type_fk',
+ 'node_template_fk',
+ 'group_template_fk',
+ 'relationship_template_fk')
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def inputs(cls):
+ """
+ Declarations for externally provided parameters that can be used by all operations of the
+ interface.
+
+ :type: {:obj:`basestring`: :class:`Input`}
+ """
+ return relationship.one_to_many(cls, 'input', dict_key='name')
+
+ @declared_attr
+ def interfaces(cls):
+ """
+ Instantiated interfaces.
+
+ :type: [:class:`Interface`]
+ """
+ return relationship.one_to_many(cls, 'interface')
+
+ @declared_attr
+ def operation_templates(cls):
+ """
+ Associated operation templates.
+
+ :type: {:obj:`basestring`: :class:`OperationTemplate`}
+ """
+ return relationship.one_to_many(cls, 'operation_template', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def node_template(cls):
+ """
+ Containing node template (can be ``None``).
+
+ :type: :class:`NodeTemplate`
+ """
+ return relationship.many_to_one(cls, 'node_template')
+
+ @declared_attr
+ def group_template(cls):
+ """
+ Containing group template (can be ``None``).
+
+ :type: :class:`GroupTemplate`
+ """
+ return relationship.many_to_one(cls, 'group_template')
+
+ @declared_attr
+ def relationship_template(cls):
+ """
+ Containing relationship template (can be ``None``).
+
+ :type: :class:`RelationshipTemplate`
+ """
+ return relationship.many_to_one(cls, 'relationship_template')
+
+ @declared_attr
+ def type(cls):
+ """
+ Interface type.
+
+ :type: :class:`Type`
+ """
+ return relationship.many_to_one(cls, 'type', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def type_fk(cls):
+ """For InterfaceTemplate many-to-one to Type"""
+ return relationship.foreign_key('type')
+
+ @declared_attr
+ def node_template_fk(cls):
+ """For NodeTemplate one-to-many to InterfaceTemplate"""
+ return relationship.foreign_key('node_template', nullable=True)
+
+ @declared_attr
+ def group_template_fk(cls):
+ """For GroupTemplate one-to-many to InterfaceTemplate"""
+ return relationship.foreign_key('group_template', nullable=True)
+
+ @declared_attr
+ def relationship_template_fk(cls):
+ """For RelationshipTemplate one-to-many to InterfaceTemplate"""
+ return relationship.foreign_key('relationship_template', nullable=True)
+
+ # endregion
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('description', self.description),
+ ('type_name', self.type.name),
+ ('inputs', formatting.as_raw_dict(self.inputs)), # pylint: disable=no-member
+ # TODO fix self.properties reference
+ ('operation_templates', formatting.as_raw_list(self.operation_templates))))
+
+
+class OperationTemplateBase(TemplateModelMixin):
+ """
+ Template for creating :class:`Operation` instances, which are entry points to Python functions
+ called as part of a workflow execution.
+ """
+
+ __tablename__ = 'operation_template'
+
+ __private_fields__ = ('service_template_fk',
+ 'interface_template_fk',
+ 'plugin_fk')
+
+ # region one_to_one relationships
+
+ @declared_attr
+ def plugin_specification(cls):
+ """
+ Associated plugin specification.
+
+ :type: :class:`PluginSpecification`
+ """
+ return relationship.one_to_one(
+ cls, 'plugin_specification', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def operations(cls):
+ """
+ Instantiated operations.
+
+ :type: [:class:`Operation`]
+ """
+ return relationship.one_to_many(cls, 'operation')
+
+ @declared_attr
+ def inputs(cls):
+ """
+ Declarations for parameters provided to the :attr:`implementation`.
+
+ :type: {:obj:`basestring`: :class:`Input`}
+ """
+ return relationship.one_to_many(cls, 'input', dict_key='name')
+
+ @declared_attr
+ def configurations(cls):
+ """
+ Configuration parameters for the operation instance Python :attr:`function`.
+
+ :type: {:obj:`basestring`: :class:`Configuration`}
+ """
+ return relationship.one_to_many(cls, 'configuration', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def service_template(cls):
+ """
+ Containing service template (can be ``None``); used for workflow operation templates.
+
+ :type: :class:`ServiceTemplate`
+ """
+ return relationship.many_to_one(cls, 'service_template',
+ back_populates='workflow_templates')
+
+ @declared_attr
+ def interface_template(cls):
+ """
+ Containing interface template (can be ``None``).
+
+ :type: :class:`InterfaceTemplate`
+ """
+ return relationship.many_to_one(cls, 'interface_template')
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def service_template_fk(cls):
+ """For ServiceTemplate one-to-many to OperationTemplate"""
+ return relationship.foreign_key('service_template', nullable=True)
+
+ @declared_attr
+ def interface_template_fk(cls):
+ """For InterfaceTemplate one-to-many to OperationTemplate"""
+ return relationship.foreign_key('interface_template', nullable=True)
+
+ @declared_attr
+ def plugin_specification_fk(cls):
+ """For OperationTemplate one-to-one to PluginSpecification"""
+ return relationship.foreign_key('plugin_specification', nullable=True)
+
+ # endregion
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ relationship_edge = Column(Boolean, doc="""
+ When ``True``, specifies that the operation is on the relationship's target edge; when
+ ``False``, on the source edge (only used by operations on relationships).
+
+ :type: :obj:`bool`
+ """)
+
+ implementation = Column(Text, doc="""
+ Implementation (usually the name of an artifact).
+
+ :type: :obj:`basestring`
+ """)
+
+ dependencies = Column(modeling_types.StrictList(item_cls=basestring), doc="""
+ Dependencies (usually names of artifacts).
+
+ :type: [:obj:`basestring`]
+ """)
+
+ function = Column(Text, doc="""
+ Full path to Python function.
+
+ :type: :obj:`basestring`
+ """)
+
+ executor = Column(Text, doc="""
+ Name of executor.
+
+ :type: :obj:`basestring`
+ """)
+
+ max_attempts = Column(Integer, doc="""
+ Maximum number of attempts allowed in case of task failure.
+
+ :type: :obj:`int`
+ """)
+
+ retry_interval = Column(Integer, doc="""
+ Interval between task retry attempts (in seconds).
+
+ :type: :obj:`int`
+ """)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('description', self.description),
+ ('implementation', self.implementation),
+ ('dependencies', self.dependencies),
+ ('inputs', formatting.as_raw_dict(self.inputs))))
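+
+ # Illustrative values (hypothetical, not part of the module): created
+ # programmatically, a lifecycle operation template might look like:
+ #
+ # operation_template = models.OperationTemplate(  # assumed models module
+ #     name='tosca.interfaces.node.lifecycle.Standard.start',
+ #     implementation='scripts/start.sh',
+ #     max_attempts=3,     # give up after three failed attempts
+ #     retry_interval=30)  # wait 30 seconds between attempts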
+
+
+class ArtifactTemplateBase(TemplateModelMixin):
+ """
+ Template for creating an :class:`Artifact` instance, which is a typed file, either provided in a
+ CSAR or downloaded from a repository.
+ """
+
+ __tablename__ = 'artifact_template'
+
+ __private_fields__ = ('type_fk',
+ 'node_template_fk')
+
+ # region one_to_many relationships
+
+ @declared_attr
+ def artifacts(cls):
+ """
+ Instantiated artifacts.
+
+ :type: [:class:`Artifact`]
+ """
+ return relationship.one_to_many(cls, 'artifact')
+
+ @declared_attr
+ def properties(cls):
+ """
+ Declarations for associated immutable parameters.
+
+ :type: {:obj:`basestring`: :class:`Property`}
+ """
+ return relationship.one_to_many(cls, 'property', dict_key='name')
+
+ # endregion
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def node_template(cls):
+ """
+ Containing node template.
+
+ :type: :class:`NodeTemplate`
+ """
+ return relationship.many_to_one(cls, 'node_template')
+
+ @declared_attr
+ def type(cls):
+ """
+ Artifact type.
+
+ :type: :class:`Type`
+ """
+ return relationship.many_to_one(cls, 'type', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def type_fk(cls):
+ """For ArtifactTemplate many-to-one to Type"""
+ return relationship.foreign_key('type')
+
+ @declared_attr
+ def node_template_fk(cls):
+ """For NodeTemplate one-to-many to ArtifactTemplate"""
+ return relationship.foreign_key('node_template')
+
+ # endregion
+
+ description = Column(Text, doc="""
+ Human-readable description.
+
+ :type: :obj:`basestring`
+ """)
+
+ source_path = Column(Text, doc="""
+ Source path (in CSAR or repository).
+
+ :type: :obj:`basestring`
+ """)
+
+ target_path = Column(Text, doc="""
+ Path at which to install at the destination.
+
+ :type: :obj:`basestring`
+ """)
+
+ repository_url = Column(Text, doc="""
+ Repository URL.
+
+ :type: :obj:`basestring`
+ """)
+
+ repository_credential = Column(modeling_types.StrictDict(basestring, basestring), doc="""
+ Credentials for accessing the repository.
+
+ :type: {:obj:`basestring`: :obj:`basestring`}
+ """)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('description', self.description),
+ ('type_name', self.type.name),
+ ('source_path', self.source_path),
+ ('target_path', self.target_path),
+ ('repository_url', self.repository_url),
+ ('repository_credential', formatting.as_agnostic(self.repository_credential)),
+ ('properties', formatting.as_raw_dict(self.properties))))
+
+
+class PluginSpecificationBase(TemplateModelMixin):
+ """
+ Requirement for a :class:`Plugin`.
+
+ The actual plugin to be selected depends on those currently installed in ARIA.
+ """
+
+ __tablename__ = 'plugin_specification'
+
+ __private_fields__ = ('service_template_fk',
+ 'plugin_fk')
+
+ # region many_to_one relationships
+
+ @declared_attr
+ def service_template(cls):
+ """
+ Containing service template.
+
+ :type: :class:`ServiceTemplate`
+ """
+ return relationship.many_to_one(cls, 'service_template')
+
+ @declared_attr
+ def plugin(cls): # pylint: disable=method-hidden
+ """
+ Matched plugin.
+
+ :type: :class:`Plugin`
+ """
+ return relationship.many_to_one(cls, 'plugin', back_populates=relationship.NO_BACK_POP)
+
+ # endregion
+
+ # region foreign keys
+
+ @declared_attr
+ def service_template_fk(cls):
+ """For ServiceTemplate one-to-many to PluginSpecification"""
+ return relationship.foreign_key('service_template', nullable=True)
+
+ @declared_attr
+ def plugin_fk(cls):
+ """For PluginSpecification many-to-one to Plugin"""
+ return relationship.foreign_key('plugin', nullable=True)
+
+ # endregion
+
+ version = Column(Text, doc="""
+ Minimum plugin version.
+
+ :type: :obj:`basestring`
+ """)
+
+ enabled = Column(Boolean, nullable=False, default=True, doc="""
+ Whether the plugin is enabled.
+
+ :type: :obj:`bool`
+ """)
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('name', self.name),
+ ('version', self.version),
+ ('enabled', self.enabled)))
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/types.py b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/types.py
new file mode 100644
index 0000000..38240fa
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/types.py
@@ -0,0 +1,318 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Allows JSON-serializable collections to be used as SQLAlchemy column types.
+"""
+
+import json
+from collections import namedtuple
+
+from sqlalchemy import (
+ TypeDecorator,
+ VARCHAR,
+ event
+)
+from sqlalchemy.ext import mutable
+from ruamel import yaml
+
+from . import exceptions
+
+
+class _MutableType(TypeDecorator):
+ """
+ Base class for mutable JSON-serializable column types.
+ """
+ @property
+ def python_type(self):
+ raise NotImplementedError
+
+ def process_literal_param(self, value, dialect):
+ pass
+
+ impl = VARCHAR
+
+ def process_bind_param(self, value, dialect):
+ if value is not None:
+ value = json.dumps(value)
+ return value
+
+ def process_result_value(self, value, dialect):
+ if value is not None:
+ value = json.loads(value)
+ return value
+
+
+class Dict(_MutableType):
+ """
+ JSON-serializable dict type for SQLAlchemy columns.
+ """
+ @property
+ def python_type(self):
+ return dict
+
+
+class List(_MutableType):
+ """
+ JSON-serializable list type for SQLAlchemy columns.
+ """
+ @property
+ def python_type(self):
+ return list
+
+
+class _StrictDictMixin(object):
+
+ @classmethod
+ def coerce(cls, key, value):
+ """
+ Convert plain dictionaries to MutableDict.
+ """
+ try:
+ if not isinstance(value, cls):
+ if isinstance(value, dict):
+ for k, v in value.items():
+ cls._assert_strict_key(k)
+ cls._assert_strict_value(v)
+ return cls(value)
+ return mutable.MutableDict.coerce(key, value)
+ else:
+ return value
+ except ValueError as e:
+ raise exceptions.ValueFormatException('could not coerce to MutableDict', cause=e)
+
+ def __setitem__(self, key, value):
+ self._assert_strict_key(key)
+ self._assert_strict_value(value)
+ super(_StrictDictMixin, self).__setitem__(key, value)
+
+ def setdefault(self, key, value):
+ self._assert_strict_key(key)
+ self._assert_strict_value(value)
+ super(_StrictDictMixin, self).setdefault(key, value)
+
+ def update(self, *args, **kwargs):
+ for k, v in kwargs.items():
+ self._assert_strict_key(k)
+ self._assert_strict_value(v)
+ super(_StrictDictMixin, self).update(*args, **kwargs)
+
+ @classmethod
+ def _assert_strict_key(cls, key):
+ if cls._key_cls is not None and not isinstance(key, cls._key_cls):
+ raise exceptions.ValueFormatException('key type was set strictly to {0}, but was {1}'
+ .format(cls._key_cls, type(key)))
+
+ @classmethod
+ def _assert_strict_value(cls, value):
+ if cls._value_cls is not None and not isinstance(value, cls._value_cls):
+ raise exceptions.ValueFormatException('value type was set strictly to {0}, but was {1}'
+ .format(cls._value_cls, type(value)))
+
+
+class _MutableDict(mutable.MutableDict):
+ """
+ Enables tracking for dict values.
+ """
+
+ @classmethod
+ def coerce(cls, key, value):
+ """
+ Convert plain dictionaries to MutableDict.
+ """
+ try:
+ return mutable.MutableDict.coerce(key, value)
+ except ValueError as e:
+ raise exceptions.ValueFormatException('could not coerce value', cause=e)
+
+
+class _StrictListMixin(object):
+
+ @classmethod
+ def coerce(cls, key, value):
+ "Convert plain dictionaries to MutableDict."
+ try:
+ if not isinstance(value, cls):
+ if isinstance(value, list):
+ for item in value:
+ cls._assert_item(item)
+ return cls(value)
+ return mutable.MutableList.coerce(key, value)
+ else:
+ return value
+ except ValueError as e:
+ raise exceptions.ValueFormatException('could not coerce to MutableList', cause=e)
+
+ def __setitem__(self, index, value):
+ """
+ Detect list set events and emit change events.
+ """
+ self._assert_item(value)
+ super(_StrictListMixin, self).__setitem__(index, value)
+
+ def append(self, item):
+ self._assert_item(item)
+ super(_StrictListMixin, self).append(item)
+
+ def extend(self, iterable):
+ for item in iterable:
+ self._assert_item(item)
+ super(_StrictListMixin, self).extend(iterable)
+
+ def insert(self, index, item):
+ self._assert_item(item)
+ super(_StrictListMixin, self).insert(index, item)
+
+ @classmethod
+ def _assert_item(cls, item):
+ if cls._item_cls is not None and not isinstance(item, cls._item_cls):
+ raise exceptions.ValueFormatException('item type was set strictly to {0}, but was {1}'
+ .format(cls._item_cls, type(item)))
+
+
+class _MutableList(mutable.MutableList):
+ """
+ Enables tracking for list values.
+ """
+
+ @classmethod
+ def coerce(cls, key, value):
+ """
+ Convert plain lists to MutableList.
+ """
+ try:
+ return mutable.MutableList.coerce(key, value)
+ except ValueError as e:
+ raise exceptions.ValueFormatException('could not coerce to MutableList', cause=e)
+
+
+_StrictDictID = namedtuple('_StrictDictID', 'key_cls, value_cls')
+_StrictValue = namedtuple('_StrictValue', 'type_cls, listener_cls')
+
+class _StrictDict(object):
+ """
+ This entire class functions as a factory for strict dicts and their listeners. No type class,
+ and no listener type class is created more than once. If a relevant type class exists it is
+ returned.
+ """
+ _strict_map = {}
+
+ def __call__(self, key_cls=None, value_cls=None):
+ strict_dict_map_key = _StrictDictID(key_cls=key_cls, value_cls=value_cls)
+ if strict_dict_map_key not in self._strict_map:
+ key_cls_name = getattr(key_cls, '__name__', str(key_cls))
+ value_cls_name = getattr(value_cls, '__name__', str(value_cls))
+ # Creating the type class itself. This class will be returned (used by the
+ # SQLAlchemy Column).
+ strict_dict_cls = type(
+ 'StrictDict_{0}_{1}'.format(key_cls_name, value_cls_name),
+ (Dict, ),
+ {}
+ )
+ # Creating the type listening class.
+ # The new class inherits from both the _MutableDict class and the _StrictDictMixin,
+ # while setting the necessary _key_cls and _value_cls as class attributes.
+ listener_cls = type(
+ 'StrictMutableDict_{0}_{1}'.format(key_cls_name, value_cls_name),
+ (_StrictDictMixin, _MutableDict),
+ {'_key_cls': key_cls, '_value_cls': value_cls}
+ )
+ yaml.representer.RoundTripRepresenter.add_representer(
+ listener_cls, yaml.representer.RoundTripRepresenter.represent_dict)
+ self._strict_map[strict_dict_map_key] = _StrictValue(type_cls=strict_dict_cls,
+ listener_cls=listener_cls)
+
+ return self._strict_map[strict_dict_map_key].type_cls
+
+
+StrictDict = _StrictDict()
+"""
+JSON-serializable strict dict type for SQLAlchemy columns.
+
+:param key_cls: class to enforce for keys (``None`` allows any key type)
+:param value_cls: class to enforce for values (``None`` allows any value type)
+"""
+
+
+class _StrictList(object):
+ """
+ This entire class functions as a factory for strict lists and their listeners. No type class,
+ and no listener type class is created more than once. If a relevant type class exists it is
+ returned.
+ """
+ _strict_map = {}
+
+ def __call__(self, item_cls=None):
+
+ if item_cls not in self._strict_map:
+ item_cls_name = getattr(item_cls, '__name__', str(item_cls))
+ # Creating the type class itself. This class will be returned (used by the
+ # SQLAlchemy Column).
+ strict_list_cls = type(
+ 'StrictList_{0}'.format(item_cls_name),
+ (List, ),
+ {}
+ )
+ # Creating the type listening class.
+ # The new class inherits from both the _MutableList class and the _StrictListMixin,
+ # while setting the necessary _item_cls as class attribute.
+ listener_cls = type(
+ 'StrictMutableList_{0}'.format(item_cls_name),
+ (_StrictListMixin, _MutableList),
+ {'_item_cls': item_cls}
+ )
+ yaml.representer.RoundTripRepresenter.add_representer(
+ listener_cls, yaml.representer.RoundTripRepresenter.represent_list)
+ self._strict_map[item_cls] = _StrictValue(type_cls=strict_list_cls,
+ listener_cls=listener_cls)
+
+ return self._strict_map[item_cls].type_cls
+
+
+StrictList = _StrictList()
+"""
+JSON-serializable strict list type for SQLAlchemy columns.
+
+:param item_cls: class to enforce for items (``None`` allows any item type)
+"""
+
+
+def _mutable_association_listener(mapper, cls):
+ strict_dict_type_to_listener = \
+ dict((v.type_cls, v.listener_cls) for v in _StrictDict._strict_map.itervalues())
+
+ strict_list_type_to_listener = \
+ dict((v.type_cls, v.listener_cls) for v in _StrictList._strict_map.itervalues())
+
+ for prop in mapper.column_attrs:
+ column_type = prop.columns[0].type
+ # Dict Listeners
+ if type(column_type) in strict_dict_type_to_listener: # pylint: disable=unidiomatic-typecheck
+ strict_dict_type_to_listener[type(column_type)].associate_with_attribute(
+ getattr(cls, prop.key))
+ elif isinstance(column_type, Dict):
+ _MutableDict.associate_with_attribute(getattr(cls, prop.key))
+
+ # List Listeners
+ if type(column_type) in strict_list_type_to_listener: # pylint: disable=unidiomatic-typecheck
+ strict_list_type_to_listener[type(column_type)].associate_with_attribute(
+ getattr(cls, prop.key))
+ elif isinstance(column_type, List):
+ _MutableList.associate_with_attribute(getattr(cls, prop.key))
+
+
+_LISTENER_ARGS = (mutable.mapper, 'mapper_configured', _mutable_association_listener)
+
+
+def _register_mutable_association_listener():
+ event.listen(*_LISTENER_ARGS)
+
+_register_mutable_association_listener()
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/utils.py b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/utils.py
new file mode 100644
index 0000000..491b71a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/modeling/utils.py
@@ -0,0 +1,185 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Miscellaneous modeling utilities.
+"""
+
+import os
+from json import JSONEncoder
+from StringIO import StringIO
+
+from . import exceptions
+from ..utils.type import validate_value_type
+from ..utils.collections import OrderedDict
+from ..utils.formatting import string_list_as_string
+
+
+class ModelJSONEncoder(JSONEncoder):
+ """
+ JSON encoder that automatically unwraps ``value`` attributes.
+ """
+ def __init__(self, *args, **kwargs):
+ # Just here to make sure Sphinx doesn't grab the base constructor's docstring
+ super(ModelJSONEncoder, self).__init__(*args, **kwargs)
+
+ def default(self, o): # pylint: disable=method-hidden
+ from .mixins import ModelMixin
+ if isinstance(o, ModelMixin):
+ if hasattr(o, 'value'):
+ dict_to_return = o.to_dict(fields=('value',))
+ return dict_to_return['value']
+ else:
+ return o.to_dict()
+ else:
+ return JSONEncoder.default(self, o)
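+
+ # Illustrative usage (assuming model instances): parameter-like models that
+ # carry a ``value`` attribute serialize to the bare value, e.g.:
+ #
+ # import json
+ # json.dumps({'inputs': service.inputs}, cls=ModelJSONEncoder)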
+
+
+class NodeTemplateContainerHolder(object):
+ """
+ Wrapper that allows using a :class:`~aria.modeling.models.NodeTemplate` model directly as the
+ ``container_holder`` input for :func:`~aria.modeling.functions.evaluate`.
+ """
+
+ def __init__(self, node_template):
+ self.container = node_template
+ self.service = None
+
+ @property
+ def service_template(self):
+ return self.container.service_template
+
+
+# def validate_no_undeclared_inputs(declared_inputs, supplied_inputs):
+#
+# undeclared_inputs = [input for input in supplied_inputs if input not in declared_inputs]
+# if undeclared_inputs:
+# raise exceptions.UndeclaredInputsException(
+# 'Undeclared inputs have been provided: {0}; Declared inputs: {1}'
+# .format(string_list_as_string(undeclared_inputs),
+# string_list_as_string(declared_inputs.keys())))
+
+
+def validate_required_inputs_are_supplied(declared_inputs, supplied_inputs):
+ required_inputs = [input for input in declared_inputs.values() if input.required]
+ missing_required_inputs = [input for input in required_inputs
+                               if input.name not in supplied_inputs and input.value is None]
+ if missing_required_inputs:
+ raise exceptions.MissingRequiredInputsException(
+            'Required inputs {0} have not been provided with values'
+ .format(string_list_as_string(missing_required_inputs)))
+
+
+def merge_parameter_values(provided_values, declared_parameters, model_cls=None):
+ """
+ Merges parameter values according to those declared by a type.
+
+ Exceptions will be raised for validation errors.
+
+ :param provided_values: provided parameter values or None
+ :type provided_values: {:obj:`basestring`: object}
+ :param declared_parameters: declared parameters
+ :type declared_parameters: {:obj:`basestring`: :class:`~aria.modeling.models.Parameter`}
+ :param model_cls: the model class that should be created from a provided value
+ :type model_cls: :class:`~aria.modeling.models.Input` or :class:`~aria.modeling.models.Argument`
+ :return: the merged parameters
+ :rtype: {:obj:`basestring`: :class:`~aria.modeling.models.Parameter`}
+    :raises ~aria.modeling.exceptions.UndeclaredInputsException: if a key in
+     ``provided_values`` does not exist in ``declared_parameters``
+    :raises ~aria.modeling.exceptions.MissingRequiredInputsException: if a key in
+     ``declared_parameters`` does not exist in ``provided_values`` and also has no default value
+    :raises ~aria.modeling.exceptions.ParametersOfWrongTypeException: if a value in
+     ``provided_values`` does not match its type in ``declared_parameters``
+ """
+
+ provided_values = provided_values or {}
+ provided_values_of_wrong_type = OrderedDict()
+ model_parameters = OrderedDict()
+ model_cls = model_cls or _get_class_from_sql_relationship(declared_parameters)
+
+ for declared_parameter_name, declared_parameter in declared_parameters.iteritems():
+ if declared_parameter_name in provided_values:
+ # a value has been provided
+ value = provided_values[declared_parameter_name]
+
+ # Validate type
+ type_name = declared_parameter.type_name
+ try:
+ validate_value_type(value, type_name)
+ except ValueError:
+ provided_values_of_wrong_type[declared_parameter_name] = type_name
+ except RuntimeError:
+ # TODO This error shouldn't be raised (or caught), but right now we lack support
+ # for custom data_types, which will raise this error. Skipping their validation.
+ pass
+ model_parameters[declared_parameter_name] = model_cls( # pylint: disable=unexpected-keyword-arg
+ name=declared_parameter_name,
+ type_name=type_name,
+ description=declared_parameter.description,
+ value=value)
+ else:
+ # Copy default value from declaration
+ model_parameters[declared_parameter_name] = model_cls(
+ value=declared_parameter._value,
+ name=declared_parameter.name,
+ type_name=declared_parameter.type_name,
+ description=declared_parameter.description)
+
+ if provided_values_of_wrong_type:
+ error_message = StringIO()
+ for param_name, param_type in provided_values_of_wrong_type.iteritems():
+ error_message.write('Parameter "{0}" is not of declared type "{1}"{2}'
+ .format(param_name, param_type, os.linesep))
+ raise exceptions.ParametersOfWrongTypeException(error_message.getvalue())
+
+ return model_parameters
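+
+# Usage sketch (illustrative names; assumes declared ``models.Input`` rows):
+#
+#     from aria.modeling import models
+#
+#     declared = {'port': models.Input(name='port', type_name='integer',
+#                                      value=8080, description='listen port')}
+#     merged = merge_parameter_values({'port': 9090}, declared,
+#                                     model_cls=models.Input)
+#     assert merged['port'].value == 9090   # provided value wins
+#     # omitting 'port' would copy the declared default (8080) instead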
+
+
+def parameters_as_values(the_dict):
+ return dict((k, v.value) for k, v in the_dict.iteritems())
+
+
+def dict_as_arguments(the_dict):
+ return OrderedDict((name, value.as_argument()) for name, value in the_dict.iteritems())
+
+
+class classproperty(object): # pylint: disable=invalid-name
+ def __init__(self, f):
+ self._func = f
+        self.__doc__ = f.__doc__
+
+ def __get__(self, instance, owner):
+ return self._func(owner)
+
+
+def fix_doc(cls):
+ """
+ Class decorator to use the last base class's docstring and make sure Sphinx doesn't grab the
+ base constructor's docstring.
+ """
+ original_init = cls.__init__
+ def init(*args, **kwargs):
+ original_init(*args, **kwargs)
+
+ cls.__init__ = init
+ cls.__doc__ = cls.__bases__[-1].__doc__
+
+ return cls
+
+
+def _get_class_from_sql_relationship(field):
+ class_ = field._sa_adapter.owner_state.class_
+ prop_name = field._sa_adapter.attr.key
+ return getattr(class_, prop_name).property.mapper.class_
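+
+
+# ``classproperty`` usage sketch (illustrative class, not part of the models):
+#
+#     class Point(object):
+#         @classproperty
+#         def fields(cls):          # receives the owning class, not an instance
+#             return ('x', 'y')
+#
+#     assert Point.fields == ('x', 'y')    # accessible without instantiation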
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/__init__.py
new file mode 100644
index 0000000..24fee9e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/__init__.py
@@ -0,0 +1,32 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Orchestrator package.
+"""
+
+from .decorators import (
+ workflow,
+ operation,
+ WORKFLOW_DECORATOR_RESERVED_ARGUMENTS,
+ OPERATION_DECORATOR_RESERVED_ARGUMENTS
+)
+
+from . import (
+ context,
+ events,
+ workflows,
+ decorators
+)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/__init__.py
new file mode 100644
index 0000000..a87828d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/__init__.py
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Contexts for workflows and operations.
+"""
+
+from . import workflow, operation
+from .toolbelt import toolbelt
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/common.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/common.py
new file mode 100644
index 0000000..3c5f618
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/common.py
@@ -0,0 +1,217 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Common code for contexts.
+"""
+
+import logging
+from contextlib import contextmanager
+from functools import partial
+
+import jinja2
+
+from aria import (
+ logger as aria_logger,
+ modeling
+)
+from aria.storage import exceptions
+
+from ...utils.uuid import generate_uuid
+
+
+class BaseContext(object):
+ """
+ Base class for contexts.
+ """
+
+ INSTRUMENTATION_FIELDS = (
+ modeling.models.Service.inputs,
+ modeling.models.ServiceTemplate.inputs,
+ modeling.models.Policy.properties,
+ modeling.models.PolicyTemplate.properties,
+ modeling.models.Node.attributes,
+ modeling.models.Node.properties,
+ modeling.models.NodeTemplate.attributes,
+ modeling.models.NodeTemplate.properties,
+ modeling.models.Group.properties,
+ modeling.models.GroupTemplate.properties,
+ modeling.models.Capability.properties,
+ # TODO ARIA-279: modeling.models.Capability.attributes,
+ modeling.models.CapabilityTemplate.properties,
+ # TODO ARIA-279: modeling.models.CapabilityTemplate.attributes
+ modeling.models.Relationship.properties,
+ modeling.models.Artifact.properties,
+ modeling.models.ArtifactTemplate.properties,
+ modeling.models.Interface.inputs,
+ modeling.models.InterfaceTemplate.inputs,
+ modeling.models.Operation.inputs,
+ modeling.models.OperationTemplate.inputs
+ )
+
+ class PrefixedLogger(object):
+ def __init__(self, base_logger, task_id=None):
+ self._logger = base_logger
+ self._task_id = task_id
+
+ def __getattr__(self, attribute):
+ if attribute.upper() in logging._levelNames:
+ return partial(self._logger_with_task_id, _level=attribute)
+ else:
+ return getattr(self._logger, attribute)
+
+ def _logger_with_task_id(self, *args, **kwargs):
+ level = kwargs.pop('_level')
+ kwargs.setdefault('extra', {})['task_id'] = self._task_id
+ return getattr(self._logger, level)(*args, **kwargs)
+
+ def __init__(self,
+ name,
+ service_id,
+ model_storage,
+ resource_storage,
+ execution_id,
+ workdir=None,
+ **kwargs):
+ super(BaseContext, self).__init__(**kwargs)
+ self._name = name
+ self._id = generate_uuid(variant='uuid')
+ self._model = model_storage
+ self._resource = resource_storage
+ self._service_id = service_id
+ self._workdir = workdir
+ self._execution_id = execution_id
+ self.logger = None
+
+ def _register_logger(self, level=None, task_id=None):
+ self.logger = self.PrefixedLogger(
+ logging.getLogger(aria_logger.TASK_LOGGER_NAME), task_id=task_id)
+ self.logger.setLevel(level or logging.DEBUG)
+ if not self.logger.handlers:
+ self.logger.addHandler(self._get_sqla_handler())
+
+ def _get_sqla_handler(self):
+ return aria_logger.create_sqla_log_handler(model=self._model,
+ log_cls=modeling.models.Log,
+ execution_id=self._execution_id)
+
+ def __repr__(self):
+ return (
+ '{name}(name={self.name}, '
+            'deployment_id={self._service_id})'
+ .format(name=self.__class__.__name__, self=self))
+
+ @contextmanager
+ def logging_handlers(self, handlers=None):
+ handlers = handlers or []
+ try:
+ for handler in handlers:
+ self.logger.addHandler(handler)
+ yield self.logger
+ finally:
+ for handler in handlers:
+ self.logger.removeHandler(handler)
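+
+    # Sketch: attach an extra handler only for the duration of a block
+    # (``extra_handler`` is any logging.Handler; illustrative):
+    #
+    #     import logging
+    #     extra_handler = logging.StreamHandler()
+    #     with ctx.logging_handlers([extra_handler]) as logger:
+    #         logger.info('also visible on the extra handler')
+    #     # the handler is removed on exit, even if the block raises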
+
+ @property
+ def model(self):
+ """
+ Storage model API ("MAPI").
+ """
+ return self._model
+
+ @property
+ def resource(self):
+ """
+ Storage resource API ("RAPI").
+ """
+ return self._resource
+
+ @property
+ def service_template(self):
+ """
+ Service template model.
+ """
+ return self.service.service_template
+
+ @property
+ def service(self):
+ """
+ Service instance model.
+ """
+ return self.model.service.get(self._service_id)
+
+ @property
+ def name(self):
+ """
+ Operation name.
+ """
+ return self._name
+
+ @property
+ def id(self):
+ """
+ Operation ID.
+ """
+ return self._id
+
+ def download_resource(self, destination, path=None):
+ """
+ Download a service template resource from the storage resource API ("RAPI").
+ """
+ try:
+ self.resource.service.download(entry_id=str(self.service.id),
+ destination=destination,
+ path=path)
+ except exceptions.StorageError:
+ self.resource.service_template.download(entry_id=str(self.service_template.id),
+ destination=destination,
+ path=path)
+
+ def download_resource_and_render(self, destination, path=None, variables=None):
+ """
+ Downloads a service template resource from the resource storage and renders its content as a
+ Jinja template using the provided variables. ``ctx`` is available to the template without
+ providing it explicitly.
+ """
+ resource_content = self.get_resource(path=path)
+ resource_content = self._render_resource(resource_content=resource_content,
+ variables=variables)
+ with open(destination, 'wb') as f:
+ f.write(resource_content)
+
+ def get_resource(self, path=None):
+ """
+        Reads a service instance resource as a string from the resource storage.
+ """
+ try:
+ return self.resource.service.read(entry_id=str(self.service.id), path=path)
+ except exceptions.StorageError:
+ return self.resource.service_template.read(entry_id=str(self.service_template.id),
+ path=path)
+
+ def get_resource_and_render(self, path=None, variables=None):
+ """
+        Reads a service instance resource as a string from the resource storage and renders it as a
+ Jinja template using the provided variables. ``ctx`` is available to the template without
+ providing it explicitly.
+ """
+ resource_content = self.get_resource(path=path)
+ return self._render_resource(resource_content=resource_content, variables=variables)
+
+ def _render_resource(self, resource_content, variables):
+ variables = variables or {}
+ variables.setdefault('ctx', self)
+ resource_template = jinja2.Template(resource_content)
+ return resource_template.render(variables)
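+
+# Rendering sketch: given a stored template ``scripts/conf.j2`` containing
+# ``port={{ port }} service={{ ctx.service.name }}`` (illustrative path):
+#
+#     content = ctx.get_resource_and_render(path='scripts/conf.j2',
+#                                           variables={'port': 8080})
+#
+# ``ctx`` is injected automatically via ``variables.setdefault('ctx', self)``.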
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/exceptions.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/exceptions.py
new file mode 100644
index 0000000..e46e2b1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/exceptions.py
@@ -0,0 +1,27 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Context exceptions.
+"""
+
+from ..exceptions import OrchestratorError
+
+
+class ContextException(OrchestratorError):
+ """
+    Context-based exception.
+ """
+ pass
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/operation.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/operation.py
new file mode 100644
index 0000000..8613ec3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/operation.py
@@ -0,0 +1,174 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Operation contexts.
+"""
+
+import threading
+from contextlib import contextmanager
+
+import aria
+from aria.utils import file
+from . import common
+
+
+class BaseOperationContext(common.BaseContext):
+ """
+ Base class for contexts used during operation creation and execution.
+ """
+
+ def __init__(self, task_id, actor_id, **kwargs):
+ self._task_id = task_id
+ self._actor_id = actor_id
+ self._thread_local = threading.local()
+ self._destroy_session = kwargs.pop('destroy_session', False)
+ logger_level = kwargs.pop('logger_level', None)
+ super(BaseOperationContext, self).__init__(**kwargs)
+ self._register_logger(task_id=self.task.id, level=logger_level)
+
+ def __repr__(self):
+ details = 'function={task.function}; ' \
+ 'operation_arguments={task.arguments}'\
+ .format(task=self.task)
+ return '{name}({0})'.format(details, name=self.name)
+
+ @property
+ def task(self):
+ """
+ The task in the model storage.
+ """
+        # SQLAlchemy prevents accessing an object that was created on a different thread,
+        # so we retrieve the object from storage if the current thread isn't the one that
+        # created it.
+
+ if not hasattr(self._thread_local, 'task'):
+ self._thread_local.task = self.model.task.get(self._task_id)
+ return self._thread_local.task
+
+ @property
+ def plugin_workdir(self):
+ """
+ A work directory that is unique to the plugin and the service ID.
+ """
+ if self.task.plugin is None:
+ return None
+ plugin_workdir = '{0}/plugins/{1}/{2}'.format(self._workdir,
+ self.service.id,
+ self.task.plugin.name)
+ file.makedirs(plugin_workdir)
+ return plugin_workdir
+
+ @property
+ def serialization_dict(self):
+ context_dict = {
+ 'name': self.name,
+ 'service_id': self._service_id,
+ 'task_id': self._task_id,
+ 'actor_id': self._actor_id,
+ 'workdir': self._workdir,
+ 'model_storage': self.model.serialization_dict if self.model else None,
+ 'resource_storage': self.resource.serialization_dict if self.resource else None,
+ 'execution_id': self._execution_id,
+ 'logger_level': self.logger.level
+ }
+ return {
+ 'context_cls': self.__class__,
+ 'context': context_dict
+ }
+
+ @classmethod
+ def instantiate_from_dict(cls, model_storage=None, resource_storage=None, **kwargs):
+ if model_storage:
+ model_storage = aria.application_model_storage(**model_storage)
+ if resource_storage:
+ resource_storage = aria.application_resource_storage(**resource_storage)
+
+ return cls(model_storage=model_storage,
+ resource_storage=resource_storage,
+ destroy_session=True,
+ **kwargs)
+
+ def close(self):
+ if self._destroy_session:
+ self.model.log._session.remove()
+ self.model.log._engine.dispose()
+
+ @property
+ @contextmanager
+ def persist_changes(self):
+ yield
+ self.model.task.update(self.task)
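+
+    # Because ``persist_changes`` is a property wrapping a context manager, it is
+    # entered without parentheses; sketch (``attempts_count`` is illustrative):
+    #
+    #     with ctx.persist_changes:
+    #         ctx.task.attempts_count += 1   # mutated in memory...
+    #     # ...and flushed to storage here via model.task.update()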
+
+
+class NodeOperationContext(BaseOperationContext):
+ """
+ Context for node operations.
+ """
+
+ @property
+ def node(self):
+ """
+ The node of the current operation.
+ """
+ return self.model.node.get(self._actor_id)
+
+ @property
+ def node_template(self):
+ """
+ The node template of the current operation.
+ """
+ return self.node.node_template
+
+
+class RelationshipOperationContext(BaseOperationContext):
+ """
+ Context for relationship operations.
+ """
+
+ @property
+ def relationship(self):
+ """
+ The relationship instance of the current operation.
+ """
+ return self.model.relationship.get(self._actor_id)
+
+ @property
+ def source_node(self):
+ """
+ The relationship source node.
+ """
+ return self.relationship.source_node
+
+ @property
+ def source_node_template(self):
+ """
+ The relationship source node template.
+ """
+ return self.source_node.node_template
+
+ @property
+ def target_node(self):
+ """
+ The relationship target node.
+ """
+ return self.relationship.target_node
+
+ @property
+ def target_node_template(self):
+ """
+ The relationship target node template.
+ """
+ return self.target_node.node_template
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/toolbelt.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/toolbelt.py
new file mode 100644
index 0000000..a2e1122
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/toolbelt.py
@@ -0,0 +1,59 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Tools for operations.
+"""
+
+from . import operation
+
+
+class NodeToolBelt(object):
+ """
+ Node operation tool belt.
+ """
+ def __init__(self, operation_context):
+ self._op_context = operation_context
+
+ @property
+ def host_ip(self):
+ """
+        The host IP of the current node.
+ """
+ assert isinstance(self._op_context, operation.NodeOperationContext)
+ return self._op_context.node.host.attributes.get('ip')
+
+
+class RelationshipToolBelt(object):
+ """
+ Relationship operation tool belt.
+ """
+ def __init__(self, operation_context):
+ self._op_context = operation_context
+
+
+def toolbelt(operation_context):
+ """
+    Gets a tool belt suited to the given operation context.
+
+    :param operation_context: operation context for which to create the tool belt
+ """
+ if isinstance(operation_context, operation.NodeOperationContext):
+ return NodeToolBelt(operation_context)
+ elif isinstance(operation_context, operation.RelationshipOperationContext):
+ return RelationshipToolBelt(operation_context)
+ else:
+ raise RuntimeError("Operation context not supported")
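+
+
+# Sketch: inside an @operation declared with ``toolbelt=True``, the decorator
+# injects the matching belt (see orchestrator/decorators.py):
+#
+#     @operation(toolbelt=True)
+#     def configure(ctx, toolbelt, **kwargs):
+#         ip = toolbelt.host_ip     # NodeToolBelt only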
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/workflow.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/workflow.py
new file mode 100644
index 0000000..738d2fd
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/context/workflow.py
@@ -0,0 +1,135 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Workflow context.
+"""
+
+import threading
+from contextlib import contextmanager
+
+from .exceptions import ContextException
+from .common import BaseContext
+
+
+class WorkflowContext(BaseContext):
+ """
+ Context used during workflow creation and execution.
+ """
+ def __init__(self,
+ workflow_name,
+ parameters=None,
+ task_max_attempts=1,
+ task_retry_interval=0,
+ task_ignore_failure=False,
+ *args, **kwargs):
+ super(WorkflowContext, self).__init__(*args, **kwargs)
+ self._workflow_name = workflow_name
+ self._parameters = parameters or {}
+ self._task_max_attempts = task_max_attempts
+ self._task_retry_interval = task_retry_interval
+ self._task_ignore_failure = task_ignore_failure
+ self._execution_graph = None
+ self._register_logger()
+
+ def __repr__(self):
+ return (
+ '{name}(deployment_id={self._service_id}, '
+ 'workflow_name={self._workflow_name}, execution_id={self._execution_id})'.format(
+ name=self.__class__.__name__, self=self))
+
+ @property
+ def workflow_name(self):
+ return self._workflow_name
+
+ @property
+ def execution(self):
+ """
+ Execution model.
+ """
+ return self.model.execution.get(self._execution_id)
+
+ @execution.setter
+ def execution(self, value):
+ """
+ Stores the execution in the storage model API ("MAPI").
+ """
+ self.model.execution.put(value)
+
+ @property
+ def node_templates(self):
+ """
+        Iterates over node templates.
+ """
+ key = 'service_{0}'.format(self.model.node_template.model_cls.name_column_name())
+
+ return self.model.node_template.iter(
+ filters={
+ key: getattr(self.service, self.service.name_column_name())
+ }
+ )
+
+ @property
+ def nodes(self):
+ """
+ Iterates over nodes.
+ """
+ key = 'service_{0}'.format(self.model.node.model_cls.name_column_name())
+ return self.model.node.iter(
+ filters={
+ key: getattr(self.service, self.service.name_column_name())
+ }
+ )
+
+ @property
+ @contextmanager
+ def persist_changes(self):
+ yield
+ self._model.execution.update(self.execution)
+
+
+class _CurrentContext(threading.local):
+ """
+ Provides a thread-level context, with sugar for the task MAPI.
+ """
+
+ def __init__(self):
+ super(_CurrentContext, self).__init__()
+ self._workflow_context = None
+
+ def _set(self, value):
+ self._workflow_context = value
+
+ def get(self):
+ """
+ Retrieves the current workflow context.
+ """
+ if self._workflow_context is not None:
+ return self._workflow_context
+ raise ContextException("No context was set")
+
+ @contextmanager
+ def push(self, workflow_context):
+ """
+ Switches the current context to the provided context.
+ """
+ prev_workflow_context = self._workflow_context
+ self._set(workflow_context)
+ try:
+ yield self
+ finally:
+ self._set(prev_workflow_context)
+
+current = _CurrentContext()
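+
+
+# Sketch: the @workflow decorator pushes its context so nested code can reach
+# it without passing it explicitly:
+#
+#     with current.push(workflow_ctx):
+#         assert current.get() is workflow_ctx
+#     # on exit the previous context is restored; get() raises
+#     # ContextException if no context was ever set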
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/decorators.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/decorators.py
new file mode 100644
index 0000000..4b163d6
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/decorators.py
@@ -0,0 +1,85 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Workflow and operation decorators.
+"""
+
+from functools import partial, wraps
+
+from ..utils.validation import validate_function_arguments
+from ..utils.uuid import generate_uuid
+
+from . import context
+from .workflows.api import task_graph
+
+
+WORKFLOW_DECORATOR_RESERVED_ARGUMENTS = set(('ctx', 'graph'))
+OPERATION_DECORATOR_RESERVED_ARGUMENTS = set(('ctx', 'toolbelt'))
+
+
+def workflow(func=None, suffix_template=''):
+ """
+ Workflow decorator.
+ """
+ if func is None:
+ return partial(workflow, suffix_template=suffix_template)
+
+ @wraps(func)
+ def _wrapper(ctx, **workflow_parameters):
+
+ workflow_name = _generate_name(
+ func_name=func.__name__,
+ suffix_template=suffix_template,
+ ctx=ctx,
+ **workflow_parameters)
+
+ workflow_parameters.setdefault('ctx', ctx)
+ workflow_parameters.setdefault('graph', task_graph.TaskGraph(workflow_name))
+ validate_function_arguments(func, workflow_parameters)
+ with ctx.model.instrument(*ctx.INSTRUMENTATION_FIELDS):
+ with context.workflow.current.push(ctx):
+ func(**workflow_parameters)
+ return workflow_parameters['graph']
+ return _wrapper
+
+
+def operation(func=None, toolbelt=False, suffix_template='', logging_handlers=None):
+ """
+ Operation decorator.
+ """
+
+ if func is None:
+ return partial(operation,
+ suffix_template=suffix_template,
+ toolbelt=toolbelt,
+ logging_handlers=logging_handlers)
+
+ @wraps(func)
+ def _wrapper(**func_kwargs):
+ ctx = func_kwargs['ctx']
+ if toolbelt:
+ operation_toolbelt = context.toolbelt(ctx)
+ func_kwargs.setdefault('toolbelt', operation_toolbelt)
+ validate_function_arguments(func, func_kwargs)
+ with ctx.model.instrument(*ctx.INSTRUMENTATION_FIELDS):
+ return func(**func_kwargs)
+ return _wrapper
+
+
+def _generate_name(func_name, ctx, suffix_template, **custom_kwargs):
+ return '{func_name}.{suffix}'.format(
+ func_name=func_name,
+ suffix=suffix_template.format(ctx=ctx, **custom_kwargs) or generate_uuid(variant='uuid'))
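+
+
+# Workflow sketch: ``ctx`` and ``graph`` are reserved arguments supplied by
+# the wrapper (the body here is illustrative):
+#
+#     @workflow
+#     def install(ctx, graph, **parameters):
+#         for node in ctx.nodes:
+#             pass    # add api tasks to ``graph`` here
+#
+#     graph = install(ctx=workflow_ctx)   # returns the populated TaskGraph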
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/events.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/events.py
new file mode 100644
index 0000000..ef84e5d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/events.py
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Orchestrator events.
+"""
+
+from blinker import signal
+
+# workflow engine task signals:
+sent_task_signal = signal('sent_task_signal')
+start_task_signal = signal('start_task_signal')
+on_success_task_signal = signal('success_task_signal')
+on_failure_task_signal = signal('failure_task_signal')
+
+# workflow engine workflow signals:
+start_workflow_signal = signal('start_workflow_signal')
+on_cancelling_workflow_signal = signal('on_cancelling_workflow_signal')
+on_cancelled_workflow_signal = signal('on_cancelled_workflow_signal')
+on_success_workflow_signal = signal('on_success_workflow_signal')
+on_failure_workflow_signal = signal('on_failure_workflow_signal')
+on_resume_workflow_signal = signal('on_resume_workflow_signal')
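+
+# blinker sketch: subscribe by connecting a callable; emitters call send():
+#
+#     def _on_success(sender, **kwargs):
+#         print('task succeeded: {0!r}'.format(sender))
+#
+#     on_success_task_signal.connect(_on_success)
+#     # ... later, the engine does: on_success_task_signal.send(task)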
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/exceptions.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/exceptions.py
new file mode 100644
index 0000000..384458f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/exceptions.py
@@ -0,0 +1,85 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Orchestrator exceptions.
+"""
+
+from aria.exceptions import AriaError
+
+
+class OrchestratorError(AriaError):
+ """
+    Orchestrator-based exception.
+ """
+ pass
+
+
+class InvalidPluginError(AriaError):
+ """
+    Raised when plugin validation fails
+ """
+ pass
+
+
+class PluginAlreadyExistsError(AriaError):
+ """
+ Raised when a plugin with the same package name and package version already exists
+ """
+ pass
+
+
+class TaskRetryException(RuntimeError):
+ """
+ Used internally when ctx.task.retry is called
+ """
+ def __init__(self, message, retry_interval=None):
+ super(TaskRetryException, self).__init__(message)
+ self.retry_interval = retry_interval
+
+
+class TaskAbortException(RuntimeError):
+ """
+ Used internally when ctx.task.abort is called
+ """
+ pass
+
+
+class UndeclaredWorkflowError(AriaError):
+ """
+ Raised when attempting to execute an undeclared workflow
+ """
+ pass
+
+
+class ActiveExecutionsError(AriaError):
+ """
+ Raised when attempting to execute a workflow on a service which already has an active execution
+ """
+ pass
+
+
+class WorkflowImplementationNotFoundError(AriaError):
+ """
+ Raised when attempting to import a workflow's code but the implementation is not found
+ """
+ pass
+
+
+class InvalidWorkflowRunnerParams(AriaError):
+ """
+    Raised when an invalid combination of arguments is passed to the workflow runner
+ """
+ pass
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/__init__.py
new file mode 100644
index 0000000..d15de99
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/__init__.py
@@ -0,0 +1,39 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Execution plugin package.
+"""
+
+from contextlib import contextmanager
+from . import instantiation
+
+
+# Populated during execution of python scripts
+ctx = None
+inputs = None
+
+
+@contextmanager
+def python_script_scope(operation_ctx, operation_inputs):
+ global ctx
+ global inputs
+ try:
+ ctx = operation_ctx
+ inputs = operation_inputs
+ yield
+ finally:
+ ctx = None
+ inputs = None
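+
+# Sketch: the process executor wraps user-script execution so the script can
+# read ``execution_plugin.ctx``/``inputs`` while it runs (Python 2 execfile;
+# ``script_path``/``script_globals`` are illustrative):
+#
+#     with python_script_scope(operation_ctx, operation_inputs):
+#         execfile(script_path, script_globals)
+#     # both module globals are reset to None afterwards, even on error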
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/common.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/common.py
new file mode 100644
index 0000000..ce6746c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/common.py
@@ -0,0 +1,154 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Execution plugin utilities.
+"""
+
+import json
+import os
+import tempfile
+
+import requests
+
+from . import constants
+from . import exceptions
+
+
+def is_windows():
+ return os.name == 'nt'
+
+
+def download_script(ctx, script_path):
+ split = script_path.split('://')
+ schema = split[0]
+ suffix = script_path.split('/')[-1]
+ file_descriptor, dest_script_path = tempfile.mkstemp(suffix='-{0}'.format(suffix))
+ os.close(file_descriptor)
+ try:
+ if schema in ('http', 'https'):
+ response = requests.get(script_path)
+ if response.status_code == 404:
+ ctx.task.abort('Failed to download script: {0} (status code: {1})'
+ .format(script_path, response.status_code))
+ content = response.text
+ with open(dest_script_path, 'wb') as f:
+                f.write(content.encode('utf-8'))
+ else:
+ ctx.download_resource(destination=dest_script_path, path=script_path)
+ except:
+ os.remove(dest_script_path)
+ raise
+ return dest_script_path
+
+
+def create_process_config(script_path, process, operation_kwargs, quote_json_env_vars=False):
+ """
+    Updates a process with its environment variables and returns it.
+
+ Gets a dict representing a process and a dict representing the environment variables. Converts
+ each environment variable to a format of::
+
+ <string representing the name of the variable>:
+ <json formatted string representing the value of the variable>.
+
+    Finally, updates the process with the newly formatted environment variables, and returns the
+    process.
+
+ :param process: dict representing a process
+ :type process: dict
+ :param operation_kwargs: dict representing environment variables that should exist in the
+ process's running environment.
+ :type operation_kwargs: dict
+ :return: process updated with its environment variables
+ :rtype: dict
+ """
+ process = process or {}
+ env_vars = operation_kwargs.copy()
+ if 'ctx' in env_vars:
+ del env_vars['ctx']
+ env_vars.update(process.get('env', {}))
+ for k, v in env_vars.items():
+ if isinstance(v, (dict, list, tuple, bool, int, float)):
+ v = json.dumps(v)
+ if quote_json_env_vars:
+ v = "'{0}'".format(v)
+ if is_windows():
+ # These <k,v> environment variables will subsequently
+ # be used in a subprocess.Popen() call, as the `env` parameter.
+ # In some windows python versions, if an environment variable
+ # name is not of type str (e.g. unicode), the Popen call will
+ # fail.
+ k = str(k)
+ # The windows shell removes all double quotes - escape them
+ # to still be able to pass JSON in env vars to the shell.
+ v = v.replace('"', '\\"')
+ del env_vars[k]
+ env_vars[k] = str(v)
+ process['env'] = env_vars
+ args = process.get('args')
+ command = script_path
+ command_prefix = process.get('command_prefix')
+ if command_prefix:
+ command = '{0} {1}'.format(command_prefix, command)
+ if args:
+ command = ' '.join([command] + [str(a) for a in args])
+ process['command'] = command
+ return process
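+
+# Sketch: non-string values are JSON-encoded before landing in ``env``:
+#
+#     process = create_process_config(
+#         script_path='/tmp/start.sh',
+#         process={'args': ['--verbose']},
+#         operation_kwargs={'port': 8080, 'tags': ['a', 'b']})
+#     # process['env']['port']  == '8080'
+#     # process['env']['tags']  == '["a", "b"]'
+#     # process['command']      == '/tmp/start.sh --verbose'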
+
+
+def patch_ctx(ctx):
+ ctx._error = None
+ task = ctx.task
+
+ def _validate_legal_action():
+ if ctx._error is not None:
+ ctx._error = RuntimeError(constants.ILLEGAL_CTX_OPERATION_MESSAGE)
+ raise ctx._error
+
+ def abort_operation(message=None):
+ _validate_legal_action()
+ ctx._error = exceptions.ScriptException(message=message, retry=False)
+ return ctx._error
+ task.abort = abort_operation
+
+ def retry_operation(message=None, retry_interval=None):
+ _validate_legal_action()
+ ctx._error = exceptions.ScriptException(message=message,
+ retry=True,
+ retry_interval=retry_interval)
+ return ctx._error
+ task.retry = retry_operation
+
+
+def check_error(ctx, error_check_func=None, reraise=False):
+ _error = ctx._error
+ # this happens when a script calls task.abort/task.retry more than once
+ if isinstance(_error, RuntimeError):
+ ctx.task.abort(str(_error))
+ # ScriptException is populated by the ctx proxy server when task.abort or task.retry
+ # are called
+ elif isinstance(_error, exceptions.ScriptException):
+ if _error.retry:
+ ctx.task.retry(_error.message, _error.retry_interval)
+ else:
+ ctx.task.abort(_error.message)
+ # local and ssh operations may pass an additional logic check for errors here
+ if error_check_func:
+ error_check_func()
+    # if this function is called from within an ``except`` clause, a re-raise may be required
+ if reraise:
+ raise # pylint: disable=misplaced-bare-raise
+ return _error
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/constants.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/constants.py
new file mode 100644
index 0000000..1953912
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/constants.py
@@ -0,0 +1,57 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Execution plugin constants.
+"""
+import os
+import tempfile
+
+from . import exceptions
+
+# related to local
+PYTHON_SCRIPT_FILE_EXTENSION = '.py'
+POWERSHELL_SCRIPT_FILE_EXTENSION = '.ps1'
+DEFAULT_POWERSHELL_EXECUTABLE = 'powershell'
+
+# related to both local and ssh
+ILLEGAL_CTX_OPERATION_MESSAGE = 'ctx may only abort or retry once'
+
+# related to ssh
+DEFAULT_BASE_DIR = os.path.join(tempfile.gettempdir(), 'aria-ctx')
+FABRIC_ENV_DEFAULTS = {
+ 'connection_attempts': 5,
+ 'timeout': 10,
+ 'forward_agent': False,
+ 'abort_on_prompts': True,
+ 'keepalive': 0,
+ 'linewise': False,
+ 'pool_size': 0,
+ 'skip_bad_hosts': False,
+ 'status': False,
+ 'disable_known_hosts': True,
+ 'combine_stderr': True,
+ 'abort_exception': exceptions.TaskException,
+}
+VALID_FABRIC_GROUPS = set([
+ 'status',
+ 'aborts',
+ 'warnings',
+ 'running',
+ 'stdout',
+ 'stderr',
+ 'user',
+ 'everything'
+])
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ctx_proxy/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ctx_proxy/__init__.py
new file mode 100644
index 0000000..46c8cf1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ctx_proxy/__init__.py
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+``ctx`` proxy.
+"""
+
+from . import server, client
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ctx_proxy/client.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ctx_proxy/client.py
new file mode 100644
index 0000000..84d66f1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ctx_proxy/client.py
@@ -0,0 +1,114 @@
+#! /usr/bin/env python
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+``ctx`` proxy client implementation.
+"""
+
+import argparse
+import json
+import os
+import sys
+import urllib2
+
+
+# Environment variable for the socket url (used by clients to locate the socket)
+CTX_SOCKET_URL = 'CTX_SOCKET_URL'
+
+
+class _RequestError(RuntimeError):
+
+ def __init__(self, ex_message, ex_type, ex_traceback):
+        super(_RequestError, self).__init__('{0}: {1}'.format(ex_type, ex_message))
+ self.ex_type = ex_type
+ self.ex_message = ex_message
+ self.ex_traceback = ex_traceback
+
+
+def _http_request(socket_url, request, method, timeout):
+ opener = urllib2.build_opener(urllib2.HTTPHandler)
+ request = urllib2.Request(socket_url, data=json.dumps(request))
+ request.get_method = lambda: method
+ response = opener.open(request, timeout=timeout)
+
+ if response.code != 200:
+ raise RuntimeError('Request failed: {0}'.format(response))
+ return json.loads(response.read())
+
+
+def _client_request(socket_url, args, timeout, method='POST'):
+ response = _http_request(
+ socket_url=socket_url,
+ request={'args': args},
+ method=method,
+ timeout=timeout
+ )
+ payload = response.get('payload')
+ response_type = response.get('type')
+ if response_type == 'error':
+ ex_type = payload['type']
+ ex_message = payload['message']
+ ex_traceback = payload['traceback']
+ raise _RequestError(ex_message, ex_type, ex_traceback)
+ elif response_type == 'stop_operation':
+ raise SystemExit(payload['message'])
+ else:
+ return payload
+
+
+def _parse_args(args):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-t', '--timeout', type=int, default=30)
+ parser.add_argument('--socket-url', default=os.environ.get(CTX_SOCKET_URL))
+ parser.add_argument('--json-arg-prefix', default='@')
+ parser.add_argument('-j', '--json-output', action='store_true')
+ parser.add_argument('args', nargs='*')
+ args = parser.parse_args(args=args)
+ if not args.socket_url:
+ raise RuntimeError('Missing CTX_SOCKET_URL environment variable '
+ 'or socket_url command line argument. (ctx is supposed to be executed '
+ 'within an operation context)')
+ return args
+
+
+def _process_args(json_prefix, args):
+ processed_args = []
+ for arg in args:
+ if arg.startswith(json_prefix):
+            arg = json.loads(arg[len(json_prefix):])
+ processed_args.append(arg)
+ return processed_args
+
+
+def main(args=None):
+ args = _parse_args(args)
+ response = _client_request(
+ args.socket_url,
+ args=_process_args(args.json_arg_prefix, args.args),
+ timeout=args.timeout)
+ if args.json_output:
+ response = json.dumps(response)
+ else:
+ if response is None:
+ response = ''
+ try:
+ response = str(response)
+ except UnicodeEncodeError:
+ response = unicode(response).encode('utf8')
+ sys.stdout.write(response)
+
+if __name__ == '__main__':
+ main()
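+
+
+# Shell usage sketch (CTX_SOCKET_URL is exported by the process executor;
+# attribute names are illustrative):
+#
+#     ctx node attributes ip                # navigate and print a value
+#     ctx node attributes port = @8080      # set; '@' parses 8080 as JSON
+#     ctx task abort [ "failure reason" ]   # call ctx.task.abort(...)
+#
+# Each whitespace-separated token becomes one entry in the ``args`` list sent
+# to the proxy server.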
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ctx_proxy/server.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ctx_proxy/server.py
new file mode 100644
index 0000000..91b95d9
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ctx_proxy/server.py
@@ -0,0 +1,244 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+``ctx`` proxy server implementation.
+"""
+
+import json
+import socket
+import Queue
+import StringIO
+import threading
+import traceback
+import wsgiref.simple_server
+
+import bottle
+from aria import modeling
+
+from .. import exceptions
+
+
+class CtxProxy(object):
+
+ def __init__(self, ctx, ctx_patcher=(lambda *args, **kwargs: None)):
+ self.ctx = ctx
+ self._ctx_patcher = ctx_patcher
+ self.port = _get_unused_port()
+ self.socket_url = 'http://localhost:{0}'.format(self.port)
+ self.server = None
+ self._started = Queue.Queue(1)
+ self.thread = self._start_server()
+ self._started.get(timeout=5)
+
+ def _start_server(self):
+
+ class BottleServerAdapter(bottle.ServerAdapter):
+ proxy = self
+
+ def close_session(self):
+ self.proxy.ctx.model.log._session.remove()
+
+ def run(self, app):
+
+ class Server(wsgiref.simple_server.WSGIServer):
+ allow_reuse_address = True
+ bottle_server = self
+
+ def handle_error(self, request, client_address):
+ pass
+
+ def serve_forever(self, poll_interval=0.5):
+ try:
+ wsgiref.simple_server.WSGIServer.serve_forever(self, poll_interval)
+ finally:
+ # Once shutdown is called, we need to close the session.
+ # If the session is not closed properly, it might raise warnings,
+ # or even lock the database.
+ self.bottle_server.close_session()
+
+ class Handler(wsgiref.simple_server.WSGIRequestHandler):
+ def address_string(self):
+ return self.client_address[0]
+
+ def log_request(*args, **kwargs): # pylint: disable=no-method-argument
+ if not self.quiet:
+ return wsgiref.simple_server.WSGIRequestHandler.log_request(*args,
+ **kwargs)
+ server = wsgiref.simple_server.make_server(
+ host=self.host,
+ port=self.port,
+ app=app,
+ server_class=Server,
+ handler_class=Handler)
+ self.proxy.server = server
+ self.proxy._started.put(True)
+ server.serve_forever(poll_interval=0.1)
+
+ def serve():
+ # Since task is a thread_local object, we need to patch it inside the server thread.
+ self._ctx_patcher(self.ctx)
+
+ bottle_app = bottle.Bottle()
+ bottle_app.post('/', callback=self._request_handler)
+ bottle.run(
+ app=bottle_app,
+ host='localhost',
+ port=self.port,
+ quiet=True,
+ server=BottleServerAdapter)
+ thread = threading.Thread(target=serve)
+ thread.daemon = True
+ thread.start()
+ return thread
+
+ def close(self):
+ if self.server:
+ self.server.shutdown()
+ self.server.server_close()
+
+ def _request_handler(self):
+ request = bottle.request.body.read() # pylint: disable=no-member
+ response = self._process(request)
+ return bottle.LocalResponse(
+ body=json.dumps(response, cls=modeling.utils.ModelJSONEncoder),
+ status=200,
+ headers={'content-type': 'application/json'}
+ )
+
+ def _process(self, request):
+ try:
+ with self.ctx.model.instrument(*self.ctx.INSTRUMENTATION_FIELDS):
+ payload = _process_request(self.ctx, request)
+ result_type = 'result'
+ if isinstance(payload, exceptions.ScriptException):
+ payload = dict(message=str(payload))
+ result_type = 'stop_operation'
+ result = {'type': result_type, 'payload': payload}
+ except Exception as e:
+ traceback_out = StringIO.StringIO()
+ traceback.print_exc(file=traceback_out)
+ payload = {
+ 'type': type(e).__name__,
+ 'message': str(e),
+ 'traceback': traceback_out.getvalue()
+ }
+ result = {'type': 'error', 'payload': payload}
+
+ return result
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ self.close()
+
+
+class CtxError(RuntimeError):
+ pass
+
+
+class CtxParsingError(CtxError):
+ pass
+
+
+def _process_request(ctx, request):
+ request = json.loads(request)
+ args = request['args']
+ return _process_arguments(ctx, args)
+
+
+def _process_arguments(obj, args):
+ # Modifying?
+ try:
+ # TODO: should there be a way to escape "=" in case it is needed as real argument?
+ equals_index = args.index('=') # raises ValueError if not found
+ except ValueError:
+ equals_index = None
+ if equals_index is not None:
+ if equals_index == 0:
+ raise CtxParsingError('The "=" argument cannot be first')
+ elif equals_index != len(args) - 2:
+ raise CtxParsingError('The "=" argument must be penultimate')
+ modifying = True
+ modifying_key = args[-3]
+ modifying_value = args[-1]
+ args = args[:-3]
+ else:
+ modifying = False
+ modifying_key = None
+ modifying_value = None
+
+ # Parse all arguments
+ while len(args) > 0:
+ obj, args = _process_next_operation(obj, args, modifying)
+
+ if modifying:
+ if hasattr(obj, '__setitem__'):
+ # Modify item value (dict, list, and similar)
+ if isinstance(obj, (list, tuple)):
+ modifying_key = int(modifying_key)
+ obj[modifying_key] = modifying_value
+ elif hasattr(obj, modifying_key):
+ # Modify object attribute
+ setattr(obj, modifying_key, modifying_value)
+ else:
+ raise CtxError('Cannot modify `{0}` of `{1!r}`'.format(modifying_key, obj))
+
+ return obj
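+
+# Server-side sketch of the argument grammar handled above:
+#
+#     _process_arguments(ctx, ['node', 'attributes', 'ip'])
+#     # -> ctx.node.attributes['ip']
+#     _process_arguments(ctx, ['node', 'attributes', 'ip', '=', '10.0.0.1'])
+#     # -> sets ctx.node.attributes['ip'] on the container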
+
+
+def _process_next_operation(obj, args, modifying):
+ args = list(args)
+ arg = args.pop(0)
+
+ # Call?
+ if arg == '[':
+ # TODO: should there be a way to escape "[" and "]" in case they are needed as real
+ # arguments?
+ try:
+ closing_index = args.index(']') # raises ValueError if not found
+ except ValueError:
+ raise CtxParsingError('Opening "[" without a closing "]')
+ callable_args = args[:closing_index]
+ args = args[closing_index + 1:]
+ if not callable(obj):
+ raise CtxError('Used "[" and "] on an object that is not callable')
+ return obj(*callable_args), args
+
+ # Attribute?
+ if isinstance(arg, basestring):
+ if hasattr(obj, arg):
+ return getattr(obj, arg), args
+ token_sugared = arg.replace('-', '_')
+ if hasattr(obj, token_sugared):
+ return getattr(obj, token_sugared), args
+
+ # Item? (dict, lists, and similar)
+ if hasattr(obj, '__getitem__'):
+ if modifying and (arg not in obj) and hasattr(obj, '__setitem__'):
+ # Create nested dict
+ obj[arg] = {}
+ return obj[arg], args
+
+ raise CtxParsingError('Cannot parse argument: `{0!r}`'.format(arg))
+
+
+def _get_unused_port():
+ sock = socket.socket()
+ sock.bind(('127.0.0.1', 0))
+ _, port = sock.getsockname()
+ sock.close()
+ return port
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/environment_globals.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/environment_globals.py
new file mode 100644
index 0000000..6dec293
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/environment_globals.py
@@ -0,0 +1,57 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities for managing globals for the environment.
+"""
+
+def create_initial_globals(path):
+ """
+ Emulates a ``globals()`` call in a freshly loaded module.
+
+ The implementation of this function is likely to raise a couple of questions. If you read the
+ implementation and nothing bothered you, feel free to skip the rest of this docstring.
+
+ First, why is this function in its own module and not, say, in the same module of the other
+ environment-related functions? Second, why is it implemented in such a way that copies the
+ globals, then deletes the item that represents this function, and then changes some other
+ entries?
+
+ Well, these two questions can be answered with one (elaborate) explanation. If this function
+ were in the same module as the other environment-related functions, we would have had to
+ delete more items from the globals than just ``create_initial_globals``. That is because all
+ of the other function names would also be in the globals, and since there is no built-in
+ mechanism that returns the names of user-defined objects, deleting them all would be overkill.
+
+ *But why do we rely on the copy-existing-globals-and-delete-entries method, when it seems to
+ force us to put ``create_initial_globals`` in its own file?*
+
+ Well, because there is no easier method of creating globals of a newly loaded module.
+
+ *How about hard coding a ``globals`` dict? It seems that there are very few entries:
+ ``__doc__``, ``__file__``, ``__name__``, ``__package__`` (but don't forget ``__builtins__``).*
+
+ That would be coupling our implementation to a specific ``globals`` implementation. What if
+ ``globals`` were to change?
+ """
+ copied_globals = globals().copy()
+ copied_globals.update({
+ '__doc__': 'Dynamically executed script',
+ '__file__': path,
+ '__name__': '__main__',
+ '__package__': None
+ })
+ del copied_globals[create_initial_globals.__name__]
+ return copied_globals
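A hedged usage sketch of the function above; the path is invented, and the ``execfile`` call mirrors ``_eval_script_func`` later in this diff (Python 2, as in this codebase):

    initial = create_initial_globals('/tmp/script.py')
    print(sorted(initial.keys()))
    # roughly: ['__builtins__', '__doc__', '__file__', '__name__', '__package__']

    # Because __name__ is '__main__', any "if __name__ == '__main__':" block
    # in the executed script behaves as if the script were run directly:
    execfile('/tmp/script.py', initial)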
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/exceptions.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/exceptions.py
new file mode 100644
index 0000000..f201fae
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/exceptions.py
@@ -0,0 +1,47 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Execution plugin exceptions.
+"""
+
+class ProcessException(Exception):
+ """
+ Raised when local scripts and remote SSH commands fail.
+ """
+
+ def __init__(self, stderr=None, stdout=None, command=None, exit_code=None):
+ super(ProcessException, self).__init__(stderr)
+ self.command = command
+ self.exit_code = exit_code
+ self.stdout = stdout
+ self.stderr = stderr
+
+
+class TaskException(Exception):
+ """
+ Raised when remote SSH scripts fail.
+ """
+
+
+class ScriptException(Exception):
+ """
+ Used by the ``ctx`` proxy server when ``task.retry`` or ``task.abort`` is called by a script.
+ """
+
+ def __init__(self, message=None, retry=None, retry_interval=None):
+ super(ScriptException, self).__init__(message)
+ self.retry = retry
+ self.retry_interval = retry_interval
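A small, hedged sketch of how these exceptions are meant to be consumed; ``run_some_operation``, ``log`` and ``schedule_retry`` are hypothetical:

    try:
        run_some_operation()
    except ProcessException as e:
        # a local script or remote command returned a non-zero exit code
        log('{0} failed with {1}: {2}'.format(e.command, e.exit_code, e.stderr))
    except ScriptException as e:
        if e.retry:  # set when a script calls task.retry through the ctx proxy
            schedule_retry(after=e.retry_interval)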
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/instantiation.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/instantiation.py
new file mode 100644
index 0000000..8b52015
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/instantiation.py
@@ -0,0 +1,217 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Instantiation of :class:`~aria.modeling.models.Operation` models.
+"""
+
+# TODO: this module will eventually be moved to a new "aria.instantiation" package
+from ...modeling.functions import Function
+from ... import utils
+
+
+def configure_operation(operation, reporter):
+ host = None
+ interface = operation.interface
+ if interface.node is not None:
+ host = interface.node.host
+ elif interface.relationship is not None:
+ if operation.relationship_edge is True:
+ host = interface.relationship.target_node.host
+ else: # either False or None (None meaning that edge was not specified)
+ host = interface.relationship.source_node.host
+
+ _configure_common(operation, reporter)
+ if host is None:
+ _configure_local(operation)
+ else:
+ _configure_remote(operation, reporter)
+
+ # Any remaining un-handled configuration parameters will become extra arguments, available as
+ # kwargs in either "run_script_locally" or "run_script_with_ssh"
+ for key, value in operation.configurations.iteritems():
+ if key not in ('process', 'ssh'):
+ operation.arguments[key] = value.instantiate(None)
+
+
+def _configure_common(operation, reporter):
+ """
+ Local and remote operations.
+ """
+
+ from ...modeling.models import Argument
+ operation.arguments['script_path'] = Argument.wrap('script_path', operation.implementation,
+ 'Relative path to the executable file.')
+ operation.arguments['process'] = Argument.wrap('process', _get_process(operation, reporter),
+ 'Sub-process configuration.')
+
+
+def _configure_local(operation):
+ """
+ Local operation.
+ """
+
+ from . import operations
+ operation.function = '{0}.{1}'.format(operations.__name__,
+ operations.run_script_locally.__name__)
+
+
+def _configure_remote(operation, reporter):
+ """
+ Remote SSH operation via Fabric.
+ """
+
+ from ...modeling.models import Argument
+ from . import operations
+
+ ssh = _get_ssh(operation, reporter)
+
+ # Defaults
+ # TODO: find a way to configure these generally in the service template
+ default_user = ''
+ default_password = ''
+ if 'user' not in ssh:
+ ssh['user'] = default_user
+ if ('password' not in ssh) and ('key' not in ssh) and ('key_filename' not in ssh):
+ ssh['password'] = default_password
+
+ operation.arguments['use_sudo'] = Argument.wrap('use_sudo', ssh.get('use_sudo', False),
+ 'Whether to execute with sudo.')
+
+ operation.arguments['hide_output'] = Argument.wrap('hide_output', ssh.get('hide_output', []),
+ 'Hide output of these Fabric groups.')
+
+ fabric_env = {}
+ if 'warn_only' in ssh:
+ fabric_env['warn_only'] = ssh['warn_only']
+ fabric_env['user'] = ssh.get('user')
+ fabric_env['password'] = ssh.get('password')
+ fabric_env['key'] = ssh.get('key')
+ fabric_env['key_filename'] = ssh.get('key_filename')
+ if 'address' in ssh:
+ fabric_env['host_string'] = ssh['address']
+
+ # Make sure we have a user
+ if fabric_env.get('user') is None:
+ reporter.report('must configure "ssh.user" for "{0}"'.format(operation.implementation),
+ level=reporter.Issue.BETWEEN_TYPES)
+
+ # Make sure we have an authentication value
+ if (fabric_env.get('password') is None) and \
+ (fabric_env.get('key') is None) and \
+ (fabric_env.get('key_filename') is None):
+ reporter.report(
+ 'must configure "ssh.password", "ssh.key", or "ssh.key_filename" for "{0}"'
+ .format(operation.implementation),
+ level=reporter.Issue.BETWEEN_TYPES)
+
+ operation.arguments['fabric_env'] = Argument.wrap('fabric_env', fabric_env,
+ 'Fabric configuration.')
+
+ operation.function = '{0}.{1}'.format(operations.__name__,
+ operations.run_script_with_ssh.__name__)
+
+
+def _get_process(operation, reporter):
+ value = (operation.configurations.get('process')._value
+ if 'process' in operation.configurations
+ else None)
+ if value is None:
+ return {}
+ _validate_type(value, dict, 'process', reporter)
+ value = utils.collections.OrderedDict(value)
+ for k, v in value.iteritems():
+ if k == 'eval_python':
+ value[k] = _coerce_bool(v, 'process.eval_python', reporter)
+ elif k == 'cwd':
+ _validate_type(v, basestring, 'process.cwd', reporter)
+ elif k == 'command_prefix':
+ _validate_type(v, basestring, 'process.command_prefix', reporter)
+ elif k == 'args':
+ value[k] = _dict_to_list_of_strings(v, 'process.args', reporter)
+ elif k == 'env':
+ _validate_type(v, dict, 'process.env', reporter)
+ else:
+ reporter.report('unsupported configuration parameter: "process.{0}"'.format(k),
+ level=reporter.Issue.BETWEEN_TYPES)
+ return value
+
+
+def _get_ssh(operation, reporter):
+ value = (operation.configurations.get('ssh')._value
+ if 'ssh' in operation.configurations
+ else None)
+ if value is None:
+ return {}
+ _validate_type(value, dict, 'ssh', reporter)
+ value = utils.collections.OrderedDict(value)
+ for k, v in value.iteritems():
+ if k == 'use_sudo':
+ value[k] = _coerce_bool(v, 'ssh.use_sudo', reporter)
+ elif k == 'hide_output':
+ value[k] = _dict_to_list_of_strings(v, 'ssh.hide_output', reporter)
+ elif k == 'warn_only':
+ value[k] = _coerce_bool(v, 'ssh.warn_only', reporter)
+ elif k == 'user':
+ _validate_type(v, basestring, 'ssh.user', reporter)
+ elif k == 'password':
+ _validate_type(v, basestring, 'ssh.password', reporter)
+ elif k == 'key':
+ _validate_type(v, basestring, 'ssh.key', reporter)
+ elif k == 'key_filename':
+ _validate_type(v, basestring, 'ssh.key_filename', reporter)
+ elif k == 'address':
+ _validate_type(v, basestring, 'ssh.address', reporter)
+ else:
+ reporter.report('unsupported configuration parameter: "ssh.{0}"'.format(k),
+ level=reporter.Issue.BETWEEN_TYPES)
+ return value
+
+
+def _validate_type(value, the_type, name, reporter):
+ if isinstance(value, Function):
+ return
+ if not isinstance(value, the_type):
+ reporter.report(
+ '"{0}" configuration is not a {1}: {2}'.format(
+ name, utils.type.full_type_name(the_type), utils.formatting.safe_repr(value)),
+ level=reporter.Issue.BETWEEN_TYPES)
+
+
+def _coerce_bool(value, name, reporter):
+ if value is None:
+ return None
+ if isinstance(value, bool):
+ return value
+ _validate_type(value, basestring, name, reporter)
+ if value == 'true':
+ return True
+ elif value == 'false':
+ return False
+ else:
+ reporter.report(
+ '"{0}" configuration is not "true" or "false": {1}'.format(
+ name, utils.formatting.safe_repr(value)),
+ level=reporter.Issue.BETWEEN_TYPES)
+
+
+def _dict_to_list_of_strings(the_dict, name, reporter):
+ _validate_type(the_dict, dict, name, reporter)
+ value = []
+ for k in sorted(the_dict):
+ v = the_dict[k]
+ _validate_type(v, basestring, '{0}.{1}'.format(name, k), reporter)
+ value.append(v)
+ return value
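To make the validation above concrete, here is a hedged sketch of an ``ssh`` configuration in the shape ``_get_ssh`` accepts; the values are invented. Note that booleans may arrive as the strings ``'true'``/``'false'`` and are normalized by ``_coerce_bool``:

    raw_ssh = {
        'user': 'ubuntu',
        'key_filename': '/home/ubuntu/.ssh/id_rsa',
        'address': '10.0.0.5',
        'use_sudo': 'true',    # coerced to True
        'warn_only': 'false',  # coerced to False
    }
    # Any other key, e.g. 'port', would be reported as
    # 'unsupported configuration parameter: "ssh.port"'.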
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/local.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/local.py
new file mode 100644
index 0000000..04b9ecd
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/local.py
@@ -0,0 +1,128 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Local execution of operations.
+"""
+
+import os
+import subprocess
+import threading
+import StringIO
+
+from . import ctx_proxy
+from . import exceptions
+from . import common
+from . import constants
+from . import environment_globals
+from . import python_script_scope
+
+
+def run_script(ctx, script_path, process, **kwargs):
+ if not script_path:
+ ctx.task.abort('Missing script_path')
+ process = process or {}
+ script_path = common.download_script(ctx, script_path)
+ script_func = _get_run_script_func(script_path, process)
+ return script_func(
+ ctx=ctx,
+ script_path=script_path,
+ process=process,
+ operation_kwargs=kwargs)
+
+
+def _get_run_script_func(script_path, process):
+ if _treat_script_as_python_script(script_path, process):
+ return _eval_script_func
+ else:
+ if _treat_script_as_powershell_script(script_path):
+ process.setdefault('command_prefix', constants.DEFAULT_POWERSHELL_EXECUTABLE)
+ return _execute_func
+
+
+def _treat_script_as_python_script(script_path, process):
+ eval_python = process.get('eval_python')
+ script_extension = os.path.splitext(script_path)[1].lower()
+ return (eval_python is True or (script_extension == constants.PYTHON_SCRIPT_FILE_EXTENSION and
+ eval_python is not False))
+
+
+def _treat_script_as_powershell_script(script_path):
+ script_extension = os.path.splitext(script_path)[1].lower()
+ return script_extension == constants.POWERSHELL_SCRIPT_FILE_EXTENSION
+
+
+def _eval_script_func(script_path, ctx, operation_kwargs, **_):
+ with python_script_scope(operation_ctx=ctx, operation_inputs=operation_kwargs):
+ execfile(script_path, environment_globals.create_initial_globals(script_path))
+
+
+def _execute_func(script_path, ctx, process, operation_kwargs):
+ os.chmod(script_path, 0755)
+ process = common.create_process_config(
+ script_path=script_path,
+ process=process,
+ operation_kwargs=operation_kwargs)
+ command = process['command']
+ env = os.environ.copy()
+ env.update(process['env'])
+ ctx.logger.info('Executing: {0}'.format(command))
+ with ctx_proxy.server.CtxProxy(ctx, common.patch_ctx) as proxy:
+ env[ctx_proxy.client.CTX_SOCKET_URL] = proxy.socket_url
+ running_process = subprocess.Popen(
+ command,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=env,
+ cwd=process.get('cwd'),
+ bufsize=1,
+ close_fds=not common.is_windows())
+ stdout_consumer = _OutputConsumer(running_process.stdout)
+ stderr_consumer = _OutputConsumer(running_process.stderr)
+ exit_code = running_process.wait()
+ stdout_consumer.join()
+ stderr_consumer.join()
+ ctx.logger.info('Execution done (exit_code={0}): {1}'.format(exit_code, command))
+
+ def error_check_func():
+ if exit_code:
+ raise exceptions.ProcessException(
+ command=command,
+ exit_code=exit_code,
+ stdout=stdout_consumer.read_output(),
+ stderr=stderr_consumer.read_output())
+ return common.check_error(ctx, error_check_func=error_check_func)
+
+
+class _OutputConsumer(object):
+
+ def __init__(self, out):
+ self._out = out
+ self._buffer = StringIO.StringIO()
+ self._consumer = threading.Thread(target=self._consume_output)
+ self._consumer.daemon = True
+ self._consumer.start()
+
+ def _consume_output(self):
+ for line in iter(self._out.readline, b''):
+ self._buffer.write(line)
+ self._out.close()
+
+ def read_output(self):
+ return self._buffer.getvalue()
+
+ def join(self):
+ self._consumer.join()
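The ``_OutputConsumer`` threads above exist to drain the child's pipes while ``wait()`` blocks; without them, a child that fills the OS pipe buffer would deadlock against the blocked parent. A minimal, hedged usage sketch (Python 2 spelling and a POSIX shell, matching this file):

    import subprocess

    proc = subprocess.Popen('echo hello; echo oops >&2', shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out = _OutputConsumer(proc.stdout)
    err = _OutputConsumer(proc.stderr)
    exit_code = proc.wait()  # safe: the consumer threads keep the pipes drained
    out.join()
    err.join()
    print(out.read_output())  # 'hello\n'
    print(err.read_output())  # 'oops\n'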
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/operations.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/operations.py
new file mode 100644
index 0000000..0e987f4
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/operations.py
@@ -0,0 +1,75 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Entry point functions.
+"""
+
+from aria.orchestrator import operation
+from . import local as local_operations
+
+
+@operation
+def run_script_locally(ctx,
+ script_path,
+ process=None,
+ **kwargs):
+ return local_operations.run_script(
+ ctx=ctx,
+ script_path=script_path,
+ process=process,
+ **kwargs)
+
+
+@operation
+def run_script_with_ssh(ctx,
+ script_path,
+ fabric_env=None,
+ process=None,
+ use_sudo=False,
+ hide_output=None,
+ **kwargs):
+ return _try_import_ssh().run_script(
+ ctx=ctx,
+ script_path=script_path,
+ fabric_env=fabric_env,
+ process=process,
+ use_sudo=use_sudo,
+ hide_output=hide_output,
+ **kwargs)
+
+
+@operation
+def run_commands_with_ssh(ctx,
+ commands,
+ fabric_env=None,
+ use_sudo=False,
+ hide_output=None,
+ **_):
+ return _try_import_ssh().run_commands(
+ ctx=ctx,
+ commands=commands,
+ fabric_env=fabric_env,
+ use_sudo=use_sudo,
+ hide_output=hide_output)
+
+
+def _try_import_ssh():
+ try:
+ from .ssh import operations as ssh_operations
+ return ssh_operations
+ except Exception as e:
+ raise RuntimeError('Failed to import SSH modules ({0}); have you installed the '
+ 'ARIA SSH extra?'.format(e))
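``_try_import_ssh`` is a lazy-import guard: Fabric and paramiko remain optional dependencies, and the cost (or absence) of the SSH stack is only paid when an SSH operation actually runs. The same pattern, generalized (names invented):

    def _try_import(module_name, hint):
        try:
            return __import__(module_name)
        except ImportError as e:
            raise RuntimeError('Failed to import {0} ({1}); {2}'
                               .format(module_name, e, hint))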
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ssh/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ssh/__init__.py
new file mode 100644
index 0000000..474deef
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ssh/__init__.py
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Remote execution of operations over SSH.
+"""
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ssh/operations.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ssh/operations.py
new file mode 100644
index 0000000..c40e783
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ssh/operations.py
@@ -0,0 +1,195 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities for running commands remotely over SSH.
+"""
+
+import os
+import random
+import string
+import tempfile
+import StringIO
+
+import fabric.api
+import fabric.context_managers
+import fabric.contrib.files
+
+from .. import constants
+from .. import exceptions
+from .. import common
+from .. import ctx_proxy
+from . import tunnel
+
+
+_PROXY_CLIENT_PATH = ctx_proxy.client.__file__
+if _PROXY_CLIENT_PATH.endswith('.pyc'):
+ _PROXY_CLIENT_PATH = _PROXY_CLIENT_PATH[:-1]
+
+
+def run_commands(ctx, commands, fabric_env, use_sudo, hide_output, **_):
+ """Runs the provider 'commands' in sequence
+
+ :param commands: a list of commands to run
+ :param fabric_env: fabric configuration
+ """
+ with fabric.api.settings(_hide_output(ctx, groups=hide_output),
+ **_fabric_env(ctx, fabric_env, warn_only=True)):
+ for command in commands:
+ ctx.logger.info('Running command: {0}'.format(command))
+ run = fabric.api.sudo if use_sudo else fabric.api.run
+ result = run(command)
+ if result.failed:
+ raise exceptions.ProcessException(
+ command=result.command,
+ exit_code=result.return_code,
+ stdout=result.stdout,
+ stderr=result.stderr)
+
+
+def run_script(ctx, script_path, fabric_env, process, use_sudo, hide_output, **kwargs):
+ process = process or {}
+ paths = _Paths(base_dir=process.get('base_dir', constants.DEFAULT_BASE_DIR),
+ local_script_path=common.download_script(ctx, script_path))
+ with fabric.api.settings(_hide_output(ctx, groups=hide_output),
+ **_fabric_env(ctx, fabric_env, warn_only=False)):
+ # the remote host must have the ctx before running any fabric scripts
+ if not fabric.contrib.files.exists(paths.remote_ctx_path):
+ # there may be race conditions with other operations that
+ # may be running in parallel, so we pass -p to make sure
+ # we get 0 exit code if the directory already exists
+ fabric.api.run('mkdir -p {0} && mkdir -p {1}'.format(paths.remote_scripts_dir,
+ paths.remote_work_dir))
+ # this file has to be present before using ctx
+ fabric.api.put(_PROXY_CLIENT_PATH, paths.remote_ctx_path)
+ process = common.create_process_config(
+ script_path=paths.remote_script_path,
+ process=process,
+ operation_kwargs=kwargs,
+ quote_json_env_vars=True)
+ fabric.api.put(paths.local_script_path, paths.remote_script_path)
+ with ctx_proxy.server.CtxProxy(ctx, _patch_ctx) as proxy:
+ local_port = proxy.port
+ with fabric.context_managers.cd(process.get('cwd', paths.remote_work_dir)): # pylint: disable=not-context-manager
+ with tunnel.remote(ctx, local_port=local_port) as remote_port:
+ local_socket_url = proxy.socket_url
+ remote_socket_url = local_socket_url.replace(str(local_port), str(remote_port))
+ env_script = _write_environment_script_file(
+ process=process,
+ paths=paths,
+ local_socket_url=local_socket_url,
+ remote_socket_url=remote_socket_url)
+ fabric.api.put(env_script, paths.remote_env_script_path)
+ try:
+ command = 'source {0} && {1}'.format(paths.remote_env_script_path,
+ process['command'])
+ run = fabric.api.sudo if use_sudo else fabric.api.run
+ run(command)
+ except exceptions.TaskException:
+ return common.check_error(ctx, reraise=True)
+ return common.check_error(ctx)
+
+
+def _patch_ctx(ctx):
+ common.patch_ctx(ctx)
+ original_download_resource = ctx.download_resource
+ original_download_resource_and_render = ctx.download_resource_and_render
+
+ def _download_resource(func, destination, **kwargs):
+ handle, temp_local_path = tempfile.mkstemp()
+ os.close(handle)
+ try:
+ func(destination=temp_local_path, **kwargs)
+ return fabric.api.put(temp_local_path, destination)
+ finally:
+ os.remove(temp_local_path)
+
+ def download_resource(destination, path=None):
+ _download_resource(
+ func=original_download_resource,
+ destination=destination,
+ path=path)
+ ctx.download_resource = download_resource
+
+ def download_resource_and_render(destination, path=None, variables=None):
+ _download_resource(
+ func=original_download_resource_and_render,
+ destination=destination,
+ path=path,
+ variables=variables)
+ ctx.download_resource_and_render = download_resource_and_render
+
+
+def _hide_output(ctx, groups):
+ """ Hides Fabric's output for every 'entity' in `groups` """
+ groups = set(groups or [])
+ if not groups.issubset(constants.VALID_FABRIC_GROUPS):
+ ctx.task.abort('`hide_output` must be a subset of {0} (Provided: {1})'
+ .format(', '.join(constants.VALID_FABRIC_GROUPS), ', '.join(groups)))
+ return fabric.api.hide(*groups)
+
+
+def _fabric_env(ctx, fabric_env, warn_only):
+ """Prepares fabric environment variables configuration"""
+ ctx.logger.debug('Preparing fabric environment...')
+ env = constants.FABRIC_ENV_DEFAULTS.copy()
+ env.update(fabric_env or {})
+ env.setdefault('warn_only', warn_only)
+ # validations
+ if (not env.get('host_string')) and (ctx.task) and (ctx.task.actor) and (ctx.task.actor.host):
+ env['host_string'] = ctx.task.actor.host.host_address
+ if not env.get('host_string'):
+ ctx.task.abort('`host_string` not supplied and ip cannot be deduced automatically')
+ if not (env.get('password') or env.get('key_filename') or env.get('key')):
+ ctx.task.abort(
+ 'Access credentials not supplied '
+ '(you must supply at least one of `key_filename`, `key` or `password`)')
+ if not env.get('user'):
+ ctx.task.abort('`user` not supplied')
+ ctx.logger.debug('Environment prepared successfully')
+ return env
+
+
+def _write_environment_script_file(process, paths, local_socket_url, remote_socket_url):
+ env_script = StringIO.StringIO()
+ env = process['env']
+ env['PATH'] = '{0}:$PATH'.format(paths.remote_ctx_dir)
+ env['PYTHONPATH'] = '{0}:$PYTHONPATH'.format(paths.remote_ctx_dir)
+ env_script.write('chmod +x {0}\n'.format(paths.remote_script_path))
+ env_script.write('chmod +x {0}\n'.format(paths.remote_ctx_path))
+ env.update({
+ ctx_proxy.client.CTX_SOCKET_URL: remote_socket_url,
+ 'LOCAL_{0}'.format(ctx_proxy.client.CTX_SOCKET_URL): local_socket_url
+ })
+ for key, value in env.iteritems():
+ env_script.write('export {0}={1}\n'.format(key, value))
+ return env_script
+
+
+class _Paths(object):
+
+ def __init__(self, base_dir, local_script_path):
+ self.local_script_path = local_script_path
+ self.remote_ctx_dir = base_dir
+ self.base_script_path = os.path.basename(self.local_script_path)
+ self.remote_ctx_path = '{0}/ctx'.format(self.remote_ctx_dir)
+ self.remote_scripts_dir = '{0}/scripts'.format(self.remote_ctx_dir)
+ self.remote_work_dir = '{0}/work'.format(self.remote_ctx_dir)
+ random_suffix = ''.join(random.choice(string.ascii_lowercase + string.digits)
+ for _ in range(8))
+ remote_path_suffix = '{0}-{1}'.format(self.base_script_path, random_suffix)
+ self.remote_env_script_path = '{0}/env-{1}'.format(self.remote_scripts_dir,
+ remote_path_suffix)
+ self.remote_script_path = '{0}/{1}'.format(self.remote_scripts_dir, remote_path_suffix)
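A hedged sketch of the file ``_write_environment_script_file`` produces, using the helpers defined above; the paths, ports, and random suffix are invented, the export order follows dict iteration order, and the ``CTX_SOCKET_URL`` variable name assumes the client constant's value matches its name:

    paths = _Paths(base_dir='/tmp/aria-ctx', local_script_path='/tmp/configure.sh')
    process = {'env': {}, 'command': '/bin/true'}
    script = _write_environment_script_file(
        process=process, paths=paths,
        local_socket_url='http://127.0.0.1:35339',
        remote_socket_url='http://127.0.0.1:53229')
    print(script.getvalue())
    # chmod +x /tmp/aria-ctx/scripts/configure.sh-<random suffix>
    # chmod +x /tmp/aria-ctx/ctx
    # export PATH=/tmp/aria-ctx:$PATH
    # export PYTHONPATH=/tmp/aria-ctx:$PYTHONPATH
    # export CTX_SOCKET_URL=http://127.0.0.1:53229
    # export LOCAL_CTX_SOCKET_URL=http://127.0.0.1:35339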
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ssh/tunnel.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ssh/tunnel.py
new file mode 100644
index 0000000..e76d525
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/execution_plugin/ssh/tunnel.py
@@ -0,0 +1,107 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This implementation was copied from the Fabric project directly:
+# https://github.com/fabric/fabric/blob/master/fabric/context_managers.py#L486
+# The purpose was to remove the rtunnel creation printouts here:
+# https://github.com/fabric/fabric/blob/master/fabric/context_managers.py#L547
+
+
+import contextlib
+import select
+import socket
+
+import fabric.api
+import fabric.state
+import fabric.thread_handling
+
+
+@contextlib.contextmanager
+def remote(ctx, local_port, remote_port=0, local_host='localhost', remote_bind_address='127.0.0.1'):
+ """Create a tunnel forwarding a locally-visible port to the remote target."""
+ sockets = []
+ channels = []
+ thread_handlers = []
+
+ def accept(channel, *args, **kwargs):
+ # This seemingly innocent statement appears to do nothing,
+ # but the truth is far from it!
+ # Calling fileno() on a paramiko channel for the first time creates
+ # the plumbing required to make the channel valid for select().
+ # While this would generally happen implicitly inside the _forwarder
+ # function when select is called, by then it may already be too late
+ # and may cause the select loop to hang.
+ # Specifically, when new data arrives on the channel, a flag is set
+ # on an "event" object, which is what makes the select call work.
+ # The problem is that this only happens if the event object is not
+ # None, and it becomes not-None only after channel.fileno() has been
+ # called for the first time. If we wait until _forwarder calls select
+ # for the first time, initial data may already have reached the channel.
+ # Calling it explicitly here, in the paramiko transport's main event
+ # loop, guarantees this will not happen.
+ channel.fileno()
+
+ channels.append(channel)
+ sock = socket.socket()
+ sockets.append(sock)
+
+ try:
+ sock.connect((local_host, local_port))
+ except Exception as e:
+ try:
+ channel.close()
+ except Exception as ex2:
+ close_error = ' (While trying to close channel: {0})'.format(ex2)
+ else:
+ close_error = ''
+ ctx.task.abort('[{0}] rtunnel: cannot connect to {1}:{2} ({3}){4}'
+ .format(fabric.api.env.host_string, local_host, local_port, e,
+ close_error))
+
+ thread_handler = fabric.thread_handling.ThreadHandler('fwd', _forwarder, channel, sock)
+ thread_handlers.append(thread_handler)
+
+ transport = fabric.state.connections[fabric.api.env.host_string].get_transport()
+ remote_port = transport.request_port_forward(
+ remote_bind_address, remote_port, handler=accept)
+
+ try:
+ yield remote_port
+ finally:
+ for sock, chan, thread_handler in zip(sockets, channels, thread_handlers):
+ sock.close()
+ chan.close()
+ thread_handler.thread.join()
+ thread_handler.raise_if_needed()
+ transport.cancel_port_forward(remote_bind_address, remote_port)
+
+
+def _forwarder(chan, sock):
+ # Bidirectionally forward data between a socket and a Paramiko channel.
+ while True:
+ read = select.select([sock, chan], [], [])[0]
+ if sock in read:
+ data = sock.recv(1024)
+ if len(data) == 0:
+ break
+ chan.send(data)
+ if chan in read:
+ data = chan.recv(1024)
+ if len(data) == 0:
+ break
+ sock.send(data)
+ chan.close()
+ sock.close()
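A hedged usage sketch of the context manager above, mirroring ``run_script`` in the previous file; ``ctx`` and ``proxy`` come from that caller:

    with tunnel.remote(ctx, local_port=proxy.port) as remote_port:
        # scripts on the remote host can now reach the local ctx proxy at
        # 127.0.0.1:<remote_port>; rewrite the socket URL accordingly
        remote_url = proxy.socket_url.replace(str(proxy.port), str(remote_port))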
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/plugin.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/plugin.py
new file mode 100644
index 0000000..756a28e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/plugin.py
@@ -0,0 +1,171 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Plugin management.
+"""
+
+import os
+import tempfile
+import subprocess
+import sys
+import zipfile
+from datetime import datetime
+
+import wagon
+
+from . import exceptions
+from ..utils import process as process_utils
+
+_IS_WIN = os.name == 'nt'
+
+
+class PluginManager(object):
+
+ def __init__(self, model, plugins_dir):
+ """
+ :param plugins_dir: root directory in which to install plugins
+ """
+ self._model = model
+ self._plugins_dir = plugins_dir
+
+ def install(self, source):
+ """
+ Install a wagon plugin.
+ """
+ metadata = wagon.show(source)
+ cls = self._model.plugin.model_cls
+
+ os_props = metadata['build_server_os_properties']
+
+ plugin = cls(
+ name=metadata['package_name'],
+ archive_name=metadata['archive_name'],
+ supported_platform=metadata['supported_platform'],
+ supported_py_versions=metadata['supported_python_versions'],
+ distribution=os_props.get('distribution'),
+ distribution_release=os_props['distribution_release'],
+ distribution_version=os_props['distribution_version'],
+ package_name=metadata['package_name'],
+ package_version=metadata['package_version'],
+ package_source=metadata['package_source'],
+ wheels=metadata['wheels'],
+ uploaded_at=datetime.now()
+ )
+ if len(self._model.plugin.list(filters={'package_name': plugin.package_name,
+ 'package_version': plugin.package_version})):
+ raise exceptions.PluginAlreadyExistsError(
+ 'Plugin {0}, version {1} already exists'.format(plugin.package_name,
+ plugin.package_version))
+ self._install_wagon(source=source, prefix=self.get_plugin_dir(plugin))
+ self._model.plugin.put(plugin)
+ return plugin
+
+ def load_plugin(self, plugin, env=None):
+ """
+ Load the plugin into an environment.
+
+ Loading the plugin means the plugin's code and binaries paths will be appended to the
+ environment's ``PATH`` and ``PYTHONPATH``, thereby allowing usage of the plugin.
+
+ :param plugin: plugin to load
+ :param env: environment to load the plugin into; If ``None``, :obj:`os.environ` will be
+ used
+ """
+ env = env or os.environ
+ plugin_dir = self.get_plugin_dir(plugin)
+
+ # Update PATH environment variable to include plugin's bin dir
+ bin_dir = 'Scripts' if _IS_WIN else 'bin'
+ process_utils.append_to_path(os.path.join(plugin_dir, bin_dir), env=env)
+
+ # Update PYTHONPATH environment variable to include plugin's site-packages
+ # directories
+ if _IS_WIN:
+ pythonpath_dirs = [os.path.join(plugin_dir, 'Lib', 'site-packages')]
+ else:
+ # In some Linux environments there will be both a lib and a lib64 directory,
+ # with the latter containing compiled packages.
+ pythonpath_dirs = [os.path.join(
+ plugin_dir, 'lib{0}'.format(b),
+ 'python{0}.{1}'.format(sys.version_info[0], sys.version_info[1]),
+ 'site-packages') for b in ('', '64')]
+
+ process_utils.append_to_pythonpath(*pythonpath_dirs, env=env)
+
+ def get_plugin_dir(self, plugin):
+ return os.path.join(
+ self._plugins_dir,
+ '{0}-{1}'.format(plugin.package_name, plugin.package_version))
+
+ @staticmethod
+ def validate_plugin(source):
+ """
+ Validate a plugin archive.
+
+ A valid plugin is a `wagon <http://github.com/cloudify-cosmo/wagon>`__ in the zip format
+ (suffix may also be ``.wgn``).
+ """
+ if not zipfile.is_zipfile(source):
+ raise exceptions.InvalidPluginError(
+ 'Archive {0} is of an unsupported type. Only '
+ 'zip/wgn is allowed'.format(source))
+ with zipfile.ZipFile(source, 'r') as zip_file:
+ infos = zip_file.infolist()
+ try:
+ package_name = infos[0].filename[:infos[0].filename.index('/')]
+ package_json_path = "{0}/{1}".format(package_name, 'package.json')
+ zip_file.getinfo(package_json_path)
+ except (KeyError, ValueError, IndexError):
+ raise exceptions.InvalidPluginError(
+ 'Failed to validate plugin {0} '
+ '(package.json was not found in archive)'.format(source))
+
+ def _install_wagon(self, source, prefix):
+ pip_freeze_output = self._pip_freeze()
+ file_descriptor, constraint_path = tempfile.mkstemp(prefix='constraint-', suffix='.txt')
+ os.close(file_descriptor)
+ try:
+ with open(constraint_path, 'wb') as constraint:
+ constraint.write(pip_freeze_output)
+ # Install the provided wagon.
+ # * The --prefix install_arg will cause the plugin to be installed under
+ # plugins_dir/{package_name}-{package_version}, so different plugins don't step on
+ # each other and don't interfere with the current virtualenv.
+ # * The --constraint flag points to a file containing the output of ``pip freeze``.
+ # It is required to handle cases where a plugin depends on some Python package with
+ # a different version than the one installed in the current virtualenv. Without this
+ # flag, the existing package would be **removed** from the parent virtualenv and the
+ # new package installed under prefix. With the flag, the existing version remains,
+ # and the version requested by the plugin is ignored.
+ wagon.install(
+ source=source,
+ install_args='--prefix="{prefix}" --constraint="{constraint}"'.format(
+ prefix=prefix,
+ constraint=constraint.name),
+ venv=os.environ.get('VIRTUAL_ENV'))
+ finally:
+ os.remove(constraint_path)
+
+ @staticmethod
+ def _pip_freeze():
+ """Run pip freeze in current environment and return the output"""
+ bin_dir = 'Scripts' if os.name == 'nt' else 'bin'
+ pip_path = os.path.join(sys.prefix, bin_dir,
+ 'pip{0}'.format('.exe' if os.name == 'nt' else ''))
+ pip_freeze = subprocess.Popen([pip_path, 'freeze'], stdout=subprocess.PIPE)
+ pip_freeze_output, _ = pip_freeze.communicate()
+ assert not pip_freeze.poll()
+ return pip_freeze_output
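A hedged end-to-end sketch of the manager above; the model storage and paths are invented:

    manager = PluginManager(model=model_storage, plugins_dir='/var/aria/plugins')
    PluginManager.validate_plugin('/tmp/my_plugin-1.0-py27-none-any.wgn')
    plugin = manager.install('/tmp/my_plugin-1.0-py27-none-any.wgn')
    manager.load_plugin(plugin)  # PATH and PYTHONPATH now include the plugin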
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/__init__.py
new file mode 100644
index 0000000..099a950
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/__init__.py
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .topology import Topology
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/common.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/common.py
new file mode 100644
index 0000000..5124557
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/common.py
@@ -0,0 +1,69 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class HandlerBase(object):
+ def __init__(self, topology, model):
+ self._topology = topology
+ self._model = model
+
+ def coerce(self, **kwargs):
+ raise NotImplementedError
+
+ def _coerce(self, *models, **kwargs):
+ for template in models:
+ self._topology.coerce(template, **kwargs)
+
+ def validate(self, **kwargs):
+ raise NotImplementedError
+
+ def _validate(self, *models, **kwargs):
+ for template in models:
+ self._topology.validate(template, **kwargs)
+
+ def dump(self, out_stream):
+ raise NotImplementedError
+
+
+class TemplateHandlerBase(HandlerBase):
+ """
+ Base handler for template-based models.
+ """
+
+ def instantiate(self, instance_cls, **kwargs):
+ raise NotImplementedError
+
+
+class InstanceHandlerBase(HandlerBase):
+ """
+ Base handler for instance-based models.
+ """
+ def validate(self, **kwargs):
+ raise NotImplementedError
+
+ def coerce(self, **kwargs):
+ raise NotImplementedError
+
+ def dump(self, out_stream):
+ raise NotImplementedError
+
+
+class ActorHandlerBase(HandlerBase):
+ """
+ Base handler for any model which has (or contains a field which references) an operation
+ """
+ def configure_operations(self):
+ raise NotImplementedError
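A minimal, hedged sketch of a concrete handler built on these bases; the ``Metadata`` model and its ``values`` field are hypothetical, and real implementations follow in instance_handler.py below:

    class Metadata(InstanceHandlerBase):
        def coerce(self, **kwargs):
            self._topology.coerce(self._model.values, **kwargs)

        def validate(self, **kwargs):
            self._topology.validate(self._model.values, **kwargs)

        def dump(self, out_stream):
            out_stream.write(out_stream.node_style(self._model.name))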
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/instance_handler.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/instance_handler.py
new file mode 100644
index 0000000..51f26c6
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/instance_handler.py
@@ -0,0 +1,671 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ... parser.modeling import context
+from ... modeling import models, functions
+from ... utils import formatting
+from .. import execution_plugin
+from .. import decorators
+from . import common
+
+
+class Artifact(common.InstanceHandlerBase):
+
+ def coerce(self, **kwargs):
+ self._topology.coerce(self._model.properties, **kwargs)
+
+ def validate(self, **kwargs):
+ self._topology.validate(self._model.properties, **kwargs)
+
+ def dump(self, out_stream):
+ with out_stream.indent():
+ out_stream.write(out_stream.node_style(self._model.name))
+ out_stream.write(out_stream.meta_style(self._model.description))
+ with out_stream.indent():
+ out_stream.write('Artifact type: {0}'.format(out_stream.type_style(
+ self._model.type.name)))
+ out_stream.write('Source path: {0}'.format(
+ out_stream.literal_style(self._model.source_path)))
+ if self._model.target_path is not None:
+ out_stream.write('Target path: {0}'.format(
+ out_stream.literal_style(self._model.target_path)))
+ if self._model.repository_url is not None:
+ out_stream.write('Repository URL: {0}'.format(
+ out_stream.literal_style(self._model.repository_url)))
+ if self._model.repository_credential:
+ out_stream.write('Repository credential: {0}'.format(
+ out_stream.literal_style(self._model.repository_credential)))
+ self._topology.dump(self._model.properties, out_stream, title='Properties')
+
+
+class Capability(common.InstanceHandlerBase):
+ def coerce(self, **kwargs):
+ self._topology.coerce(self._model.properties, **kwargs)
+
+ def validate(self, **kwargs):
+ self._topology.validate(self._model.properties, **kwargs)
+
+ def dump(self, out_stream):
+ out_stream.write(out_stream.node_style(self._model.name))
+ with out_stream.indent():
+ out_stream.write('Type: {0}'.format(out_stream.type_style(self._model.type.name)))
+ out_stream.write('Occurrences: {0:d} ({1:d}{2})'.format(
+ self._model.occurrences,
+ self._model.min_occurrences or 0,
+ ' to {0:d}'.format(self._model.max_occurrences)
+ if self._model.max_occurrences is not None
+ else ' or more'))
+ self._topology.dump(self._model.properties, out_stream, title='Properties')
+
+
+class Group(common.ActorHandlerBase):
+
+ def coerce(self, **kwargs):
+ self._coerce(self._model.properties, self._model.interfaces, **kwargs)
+
+ def validate(self, **kwargs):
+ self._validate(self._model.properties,
+ self._model.interfaces,
+ **kwargs)
+
+ def dump(self, out_stream):
+ out_stream.write('Group: {0}'.format(out_stream.node_style(self._model.name)))
+ with out_stream.indent():
+ out_stream.write('Type: {0}'.format(out_stream.type_style(self._model.type.name)))
+ self._topology.dump(self._model.properties, out_stream, title='Properties')
+ self._topology.dump(self._model.interfaces, out_stream, title='Interfaces')
+ if self._model.nodes:
+ out_stream.write('Member nodes:')
+ with out_stream.indent():
+ for node in self._model.nodes:
+ out_stream.write(out_stream.node_style(node.name))
+
+ def configure_operations(self):
+ for interface in self._model.interfaces.values():
+ self._topology.configure_operations(interface)
+
+
+class Interface(common.ActorHandlerBase):
+ def coerce(self, **kwargs):
+ self._coerce(self._model.inputs, self._model.operations, **kwargs)
+
+ def validate(self, **kwargs):
+ self._validate(self._model.inputs,
+ self._model.operations,
+ **kwargs)
+
+ def dump(self, out_stream):
+ out_stream.write(out_stream.node_style(self._model.name))
+ if self._model.description:
+ out_stream.write(out_stream.meta_style(self._model.description))
+ with out_stream.indent():
+ out_stream.write('Interface type: {0}'.format(
+ out_stream.type_style(self._model.type.name)))
+ self._topology.dump(self._model.inputs, out_stream, title='Inputs')
+ self._topology.dump(self._model.operations, out_stream, title='Operations')
+
+ def configure_operations(self):
+ for operation in self._model.operations.values():
+ self._topology.configure_operations(operation)
+
+
+class Node(common.ActorHandlerBase):
+ def coerce(self, **kwargs):
+ self._coerce(self._model.properties,
+ self._model.attributes,
+ self._model.interfaces,
+ self._model.artifacts,
+ self._model.capabilities,
+ self._model.outbound_relationships,
+ **kwargs)
+
+ def validate(self, **kwargs):
+ if len(self._model.name) > context.ID_MAX_LENGTH:
+ self._topology.report(
+ '"{0}" has an ID longer than the limit of {1:d} characters: {2:d}'.format(
+ self._model.name, context.ID_MAX_LENGTH, len(self._model.name)),
+ level=self._topology.Issue.BETWEEN_INSTANCES)
+
+ self._validate(self._model.properties,
+ self._model.attributes,
+ self._model.interfaces,
+ self._model.artifacts,
+ self._model.capabilities,
+ self._model.outbound_relationships)
+
+ def dump(self, out_stream):
+ out_stream.write('Node: {0}'.format(out_stream.node_style(self._model.name)))
+ with out_stream.indent():
+ out_stream.write('Type: {0}'.format(out_stream.type_style(self._model.type.name)))
+ out_stream.write('Template: {0}'.format(
+ out_stream.node_style(self._model.node_template.name)))
+ self._topology.dump(self._model.properties, out_stream, title='Properties')
+ self._topology.dump(self._model.attributes, out_stream, title='Attributes')
+ self._topology.dump(self._model.interfaces, out_stream, title='Interfaces')
+ self._topology.dump(self._model.artifacts, out_stream, title='Artifacts')
+ self._topology.dump(self._model.capabilities, out_stream, title='Capabilities')
+ self._topology.dump(self._model.outbound_relationships, out_stream,
+ title='Relationships')
+
+ def configure_operations(self):
+ for interface in self._model.interfaces.values():
+ self._topology.configure_operations(interface)
+ for relationship in self._model.outbound_relationships:
+ self._topology.configure_operations(relationship)
+
+ def validate_capabilities(self):
+ satisfied = True
+ for capability in self._model.capabilities.itervalues():
+ if not capability.has_enough_relationships:
+ self._topology.report(
+ 'capability "{0}" of node "{1}" requires at least {2:d} '
+ 'relationships but has {3:d}'.format(capability.name,
+ self._model.name,
+ capability.min_occurrences,
+ capability.occurrences),
+ level=self._topology.Issue.BETWEEN_INSTANCES)
+ satisfied = False
+ return satisfied
+
+ def satisfy_requirements(self):
+ satisfied = True
+ for requirement_template in self._model.node_template.requirement_templates:
+
+ # Since we try to satisfy requirements, which are bound to the node template, and use
+ # that information when creating the relationship, some requirements may already have
+ # been satisfied by a previous run on that node template.
+ # The entire mechanism of satisfying requirements needs to be refactored.
+ if any(rel.requirement_template == requirement_template
+ for rel in self._model.outbound_relationships):
+ continue
+
+ # Find target template
+ target_node_template, target_node_capability = self._find_target(requirement_template)
+ if target_node_template is not None:
+ satisfied = self._satisfy_capability(
+ target_node_capability, target_node_template, requirement_template)
+ else:
+ self._topology.report('requirement "{0}" of node "{1}" has no target node template'.
+ format(requirement_template.name, self._model.name),
+ level=self._topology.Issue.BETWEEN_INSTANCES)
+ satisfied = False
+ return satisfied
+
+ def _satisfy_capability(self, target_node_capability, target_node_template,
+ requirement_template):
+ # Find target nodes
+ target_nodes = target_node_template.nodes
+ if target_nodes:
+ target_node = None
+ target_capability = None
+
+ if target_node_capability is not None:
+ # Relate to the first target node that has capacity
+ for node in target_nodes:
+ a_target_capability = node.capabilities.get(target_node_capability.name)
+ if a_target_capability.relate():
+ target_node = node
+ target_capability = a_target_capability
+ break
+ else:
+ # Use first target node
+ target_node = target_nodes[0]
+
+ if target_node is not None:
+ if requirement_template.relationship_template is not None:
+ relationship_model = self._topology.instantiate(
+ requirement_template.relationship_template)
+ else:
+ relationship_model = models.Relationship()
+ relationship_model.name = requirement_template.name
+ relationship_model.requirement_template = requirement_template
+ relationship_model.target_node = target_node
+ relationship_model.target_capability = target_capability
+ self._model.outbound_relationships.append(relationship_model)
+ return True
+ else:
+ self._topology.report(
+ 'requirement "{0}" of node "{1}" targets node '
+ 'template "{2}" but its instantiated nodes do not '
+ 'have enough capacity'.format(
+ requirement_template.name, self._model.name, target_node_template.name),
+ level=self._topology.Issue.BETWEEN_INSTANCES)
+ return False
+ else:
+ self._topology.report(
+ 'requirement "{0}" of node "{1}" targets node template '
+ '"{2}" but it has no instantiated nodes'.format(
+ requirement_template.name, self._model.name, target_node_template.name),
+ level=self._topology.Issue.BETWEEN_INSTANCES)
+ return False
+
+ def _find_target(self, requirement_template):
+ # We might already have a specific node template from the requirement template, so
+ # we'll just verify it
+ if requirement_template.target_node_template is not None:
+ if not self._model.node_template.is_target_node_template_valid(
+ requirement_template.target_node_template):
+ self._topology.report(
+ 'requirement "{0}" of node template "{1}" is for node '
+ 'template "{2}" but it does not match constraints'.format(
+ requirement_template.name,
+ requirement_template.target_node_template.name,
+ self._model.node_template.name),
+ level=self._topology.Issue.BETWEEN_TYPES)
+ if (requirement_template.target_capability_type is not None or
+ requirement_template.target_capability_name is not None):
+ target_node_capability = self._get_capability(requirement_template)
+ if target_node_capability is None:
+ return None, None
+ else:
+ target_node_capability = None
+
+ return requirement_template.target_node_template, target_node_capability
+
+ # Find first node that matches the type
+ elif requirement_template.target_node_type is not None:
+ for target_node_template in \
+ self._model.node_template.service_template.node_templates.itervalues():
+ if requirement_template.target_node_type.get_descendant(
+ target_node_template.type.name) is None:
+ continue
+
+ if not self._model.node_template.is_target_node_template_valid(
+ target_node_template):
+ continue
+
+ target_node_capability = self._get_capability(requirement_template,
+ target_node_template)
+
+ if target_node_capability is None:
+ continue
+
+ return target_node_template, target_node_capability
+
+ # Find the first node which has a capability of the required type
+ elif requirement_template.target_capability_type is not None:
+ for target_node_template in \
+ self._model.node_template.service_template.node_templates.itervalues():
+ target_node_capability = \
+ self._get_capability(requirement_template, target_node_template)
+ if target_node_capability:
+ return target_node_template, target_node_capability
+
+ return None, None
+
+ def _get_capability(self, requirement_template, target_node_template=None):
+ target_node_template = target_node_template or requirement_template.target_node_template
+
+ for capability_template in target_node_template.capability_templates.values():
+ if self._satisfies_requirement(
+ capability_template, requirement_template, target_node_template):
+ return capability_template
+
+ return None
+
+ def _satisfies_requirement(
+ self, capability_template, requirement_template, target_node_template):
+ # Do we match the required capability type?
+ if (requirement_template.target_capability_type and
+ requirement_template.target_capability_type.get_descendant(
+ capability_template.type.name) is None):
+ return False
+
+ # Are we in valid_source_node_types?
+ if capability_template.valid_source_node_types:
+ for valid_source_node_type in capability_template.valid_source_node_types:
+ if valid_source_node_type.get_descendant(
+ self._model.node_template.type.name) is None:
+ return False
+
+ # Apply requirement constraints
+ if requirement_template.target_node_template_constraints:
+ for node_template_constraint in requirement_template.target_node_template_constraints:
+ if not node_template_constraint.matches(
+ self._model.node_template, target_node_template):
+ return False
+
+ return True
+
+
+class Operation(common.ActorHandlerBase):
+ def coerce(self, **kwargs):
+ self._coerce(self._model.inputs,
+ self._model.configurations,
+ self._model.arguments,
+ **kwargs)
+
+ def validate(self, **kwargs):
+ self._validate(self._model.inputs,
+ self._model.configurations,
+ self._model.arguments,
+ **kwargs)
+
+ def dump(self, out_stream):
+ out_stream.write(out_stream.node_style(self._model.name))
+ if self._model.description:
+ out_stream.write(out_stream.meta_style(self._model.description))
+ with out_stream.indent():
+ if self._model.implementation is not None:
+ out_stream.write('Implementation: {0}'.format(
+ out_stream.literal_style(self._model.implementation)))
+ if self._model.dependencies:
+ out_stream.write(
+ 'Dependencies: {0}'.format(', '.join((str(out_stream.literal_style(v))
+ for v in self._model.dependencies))))
+ self._topology.dump(self._model.inputs, out_stream, title='Inputs')
+ if self._model.executor is not None:
+ out_stream.write('Executor: {0}'.format(out_stream.literal_style(
+ self._model.executor)))
+ if self._model.max_attempts is not None:
+ out_stream.write('Max attempts: {0}'.format(out_stream.literal_style(
+ self._model.max_attempts)))
+ if self._model.retry_interval is not None:
+ out_stream.write('Retry interval: {0}'.format(
+ out_stream.literal_style(self._model.retry_interval)))
+ if self._model.plugin is not None:
+ out_stream.write('Plugin: {0}'.format(
+ out_stream.literal_style(self._model.plugin.name)))
+ self._topology.dump(self._model.configurations, out_stream, title='Configuration')
+ if self._model.function is not None:
+ out_stream.write('Function: {0}'.format(out_stream.literal_style(
+ self._model.function)))
+ self._topology.dump(self._model.arguments, out_stream, title='Arguments')
+
+ def configure_operations(self):
+ if self._model.implementation is None and self._model.function is None:
+ return
+
+ if (self._model.interface is not None and
+ self._model.plugin is None and
+ self._model.function is None):
+ # ("interface" is None for workflow operations, which do not currently use "plugin")
+ # The default (None) plugin is the execution plugin
+ execution_plugin.instantiation.configure_operation(self._model, self._topology)
+ else:
+ # In the future plugins may be able to add their own "configure_operation" hook that
+ # can validate the configuration and otherwise create specially derived arguments. For
+ # now, we just send all configuration parameters as arguments without validation.
+ for key, conf in self._model.configurations.items():
+ self._model.arguments[key] = self._topology.instantiate(conf.as_argument())
+
+ if self._model.interface is not None:
+ # Send all interface inputs as extra arguments
+ # ("interface" is None for workflow operations)
+ # Note that they will override existing arguments of the same names
+ for key, input in self._model.interface.inputs.items():
+ self._model.arguments[key] = self._topology.instantiate(input.as_argument())
+
+ # Send all inputs as extra arguments
+ # Note that they will override existing arguments of the same names
+ for key, input in self._model.inputs.items():
+ self._model.arguments[key] = self._topology.instantiate(input.as_argument())
+
+ # Check for reserved arguments
+ used_reserved_names = set(decorators.OPERATION_DECORATOR_RESERVED_ARGUMENTS).intersection(
+ self._model.arguments.keys())
+ if used_reserved_names:
+ self._topology.report(
+ 'using reserved arguments in operation "{0}": {1}'.format(
+ self._model.name, formatting.string_list_as_string(used_reserved_names)),
+ level=self._topology.Issue.EXTERNAL)
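+
+ # Illustrative sketch (hypothetical values): for an operation whose
+ # configurations are {"timeout": 30} and whose interface declares input
+ # {"port": 8080}, the merges above leave arguments holding both keys, with
+ # operation-level inputs applied last and overriding same-named entries; a
+ # reserved name such as "ctx" (assuming it appears in
+ # OPERATION_DECORATOR_RESERVED_ARGUMENTS) would be reported as an external
+ # issue rather than silently overwritten.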
+
+
+class Policy(common.InstanceHandlerBase):
+ def coerce(self, **kwargs):
+ self._topology.coerce(self._model.properties, **kwargs)
+
+ def validate(self, **kwargs):
+ self._topology.validate(self._model.properties, **kwargs)
+
+ def dump(self, out_stream):
+ out_stream.write('Policy: {0}'.format(out_stream.node_style(self._model.name)))
+ with out_stream.indent():
+ out_stream.write('Type: {0}'.format(out_stream.type_style(self._model.type.name)))
+ self._topology.dump(self._model.properties, out_stream, title='Properties')
+ if self._model.nodes:
+ out_stream.write('Target nodes:')
+ with out_stream.indent():
+ for node in self._model.nodes:
+ out_stream.write(out_stream.node_style(node.name))
+ if self._model.groups:
+ out_stream.write('Target groups:')
+ with out_stream.indent():
+ for group in self._model.groups:
+ out_stream.write(out_stream.node_style(group.name))
+
+
+class Relationship(common.ActorHandlerBase):
+ def coerce(self, **kwargs):
+ self._coerce(self._model.properties,
+ self._model.interfaces,
+ **kwargs)
+
+ def validate(self, **kwargs):
+ self._validate(self._model.properties,
+ self._model.interfaces,
+ **kwargs)
+
+ def dump(self, out_stream):
+ if self._model.name:
+ out_stream.write('{0} ->'.format(out_stream.node_style(self._model.name)))
+ else:
+ out_stream.write('->')
+ with out_stream.indent():
+ out_stream.write('Node: {0}'.format(out_stream.node_style(
+ self._model.target_node.name)))
+ if self._model.target_capability:
+ out_stream.write('Capability: {0}'.format(out_stream.node_style(
+ self._model.target_capability.name)))
+ if self._model.type is not None:
+ out_stream.write('Relationship type: {0}'.format(
+ out_stream.type_style(self._model.type.name)))
+ if (self._model.relationship_template is not None and
+ self._model.relationship_template.name):
+ out_stream.write('Relationship template: {0}'.format(
+ out_stream.node_style(self._model.relationship_template.name)))
+ self._topology.dump(self._model.properties, out_stream, title='Properties')
+ self._topology.dump(self._model.interfaces, out_stream, title='Interfaces')
+
+ def configure_operations(self):
+ for interface in self._model.interfaces.values():
+ self._topology.configure_operations(interface)
+
+
+class Service(common.ActorHandlerBase):
+ def coerce(self, **kwargs):
+ self._coerce(self._model.meta_data,
+ self._model.nodes,
+ self._model.groups,
+ self._model.policies,
+ self._model.substitution,
+ self._model.inputs,
+ self._model.outputs,
+ self._model.workflows,
+ **kwargs)
+
+ def validate(self, **kwargs):
+ self._validate(self._model.meta_data,
+ self._model.nodes,
+ self._model.groups,
+ self._model.policies,
+ self._model.substitution,
+ self._model.inputs,
+ self._model.outputs,
+ self._model.workflows,
+ **kwargs)
+
+ def dump(self, out_stream):
+ if self._model.description is not None:
+ out_stream.write(out_stream.meta_style(self._model.description))
+ self._topology.dump(self._model.meta_data, out_stream, title='Metadata')
+ self._topology.dump(self._model.nodes, out_stream)
+ self._topology.dump(self._model.groups, out_stream)
+ self._topology.dump(self._model.policies, out_stream)
+ self._topology.dump(self._model.substitution, out_stream)
+ self._topology.dump(self._model.inputs, out_stream, title='Inputs')
+ self._topology.dump(self._model.outputs, out_stream, title='Outputs')
+ self._topology.dump(self._model.workflows, out_stream, title='Workflows')
+
+ def configure_operations(self):
+ for node in self._model.nodes.itervalues():
+ self._topology.configure_operations(node)
+ for group in self._model.groups.itervalues():
+ self._topology.configure_operations(group)
+ for operation in self._model.workflows.itervalues():
+ self._topology.configure_operations(operation)
+
+ def validate_capabilities(self):
+ satisfied = True
+ for node in self._model.nodes.values():
+ if not self._topology.validate_capabilities(node):
+ satisfied = False
+ return satisfied
+
+ def satisfy_requirements(self):
+ return all(self._topology.satisfy_requirements(node)
+ for node in self._model.nodes.values())
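+
+ # A plausible ordering of the passes above when building a service from a
+ # template (a sketch, not a prescribed contract of this module):
+ #
+ # service = topology.instantiate(service_template, inputs=inputs)
+ # topology.satisfy_requirements(service) # resolve requirement targets
+ # topology.validate_capabilities(service) # check occurrence bounds
+ # topology.configure_operations(service) # derive operation arguments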
+
+
+class Substitution(common.InstanceHandlerBase):
+ def coerce(self, **kwargs):
+ self._topology.coerce(self._model.mappings, **kwargs)
+
+ def validate(self, **kwargs):
+ self._topology.validate(self._model.mappings, **kwargs)
+
+ def dump(self, out_stream):
+ out_stream.write('Substitution:')
+ with out_stream.indent():
+ out_stream.write('Node type: {0}'.format(out_stream.type_style(
+ self._model.node_type.name)))
+ self._topology.dump(self._model.mappings, out_stream, title='Mappings')
+
+
+class SubstitutionMapping(common.InstanceHandlerBase):
+
+ def coerce(self, **kwargs):
+ pass
+
+ def validate(self, **_):
+ if (self._model.capability is None) and (self._model.requirement_template is None):
+ self._topology.report(
+ 'mapping "{0}" refers to neither capability nor a requirement'
+ ' in node: {1}'.format(
+ self._model.name, formatting.safe_repr(self._model.node_style.name)),
+ level=self._topology.Issue.BETWEEN_TYPES)
+
+ def dump(self, out_stream):
+ if self._model.capability is not None:
+ out_stream.write('{0} -> {1}.{2}'.format(
+ out_stream.node_style(self._model.name),
+ out_stream.node_style(self._model.capability.node.name),
+ out_stream.node_style(self._model.capability.name)))
+ else:
+ out_stream.write('{0} -> {1}.{2}'.format(
+ out_stream.node_style(self._model.name),
+ out_stream.node_style(self._model.node.name),
+ out_stream.node_style(self._model.requirement_template.name)))
+
+
+class Metadata(common.InstanceHandlerBase):
+
+ def dump(self, out_stream):
+ out_stream.write('{0}: {1}'.format(
+ out_stream.property_style(self._model.name),
+ out_stream.literal_style(self._model.value)))
+
+ def coerce(self, **_):
+ pass
+
+ def instantiate(self, instance_cls):
+ return instance_cls(name=self._model.name, value=self._model.value)
+
+ def validate(self):
+ pass
+
+
+class _Parameter(common.InstanceHandlerBase):
+
+ def dump(self, out_stream):
+ if self._model.type_name is not None:
+ out_stream.write('{0}: {1} ({2})'.format(
+ out_stream.property_style(self._model.name),
+ out_stream.literal_style(formatting.as_raw(self._model.value)),
+ out_stream.type_style(self._model.type_name)))
+ else:
+ out_stream.write('{0}: {1}'.format(
+ out_stream.property_style(self._model.name),
+ out_stream.literal_style(formatting.as_raw(self._model.value))))
+ if self._model.description:
+ out_stream.write(out_stream.meta_style(self._model.description))
+
+ def instantiate(self, instance_cls, **kwargs):
+ return instance_cls(
+ name=self._model.name, # pylint: disable=unexpected-keyword-arg
+ type_name=self._model.type_name,
+ _value=self._model._value,
+ description=self._model.description
+ )
+
+ def validate(self):
+ pass
+
+ def coerce(self, report_issues): # pylint: disable=arguments-differ
+ value = self._model._value
+ if value is not None:
+ evaluation = functions.evaluate(value, self._model, report_issues)
+ if (evaluation is not None) and evaluation.final:
+ # A final evaluation can safely replace the existing value
+ self._model._value = evaluation.value
+
+
+class Attribute(_Parameter):
+ pass
+
+
+class Input(_Parameter):
+ pass
+
+
+class Output(_Parameter):
+ pass
+
+
+class Argument(_Parameter):
+ pass
+
+
+class Property(_Parameter):
+ pass
+
+
+class Configuration(_Parameter):
+ pass
+
+
+class Type(common.InstanceHandlerBase):
+ def coerce(self, **_):
+ pass
+
+ def dump(self, out_stream):
+ if self._model.name:
+ out_stream.write(out_stream.type_style(self._model.name))
+ with out_stream.indent():
+ for child in self._model.children:
+ self._topology.dump(child, out_stream)
+
+ def validate(self, **kwargs):
+ pass
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/template_handler.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/template_handler.py
new file mode 100644
index 0000000..dda5418
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/template_handler.py
@@ -0,0 +1,609 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from datetime import datetime
+
+from ...utils import (
+ formatting,
+ versions
+)
+from ...modeling import utils as modeling_utils
+from . import utils, common
+
+
+class ServiceTemplate(common.TemplateHandlerBase):
+ def dump(self, out_stream):
+ if self._model.description is not None:
+ out_stream.write(out_stream.meta_style(self._model.description))
+ self._topology.dump(self._model.meta_data, out_stream, title='Metadata')
+ self._topology.dump(self._model.node_templates, out_stream)
+ self._topology.dump(self._model.group_templates, out_stream)
+ self._topology.dump(self._model.policy_templates, out_stream)
+ self._topology.dump(self._model.substitution_template, out_stream)
+ self._topology.dump(self._model.inputs, out_stream, title='Inputs')
+ self._topology.dump(self._model.outputs, out_stream, title='Outputs')
+ self._topology.dump(self._model.workflow_templates, out_stream, title='Workflow templates')
+
+ def coerce(self, **kwargs):
+ self._coerce(self._model.meta_data,
+ self._model.node_templates,
+ self._model.group_templates,
+ self._model.policy_templates,
+ self._model.substitution_template,
+ self._model.inputs,
+ self._model.outputs,
+ self._model.workflow_templates,
+ **kwargs)
+
+ def instantiate(self, instance_cls, inputs=None, plugins=None): # pylint: disable=arguments-differ
+ now = datetime.now()
+
+ # modeling_utils.validate_no_undeclared_inputs(
+ # declared_inputs=self._model.inputs, supplied_inputs=inputs or {})
+ modeling_utils.validate_required_inputs_are_supplied(
+ declared_inputs=self._model.inputs, supplied_inputs=inputs or {})
+
+ service = instance_cls(
+ created_at=now,
+ updated_at=now,
+ description=utils.deepcopy_with_locators(self._model.description),
+ service_template=self._model,
+ inputs=modeling_utils.merge_parameter_values(inputs, self._model.inputs)
+ )
+
+ for plugin_specification in self._model.plugin_specifications.itervalues():
+ if plugin_specification.enabled and plugins:
+ if self._resolve_plugin_specification(plugin_specification, plugins):
+ plugin = plugin_specification.plugin
+ service.plugins[plugin.name] = plugin
+ else:
+ self._topology.report('specified plugin not found: {0}'.format(
+ plugin_specification.name), level=self._topology.Issue.EXTERNAL)
+ service.meta_data = self._topology.instantiate(self._model.meta_data)
+
+ for node_template in self._model.node_templates.itervalues():
+ for _ in range(self._scaling(node_template)['default_instances']):
+ node = self._topology.instantiate(node_template)
+ service.nodes[node.name] = node
+
+ service.groups = self._topology.instantiate(self._model.group_templates)
+ service.policies = self._topology.instantiate(self._model.policy_templates)
+ service.workflows = self._topology.instantiate(self._model.workflow_templates)
+ service.substitution = self._topology.instantiate(self._model.substitution_template)
+ service.outputs = self._topology.instantiate(self._model.outputs)
+
+ return service
+
+ @staticmethod
+ def _resolve_plugin_specification(plugin_specification, plugins):
+ matching_plugins = []
+ if plugins:
+ for plugin in plugins:
+ if (plugin.name == plugin_specification.name and
+ (plugin_specification.version is None or
+ versions.VersionString(plugin.package_version) >=
+ plugin_specification.version)
+ ):
+ matching_plugins.append(plugin)
+ plugin_specification.plugin = None
+ if matching_plugins:
+ # Return highest version of plugin
+ plugin_specification.plugin = \
+ max(matching_plugins,
+ key=lambda plugin: versions.VersionString(plugin.package_version).key)
+ return plugin_specification.plugin is not None
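+
+ # Illustrative sketch (hypothetical data): given plugins named "openstack"
+ # at package_version "1.0" and "2.0" and a specification requiring
+ # "openstack" version >= "1.5", only "2.0" is appended to matching_plugins;
+ # max(...) then selects it as the highest version, plugin_specification's
+ # plugin is set, and the method returns True.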
+
+ def _scaling(self, node_template):
+ scaling = node_template.scaling
+
+ if any([scaling['min_instances'] < 0,
+ scaling['max_instances'] < scaling['min_instances'],
+ scaling['max_instances'] < 0,
+
+ scaling['default_instances'] < 0,
+ scaling['default_instances'] < scaling['min_instances'],
+ scaling['default_instances'] > scaling['max_instances']
+ ]):
+ self._topology.report(
+ 'invalid scaling parameters for node template "{0}": min={min_instances}, max='
+ '{max_instances}, default={default_instances}'.format(node_template.name, **scaling),
+ level=self._topology.Issue.BETWEEN_TYPES)
+
+ return scaling
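+
+ # Illustrative sketch (hypothetical values): a scaling dict such as
+ # {'min_instances': 1, 'max_instances': 3, 'default_instances': 2} passes
+ # every check above, while {'min_instances': 2, 'max_instances': 1,
+ # 'default_instances': 1} triggers the report; the dict is returned either
+ # way, so instantiation proceeds with the declared default_instances.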
+
+ def validate(self, **kwargs):
+ self._validate(
+ self._model.meta_data,
+ self._model.node_templates,
+ self._model.group_templates,
+ self._model.policy_templates,
+ self._model.substitution_template,
+ self._model.inputs,
+ self._model.outputs,
+ self._model.workflow_templates,
+ self._model.node_types,
+ self._model.group_types,
+ self._model.policy_types,
+ self._model.relationship_types,
+ self._model.capability_types,
+ self._model.interface_types,
+ self._model.artifact_types,
+ **kwargs
+ )
+
+
+class ArtifactTemplate(common.TemplateHandlerBase):
+ def dump(self, out_stream):
+ out_stream.write(out_stream.node_style(self._model.name))
+ if self._model.description:
+ out_stream.write(out_stream.meta_style(self._model.description))
+ with out_stream.indent():
+ out_stream.write('Artifact type: {0}'.format(out_stream.type_style(
+ self._model.type.name)))
+ out_stream.write('Source path: {0}'.format(out_stream.literal_style(
+ self._model.source_path)))
+ if self._model.target_path is not None:
+ out_stream.write('Target path: {0}'.format(out_stream.literal_style(
+ self._model.target_path)))
+ if self._model.repository_url is not None:
+ out_stream.write('Repository URL: {0}'.format(
+ out_stream.literal_style(self._model.repository_url)))
+ if self._model.repository_credential:
+ out_stream.write('Repository credential: {0}'.format(
+ out_stream.literal_style(self._model.repository_credential)))
+ self._topology.dump(self._model.properties, out_stream, title='Properties')
+
+ def coerce(self, **kwargs):
+ self._topology.coerce(self._model.properties, **kwargs)
+
+ def instantiate(self, instance_cls, **_):
+ return instance_cls(
+ name=self._model.name,
+ type=self._model.type,
+ description=utils.deepcopy_with_locators(self._model.description),
+ source_path=self._model.source_path,
+ target_path=self._model.target_path,
+ repository_url=self._model.repository_url,
+ repository_credential=self._model.repository_credential,
+ artifact_template=self._model)
+
+ def validate(self, **kwargs):
+ self._topology.validate(self._model.properties, **kwargs)
+
+
+class CapabilityTemplate(common.TemplateHandlerBase):
+ def dump(self, out_stream):
+ out_stream.write(out_stream.node_style(self._model.name))
+ if self._model.description:
+ out_stream.write(out_stream.meta_style(self._model.description))
+ with out_stream.indent():
+ out_stream.write('Type: {0}'.format(out_stream.type_style(self._model.type.name)))
+ out_stream.write(
+ 'Occurrences: {0:d}{1}'.format(
+ self._model.min_occurrences or 0,
+ ' to {0:d}'.format(self._model.max_occurrences)
+ if self._model.max_occurrences is not None
+ else ' or more'))
+ if self._model.valid_source_node_types:
+ out_stream.write('Valid source node types: {0}'.format(
+ ', '.join((str(out_stream.type_style(v.name))
+ for v in self._model.valid_source_node_types))))
+ self._topology.dump(self._model.properties, out_stream, title='Properties')
+
+ def coerce(self, **kwargs):
+ self._topology.coerce(self._model.properties, **kwargs)
+
+ def instantiate(self, instance_cls, **_):
+ capability = instance_cls(
+ name=self._model.name,
+ type=self._model.type,
+ min_occurrences=self._model.min_occurrences,
+ max_occurrences=self._model.max_occurrences,
+ occurrences=0,
+ capability_template=self._model)
+ capability.properties = self._topology.instantiate(self._model.properties)
+ return capability
+
+ def validate(self, **kwargs):
+ self._topology.validate(self._model.properties, **kwargs)
+
+
+class RequirementTemplate(common.TemplateHandlerBase):
+ def dump(self, out_stream):
+ if self._model.name:
+ out_stream.write(out_stream.node_style(self._model.name))
+ else:
+ out_stream.write('Requirement:')
+ with out_stream.indent():
+ if self._model.target_node_type is not None:
+ out_stream.write('Target node type: {0}'.format(
+ out_stream.type_style(self._model.target_node_type.name)))
+ elif self._model.target_node_template is not None:
+ out_stream.write('Target node template: {0}'.format(
+ out_stream.node_style(self._model.target_node_template.name)))
+ if self._model.target_capability_type is not None:
+ out_stream.write('Target capability type: {0}'.format(
+ out_stream.type_style(self._model.target_capability_type.name)))
+ elif self._model.target_capability_name is not None:
+ out_stream.write('Target capability name: {0}'.format(
+ out_stream.node_style(self._model.target_capability_name)))
+ if self._model.target_node_template_constraints:
+ out_stream.write('Target node template constraints:')
+ with out_stream.indent():
+ for constraint in self._model.target_node_template_constraints:
+ out_stream.write(out_stream.literal_style(constraint))
+ if self._model.relationship_template:
+ out_stream.write('Relationship:')
+ with out_stream.indent():
+ self._topology.dump(self._model.relationship_template, out_stream)
+
+ def coerce(self, **kwargs):
+ self._topology.coerce(self._model.relationship_template, **kwargs)
+
+ def instantiate(self, instance_cls, **_):
+ pass
+
+ def validate(self, **kwargs):
+ self._topology.validate(self._model.relationship_template, **kwargs)
+
+
+class GroupTemplate(common.TemplateHandlerBase):
+ def dump(self, out_stream):
+ out_stream.write('Group template: {0}'.format(out_stream.node_style(self._model.name)))
+ if self._model.description:
+ out_stream.write(out_stream.meta_style(self._model.description))
+ with out_stream.indent():
+ out_stream.write('Type: {0}'.format(out_stream.type_style(self._model.type.name)))
+ self._topology.dump(self._model.properties, out_stream, title='Properties')
+ self._topology.dump(self._model.interface_templates, out_stream,
+ title='Interface Templates')
+ if self._model.node_templates:
+ out_stream.write('Member node templates: {0}'.format(', '.join(
+ (str(out_stream.node_style(v.name)) for v in self._model.node_templates))))
+
+ def coerce(self, **kwargs):
+ self._coerce(self._model.properties,
+ self._model.interface_templates,
+ **kwargs)
+
+ def instantiate(self, instance_cls, **_):
+ group = instance_cls(
+ name=self._model.name,
+ type=self._model.type,
+ description=utils.deepcopy_with_locators(self._model.description),
+ group_template=self._model)
+ group.properties = self._topology.instantiate(self._model.properties)
+ group.interfaces = self._topology.instantiate(self._model.interface_templates)
+ if self._model.node_templates:
+ for node_template in self._model.node_templates:
+ group.nodes += node_template.nodes
+ return group
+
+ def validate(self, **kwargs):
+ self._validate(self._model.properties,
+ self._model.interface_templates,
+ **kwargs)
+
+
+class InterfaceTemplate(common.TemplateHandlerBase):
+ def dump(self, out_stream):
+ out_stream.write(out_stream.node_style(self._model.name))
+ if self._model.description:
+ out_stream.write(out_stream.meta_style(self._model.description))
+ with out_stream.indent():
+ out_stream.write('Interface type: {0}'.format(out_stream.type_style(
+ self._model.type.name)))
+ self._topology.dump(self._model.inputs, out_stream, title='Inputs')
+ self._topology.dump(self._model.operation_templates, out_stream,
+ title='Operation templates')
+
+ def coerce(self, **kwargs):
+ self._coerce(self._model.inputs,
+ self._model.operation_templates,
+ **kwargs)
+
+ def instantiate(self, instance_cls, **_):
+ interface = instance_cls(
+ name=self._model.name,
+ type=self._model.type,
+ description=utils.deepcopy_with_locators(self._model.description),
+ interface_template=self._model)
+ interface.inputs = self._topology.instantiate(self._model.inputs)
+ interface.operations = self._topology.instantiate(self._model.operation_templates)
+ return interface
+
+ def validate(self, **kwargs):
+ self._validate(self._model.inputs,
+ self._model.operation_templates,
+ **kwargs)
+
+
+class NodeTemplate(common.TemplateHandlerBase):
+ def dump(self, out_stream):
+ out_stream.write('Node template: {0}'.format(out_stream.node_style(self._model.name)))
+ with out_stream.indent():
+ if self._model.description:
+ out_stream.write(out_stream.meta_style(self._model.description))
+ out_stream.write('Type: {0}'.format(out_stream.type_style(self._model.type.name)))
+ self._topology.dump(self._model.properties, out_stream, title='Properties')
+ self._topology.dump(self._model.attributes, out_stream, title='Attributes')
+ self._topology.dump(
+ self._model.interface_templates, out_stream, title='Interface Templates')
+ self._topology.dump(
+ self._model.artifact_templates, out_stream, title='Artifact Templates')
+ self._topology.dump(
+ self._model.capability_templates, out_stream, title='Capability Templates')
+ self._topology.dump(
+ self._model.requirement_templates, out_stream, title='Requirement Templates')
+
+ def coerce(self, **kwargs):
+ self._coerce(self._model.properties,
+ self._model.attributes,
+ self._model.interface_templates,
+ self._model.artifact_templates,
+ self._model.capability_templates,
+ self._model.requirement_templates,
+ **kwargs)
+
+ def instantiate(self, instance_cls, **_):
+ node = instance_cls(
+ name=self._model._next_name,
+ type=self._model.type,
+ description=utils.deepcopy_with_locators(self._model.description),
+ node_template=self._model
+ )
+
+ node.properties = self._topology.instantiate(self._model.properties)
+ node.attributes = self._topology.instantiate(self._model.attributes)
+ node.interfaces = self._topology.instantiate(self._model.interface_templates)
+ node.artifacts = self._topology.instantiate(self._model.artifact_templates)
+ node.capabilities = self._topology.instantiate(self._model.capability_templates)
+
+ # Default attributes
+ if 'tosca_name' in node.attributes and node.attributes['tosca_name'].type_name == 'string':
+ node.attributes['tosca_name'].value = self._model.name
+ if 'tosca_id' in node.attributes and node.attributes['tosca_id'].type_name == 'string':
+ node.attributes['tosca_id'].value = node.name
+
+ return node
+
+ def validate(self, **kwargs):
+ self._validate(self._model.properties,
+ self._model.attributes,
+ self._model.interface_templates,
+ self._model.artifact_templates,
+ self._model.capability_templates,
+ self._model.requirement_templates,
+ **kwargs)
+
+
+class PolicyTemplate(common.TemplateHandlerBase):
+ def dump(self, out_stream):
+ out_stream.write('Policy template: {0}'.format(out_stream.node_style(self._model.name)))
+ if self._model.description:
+ out_stream.write(out_stream.meta_style(self._model.description))
+ with out_stream.indent():
+ out_stream.write('Type: {0}'.format(out_stream.type_style(self._model.type.name)))
+ self._topology.dump(self._model.properties, out_stream, title='Properties')
+ if self._model.node_templates:
+ out_stream.write('Target node templates: {0}'.format(', '.join(
+ (str(out_stream.node_style(v.name)) for v in self._model.node_templates))))
+ if self._model.group_templates:
+ out_stream.write('Target group templates: {0}'.format(', '.join(
+ (str(out_stream.node_style(v.name)) for v in self._model.group_templates))))
+
+ def coerce(self, **kwargs):
+ self._topology.coerce(self._model.properties, **kwargs)
+
+ def instantiate(self, instance_cls, **_):
+ policy = instance_cls(
+ name=self._model.name,
+ type=self._model.type,
+ description=utils.deepcopy_with_locators(self._model.description),
+ policy_template=self._model)
+
+ policy.properties = self._topology.instantiate(self._model.properties)
+ if self._model.node_templates:
+ for node_template in self._model.node_templates:
+ policy.nodes += node_template.nodes
+ if self._model.group_templates:
+ for group_template in self._model.group_templates:
+ policy.groups += group_template.groups
+ return policy
+
+ def validate(self, **kwargs):
+ self._topology.validate(self._model.properties, **kwargs)
+
+
+class SubstitutionTemplate(common.TemplateHandlerBase):
+
+ def dump(self, out_stream):
+ out_stream.write('Substitution template:')
+ with out_stream.indent():
+ out_stream.write('Node type: {0}'.format(out_stream.type_style(
+ self._model.node_type.name)))
+ self._topology.dump(self._model.mappings, out_stream, title='Mappings')
+
+ def coerce(self, **kwargs):
+ self._topology.coerce(self._model.mappings, **kwargs)
+
+ def instantiate(self, instance_cls, **_):
+ return instance_cls(node_type=self._model.node_type, substitution_template=self._model)
+
+ def validate(self, **kwargs):
+ self._topology.validate(self._model.mappings, **kwargs)
+
+
+class SubstitutionTemplateMapping(common.TemplateHandlerBase):
+
+ def dump(self, out_stream):
+ if self._model.capability_template is not None:
+ node_template = self._model.capability_template.node_template
+ else:
+ node_template = self._model.requirement_template.node_template
+ out_stream.write('{0} -> {1}.{2}'.format(
+ out_stream.node_style(self._model.name),
+ out_stream.node_style(node_template.name),
+ out_stream.node_style(self._model.capability_template.name
+ if self._model.capability_template
+ else self._model.requirement_template.name)))
+
+ def coerce(self, **_):
+ pass
+
+ def instantiate(self, instance_cls, **_):
+ substitution_mapping = instance_cls(
+ name=self._model.name,
+ requirement_template=self._model.requirement_template)
+
+ if self._model.capability_template is not None:
+ node_template = self._model.capability_template.node_template
+ else:
+ node_template = self._model.requirement_template.node_template
+ nodes = node_template.nodes
+ if len(nodes) == 0:
+ self._topology.report(
+ 'mapping "{0}" refers to node template "{1}" but there are no node instances'.
+ format(self._model.mapped_name, node_template.name),
+ level=self._topology.Issue.BETWEEN_INSTANCES)
+ return None
+ # The TOSCA spec does not provide a way to choose the node,
+ # so we will just pick the first one
+ substitution_mapping.node = nodes[0]
+ if self._model.capability_template:
+ for a_capability in substitution_mapping.node.capabilities.itervalues():
+ if a_capability.capability_template.name == \
+ self._model.capability_template.name:
+ substitution_mapping.capability = a_capability
+
+ return substitution_mapping
+
+ def validate(self, **_):
+ if self._model.capability_template is None and self._model.requirement_template is None:
+ self._topology.report(
+ 'mapping "{0}" refers to neither capability nor a requirement '
+ 'in node template: {1}'.format(
+ self._model.name, formatting.safe_repr(self._model.node_template.name)),
+ level=self._topology.Issue.BETWEEN_TYPES)
+
+
+class RelationshipTemplate(common.TemplateHandlerBase):
+ def dump(self, out_stream):
+ if self._model.type is not None:
+ out_stream.write('Relationship type: {0}'.format(out_stream.type_style(
+ self._model.type.name)))
+ else:
+ out_stream.write('Relationship template: {0}'.format(
+ out_stream.node_style(self._model.name)))
+ if self._model.description:
+ out_stream.write(out_stream.meta_style(self._model.description))
+ with out_stream.indent():
+ self._topology.dump(self._model.properties, out_stream, title='Properties')
+ self._topology.dump(self._model.interface_templates, out_stream,
+ title='Interface Templates')
+
+ def coerce(self, **kwargs):
+ self._coerce(self._model.properties, self._model.interface_templates, **kwargs)
+
+ def instantiate(self, instance_cls, **_):
+ relationship = instance_cls(
+ name=self._model.name,
+ type=self._model.type,
+ relationship_template=self._model)
+
+ relationship.properties = self._topology.instantiate(self._model.properties)
+ relationship.interfaces = self._topology.instantiate(self._model.interface_templates)
+ return relationship
+
+ def validate(self, **kwargs):
+ self._validate(self._model.properties, self._model.interface_templates, **kwargs)
+
+
+class OperationTemplate(common.TemplateHandlerBase):
+
+ def dump(self, out_stream):
+ out_stream.write(out_stream.node_style(self._model.name))
+ if self._model.description:
+ out_stream.write(out_stream.meta_style(self._model.description))
+ with out_stream.indent():
+ if self._model.implementation is not None:
+ out_stream.write('Implementation: {0}'.format(
+ out_stream.literal_style(self._model.implementation)))
+ if self._model.dependencies:
+ out_stream.write('Dependencies: {0}'.format(', '.join(
+ (str(out_stream.literal_style(v)) for v in self._model.dependencies))))
+ self._topology.dump(self._model.inputs, out_stream, title='Inputs')
+ if self._model.executor is not None:
+ out_stream.write('Executor: {0}'.format(
+ out_stream.literal_style(self._model.executor)))
+ if self._model.max_attempts is not None:
+ out_stream.write('Max attempts: {0}'.format(out_stream.literal_style(
+ self._model.max_attempts)))
+ if self._model.retry_interval is not None:
+ out_stream.write('Retry interval: {0}'.format(
+ out_stream.literal_style(self._model.retry_interval)))
+ if self._model.plugin_specification is not None:
+ out_stream.write('Plugin specification: {0}'.format(
+ out_stream.literal_style(self._model.plugin_specification.name)))
+ self._topology.dump(self._model.configurations, out_stream, title='Configuration')
+ if self._model.function is not None:
+ out_stream.write('Function: {0}'.format(out_stream.literal_style(
+ self._model.function)))
+
+ def coerce(self, **kwargs):
+ self._coerce(self._model.inputs,
+ self._model.configurations,
+ **kwargs)
+
+ def instantiate(self, instance_cls, **_):
+ operation = instance_cls(
+ name=self._model.name,
+ description=utils.deepcopy_with_locators(self._model.description),
+ relationship_edge=self._model.relationship_edge,
+ implementation=self._model.implementation,
+ dependencies=self._model.dependencies,
+ executor=self._model.executor,
+ function=self._model.function,
+ max_attempts=self._model.max_attempts,
+ retry_interval=self._model.retry_interval,
+ operation_template=self._model)
+
+ if (self._model.plugin_specification is not None and
+ self._model.plugin_specification.enabled):
+ operation.plugin = self._model.plugin_specification.plugin
+
+ operation.inputs = self._topology.instantiate(self._model.inputs)
+ operation.configurations = self._topology.instantiate(self._model.configurations)
+
+ return operation
+
+ def validate(self, **kwargs):
+ self._validate(self._model.inputs,
+ self._model.configurations,
+ **kwargs)
+
+
+class PluginSpecification(common.HandlerBase):
+ def validate(self, **kwargs):
+ pass
+
+ def coerce(self, **kwargs):
+ pass
+
+ def instantiate(self, **_):
+ pass
+
+ def dump(self, out_stream):
+ pass
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/topology.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/topology.py
new file mode 100644
index 0000000..f86c9dd
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/topology.py
@@ -0,0 +1,223 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ...parser.validation import issue
+from ...modeling import models
+from ...utils import console
+from . import (
+ template_handler,
+ instance_handler,
+ common
+)
+
+
+class Topology(issue.ReporterMixin):
+
+ _init_map = {
+ models.ServiceTemplate: models.Service,
+ models.ArtifactTemplate: models.Artifact,
+ models.CapabilityTemplate: models.Capability,
+ models.GroupTemplate: models.Group,
+ models.InterfaceTemplate: models.Interface,
+ models.NodeTemplate: models.Node,
+ models.PolicyTemplate: models.Policy,
+ models.SubstitutionTemplate: models.Substitution,
+ models.RelationshipTemplate: models.Relationship,
+ models.OperationTemplate: models.Operation,
+ models.SubstitutionTemplateMapping: models.SubstitutionMapping,
+
+ # Common
+ models.Metadata: models.Metadata,
+ models.Attribute: models.Attribute,
+ models.Property: models.Property,
+ models.Input: models.Input,
+ models.Output: models.Output,
+ models.Configuration: models.Configuration,
+ models.Argument: models.Argument,
+ models.Type: models.Type
+ }
+
+ def __init__(self, *args, **kwargs):
+ super(Topology, self).__init__(*args, **kwargs)
+ self._model_cls_to_handler = dict(self._init_handlers(instance_handler),
+ **self._init_handlers(template_handler))
+
+ @staticmethod
+ def _init_handlers(module_):
+ """
+ Register handlers from a handler module to the models.
+
+ :param module_: the module in which to look for handlers
+ :returns: dict where the key is the models class, and the value is the handler class
+ associated with it from the provided module
+ """
+ handlers = {}
+ for attribute_name in dir(module_):
+ if attribute_name.startswith('_'):
+ continue
+ attribute = getattr(module_, attribute_name)
+ if isinstance(attribute, type) and issubclass(attribute, common.HandlerBase):
+ handlers[getattr(models, attribute_name)] = attribute
+ return handlers
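+
+ # The pairing above is purely name-based: a handler class is registered for
+ # the model class of the same name, so (for example) template_handler's
+ # NodeTemplate handles models.NodeTemplate and instance_handler's Node
+ # handles models.Node (an inference from the loop above, not an explicit
+ # registry).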
+
+ def instantiate(self, model, **kwargs):
+ """
+ Instantiate the provided model.
+
+ :param model:
+ :param kwargs:
+ :returns:
+ """
+ if isinstance(model, dict):
+ return dict((name, self.instantiate(value, **kwargs))
+ for name, value in model.iteritems())
+ elif isinstance(model, list):
+ return list(self.instantiate(value, **kwargs) for value in model)
+ elif model is not None:
+ _handler = self._model_cls_to_handler[model.__class__]
+ model_instance_cls = self._init_map[model.__class__]
+ return _handler(self, model).instantiate(model_instance_cls, **kwargs)
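+
+ # Illustrative sketch: dispatch is structural, so a dict of templates comes
+ # back as a dict of instances keyed by the same names, e.g. (hypothetical
+ # template name):
+ #
+ # nodes = topology.instantiate(service_template.node_templates)
+ # # -> {'web_server': <Node instance>, ...}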
+
+ def validate(self, model, **kwargs):
+ if isinstance(model, dict):
+ return self.validate(model.values(), **kwargs)
+ elif isinstance(model, list):
+ return all(self.validate(value, **kwargs) for value in model)
+ elif model is not None:
+ _handler = self._model_cls_to_handler[model.__class__]
+ return _handler(self, model).validate(**kwargs)
+
+ def dump(self, model, out_stream=None, title=None, **kwargs):
+ out_stream = out_stream or console.TopologyStylizer()
+
+ # if model is empty, no need to print out the section name
+ if model and title:
+ out_stream.write('{0}:'.format(title))
+
+ if isinstance(model, dict):
+ if str(out_stream):
+ with out_stream.indent():
+ return self.dump(model.values(), out_stream=out_stream, **kwargs)
+ else:
+ return self.dump(model.values(), out_stream=out_stream, **kwargs)
+
+ elif isinstance(model, list):
+ for value in model:
+ self.dump(value, out_stream=out_stream, **kwargs)
+
+ elif model is not None:
+ _handler = self._model_cls_to_handler[model.__class__]
+ _handler(self, model).dump(out_stream=out_stream, **kwargs)
+
+ return out_stream
+
+ def dump_graph(self, service):
+ out_stream = console.TopologyStylizer()
+ for node in service.nodes.itervalues():
+ if not node.inbound_relationships:
+ self._dump_graph_node(out_stream, node)
+ return out_stream
+
+ def _dump_graph_node(self, out_stream, node, capability=None):
+ out_stream.write(out_stream.node_style(node.name))
+ if capability is not None:
+ out_stream.write('{0} ({1})'.format(out_stream.property_style(capability.name),
+ out_stream.type_style(capability.type.name)))
+ if node.outbound_relationships:
+ with out_stream.indent():
+ for relationship_model in node.outbound_relationships:
+ styled_relationship_name = out_stream.property_style(relationship_model.name)
+ if relationship_model.type is not None:
+ out_stream.write('-> {0} ({1})'.format(
+ styled_relationship_name,
+ out_stream.type_style(relationship_model.type.name)))
+ else:
+ out_stream.write('-> {0}'.format(styled_relationship_name))
+ with out_stream.indent(3):
+ self._dump_graph_node(out_stream,
+ relationship_model.target_node,
+ relationship_model.target_capability)
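+
+ # Illustrative sketch (hypothetical nodes): for a service where "web_app"
+ # has an outbound "host" relationship to "web_server", the recursion above
+ # renders an indented tree roughly like:
+ #
+ # web_app
+ # -> host (tosca.relationships.HostedOn)
+ # web_server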
+
+ def coerce(self, model, **kwargs):
+ if isinstance(model, dict):
+ return self.coerce(model.values(), **kwargs)
+ elif isinstance(model, list):
+ return all(self.coerce(value, **kwargs) for value in model)
+ elif model is not None:
+ _handler = self._model_cls_to_handler[model.__class__]
+ return _handler(self, model).coerce(**kwargs)
+
+ def dump_types(self, service_template, out_stream=None):
+ out_stream = out_stream or console.TopologyStylizer()
+ self.dump(service_template.node_types, out_stream, 'Node types')
+ self.dump(service_template.group_types, out_stream, 'Group types')
+ self.dump(service_template.capability_types, out_stream, 'Capability types')
+ self.dump(service_template.relationship_types, out_stream, 'Relationship types')
+ self.dump(service_template.policy_types, out_stream, 'Policy types')
+ self.dump(service_template.artifact_types, out_stream, 'Artifact types')
+ self.dump(service_template.interface_types, out_stream, 'Interface types')
+
+ return out_stream
+
+ def satisfy_requirements(self, model, **kwargs):
+ if isinstance(model, dict):
+ return self.satisfy_requirements(model.values(), **kwargs)
+ elif isinstance(model, list):
+ return all(self.satisfy_requirements(value, **kwargs) for value in model)
+ elif model is not None:
+ _handler = self._model_cls_to_handler[model.__class__]
+ return _handler(self, model).satisfy_requirements(**kwargs)
+
+ def validate_capabilities(self, model, **kwargs):
+ if isinstance(model, dict):
+ return self.validate_capabilities(model.values(), **kwargs)
+ elif isinstance(model, list):
+ return all(self.validate_capabilities(value, **kwargs) for value in model)
+ elif model is not None:
+ _handler = self._model_cls_to_handler[model.__class__]
+ return _handler(self, model).validate_capabilities(**kwargs)
+
+ def _find_host(self, node):
+ if node.type.role == 'host':
+ return node
+
+ def target_has_role(rel, role):
+ return (rel.target_capability is not None and
+ rel.target_capability.type.role == role)
+
+ for outbound_relationship in node.outbound_relationships:
+ if target_has_role(outbound_relationship, 'host'):
+ host = self._find_host(outbound_relationship.target_node)
+ if host is not None:
+ return host
+ for inbound_relationship in node.inbound_relationships:
+ if target_has_role(inbound_relationship, 'feature'):
+ host = self._find_host(inbound_relationship.source_node)
+ if host is not None:
+ return host
+ return None
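+
+ # Illustrative sketch (hypothetical types): for a "WebApp" node related to
+ # a "Compute" node whose type role is 'host', following the outbound
+ # relationship whose target capability role is 'host' returns the Compute
+ # node; a node reachable only through an inbound 'feature'-role
+ # relationship inherits the host found for its source node instead.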
+
+ def assign_hosts(self, service):
+ for node in service.nodes.values():
+ node.host = self._find_host(node)
+
+ def configure_operations(self, model, **kwargs):
+ if isinstance(model, dict):
+ return self.configure_operations(model.values(), **kwargs)
+ elif isinstance(model, list):
+ return all(self.configure_operations(value, **kwargs) for value in model)
+ elif model is not None:
+ _handler = self._model_cls_to_handler[model.__class__]
+ return _handler(self, model).configure_operations(**kwargs)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/utils.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/utils.py
new file mode 100644
index 0000000..ec74391
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/utils.py
@@ -0,0 +1,48 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from copy import deepcopy
+
+
+def deepcopy_with_locators(value):
+ """
+ Like :func:`deepcopy`, but also copies over locators.
+ """
+
+ res = deepcopy(value)
+ copy_locators(res, value)
+ return res
+
+
+def copy_locators(target, source):
+ """
+ Copies over ``_locator`` for all elements, recursively.
+
+ Assumes that target and source have exactly the same list/dict structure.
+ """
+
+ locator = getattr(source, '_locator', None)
+ if locator is not None:
+ try:
+ setattr(target, '_locator', locator)
+ except AttributeError:
+ pass
+
+ if isinstance(target, list) and isinstance(source, list):
+ for i, _ in enumerate(target):
+ copy_locators(target[i], source[i])
+ elif isinstance(target, dict) and isinstance(source, dict):
+ for k, v in target.items():
+ copy_locators(v, source[k])
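+
+ # Sketch of intended usage, assuming a parsed value whose nested elements
+ # carry "_locator" attributes pointing back at their source locations:
+ #
+ # copied = deepcopy_with_locators(raw_value)
+ # # "copied" mirrors the original structure, and every element able to
+ # # accept the attribute keeps its original _locator for error reporting.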
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflow_runner.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflow_runner.py
new file mode 100644
index 0000000..0c52e32
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflow_runner.py
@@ -0,0 +1,194 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Running workflows.
+"""
+
+import os
+import sys
+from datetime import datetime
+
+from . import exceptions
+from .context.workflow import WorkflowContext
+from .workflows import builtin
+from .workflows.core import engine, graph_compiler
+from .workflows.executor.process import ProcessExecutor
+from ..modeling import models
+from ..modeling import utils as modeling_utils
+from ..utils.imports import import_fullname
+
+DEFAULT_TASK_MAX_ATTEMPTS = 30
+DEFAULT_TASK_RETRY_INTERVAL = 30
+
+
+class WorkflowRunner(object):
+
+ def __init__(self, model_storage, resource_storage, plugin_manager,
+ execution_id=None, retry_failed_tasks=False,
+ service_id=None, workflow_name=None, inputs=None, executor=None,
+ task_max_attempts=DEFAULT_TASK_MAX_ATTEMPTS,
+ task_retry_interval=DEFAULT_TASK_RETRY_INTERVAL):
+ """
+ Manages a single workflow execution on a given service.
+
+ :param workflow_name: workflow name
+ :param service_id: service ID
+ :param inputs: key-value dict of inputs for the execution
+ :param model_storage: model storage API ("MAPI")
+ :param resource_storage: resource storage API ("RAPI")
+ :param plugin_manager: plugin manager
+ :param executor: executor for tasks; defaults to a
+ :class:`~aria.orchestrator.workflows.executor.process.ProcessExecutor` instance
+ :param task_max_attempts: maximum number of attempts for each failing task
+ :param task_retry_interval: interval (in seconds) between retry attempts of a failing task
+ """
+
+ if not (execution_id or (workflow_name and service_id)):
+ raise exceptions.InvalidWorkflowRunnerParams(
+ "Either provide execution id in order to resume a workflow or workflow name "
+ "and service id with inputs")
+
+ self._is_resume = execution_id is not None
+ self._retry_failed_tasks = retry_failed_tasks
+
+ self._model_storage = model_storage
+ self._resource_storage = resource_storage
+
+ # the IDs are stored rather than the models themselves, so this module could be used
+ # by several threads without raising errors on model objects shared between threads
+
+ if self._is_resume:
+ self._service_id = service_id
+ # self._service_id = self.execution.service.id
+ # self._workflow_name = model_storage.execution.get(self._execution_id).workflow_name
+ self._workflow_name = workflow_name
+ self._validate_workflow_exists_for_service()
+ self._execution_id = execution_id
+
+ else:
+ self._service_id = service_id
+ self._workflow_name = workflow_name
+ self._validate_workflow_exists_for_service()
+ self._execution_id = self._create_execution_model(inputs, execution_id).id
+
+ self._workflow_context = WorkflowContext(
+ name=self.__class__.__name__,
+ model_storage=self._model_storage,
+ resource_storage=resource_storage,
+ service_id=self._service_id,
+ execution_id=self._execution_id,
+ workflow_name=self._workflow_name,
+ task_max_attempts=task_max_attempts,
+ task_retry_interval=task_retry_interval)
+
+ # Set default executor and kwargs
+ executor = executor or ProcessExecutor(plugin_manager=plugin_manager)
+
+ # transforming the execution inputs to dict, to pass them to the workflow function
+ # execution_inputs_dict = dict(inp.unwrapped for inp in self.execution.inputs.itervalues())
+
+ # if not self._is_resume:
+ # workflow_fn = self._get_workflow_fn()
+ # self._tasks_graph = workflow_fn(ctx=self._workflow_context, **execution_inputs_dict)
+ # compiler = graph_compiler.GraphCompiler(self._workflow_context, executor.__class__)
+ # compiler.compile(self._tasks_graph)
+
+ self._engine = engine.Engine(executors={executor.__class__: executor})
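+
+ # Sketch of typical usage (the storage and plugin-manager objects are
+ # assumed to exist already; only the WorkflowRunner signature is from this
+ # file):
+ #
+ # runner = WorkflowRunner(model_storage, resource_storage, plugin_manager,
+ # service_id=service.id, workflow_name='install',
+ # inputs={})
+ # runner.execute()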
+
+ @property
+ def execution_id(self):
+ return self._execution_id
+
+ @property
+ def execution(self):
+ return self._model_storage.execution.get(self._execution_id)
+
+ @property
+ def service(self):
+ return self._model_storage.service.get(self._service_id)
+
+ def execute(self):
+ self._engine.execute(ctx=self._workflow_context,
+ resuming=self._is_resume,
+ retry_failed=self._retry_failed_tasks)
+
+ def cancel(self):
+ self._engine.cancel_execution(ctx=self._workflow_context)
+
+ def _create_execution_model(self, inputs, execution_id):
+ execution = models.Execution(
+ created_at=datetime.utcnow(),
+ service=self.service,
+ workflow_name=self._workflow_name,
+ inputs={})
+
+ if self._workflow_name in builtin.BUILTIN_WORKFLOWS:
+ workflow_inputs = dict() # built-in workflows don't have any inputs
+ else:
+ workflow_inputs = self.service.workflows[self._workflow_name].inputs
+
+ # modeling_utils.validate_no_undeclared_inputs(declared_inputs=workflow_inputs,
+ # supplied_inputs=inputs or {})
+ modeling_utils.validate_required_inputs_are_supplied(declared_inputs=workflow_inputs,
+ supplied_inputs=inputs or {})
+ execution.inputs = modeling_utils.merge_parameter_values(
+ inputs, workflow_inputs, model_cls=models.Input)
+ execution.id = execution_id
+ # TODO: these two following calls should execute atomically
+ self._validate_no_active_executions(execution)
+ self._model_storage.execution.put(execution)
+ return execution
+
+ def _validate_workflow_exists_for_service(self):
+ if self._workflow_name not in self.service.workflows and \
+ self._workflow_name not in builtin.BUILTIN_WORKFLOWS:
+ raise exceptions.UndeclaredWorkflowError(
+ 'No workflow policy {0} declared in service {1}'
+ .format(self._workflow_name, self.service.name))
+
+ def _validate_no_active_executions(self, execution):
+ active_executions = [e for e in self.service.executions if e.is_active()]
+ if active_executions:
+ raise exceptions.ActiveExecutionsError(
+ "Can't start execution; Service {0} has an active execution with ID {1}"
+ .format(self.service.name, active_executions[0].id))
+
+ def _get_workflow_fn(self):
+ if self._workflow_name in builtin.BUILTIN_WORKFLOWS:
+ return import_fullname('{0}.{1}'.format(builtin.BUILTIN_WORKFLOWS_PATH_PREFIX,
+ self._workflow_name))
+
+ workflow = self.service.workflows[self._workflow_name]
+
+ # TODO: Custom workflow support needs improvement, currently this code uses internal
+ # knowledge of the resource storage; Instead, workflows should probably be loaded
+ # in a similar manner to operation plugins. Also consider passing to import_fullname
+ # as paths instead of appending to sys path.
+ service_template_resources_path = os.path.join(
+ self._resource_storage.service_template.base_path,
+ str(self.service.service_template.id))
+ sys.path.append(service_template_resources_path)
+
+ try:
+ workflow_fn = import_fullname(workflow.function)
+ except ImportError:
+ raise exceptions.WorkflowImplementationNotFoundError(
+ 'Could not find workflow {0} function at {1}'.format(
+ self._workflow_name, workflow.function))
+
+ return workflow_fn
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/__init__.py
new file mode 100644
index 0000000..1f6c368
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/__init__.py
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Workflows package.
+"""
+
+# Import required so that logging signals are registered
+from . import events_logging
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/api/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/api/__init__.py
new file mode 100644
index 0000000..587eee3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/api/__init__.py
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Workflow API.
+"""
+
+from . import task, task_graph
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/api/task.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/api/task.py
new file mode 100644
index 0000000..6ce4a00
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/api/task.py
@@ -0,0 +1,272 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Provides the tasks to be entered into the task graph.
+"""
+
+from ... import context
+from ....modeling import models
+from ....modeling import utils as modeling_utils
+from ....utils.uuid import generate_uuid
+from .. import exceptions
+
+
+class BaseTask(object):
+ """
+ Base class for tasks.
+ """
+
+ def __init__(self, ctx=None, **kwargs):
+ if ctx is not None:
+ self._workflow_context = ctx
+ else:
+ self._workflow_context = context.workflow.current.get()
+ self._id = generate_uuid(variant='uuid')
+
+ @property
+ def id(self):
+ """
+ UUID4 ID.
+ """
+ return self._id
+
+ @property
+ def workflow_context(self):
+ """
+ Context of the current workflow.
+ """
+ return self._workflow_context
+
+
+class OperationTask(BaseTask):
+ """
+ Executes an operation.
+
+ :ivar name: formatted name (includes actor type, actor name, and interface/operation names)
+ :vartype name: basestring
+ :ivar actor: node or relationship
+ :vartype actor: :class:`~aria.modeling.models.Node` or
+ :class:`~aria.modeling.models.Relationship`
+ :ivar interface_name: interface name on actor
+ :vartype interface_name: basestring
+ :ivar operation_name: operation name on interface
+ :vartype operation_name: basestring
+ :ivar plugin: plugin (or None for default plugin)
+ :vartype plugin: :class:`~aria.modeling.models.Plugin`
+ :ivar function: path to Python function
+ :vartype function: basestring
+ :ivar arguments: arguments to send to Python function
+ :vartype arguments: {:obj:`basestring`: :class:`~aria.modeling.models.Argument`}
+ :ivar ignore_failure: whether to ignore failures
+ :vartype ignore_failure: bool
+ :ivar max_attempts: maximum number of attempts allowed in case of failure
+ :vartype max_attempts: int
+ :ivar retry_interval: interval between retries (in seconds)
+ :vartype retry_interval: float
+ """
+
+ NAME_FORMAT = '{interface}:{operation}@{type}:{name}'
+
+ def __init__(self,
+ actor,
+ interface_name,
+ operation_name,
+ arguments=None,
+ ignore_failure=None,
+ max_attempts=None,
+ retry_interval=None):
+ """
+ :param actor: node or relationship
+ :type actor: :class:`~aria.modeling.models.Node` or
+ :class:`~aria.modeling.models.Relationship`
+ :param interface_name: interface name on actor
+ :type interface_name: basestring
+ :param operation_name: operation name on interface
+ :type operation_name: basestring
+ :param arguments: override argument values
+ :type arguments: {:obj:`basestring`: object}
+ :param ignore_failure: override whether to ignore failures
+ :type ignore_failure: bool
+ :param max_attempts: override maximum number of attempts allowed in case of failure
+ :type max_attempts: int
+ :param retry_interval: override interval between retries (in seconds)
+ :type retry_interval: float
+ :raises ~aria.orchestrator.workflows.exceptions.OperationNotFoundException: if
+ ``interface_name`` and ``operation_name`` do not refer to an operation on the actor
+ """
+
+ # Creating OperationTask directly should raise an error when there is no
+ # interface/operation.
+ if not has_operation(actor, interface_name, operation_name):
+ raise exceptions.OperationNotFoundException(
+ 'Could not find operation "{operation_name}" on interface '
+ '"{interface_name}" for {actor_type} "{actor.name}"'.format(
+ operation_name=operation_name,
+ interface_name=interface_name,
+ actor_type=type(actor).__name__.lower(),
+ actor=actor)
+ )
+
+ super(OperationTask, self).__init__()
+
+ self.name = OperationTask.NAME_FORMAT.format(type=type(actor).__name__.lower(),
+ name=actor.name,
+ interface=interface_name,
+ operation=operation_name)
+ self.actor = actor
+ self.interface_name = interface_name
+ self.operation_name = operation_name
+ self.ignore_failure = \
+ self.workflow_context._task_ignore_failure if ignore_failure is None else ignore_failure
+ self.max_attempts = max_attempts or self.workflow_context._task_max_attempts
+ self.retry_interval = retry_interval or self.workflow_context._task_retry_interval
+
+ operation = self.actor.interfaces[self.interface_name].operations[self.operation_name]
+ self.plugin = operation.plugin
+ self.function = operation.function
+ self.arguments = modeling_utils.merge_parameter_values(arguments, operation.arguments)
+
+ actor = self.actor
+ if hasattr(actor, '_wrapped'):
+ # Unwrap instrumented model
+ actor = actor._wrapped
+
+ if isinstance(actor, models.Node):
+ self._context_cls = context.operation.NodeOperationContext
+ elif isinstance(actor, models.Relationship):
+ self._context_cls = context.operation.RelationshipOperationContext
+ else:
+ raise exceptions.TaskCreationException('Could not create valid context for '
+ '{actor.__class__}'.format(actor=actor))
+
+ def __repr__(self):
+ return self.name
+
+
+class StubTask(BaseTask):
+ """
+ Enables creating empty tasks.
+ """
+
+
+class WorkflowTask(BaseTask):
+ """
+ Executes a complete workflow.
+ """
+
+ def __init__(self, workflow_func, **kwargs):
+ """
+ :param workflow_func: function to run
+ :param kwargs: kwargs that would be passed to the workflow_func
+ """
+ super(WorkflowTask, self).__init__(**kwargs)
+ kwargs['ctx'] = self.workflow_context
+ self._graph = workflow_func(**kwargs)
+
+ @property
+ def graph(self):
+ """
+ Graph constructed by the sub workflow.
+ """
+ return self._graph
+
+ def __getattr__(self, item):
+ try:
+ return getattr(self._graph, item)
+ except AttributeError:
+ return super(WorkflowTask, self).__getattribute__(item)
+
+
+def create_task(actor, interface_name, operation_name, **kwargs):
+ """
+    Helper function that enables safe creation of :class:`OperationTask`. If the supplied interface
+    or operation does not exist, ``None`` is returned.
+
+ :param actor: actor for this task
+ :param interface_name: name of the interface
+ :param operation_name: name of the operation
+ :param kwargs: any additional kwargs to be passed to the OperationTask
+    :return: OperationTask or None (if the interface/operation does not exist)
+ """
+ try:
+ return OperationTask(
+ actor,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ **kwargs
+ )
+ except exceptions.OperationNotFoundException:
+ return None
+
+
+def create_relationships_tasks(
+ node, interface_name, source_operation_name=None, target_operation_name=None, **kwargs):
+ """
+    Creates relationship tasks (source and target) for all of a node's relationships.
+
+    :param node: source node
+    :param basestring interface_name: name of the interface
+    :param source_operation_name: relationship operation name on the source
+    :param target_operation_name: relationship operation name on the target
+ """
+ sub_tasks = []
+ for relationship in node.outbound_relationships:
+ relationship_operations = create_relationship_tasks(
+ relationship,
+ interface_name,
+ source_operation_name=source_operation_name,
+ target_operation_name=target_operation_name,
+ **kwargs)
+ sub_tasks.append(relationship_operations)
+ return sub_tasks
+
+
+def create_relationship_tasks(relationship, interface_name, source_operation_name=None,
+ target_operation_name=None, **kwargs):
+ """
+    Creates relationship tasks (source and/or target) for a single relationship.
+
+    :param relationship: relationship instance
+    :param interface_name: name of the interface
+    :param source_operation_name: operation name on the source
+    :param target_operation_name: operation name on the target
+ """
+ operations = []
+ if source_operation_name:
+ operations.append(
+ create_task(
+ relationship,
+ interface_name=interface_name,
+ operation_name=source_operation_name,
+ **kwargs
+ )
+ )
+ if target_operation_name:
+ operations.append(
+ create_task(
+ relationship,
+ interface_name=interface_name,
+ operation_name=target_operation_name,
+ **kwargs
+ )
+ )
+
+ return [o for o in operations if o]
+
+
+def has_operation(actor, interface_name, operation_name):
+ interface = actor.interfaces.get(interface_name, None)
+ return interface and interface.operations.get(operation_name, False)
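For orientation, a minimal sketch of how these helpers are typically driven from inside a ``@workflow`` function; the node ID and the 'Standard' interface and operation names below are illustrative assumptions, not fixed by this module:

    from aria import workflow
    from aria.orchestrator.workflows.api import task

    @workflow
    def restart_node(ctx, graph, node_id, **kwargs):
        node = ctx.model.node.get(node_id)
        # create_task() returns None when the interface/operation is absent,
        # and the graph helpers filter None out (_filter_out_empty_tasks),
        # so missing operations simply drop out of the sequence.
        stop = task.create_task(node, 'Standard', 'stop')
        start = task.create_task(node, 'Standard', 'start')
        graph.sequence(stop, start)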
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/api/task_graph.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/api/task_graph.py
new file mode 100644
index 0000000..900a0d1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/api/task_graph.py
@@ -0,0 +1,295 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Task graph.
+"""
+
+from collections import Iterable
+
+from networkx import DiGraph, topological_sort
+
+from ....utils.uuid import generate_uuid
+from . import task as api_task
+
+
+class TaskNotInGraphError(Exception):
+ """
+ An error representing a scenario where a given task is not in the graph as expected.
+ """
+ pass
+
+
+def _filter_out_empty_tasks(func=None):
+ if func is None:
+ return lambda f: _filter_out_empty_tasks(func=f)
+
+ def _wrapper(task, *tasks, **kwargs):
+ return func(*(t for t in (task,) + tuple(tasks) if t), **kwargs)
+ return _wrapper
+
+
+class TaskGraph(object):
+ """
+ Task graph builder.
+ """
+
+ def __init__(self, name):
+ self.name = name
+ self._id = generate_uuid(variant='uuid')
+ self._graph = DiGraph()
+
+ def __repr__(self):
+ return '{name}(id={self._id}, name={self.name}, graph={self._graph!r})'.format(
+ name=self.__class__.__name__, self=self)
+
+ @property
+ def id(self):
+ """
+        ID of the graph.
+ """
+ return self._id
+
+ # graph traversal methods
+
+ @property
+ def tasks(self):
+ """
+ Iterator over tasks in the graph.
+ """
+ for _, data in self._graph.nodes_iter(data=True):
+ yield data['task']
+
+ def topological_order(self, reverse=False):
+ """
+ Topological sort of the graph.
+
+ :param reverse: whether to reverse the sort
+ :return: list which represents the topological sort
+ """
+ for task_id in topological_sort(self._graph, reverse=reverse):
+ yield self.get_task(task_id)
+
+ def get_dependencies(self, dependent_task):
+ """
+ Iterates over the task's dependencies.
+
+ :param dependent_task: task whose dependencies are requested
+ :raises ~aria.orchestrator.workflows.api.task_graph.TaskNotInGraphError: if
+ ``dependent_task`` is not in the graph
+ """
+ if not self.has_tasks(dependent_task):
+ raise TaskNotInGraphError('Task id: {0}'.format(dependent_task.id))
+ for _, dependency_id in self._graph.out_edges_iter(dependent_task.id):
+ yield self.get_task(dependency_id)
+
+ def get_dependents(self, dependency_task):
+ """
+ Iterates over the task's dependents.
+
+ :param dependency_task: task whose dependents are requested
+ :raises ~aria.orchestrator.workflows.api.task_graph.TaskNotInGraphError: if
+ ``dependency_task`` is not in the graph
+ """
+ if not self.has_tasks(dependency_task):
+ raise TaskNotInGraphError('Task id: {0}'.format(dependency_task.id))
+ for dependent_id, _ in self._graph.in_edges_iter(dependency_task.id):
+ yield self.get_task(dependent_id)
+
+ # task methods
+
+ def get_task(self, task_id):
+ """
+        Gets a task instance that has been inserted into the graph, by the task's ID.
+
+ :param basestring task_id: task ID
+ :raises ~aria.orchestrator.workflows.api.task_graph.TaskNotInGraphError: if no task found in
+ the graph with the given ID
+ """
+ if not self._graph.has_node(task_id):
+ raise TaskNotInGraphError('Task id: {0}'.format(task_id))
+ data = self._graph.node[task_id]
+ return data['task']
+
+ @_filter_out_empty_tasks
+ def add_tasks(self, *tasks):
+ """
+        Adds tasks (or iterables of tasks) to the graph.
+
+        :param tasks: tasks
+        :return: list of added tasks
+        :rtype: list
+ """
+ assert all([isinstance(task, (api_task.BaseTask, Iterable)) for task in tasks])
+ return_tasks = []
+
+ for task in tasks:
+ if isinstance(task, Iterable):
+ return_tasks += self.add_tasks(*task)
+ elif not self.has_tasks(task):
+ self._graph.add_node(task.id, task=task)
+ return_tasks.append(task)
+
+ return return_tasks
+
+ @_filter_out_empty_tasks
+ def remove_tasks(self, *tasks):
+ """
+        Removes the provided tasks (or iterables of tasks) from the graph.
+
+        :param tasks: tasks
+        :return: list of removed tasks
+        :rtype: list
+ """
+ return_tasks = []
+
+ for task in tasks:
+ if isinstance(task, Iterable):
+ return_tasks += self.remove_tasks(*task)
+ elif self.has_tasks(task):
+ self._graph.remove_node(task.id)
+ return_tasks.append(task)
+
+ return return_tasks
+
+ @_filter_out_empty_tasks
+ def has_tasks(self, *tasks):
+ """
+        Checks whether the given tasks (or iterables of tasks) are in the graph.
+
+        :param tasks: tasks
+        :return: ``True`` if all tasks are in the graph, otherwise ``False``
+        :rtype: bool
+ """
+ assert all(isinstance(t, (api_task.BaseTask, Iterable)) for t in tasks)
+ return_value = True
+
+ for task in tasks:
+ if isinstance(task, Iterable):
+ return_value &= self.has_tasks(*task)
+ else:
+ return_value &= self._graph.has_node(task.id)
+
+ return return_value
+
+ def add_dependency(self, dependent, dependency):
+ """
+ Adds a dependency for one item (task, sequence or parallel) on another.
+
+ The dependent will only be executed after the dependency terminates. If either of the items
+ is either a sequence or a parallel, multiple dependencies may be added.
+
+ :param dependent: dependent (task, sequence or parallel)
+ :param dependency: dependency (task, sequence or parallel)
+ :return: ``True`` if the dependency between the two hadn't already existed, otherwise
+ ``False``
+ :rtype: bool
+ :raises ~aria.orchestrator.workflows.api.task_graph.TaskNotInGraphError: if either the
+ dependent or dependency are tasks which are not in the graph
+ """
+ if not (self.has_tasks(dependent) and self.has_tasks(dependency)):
+ raise TaskNotInGraphError()
+
+        if self.has_dependency(dependent, dependency):
+            return False
+
+        if isinstance(dependent, Iterable):
+            for dependent_task in dependent:
+                self.add_dependency(dependent_task, dependency)
+        else:
+            if isinstance(dependency, Iterable):
+                for dependency_task in dependency:
+                    self.add_dependency(dependent, dependency_task)
+            else:
+                self._graph.add_edge(dependent.id, dependency.id)
+
+        return True
+
+ def has_dependency(self, dependent, dependency):
+ """
+ Checks whether one item (task, sequence or parallel) depends on another.
+
+        Note that if either item is a sequence or a parallel, and only some of the dependencies
+        exist in the graph, this method will return ``False``.
+
+ :param dependent: dependent (task, sequence or parallel)
+ :param dependency: dependency (task, sequence or parallel)
+ :return: ``True`` if the dependency between the two exists, otherwise ``False``
+ :rtype: bool
+ :raises ~aria.orchestrator.workflows.api.task_graph.TaskNotInGraphError: if either the
+ dependent or dependency are tasks which are not in the graph
+ """
+ if not (dependent and dependency):
+ return False
+ elif not (self.has_tasks(dependent) and self.has_tasks(dependency)):
+ raise TaskNotInGraphError()
+
+ return_value = True
+
+ if isinstance(dependent, Iterable):
+ for dependent_task in dependent:
+ return_value &= self.has_dependency(dependent_task, dependency)
+ else:
+ if isinstance(dependency, Iterable):
+ for dependency_task in dependency:
+ return_value &= self.has_dependency(dependent, dependency_task)
+ else:
+ return_value &= self._graph.has_edge(dependent.id, dependency.id)
+
+ return return_value
+
+ def remove_dependency(self, dependent, dependency):
+ """
+ Removes a dependency for one item (task, sequence or parallel) on another.
+
+        Note that if either item is a sequence or a parallel, and only some of the dependencies
+        exist in the graph, this method will not remove any of them and will return ``False``.
+
+ :param dependent: dependent (task, sequence or parallel)
+ :param dependency: dependency (task, sequence or parallel)
+ :return: ``False`` if the dependency between the two hadn't existed, otherwise ``True``
+ :rtype: bool
+ :raises ~aria.orchestrator.workflows.api.task_graph.TaskNotInGraphError: if either the
+ dependent or dependency are tasks which are not in the graph
+ """
+ if not (self.has_tasks(dependent) and self.has_tasks(dependency)):
+ raise TaskNotInGraphError()
+
+        if not self.has_dependency(dependent, dependency):
+            return False
+
+        if isinstance(dependent, Iterable):
+            for dependent_task in dependent:
+                self.remove_dependency(dependent_task, dependency)
+        elif isinstance(dependency, Iterable):
+            for dependency_task in dependency:
+                self.remove_dependency(dependent, dependency_task)
+        else:
+            self._graph.remove_edge(dependent.id, dependency.id)
+
+        return True
+
+ @_filter_out_empty_tasks
+ def sequence(self, *tasks):
+ """
+        Creates and inserts a sequence into the graph: task ``i`` depends on task ``i-1``.
+
+        :param tasks: tasks to chain, in execution order
+        :return: provided tasks
+ """
+ if tasks:
+ self.add_tasks(*tasks)
+
+ for i in xrange(1, len(tasks)):
+ self.add_dependency(tasks[i], tasks[i-1])
+
+ return tasks
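A small usage sketch of the graph builder itself. BaseTask resolves the current workflow context when none is given, so outside a ``@workflow`` body a prepared ``workflow_ctx`` (assumed here) must be passed explicitly:

    from aria.orchestrator.workflows.api.task import StubTask
    from aria.orchestrator.workflows.api.task_graph import TaskGraph

    graph = TaskGraph(name='demo')
    a = StubTask(ctx=workflow_ctx)
    b = StubTask(ctx=workflow_ctx)
    c = StubTask(ctx=workflow_ctx)

    graph.sequence(a, b)             # b depends on a
    graph.add_tasks(c)
    graph.add_dependency(c, [a, b])  # c waits for both a and b

    assert graph.has_dependency(c, a)
    assert set(graph.get_dependencies(c)) == set([a, b])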
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/__init__.py
new file mode 100644
index 0000000..1b2f390
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/__init__.py
@@ -0,0 +1,36 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Built-in workflows.
+"""
+
+from .install import install
+from .uninstall import uninstall
+from .start import start
+from .stop import stop
+
+
+BUILTIN_WORKFLOWS = ('install', 'uninstall', 'start', 'stop')
+BUILTIN_WORKFLOWS_PATH_PREFIX = 'aria.orchestrator.workflows.builtin'
+
+
+__all__ = [
+ 'BUILTIN_WORKFLOWS',
+ 'install',
+ 'uninstall',
+ 'start',
+ 'stop'
+]
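One plausible way to resolve a built-in workflow by name from the exported constants (a sketch; the helper below is illustrative and not part of the package):

    from importlib import import_module

    from aria.orchestrator.workflows import builtin

    def resolve_builtin_workflow(name):
        # e.g. 'install' -> aria.orchestrator.workflows.builtin.install.install
        if name not in builtin.BUILTIN_WORKFLOWS:
            raise ValueError('unknown built-in workflow: {0}'.format(name))
        module = import_module(
            '{0}.{1}'.format(builtin.BUILTIN_WORKFLOWS_PATH_PREFIX, name))
        return getattr(module, name)

    install = resolve_builtin_workflow('install')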
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/execute_operation.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/execute_operation.py
new file mode 100644
index 0000000..949f864
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/execute_operation.py
@@ -0,0 +1,101 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Built-in operation execution workflow.
+"""
+
+from ... import workflow
+from ..api import task
+
+
+@workflow
+def execute_operation(
+ ctx,
+ graph,
+ interface_name,
+ operation_name,
+ operation_kwargs,
+ run_by_dependency_order,
+ type_names,
+ node_template_ids,
+ node_ids,
+ **kwargs):
+ """
+    Built-in operation execution workflow.
+
+    :param ctx: workflow context
+    :param graph: graph which will describe the workflow
+    :param interface_name: name of the interface whose operation will be executed
+    :param operation_name: name of the operation to execute
+    :param operation_kwargs: argument overrides for the operation
+    :param run_by_dependency_order: whether to sequence tasks by node dependencies
+    :param type_names: only run on nodes of these types (empty means all)
+    :param node_template_ids: only run on nodes of these node templates (empty means all)
+    :param node_ids: only run on these nodes (empty means all)
+ """
+ subgraphs = {}
+ # filtering node instances
+ filtered_nodes = list(_filter_nodes(
+ context=ctx,
+ node_template_ids=node_template_ids,
+ node_ids=node_ids,
+ type_names=type_names))
+
+ if run_by_dependency_order:
+ filtered_node_ids = set(node_instance.id for node_instance in filtered_nodes)
+ for node in ctx.nodes:
+ if node.id not in filtered_node_ids:
+ subgraphs[node.id] = ctx.task_graph(
+ name='execute_operation_stub_{0}'.format(node.id))
+
+    # registering actual tasks
+    for node in filtered_nodes:
+        operation_task = task.OperationTask(
+            node,
+            interface_name=interface_name,
+            operation_name=operation_name,
+            arguments=operation_kwargs
+        )
+        graph.add_tasks(operation_task)
+        # track the task so the dependency pass below can resolve every node
+        subgraphs[node.id] = operation_task
+
+ for _, node_sub_workflow in subgraphs.items():
+ graph.add_tasks(node_sub_workflow)
+
+    # adding task dependencies if required
+    if run_by_dependency_order:
+        for node in ctx.nodes:
+            for relationship in node.outbound_relationships:
+                graph.add_dependency(
+                    subgraphs[node.id], subgraphs[relationship.target_node.id])
+
+
+def _filter_nodes(context, node_template_ids=(), node_ids=(), type_names=()):
+ def _is_node_template_by_id(node_template_id):
+ return not node_template_ids or node_template_id in node_template_ids
+
+ def _is_node_by_id(node_id):
+ return not node_ids or node_id in node_ids
+
+    def _is_node_by_type(node_type):
+        return not type_names or node_type.name in type_names
+
+ for node in context.nodes:
+ if all((_is_node_template_by_id(node.node_template.id),
+ _is_node_by_id(node.id),
+ _is_node_by_type(node.node_template.type))):
+ yield node
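The three ``_is_node_*`` predicates share one rule: an empty filter list matches everything, while a non-empty list whitelists by membership. A standalone sketch of that rule:

    def _matches(value, whitelist):
        # an empty whitelist disables filtering; otherwise test membership
        return not whitelist or value in whitelist

    assert _matches('node_1', [])              # no filter: passes
    assert _matches('node_1', ['node_1'])      # whitelisted: passes
    assert not _matches('node_2', ['node_1'])  # not whitelisted: filtered out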
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/heal.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/heal.py
new file mode 100644
index 0000000..07e27b1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/heal.py
@@ -0,0 +1,179 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: skip-file
+
+"""
+Built-in heal workflow.
+"""
+
+from aria import workflow
+
+from .workflows import (install_node, uninstall_node)
+from ..api import task
+
+
+@workflow
+def heal(ctx, graph, node_id):
+ """
+    Built-in heal workflow.
+
+    :param ctx: workflow context
+    :param graph: graph which will describe the workflow
+    :param node_id: ID of the node to heal
+ """
+ failing_node = ctx.model.node.get(node_id)
+ host_node = ctx.model.node.get(failing_node.host.id)
+ failed_node_subgraph = _get_contained_subgraph(ctx, host_node)
+ failed_node_ids = list(n.id for n in failed_node_subgraph)
+
+ targeted_nodes = [node for node in ctx.nodes
+ if node.id not in failed_node_ids]
+
+ uninstall_subgraph = task.WorkflowTask(
+ heal_uninstall,
+ failing_nodes=failed_node_subgraph,
+ targeted_nodes=targeted_nodes
+ )
+
+ install_subgraph = task.WorkflowTask(
+ heal_install,
+ failing_nodes=failed_node_subgraph,
+ targeted_nodes=targeted_nodes)
+
+ graph.sequence(uninstall_subgraph, install_subgraph)
+
+
+@workflow(suffix_template='{failing_nodes}')
+def heal_uninstall(ctx, graph, failing_nodes, targeted_nodes):
+ """
+ Uninstall phase of the heal mechanism.
+
+ :param ctx: workflow context
+ :param graph: task graph to edit
+ :param failing_nodes: failing nodes to heal
+    :param targeted_nodes: unaffected nodes that are targets of relationships with the failing nodes
+ """
+ node_sub_workflows = {}
+
+    # Create a stub task for each unaffected node
+ for node in targeted_nodes:
+ node_stub = task.StubTask()
+ node_sub_workflows[node.id] = node_stub
+ graph.add_tasks(node_stub)
+
+    # Create an uninstall sub-workflow for every failing node
+ for node in failing_nodes:
+ node_sub_workflow = task.WorkflowTask(uninstall_node,
+ node=node)
+ node_sub_workflows[node.id] = node_sub_workflow
+ graph.add_tasks(node_sub_workflow)
+
+    # Create dependencies between the node sub-workflows
+ for node in failing_nodes:
+ node_sub_workflow = node_sub_workflows[node.id]
+ for relationship in reversed(node.outbound_relationships):
+ graph.add_dependency(
+ node_sub_workflows[relationship.target_node.id],
+ node_sub_workflow)
+
+    # Add unlink operations for intact nodes that depend on a failing node
+ for node in targeted_nodes:
+ node_sub_workflow = node_sub_workflows[node.id]
+
+ for relationship in reversed(node.outbound_relationships):
+
+ target_node = \
+ ctx.model.node.get(relationship.target_node.id)
+ target_node_subgraph = node_sub_workflows[target_node.id]
+ graph.add_dependency(target_node_subgraph, node_sub_workflow)
+
+            if target_node in failing_nodes:
+                # create_relationship_tasks() expects an interface name plus
+                # source/target operation names; the lifecycle interface split
+                # below is an assumption
+                dependency = task.create_relationship_tasks(
+                    relationship=relationship,
+                    interface_name='aria.interfaces.relationship_lifecycle',
+                    source_operation_name='unlink',
+                    target_operation_name='unlink')
+                graph.add_tasks(*dependency)
+                graph.add_dependency(node_sub_workflow, dependency)
+
+
+@workflow(suffix_template='{failing_nodes}')
+def heal_install(ctx, graph, failing_nodes, targeted_nodes):
+ """
+ Install phase of the heal mechanism.
+
+ :param ctx: workflow context
+ :param graph: task graph to edit.
+ :param failing_nodes: failing nodes to heal
+    :param targeted_nodes: unaffected nodes that are targets of relationships with the failing nodes
+ """
+ node_sub_workflows = {}
+
+    # Create a stub task for each unaffected node
+ for node in targeted_nodes:
+ node_stub = task.StubTask()
+ node_sub_workflows[node.id] = node_stub
+ graph.add_tasks(node_stub)
+
+    # Create an install sub-workflow for every failing node
+ for node in failing_nodes:
+ node_sub_workflow = task.WorkflowTask(install_node,
+ node=node)
+ node_sub_workflows[node.id] = node_sub_workflow
+ graph.add_tasks(node_sub_workflow)
+
+    # Create dependencies between the node sub-workflows
+ for node in failing_nodes:
+ node_sub_workflow = node_sub_workflows[node.id]
+ if node.outbound_relationships:
+ dependencies = \
+ [node_sub_workflows[relationship.target_node.id]
+ for relationship in node.outbound_relationships]
+ graph.add_dependency(node_sub_workflow, dependencies)
+
+    # Add establish operations for intact nodes that depend on a failing node
+ for node in targeted_nodes:
+ node_sub_workflow = node_sub_workflows[node.id]
+
+ for relationship in node.outbound_relationships:
+ target_node = ctx.model.node.get(
+ relationship.target_node.id)
+ target_node_subworkflow = node_sub_workflows[target_node.id]
+ graph.add_dependency(node_sub_workflow, target_node_subworkflow)
+
+            if target_node in failing_nodes:
+                # as in heal_uninstall(), the interface/operation split below
+                # is an assumption about the intended lifecycle interface
+                dependent = task.create_relationship_tasks(
+                    relationship=relationship,
+                    interface_name='aria.interfaces.relationship_lifecycle',
+                    source_operation_name='establish',
+                    target_operation_name='establish')
+                graph.add_tasks(*dependent)
+                graph.add_dependency(dependent, node_sub_workflow)
+
+
+def _get_contained_subgraph(context, host_node):
+ contained_instances = [node
+ for node in context.nodes
+ if node.host_fk == host_node.id and
+ node.host_fk != node.id]
+ result = [host_node]
+
+ if not contained_instances:
+ return result
+
+ result.extend(contained_instances)
+ for node in contained_instances:
+ result.extend(_get_contained_subgraph(context, node))
+
+ return set(result)
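A toy check of ``_get_contained_subgraph``'s transitive containment walk, using hypothetical minimal node and context stand-ins (the real models carry far more state):

    from aria.orchestrator.workflows.builtin.heal import _get_contained_subgraph

    class _Node(object):
        def __init__(self, id, host_fk):
            self.id, self.host_fk = id, host_fk

    class _Context(object):
        def __init__(self, nodes):
            self.nodes = nodes

    host = _Node(id=1, host_fk=1)        # a host node hosts itself
    child = _Node(id=2, host_fk=1)       # contained in the host
    grandchild = _Node(id=3, host_fk=2)  # contained in the child
    ctx = _Context([host, child, grandchild])

    assert _get_contained_subgraph(ctx, host) == set([host, child, grandchild])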
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/install.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/install.py
new file mode 100644
index 0000000..1e7c531
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/install.py
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Built-in install workflow.
+"""
+
+from ... import workflow
+from ..api import task as api_task
+from . import workflows
+
+
+@workflow
+def install(ctx, graph):
+ """
+ Built-in install workflow.
+ """
+ tasks_and_nodes = []
+ for node in ctx.nodes:
+ tasks_and_nodes.append((api_task.WorkflowTask(workflows.install_node, node=node), node))
+ graph.add_tasks([task for task, _ in tasks_and_nodes])
+ workflows.create_node_task_dependencies(graph, tasks_and_nodes)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/start.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/start.py
new file mode 100644
index 0000000..c02a26d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/start.py
@@ -0,0 +1,31 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Built-in start workflow.
+"""
+
+from .workflows import start_node
+from ... import workflow
+from ..api import task as api_task
+
+
+@workflow
+def start(ctx, graph):
+ """
+ Built-in start workflow.
+ """
+ for node in ctx.model.node.iter():
+ graph.add_tasks(api_task.WorkflowTask(start_node, node=node))
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/stop.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/stop.py
new file mode 100644
index 0000000..6f9930b
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/stop.py
@@ -0,0 +1,31 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Built-in stop workflow.
+"""
+
+from .workflows import stop_node
+from ..api import task as api_task
+from ... import workflow
+
+
+@workflow
+def stop(ctx, graph):
+ """
+ Built-in stop workflow.
+ """
+ for node in ctx.model.node.iter():
+ graph.add_tasks(api_task.WorkflowTask(stop_node, node=node))
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/uninstall.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/uninstall.py
new file mode 100644
index 0000000..7925f4b
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/uninstall.py
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Built-in uninstall workflow.
+"""
+
+from ... import workflow
+from ..api import task as api_task
+from . import workflows
+
+
+@workflow
+def uninstall(ctx, graph):
+ """
+ Built-in uninstall workflow.
+ """
+ tasks_and_nodes = []
+ for node in ctx.nodes:
+ tasks_and_nodes.append((api_task.WorkflowTask(workflows.uninstall_node, node=node), node))
+ graph.add_tasks([task for task, _ in tasks_and_nodes])
+ workflows.create_node_task_dependencies(graph, tasks_and_nodes, reverse=True)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/workflows.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/workflows.py
new file mode 100644
index 0000000..b286e98
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/builtin/workflows.py
@@ -0,0 +1,149 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+TOSCA normative lifecycle workflows.
+"""
+
+from ... import workflow
+from ..api import task
+
+
+NORMATIVE_STANDARD_INTERFACE = 'Standard' # 'tosca.interfaces.node.lifecycle.Standard'
+NORMATIVE_CONFIGURE_INTERFACE = 'Configure' # 'tosca.interfaces.relationship.Configure'
+
+NORMATIVE_CREATE = 'create'
+NORMATIVE_CONFIGURE = 'configure'
+NORMATIVE_START = 'start'
+NORMATIVE_STOP = 'stop'
+NORMATIVE_DELETE = 'delete'
+
+NORMATIVE_PRE_CONFIGURE_SOURCE = 'pre_configure_source'
+NORMATIVE_PRE_CONFIGURE_TARGET = 'pre_configure_target'
+NORMATIVE_POST_CONFIGURE_SOURCE = 'post_configure_source'
+NORMATIVE_POST_CONFIGURE_TARGET = 'post_configure_target'
+
+NORMATIVE_ADD_SOURCE = 'add_source'
+NORMATIVE_ADD_TARGET = 'add_target'
+NORMATIVE_REMOVE_TARGET = 'remove_target'
+NORMATIVE_REMOVE_SOURCE = 'remove_source'
+NORMATIVE_TARGET_CHANGED = 'target_changed'
+
+
+__all__ = (
+ 'NORMATIVE_STANDARD_INTERFACE',
+ 'NORMATIVE_CONFIGURE_INTERFACE',
+ 'NORMATIVE_CREATE',
+ 'NORMATIVE_START',
+ 'NORMATIVE_STOP',
+ 'NORMATIVE_DELETE',
+ 'NORMATIVE_CONFIGURE',
+ 'NORMATIVE_PRE_CONFIGURE_SOURCE',
+ 'NORMATIVE_PRE_CONFIGURE_TARGET',
+ 'NORMATIVE_POST_CONFIGURE_SOURCE',
+ 'NORMATIVE_POST_CONFIGURE_TARGET',
+ 'NORMATIVE_ADD_SOURCE',
+ 'NORMATIVE_ADD_TARGET',
+ 'NORMATIVE_REMOVE_SOURCE',
+ 'NORMATIVE_REMOVE_TARGET',
+ 'NORMATIVE_TARGET_CHANGED',
+ 'install_node',
+ 'uninstall_node',
+ 'start_node',
+ 'stop_node',
+)
+
+
+@workflow(suffix_template='{node.name}')
+def install_node(graph, node, **kwargs):
+ # Create
+ sequence = [task.create_task(node, NORMATIVE_STANDARD_INTERFACE, NORMATIVE_CREATE)]
+
+ # Configure
+ sequence += task.create_relationships_tasks(node,
+ NORMATIVE_CONFIGURE_INTERFACE,
+ NORMATIVE_PRE_CONFIGURE_SOURCE,
+ NORMATIVE_PRE_CONFIGURE_TARGET)
+ sequence.append(task.create_task(node, NORMATIVE_STANDARD_INTERFACE, NORMATIVE_CONFIGURE))
+ sequence += task.create_relationships_tasks(node,
+ NORMATIVE_CONFIGURE_INTERFACE,
+ NORMATIVE_POST_CONFIGURE_SOURCE,
+ NORMATIVE_POST_CONFIGURE_TARGET)
+ # Start
+ sequence += _create_start_tasks(node)
+
+ graph.sequence(*sequence)
+
+
+@workflow(suffix_template='{node.name}')
+def uninstall_node(graph, node, **kwargs):
+ # Stop
+ sequence = _create_stop_tasks(node)
+
+ # Delete
+ sequence.append(task.create_task(node, NORMATIVE_STANDARD_INTERFACE, NORMATIVE_DELETE))
+
+ graph.sequence(*sequence)
+
+
+@workflow(suffix_template='{node.name}')
+def start_node(graph, node, **kwargs):
+ graph.sequence(*_create_start_tasks(node))
+
+
+@workflow(suffix_template='{node.name}')
+def stop_node(graph, node, **kwargs):
+ graph.sequence(*_create_stop_tasks(node))
+
+
+def _create_start_tasks(node):
+ sequence = [task.create_task(node, NORMATIVE_STANDARD_INTERFACE, NORMATIVE_START)]
+ sequence += task.create_relationships_tasks(node,
+ NORMATIVE_CONFIGURE_INTERFACE,
+ NORMATIVE_ADD_SOURCE, NORMATIVE_ADD_TARGET)
+ return sequence
+
+
+def _create_stop_tasks(node):
+ sequence = [task.create_task(node, NORMATIVE_STANDARD_INTERFACE, NORMATIVE_STOP)]
+ sequence += task.create_relationships_tasks(node,
+ NORMATIVE_CONFIGURE_INTERFACE,
+ NORMATIVE_REMOVE_SOURCE, NORMATIVE_REMOVE_TARGET)
+ return sequence
+
+
+def create_node_task_dependencies(graph, tasks_and_nodes, reverse=False):
+ """
+ Creates dependencies between tasks if there is a relationship (outbound) between their nodes.
+ """
+
+ def get_task(node_name):
+ for api_task, task_node in tasks_and_nodes:
+ if task_node.name == node_name:
+ return api_task
+ return None
+
+ for api_task, node in tasks_and_nodes:
+ dependencies = []
+ for relationship in node.outbound_relationships:
+ dependency = get_task(relationship.target_node.name)
+ if dependency:
+ dependencies.append(dependency)
+ if dependencies:
+ if reverse:
+ for dependency in dependencies:
+ graph.add_dependency(dependency, api_task)
+ else:
+ graph.add_dependency(api_task, dependencies)
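A self-contained sketch of what ``create_node_task_dependencies`` produces for a two-node model in which 'app' has an outbound relationship to 'db' (hypothetical names; the tasks are opaque placeholders and the graph is a recorder rather than a real TaskGraph):

    from aria.orchestrator.workflows.builtin import workflows

    class _Relationship(object):
        def __init__(self, target_node):
            self.target_node = target_node

    class _Node(object):
        def __init__(self, name, targets=()):
            self.name = name
            self.outbound_relationships = [_Relationship(t) for t in targets]

    class _RecordingGraph(object):
        def __init__(self):
            self.calls = []

        def add_dependency(self, dependent, dependency):
            self.calls.append((dependent, dependency))

    db = _Node('db')
    app = _Node('app', targets=[db])
    graph = _RecordingGraph()

    workflows.create_node_task_dependencies(
        graph, [('app-task', app), ('db-task', db)])
    assert graph.calls == [('app-task', ['db-task'])]  # install direction
    # with reverse=True (as uninstall uses) each edge flips:
    # [('db-task', 'app-task')]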
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/core/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/core/__init__.py
new file mode 100644
index 0000000..3f28136
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/core/__init__.py
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Workflow core.
+"""
+
+from . import engine
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/core/engine.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/core/engine.py
new file mode 100644
index 0000000..0ec3cd8
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/core/engine.py
@@ -0,0 +1,185 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Workflow execution.
+"""
+
+import time
+from datetime import datetime
+
+from aria import logger
+from aria.modeling import models
+from aria.orchestrator import events
+from aria.orchestrator.context import operation
+
+from .. import exceptions
+from ..executor.base import StubTaskExecutor
+# Import required so all signals are registered
+from . import events_handler # pylint: disable=unused-import
+
+
+class Engine(logger.LoggerMixin):
+ """
+ Executes workflows.
+ """
+
+ def __init__(self, executors, **kwargs):
+ super(Engine, self).__init__(**kwargs)
+ self._executors = executors.copy()
+ self._executors.setdefault(StubTaskExecutor, StubTaskExecutor())
+
+ def execute(self, ctx, resuming=False, retry_failed=False):
+ """
+ Executes the workflow.
+ """
+ if resuming:
+ events.on_resume_workflow_signal.send(ctx, retry_failed=retry_failed)
+
+ tasks_tracker = _TasksTracker(ctx)
+
+ try:
+ events.start_workflow_signal.send(ctx)
+ while True:
+ cancel = self._is_cancel(ctx)
+ if cancel:
+ break
+ for task in tasks_tracker.ended_tasks:
+ self._handle_ended_tasks(task)
+ tasks_tracker.finished(task)
+ for task in tasks_tracker.executable_tasks:
+ tasks_tracker.executing(task)
+ self._handle_executable_task(ctx, task)
+ if tasks_tracker.all_tasks_consumed:
+ break
+ else:
+ time.sleep(0.1)
+ if cancel:
+ self._terminate_tasks(tasks_tracker.executing_tasks)
+ events.on_cancelled_workflow_signal.send(ctx)
+ else:
+ events.on_success_workflow_signal.send(ctx)
+ except BaseException as e:
+ # Cleanup any remaining tasks
+ self._terminate_tasks(tasks_tracker.executing_tasks)
+ events.on_failure_workflow_signal.send(ctx, exception=e)
+ raise
+
+ def _terminate_tasks(self, tasks):
+ for task in tasks:
+ try:
+ self._executors[task._executor].terminate(task.id)
+ except BaseException:
+ pass
+
+ @staticmethod
+ def cancel_execution(ctx):
+ """
+        Sends a cancel request to the engine. If the execution has already started, its status is
+        set to ``cancelling``; if it is still pending, it is set to ``cancelled`` directly.
+ """
+ events.on_cancelling_workflow_signal.send(ctx)
+
+ @staticmethod
+ def _is_cancel(ctx):
+ execution = ctx.model.execution.refresh(ctx.execution)
+ return execution.status in (models.Execution.CANCELLING, models.Execution.CANCELLED)
+
+ def _handle_executable_task(self, ctx, task):
+ task_executor = self._executors[task._executor]
+
+        # If the task is a stub, use the base operation context; otherwise use the context class
+        # that the task carries
+ context_cls = operation.BaseOperationContext if task._stub_type else task._context_cls
+ op_ctx = context_cls(
+ model_storage=ctx.model,
+ resource_storage=ctx.resource,
+ workdir=ctx._workdir,
+ task_id=task.id,
+ actor_id=task.actor.id if task.actor else None,
+ service_id=task.execution.service.id,
+ execution_id=task.execution.id,
+ name=task.name
+ )
+
+ if not task._stub_type:
+ events.sent_task_signal.send(op_ctx)
+ task_executor.execute(op_ctx)
+
+ @staticmethod
+ def _handle_ended_tasks(task):
+ if task.status == models.Task.FAILED and not task.ignore_failure:
+ raise exceptions.ExecutorException('Workflow failed')
+
+
+class _TasksTracker(object):
+
+ def __init__(self, ctx):
+ self._ctx = ctx
+
+ self._tasks = ctx.execution.tasks
+ self._executed_tasks = [task for task in self._tasks if task.has_ended()]
+ self._executable_tasks = list(set(self._tasks) - set(self._executed_tasks))
+ self._executing_tasks = []
+
+ @property
+ def all_tasks_consumed(self):
+ return len(self._executed_tasks) == len(self._tasks) and len(self._executing_tasks) == 0
+
+ def executing(self, task):
+        # The task may be a retry, in which case it was already removed and re-added earlier
+ if task not in self._executing_tasks:
+ self._executable_tasks.remove(task)
+ self._executing_tasks.append(task)
+
+ def finished(self, task):
+ self._executing_tasks.remove(task)
+ self._executed_tasks.append(task)
+
+ @property
+ def ended_tasks(self):
+ for task in self.executing_tasks:
+ if task.has_ended():
+ yield task
+
+ @property
+ def executable_tasks(self):
+ now = datetime.utcnow()
+        # both lists are needed, since retrying tasks remain in the executing list
+ for task in self._update_tasks(set(self._executing_tasks + self._executable_tasks)):
+ if all([task.is_waiting(),
+ task.due_at <= now,
+ all(dependency in self._executed_tasks for dependency in task.dependencies)
+ ]):
+ yield task
+
+ @property
+ def executing_tasks(self):
+ for task in self._update_tasks(self._executing_tasks):
+ yield task
+
+ @property
+ def executed_tasks(self):
+ for task in self._update_tasks(self._executed_tasks):
+ yield task
+
+ @property
+ def tasks(self):
+ for task in self._update_tasks(self._tasks):
+ yield task
+
+ def _update_tasks(self, tasks):
+ for task in tasks:
+ yield self._ctx.model.task.refresh(task)
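A minimal sketch of driving the engine directly (normally a workflow runner does this); ``workflow_ctx`` is assumed to be a prepared workflow context whose execution already holds compiled tasks, and the executors dict is keyed by executor class, matching the ``task._executor`` lookup above:

    from aria.orchestrator.workflows.core import engine
    from aria.orchestrator.workflows.executor import thread

    executor = thread.ThreadExecutor()
    try:
        eng = engine.Engine(executors={executor.__class__: executor})
        eng.execute(workflow_ctx)
    finally:
        executor.close()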
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/core/events_handler.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/core/events_handler.py
new file mode 100644
index 0000000..473475e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/core/events_handler.py
@@ -0,0 +1,170 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Workflow event handling.
+"""
+
+from datetime import (
+ datetime,
+ timedelta,
+)
+
+from ... import events
+from ... import exceptions
+
+
+@events.sent_task_signal.connect
+def _task_sent(ctx, *args, **kwargs):
+ with ctx.persist_changes:
+ ctx.task.status = ctx.task.SENT
+
+
+@events.start_task_signal.connect
+def _task_started(ctx, *args, **kwargs):
+ with ctx.persist_changes:
+ ctx.task.started_at = datetime.utcnow()
+ ctx.task.status = ctx.task.STARTED
+ _update_node_state_if_necessary(ctx, is_transitional=True)
+
+
+@events.on_failure_task_signal.connect
+def _task_failed(ctx, exception, *args, **kwargs):
+ with ctx.persist_changes:
+ should_retry = all([
+ not isinstance(exception, exceptions.TaskAbortException),
+ ctx.task.attempts_count < ctx.task.max_attempts or
+ ctx.task.max_attempts == ctx.task.INFINITE_RETRIES,
+ # ignore_failure check here means the task will not be retried and it will be marked
+ # as failed. The engine will also look at ignore_failure so it won't fail the
+ # workflow.
+ not ctx.task.ignore_failure
+ ])
+ if should_retry:
+ retry_interval = None
+ if isinstance(exception, exceptions.TaskRetryException):
+ retry_interval = exception.retry_interval
+ if retry_interval is None:
+ retry_interval = ctx.task.retry_interval
+ ctx.task.status = ctx.task.RETRYING
+ ctx.task.attempts_count += 1
+ ctx.task.due_at = datetime.utcnow() + timedelta(seconds=retry_interval)
+ else:
+ ctx.task.ended_at = datetime.utcnow()
+ ctx.task.status = ctx.task.FAILED
+
+
+@events.on_success_task_signal.connect
+def _task_succeeded(ctx, *args, **kwargs):
+ with ctx.persist_changes:
+ ctx.task.ended_at = datetime.utcnow()
+ ctx.task.status = ctx.task.SUCCESS
+ ctx.task.attempts_count += 1
+
+ _update_node_state_if_necessary(ctx)
+
+
+@events.start_workflow_signal.connect
+def _workflow_started(workflow_context, *args, **kwargs):
+ with workflow_context.persist_changes:
+ execution = workflow_context.execution
+ # the execution may already be in the process of cancelling
+ if execution.status in (execution.CANCELLING, execution.CANCELLED):
+ return
+ execution.status = execution.STARTED
+ execution.started_at = datetime.utcnow()
+
+
+@events.on_failure_workflow_signal.connect
+def _workflow_failed(workflow_context, exception, *args, **kwargs):
+ with workflow_context.persist_changes:
+ execution = workflow_context.execution
+ execution.error = str(exception)
+ execution.status = execution.FAILED
+ execution.ended_at = datetime.utcnow()
+
+
+@events.on_success_workflow_signal.connect
+def _workflow_succeeded(workflow_context, *args, **kwargs):
+ with workflow_context.persist_changes:
+ execution = workflow_context.execution
+ execution.status = execution.SUCCEEDED
+ execution.ended_at = datetime.utcnow()
+
+
+@events.on_cancelled_workflow_signal.connect
+def _workflow_cancelled(workflow_context, *args, **kwargs):
+ with workflow_context.persist_changes:
+ execution = workflow_context.execution
+ # _workflow_cancelling function may have called this function already
+ if execution.status == execution.CANCELLED:
+ return
+ # the execution may have already been finished
+ elif execution.status in (execution.SUCCEEDED, execution.FAILED):
+ _log_tried_to_cancel_execution_but_it_already_ended(workflow_context, execution.status)
+ else:
+ execution.status = execution.CANCELLED
+ execution.ended_at = datetime.utcnow()
+
+
+@events.on_resume_workflow_signal.connect
+def _workflow_resume(workflow_context, retry_failed=False, *args, **kwargs):
+ with workflow_context.persist_changes:
+ execution = workflow_context.execution
+ execution.status = execution.PENDING
+ # Any non ended task would be put back to pending state
+ for task in execution.tasks:
+ if not task.has_ended():
+ task.status = task.PENDING
+
+ if retry_failed:
+ for task in execution.tasks:
+ if task.status == task.FAILED and not task.ignore_failure:
+ task.attempts_count = 0
+ task.status = task.PENDING
+
+
+@events.on_cancelling_workflow_signal.connect
+def _workflow_cancelling(workflow_context, *args, **kwargs):
+ with workflow_context.persist_changes:
+ execution = workflow_context.execution
+ if execution.status == execution.PENDING:
+ return _workflow_cancelled(workflow_context=workflow_context)
+ # the execution may have already been finished
+ elif execution.status in (execution.SUCCEEDED, execution.FAILED):
+ _log_tried_to_cancel_execution_but_it_already_ended(workflow_context, execution.status)
+ else:
+ execution.status = execution.CANCELLING
+
+
+def _update_node_state_if_necessary(ctx, is_transitional=False):
+ # TODO: this is not the right way to check! the interface name is arbitrary
+ # and also will *never* be the type name
+ node = ctx.task.node if ctx.task is not None else None
+ if (node is not None) and \
+ (ctx.task.interface_name in ('Standard', 'tosca.interfaces.node.lifecycle.Standard',
+ 'tosca:Standard')):
+ state = node.determine_state(op_name=ctx.task.operation_name,
+ is_transitional=is_transitional)
+ if state:
+ node.state = state
+ ctx.model.node.update(node)
+
+
+def _log_tried_to_cancel_execution_but_it_already_ended(workflow_context, status):
+ workflow_context.logger.info(
+ "'{workflow_name}' workflow execution {status} before the cancel request"
+ "was fully processed".format(workflow_name=workflow_context.workflow_name, status=status))
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/core/graph_compiler.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/core/graph_compiler.py
new file mode 100644
index 0000000..81543d5
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/core/graph_compiler.py
@@ -0,0 +1,118 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ....modeling import models
+from .. import executor, api
+
+
+class GraphCompiler(object):
+ def __init__(self, ctx, default_executor):
+ self._ctx = ctx
+ self._default_executor = default_executor
+ self._stub_executor = executor.base.StubTaskExecutor
+ self._model_to_api_id = {}
+
+ def compile(self,
+ task_graph,
+ start_stub_type=models.Task.START_WORKFLOW,
+ end_stub_type=models.Task.END_WORKFLOW,
+ depends_on=()):
+ """
+        Translates the user's task graph into the stored execution graph.
+
+        :param task_graph: the user's graph
+        :param start_stub_type: internal use
+        :param end_stub_type: internal use
+        :param depends_on: internal use
+ """
+ depends_on = list(depends_on)
+
+ # Insert start marker
+ start_task = self._create_stub_task(
+ start_stub_type, depends_on, self._start_graph_suffix(task_graph.id), task_graph.name,
+ )
+
+ for task in task_graph.topological_order(reverse=True):
+ dependencies = \
+ (self._get_tasks_from_dependencies(task_graph.get_dependencies(task))
+ or [start_task])
+
+ if isinstance(task, api.task.OperationTask):
+ self._create_operation_task(task, dependencies)
+
+            elif isinstance(task, api.task.WorkflowTask):
+                # Build the graph recursively while adding start and end markers
+                # (START_SUBWROFKLOW is the constant's actual spelling in models.Task)
+                self.compile(
+                    task, models.Task.START_SUBWROFKLOW, models.Task.END_SUBWORKFLOW, dependencies
+                )
+ elif isinstance(task, api.task.StubTask):
+ self._create_stub_task(models.Task.STUB, dependencies, task.id)
+ else:
+ raise RuntimeError('Undefined state')
+
+ # Insert end marker
+ self._create_stub_task(
+ end_stub_type,
+ self._get_non_dependent_tasks(self._ctx.execution) or [start_task],
+ self._end_graph_suffix(task_graph.id),
+ task_graph.name
+ )
+
+ def _create_stub_task(self, stub_type, dependencies, api_id, name=None):
+ model_task = models.Task(
+ name=name,
+ dependencies=dependencies,
+ execution=self._ctx.execution,
+ _executor=self._stub_executor,
+ _stub_type=stub_type)
+ self._ctx.model.task.put(model_task)
+ self._model_to_api_id[model_task.id] = api_id
+ return model_task
+
+ def _create_operation_task(self, api_task, dependencies):
+ model_task = models.Task.from_api_task(
+ api_task, self._default_executor, dependencies=dependencies)
+ self._ctx.model.task.put(model_task)
+ self._model_to_api_id[model_task.id] = api_task.id
+ return model_task
+
+ @staticmethod
+ def _start_graph_suffix(api_id):
+ return '{0}-Start'.format(api_id)
+
+ @staticmethod
+ def _end_graph_suffix(api_id):
+ return '{0}-End'.format(api_id)
+
+ @staticmethod
+ def _get_non_dependent_tasks(execution):
+ tasks_with_dependencies = set()
+ for task in execution.tasks:
+ tasks_with_dependencies.update(task.dependencies)
+ return list(set(execution.tasks) - set(tasks_with_dependencies))
+
+ def _get_tasks_from_dependencies(self, dependencies):
+ """
+ Returns task list from dependencies.
+ """
+ tasks = []
+ for dependency in dependencies:
+ if isinstance(dependency, (api.task.StubTask, api.task.OperationTask)):
+ dependency_name = dependency.id
+ else:
+ dependency_name = self._end_graph_suffix(dependency.id)
+ tasks.extend(task for task in self._ctx.execution.tasks
+ if self._model_to_api_id.get(task.id, None) == dependency_name)
+ return tasks
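A sketch of compiling an api-level task graph into stored model tasks; ``workflow_ctx`` and ``api_graph`` are assumed to come from running a ``@workflow`` function, and the default executor is passed as a class, matching how ``models.Task.from_api_task`` is used above:

    from aria.orchestrator.workflows.core import graph_compiler
    from aria.orchestrator.workflows.executor import thread

    compiler = graph_compiler.GraphCompiler(workflow_ctx, thread.ThreadExecutor)
    compiler.compile(api_graph)
    # workflow_ctx.execution.tasks now contains a START_WORKFLOW stub, one
    # model task per api OperationTask (plus sub-workflow/stub markers), and
    # an END_WORKFLOW stub, wired according to the api graph's dependencies.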
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/events_logging.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/events_logging.py
new file mode 100644
index 0000000..9eee1e1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/events_logging.py
@@ -0,0 +1,85 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Workflow event logging.
+"""
+
+from .. import events
+from ... import modeling
+
+
+def _get_task_name(task):
+ if isinstance(task.actor, modeling.model_bases.service_instance.RelationshipBase):
+ return '{source_node.name}->{target_node.name}'.format(
+ source_node=task.actor.source_node, target_node=task.actor.target_node)
+ else:
+ return task.actor.name
+
+
+@events.start_task_signal.connect
+def _start_task_handler(ctx, **kwargs):
+ # If the task has no function this is an empty task.
+ if ctx.task.function:
+ suffix = 'started...'
+ logger = ctx.logger.info
+ else:
+ suffix = 'has no implementation'
+ logger = ctx.logger.debug
+
+ logger('{name} {task.interface_name}.{task.operation_name} {suffix}'.format(
+ name=_get_task_name(ctx.task), task=ctx.task, suffix=suffix))
+
+
+@events.on_success_task_signal.connect
+def _success_task_handler(ctx, **kwargs):
+ if not ctx.task.function:
+ return
+ ctx.logger.info('{name} {task.interface_name}.{task.operation_name} successful'
+ .format(name=_get_task_name(ctx.task), task=ctx.task))
+
+
+@events.on_failure_task_signal.connect
+def _failure_operation_handler(ctx, traceback, **kwargs):
+ ctx.logger.error(
+ '{name} {task.interface_name}.{task.operation_name} failed'
+ .format(name=_get_task_name(ctx.task), task=ctx.task), extra=dict(traceback=traceback)
+ )
+
+
+@events.start_workflow_signal.connect
+def _start_workflow_handler(context, **kwargs):
+ context.logger.info("Starting '{ctx.workflow_name}' workflow execution".format(ctx=context))
+
+
+@events.on_failure_workflow_signal.connect
+def _failure_workflow_handler(context, **kwargs):
+ context.logger.info("'{ctx.workflow_name}' workflow execution failed".format(ctx=context))
+
+
+@events.on_success_workflow_signal.connect
+def _success_workflow_handler(context, **kwargs):
+ context.logger.info("'{ctx.workflow_name}' workflow execution succeeded".format(ctx=context))
+
+
+@events.on_cancelled_workflow_signal.connect
+def _cancel_workflow_handler(context, **kwargs):
+ context.logger.info("'{ctx.workflow_name}' workflow execution canceled".format(ctx=context))
+
+
+@events.on_cancelling_workflow_signal.connect
+def _cancelling_workflow_handler(context, **kwargs):
+ context.logger.info("Cancelling '{ctx.workflow_name}' workflow execution".format(ctx=context))
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/exceptions.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/exceptions.py
new file mode 100644
index 0000000..2a1d6b1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/exceptions.py
@@ -0,0 +1,91 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Workflow exceptions.
+"""
+
+import os
+
+from .. import exceptions
+
+
+class ExecutorException(exceptions.AriaError):
+ """
+ General executor exception.
+ """
+ pass
+
+
+class ProcessException(ExecutorException):
+ """
+ Raised when subprocess execution fails.
+ """
+
+ def __init__(self, command, stderr=None, stdout=None, return_code=None):
+ """
+ Raised when a child process fails.
+
+ :param list command: child process command
+ :param str stderr: child process stderr
+ :param str stdout: child process stdout
+ :param int return_code: child process exit code
+ """
+ super(ProcessException, self).__init__("child process failed")
+ self.command = command
+ self.stderr = stderr
+ self.stdout = stdout
+ self.return_code = return_code
+
+ @property
+ def explanation(self):
+ """
+ Describes the error in detail
+ """
+ return (
+ 'Command "{error.command}" executed with an error.{0}'
+ 'code: {error.return_code}{0}'
+ 'error: {error.stderr}{0}'
+ 'output: {error.stdout}'.format(os.linesep, error=self))
+
+
+class AriaEngineError(exceptions.AriaError):
+ """
+ Raised by the workflow engine.
+ """
+
+
+class TaskException(exceptions.AriaError):
+ """
+ Raised by the task.
+ """
+
+
+class TaskCreationException(TaskException):
+ """
+ Could not create the task.
+ """
+
+
+class OperationNotFoundException(TaskCreationException):
+ """
+ Could not find an operation on the node or relationship.
+ """
+
+
+class PluginNotFoundException(TaskCreationException):
+ """
+ Could not find a plugin matching the plugin specification.
+ """
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/__init__.py
new file mode 100644
index 0000000..cafab74
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/__init__.py
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Task executors.
+"""
+
+
+from . import process, thread
+from .base import BaseExecutor
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/base.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/base.py
new file mode 100644
index 0000000..e7d03ea
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/base.py
@@ -0,0 +1,75 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Base class for task executors.
+"""
+
+from aria import logger
+from aria.orchestrator import events
+
+
+class BaseExecutor(logger.LoggerMixin):
+ """
+ Base class for task executors.
+ """
+ def _execute(self, ctx):
+ raise NotImplementedError
+
+ def execute(self, ctx):
+ """
+ Executes a task.
+
+ :param ctx: context of the task to execute
+ """
+ if ctx.task.function:
+ self._execute(ctx)
+ else:
+ # In this case the task is missing a function. This task still gets to an
+ # executor, but since there is nothing to run, we by default simply skip the
+ # execution itself.
+ self._task_started(ctx)
+ self._task_succeeded(ctx)
+
+ def close(self):
+ """
+ Closes the executor.
+ """
+ pass
+
+ def terminate(self, task_id):
+ """
+ Terminates the executing task.
+ """
+ pass
+
+ @staticmethod
+ def _task_started(ctx):
+ events.start_task_signal.send(ctx)
+
+ @staticmethod
+ def _task_failed(ctx, exception, traceback=None):
+ events.on_failure_task_signal.send(ctx, exception=exception, traceback=traceback)
+
+ @staticmethod
+ def _task_succeeded(ctx):
+ events.on_success_task_signal.send(ctx)
+
+
+class StubTaskExecutor(BaseExecutor): # pylint: disable=abstract-method
+ def execute(self, ctx, *args, **kwargs):
+ with ctx.persist_changes:
+ ctx.task.status = ctx.task.SUCCESS
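+
+
+# Subclassing sketch (hypothetical): a concrete executor implements _execute()
+# and reports status through the signal helpers, e.g.:
+#
+#     class InlineExecutor(BaseExecutor):
+#         def _execute(self, ctx):
+#             self._task_started(ctx)
+#             try:
+#                 ...  # run ctx.task.function with ctx.task.arguments
+#                 self._task_succeeded(ctx)
+#             except BaseException as e:
+#                 self._task_failed(ctx, exception=e)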
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/celery.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/celery.py
new file mode 100644
index 0000000..a2b3513
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/celery.py
@@ -0,0 +1,97 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Celery task executor.
+"""
+
+import threading
+import Queue
+
+from aria.orchestrator.workflows.executor import BaseExecutor
+
+
+class CeleryExecutor(BaseExecutor):
+ """
+ Celery task executor.
+ """
+
+ def __init__(self, app, *args, **kwargs):
+ super(CeleryExecutor, self).__init__(*args, **kwargs)
+ self._app = app
+ self._started_signaled = False
+ self._started_queue = Queue.Queue(maxsize=1)
+ self._tasks = {}
+ self._results = {}
+ self._receiver = None
+ self._stopped = False
+ self._receiver_thread = threading.Thread(target=self._events_receiver)
+ self._receiver_thread.daemon = True
+ self._receiver_thread.start()
+ self._started_queue.get(timeout=30)
+
+ def _execute(self, ctx):
+ self._tasks[ctx.id] = ctx
+ arguments = dict(arg.unwrapped for arg in ctx.task.arguments.itervalues())
+ arguments['ctx'] = ctx.context
+ self._results[ctx.id] = self._app.send_task(
+ ctx.operation_mapping,
+ kwargs=arguments,
+ task_id=ctx.task.id,
+ queue=self._get_queue(ctx))
+
+ def close(self):
+ self._stopped = True
+ if self._receiver:
+ self._receiver.should_stop = True
+ self._receiver_thread.join()
+
+ @staticmethod
+ def _get_queue(task):
+ return None # TODO: implement per-task queue routing
+
+ def _events_receiver(self):
+ with self._app.connection() as connection:
+ self._receiver = self._app.events.Receiver(connection, handlers={
+ 'task-started': self._celery_task_started,
+ 'task-succeeded': self._celery_task_succeeded,
+ 'task-failed': self._celery_task_failed,
+ })
+ for _ in self._receiver.itercapture(limit=None, timeout=None, wakeup=True):
+ if not self._started_signaled:
+ self._started_queue.put(True)
+ self._started_signaled = True
+ if self._stopped:
+ return
+
+ def _celery_task_started(self, event):
+ self._task_started(self._tasks[event['uuid']])
+
+ def _celery_task_succeeded(self, event):
+ task, _ = self._remove_task(event['uuid'])
+ self._task_succeeded(task)
+
+ def _celery_task_failed(self, event):
+ task, async_result = self._remove_task(event['uuid'])
+ try:
+ exception = async_result.result
+ except BaseException as e:
+ exception = RuntimeError(
+ 'Could not de-serialize exception of task {0} --> {1}: {2}'
+ .format(task.name, type(e).__name__, str(e)))
+ self._task_failed(task, exception=exception)
+
+ def _remove_task(self, task_id):
+ return self._tasks.pop(task_id), self._results.pop(task_id)
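+
+
+# Wiring sketch (hypothetical; the broker URL is illustrative). Note that the
+# constructor blocks for up to 30 seconds while the events receiver starts:
+#
+#     from celery import Celery
+#     executor = CeleryExecutor(app=Celery(broker='amqp://localhost'))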
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/dry.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/dry.py
new file mode 100644
index 0000000..9314e5d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/dry.py
@@ -0,0 +1,54 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Dry task executor.
+"""
+
+from datetime import datetime
+
+from . import base
+
+
+class DryExecutor(base.BaseExecutor): # pylint: disable=abstract-method
+ """
+ Dry task executor: prints task information without causing any side effects.
+ """
+ def execute(self, ctx):
+ with ctx.persist_changes:
+ # updating the task manually instead of calling self._task_started(task),
+ # to avoid any side effects raising that event might cause
+ ctx.task.started_at = datetime.utcnow()
+ ctx.task.status = ctx.task.STARTED
+
+ dry_msg = '<dry> {name} {task.interface_name}.{task.operation_name} {suffix}'
+ logger = ctx.logger.info if ctx.task.function else ctx.logger.debug
+
+ if hasattr(ctx.task.actor, 'source_node'):
+ name = '{source_node.name}->{target_node.name}'.format(
+ source_node=ctx.task.actor.source_node, target_node=ctx.task.actor.target_node)
+ else:
+ name = ctx.task.actor.name
+
+ if ctx.task.function:
+ logger(dry_msg.format(name=name, task=ctx.task, suffix='started...'))
+ logger(dry_msg.format(name=name, task=ctx.task, suffix='successful'))
+ else:
+ logger(dry_msg.format(name=name, task=ctx.task, suffix='has no implementation'))
+
+ # updating the task manually instead of calling self._task_succeeded(task),
+ # to avoid any side effects raising that event might cause
+ ctx.task.ended_at = datetime.utcnow()
+ ctx.task.status = ctx.task.SUCCESS
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/process.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/process.py
new file mode 100644
index 0000000..185f15f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/process.py
@@ -0,0 +1,350 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Sub-process task executor.
+"""
+
+# pylint: disable=wrong-import-position
+
+import os
+import sys
+
+# As part of the process executor implementation, subprocesses are started with this module as
+# their entry point. We thus remove this module's directory from the Python path if it happens
+# to be there.
+
+from collections import namedtuple
+
+script_dir = os.path.dirname(__file__)
+if script_dir in sys.path:
+ sys.path.remove(script_dir)
+
+import contextlib
+import io
+import threading
+import socket
+import struct
+import subprocess
+import tempfile
+import Queue
+import pickle
+
+import psutil
+import jsonpickle
+
+import aria
+from aria.orchestrator.workflows.executor import base
+from aria.extension import process_executor
+from aria.utils import (
+ imports,
+ exceptions,
+ process as process_utils
+)
+
+
+_INT_FMT = 'I'
+_INT_SIZE = struct.calcsize(_INT_FMT)
+UPDATE_TRACKED_CHANGES_FAILED_STR = \
+ 'Some changes failed writing to storage. For more info refer to the log.'
+
+
+_Task = namedtuple('_Task', 'proc, ctx')
+
+
+class ProcessExecutor(base.BaseExecutor):
+ """
+ Sub-process task executor.
+ """
+
+ def __init__(self, plugin_manager=None, python_path=None, *args, **kwargs):
+ super(ProcessExecutor, self).__init__(*args, **kwargs)
+ self._plugin_manager = plugin_manager
+
+ # Optional list of additional directories that should be added to
+ # subprocesses python path
+ self._python_path = python_path or []
+
+ # Flag that denotes whether this executor has been stopped
+ self._stopped = False
+
+ # Contains reference to all currently running tasks
+ self._tasks = {}
+
+ self._request_handlers = {
+ 'started': self._handle_task_started_request,
+ 'succeeded': self._handle_task_succeeded_request,
+ 'failed': self._handle_task_failed_request,
+ }
+
+ # Server socket used to accept task status messages from subprocesses
+ self._server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self._server_socket.bind(('localhost', 0))
+ self._server_socket.listen(10)
+ self._server_port = self._server_socket.getsockname()[1]
+
+ # Used to send a "closed" message to the listener when this executor is closed
+ self._messenger = _Messenger(task_id=None, port=self._server_port)
+
+ # Queue used by the listener thread to notify this constructor that it has started
+ # (see last line of this __init__ method)
+ self._listener_started = Queue.Queue()
+
+ # Listener thread to handle subprocesses task status messages
+ self._listener_thread = threading.Thread(target=self._listener)
+ self._listener_thread.daemon = True
+ self._listener_thread.start()
+
+ # Wait for listener thread to actually start before returning
+ self._listener_started.get(timeout=60)
+
+ def close(self):
+ if self._stopped:
+ return
+ self._stopped = True
+ # Listener thread may be blocked on "accept" call. This will wake it up with an explicit
+ # "closed" message
+ self._messenger.closed()
+ self._server_socket.close()
+ self._listener_thread.join(timeout=60)
+
+ # we use set(self._tasks) since tasks may change in the process of closing
+ for task_id in set(self._tasks):
+ self.terminate(task_id)
+
+ def terminate(self, task_id):
+ task = self._remove_task(task_id)
+ # The process might have managed to finish, thus it would not be in the tasks list
+ if task:
+ try:
+ parent_process = psutil.Process(task.proc.pid)
+ for child_process in reversed(parent_process.children(recursive=True)):
+ try:
+ child_process.kill()
+ except BaseException:
+ pass
+ parent_process.kill()
+ except BaseException:
+ pass
+
+ def _execute(self, ctx):
+ self._check_closed()
+
+ # Temporary file used to pass arguments to the started subprocess
+ file_descriptor, arguments_json_path = tempfile.mkstemp(prefix='executor-', suffix='.json')
+ os.close(file_descriptor)
+ with open(arguments_json_path, 'wb') as f:
+ f.write(pickle.dumps(self._create_arguments_dict(ctx)))
+
+ env = self._construct_subprocess_env(task=ctx.task)
+ # Asynchronously start the operation in a subprocess
+ proc = subprocess.Popen(
+ [
+ sys.executable,
+ os.path.expanduser(os.path.expandvars(__file__)),
+ os.path.expanduser(os.path.expandvars(arguments_json_path))
+ ],
+ env=env)
+
+ self._tasks[ctx.task.id] = _Task(ctx=ctx, proc=proc)
+
+ def _remove_task(self, task_id):
+ return self._tasks.pop(task_id, None)
+
+ def _check_closed(self):
+ if self._stopped:
+ raise RuntimeError('Executor closed')
+
+ def _create_arguments_dict(self, ctx):
+ return {
+ 'task_id': ctx.task.id,
+ 'function': ctx.task.function,
+ 'operation_arguments': dict(arg.unwrapped for arg in ctx.task.arguments.itervalues()),
+ 'port': self._server_port,
+ 'context': ctx.serialization_dict
+ }
+
+ def _construct_subprocess_env(self, task):
+ env = os.environ.copy()
+
+ if task.plugin_fk and self._plugin_manager:
+ # If this is a plugin operation,
+ # load the plugin on the subprocess env we're constructing
+ self._plugin_manager.load_plugin(task.plugin, env=env)
+
+ # Add user supplied directories to injected PYTHONPATH
+ if self._python_path:
+ process_utils.append_to_pythonpath(*self._python_path, env=env)
+
+ return env
+
+ def _listener(self):
+ # Notify __init__ method this thread has actually started
+ self._listener_started.put(True)
+ while not self._stopped:
+ try:
+ with self._accept_request() as (request, response):
+ request_type = request['type']
+ if request_type == 'closed':
+ break
+ request_handler = self._request_handlers.get(request_type)
+ if not request_handler:
+ raise RuntimeError('Invalid request type: {0}'.format(request_type))
+ task_id = request['task_id']
+ request_handler(task_id=task_id, request=request, response=response)
+ except BaseException as e:
+ self.logger.debug('Error in process executor listener: {0}'.format(e))
+
+ @contextlib.contextmanager
+ def _accept_request(self):
+ with contextlib.closing(self._server_socket.accept()[0]) as connection:
+ message = _recv_message(connection)
+ response = {}
+ try:
+ yield message, response
+ except BaseException as e:
+ response['exception'] = exceptions.wrap_if_needed(e)
+ raise
+ finally:
+ _send_message(connection, response)
+
+ def _handle_task_started_request(self, task_id, **kwargs):
+ self._task_started(self._tasks[task_id].ctx)
+
+ def _handle_task_succeeded_request(self, task_id, **kwargs):
+ task = self._remove_task(task_id)
+ if task:
+ self._task_succeeded(task.ctx)
+
+ def _handle_task_failed_request(self, task_id, request, **kwargs):
+ task = self._remove_task(task_id)
+ if task:
+ self._task_failed(
+ task.ctx, exception=request['exception'], traceback=request['traceback'])
+
+
+def _send_message(connection, message):
+
+ # Packing the length of the entire msg using struct.pack.
+ # This enables later reading of the content.
+ def _pack(data):
+ return struct.pack(_INT_FMT, len(data))
+
+ data = jsonpickle.dumps(message)
+ msg_metadata = _pack(data)
+ connection.send(msg_metadata)
+ connection.sendall(data)
+
+
+def _recv_message(connection):
+ # Retrieving the length of the msg to come.
+ def _unpack(conn):
+ return struct.unpack(_INT_FMT, _recv_bytes(conn, _INT_SIZE))[0]
+
+ msg_metadata_len = _unpack(connection)
+ msg = _recv_bytes(connection, msg_metadata_len)
+ return jsonpickle.loads(msg)
+
+
+def _recv_bytes(connection, count):
+ result = io.BytesIO()
+ while True:
+ if not count:
+ return result.getvalue()
+ read = connection.recv(count)
+ if not read:
+ return result.getvalue()
+ result.write(read)
+ count -= len(read)
+
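+# Wire-format sketch: each message is a fixed-size unsigned-int length prefix
+# followed by a jsonpickle payload, e.g.:
+#
+#     data = jsonpickle.dumps({'type': 'started', 'task_id': 42})
+#     frame = struct.pack(_INT_FMT, len(data)) + data
+#
+# _recv_message() unpacks the first _INT_SIZE bytes to learn the payload
+# length, then reads exactly that many bytes.
+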
+
+class _Messenger(object):
+
+ def __init__(self, task_id, port):
+ self.task_id = task_id
+ self.port = port
+
+ def started(self):
+ """Task started message"""
+ self._send_message(type='started')
+
+ def succeeded(self):
+ """Task succeeded message"""
+ self._send_message(type='succeeded')
+
+ def failed(self, exception):
+ """Task failed message"""
+ self._send_message(type='failed', exception=exception)
+
+ def closed(self):
+ """Executor closed message"""
+ self._send_message(type='closed')
+
+ def _send_message(self, type, exception=None):
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.connect(('localhost', self.port))
+ try:
+ _send_message(sock, {
+ 'type': type,
+ 'task_id': self.task_id,
+ 'exception': exceptions.wrap_if_needed(exception),
+ 'traceback': exceptions.get_exception_as_string(*sys.exc_info()),
+ })
+ response = _recv_message(sock)
+ response_exception = response.get('exception')
+ if response_exception:
+ raise response_exception
+ finally:
+ sock.close()
+
+
+def _main():
+ arguments_json_path = sys.argv[1]
+ with open(arguments_json_path) as f:
+ arguments = pickle.loads(f.read())
+
+ # arguments_json_path is a temporary file created by the parent process.
+ # so we remove it here
+ os.remove(arguments_json_path)
+
+ task_id = arguments['task_id']
+ port = arguments['port']
+ messenger = _Messenger(task_id=task_id, port=port)
+
+ function = arguments['function']
+ operation_arguments = arguments['operation_arguments']
+ context_dict = arguments['context']
+
+ try:
+ ctx = context_dict['context_cls'].instantiate_from_dict(**context_dict['context'])
+ except BaseException as e:
+ messenger.failed(e)
+ return
+
+ try:
+ messenger.started()
+ task_func = imports.load_attribute(function)
+ aria.install_aria_extensions()
+ for decorate in process_executor.decorate():
+ task_func = decorate(task_func)
+ task_func(ctx=ctx, **operation_arguments)
+ ctx.close()
+ messenger.succeeded()
+ except BaseException as e:
+ ctx.close()
+ messenger.failed(e)
+
+if __name__ == '__main__':
+ _main()
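+
+
+# Construction sketch (hypothetical; the path is illustrative):
+#
+#     executor = ProcessExecutor(python_path=['/opt/my/plugins'])
+#     try:
+#         ...  # the engine submits tasks via executor.execute(ctx)
+#     finally:
+#         executor.close()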
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/thread.py b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/thread.py
new file mode 100644
index 0000000..170620e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/workflows/executor/thread.py
@@ -0,0 +1,79 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Thread task executor.
+"""
+
+import Queue
+import threading
+
+import sys
+
+from aria.utils import imports, exceptions
+
+from .base import BaseExecutor
+
+
+class ThreadExecutor(BaseExecutor):
+ """
+ Thread task executor.
+
+ It is easier to write tests using this executor than with the full-blown subprocess executor.
+
+ Note: This executor is incapable of running plugin operations.
+ """
+
+ def __init__(self, pool_size=1, close_timeout=5, *args, **kwargs):
+ super(ThreadExecutor, self).__init__(*args, **kwargs)
+ self._stopped = False
+ self._close_timeout = close_timeout
+ self._queue = Queue.Queue()
+ self._pool = []
+ for i in range(pool_size):
+ name = 'ThreadExecutor-{index}'.format(index=i+1)
+ thread = threading.Thread(target=self._processor, name=name)
+ thread.daemon = True
+ thread.start()
+ self._pool.append(thread)
+
+ def _execute(self, ctx):
+ self._queue.put(ctx)
+
+ def close(self):
+ self._stopped = True
+ for thread in self._pool:
+ if self._close_timeout is None:
+ thread.join()
+ else:
+ thread.join(self._close_timeout)
+
+ def _processor(self):
+ while not self._stopped:
+ try:
+ ctx = self._queue.get(timeout=1)
+ self._task_started(ctx)
+ try:
+ task_func = imports.load_attribute(ctx.task.function)
+ arguments = dict(arg.unwrapped for arg in ctx.task.arguments.itervalues())
+ task_func(ctx=ctx, **arguments)
+ self._task_succeeded(ctx)
+ except BaseException as e:
+ self._task_failed(ctx,
+ exception=e,
+ traceback=exceptions.get_exception_as_string(*sys.exc_info()))
+ # self._queue.get times out periodically (Queue.Empty) so the loop can
+ # re-check self._stopped; these are daemon threads, so swallow and retry
+ except BaseException:
+ pass
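+
+
+# Test-oriented usage sketch:
+#
+#     executor = ThreadExecutor(pool_size=2)
+#     try:
+#         ...  # the engine submits tasks via executor.execute(ctx)
+#     finally:
+#         executor.close()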
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/__init__.py
new file mode 100644
index 0000000..7903b52
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/__init__.py
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Parser package.
+"""
+
+from .specification import implements_specification, iter_specifications
+
+
+MODULES = (
+ 'consumption',
+ 'loading',
+ 'modeling',
+ 'presentation',
+ 'reading',
+ 'validation')
+
+__all__ = (
+ 'MODULES',
+ 'implements_specification',
+ 'iter_specifications')
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/__init__.py
new file mode 100644
index 0000000..f9caf5f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/__init__.py
@@ -0,0 +1,84 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Consumption package.
+
+.. autosummary::
+ :nosignatures:
+
+ aria.parser.consumption.ConsumptionContext
+
+Consumers
+---------
+
+.. autosummary::
+ :nosignatures:
+
+ aria.parser.consumption.Consumer
+ aria.parser.consumption.ConsumerChain
+ aria.parser.consumption.ConsumerException
+ aria.parser.consumption.Inputs
+ aria.parser.consumption.ServiceTemplate
+ aria.parser.consumption.Types
+ aria.parser.consumption.CoerceServiceInstanceValues
+ aria.parser.consumption.ValidateServiceInstance
+ aria.parser.consumption.SatisfyRequirements
+ aria.parser.consumption.ValidateCapabilities
+ aria.parser.consumption.FindHosts
+ aria.parser.consumption.ConfigureOperations
+ aria.parser.consumption.ServiceInstance
+ aria.parser.consumption.Read
+ aria.parser.consumption.Validate
+"""
+
+from .exceptions import ConsumerException
+from .context import ConsumptionContext
+from .consumer import (
+ Consumer,
+ ConsumerChain
+)
+from .presentation import Read
+from .validation import Validate
+from .modeling import (
+ ServiceTemplate,
+ Types,
+ ServiceInstance,
+ FindHosts,
+ ValidateServiceInstance,
+ ConfigureOperations,
+ SatisfyRequirements,
+ ValidateCapabilities,
+ CoerceServiceInstanceValues
+)
+from .inputs import Inputs
+
+__all__ = (
+ 'ConsumerException',
+ 'ConsumptionContext',
+ 'Consumer',
+ 'ConsumerChain',
+ 'Read',
+ 'Validate',
+ 'ServiceTemplate',
+ 'Types',
+ 'ServiceInstance',
+ 'FindHosts',
+ 'ValidateServiceInstance',
+ 'ConfigureOperations',
+ 'SatisfyRequirements',
+ 'ValidateCapabilities',
+ 'CoerceServiceInstanceValues'
+)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/consumer.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/consumer.py
new file mode 100644
index 0000000..878a161
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/consumer.py
@@ -0,0 +1,93 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ...exceptions import AriaException
+from ...utils.exceptions import print_exception
+from ..validation import Issue
+
+
+class Consumer(object):
+ """
+ Base class for ARIA consumers.
+
+ Consumers provide useful functionality by consuming presentations.
+ """
+
+ def __init__(self, context):
+ from ...orchestrator import topology
+
+ self.topology = topology.Topology()
+ self.context = context
+
+ def consume(self):
+ pass
+
+ def dump(self):
+ pass
+
+ def _handle_exception(self, e):
+ if hasattr(e, 'issue') and isinstance(e.issue, Issue):
+ self.context.validation.report(issue=e.issue)
+ else:
+ self.context.validation.report(exception=e)
+ if not isinstance(e, AriaException):
+ print_exception(e)
+
+
+class ConsumerChain(Consumer):
+ """
+ ARIA consumer chain.
+
+ Calls consumers in order, handling exceptions by calling ``handle_exception`` on them, and stops
+ the chain if there are any validation issues.
+ """
+
+ def __init__(self, context, consumer_classes=None, handle_exceptions=True):
+ super(ConsumerChain, self).__init__(context)
+ self.handle_exceptions = handle_exceptions
+ self.consumers = []
+ if consumer_classes:
+ for consumer_class in consumer_classes:
+ self.append(consumer_class)
+
+ def append(self, *consumer_classes):
+ for consumer_class in consumer_classes:
+ self.consumers.append(consumer_class(self.context))
+
+ def consume(self):
+ for consumer in self.consumers:
+ try:
+ consumer.consume()
+ except BaseException as e:
+ if self.handle_exceptions:
+ handle_exception(consumer, e)
+ else:
+ raise e
+
+ if consumer.topology.has_issues:
+ self.context.validation.extend_issues(consumer.topology.issues)
+
+ if self.context.validation.has_issues:
+ break
+
+
+def handle_exception(consumer, e):
+ if isinstance(e, AriaException) and e.issue:
+ consumer.context.validation.report(issue=e.issue)
+ else:
+ consumer.context.validation.report(exception=e)
+ if not isinstance(e, AriaException):
+ print_exception(e)
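+
+
+# Chaining sketch (``ReadStep`` and ``ValidateStep`` stand in for any Consumer
+# subclasses; hypothetical names):
+#
+#     chain = ConsumerChain(context, (ReadStep, ValidateStep))
+#     chain.consume()
+#     # the chain stops at the first consumer that leaves validation issues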
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/context.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/context.py
new file mode 100644
index 0000000..9164984
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/context.py
@@ -0,0 +1,106 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import threading
+
+from ...utils import console
+from ..validation import ValidationContext
+from ..loading import LoadingContext
+from ..reading import ReadingContext
+from ..presentation import PresentationContext
+from ..modeling import ModelingContext
+
+
+_thread_locals = threading.local()
+
+
+class ConsumptionContext(object):
+ """
+ Consumption context.
+
+ :ivar args: runtime arguments (usually provided on the command line)
+ :ivar out: message output stream (defaults to stdout)
+ :ivar style: message output style
+ :vartype style: Style
+ :ivar validation: validation context
+ :vartype validation: :class:`ValidationContext`
+ :ivar loading: loading context
+ :vartype loading: :class:`LoadingContext`
+ :ivar reading: reading context
+ :vartype reading: :class:`ReadingContext`
+ :ivar presentation: presentation context
+ :vartype presentation: :class:`PresentationContext`
+ :ivar modeling: modeling context
+ :vartype modeling: :class:`ModelingContext`
+ """
+
+ @staticmethod
+ def get_thread_local():
+ """
+ Gets the context attached to the current thread if there is one.
+ """
+
+ return getattr(_thread_locals, 'aria_consumption_context', None)
+
+ def __init__(self, set_thread_local=True):
+ self.args = []
+ self.out = sys.stdout
+ self.validation = ValidationContext()
+ self.loading = LoadingContext()
+ self.reading = ReadingContext()
+ self.presentation = PresentationContext()
+ self.modeling = ModelingContext()
+ self.style = console.TopologyStylizer()
+
+ if set_thread_local:
+ self.set_thread_local()
+
+ def set_thread_local(self):
+ """
+ Attaches this context to the current thread.
+ """
+
+ _thread_locals.aria_consumption_context = self
+
+ def write(self, string):
+ """
+ Writes to our ``out``, making sure to encode UTF-8 if required.
+ """
+
+ try:
+ self.out.write(string)
+ except UnicodeEncodeError:
+ self.out.write(string.encode('utf8'))
+
+ def has_arg_switch(self, name):
+ name = '--%s' % name
+ return name in self.args
+
+ def get_arg_value(self, name, default=None):
+ name = '--%s=' % name
+ for arg in self.args:
+ if arg.startswith(name):
+ return arg[len(name):]
+ return default
+
+ def get_arg_value_int(self, name, default=None):
+ value = self.get_arg_value(name)
+ if value is not None:
+ try:
+ return int(value)
+ except (TypeError, ValueError):
+ pass
+ return default
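+
+
+# Argument-handling sketch, grounded in the methods above:
+#
+#     context = ConsumptionContext()
+#     context.args = ['--json', '--indent=4']
+#     context.has_arg_switch('json')       # True
+#     context.get_arg_value_int('indent')  # 4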
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/exceptions.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/exceptions.py
new file mode 100644
index 0000000..78509cb
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/exceptions.py
@@ -0,0 +1,23 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ...exceptions import AriaException
+
+
+class ConsumerException(AriaException):
+ """
+ ARIA consumer exception.
+ """
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/inputs.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/inputs.py
new file mode 100644
index 0000000..fe7e192
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/inputs.py
@@ -0,0 +1,53 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ...utils.formatting import safe_repr
+from ..loading import UriLocation, LiteralLocation
+from ..reading import JsonReader
+from .consumer import Consumer
+
+
+class Inputs(Consumer):
+ """
+ Fills in the inputs if provided as arguments.
+ """
+
+ def consume(self):
+ inputs = self.context.get_arg_value('inputs')
+ if inputs is None:
+ return
+
+ if inputs.endswith('.json') or inputs.endswith('.yaml'):
+ location = UriLocation(inputs)
+ else:
+ location = LiteralLocation(inputs)
+
+ loader = self.context.loading.loader_source.get_loader(self.context.loading, location, None)
+
+ if isinstance(location, LiteralLocation):
+ reader = JsonReader(self.context.reading, location, loader)
+ else:
+ reader = self.context.reading.reader_source.get_reader(self.context.reading,
+ location, loader)
+
+ inputs = reader.read()
+
+ if not isinstance(inputs, dict):
+ self.context.validation.report(
+ 'Inputs consumer: inputs are not a dict: %s' % safe_repr(inputs))
+ return
+
+ for name, value in inputs.iteritems():
+ self.context.modeling.set_input(name, value)
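+
+
+# Dispatch sketch: a ``.json``/``.yaml`` suffix is treated as a file location,
+# anything else as a JSON literal, e.g.:
+#
+#     --inputs=inputs.yaml       -> UriLocation('inputs.yaml')
+#     --inputs={"port": 8080}    -> LiteralLocation('{"port": 8080}')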
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/modeling.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/modeling.py
new file mode 100644
index 0000000..221b308
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/modeling.py
@@ -0,0 +1,198 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .consumer import Consumer, ConsumerChain
+from ...utils.formatting import json_dumps, yaml_dumps
+
+
+class DeriveServiceTemplate(Consumer):
+ """
+ Derives the service template from the presenter.
+ """
+
+ def consume(self):
+ if self.context.presentation.presenter is None:
+ self.context.validation.report('DeriveServiceTemplate consumer: missing presenter')
+ return
+
+ if not hasattr(self.context.presentation.presenter, '_get_model'):
+ self.context.validation.report('DeriveServiceTemplate consumer: presenter does not'
+ ' support "_get_model"')
+ return
+
+ self.context.modeling.template = \
+ self.context.presentation.presenter._get_model(self.context)
+
+
+class CoerceServiceTemplateValues(Consumer):
+ """
+ Coerces values in the service template.
+ """
+
+ def consume(self):
+ self.topology.coerce(self.context.modeling.template, report_issues=True)
+
+
+class ValidateServiceTemplate(Consumer):
+ """
+ Validates the service template.
+ """
+
+ def consume(self):
+ self.topology.validate(self.context.modeling.template)
+
+
+class ServiceTemplate(ConsumerChain):
+ """
+ Generates the service template from the presenter.
+ """
+
+ def __init__(self, context):
+ super(ServiceTemplate, self).__init__(context, (DeriveServiceTemplate,
+ CoerceServiceTemplateValues,
+ ValidateServiceTemplate))
+
+ def dump(self):
+ if self.context.has_arg_switch('yaml'):
+ indent = self.context.get_arg_value_int('indent', 2)
+ raw = self.context.modeling.template_as_raw
+ self.context.write(yaml_dumps(raw, indent=indent))
+ elif self.context.has_arg_switch('json'):
+ indent = self.context.get_arg_value_int('indent', 2)
+ raw = self.context.modeling.template_as_raw
+ self.context.write(json_dumps(raw, indent=indent))
+ else:
+ self.context.write(self.topology.dump(self.context.modeling.template))
+
+
+class Types(Consumer):
+ """
+ Used to just dump the types.
+ """
+
+ def dump(self):
+ if self.context.has_arg_switch('yaml'):
+ indent = self.context.get_arg_value_int('indent', 2)
+ raw = self.context.modeling.types_as_raw
+ self.context.write(yaml_dumps(raw, indent=indent))
+ elif self.context.has_arg_switch('json'):
+ indent = self.context.get_arg_value_int('indent', 2)
+ raw = self.context.modeling.types_as_raw
+ self.context.write(json_dumps(raw, indent=indent))
+ else:
+ self.topology.dump_types(self.context, self.context.modeling.template)
+
+
+class InstantiateServiceInstance(Consumer):
+ """
+ Instantiates the service template into a service instance.
+ """
+
+ def consume(self):
+ if self.context.modeling.template is None:
+ self.context.validation.report('InstantiateServiceInstance consumer: missing service '
+ 'template')
+ return
+ self.context.modeling.instance = self.topology.instantiate(
+ self.context.modeling.template,
+ inputs=dict(self.context.modeling.inputs)
+ )
+
+
+class CoerceServiceInstanceValues(Consumer):
+ """
+ Coerces values in the service instance.
+ """
+
+ def consume(self):
+ self.topology.coerce(self.context.modeling.instance, report_issues=True)
+
+
+class ValidateServiceInstance(Consumer):
+ """
+ Validates the service instance.
+ """
+
+ def consume(self):
+ self.topology.validate(self.context.modeling.instance)
+
+
+class SatisfyRequirements(Consumer):
+ """
+ Satisfies node requirements in the service instance.
+ """
+
+ def consume(self):
+ self.topology.satisfy_requirements(self.context.modeling.instance)
+
+
+class ValidateCapabilities(Consumer):
+ """
+ Validates capabilities in the service instance.
+ """
+
+ def consume(self):
+ self.topology.validate_capabilities(self.context.modeling.instance)
+
+
+class FindHosts(Consumer):
+ """
+ Find hosts for all nodes in the service instance.
+ """
+
+ def consume(self):
+ self.topology.assign_hosts(self.context.modeling.instance)
+
+
+class ConfigureOperations(Consumer):
+ """
+ Configures all operations in the service instance.
+ """
+
+ def consume(self):
+ self.topology.configure_operations(self.context.modeling.instance)
+
+
+class ServiceInstance(ConsumerChain):
+ """
+ Generates the service instance by instantiating the service template.
+ """
+
+ def __init__(self, context):
+ super(ServiceInstance, self).__init__(context, (InstantiateServiceInstance,
+ CoerceServiceInstanceValues,
+ ValidateServiceInstance,
+ CoerceServiceInstanceValues,
+ SatisfyRequirements,
+ CoerceServiceInstanceValues,
+ ValidateCapabilities,
+ FindHosts,
+ ConfigureOperations,
+ CoerceServiceInstanceValues))
+
+ def dump(self):
+ if self.context.has_arg_switch('graph'):
+ self.context.modeling.instance.dump_graph()
+ elif self.context.has_arg_switch('yaml'):
+ indent = self.context.get_arg_value_int('indent', 2)
+ raw = self.context.modeling.instance_as_raw
+ self.context.write(yaml_dumps(raw, indent=indent))
+ elif self.context.has_arg_switch('json'):
+ indent = self.context.get_arg_value_int('indent', 2)
+ raw = self.context.modeling.instance_as_raw
+ self.context.write(json_dumps(raw, indent=indent))
+ else:
+ str_rep = self.topology.dump(self.context.modeling.instance)
+ self.context.write(str_rep)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/presentation.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/presentation.py
new file mode 100644
index 0000000..542b3f0
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/presentation.py
@@ -0,0 +1,137 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ...utils.threading import FixedThreadPoolExecutor
+from ...utils.formatting import json_dumps, yaml_dumps
+from ..loading import UriLocation
+from ..reading import AlreadyReadException
+from ..presentation import PresenterNotFoundError
+from .consumer import Consumer
+
+
+class Read(Consumer):
+ """
+ Reads the presentation, handling imports recursively.
+
+ It works by consuming a data source via appropriate :class:`~aria.parser.loading.Loader`,
+ :class:`~aria.parser.reading.Reader`, and :class:`~aria.parser.presentation.Presenter`
+ instances.
+
+ It supports agnostic raw data composition for presenters that have
+ ``_get_import_locations`` and ``_merge_import``.
+
+ To improve performance, loaders are called asynchronously on separate threads.
+
+ Note that parsing may internally trigger more than one loading/reading/presentation
+ cycle, for example if the agnostic raw data has dependencies that must also be parsed.
+ """
+
+ def consume(self):
+ if self.context.presentation.location is None:
+ self.context.validation.report('Presentation consumer: missing location')
+ return
+
+ presenter = None
+ imported_presentations = None
+
+ executor = FixedThreadPoolExecutor(size=self.context.presentation.threads,
+ timeout=self.context.presentation.timeout)
+ executor.print_exceptions = self.context.presentation.print_exceptions
+ try:
+ presenter = self._present(self.context.presentation.location, None, None, executor)
+ executor.drain()
+
+ # Handle exceptions
+ for e in executor.exceptions:
+ self._handle_exception(e)
+
+ imported_presentations = executor.returns
+ finally:
+ executor.close()
+
+ # Merge imports
+ if (imported_presentations is not None) and hasattr(presenter, '_merge_import'):
+ for imported_presentation in imported_presentations:
+ okay = True
+ if hasattr(presenter, '_validate_import'):
+ okay = presenter._validate_import(self.context, imported_presentation)
+ if okay:
+ presenter._merge_import(imported_presentation)
+
+ self.context.presentation.presenter = presenter
+
+ def dump(self):
+ if self.context.has_arg_switch('yaml'):
+ indent = self.context.get_arg_value_int('indent', 2)
+ raw = self.context.presentation.presenter._raw
+ self.context.write(yaml_dumps(raw, indent=indent))
+ elif self.context.has_arg_switch('json'):
+ indent = self.context.get_arg_value_int('indent', 2)
+ raw = self.context.presentation.presenter._raw
+ self.context.write(json_dumps(raw, indent=indent))
+ else:
+ self.context.presentation.presenter._dump(self.context)
+
+ def _handle_exception(self, e):
+ if isinstance(e, AlreadyReadException):
+ return
+ super(Read, self)._handle_exception(e)
+
+ def _present(self, location, origin_location, presenter_class, executor):
+ # Link the context to this thread
+ self.context.set_thread_local()
+
+ raw = self._read(location, origin_location)
+
+ if self.context.presentation.presenter_class is not None:
+ # The presenter class we specified in the context overrides everything
+ presenter_class = self.context.presentation.presenter_class
+ else:
+ try:
+ presenter_class = self.context.presentation.presenter_source.get_presenter(raw)
+ except PresenterNotFoundError:
+ if presenter_class is None:
+ raise
+ # We'll use the presenter class we were given (from the presenter that imported us)
+ if presenter_class is None:
+ raise PresenterNotFoundError('presenter not found')
+
+ presentation = presenter_class(raw=raw)
+
+ if presentation is not None and hasattr(presentation, '_link_locators'):
+ presentation._link_locators()
+
+ # Submit imports to executor
+ if hasattr(presentation, '_get_import_locations'):
+ import_locations = presentation._get_import_locations(self.context)
+ if import_locations:
+ for import_location in import_locations:
+ # The imports inherit the parent presenter class and use the current location as
+ # their origin location
+ import_location = UriLocation(import_location)
+ executor.submit(self._present, import_location, location, presenter_class,
+ executor)
+
+ return presentation
+
+ def _read(self, location, origin_location):
+ if self.context.reading.reader is not None:
+ return self.context.reading.reader.read()
+ loader = self.context.loading.loader_source.get_loader(self.context.loading, location,
+ origin_location)
+ reader = self.context.reading.reader_source.get_reader(self.context.reading, location,
+ loader)
+ return reader.read()
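+
+
+# Consumption sketch (``service.yaml`` is an illustrative location):
+#
+#     context = ConsumptionContext()
+#     context.presentation.location = UriLocation('service.yaml')
+#     Read(context).consume()
+#     presenter = context.presentation.presenter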
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/validation.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/validation.py
new file mode 100644
index 0000000..a7bc3b8
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/consumption/validation.py
@@ -0,0 +1,30 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from .consumer import Consumer
+
+
+class Validate(Consumer):
+ """
+ Validates the presentation.
+ """
+
+ def consume(self):
+ if self.context.presentation.presenter is None:
+ self.context.validation.report('Validation consumer: missing presenter')
+ return
+
+ self.context.presentation.presenter._validate(self.context)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/exceptions.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/exceptions.py
new file mode 100644
index 0000000..a1f7012
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/exceptions.py
@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Parser exceptions.
+"""
+
+from ..exceptions import AriaException
+from .validation import Issue
+
+
+class InvalidValueError(AriaException):
+ """
+ ARIA error: value is invalid.
+ """
+
+ def __init__(self, message, cause=None, cause_tb=None, location=None, line=None, column=None,
+ locator=None, snippet=None, level=Issue.FIELD):
+ super(InvalidValueError, self).__init__(message, cause, cause_tb)
+ self.issue = Issue(message, location=location, line=line, column=column, locator=locator,
+ snippet=snippet, level=level, exception=cause)
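+
+# A minimal sketch of raising this error (all values are hypothetical):
+#
+#     raise InvalidValueError('"port" must be an integer',
+#                             locator=presentation._locator,
+#                             level=Issue.FIELD)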
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/__init__.py
new file mode 100644
index 0000000..834675e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/__init__.py
@@ -0,0 +1,80 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Loading package.
+
+.. autosummary::
+ :nosignatures:
+
+ aria.parser.loading.LoadingContext
+ aria.parser.loading.LoaderException
+ aria.parser.loading.LoaderNotFoundError
+ aria.parser.loading.DocumentNotFoundException
+ aria.parser.loading.LoaderSource
+ aria.parser.loading.DefaultLoaderSource
+
+Loaders
+-------
+
+.. autosummary::
+ :nosignatures:
+
+ aria.parser.loading.Loader
+ aria.parser.loading.FileTextLoader
+ aria.parser.loading.LiteralLoader
+ aria.parser.loading.RequestLoader
+ aria.parser.loading.RequestTextLoader
+ aria.parser.loading.UriTextLoader
+
+Locations
+---------
+
+.. autosummary::
+ :nosignatures:
+
+ aria.parser.loading.Location
+ aria.parser.loading.UriLocation
+"""
+
+from .exceptions import LoaderException, LoaderNotFoundError, DocumentNotFoundException
+from .context import LoadingContext
+from .loader import Loader
+from .source import LoaderSource, DefaultLoaderSource
+from .location import Location, UriLocation, LiteralLocation
+from .literal import LiteralLoader
+from .uri import UriTextLoader
+from .request import SESSION, SESSION_CACHE_PATH, RequestLoader, RequestTextLoader
+from .file import FileTextLoader
+
+
+__all__ = (
+ 'LoaderException',
+ 'LoaderNotFoundError',
+ 'DocumentNotFoundException',
+ 'LoadingContext',
+ 'Loader',
+ 'LoaderSource',
+ 'DefaultLoaderSource',
+ 'Location',
+ 'UriLocation',
+ 'LiteralLocation',
+ 'LiteralLoader',
+ 'UriTextLoader',
+ 'SESSION',
+ 'SESSION_CACHE_PATH',
+ 'RequestLoader',
+ 'RequestTextLoader',
+ 'FileTextLoader')
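+
+# A minimal usage sketch of this package (the path is hypothetical):
+#
+#     context = LoadingContext()
+#     location = UriLocation('/opt/templates/service-template.yaml')
+#     loader = DefaultLoaderSource().get_loader(context, location, None)
+#     loader.open()
+#     try:
+#         document = loader.load()
+#     finally:
+#         loader.close()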
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/context.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/context.py
new file mode 100644
index 0000000..59727c9
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/context.py
@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ...utils.collections import StrictList
+from .source import DefaultLoaderSource
+
+
+class LoadingContext(object):
+ """
+ Loading context.
+
+ :ivar loader_source: for finding loader instances
+ :vartype loader_source: ~aria.parser.loading.LoaderSource
+ :ivar prefixes: additional prefixes for :class:`UriTextLoader`
+ :vartype prefixes: [:obj:`basestring`]
+ """
+
+ def __init__(self):
+ self.loader_source = DefaultLoaderSource()
+ self.prefixes = StrictList(value_class=basestring)
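+
+# A minimal usage sketch (the prefix is hypothetical):
+#
+#     context = LoadingContext()
+#     context.prefixes.append('/opt/templates')  # consulted by UriTextLoader on misses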
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/exceptions.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/exceptions.py
new file mode 100644
index 0000000..6e8267a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/exceptions.py
@@ -0,0 +1,35 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ...exceptions import AriaException
+
+
+class LoaderException(AriaException):
+ """
+ ARIA loader exception.
+ """
+
+
+class LoaderNotFoundError(LoaderException):
+ """
+ ARIA loader error: loader not found for source.
+ """
+
+
+class DocumentNotFoundException(LoaderException):
+ """
+ ARIA loader exception: document not found.
+ """
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/file.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/file.py
new file mode 100644
index 0000000..a02bd69
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/file.py
@@ -0,0 +1,64 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import codecs
+
+from .loader import Loader
+from .exceptions import LoaderException, DocumentNotFoundException
+
+
+class FileTextLoader(Loader):
+ """
+ ARIA file text loader.
+
+ Extracts a text document from a file. The default encoding is UTF-8, but any other
+ supported encoding can be specified instead.
+ """
+
+ def __init__(self, context, path, encoding='utf-8'):
+ self.context = context
+ self.path = path
+ self.encoding = encoding
+ self._file = None
+
+ def open(self):
+ try:
+ self._file = codecs.open(self.path, mode='r', encoding=self.encoding, buffering=1)
+ except IOError as e:
+ if e.errno == 2:
+ raise DocumentNotFoundException('file not found: "%s"' % self.path, cause=e)
+ else:
+ raise LoaderException('file I/O error: "%s"' % self.path, cause=e)
+ except Exception as e:
+ raise LoaderException('file error: "%s"' % self.path, cause=e)
+
+ def close(self):
+ if self._file is not None:
+ try:
+ self._file.close()
+ except IOError as e:
+ raise LoaderException('file I/O error: "%s"' % self.path, cause=e)
+ except Exception as e:
+ raise LoaderException('file error: "%s"' % self.path, cause=e)
+
+ def load(self):
+ if self._file is not None:
+ try:
+ return self._file.read()
+ except IOError as e:
+ raise LoaderException('file I/O error: "%s"' % self.path, cause=e)
+ except Exception as e:
+ raise LoaderException('file error: "%s"' % self.path, cause=e)
+ return None
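+
+# A minimal usage sketch (the path is hypothetical; the context argument is stored
+# but not consulted by the code above, so None suffices here):
+#
+#     loader = FileTextLoader(None, '/opt/templates/service-template.yaml')
+#     loader.open()
+#     try:
+#         text = loader.load()
+#     finally:
+#         loader.close()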
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/literal.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/literal.py
new file mode 100644
index 0000000..7865008
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/literal.py
@@ -0,0 +1,31 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from .loader import Loader
+
+
+class LiteralLoader(Loader):
+ """
+ ARIA literal loader.
+
+ See :class:`~aria.parser.loading.LiteralLocation`.
+ """
+
+ def __init__(self, location):
+ self.location = location
+
+ def load(self):
+ return self.location.content
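+
+# A minimal usage sketch (the content is an arbitrary example):
+#
+#     from .location import LiteralLocation
+#
+#     loader = LiteralLoader(LiteralLocation('greeting: hello'))
+#     assert loader.load() == 'greeting: hello'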
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/loader.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/loader.py
new file mode 100644
index 0000000..e1abfbf
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/loader.py
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class Loader(object):
+ """
+ Base class for ARIA loaders.
+
+ Loaders extract a document by consuming a document source.
+
+ Though the extracted document is often textual (a string or string-like
+ data), loaders may provide any format.
+ """
+
+ def open(self):
+ pass
+
+ def close(self):
+ pass
+
+ def load(self):
+ raise NotImplementedError
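+
+# A minimal sketch of a custom loader built on this base class (hypothetical,
+# loading from an environment variable):
+#
+#     import os
+#
+#     class EnvTextLoader(Loader):
+#         def __init__(self, name):
+#             self.name = name
+#
+#         def load(self):
+#             return os.environ.get(self.name)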
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/location.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/location.py
new file mode 100644
index 0000000..902e856
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/location.py
@@ -0,0 +1,82 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+
+from ...utils.uris import as_file
+
+
+class Location(object):
+ """
+ Base class for ARIA locations.
+
+ Locations are used by :class:`~aria.parser.loading.LoaderSource` to delegate to
+ an appropriate :class:`~aria.parser.loading.Loader`.
+ """
+
+ def is_equivalent(self, location):
+ raise NotImplementedError
+
+ @property
+ def prefix(self):
+ return None
+
+
+class UriLocation(Location):
+ """
+ A URI location can be absolute or relative, and can include a scheme or not.
+
+ If no scheme is included, it should be treated as a filesystem path.
+
+ See :class:`~aria.parser.loading.UriTextLoader`.
+ """
+
+ def __init__(self, uri):
+ self.uri = uri
+
+ def is_equivalent(self, location):
+ return isinstance(location, UriLocation) and (location.uri == self.uri)
+
+ @property
+ def prefix(self):
+ prefix = os.path.dirname(self.uri)
+ if prefix and (as_file(prefix) is None):
+ # Yes, it's weird, but dirname handles URIs,
+ # too: http://stackoverflow.com/a/35616478/849021
+ # We just need to massage it with a trailing slash
+ prefix += '/'
+ return prefix
+
+ def __str__(self):
+ return self.uri
+
+
+class LiteralLocation(Location):
+ """
+ A location that embeds content.
+
+ See :class:`~aria.parser.loading.LiteralLoader`.
+ """
+
+ def __init__(self, content, name='literal'):
+ self.content = content
+ self.name = name
+
+ def is_equivalent(self, location):
+ return isinstance(location, LiteralLocation) and (location.content == self.content)
+
+ def __str__(self):
+ return '<%s>' % self.name
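+
+# A minimal sketch of how prefixes are derived (values are illustrative; exact
+# results depend on ``as_file``):
+#
+#     UriLocation('/opt/templates/main.yaml').prefix        # '/opt/templates'
+#     UriLocation('http://example.com/t/main.yaml').prefix  # 'http://example.com/t/'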
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/request.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/request.py
new file mode 100644
index 0000000..a809347
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/request.py
@@ -0,0 +1,88 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import tempfile
+
+from requests import Session
+from requests.exceptions import (ConnectionError, InvalidSchema)
+from cachecontrol import CacheControl
+from cachecontrol.caches import FileCache
+
+from .exceptions import LoaderException, DocumentNotFoundException
+from .loader import Loader
+
+SESSION = None
+SESSION_CACHE_PATH = os.path.join(tempfile.gettempdir(), 'aria_requests')
+
+
+class RequestLoader(Loader):
+ """
+ Base class for ARIA request-based loaders.
+
+ Extracts a document from a URI by performing a request.
+
+ Note that the "file:" schema is not supported: :class:`FileTextLoader` should
+ be used instead.
+ """
+
+ def __init__(self, context, uri, headers=None):
+ if headers is None:
+ headers = {}
+ self.context = context
+ self.uri = uri
+ self.headers = headers
+ self._response = None
+
+ def load(self):
+ pass
+
+ def open(self):
+ global SESSION
+ if SESSION is None:
+ SESSION = CacheControl(Session(), cache=FileCache(SESSION_CACHE_PATH))
+
+ try:
+ self._response = SESSION.get(self.uri, headers=self.headers)
+ except InvalidSchema as e:
+ raise DocumentNotFoundException('document not found: "%s"' % self.uri, cause=e)
+ except ConnectionError as e:
+ raise LoaderException('request connection error: "%s"' % self.uri, cause=e)
+ except Exception as e:
+ raise LoaderException('request error: "%s"' % self.uri, cause=e)
+
+ status = self._response.status_code
+ if status == 404:
+ self._response = None
+ raise DocumentNotFoundException('document not found: "%s"' % self.uri)
+ elif status != 200:
+ self._response = None
+ raise LoaderException('request error %d: "%s"' % (status, self.uri))
+
+
+class RequestTextLoader(RequestLoader):
+ """
+ ARIA request-based text loader.
+ """
+
+ def load(self):
+ if self._response is not None:
+ try:
+ if self._response.encoding is None:
+ self._response.encoding = 'utf8'
+ return self._response.text
+ except Exception as e:
+ raise LoaderException('request error: "%s"' % self.uri, cause=e)
+ return None
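+
+# A minimal usage sketch (the URL is hypothetical; the context argument is stored
+# but not consulted by the code above):
+#
+#     loader = RequestTextLoader(None, 'http://example.com/service-template.yaml')
+#     loader.open()  # may raise DocumentNotFoundException or LoaderException
+#     text = loader.load()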
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/source.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/source.py
new file mode 100644
index 0000000..bcd6dd1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/source.py
@@ -0,0 +1,44 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .location import LiteralLocation, UriLocation
+from .literal import LiteralLoader
+from .uri import UriTextLoader
+
+
+class LoaderSource(object):
+ """
+ Base class for ARIA loader sources.
+
+ Loader sources provide appropriate :class:`Loader` instances for locations.
+ """
+
+ def get_loader(self, context, location, origin_location):
+ raise NotImplementedError
+
+
+class DefaultLoaderSource(LoaderSource):
+ """
+ The default ARIA loader source will generate a :class:`UriTextLoader` for
+ :class:`UriLocation` and a :class:`LiteralLoader` for a :class:`LiteralLocation`.
+ """
+
+ def get_loader(self, context, location, origin_location):
+ if isinstance(location, UriLocation):
+ return UriTextLoader(context, location, origin_location)
+ elif isinstance(location, LiteralLocation):
+ return LiteralLoader(location)
+
+ return super(DefaultLoaderSource, self).get_loader(context, location, origin_location)
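+
+# A minimal sketch of the dispatch above (assuming a LoadingContext named "context"):
+#
+#     source = DefaultLoaderSource()
+#     source.get_loader(context, UriLocation('main.yaml'), None)  # -> UriTextLoader
+#     source.get_loader(context, LiteralLocation('a: 1'), None)   # -> LiteralLoader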
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/uri.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/uri.py
new file mode 100644
index 0000000..a5a18e6
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/loading/uri.py
@@ -0,0 +1,97 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from urlparse import urljoin
+
+from ...extension import parser
+from ...utils.collections import StrictList
+from ...utils.uris import as_file
+from .loader import Loader
+from .file import FileTextLoader
+from .request import RequestTextLoader
+from .exceptions import DocumentNotFoundException
+
+
+class UriTextLoader(Loader):
+ """
+ ARIA URI text loader.
+
+ See :class:`~aria.parser.loading.UriLocation`.
+
+ Supports a list of search prefixes that are tried in order if the URI cannot be found.
+ The prefixes are assembled as follows:
+
+ * If ``origin_location`` is provided, its prefix comes first.
+ * Then the prefixes in the :class:`LoadingContext` are added.
+ * Finally, the parser extension can supply extra prefixes via ``uri_loader_prefix``.
+ """
+
+ def __init__(self, context, location, origin_location=None):
+ self.context = context
+ self.location = location
+ self._prefixes = StrictList(value_class=basestring)
+ self._loader = None
+
+ def add_prefix(prefix):
+ if prefix and (prefix not in self._prefixes):
+ self._prefixes.append(prefix)
+
+ def add_prefixes(prefixes):
+ for prefix in prefixes:
+ add_prefix(prefix)
+
+ if origin_location is not None:
+ add_prefix(origin_location.prefix)
+
+ add_prefixes(context.prefixes)
+ add_prefixes(parser.uri_loader_prefix())
+
+ def open(self):
+ try:
+ self._open(self.location.uri)
+ return
+ except DocumentNotFoundException:
+ # Try prefixes in order
+ for prefix in self._prefixes:
+ prefix_as_file = as_file(prefix)
+ if prefix_as_file is not None:
+ uri = os.path.join(prefix_as_file, self.location.uri)
+ else:
+ uri = urljoin(prefix, self.location.uri)
+ try:
+ self._open(uri)
+ return
+ except DocumentNotFoundException:
+ pass
+ raise DocumentNotFoundException('document not found at URI: "%s"' % self.location)
+
+ def close(self):
+ if self._loader is not None:
+ self._loader.close()
+
+ def load(self):
+ return self._loader.load() if self._loader is not None else None
+
+ def _open(self, uri):
+ the_file = as_file(uri)
+ if the_file is not None:
+ uri = the_file
+ loader = FileTextLoader(self.context, uri)
+ else:
+ loader = RequestTextLoader(self.context, uri)
+ loader.open() # might raise an exception
+ self._loader = loader
+ self.location.uri = uri
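+
+# A minimal usage sketch (paths are hypothetical; LoadingContext and UriLocation
+# come from the sibling modules of this package):
+#
+#     context = LoadingContext()
+#     context.prefixes.append('/opt/templates')
+#     loader = UriTextLoader(context, UriLocation('main.yaml'))
+#     loader.open()  # tries 'main.yaml' first, then '/opt/templates/main.yaml'
+#     try:
+#         text = loader.load()
+#     finally:
+#         loader.close()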
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/modeling/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/modeling/__init__.py
new file mode 100644
index 0000000..4b1c995
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/modeling/__init__.py
@@ -0,0 +1,26 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Modeling package.
+"""
+
+from .context import IdType, ModelingContext
+
+
+__all__ = (
+ 'IdType',
+ 'ModelingContext'
+)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/modeling/context.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/modeling/context.py
new file mode 100644
index 0000000..d8a1f7a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/modeling/context.py
@@ -0,0 +1,107 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import itertools
+
+from ...utils.collections import StrictDict, prune
+from ...utils.uuid import generate_uuid
+
+
+# See: http://www.faqs.org/rfcs/rfc1035.html
+ID_MAX_LENGTH = 63
+
+
+class IdType(object):
+ LOCAL_SERIAL = 0
+ """
+ Locally unique serial ID: a running integer.
+ """
+
+ LOCAL_RANDOM = 1
+ """
+ Locally unique ID: 6 random safe characters.
+ """
+
+ UNIVERSAL_RANDOM = 2
+ """
+ Universally unique ID (UUID): 22 random safe characters.
+ """
+
+
+class ModelingContext(object):
+ """
+ Modeling context.
+
+ :ivar template: generated service template
+ :vartype template: aria.modeling.models.ServiceTemplate
+ :ivar instance: generated service instance
+ :vartype instance: aria.modeling.models.Service
+ :ivar node_id_format: format for node instance IDs
+ :vartype node_id_format: basestring
+ :ivar id_type: type of IDs to use for instances
+ :vartype id_type: int
+ :ivar id_max_length: maximum allowed instance ID length
+ :vartype id_max_length: int
+ :ivar inputs: input values
+ :vartype inputs: {:obj:`basestring`, object}
+ """
+
+ def __init__(self):
+ self.template = None
+ self.instance = None
+ self.node_id_format = '{template}_{id}'
+ # Other options: IdType.LOCAL_SERIAL, IdType.LOCAL_RANDOM
+ self.id_type = IdType.UNIVERSAL_RANDOM
+ self.id_max_length = ID_MAX_LENGTH
+ self.inputs = StrictDict(key_class=basestring)
+
+ self._serial_id_counter = itertools.count(1)
+ self._locally_unique_ids = set()
+
+ def store(self, model_storage):
+ if self.template is not None:
+ model_storage.service_template.put(self.template)
+ if self.instance is not None:
+ model_storage.service.put(self.instance)
+
+ def generate_id(self):
+ if self.id_type == IdType.LOCAL_SERIAL:
+ return self._serial_id_counter.next()
+
+ elif self.id_type == IdType.LOCAL_RANDOM:
+ the_id = generate_uuid(6)
+ while the_id in self._locally_unique_ids:
+ the_id = generate_uuid(6)
+ self._locally_unique_ids.add(the_id)
+ return the_id
+
+ return generate_uuid()
+
+ def set_input(self, name, value):
+ self.inputs[name] = value
+ # TODO: coerce to validate type
+
+ @property
+ def template_as_raw(self):
+ raw = self.template.as_raw
+ prune(raw)
+ return raw
+
+ @property
+ def instance_as_raw(self):
+ raw = self.instance.as_raw
+ prune(raw)
+ return raw
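+
+# A minimal sketch of ID generation in each mode (outputs are illustrative):
+#
+#     context = ModelingContext()
+#     context.id_type = IdType.LOCAL_SERIAL
+#     context.generate_id()  # 1, then 2, 3, ...
+#     context.id_type = IdType.UNIVERSAL_RANDOM
+#     context.generate_id()  # e.g. a 22-character random string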
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/__init__.py
new file mode 100644
index 0000000..5633e7b
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/__init__.py
@@ -0,0 +1,158 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Presentation package.
+
+.. autosummary::
+ :nosignatures:
+
+ aria.parser.presentation.PresentationContext
+ aria.parser.presentation.PresenterException
+ aria.parser.presentation.PresenterNotFoundError
+ aria.parser.presentation.Field
+ aria.parser.presentation.NULL
+ aria.parser.presentation.none_to_null
+ aria.parser.presentation.null_to_none
+ aria.parser.presentation.Value
+ aria.parser.presentation.Presenter
+ aria.parser.presentation.PresenterSource
+ aria.parser.presentation.DefaultPresenterSource
+
+Presentations
+-------------
+
+.. autosummary::
+ :nosignatures:
+
+ aria.parser.presentation.PresentationBase
+ aria.parser.presentation.Presentation
+ aria.parser.presentation.AsIsPresentation
+
+Field decorators
+----------------
+
+.. autosummary::
+ :nosignatures:
+
+ aria.parser.presentation.has_fields
+ aria.parser.presentation.short_form_field
+ aria.parser.presentation.allow_unknown_fields
+ aria.parser.presentation.primitive_field
+ aria.parser.presentation.primitive_list_field
+ aria.parser.presentation.primitive_dict_field
+ aria.parser.presentation.primitive_dict_unknown_fields
+ aria.parser.presentation.object_field
+ aria.parser.presentation.object_list_field
+ aria.parser.presentation.object_dict_field
+ aria.parser.presentation.object_sequenced_list_field
+ aria.parser.presentation.object_dict_unknown_fields
+ aria.parser.presentation.field_getter
+ aria.parser.presentation.field_setter
+ aria.parser.presentation.field_validator
+
+Field validators
+----------------
+
+.. autosummary::
+ :nosignatures:
+
+ aria.parser.presentation.type_validator
+ aria.parser.presentation.list_type_validator
+ aria.parser.presentation.list_length_validator
+ aria.parser.presentation.derived_from_validator
+
+Utilities
+---------
+
+.. autosummary::
+ :nosignatures:
+
+ aria.parser.presentation.get_locator
+ aria.parser.presentation.parse_types_dict_names
+ aria.parser.presentation.validate_primitive
+ aria.parser.presentation.validate_no_short_form
+ aria.parser.presentation.validate_no_unknown_fields
+ aria.parser.presentation.validate_known_fields
+ aria.parser.presentation.get_parent_presentation
+ aria.parser.presentation.report_issue_for_unknown_type
+ aria.parser.presentation.report_issue_for_parent_is_self
+ aria.parser.presentation.report_issue_for_unknown_parent_type
+ aria.parser.presentation.report_issue_for_circular_type_hierarchy
+"""
+
+from .exceptions import PresenterException, PresenterNotFoundError
+from .context import PresentationContext
+from .presenter import Presenter
+from .presentation import Value, PresentationBase, Presentation, AsIsPresentation
+from .source import PresenterSource, DefaultPresenterSource
+from .null import NULL, none_to_null, null_to_none
+from .fields import (Field, has_fields, short_form_field, allow_unknown_fields, primitive_field,
+ primitive_list_field, primitive_dict_field, primitive_dict_unknown_fields,
+ object_field, object_list_field, object_dict_field,
+ object_sequenced_list_field, object_dict_unknown_fields, field_getter,
+ field_setter, field_validator)
+from .field_validators import (type_validator, list_type_validator, list_length_validator,
+ derived_from_validator)
+from .utils import (get_locator, parse_types_dict_names, validate_primitive, validate_no_short_form,
+ validate_no_unknown_fields, validate_known_fields, get_parent_presentation,
+ report_issue_for_unknown_type, report_issue_for_unknown_parent_type,
+ report_issue_for_parent_is_self, report_issue_for_circular_type_hierarchy)
+
+__all__ = (
+ 'PresenterException',
+ 'PresenterNotFoundError',
+ 'PresentationContext',
+ 'Presenter',
+ 'Value',
+ 'PresentationBase',
+ 'Presentation',
+ 'AsIsPresentation',
+ 'PresenterSource',
+ 'DefaultPresenterSource',
+ 'NULL',
+ 'none_to_null',
+ 'null_to_none',
+ 'Field',
+ 'has_fields',
+ 'short_form_field',
+ 'allow_unknown_fields',
+ 'primitive_field',
+ 'primitive_list_field',
+ 'primitive_dict_field',
+ 'primitive_dict_unknown_fields',
+ 'object_field',
+ 'object_list_field',
+ 'object_dict_field',
+ 'object_sequenced_list_field',
+ 'object_dict_unknown_fields',
+ 'field_getter',
+ 'field_setter',
+ 'field_validator',
+ 'type_validator',
+ 'list_type_validator',
+ 'list_length_validator',
+ 'derived_from_validator',
+ 'get_locator',
+ 'parse_types_dict_names',
+ 'validate_primitive',
+ 'validate_no_short_form',
+ 'validate_no_unknown_fields',
+ 'validate_known_fields',
+ 'get_parent_presentation',
+ 'report_issue_for_unknown_type',
+ 'report_issue_for_unknown_parent_type',
+ 'report_issue_for_parent_is_self',
+ 'report_issue_for_circular_type_hierarchy')
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/context.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/context.py
new file mode 100644
index 0000000..44a6f82
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/context.py
@@ -0,0 +1,65 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from .source import DefaultPresenterSource
+
+
+class PresentationContext(object):
+ """
+ Presentation context.
+
+ :ivar presenter: the generated presenter instance
+ :vartype presenter: ~aria.parser.presentation.Presenter
+ :ivar location: from where we will generate the presenter
+ :vartype location: ~aria.parser.loading.Location
+ :ivar presenter_source: for finding presenter classes
+ :vartype presenter_source: ~aria.parser.presentation.PresenterSource
+ :ivar presenter_class: overrides ``presenter_source`` with a specific class
+ :vartype presenter_class: type
+ :ivar import_profile: whether to import the profile by default (defaults to ``True``)
+ :vartype import_profile: bool
+ :ivar threads: number of threads to use when reading data
+ :vartype threads: int
+ :ivar timeout: timeout in seconds for loading data
+ :vartype timeout: float
+ :ivar print_exceptions: whether to print exceptions while reading data
+ :vartype print_exceptions: bool
+ """
+
+ def __init__(self):
+ self.presenter = None
+ self.location = None
+ self.presenter_source = DefaultPresenterSource()
+ self.presenter_class = None # overrides
+ self.import_profile = True
+ self.threads = 8 # a reasonable default for network-bound multithreading
+ self.timeout = 10 # in seconds
+ self.print_exceptions = False
+
+ def get(self, *names):
+ """
+ Gets attributes recursively from the presenter.
+ """
+
+ return self.presenter._get(*names) if self.presenter is not None else None
+
+ def get_from_dict(self, *names):
+ """
+ Gets attributes recursively from the presenter, except for the last name, which is
+ used to get a value from the last dict.
+ """
+
+ return self.presenter._get_from_dict(*names) if self.presenter is not None else None
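+
+# A minimal usage sketch (assuming a populated presenter; the attribute names
+# follow the ``service_template`` convention used elsewhere in this package):
+#
+#     node_types = context.presentation.get('service_template', 'node_types')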
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/exceptions.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/exceptions.py
new file mode 100644
index 0000000..cd7eb07
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/exceptions.py
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ...exceptions import AriaException
+
+
+class PresenterException(AriaException):
+ """
+ ARIA presenter exception.
+ """
+
+
+class PresenterNotFoundError(PresenterException):
+ """
+ ARIA presenter error: presenter not found for raw.
+ """
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/field_validators.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/field_validators.py
new file mode 100644
index 0000000..aa04913
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/field_validators.py
@@ -0,0 +1,164 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ..validation import Issue
+from .utils import (parse_types_dict_names, report_issue_for_unknown_type,
+ report_issue_for_parent_is_self, report_issue_for_unknown_parent_type,
+ report_issue_for_circular_type_hierarchy)
+
+
+def type_validator(type_name, *types_dict_names):
+ """
+ Makes sure that the field refers to an existing type defined in the root presenter.
+
+ The arguments from the second onwards are used to locate a nested field under
+ ``service_template`` under the root presenter. The first of these can optionally be a function,
+ in which case it will be called to convert type names. This can be used to support shorthand
+ type names, aliases, etc.
+
+ Can be used with the :func:`field_validator` decorator.
+ """
+
+ types_dict_names, convert = parse_types_dict_names(types_dict_names)
+
+ def validator_fn(field, presentation, context):
+ field.default_validate(presentation, context)
+
+ # Make sure type exists
+ value = getattr(presentation, field.name)
+ if value is not None:
+ types_dict = context.presentation.get('service_template', *types_dict_names) or {}
+
+ if convert:
+ value = convert(context, value, types_dict)
+
+ if value not in types_dict:
+ report_issue_for_unknown_type(context, presentation, type_name, field.name)
+
+ return validator_fn
+
+
+def list_type_validator(type_name, *types_dict_names):
+ """
+ Makes sure that the field's elements refer to existing types defined in the root presenter.
+
+ Assumes that the field is a list.
+
+ The arguments from the second onwards are used to locate a nested field under
+ ``service_template`` under the root presenter. The first of these can optionally be a function,
+ in which case it will be called to convert type names. This can be used to support shorthand
+ type names, aliases, etc.
+
+ Can be used with the :func:`field_validator` decorator.
+ """
+
+ types_dict_names, convert = parse_types_dict_names(types_dict_names)
+
+ def validator_fn(field, presentation, context):
+ field.default_validate(presentation, context)
+
+ # Make sure types exist
+ values = getattr(presentation, field.name)
+ if values is not None:
+ types_dict = context.presentation.get('service_template', *types_dict_names) or {}
+
+ for value in values:
+ if convert:
+ value = convert(context, value, types_dict)
+
+ if value not in types_dict:
+ report_issue_for_unknown_type(context, presentation, type_name, field.name)
+
+ return validator_fn
+
+
+def list_length_validator(length):
+ """
+ Makes sure the field has exactly a specific number of elements.
+
+ Assumes that the field is a list.
+
+ Can be used with the :func:`field_validator` decorator.
+ """
+
+ def validator_fn(field, presentation, context):
+ field.default_validate(presentation, context)
+
+ # Make sure the list has exactly the required length
+ values = getattr(presentation, field.name)
+ if isinstance(values, list):
+ if len(values) != length:
+ context.validation.report('field "%s" does not have exactly %d elements in "%s"'
+ % (field.name, length, presentation._fullname),
+ locator=presentation._get_child_locator(field.name),
+ level=Issue.FIELD)
+
+ return validator_fn
+
+
+def derived_from_validator(*types_dict_names):
+ """
+ Makes sure that the field refers to a valid parent type defined in the root presenter.
+
+ Checks that we do not derive from ourselves and that we do not cause a circular hierarchy.
+
+ The arguments are used to locate a nested field under ``service_template`` under the root
+ presenter. The first of these can optionally be a function, in which case it will be called to
+ convert type names. This can be used to support shorthand type names, aliases, etc.
+
+ Can be used with the :func:`field_validator` decorator.
+ """
+
+ types_dict_names, convert = parse_types_dict_names(types_dict_names)
+
+ def validator_fn(field, presentation, context):
+ field.default_validate(presentation, context)
+
+ value = getattr(presentation, field.name)
+ if value is not None:
+ types_dict = context.presentation.get('service_template', *types_dict_names) or {}
+
+ if convert:
+ value = convert(context, value, types_dict)
+
+ # Make sure not derived from self
+ if value == presentation._name:
+ report_issue_for_parent_is_self(context, presentation, field.name)
+ # Make sure derived from type exists
+ elif value not in types_dict:
+ report_issue_for_unknown_parent_type(context, presentation, field.name)
+ else:
+ # Make sure derivation hierarchy is not circular
+ hierarchy = [presentation._name]
+ presentation_tmp = presentation
+ while presentation_tmp.derived_from is not None:
+ derived_from = presentation_tmp.derived_from
+ if convert:
+ derived_from = convert(context, derived_from, types_dict)
+
+ if derived_from == presentation_tmp._name:
+ # This should cause a validation issue at that type
+ break
+ elif derived_from not in types_dict:
+ # This should cause a validation issue at that type
+ break
+ presentation_tmp = types_dict[derived_from]
+ if presentation_tmp._name in hierarchy:
+ report_issue_for_circular_type_hierarchy(context, presentation, field.name)
+ break
+ hierarchy.append(presentation_tmp._name)
+
+ return validator_fn
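+
+# A minimal sketch of attaching one of these validators to a field (assuming the
+# presentation and field decorators from this package; 'node_types' is a
+# hypothetical types dict under the root presenter's ``service_template``):
+#
+#     @has_fields
+#     class NodeTemplate(Presentation):
+#         @field_validator(type_validator('node type', 'node_types'))
+#         @primitive_field(str, required=True)
+#         def type(self):
+#             pass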
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/fields.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/fields.py
new file mode 100644
index 0000000..5c3e074
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/fields.py
@@ -0,0 +1,757 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import threading
+from functools import wraps
+from types import MethodType
+
+from ...exceptions import AriaException
+from ...utils.collections import FrozenDict, FrozenList, deepcopy_with_locators, merge, OrderedDict
+from ...utils.caching import cachedmethod
+from ...utils.console import puts
+from ...utils.formatting import as_raw, safe_repr
+from ...utils.type import full_type_name
+from ...utils.exceptions import print_exception
+from ..exceptions import InvalidValueError
+
+from .null import NULL
+from .utils import validate_primitive
+
+#
+# Class decorators
+#
+
+# pylint: disable=unused-argument
+
+def has_fields(cls):
+ """
+ Class decorator for validated field support.
+
+ 1. Adds a ``FIELDS`` class property that is a dict of all the fields. Will inherit and merge
+ ``FIELDS`` properties from base classes if they have them.
+
+ 2. Generates automatic ``@property`` implementations for the fields with the help of a set of
+ special function decorators.
+
+ The class also works with the Python dict protocol, so that fields can be accessed via dict
+ semantics. The functionality is identical to that of using attribute access.
+
+ The class will also gain two utility methods, ``_iter_field_names`` and ``_iter_fields``.
+ """
+
+ # Make sure we have FIELDS
+ if 'FIELDS' not in cls.__dict__:
+ setattr(cls, 'FIELDS', OrderedDict())
+
+ # Inherit FIELDS from base classes
+ for base in cls.__bases__:
+ if hasattr(base, 'FIELDS'):
+ cls.FIELDS.update(base.FIELDS)
+
+ # We could do this:
+ #
+ # for name, field in cls.__dict__.iteritems():
+ #
+ # But dir() is better because it has a deterministic order (alphabetical)
+
+ for name in dir(cls):
+ field = getattr(cls, name)
+
+ if isinstance(field, Field):
+ # Accumulate
+ cls.FIELDS[name] = field
+
+ field.name = name
+ field.container_cls = cls
+
+ # This function is here just to create an enclosed scope for "field"
+ def closure(field):
+
+ # By convention, we have the getter wrap the original function.
+ # (It is, for example, where the Python help() function will look for
+ # docstrings when encountering a property.)
+ @cachedmethod
+ @wraps(field.func)
+ def getter(self):
+ return field.get(self, None)
+
+ def setter(self, value):
+ field.set(self, None, value)
+
+ # Convert to Python property
+ return property(fget=getter, fset=setter)
+
+ setattr(cls, name, closure(field))
+
+ # Bind methods
+ setattr(cls, '_iter_field_names', MethodType(has_fields_iter_field_names, None, cls))
+ setattr(cls, '_iter_fields', MethodType(has_fields_iter_fields, None, cls))
+
+ # Behave like a dict
+ setattr(cls, '__len__', MethodType(has_fields_len, None, cls))
+ setattr(cls, '__getitem__', MethodType(has_fields_getitem, None, cls))
+ setattr(cls, '__setitem__', MethodType(has_fields_setitem, None, cls))
+ setattr(cls, '__delitem__', MethodType(has_fields_delitem, None, cls))
+ setattr(cls, '__iter__', MethodType(has_fields_iter, None, cls))
+ setattr(cls, '__contains__', MethodType(has_fields_contains, None, cls))
+
+ return cls
+
+
+def short_form_field(name):
+ """
+ Class decorator for specifying the short form field.
+
+ The class must be decorated with :func:`has_fields`.
+ """
+
+ def decorator(cls):
+ if hasattr(cls, name) and hasattr(cls, 'FIELDS') and (name in cls.FIELDS):
+ setattr(cls, 'SHORT_FORM_FIELD', name)
+ return cls
+ else:
+ raise AttributeError('@short_form_field must be used with '
+ 'a Field name in @has_fields class')
+ return decorator
+
+
+def allow_unknown_fields(cls):
+ """
+ Class decorator specifying that the class allows unknown fields.
+
+ The class must be decorated with :func:`has_fields`.
+ """
+
+ if hasattr(cls, 'FIELDS'):
+ setattr(cls, 'ALLOW_UNKNOWN_FIELDS', True)
+ return cls
+ else:
+ raise AttributeError('@allow_unknown_fields must be used with a @has_fields class')
+
+#
+# Method decorators
+#
+
+
+def primitive_field(cls=None, default=None, allowed=None, required=False):
+ """
+ Method decorator for primitive fields.
+
+ The function must be a method in a class decorated with :func:`has_fields`.
+ """
+
+ def decorator(func):
+ return Field(field_variant='primitive', func=func, cls=cls, default=default,
+ allowed=allowed, required=required)
+ return decorator
+
+
+def primitive_list_field(cls=None, default=None, allowed=None, required=False):
+ """
+ Method decorator for list of primitive fields.
+
+ The function must be a method in a class decorated with :func:`has_fields`.
+ """
+
+ def decorator(func):
+ return Field(field_variant='primitive_list', func=func, cls=cls, default=default,
+ allowed=allowed, required=required)
+ return decorator
+
+
+def primitive_dict_field(cls=None, default=None, allowed=None, required=False):
+ """
+ Method decorator for dict of primitive fields.
+
+ The function must be a method in a class decorated with :func:`has_fields`.
+ """
+ def decorator(func):
+ return Field(field_variant='primitive_dict', func=func, cls=cls, default=default,
+ allowed=allowed, required=required)
+ return decorator
+
+
+def primitive_dict_unknown_fields(cls=None, default=None, allowed=None, required=False):
+ """
+ Method decorator for dict of primitive fields, for all the fields that are
+ not already decorated.
+
+ The function must be a method in a class decorated with :func:`has_fields`.
+ """
+
+ def decorator(func):
+ return Field(field_variant='primitive_dict_unknown_fields', func=func, cls=cls,
+ default=default, allowed=allowed, required=required)
+ return decorator
+
+
+def object_field(cls, default=None, allowed=None, required=False):
+ """
+ Method decorator for object fields.
+
+ The function must be a method in a class decorated with :func:`has_fields`.
+ """
+ def decorator(func):
+ return Field(field_variant='object', func=func, cls=cls, default=default, allowed=allowed,
+ required=required)
+ return decorator
+
+
+def object_list_field(cls, default=None, allowed=None, required=False):
+ """
+ Method decorator for list of object fields.
+
+ The function must be a method in a class decorated with :func:`has_fields`.
+ """
+
+ def decorator(func):
+ return Field(field_variant='object_list', func=func, cls=cls, default=default,
+ allowed=allowed, required=required)
+ return decorator
+
+
+def object_dict_field(cls, default=None, allowed=None, required=False):
+ """
+ Method decorator for dict of object fields.
+
+ The function must be a method in a class decorated with :func:`has_fields`.
+ """
+
+ def decorator(func):
+ return Field(field_variant='object_dict', func=func, cls=cls, default=default,
+ allowed=allowed, required=required)
+ return decorator
+
+
+def object_sequenced_list_field(cls, default=None, allowed=None, required=False):
+ """
+ Method decorator for sequenced list of object fields.
+
+ The function must be a method in a class decorated with :func:`has_fields`.
+ """
+
+ def decorator(func):
+ return Field(field_variant='sequenced_object_list', func=func, cls=cls, default=default,
+ allowed=allowed, required=required)
+ return decorator
+
+
+def object_dict_unknown_fields(cls, default=None, allowed=None, required=False):
+ """
+ Method decorator for dict of object fields, for all the fields that are not already decorated.
+
+ The function must be a method in a class decorated with :func:`has_fields`.
+ """
+ def decorator(func):
+ return Field(field_variant='object_dict_unknown_fields', func=func, cls=cls,
+ default=default, allowed=allowed, required=required)
+ return decorator
+
+
+def field_getter(getter_func):
+ """
+ Method decorator for overriding the getter function of a field.
+
+ The signature of the getter function must be: ``f(field, presentation, context)``.
+ The default getter can be accessed as ``field.default_get(presentation, context)``.
+
+ The function must already be decorated with a field decorator.
+ """
+
+ def decorator(field):
+ if isinstance(field, Field):
+ field.get = MethodType(getter_func, field, Field)
+ return field
+ else:
+ raise AttributeError('@field_getter must be used with a Field')
+ return decorator
+
+
+def field_setter(setter_func):
+ """
+ Method decorator for overriding the setter function of a field.
+
+ The signature of the setter function must be: ``f(field, presentation, context, value)``.
+ The default setter can be accessed as ``field.default_set(presentation, context, value)``.
+
+ The function must already be decorated with a field decorator.
+ """
+
+ def decorator(field):
+ if isinstance(field, Field):
+ field.set = MethodType(setter_func, field, Field)
+ return field
+ else:
+ raise AttributeError('@field_setter must be used with a Field')
+ return decorator
+
+
+def field_validator(validator_fn):
+ """
+ Method decorator for overriding the validator function of a field.
+
+ The signature of the validator function must be: ``f(field, presentation, context)``.
+ The default validator can be accessed as ``field.default_validate(presentation, context)``.
+
+ The function must already be decorated with a field decorator.
+ """
+
+ def decorator(field):
+ if isinstance(field, Field):
+ field.validate = MethodType(validator_fn, field, Field)
+ return field
+ else:
+ raise AttributeError('@field_validator must be used with a Field')
+ return decorator
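+
+# A minimal sketch combining the decorators above (names are hypothetical; assumes
+# a presentation-style class that keeps its raw data in ``_raw``):
+#
+#     @short_form_field('name')
+#     @has_fields
+#     class Person(object):
+#         def __init__(self, raw):
+#             self._raw = raw
+#
+#         @primitive_field(str, required=True)
+#         def name(self):
+#             pass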
+
+#
+# Utils
+#
+
+
+def has_fields_iter_field_names(self):
+ for name in self.__class__.FIELDS:
+ yield name
+
+
+def has_fields_iter_fields(self):
+ return self.FIELDS.iteritems()
+
+
+def has_fields_len(self):
+ return len(self.__class__.FIELDS)
+
+
+def has_fields_getitem(self, key):
+ if not isinstance(key, basestring):
+ raise TypeError('key must be a string')
+ if key not in self.__class__.FIELDS:
+ raise KeyError('no \'%s\' property' % key)
+ return getattr(self, key)
+
+
+def has_fields_setitem(self, key, value):
+ if not isinstance(key, basestring):
+ raise TypeError('key must be a string')
+ if key not in self.__class__.FIELDS:
+ raise KeyError('no \'%s\' property' % key)
+ return setattr(self, key, value)
+
+
+def has_fields_delitem(self, key):
+ if not isinstance(key, basestring):
+ raise TypeError('key must be a string')
+ if key not in self.__class__.FIELDS:
+ raise KeyError('no \'%s\' property' % key)
+ return setattr(self, key, None)
+
+
+def has_fields_iter(self):
+ return self.__class__.FIELDS.iterkeys()
+
+
+def has_fields_contains(self, key):
+ if not isinstance(key, basestring):
+ raise TypeError('key must be a string')
+ return key in self.__class__.FIELDS
+
+
+class Field(object):
+ """
+ Field handler used by the ``@has_fields`` decorator.
+ """
+
+ def __init__(self, field_variant, func, cls=None, default=None, allowed=None, required=False):
+ if cls == str:
+ # Use "unicode" instead of "str"
+ cls = unicode
+
+ self.container_cls = None
+ self.name = None
+ self.field_variant = field_variant
+ self.func = func
+ self.cls = cls
+ self.default = default
+ self.allowed = allowed
+ self.required = required
+
+ @property
+ def full_name(self):
+ return 'field "%s" in "%s"' % (self.name, full_type_name(self.container_cls))
+
+ @property
+ def full_cls_name(self):
+ name = full_type_name(self.cls)
+ if name == 'unicode':
+ # For simplicity, display "unicode" as "str"
+ name = 'str'
+ return name
+
+ def get(self, presentation, context):
+ return self.default_get(presentation, context)
+
+ def set(self, presentation, context, value):
+ return self.default_set(presentation, context, value)
+
+ def validate(self, presentation, context):
+ self.default_validate(presentation, context)
+
+ def get_locator(self, raw):
+ if hasattr(raw, '_locator'):
+ locator = raw._locator
+ if locator is not None:
+ return locator.get_child(self.name)
+ return None
+
+ def dump(self, presentation, context):
+ value = getattr(presentation, self.name)
+ if value is None:
+ return
+
+ dumper = getattr(self, '_dump_%s' % self.field_variant)
+ dumper(context, value)
+
+ def default_get(self, presentation, context):
+ # Handle raw
+
+ default_raw = (presentation._get_default_raw()
+ if hasattr(presentation, '_get_default_raw')
+ else None)
+
+ if default_raw is None:
+ raw = presentation._raw
+ else:
+ # Handle default raw value
+ raw = deepcopy_with_locators(default_raw)
+ merge(raw, presentation._raw)
+
+ # Handle unknown fields (only dict can have unknown fields, lists can't have them)
+
+ if self.field_variant == 'primitive_dict_unknown_fields':
+ return self._get_primitive_dict_unknown_fields(presentation, raw, context)
+ elif self.field_variant == 'object_dict_unknown_fields':
+ return self._get_object_dict_unknown_fields(presentation, raw, context)
+
+ is_short_form_field = (self.container_cls.SHORT_FORM_FIELD == self.name
+ if hasattr(self.container_cls, 'SHORT_FORM_FIELD')
+ else False)
+ is_dict = isinstance(raw, dict)
+
+ # Find value
+
+ value = self._find_value(is_short_form_field, is_dict, raw)
+
+ # Handle required
+
+ if value is None:
+ if self.required:
+ raise InvalidValueError('required %s does not have a value' % self.full_name,
+ locator=self.get_locator(raw))
+ else:
+ return None
+
+ # Handle allowed values
+
+ if self.allowed is not None:
+ if value not in self.allowed:
+ raise InvalidValueError('%s is not %s'
+ % (self.full_name, ' or '.join([safe_repr(v)
+ for v in self.allowed])),
+ locator=self.get_locator(raw))
+
+ # Handle get according to variant
+
+ getter = getattr(self, '_get_{field_variant}'.format(field_variant=self.field_variant),
+ None)
+
+ if getter is None:
+ locator = self.get_locator(raw)
+ location = (' @%s' % locator) if locator is not None else ''
+ raise AttributeError('%s has unsupported field variant: "%s"%s'
+ % (self.full_name, self.field_variant, location))
+
+ return getter(presentation, raw, value, context)
+
+ def _find_value(self, is_short_form_field, is_dict, raw):
+ value = None
+ if is_short_form_field and not is_dict:
+ # Handle short form
+ value = raw
+ elif is_dict:
+ if self.name in raw:
+ value = raw[self.name]
+ if value is None:
+ # An explicit null
+ value = NULL
+ else:
+ value = self.default
+ return value
+
+ def default_set(self, presentation, context, value):
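+        # Writes the new value into the raw data and re-validates; on failure the
+        # previous raw value is restored before the exception propagates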
+ raw = presentation._raw
+ old = self.get(presentation, context)
+ raw[self.name] = value
+ try:
+ self.validate(presentation, context)
+        except Exception:
+            raw[self.name] = old
+            raise
+ return old
+
+ def default_validate(self, presentation, context):
+ value = None
+
+ try:
+ value = self.get(presentation, context)
+ except AriaException as e:
+ if e.issue:
+ context.validation.report(issue=e.issue)
+ except Exception as e:
+ context.validation.report(exception=e)
+ print_exception(e)
+
+ self.validate_value(value, context)
+
+ def validate_value(self, value, context):
+ if isinstance(value, list):
+ if self.field_variant == 'object_list':
+ for element in value:
+ if hasattr(element, '_validate'):
+ element._validate(context)
+ elif self.field_variant == 'sequenced_object_list':
+ for _, element in value:
+ if hasattr(element, '_validate'):
+ element._validate(context)
+ elif isinstance(value, dict):
+ if self.field_variant in ('object_dict', 'object_dict_unknown_fields'):
+ for inner_value in value.itervalues():
+ if hasattr(inner_value, '_validate'):
+ inner_value._validate(context)
+
+ if hasattr(value, '_validate'):
+ value._validate(context)
+
+    _thread_locals = threading.local()
+
+    @staticmethod
+    def _get_context():
+        # Use shared thread-local storage: a threading.local() created fresh on
+        # every call could never carry the attribute, so the lookup would always
+        # return None
+        return getattr(Field._thread_locals, 'aria_consumption_context', None)
+
+ def _coerce_primitive(self, value, context):
+ if context is None:
+ context = Field._get_context()
+ allow_primitive_coercion = (context.validation.allow_primitive_coersion
+ if context is not None
+ else True)
+ return validate_primitive(value, self.cls, allow_primitive_coercion)
+
+ # primitive
+
+ def _get_primitive(self, presentation, raw, value, context):
+ if (self.cls is not None and not isinstance(value, self.cls)
+ and value is not None and value is not NULL):
+ try:
+ return self._coerce_primitive(value, context)
+ except ValueError as e:
+ raise InvalidValueError('%s is not a valid "%s": %s' %
+ (self.full_name, self.full_cls_name, safe_repr(value)),
+ locator=self.get_locator(raw), cause=e)
+ return value
+
+ def _dump_primitive(self, context, value):
+ if hasattr(value, 'as_raw'):
+ value = as_raw(value)
+ puts('%s: %s' % (self.name, context.style.literal_style(value)))
+
+ # primitive list
+
+ def _get_primitive_list(self, presentation, raw, value, context):
+ if not isinstance(value, list):
+ raise InvalidValueError('%s is not a list: %s' % (self.full_name, safe_repr(value)),
+ locator=self.get_locator(raw))
+ primitive_list = value
+ if self.cls is not None:
+ if context is None:
+ context = Field._get_context()
+ primitive_list = []
+ for i, _ in enumerate(value):
+ primitive = value[i]
+ try:
+ primitive = self._coerce_primitive(primitive, context)
+ except ValueError as e:
+ raise InvalidValueError('%s is not a list of "%s": element %d is %s'
+ % (self.full_name,
+ self.full_cls_name,
+ i,
+ safe_repr(primitive)),
+ locator=self.get_locator(raw), cause=e)
+ if primitive in primitive_list:
+ raise InvalidValueError('%s has a duplicate "%s": %s'
+ % (self.full_name,
+ self.full_cls_name,
+ safe_repr(primitive)),
+ locator=self.get_locator(raw))
+ primitive_list.append(primitive)
+ return FrozenList(primitive_list)
+
+ def _dump_primitive_list(self, context, value):
+ puts('%s:' % self.name)
+ with context.style.indent():
+ for primitive in value:
+ if hasattr(primitive, 'as_raw'):
+ primitive = as_raw(primitive)
+ puts(context.style.literal_style(primitive))
+
+ # primitive dict
+
+ def _get_primitive_dict(self, presentation, raw, value, context):
+ if not isinstance(value, dict):
+ raise InvalidValueError('%s is not a dict: %s' % (self.full_name, safe_repr(value)),
+ locator=self.get_locator(raw))
+ primitive_dict = value
+ if self.cls is not None:
+ if context is None:
+ context = Field._get_context()
+ primitive_dict = OrderedDict()
+ for k, v in value.iteritems():
+ try:
+ primitive_dict[k] = self._coerce_primitive(v, context)
+ except ValueError as e:
+                    raise InvalidValueError('%s is not a dict of "%s" values: entry "%s" is %s'
+ % (self.full_name, self.full_cls_name, k, safe_repr(v)),
+ locator=self.get_locator(raw),
+ cause=e)
+ return FrozenDict(primitive_dict)
+
+ def _dump_primitive_dict(self, context, value):
+ puts('%s:' % self.name)
+ with context.style.indent():
+ for v in value.itervalues():
+ if hasattr(v, 'as_raw'):
+ v = as_raw(v)
+ puts(context.style.literal_style(v))
+
+ # object
+
+ def _get_object(self, presentation, raw, value, context):
+ try:
+ return self.cls(name=self.name, raw=value, container=presentation)
+ except TypeError as e:
+            raise InvalidValueError('%s cannot be initialized to an instance of "%s": %s'
+ % (self.full_name, self.full_cls_name, safe_repr(value)),
+ cause=e,
+ locator=self.get_locator(raw))
+
+ def _dump_object(self, context, value):
+ puts('%s:' % self.name)
+ with context.style.indent():
+ if hasattr(value, '_dump'):
+ value._dump(context)
+
+ # object list
+
+ def _get_object_list(self, presentation, raw, value, context):
+ if not isinstance(value, list):
+ raise InvalidValueError('%s is not a list: %s'
+ % (self.full_name, safe_repr(value)),
+ locator=self.get_locator(raw))
+ return FrozenList((self.cls(name=self.name, raw=v, container=presentation) for v in value))
+
+ def _dump_object_list(self, context, value):
+ puts('%s:' % self.name)
+ with context.style.indent():
+ for v in value:
+ if hasattr(v, '_dump'):
+ v._dump(context)
+
+ # object dict
+
+ def _get_object_dict(self, presentation, raw, value, context):
+ if not isinstance(value, dict):
+ raise InvalidValueError('%s is not a dict: %s' % (self.full_name, safe_repr(value)),
+ locator=self.get_locator(raw))
+ return FrozenDict(((k, self.cls(name=k, raw=v, container=presentation))
+ for k, v in value.iteritems()))
+
+ def _dump_object_dict(self, context, value):
+ puts('%s:' % self.name)
+ with context.style.indent():
+ for v in value.itervalues():
+ if hasattr(v, '_dump'):
+ v._dump(context)
+
+ # sequenced object list
+
+ def _get_sequenced_object_list(self, presentation, raw, value, context):
+ if not isinstance(value, list):
+ raise InvalidValueError('%s is not a sequenced list (a list of dicts, '
+ 'each with exactly one key): %s'
+ % (self.full_name, safe_repr(value)),
+ locator=self.get_locator(raw))
+ sequence = []
+ for v in value:
+ if not isinstance(v, dict):
+ raise InvalidValueError('%s list elements are not all dicts with '
+ 'exactly one key: %s' % (self.full_name, safe_repr(value)),
+ locator=self.get_locator(raw))
+ if len(v) != 1:
+ raise InvalidValueError('%s list elements do not all have exactly one key: %s'
+ % (self.full_name, safe_repr(value)),
+ locator=self.get_locator(raw))
+            key, element = v.items()[0]
+            sequence.append((key, self.cls(name=key, raw=element, container=presentation)))
+ return FrozenList(sequence)
+
+ def _dump_sequenced_object_list(self, context, value):
+ puts('%s:' % self.name)
+ for _, v in value:
+ if hasattr(v, '_dump'):
+ v._dump(context)
+
+ # primitive dict for unknown fields
+
+ def _get_primitive_dict_unknown_fields(self, presentation, raw, context):
+ if isinstance(raw, dict):
+ primitive_dict = raw
+ if self.cls is not None:
+ if context is None:
+ context = Field._get_context()
+ primitive_dict = OrderedDict()
+ for k, v in raw.iteritems():
+ if k not in presentation.FIELDS:
+ try:
+ primitive_dict[k] = self._coerce_primitive(v, context)
+ except ValueError as e:
+ raise InvalidValueError('%s is not a dict of "%s" values:'
+                                                    ' entry "%s" is %s'
+ % (self.full_name, self.full_cls_name,
+ k, safe_repr(v)),
+ locator=self.get_locator(raw),
+ cause=e)
+ return FrozenDict(primitive_dict)
+ return None
+
+ def _dump_primitive_dict_unknown_fields(self, context, value):
+ self._dump_primitive_dict(context, value)
+
+ # object dict for unknown fields
+
+ def _get_object_dict_unknown_fields(self, presentation, raw, context):
+ if isinstance(raw, dict):
+ return FrozenDict(((k, self.cls(name=k, raw=v, container=presentation))
+ for k, v in raw.iteritems() if k not in presentation.FIELDS))
+ return None
+
+ def _dump_object_dict_unknown_fields(self, context, value):
+ self._dump_object_dict(context, value)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/null.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/null.py
new file mode 100644
index 0000000..287d2ba
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/null.py
@@ -0,0 +1,67 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ...utils.collections import deepcopy_with_locators
+
+
+class Null(object):
+ """
+ Represents an explicit null value provided by the user, which is different from
+ not supplying a value at all.
+
+ It is a singleton.
+ """
+
+ @property
+ def as_raw(self):
+ return None
+
+NULL = Null()
+
+
+def none_to_null(value):
+ """
+ Convert ``None`` to ``NULL``, recursively.
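+
+    For example, ``none_to_null({'key': None})`` returns ``{'key': NULL}``.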
+ """
+
+ if value is None:
+ return NULL
+ if isinstance(value, list):
+ value = deepcopy_with_locators(value)
+ for i, _ in enumerate(value):
+ value[i] = none_to_null(value[i])
+ elif isinstance(value, dict):
+ value = deepcopy_with_locators(value)
+ for k, v in value.iteritems():
+ value[k] = none_to_null(v)
+ return value
+
+
+def null_to_none(value):
+ """
+ Convert ``NULL`` to ``None``, recursively.
+ """
+
+ if value is NULL:
+ return None
+ if isinstance(value, list):
+ value = deepcopy_with_locators(value)
+ for i, _ in enumerate(value):
+            value[i] = null_to_none(value[i])
+ elif isinstance(value, dict):
+ value = deepcopy_with_locators(value)
+ for k, v in value.iteritems():
+            value[k] = null_to_none(v)
+ return value
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/presentation.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/presentation.py
new file mode 100644
index 0000000..3f9f86d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/presentation.py
@@ -0,0 +1,248 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ...utils.caching import HasCachedMethods
+from ...utils.collections import deepcopy_with_locators
+from ...utils.formatting import safe_repr
+from ...utils.type import full_type_name
+from ...utils.console import puts
+from ..validation import Issue
+from .null import none_to_null
+from .utils import (get_locator, validate_no_short_form, validate_no_unknown_fields,
+ validate_known_fields, validate_primitive)
+
+
+class Value(object):
+ """
+ Encapsulates a typed value assignment.
+ """
+
+ def __init__(self, type_name, value, description, required):
+ self.type = deepcopy_with_locators(type_name)
+ self.value = deepcopy_with_locators(value)
+ self.description = deepcopy_with_locators(description)
+ self.required = deepcopy_with_locators(required)
+
+ def _dump(self, context):
+ if self.type is not None:
+ puts(context.style.type_style(self.type))
+ if self.value is not None:
+ puts(context.style.literal_style(self.value))
+ if self.description is not None:
+ puts(context.style.meta_style(self.description))
+ if self.required is not None:
+ puts(context.style.required_style(self.required))
+
+
+class PresentationBase(HasCachedMethods):
+ """
+ Base class for ARIA presentation classes.
+ """
+
+ def __init__(self, name=None, raw=None, container=None):
+ self._name = name
+ self._raw = raw
+ self._container = container
+ super(PresentationBase, self).__init__()
+
+ @property
+ def as_raw(self):
+ return self._raw
+
+ def _validate(self, context):
+ """
+ Validates the presentation while reporting errors in the validation context but *not*
+ raising exceptions.
+
+        The base class does nothing, but subclasses may override this for specialized
+        validation.
+ """
+
+ @property
+ def _fullname(self):
+ """
+        Returns a usable full name for the presentation: its own name if set, otherwise
+        its container's full name, finally defaulting to the class name.
+ """
+
+ if self._name is not None:
+ return self._name
+ elif self._container is not None:
+ return self._container._fullname
+ return full_type_name(self)
+
+ @property
+ def _locator(self):
+ """
+        Attempts to return the most relevant locator: our own if we have one, otherwise
+        our container's.
+
+ :rtype: :class:`aria.parser.reading.Locator`
+ """
+
+ return get_locator(self._raw, self._container)
+
+ def _get(self, *names):
+ """
+ Gets attributes recursively.
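+
+        For example, ``presentation._get('a', 'b', 'c')`` is roughly equivalent to
+        ``presentation.a.b.c``, except that it returns ``None`` if any link in the
+        chain is missing.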
+ """
+
+ obj = self
+ if (obj is not None) and names:
+ for name in names:
+ obj = getattr(obj, name, None)
+ if obj is None:
+ break
+ return obj
+
+ def _get_from_dict(self, *names):
+ """
+ Gets attributes recursively, except for the last name which is used to get a value from the
+ last dict.
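+
+        For example (with illustrative names), ``self._get_from_dict('a', 'b', 'c')``
+        returns ``self.a.b['c']`` if it exists, and ``None`` otherwise.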
+ """
+
+ if names:
+ obj = self._get(*names[:-1])
+ if isinstance(obj, dict):
+ return obj.get(names[-1]) # pylint: disable=no-member
+ return None
+
+ def _get_child_locator(self, *names):
+ """
+        Attempts to return the locator of one of our children, defaulting to our own
+        locator if not found.
+
+ :rtype: :class:`aria.parser.reading.Locator`
+ """
+
+ if hasattr(self._raw, '_locator'):
+ locator = self._raw._locator
+ if locator is not None:
+ return locator.get_child(*names)
+ return self._locator
+
+ def _dump(self, context):
+ """
+ Emits a colorized representation.
+
+        The base class will emit a sensible default representation of the fields (by calling
+ ``_dump_content``), but subclasses may override this for specialized dumping.
+ """
+
+ if self._name:
+ puts(context.style.node(self._name))
+ with context.style.indent():
+ self._dump_content(context)
+ else:
+ self._dump_content(context)
+
+ def _dump_content(self, context, field_names=None):
+ """
+ Emits a colorized representation of the contents.
+
+ The base class will call ``_dump_field`` on all the fields, but subclasses may override
+ this for specialized dumping.
+ """
+
+ if field_names:
+ for field_name in field_names:
+ self._dump_field(context, field_name)
+ elif hasattr(self, '_iter_field_names'):
+ for field_name in self._iter_field_names(): # pylint: disable=no-member
+ self._dump_field(context, field_name)
+ else:
+ puts(context.style.literal_style(self._raw))
+
+ def _dump_field(self, context, field_name):
+ """
+ Emits a colorized representation of the field.
+
+ According to the field type, this may trigger nested recursion. The nested types will
+ delegate to their ``_dump`` methods.
+ """
+
+ field = self.FIELDS[field_name] # pylint: disable=no-member
+ field.dump(self, context)
+
+ def _clone(self, container=None):
+ """
+ Creates a clone of this presentation, optionally allowing for a new container.
+ """
+
+ raw = deepcopy_with_locators(self._raw)
+ if container is None:
+ container = self._container
+ return self.__class__(name=self._name, raw=raw, container=container)
+
+
+class Presentation(PresentationBase):
+ """
+ Base class for ARIA presentations. A presentation is a Pythonic wrapper around agnostic raw
+ data, adding the ability to read and modify the data with proper validation.
+
+ ARIA presentation classes will often be decorated with :func:`has_fields`, as that mechanism
+ automates a lot of field-specific validation. However, that is not a requirement.
+
+ Make sure that your utility property and method names begin with a ``_``, because those names
+ without a ``_`` prefix are normally reserved for fields.
+ """
+
+ def _validate(self, context):
+ validate_no_short_form(context, self)
+ validate_no_unknown_fields(context, self)
+ validate_known_fields(context, self)
+
+
+class AsIsPresentation(PresentationBase):
+ """
+ Base class for trivial ARIA presentations that provide the raw value as is.
+ """
+
+ def __init__(self, name=None, raw=None, container=None, cls=None):
+ super(AsIsPresentation, self).__init__(name, raw, container)
+ self.cls = cls
+
+ @property
+ def value(self):
+ return none_to_null(self._raw)
+
+ @value.setter
+ def value(self, value):
+ self._raw = value
+
+ @property
+ def _full_cls_name(self):
+ name = full_type_name(self.cls) if self.cls is not None else None
+ if name == 'unicode':
+ # For simplicity, display "unicode" as "str"
+ name = 'str'
+ return name
+
+ def _validate(self, context):
+ try:
+ validate_primitive(self._raw, self.cls, context.validation.allow_primitive_coersion)
+ except ValueError as e:
+ context.validation.report('"%s" is not a valid "%s": %s'
+ % (self._fullname, self._full_cls_name, safe_repr(self._raw)),
+ locator=self._locator,
+ level=Issue.FIELD,
+ exception=e)
+
+ def _dump(self, context):
+ if hasattr(self._raw, '_dump'):
+ puts(context.style.node(self._name))
+ with context.style.indent():
+ self._raw._dump(context)
+ else:
+ super(AsIsPresentation, self)._dump(context)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/presenter.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/presenter.py
new file mode 100644
index 0000000..9fd296f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/presenter.py
@@ -0,0 +1,70 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ...utils.collections import merge
+from ...utils.formatting import safe_repr
+from ..validation import Issue
+from .presentation import Presentation
+
+
+class Presenter(Presentation):
+ """
+ Base class for ARIA presenters.
+
+ Presenters provide a robust API over agnostic raw data.
+ """
+
+ DSL_VERSIONS = None
+ ALLOWED_IMPORTED_DSL_VERSIONS = None
+
+ @classmethod
+ def can_present(cls, raw):
+ dsl = raw.get('tosca_definitions_version')
+ assert cls.DSL_VERSIONS
+ return dsl in cls.DSL_VERSIONS
+
+ def _validate_import(self, context, presentation):
+ tosca_definitions_version = presentation.service_template.tosca_definitions_version
+ assert self.ALLOWED_IMPORTED_DSL_VERSIONS
+ if tosca_definitions_version is not None \
+ and tosca_definitions_version not in self.__class__.ALLOWED_IMPORTED_DSL_VERSIONS:
+ context.validation.report(
+ 'import "tosca_definitions_version" is not one of %s: %s'
+ % (' or '.join([safe_repr(v)
+ for v in self.__class__.ALLOWED_IMPORTED_DSL_VERSIONS]),
+ presentation.service_template.tosca_definitions_version),
+ locator=presentation._get_child_locator('inputs'),
+ level=Issue.BETWEEN_TYPES)
+ return False
+ return True
+
+ def _merge_import(self, presentation):
+ merge(self._raw, presentation._raw)
+ if hasattr(self._raw, '_locator') and hasattr(presentation._raw, '_locator'):
+ self._raw._locator.merge(presentation._raw._locator)
+
+ def _link_locators(self):
+ if hasattr(self._raw, '_locator'):
+ locator = self._raw._locator
+ delattr(self._raw, '_locator')
+ locator.link(self._raw)
+
+ @staticmethod
+ def _get_import_locations(context):
+ raise NotImplementedError
+
+ @staticmethod
+ def _get_deployment_template(context):
+ raise NotImplementedError
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/source.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/source.py
new file mode 100644
index 0000000..4bfb8e1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/source.py
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ...extension import parser
+
+from .exceptions import PresenterNotFoundError
+
+
+class PresenterSource(object):
+ """
+ Base class for ARIA presenter sources.
+
+ Presenter sources provide appropriate :class:`Presenter` classes for agnostic raw data.
+ """
+
+ def get_presenter(self, raw): # pylint: disable=unused-argument,no-self-use
+ raise PresenterNotFoundError('presenter not found')
+
+
+class DefaultPresenterSource(PresenterSource):
+ """
+ The default ARIA presenter source.
+ """
+
+ def __init__(self, classes=None):
+ if classes is None:
+ classes = parser.presenter_class()
+ self.classes = classes
+
+ def get_presenter(self, raw):
+ for cls in self.classes:
+ if cls.can_present(raw):
+ return cls
+
+ if 'tosca_definitions_version' in raw:
+ if raw['tosca_definitions_version'] is None:
+ raise PresenterNotFoundError("'tosca_definitions_version' is not specified")
+ if not isinstance(raw['tosca_definitions_version'], basestring):
+ raise PresenterNotFoundError("'tosca_definitions_version' is not a string")
+ if not raw['tosca_definitions_version']:
+ raise PresenterNotFoundError("'tosca_definitions_version' is not specified")
+ return super(DefaultPresenterSource, self).get_presenter(raw)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/utils.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/utils.py
new file mode 100644
index 0000000..f0fd390
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/presentation/utils.py
@@ -0,0 +1,187 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from types import FunctionType
+
+from ...utils.formatting import safe_repr
+from ...utils.type import full_type_name
+from ..validation import Issue
+from .null import NULL
+
+
+def get_locator(*values):
+ """
+ Gets the first available locator.
+
+ :rtype: :class:`aria.parser.reading.Locator`
+ """
+
+ for v in values:
+ if hasattr(v, '_locator'):
+ locator = v._locator
+ if locator is not None:
+ return locator
+ return None
+
+
+def parse_types_dict_names(types_dict_names):
+ """
+    If the first element in the array is a function, extracts it and returns it
+    separately.
+ """
+
+ convert = None
+ if isinstance(types_dict_names[0], FunctionType):
+ convert = types_dict_names[0]
+ types_dict_names = types_dict_names[1:]
+ return types_dict_names, convert
+
+
+def validate_primitive(value, cls, coerce=False):
+ """
+ Checks if the value is of the primitive type, optionally attempting to coerce it
+ if it is not.
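+
+    For example, ``validate_primitive('42', int, coerce=True)`` returns ``42``,
+    while ``validate_primitive('42', int)`` raises ``ValueError``.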
+
+ :raises ValueError: if not a primitive type or if coercion failed.
+ """
+
+ if (cls is not None) and (value is not None) and (value is not NULL):
+ if (cls is unicode) or (cls is str): # These two types are interchangeable
+ valid = isinstance(value, basestring)
+ elif cls is int:
+ # In Python, a bool is an int
+ valid = isinstance(value, int) and not isinstance(value, bool)
+ else:
+ valid = isinstance(value, cls)
+ if not valid:
+ if coerce:
+ value = cls(value)
+ else:
+ raise ValueError('not a "%s": %s' % (full_type_name(cls), safe_repr(value)))
+ return value
+
+
+def validate_no_short_form(context, presentation):
+ """
+    Makes sure that short-form definitions are used only where they are allowed.
+ """
+
+ if not hasattr(presentation, 'SHORT_FORM_FIELD') and not isinstance(presentation._raw, dict):
+ context.validation.report('short form not allowed for field "%s"' % presentation._fullname,
+ locator=presentation._locator,
+ level=Issue.BETWEEN_FIELDS)
+
+
+def validate_no_unknown_fields(context, presentation):
+ """
+    Makes sure that unknown fields are used only where they are allowed.
+ """
+
+ if not getattr(presentation, 'ALLOW_UNKNOWN_FIELDS', False) \
+ and not context.validation.allow_unknown_fields \
+ and isinstance(presentation._raw, dict) \
+ and hasattr(presentation, 'FIELDS'):
+ for k in presentation._raw:
+ if k not in presentation.FIELDS:
+ context.validation.report('field "%s" is not supported in "%s"'
+ % (k, presentation._fullname),
+ locator=presentation._get_child_locator(k),
+ level=Issue.BETWEEN_FIELDS)
+
+
+def validate_known_fields(context, presentation):
+ """
+ Validates all known fields.
+ """
+
+ if hasattr(presentation, '_iter_fields'):
+ for _, field in presentation._iter_fields():
+ field.validate(presentation, context)
+
+
+def get_parent_presentation(context, presentation, *types_dict_names):
+ """
+ Returns the parent presentation according to the ``derived_from`` field, or ``None`` if invalid.
+
+ Checks that we do not derive from ourselves and that we do not cause a circular hierarchy.
+
+ The arguments from the third onwards are used to locate a nested field under
+ ``service_template`` under the root presenter. The first of these can optionally be a function,
+ in which case it will be called to convert type names. This can be used to support shorthand
+ type names, aliases, etc.
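+
+    Example (the converter function and dict name here are illustrative)::
+
+        parent = get_parent_presentation(context, presentation,
+                                         convert_name, 'node_types')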
+ """
+
+ type_name = presentation.derived_from
+
+ if type_name is None:
+ return None
+
+ types_dict_names, convert = parse_types_dict_names(types_dict_names)
+ types_dict = context.presentation.get('service_template', *types_dict_names) or {}
+
+ if convert:
+ type_name = convert(context, type_name, types_dict)
+
+ # Make sure not derived from self
+ if type_name == presentation._name:
+ return None
+ # Make sure derived from type exists
+ elif type_name not in types_dict:
+ return None
+ else:
+ # Make sure derivation hierarchy is not circular
+ hierarchy = [presentation._name]
+ presentation_copy = presentation
+ while presentation_copy.derived_from is not None:
+ derived_from = presentation_copy.derived_from
+ if convert:
+ derived_from = convert(context, derived_from, types_dict)
+
+ if derived_from == presentation_copy._name or derived_from not in types_dict:
+ return None
+ presentation_copy = types_dict[derived_from]
+ if presentation_copy._name in hierarchy:
+ return None
+ hierarchy.append(presentation_copy._name)
+
+ return types_dict[type_name]
+
+
+def report_issue_for_unknown_type(context, presentation, type_name, field_name, value=None):
+ if value is None:
+ value = getattr(presentation, field_name)
+ context.validation.report('"%s" refers to an unknown %s in "%s": %s'
+ % (field_name, type_name, presentation._fullname, safe_repr(value)),
+ locator=presentation._get_child_locator(field_name),
+ level=Issue.BETWEEN_TYPES)
+
+
+def report_issue_for_parent_is_self(context, presentation, field_name):
+ context.validation.report('parent type of "%s" is self' % presentation._fullname,
+ locator=presentation._get_child_locator(field_name),
+ level=Issue.BETWEEN_TYPES)
+
+
+def report_issue_for_unknown_parent_type(context, presentation, field_name):
+ context.validation.report('unknown parent type "%s" in "%s"'
+ % (getattr(presentation, field_name), presentation._fullname),
+ locator=presentation._get_child_locator(field_name),
+ level=Issue.BETWEEN_TYPES)
+
+
+def report_issue_for_circular_type_hierarchy(context, presentation, field_name):
+ context.validation.report('"%s" of "%s" creates a circular type hierarchy'
+ % (getattr(presentation, field_name), presentation._fullname),
+ locator=presentation._get_child_locator(field_name),
+ level=Issue.BETWEEN_TYPES)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/__init__.py
new file mode 100644
index 0000000..c110585
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/__init__.py
@@ -0,0 +1,60 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Reading package.
+
+.. autosummary::
+ :nosignatures:
+
+ aria.parser.reading.ReadingContext
+ ReaderException
+ ReaderNotFoundError
+ ReaderSyntaxError
+ AlreadyReadException
+ JinjaReader
+ JsonReader
+ Locator
+ RawReader
+ Reader
+ ReaderSource
+ DefaultReaderSource
+ YamlReader
+"""
+
+from .raw import RawReader
+from .reader import Reader
+from .yaml import YamlReader
+from .locator import Locator
+from .json import JsonReader
+from .jinja import JinjaReader
+from .context import ReadingContext
+from .source import ReaderSource, DefaultReaderSource
+from .exceptions import (ReaderException,
+ ReaderNotFoundError,
+ ReaderSyntaxError,
+ AlreadyReadException)
+
+__all__ = (
+ 'ReaderException',
+ 'ReaderNotFoundError',
+ 'ReaderSyntaxError',
+ 'AlreadyReadException',
+ 'Reader',
+ 'ReaderSource',
+ 'DefaultReaderSource',
+ 'ReadingContext',
+ 'RawReader',
+ 'Locator',
+ 'YamlReader',
+ 'JsonReader',
+ 'JinjaReader')
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/context.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/context.py
new file mode 100644
index 0000000..233e407
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/context.py
@@ -0,0 +1,31 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ...utils.threading import LockedList
+from .source import DefaultReaderSource
+
+
+class ReadingContext(object):
+ """
+ Reading context.
+
+ :ivar reader_source: for finding reader instances
+ :vartype reader_source: ReaderSource
+ :ivar reader: overrides ``reader_source`` with a specific class
+ :vartype reader: type
+ """
+
+ def __init__(self):
+ self.reader_source = DefaultReaderSource()
+ self.reader = None
+
+ self._locations = LockedList() # for keeping track of locations already read
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/exceptions.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/exceptions.py
new file mode 100644
index 0000000..3699729
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/exceptions.py
@@ -0,0 +1,44 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ...exceptions import AriaException
+from ..validation import Issue
+
+
+class ReaderException(AriaException):
+ """
+ ARIA reader exception.
+ """
+
+
+class ReaderNotFoundError(ReaderException):
+ """
+ ARIA reader error: reader not found for source.
+ """
+
+
+class ReaderSyntaxError(ReaderException):
+ """
+ ARIA read format error.
+ """
+
+ def __init__(self, message, cause=None, cause_tb=None, location=None, line=None,
+ column=None, locator=None, snippet=None, level=Issue.SYNTAX):
+ super(ReaderSyntaxError, self).__init__(message, cause, cause_tb)
+ self.issue = Issue(message, location=location, line=line, column=column,
+ locator=locator, snippet=snippet, level=level)
+
+
+class AlreadyReadException(ReaderException):
+ """
+ ARIA reader exception: already read.
+ """
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/jinja.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/jinja.py
new file mode 100644
index 0000000..687317a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/jinja.py
@@ -0,0 +1,55 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from jinja2 import Template
+
+from ... import __version__ as version
+from ..loading import LiteralLocation, LiteralLoader
+from .reader import Reader
+from .exceptions import ReaderSyntaxError
+
+
+# TODO: we could put a lot of other useful stuff here.
+CONTEXT = {
+ 'ARIA_VERSION': version,
+ 'ENV': os.environ}
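+# Templates can thus refer to, for example, {{ ARIA_VERSION }} or {{ ENV['PATH'] }}.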
+
+
+class JinjaReader(Reader):
+ """
+ ARIA Jinja reader.
+
+ Forwards the rendered result to a new reader in the reader source.
+ """
+
+ def read(self):
+ data = self.load()
+ try:
+ data = str(data)
+ template = Template(data)
+ literal = template.render(CONTEXT)
+ # TODO: might be useful to write the literal result to a file for debugging
+ location = self.location
+ if isinstance(location, basestring) and location.endswith('.jinja'):
+                # Use a reader based on the location with the ".jinja" extension stripped off
+ location = location[:-6]
+ next_reader = self.context.reading.reader_source.get_reader(
+ self.context, LiteralLocation(literal, name=location), LiteralLoader(literal))
+ else:
+ # Use reader for literal loader
+ next_reader = self.context.reading.reader_source.get_reader(
+ self.context, LiteralLocation(literal), LiteralLoader(literal))
+ return next_reader.read()
+ except Exception as e:
+ raise ReaderSyntaxError('Jinja: %s' % e, cause=e)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/json.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/json.py
new file mode 100644
index 0000000..d144f80
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/json.py
@@ -0,0 +1,33 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import # so we can import standard 'json'
+
+import json
+
+from ...utils.collections import OrderedDict
+from .reader import Reader
+from .exceptions import ReaderSyntaxError
+
+
+class JsonReader(Reader):
+ """
+ ARIA JSON reader.
+ """
+
+ def read(self):
+ data = self.load()
+ try:
+ data = unicode(data)
+ return json.loads(data, object_pairs_hook=OrderedDict)
+ except Exception as e:
+ raise ReaderSyntaxError('JSON: %s' % e, cause=e)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/locator.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/locator.py
new file mode 100644
index 0000000..57b4d50
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/locator.py
@@ -0,0 +1,119 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ...utils.console import puts, Colored, indent
+
+
+# We are inheriting the primitive types in order to add the ability to set
+# an attribute (_locator) on them.
+
+class LocatableString(unicode):
+ pass
+
+
+class LocatableInt(int):
+ pass
+
+
+class LocatableFloat(float):
+ pass
+
+
+def wrap(value):
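+    """
+    Wraps a primitive value in its locatable equivalent, so that a ``_locator``
+    attribute can be attached to it. Returns a ``(was_wrapped, value)`` tuple.
+    """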
+ if isinstance(value, basestring):
+ return True, LocatableString(value)
+ elif isinstance(value, int) and \
+ not isinstance(value, bool): # Note: bool counts as int in Python!
+ return True, LocatableInt(value)
+ elif isinstance(value, float):
+ return True, LocatableFloat(value)
+ return False, value
+
+
+class Locator(object):
+ """
+ Stores location information (line and column numbers) for agnostic raw data.
+ """
+ def __init__(self, location, line, column, children=None):
+ self.location = location
+ self.line = line
+ self.column = column
+ self.children = children
+
+ def get_child(self, *names):
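+        # Descends the locator tree along the given path of keys, falling back to
+        # the current locator when a child is not found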
+ if (not names) or (not isinstance(self.children, dict)):
+ return self
+ name = names[0]
+ if name not in self.children:
+ return self
+ child = self.children[name]
+        return child.get_child(*names[1:])
+
+ def link(self, raw, path=None):
+ if hasattr(raw, '_locator'):
+ # This can happen when we use anchors
+ return
+
+ try:
+ setattr(raw, '_locator', self)
+ except AttributeError:
+ return
+
+ if isinstance(raw, list):
+ for i, raw_element in enumerate(raw):
+ wrapped, raw_element = wrap(raw_element)
+ if wrapped:
+ raw[i] = raw_element
+ child_path = '%s.%d' % (path, i) if path else str(i)
+ try:
+ self.children[i].link(raw_element, child_path)
+ except KeyError:
+ raise ValueError('location map does not match agnostic raw data: %s' %
+ child_path)
+ elif isinstance(raw, dict):
+ for k, raw_element in raw.iteritems():
+ wrapped, raw_element = wrap(raw_element)
+ if wrapped:
+ raw[k] = raw_element
+ child_path = '%s.%s' % (path, k) if path else k
+ try:
+ self.children[k].link(raw_element, child_path)
+ except KeyError:
+ raise ValueError('location map does not match agnostic raw data: %s' %
+ child_path)
+
+ def merge(self, locator):
+ if isinstance(self.children, dict) and isinstance(locator.children, dict):
+ for k, loc in locator.children.iteritems():
+ if k in self.children:
+ self.children[k].merge(loc)
+ else:
+ self.children[k] = loc
+
+ def dump(self, key=None):
+ if key:
+ puts('%s "%s":%d:%d' %
+ (Colored.red(key), Colored.blue(self.location), self.line, self.column))
+ else:
+ puts('"%s":%d:%d' % (Colored.blue(self.location), self.line, self.column))
+ if isinstance(self.children, list):
+ with indent(2):
+ for loc in self.children:
+ loc.dump()
+ elif isinstance(self.children, dict):
+ with indent(2):
+ for k, loc in self.children.iteritems():
+ loc.dump(k)
+
+ def __str__(self):
+ # Should be in same format as Issue.locator_as_str
+ return '"%s":%d:%d' % (self.location, self.line, self.column)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/raw.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/raw.py
new file mode 100644
index 0000000..ed980ac
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/raw.py
@@ -0,0 +1,24 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .reader import Reader
+
+
+class RawReader(Reader):
+ """
+ ARIA raw reader.
+
+ Expects to receive agnostic raw data from the loader, and so does nothing to it.
+ """
+
+ def read(self):
+ return self.load()
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/reader.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/reader.py
new file mode 100644
index 0000000..1a29f11
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/reader.py
@@ -0,0 +1,44 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ...utils.openclose import OpenClose
+from .exceptions import ReaderException, AlreadyReadException
+
+
+class Reader(object):
+ """
+ Base class for ARIA readers.
+
+ Readers provide agnostic raw data by consuming :class:`aria.parser.loading.Loader` instances.
+ """
+
+ def __init__(self, context, location, loader):
+ self.context = context
+ self.location = location
+ self.loader = loader
+
+ def load(self):
+ with OpenClose(self.loader) as loader:
+ if self.context is not None:
+ with self.context._locations:
+ for location in self.context._locations:
+ if location.is_equivalent(loader.location):
+ raise AlreadyReadException('already read: %s' % loader.location)
+ self.context._locations.append(loader.location)
+
+ data = loader.load()
+ if data is None:
+ raise ReaderException('loader did not provide data: %s' % loader)
+ return data
+
+ def read(self):
+ raise NotImplementedError
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/source.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/source.py
new file mode 100644
index 0000000..6fff2f6
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/source.py
@@ -0,0 +1,59 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ..loading import LiteralLocation, UriLocation
+from .yaml import YamlReader
+from .json import JsonReader
+from .jinja import JinjaReader
+from .exceptions import ReaderNotFoundError
+
+
+EXTENSIONS = {
+ '.yaml': YamlReader,
+ '.json': JsonReader,
+ '.jinja': JinjaReader}
+
+
+class ReaderSource(object):
+ """
+ Base class for ARIA reader sources.
+
+ Reader sources provide appropriate :class:`Reader` instances for locations.
+ """
+
+ @staticmethod
+ def get_reader(context, location, loader): # pylint: disable=unused-argument
+ raise ReaderNotFoundError('location: %s' % location)
+
+
+class DefaultReaderSource(ReaderSource):
+ """
+ The default ARIA reader source will generate a :class:`YamlReader` for
+ locations that end in ".yaml", a :class:`JsonReader` for locations that
+ end in ".json", and a :class:`JinjaReader` for locations that end in
+ ".jinja".
+ """
+
+ def __init__(self, literal_reader_class=YamlReader):
+ super(DefaultReaderSource, self).__init__()
+ self.literal_reader_class = literal_reader_class
+
+ def get_reader(self, context, location, loader):
+ if isinstance(location, LiteralLocation):
+ return self.literal_reader_class(context, location, loader)
+
+ elif isinstance(location, UriLocation):
+ for extension, reader_class in EXTENSIONS.iteritems():
+ if location.uri.endswith(extension):
+ return reader_class(context, location, loader)
+
+ return super(DefaultReaderSource, self).get_reader(context, location, loader)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/yaml.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/yaml.py
new file mode 100644
index 0000000..f5eac43
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/reading/yaml.py
@@ -0,0 +1,113 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ruamel import yaml # @UnresolvedImport
+
+from ...utils.collections import OrderedDict
+from .reader import Reader
+from .locator import Locator
+from .exceptions import ReaderSyntaxError
+from .locator import LocatableString, LocatableInt, LocatableFloat
+
+# Add our types to ruamel.yaml
+yaml.representer.RoundTripRepresenter.add_representer(
+ LocatableString, yaml.representer.RoundTripRepresenter.represent_unicode)
+yaml.representer.RoundTripRepresenter.add_representer(
+ LocatableInt, yaml.representer.RoundTripRepresenter.represent_int)
+yaml.representer.RoundTripRepresenter.add_representer(
+ LocatableFloat, yaml.representer.RoundTripRepresenter.represent_float)
+
+MERGE_TAG = u'tag:yaml.org,2002:merge'
+MAP_TAG = u'tag:yaml.org,2002:map'
+
+
+class YamlLocator(Locator):
+ """
+ Map for agnostic raw data read from YAML.
+ """
+
+ def add_children(self, node):
+ if isinstance(node, yaml.SequenceNode):
+ self.children = []
+ for child_node in node.value:
+ self.add_child(child_node)
+ elif isinstance(node, yaml.MappingNode):
+ self.children = {}
+ for k, child_node in node.value:
+ self.add_child(child_node, k)
+
+ def add_child(self, node, key=None):
+ locator = YamlLocator(self.location, node.start_mark.line + 1, node.start_mark.column + 1)
+ if key is not None:
+ # Dict
+ if key.tag == MERGE_TAG:
+ for merge_key, merge_node in node.value:
+ self.add_child(merge_node, merge_key)
+ else:
+ self.children[key.value] = locator
+ else:
+ # List
+ self.children.append(locator)
+ locator.add_children(node)
+
+
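+# Constructs YAML mappings as OrderedDict; the two-step yield form lets the
+# constructor resolve recursive anchors/aliases before the mapping is filled in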
+def construct_yaml_map(self, node):
+ data = OrderedDict()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+
+yaml.constructor.SafeConstructor.add_constructor(MAP_TAG, construct_yaml_map)
+
+
+class YamlReader(Reader):
+ """
+ ARIA YAML reader.
+ """
+
+ def read(self):
+ data = self.load()
+ try:
+ data = unicode(data)
+ # see issue here:
+ # https://bitbucket.org/ruamel/yaml/issues/61/roundtriploader-causes-exceptions-with
+ #yaml_loader = yaml.RoundTripLoader(data)
+ yaml_loader = yaml.SafeLoader(data)
+ try:
+ node = yaml_loader.get_single_node()
+ locator = YamlLocator(self.loader.location, 0, 0)
+ if node is not None:
+ locator.add_children(node)
+ raw = yaml_loader.construct_document(node)
+ else:
+ raw = OrderedDict()
+ #locator.dump()
+ setattr(raw, '_locator', locator)
+ return raw
+ finally:
+ yaml_loader.dispose()
+ except yaml.parser.MarkedYAMLError as e:
+ context = e.context or 'while parsing'
+ problem = e.problem
+ line = e.problem_mark.line
+ column = e.problem_mark.column
+ snippet = e.problem_mark.get_snippet()
+ raise ReaderSyntaxError('YAML %s: %s %s' %
+ (e.__class__.__name__, problem, context),
+ location=self.loader.location,
+ line=line,
+ column=column,
+ snippet=snippet,
+ cause=e)
+ except Exception as e:
+ raise ReaderSyntaxError('YAML: %s' % e, cause=e)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/specification.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/specification.py
new file mode 100644
index 0000000..4f452b8
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/specification.py
@@ -0,0 +1,69 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities for cross-referencing code with specification documents.
+"""
+
+import re
+
+from ..extension import parser
+from ..utils.collections import OrderedDict
+from ..utils.specification import (DSL_SPECIFICATIONS, implements_specification) # pylint: disable=unused-import
+
+
+def iter_specifications():
+ """
+ Iterates all specification assignments in the codebase.
+ """
+ def iter_sections(spec, sections):
+ for k in sorted(sections.keys(), key=_section_key):
+            yield k, _fix_details(sections[k], spec)
+
+ for spec, sections in DSL_SPECIFICATIONS.iteritems():
+ yield spec, iter_sections(spec, sections)
+
+
+def _section_key(value):
+ try:
+ parts = value.split('-', 1)
+        # tuple() enables proper ordering comparisons (a bare generator would
+        # compare by identity) and forces int() to run here, inside the try block
+        first = tuple(int(v) for v in parts[0].split('.'))
+ second = parts[1] if len(parts) > 1 else None
+ return (first, second)
+ except ValueError:
+ return value
+
+
+def _fix_details(details, spec):
+ code = details.get('code')
+ doc = details.get('doc')
+ url = parser.specification_url().get(spec)
+
+ if (url is not None) and (doc is not None):
+ # Look for a URL in ReST docstring that begins with our url
+ pattern = r'<?('
+ for char in url:
+ pattern += r'\s*'
+ pattern += re.escape(char)
+ pattern += r'[^>]+)>'
+ match = re.search(pattern, doc)
+ if match:
+ url = re.sub(r'\s+', '', match.group(1))
+
+ return OrderedDict((
+ ('code', code),
+ ('url', url)))
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/validation/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/validation/__init__.py
new file mode 100644
index 0000000..21632ba
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/validation/__init__.py
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Validation package.
+"""
+
+from .issue import Issue
+from .context import ValidationContext
+
+__all__ = (
+ 'ValidationContext',
+ 'Issue')
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/validation/context.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/validation/context.py
new file mode 100644
index 0000000..da9eef6
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/validation/context.py
@@ -0,0 +1,36 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import issue
+
+
+class ValidationContext(issue.ReporterMixin):
+ """
+ Validation context.
+
+ :ivar allow_unknown_fields: when ``False`` (the default) will report an issue if an unknown
+ field is used
+ :vartype allow_unknown_fields: bool
+ :ivar allow_primitive_coersion: when ``False`` (the default) will not attempt to coerce
+ primitive field types
+ :vartype allow_primitive_coersion: bool
+ :ivar max_level: maximum validation level to report (default is all)
+ :vartype max_level: int
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(ValidationContext, self).__init__(*args, **kwargs)
+ self.allow_unknown_fields = False
+ self.allow_primitive_coersion = False
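A minimal usage sketch, using the package exports wired up in ``__init__.py`` above (the file name and message are illustrative):

    from aria.parser.validation import ValidationContext, Issue

    context = ValidationContext()      # allow_unknown_fields / allow_primitive_coersion
                                       # default to False; consumers of the context check them
    context.report(message='unknown field "foo"', level=Issue.FIELD,
                   location='service-template.yaml', line=12, column=3)
    if context.has_issues:
        context.dump_issues()          # pretty-prints the issues to the console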
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/parser/validation/issue.py b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/validation/issue.py
new file mode 100644
index 0000000..42fc580
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/parser/validation/issue.py
@@ -0,0 +1,190 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import # so we can import standard 'collections'
+
+from ...utils import (
+ collections,
+ type,
+ threading,
+ exceptions,
+ console,
+ formatting
+)
+
+
+class Issue(object):
+ PLATFORM = 0
+ """
+ Platform error (e.g. I/O, hardware, a bug in ARIA)
+ """
+
+ SYNTAX = 1
+ """
+ Syntax and format (e.g. YAML, XML, JSON)
+ """
+
+ FIELD = 2
+ """
+ Single field
+ """
+
+ BETWEEN_FIELDS = 3
+ """
+ Relationships between fields within the type (internal grammar)
+ """
+
+ BETWEEN_TYPES = 4
+ """
+ Relationships between types (e.g. inheritance, external grammar)
+ """
+
+ BETWEEN_INSTANCES = 5
+ """
+ Topology (e.g. static requirements and capabilities)
+ """
+
+ EXTERNAL = 6
+ """
+ External (e.g. live requirements and capabilities)
+ """
+
+ ALL = 100
+
+ def __init__(self, message=None, exception=None, location=None, line=None,
+ column=None, locator=None, snippet=None, level=0):
+ if message is not None:
+ self.message = str(message)
+ elif exception is not None:
+ self.message = str(exception)
+ else:
+ self.message = 'unknown issue'
+
+ self.exception = exception
+
+ if locator is not None:
+ self.location = locator.location
+ self.line = locator.line
+ self.column = locator.column
+ else:
+ self.location = location
+ self.line = line
+ self.column = column
+
+ self.snippet = snippet
+ self.level = level
+
+ @property
+ def as_raw(self):
+ return collections.OrderedDict((
+ ('level', self.level),
+ ('message', self.message),
+ ('location', self.location),
+ ('line', self.line),
+ ('column', self.column),
+ ('snippet', self.snippet),
+ ('exception', type.full_type_name(self.exception) if self.exception else None)))
+
+ @property
+ def locator_as_str(self):
+ if self.location is not None:
+ if self.line is not None:
+ if self.column is not None:
+ return '"%s":%d:%d' % (self.location, self.line, self.column)
+ else:
+ return '"%s":%d' % (self.location, self.line)
+ else:
+ return '"%s"' % self.location
+ else:
+ return None
+
+ @property
+ def heading_as_str(self):
+ return '%d: %s' % (self.level, self.message)
+
+ @property
+ def details_as_str(self):
+ details_str = ''
+ locator = self.locator_as_str
+ if locator is not None:
+ details_str += '@%s' % locator
+ if self.snippet is not None:
+ details_str += '\n%s' % self.snippet
+ return details_str
+
+ def __str__(self):
+ heading_str = self.heading_as_str
+ details = self.details_as_str
+ if details:
+ heading_str += ', ' + details
+ return heading_str
+
+
+class ReporterMixin(object):
+
+ Issue = Issue
+
+ def __init__(self, *args, **kwargs):
+ super(ReporterMixin, self).__init__(*args, **kwargs)
+ self._issues = threading.LockedList()
+ self.max_level = self.Issue.ALL
+
+ def report(self, message=None, exception=None, location=None, line=None,
+ column=None, locator=None, snippet=None, level=Issue.PLATFORM, issue=None):
+ if issue is None:
+ issue = self.Issue(message, exception, location, line, column, locator, snippet, level)
+
+ # Avoid duplicate issues
+ with self._issues:
+ for i in self._issues:
+ if str(i) == str(issue):
+ return
+
+ self._issues.append(issue)
+
+ @property
+ def has_issues(self):
+ return len(self._issues) > 0
+
+ @property
+ def issues(self):
+ issues = [i for i in self._issues if i.level <= self.max_level]
+ issues.sort(key=lambda i: (i.level, i.location, i.line, i.column, i.message))
+ return collections.FrozenList(issues)
+
+ @property
+ def issues_as_raw(self):
+ return [formatting.as_raw(i) for i in self.issues]
+
+ def extend_issues(self, *issues):
+ with self._issues:
+ self._issues.extend(*issues)
+
+ def dump_issues(self):
+ issues = self.issues
+ if issues:
+ console.puts(console.Colored.blue('Validation issues:', bold=True))
+ with console.indent(2):
+ for issue in issues:
+ console.puts(console.Colored.blue(issue.heading_as_str))
+ details = issue.details_as_str
+ if details:
+ with console.indent(3):
+ console.puts(details)
+ if issue.exception is not None:
+ with console.indent(3):
+ exceptions.print_exception(issue.exception)
+ return True
+ return False
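The reporter semantics defined above (de-duplication by string form, and level filtering) in a short sketch:

    reporter = ReporterMixin()

    reporter.report(message='bad syntax', level=Issue.SYNTAX, location='main.yaml', line=1)
    reporter.report(message='bad syntax', level=Issue.SYNTAX, location='main.yaml', line=1)  # duplicate: dropped
    reporter.report(message='live check failed', level=Issue.EXTERNAL)

    reporter.max_level = Issue.FIELD   # hide anything above the FIELD level
    assert len(reporter.issues) == 1
    assert reporter.issues[0].heading_as_str == '1: bad syntax'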
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/storage/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/storage/__init__.py
new file mode 100644
index 0000000..a553ca7
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/storage/__init__.py
@@ -0,0 +1,41 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Storage package.
+"""
+
+from .core import (
+ Storage,
+ ModelStorage,
+ ResourceStorage,
+)
+from . import (
+ exceptions,
+ api,
+ core,
+ filesystem_rapi,
+ sql_mapi,
+)
+
+__all__ = (
+ 'exceptions',
+ 'Storage',
+ 'ModelStorage',
+ 'ResourceStorage',
+ 'filesystem_rapi',
+ 'sql_mapi',
+ 'api',
+)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/storage/api.py b/azure/aria/aria-extension-cloudify/src/aria/aria/storage/api.py
new file mode 100644
index 0000000..a337743
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/storage/api.py
@@ -0,0 +1,186 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Storage APIs.
+"""
+
+import threading
+
+
+class StorageAPI(object):
+ """
+ Base class for storage APIs.
+ """
+ def create(self, **kwargs):
+ """
+ Creates the storage API's underlying backend (e.g. database tables or directories).
+ """
+ raise NotImplementedError('Subclass must implement abstract create method')
+
+
+class ModelAPI(StorageAPI):
+ """
+ Base class for model APIs ("MAPI").
+ """
+ def __init__(self, model_cls, name=None, **kwargs):
+ """
+ :param model_cls: representing class of the model
+ :param name: name of the model
+ """
+ super(ModelAPI, self).__init__(**kwargs)
+ self._model_cls = model_cls
+ self._name = name or model_cls.__modelname__
+ self._thread_local = threading.local()
+ self._thread_local._instrumentation = []
+
+ @property
+ def _instrumentation(self):
+ if not hasattr(self._thread_local, '_instrumentation'):
+ self._thread_local._instrumentation = []
+ return self._thread_local._instrumentation
+
+ @property
+ def name(self):
+ """
+ Name of the class.
+
+ :type: :obj:`basestring`
+ """
+ return self._name
+
+ @property
+ def model_cls(self):
+ """
+ Class representing the model
+
+ :type: :obj:`Type`
+ """
+ return self._model_cls
+
+ def get(self, entry_id, filters=None, **kwargs):
+ """
+ Gets a model from storage.
+
+ :param entry_id:
+ """
+ raise NotImplementedError('Subclass must implement abstract get method')
+
+ def put(self, entry, **kwargs):
+ """
+ Puts a model in storage.
+
+ :param entry:
+ """
+ raise NotImplementedError('Subclass must implement abstract put method')
+
+ def delete(self, entry_id, **kwargs):
+ """
+ Deletes a model from storage.
+
+ :param entry_id:
+ """
+ raise NotImplementedError('Subclass must implement abstract delete method')
+
+ def __iter__(self):
+ return self.iter()
+
+ def iter(self, **kwargs):
+ """
+ Iterate over all models in storage.
+ """
+ raise NotImplementedError('Subclass must implement abstract iter method')
+
+ def update(self, entry, **kwargs):
+ """
+ Update a model in storage.
+
+ :param entry:
+ :param kwargs:
+ """
+ raise NotImplementedError('Subclass must implement abstract update method')
+
+
+class ResourceAPI(StorageAPI):
+ """
+ Base class for resource APIs ("RAPI").
+ """
+ def __init__(self, name, **kwargs):
+ """
+ :param name: resource type
+ """
+ super(ResourceAPI, self).__init__(**kwargs)
+ self._name = name
+
+ @property
+ def name(self):
+ """
+ Name of resource.
+
+ :type: :obj:`basestring`
+ """
+ return self._name
+
+ def read(self, entry_id, path, **kwargs):
+ """
+ Get a bytestream for a resource from storage.
+
+ :param entry_id:
+ :param path:
+ """
+ raise NotImplementedError('Subclass must implement abstract read method')
+
+ def delete(self, entry_id, path, **kwargs):
+ """
+ Delete a resource from storage.
+
+ :param entry_id:
+ :param path:
+ """
+ raise NotImplementedError('Subclass must implement abstract delete method')
+
+ def download(self, entry_id, destination, path=None, **kwargs):
+ """
+ Download a resource from storage.
+
+ :param entry_id:
+ :param destination:
+ :param path:
+ """
+ raise NotImplementedError('Subclass must implement abstract download method')
+
+ def upload(self, entry_id, source, path=None, **kwargs):
+ """
+ Upload a resource to storage.
+
+ :param entry_id:
+ :param source:
+ :param path:
+ """
+ raise NotImplementedError('Subclass must implement abstract upload method')
+
+
+def generate_lower_name(model_cls):
+ """
+ Returns the storage name of the class: ``__mapiname__`` if defined, otherwise ``__tablename__`` (e.g. ``SomeClass`` -> ``some_class``)
+
+ :param model_cls: class to evaluate
+ :return: lowercase name
+ :rtype: basestring
+ """
+ return getattr(model_cls, '__mapiname__', model_cls.__tablename__)
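To make the abstract contract concrete, here is a toy in-memory MAPI (illustration only; the ``Thing`` model and its ``__modelname__`` are hypothetical):

    from aria.storage.api import ModelAPI

    class DictModelAPI(ModelAPI):
        _store = {}  # shared toy backend, keyed by model name

        def create(self, **kwargs):
            self._store.setdefault(self.name, {})

        def get(self, entry_id, filters=None, **kwargs):
            return self._store[self.name][entry_id]

        def put(self, entry, **kwargs):
            self._store[self.name][entry.id] = entry
            return entry

        def delete(self, entry_id, **kwargs):
            return self._store[self.name].pop(entry_id)

        def iter(self, **kwargs):
            return iter(self._store[self.name].values())

    class Thing(object):
        __modelname__ = 'thing'

        def __init__(self, id):
            self.id = id

    mapi = DictModelAPI(Thing)   # name defaults to Thing.__modelname__
    mapi.create()
    mapi.put(Thing('t1'))
    assert mapi.get('t1').id == 't1'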
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/storage/collection_instrumentation.py b/azure/aria/aria-extension-cloudify/src/aria/aria/storage/collection_instrumentation.py
new file mode 100644
index 0000000..6154e5d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/storage/collection_instrumentation.py
@@ -0,0 +1,314 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities for instrumenting collections of models in storage.
+"""
+
+from . import exceptions
+
+
+class _InstrumentedCollection(object):
+
+ def __init__(self,
+ mapi,
+ parent,
+ field_name,
+ field_cls,
+ seq=None,
+ is_top_level=True,
+ **kwargs):
+ self._mapi = mapi
+ self._parent = parent
+ self._field_name = field_name
+ self._is_top_level = is_top_level
+ self._field_cls = field_cls
+ self._load(seq, **kwargs)
+
+ @property
+ def _raw(self):
+ raise NotImplementedError
+
+ def _load(self, seq, **kwargs):
+ """
+ Instantiates the object from existing seq.
+
+ :param seq: the original sequence to load from
+ """
+ raise NotImplementedError
+
+ def _set(self, key, value):
+ """
+ Sets the changes for the current object (not in the database).
+
+ :param key:
+ :param value:
+ """
+ raise NotImplementedError
+
+ def _del(self, collection, key):
+ raise NotImplementedError
+
+ def _instrument(self, key, value):
+ """
+ Instruments any collection to track changes (and ease of access).
+
+ :param key:
+ :param value:
+ """
+ if isinstance(value, _InstrumentedCollection):
+ return value
+ elif isinstance(value, dict):
+ instrumentation_cls = _InstrumentedDict
+ elif isinstance(value, list):
+ instrumentation_cls = _InstrumentedList
+ else:
+ return value
+
+ return instrumentation_cls(self._mapi, self, key, self._field_cls, value, False)
+
+ def _raw_value(self, value):
+ """
+ Gets the raw value.
+
+ :param value:
+ """
+ if isinstance(value, self._field_cls):
+ return value.value
+ return value
+
+ def _encapsulate_value(self, key, value):
+ """
+ Creates a new item class if needed.
+
+ :param key:
+ :param value:
+ """
+ if isinstance(value, self._field_cls):
+ return value
+ # If it is not wrapped
+ return self._field_cls.wrap(key, value)
+
+ def __setitem__(self, key, value):
+ """
+ Updates the values in both the local and the database locations.
+
+ :param key:
+ :param value:
+ """
+ self._set(key, value)
+ if self._is_top_level:
+ # We are at the top level
+ field = getattr(self._parent, self._field_name)
+ self._set_field(
+ field, key, value if key in field else self._encapsulate_value(key, value))
+ self._mapi.update(self._parent)
+ else:
+ # We are not at the top level
+ self._set_field(self._parent, self._field_name, self)
+
+ def _set_field(self, collection, key, value):
+ """
+ Enables updating the current change in the ancestors.
+
+ :param collection: collection to change
+ :param key: key for the specific field
+ :param value: new value
+ """
+ if isinstance(value, _InstrumentedCollection):
+ value = value._raw
+ if key in collection and isinstance(collection[key], self._field_cls):
+ if isinstance(collection[key], _InstrumentedCollection):
+ self._del(collection, key)
+ collection[key].value = value
+ else:
+ collection[key] = value
+ return collection[key]
+
+ def __deepcopy__(self, *args, **kwargs):
+ return self._raw
+
+
+class _InstrumentedDict(_InstrumentedCollection, dict):
+
+ def _load(self, dict_=None, **kwargs):
+ dict.__init__(
+ self,
+ tuple((key, self._raw_value(value)) for key, value in (dict_ or {}).iteritems()),
+ **kwargs)
+
+ def update(self, dict_=None, **kwargs):
+ dict_ = dict_ or {}
+ for key, value in dict_.iteritems():
+ self[key] = value
+ for key, value in kwargs.iteritems():
+ self[key] = value
+
+ def __getitem__(self, key):
+ return self._instrument(key, dict.__getitem__(self, key))
+
+ def _set(self, key, value):
+ dict.__setitem__(self, key, self._raw_value(value))
+
+ @property
+ def _raw(self):
+ return dict(self)
+
+ def _del(self, collection, key):
+ del collection[key]
+
+
+class _InstrumentedList(_InstrumentedCollection, list):
+
+ def _load(self, list_=None, **kwargs):
+ list.__init__(self, list(item for item in list_ or []))
+
+ def append(self, value):
+ self.insert(len(self), value)
+
+ def insert(self, index, value):
+ list.insert(self, index, self._raw_value(value))
+ if self._is_top_level:
+ field = getattr(self._parent, self._field_name)
+ field.insert(index, self._encapsulate_value(index, value))
+ else:
+ self._parent[self._field_name] = self
+
+ def __getitem__(self, key):
+ return self._instrument(key, list.__getitem__(self, key))
+
+ def _set(self, key, value):
+ list.__setitem__(self, key, value)
+
+ def _del(self, collection, key):
+ del collection[key]
+
+ @property
+ def _raw(self):
+ return list(self)
+
+
+class _WrappedBase(object):
+
+ def __init__(self, wrapped, instrumentation, instrumentation_kwargs=None):
+ """
+ :param wrapped: model to be instrumented
+ :param instrumentation: instrumentation dict
+ :param instrumentation_kwargs: arguments for instrumentation class
+ """
+ self._wrapped = wrapped
+ self._instrumentation = instrumentation
+ self._instrumentation_kwargs = instrumentation_kwargs or {}
+
+ def _wrap(self, value):
+ if value.__class__ in set(class_.class_ for class_ in self._instrumentation):
+ return _create_instrumented_model(
+ value, instrumentation=self._instrumentation, **self._instrumentation_kwargs)
+ # Check that the value is a SQLAlchemy model (it should have metadata) or a collection
+ elif hasattr(value, 'metadata') or isinstance(value, (dict, list)):
+ return _create_wrapped_model(
+ value, instrumentation=self._instrumentation, **self._instrumentation_kwargs)
+ return value
+
+ def __getattr__(self, item):
+ if hasattr(self, '_wrapped'):
+ return self._wrap(getattr(self._wrapped, item))
+ else:
+ return super(_WrappedBase, self).__getattribute__(item)
+
+
+class _InstrumentedModel(_WrappedBase):
+
+ def __init__(self, mapi, *args, **kwargs):
+ """
+ Wraps and instruments an original model.
+
+ :param mapi: MAPI for the wrapped model
+ :param wrapped: model to be instrumented
+ :param instrumentation: instrumentation dict
+ :param instrumentation_kwargs: arguments for instrumentation class
+ """
+ super(_InstrumentedModel, self).__init__(instrumentation_kwargs=dict(mapi=mapi),
+ *args, **kwargs)
+ self._mapi = mapi
+ self._apply_instrumentation()
+
+ def _apply_instrumentation(self):
+ for field in self._instrumentation:
+ if not issubclass(type(self._wrapped), field.parent.class_):
+ # Do not apply if this field is not for our class
+ continue
+
+ field_name = field.key
+ field_cls = field.mapper.class_
+
+ field = getattr(self._wrapped, field_name)
+
+ # Preserve the original field, e.g. original "attributes" would be located under
+ # "_attributes"
+ setattr(self, '_{0}'.format(field_name), field)
+
+ # Set instrumented value
+ if isinstance(field, dict):
+ instrumentation_cls = _InstrumentedDict
+ elif isinstance(field, list):
+ instrumentation_cls = _InstrumentedList
+ else:
+ # TODO: raise proper error
+ raise exceptions.StorageError(
+ "ARIA only supports instrumentation for dict and list fields. Field {field} of "
+ "the class `{model}` is of type `{type}`.".format(
+ field=field,
+ model=self._wrapped,
+ type=type(field)))
+
+ instrumented_class = instrumentation_cls(seq=field,
+ parent=self._wrapped,
+ mapi=self._mapi,
+ field_name=field_name,
+ field_cls=field_cls)
+ setattr(self, field_name, instrumented_class)
+
+
+class _WrappedModel(_WrappedBase):
+
+ def __getitem__(self, item):
+ return self._wrap(self._wrapped[item])
+
+ def __iter__(self):
+ for item in self._wrapped.__iter__():
+ yield self._wrap(item)
+
+
+def _create_instrumented_model(original_model, mapi, instrumentation):
+ return type('Instrumented{0}'.format(original_model.__class__.__name__),
+ (_InstrumentedModel,),
+ {})(wrapped=original_model, instrumentation=instrumentation, mapi=mapi)
+
+
+def _create_wrapped_model(original_model, mapi, instrumentation):
+ return type('Wrapped{0}'.format(original_model.__class__.__name__),
+ (_WrappedModel, ),
+ {})(wrapped=original_model,
+ instrumentation=instrumentation,
+ instrumentation_kwargs=dict(mapi=mapi))
+
+
+def instrument(instrumentation, original_model, mapi):
+ for instrumented_field in instrumentation:
+ if isinstance(original_model, instrumented_field.class_):
+ return _create_instrumented_model(original_model, mapi, instrumentation)
+
+ return _create_wrapped_model(original_model, mapi, instrumentation)
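To illustrate the write-through behavior, a small sketch using stand-ins (all the ``Fake*`` classes are hypothetical) for the MAPI, the parent model, and the wrapped field class. Assigning into the instrumented dict updates the local copy, writes through to the parent's field, and pushes the change via the MAPI:

    class FakeAttr(object):                      # stand-in for a wrapped field class
        def __init__(self, key, value):
            self.key, self.value = key, value

        @classmethod
        def wrap(cls, key, value):
            return cls(key, value)

    class FakeMAPI(object):                      # stand-in for a model API
        def __init__(self):
            self.updated = []

        def update(self, model):
            self.updated.append(model)

    class FakeModel(object):                     # stand-in for a stored model
        def __init__(self):
            self.attributes = {'threshold': FakeAttr('threshold', 1)}

    model, mapi = FakeModel(), FakeMAPI()
    attributes = _InstrumentedDict(mapi, model, 'attributes', FakeAttr, model.attributes)
    attributes['threshold'] = 2                  # local set + parent field set + mapi.update()
    assert model.attributes['threshold'].value == 2
    assert mapi.updated == [model]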
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/storage/core.py b/azure/aria/aria-extension-cloudify/src/aria/aria/storage/core.py
new file mode 100644
index 0000000..7e9b201
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/storage/core.py
@@ -0,0 +1,160 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Storage API management.
+"""
+
+import copy
+from contextlib import contextmanager
+
+from aria.logger import LoggerMixin
+from . import sql_mapi
+
+__all__ = (
+ 'Storage',
+ 'ModelStorage',
+ 'ResourceStorage'
+)
+
+
+class Storage(LoggerMixin):
+ """
+ Base class for storage managers.
+ """
+ def __init__(self,
+ api_cls,
+ api_kwargs=None,
+ items=(),
+ initiator=None,
+ initiator_kwargs=None,
+ **kwargs):
+ """
+ :param api_cls: API class for each entry
+ :param api_kwargs:
+ :param items: items to register
+ :param initiator: function that initializes the storage before first use; it should return
+ a dict, which is passed to the API in addition to the API kwargs; this enables the creation
+ of non-serializable objects
+ :param initiator_kwargs:
+ :param kwargs:
+ """
+ super(Storage, self).__init__(**kwargs)
+ self.api = api_cls
+ self.registered = {}
+ self._initiator = initiator
+ self._initiator_kwargs = initiator_kwargs or {}
+ self._api_kwargs = api_kwargs or {}
+ self._additional_api_kwargs = {}
+ if self._initiator:
+ self._additional_api_kwargs = self._initiator(**self._initiator_kwargs)
+ for item in items:
+ self.register(item)
+ self.logger.debug('{name} object is ready: {0!r}'.format(
+ self, name=self.__class__.__name__))
+
+ @property
+ def _all_api_kwargs(self):
+ kwargs = self._api_kwargs.copy()
+ kwargs.update(self._additional_api_kwargs)
+ return kwargs
+
+ def __repr__(self):
+ return '{name}(api={self.api})'.format(name=self.__class__.__name__, self=self)
+
+ def __getattr__(self, item):
+ try:
+ return self.registered[item]
+ except KeyError:
+ return super(Storage, self).__getattribute__(item)
+
+ @property
+ def serialization_dict(self):
+ return {
+ 'api': self.api,
+ 'api_kwargs': self._api_kwargs,
+ 'initiator': self._initiator,
+ 'initiator_kwargs': self._initiator_kwargs
+ }
+
+ def register(self, entry):
+ """
+ Register an API.
+
+ :param entry:
+ """
+ raise NotImplementedError('Subclass must implement abstract register method')
+
+
+class ResourceStorage(Storage):
+ """
+ Manages storage resource APIs ("RAPIs").
+ """
+ def register(self, name):
+ """
+ Register a storage resource API ("RAPI").
+
+ :param name: name
+ """
+ self.registered[name] = self.api(name=name, **self._all_api_kwargs)
+ self.registered[name].create()
+ self.logger.debug('setup {name} in storage {self!r}'.format(name=name, self=self))
+
+
+class ModelStorage(Storage):
+ """
+ Manages storage model APIs ("MAPIs").
+ """
+ def __init__(self, *args, **kwargs):
+ if kwargs.get('initiator', None) is None:
+ kwargs['initiator'] = sql_mapi.init_storage
+ super(ModelStorage, self).__init__(*args, **kwargs)
+
+ def register(self, model_cls):
+ """
+ Register a storage model API ("MAPI").
+
+ :param model_cls: model API to register
+ """
+ model_name = model_cls.__modelname__
+ if model_name in self.registered:
+ self.logger.debug('{name} already in storage {self!r}'.format(name=model_name,
+ self=self))
+ return
+ self.registered[model_name] = self.api(name=model_name,
+ model_cls=model_cls,
+ **self._all_api_kwargs)
+ self.registered[model_name].create()
+ self.logger.debug('setup {name} in storage {self!r}'.format(name=model_name, self=self))
+
+ def drop(self):
+ """
+ Drop all the tables.
+ """
+ for mapi in self.registered.itervalues():
+ mapi.drop()
+
+ @contextmanager
+ def instrument(self, *instrumentation):
+ original_instrumentation = {}
+
+ try:
+ for mapi in self.registered.itervalues():
+ original_instrumentation[mapi] = copy.copy(mapi._instrumentation)
+ mapi._instrumentation.extend(instrumentation)
+ yield self
+ finally:
+ for mapi in self.registered.itervalues():
+ mapi._instrumentation[:] = original_instrumentation[mapi]
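Wiring it together, a minimal sketch (requires SQLAlchemy; the ``Note`` model is hypothetical). ``ModelStorage`` falls back to ``sql_mapi.init_storage`` as its initiator, so an SQLite engine and session are created once and passed to every MAPI:

    import tempfile

    from sqlalchemy import Column, Integer, Text
    from sqlalchemy.ext.declarative import declarative_base

    from aria.storage import ModelStorage, sql_mapi

    Base = declarative_base()

    class Note(Base):
        __tablename__ = 'note'
        __modelname__ = 'note'
        id = Column(Integer, primary_key=True)
        text = Column(Text)

    storage = ModelStorage(api_cls=sql_mapi.SQLAlchemyModelAPI,
                           initiator_kwargs=dict(base_dir=tempfile.mkdtemp()),
                           items=(Note,))
    storage.note.put(Note(text='hello'))   # registered MAPIs are attributes, by model name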
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/storage/exceptions.py b/azure/aria/aria-extension-cloudify/src/aria/aria/storage/exceptions.py
new file mode 100644
index 0000000..c538876
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/storage/exceptions.py
@@ -0,0 +1,31 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Storage exceptions.
+"""
+
+from .. import exceptions
+
+
+class StorageError(exceptions.AriaError):
+ """
+ General storage exception
+ """
+ pass
+
+
+class NotFoundError(StorageError):
+ pass
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/storage/filesystem_rapi.py b/azure/aria/aria-extension-cloudify/src/aria/aria/storage/filesystem_rapi.py
new file mode 100644
index 0000000..b425fa2
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/storage/filesystem_rapi.py
@@ -0,0 +1,165 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+File system implementation of the storage resource API ("RAPI").
+"""
+
+import os
+import shutil
+from multiprocessing import RLock
+from contextlib import contextmanager
+from functools import partial
+from distutils import dir_util # https://github.com/PyCQA/pylint/issues/73; pylint: disable=no-name-in-module
+
+from aria.storage import (
+ api,
+ exceptions
+)
+
+
+class FileSystemResourceAPI(api.ResourceAPI):
+ """
+ File system implementation of the storage resource API ("RAPI").
+ """
+
+ def __init__(self, directory, **kwargs):
+ """
+ :param directory: root dir for storage
+ """
+ super(FileSystemResourceAPI, self).__init__(**kwargs)
+ self.directory = directory
+ self.base_path = os.path.join(self.directory, self.name)
+ self._join_path = partial(os.path.join, self.base_path)
+ self._lock = RLock()
+
+ @contextmanager
+ def connect(self):
+ """
+ Establishes a connection and destroys it after use.
+ """
+ try:
+ self._establish_connection()
+ yield self
+ except BaseException as e:
+ raise exceptions.StorageError(str(e))
+ finally:
+ self._destroy_connection()
+
+ def _establish_connection(self):
+ """
+ Establishes a connection. Used in the ``connect`` context manager.
+ """
+ self._lock.acquire()
+
+ def _destroy_connection(self):
+ """
+ Destroys a connection. Used in the ``connect`` context manager.
+ """
+ self._lock.release()
+
+ def __repr__(self):
+ return '{cls.__name__}(directory={self.directory})'.format(
+ cls=self.__class__, self=self)
+
+ def create(self, **kwargs):
+ """
+ Creates this API's base directory, as well as the storage root directory if needed.
+ """
+ try:
+ os.makedirs(self.directory)
+ except (OSError, IOError):
+ pass
+ try:
+ os.makedirs(self.base_path)
+ except (OSError, IOError):
+ pass
+
+ def read(self, entry_id, path, **_):
+ """
+ Retrieves the contents of a file.
+
+ :param entry_id: entry ID
+ :param path: path to resource
+ :return: contents of the file
+ :rtype: bytes
+ """
+ resource_relative_path = os.path.join(self.name, entry_id, path or '')
+ resource = os.path.join(self.directory, resource_relative_path)
+ if not os.path.exists(resource):
+ raise exceptions.StorageError("Resource {0} does not exist".
+ format(resource_relative_path))
+ if not os.path.isfile(resource):
+ resources = os.listdir(resource)
+ if len(resources) != 1:
+ raise exceptions.StorageError(
+ 'Failed to read {0}; Reading a directory is '
+ 'only allowed when it contains a single resource'.format(resource))
+ resource = os.path.join(resource, resources[0])
+ with open(resource, 'rb') as resource_file:
+ return resource_file.read()
+
+ def download(self, entry_id, destination, path=None, **_):
+ """
+ Downloads a file or directory.
+
+ :param entry_id: entry ID
+ :param destination: download destination
+ :param path: path to download relative to the root of the entry (otherwise all)
+ """
+ resource_relative_path = os.path.join(self.name, entry_id, path or '')
+ resource = os.path.join(self.directory, resource_relative_path)
+ if not os.path.exists(resource):
+ raise exceptions.StorageError("Resource {0} does not exist".
+ format(resource_relative_path))
+ if os.path.isfile(resource):
+ shutil.copy2(resource, destination)
+ else:
+ dir_util.copy_tree(resource, destination) # pylint: disable=no-member
+
+ def upload(self, entry_id, source, path=None, **_):
+ """
+ Uploads a file or directory.
+
+ :param entry_id: entry ID
+ :param source: source of the files to upload
+ :param path: the destination of the file/s relative to the entry root dir.
+ """
+ resource_directory = os.path.join(self.directory, self.name, entry_id)
+ if not os.path.exists(resource_directory):
+ os.makedirs(resource_directory)
+ destination = os.path.join(resource_directory, path or '')
+ if os.path.isfile(source):
+ shutil.copy2(source, destination)
+ else:
+ dir_util.copy_tree(source, destination) # pylint: disable=no-member
+
+ def delete(self, entry_id, path=None, **_):
+ """
+ Deletes a file or directory.
+
+ :param entry_id: entry ID
+ :param path: path to delete relative to the root of the entry (otherwise all)
+ """
+ destination = os.path.join(self.directory, self.name, entry_id, path or '')
+ if os.path.exists(destination):
+ if os.path.isfile(destination):
+ os.remove(destination)
+ else:
+ shutil.rmtree(destination)
+ return True
+ return False
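A usage sketch: each registered name becomes a subdirectory under the root directory, with one directory per ``entry_id`` (temporary paths only):

    import os
    import tempfile

    from aria.storage import ResourceStorage
    from aria.storage.filesystem_rapi import FileSystemResourceAPI

    source = os.path.join(tempfile.mkdtemp(), 'main.yaml')
    with open(source, 'w') as f:
        f.write('tosca_definitions_version: tosca_simple_yaml_1_0\n')

    storage = ResourceStorage(FileSystemResourceAPI,
                              api_kwargs=dict(directory=tempfile.mkdtemp()),
                              items=('blueprint',))
    storage.blueprint.upload(entry_id='b1', source=source, path='main.yaml')
    print(storage.blueprint.read(entry_id='b1', path='main.yaml'))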
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/storage/sql_mapi.py b/azure/aria/aria-extension-cloudify/src/aria/aria/storage/sql_mapi.py
new file mode 100644
index 0000000..975ada7
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/storage/sql_mapi.py
@@ -0,0 +1,439 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+SQLAlchemy implementation of the storage model API ("MAPI").
+"""
+
+import os
+import platform
+
+from sqlalchemy import (
+ create_engine,
+ orm,
+)
+from sqlalchemy.exc import SQLAlchemyError
+from sqlalchemy.orm.exc import StaleDataError
+
+from aria.utils.collections import OrderedDict
+from . import (
+ api,
+ exceptions,
+ collection_instrumentation
+)
+
+_predicates = {'ge': '__ge__',
+ 'gt': '__gt__',
+ 'lt': '__lt__',
+ 'le': '__le__',
+ 'eq': '__eq__',
+ 'ne': '__ne__'}
+
+
+class SQLAlchemyModelAPI(api.ModelAPI):
+ """
+ SQLAlchemy implementation of the storage model API ("MAPI").
+ """
+
+ def __init__(self,
+ engine,
+ session,
+ **kwargs):
+ super(SQLAlchemyModelAPI, self).__init__(**kwargs)
+ self._engine = engine
+ self._session = session
+
+ def get(self, entry_id, include=None, **kwargs):
+ """
+ Returns a single result based on the model class and element ID
+ """
+ query = self._get_query(include, {'id': entry_id})
+ result = query.first()
+
+ if not result:
+ raise exceptions.NotFoundError(
+ 'Requested `{0}` with ID `{1}` was not found'
+ .format(self.model_cls.__name__, entry_id)
+ )
+ return self._instrument(result)
+
+ def get_by_name(self, entry_name, include=None, **kwargs):
+ assert hasattr(self.model_cls, 'name')
+ result = self.list(include=include, filters={'name': entry_name})
+ if not result:
+ raise exceptions.NotFoundError(
+ 'Requested {0} with name `{1}` was not found'
+ .format(self.model_cls.__name__, entry_name)
+ )
+ elif len(result) > 1:
+ raise exceptions.StorageError(
+ 'Requested {0} with name `{1}` returned more than 1 value'
+ .format(self.model_cls.__name__, entry_name)
+ )
+ else:
+ return result[0]
+
+ def list(self,
+ include=None,
+ filters=None,
+ pagination=None,
+ sort=None,
+ **kwargs):
+ query = self._get_query(include, filters, sort)
+
+ results, total, size, offset = self._paginate(query, pagination)
+
+ return ListResult(
+ dict(total=total, size=size, offset=offset),
+ [self._instrument(result) for result in results]
+ )
+
+ def iter(self,
+ include=None,
+ filters=None,
+ sort=None,
+ **kwargs):
+ """
+ Iterates over (a possibly empty set of) ``model_class`` results.
+ """
+ for result in self._get_query(include, filters, sort):
+ yield self._instrument(result)
+
+ def put(self, entry, **kwargs):
+ """
+ Creates a ``model_class`` instance from a serializable ``model`` object.
+
+ :param entry: dict with relevant kwargs, or an instance of a class that has a ``to_dict``
+ method, and whose attributes match the columns of ``model_class`` (might also be just an
+ instance of ``model_class``)
+ :return: an instance of ``model_class``
+ """
+ self._session.add(entry)
+ self._safe_commit()
+ return entry
+
+ def delete(self, entry, **kwargs):
+ """
+ Deletes a single instance from storage.
+ """
+ self._load_relationships(entry)
+ self._session.delete(entry)
+ self._safe_commit()
+ return entry
+
+ def update(self, entry, **kwargs):
+ """
+ Adds ``entry`` to the database session, and attempts to commit.
+
+ :return: updated instance
+ """
+ return self.put(entry)
+
+ def refresh(self, entry):
+ """
+ Reloads the instance with fresh information from the database.
+
+ :param entry: instance to be re-loaded from the database
+ :return: refreshed instance
+ """
+ self._session.refresh(entry)
+ self._load_relationships(entry)
+ return entry
+
+ def _destroy_connection(self):
+ pass
+
+ def _establish_connection(self):
+ pass
+
+ def create(self, checkfirst=True, create_all=True, **kwargs):
+ self.model_cls.__table__.create(self._engine, checkfirst=checkfirst)
+
+ if create_all:
+ # Also create any tables that are defined dynamically at runtime (e.g. many-to-many
+ # helper tables)
+ self.model_cls.metadata.create_all(bind=self._engine, checkfirst=checkfirst)
+
+ def drop(self):
+ """
+ Drops the table.
+ """
+ self.model_cls.__table__.drop(self._engine)
+
+ def _safe_commit(self):
+ """
+ Tries to commit changes in the session; rolls back and raises ``StorageError`` if a
+ SQLAlchemy error is caught.
+ """
+ try:
+ self._session.commit()
+ except StaleDataError as e:
+ self._session.rollback()
+ raise exceptions.StorageError('Version conflict: {0}'.format(str(e)))
+ except (SQLAlchemyError, ValueError) as e:
+ self._session.rollback()
+ raise exceptions.StorageError('SQL Storage error: {0}'.format(str(e)))
+
+ def _get_base_query(self, include, joins):
+ """
+ Create the initial query from the model class and included columns.
+
+ :param include: (possibly empty) list of columns to include in the query
+ :return: SQLAlchemy AppenderQuery object
+ """
+ # If only some columns are included, query through the session object
+ if include:
+ # Make sure that attributes come before association proxies
+ include.sort(key=lambda x: x.is_clause_element)
+ query = self._session.query(*include)
+ else:
+ # If all columns should be returned, query directly from the model
+ query = self._session.query(self.model_cls)
+
+ query = query.join(*joins)
+ return query
+
+ @staticmethod
+ def _get_joins(model_class, columns):
+ """
+ Gets a list of all the tables on which we need to join.
+
+ :param columns: set of all attributes involved in the query
+ """
+
+ # Using an OrderedDict instead of a set because order is important
+ joins = OrderedDict()
+ for column_name in columns:
+ column = getattr(model_class, column_name)
+ while not column.is_attribute:
+ join_attr = column.local_attr
+ # This is a hack, to deal with the fact that SQLA doesn't
+ # fully support doing something like: `if join_attr in joins`,
+ # because some SQLA elements have their own comparators
+ join_attr_name = str(join_attr)
+ if join_attr_name not in joins:
+ joins[join_attr_name] = join_attr
+ column = column.remote_attr
+
+ return joins.values()
+
+ @staticmethod
+ def _sort_query(query, sort=None):
+ """
+ Adds sorting clauses to the query.
+
+ :param query: base SQL query
+ :param sort: optional dictionary where keys are column names to sort by, and values are
+ the order (asc/desc)
+ :return: SQLAlchemy AppenderQuery object
+ """
+ if sort:
+ for column, order in sort.items():
+ if order == 'desc':
+ column = column.desc()
+ query = query.order_by(column)
+ return query
+
+ def _filter_query(self, query, filters):
+ """
+ Adds filter clauses to the query.
+
+ :param query: base SQL query
+ :param filters: optional dictionary where keys are column names to filter by, and values
+ are values applicable for those columns (or lists of such values)
+ :return: SQLAlchemy AppenderQuery object
+ """
+ return self._add_value_filter(query, filters)
+
+ @staticmethod
+ def _add_value_filter(query, filters):
+ for column, value in filters.items():
+ if isinstance(value, dict):
+ for predicate, operand in value.items():
+ query = query.filter(getattr(column, predicate)(operand))
+ elif isinstance(value, (list, tuple)):
+ query = query.filter(column.in_(value))
+ else:
+ query = query.filter(column == value)
+
+ return query
+
+ def _get_query(self,
+ include=None,
+ filters=None,
+ sort=None):
+ """
+ Gets a SQL query object based on the params passed.
+
+ :param include: optional list of columns to include in the query
+ :param filters: optional dictionary where keys are column names to filter by, and values
+ are values applicable for those columns (or lists of such values)
+ :param sort: optional dictionary where keys are column names to sort by, and values are the
+ order (asc/desc)
+ :return: sorted and filtered query with only the relevant columns
+ """
+ include, filters, sort, joins = self._get_joins_and_converted_columns(
+ include, filters, sort
+ )
+ filters = self._convert_operands(filters)
+
+ query = self._get_base_query(include, joins)
+ query = self._filter_query(query, filters)
+ query = self._sort_query(query, sort)
+ return query
+
+ @staticmethod
+ def _convert_operands(filters):
+ for column, conditions in filters.items():
+ if isinstance(conditions, dict):
+ for predicate, operand in conditions.items():
+ if predicate not in _predicates:
+ raise exceptions.StorageError(
+ "{0} is not a valid predicate for filtering. Valid predicates are {1}"
+ .format(predicate, ', '.join(_predicates.keys())))
+ del filters[column][predicate]
+ filters[column][_predicates[predicate]] = operand
+
+ return filters
+
+ def _get_joins_and_converted_columns(self,
+ include,
+ filters,
+ sort):
+ """
+ Gets a list of tables on which we need to join and the converted ``include``, ``filters``
+ and ``sort`` arguments (converted to actual SQLAlchemy column/label objects instead of
+ column names).
+ """
+ include = include or []
+ filters = filters or dict()
+ sort = sort or OrderedDict()
+
+ all_columns = set(include) | set(filters.keys()) | set(sort.keys())
+ joins = self._get_joins(self.model_cls, all_columns)
+
+ include, filters, sort = self._get_columns_from_field_names(
+ include, filters, sort
+ )
+ return include, filters, sort, joins
+
+ def _get_columns_from_field_names(self,
+ include,
+ filters,
+ sort):
+ """
+ Goes over the optional parameters (include, filters, sort) and replaces column names with
+ actual SQLAlchemy column objects.
+ """
+ include = [self._get_column(c) for c in include]
+ filters = dict((self._get_column(c), filters[c]) for c in filters)
+ sort = OrderedDict((self._get_column(c), sort[c]) for c in sort)
+
+ return include, filters, sort
+
+ def _get_column(self, column_name):
+ """
+ Returns the column on which an action (filtering, sorting, etc.) would need to be performed.
+ Can be either an attribute of the class, or an association proxy linked to a relationship
+ in the class.
+ """
+ column = getattr(self.model_cls, column_name)
+ if column.is_attribute:
+ return column
+ else:
+ # We need to get to the underlying attribute, so we move on to the
+ # next remote_attr until we reach one
+ while not column.remote_attr.is_attribute:
+ column = column.remote_attr
+ # Put a label on the remote attribute with the name of the column
+ return column.remote_attr.label(column_name)
+
+ @staticmethod
+ def _paginate(query, pagination):
+ """
+ Paginates the query by size and offset.
+
+ :param query: current SQLAlchemy query object
+ :param pagination: optional dict with size and offset keys
+ :return: tuple with four elements:
+ * results: ``size`` items starting from ``offset``
+ * the total count of items
+ * ``size`` [default: 0]
+ * ``offset`` [default: 0]
+ """
+ if pagination:
+ size = pagination.get('size', 0)
+ offset = pagination.get('offset', 0)
+ total = query.order_by(None).count() # Fastest way to count
+ results = query.limit(size).offset(offset).all()
+ return results, total, size, offset
+ else:
+ results = query.all()
+ return results, len(results), 0, 0
+
+ @staticmethod
+ def _load_relationships(instance):
+ """
+ Helper method used to overcome a problem where the relationships that rely on joins aren't
+ being loaded automatically.
+ """
+ for rel in instance.__mapper__.relationships:
+ getattr(instance, rel.key)
+
+ def _instrument(self, model):
+ if self._instrumentation:
+ return collection_instrumentation.instrument(self._instrumentation, model, self)
+ else:
+ return model
+
+
+def init_storage(base_dir, filename='db.sqlite'):
+ """
+ Built-in ModelStorage initiator.
+
+ Creates a SQLAlchemy engine and a session to be passed to the MAPI.
+
+ ``initiator_kwargs`` must be passed to the ModelStorage and must hold ``base_dir``, the
+ location of the database file, and an optional filename. This creates an SQLite database.
+
+ :param base_dir: directory of the database
+ :param filename: database file name
+ :return: dict with ``engine`` and ``session`` keys, to be passed on as API kwargs
+ """
+ uri = 'sqlite:///{platform_char}{path}'.format(
+ # Handles the Windows behavior, where there is no root but drives, so the path
+ # behaves as a relative path.
+ platform_char='' if 'Windows' in platform.system() else '/',
+
+ path=os.path.join(base_dir, filename))
+
+ engine = create_engine(uri, connect_args=dict(timeout=15))
+
+ session_factory = orm.sessionmaker(bind=engine)
+ session = orm.scoped_session(session_factory=session_factory)
+
+ return dict(engine=engine, session=session)
+
+
+class ListResult(list):
+ """
+ Contains results about the requested items.
+ """
+ def __init__(self, metadata, *args, **kwargs):
+ super(ListResult, self).__init__(*args, **kwargs)
+ self.metadata = metadata
+ self.items = self
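Continuing the hypothetical ``Note``/``storage`` sketch from ``core.py`` above, the query helpers translate plain dicts into SQLAlchemy clauses, with predicate keys taken from the ``_predicates`` table (a sketch, assuming ARIA's pinned SQLAlchemy version):

    results = storage.note.list(
        filters={'id': {'gt': 1}, 'text': 'hello'},    # id > 1 AND text == 'hello'
        sort={'id': 'desc'},
        pagination={'size': 10, 'offset': 0})
    print(results.metadata)                            # dict with total/size/offset
    for note in results:
        print(note.text)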
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/__init__.py
new file mode 100644
index 0000000..43dd882
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/__init__.py
@@ -0,0 +1,65 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+General-purpose utilities package.
+"""
+
+from . import (
+ archive,
+ argparse,
+ caching,
+ collections,
+ console,
+ exceptions,
+ file,
+ formatting,
+ http,
+ imports,
+ openclose,
+ plugin,
+ process,
+ specification,
+ threading,
+ type,
+ uris,
+ uuid,
+ validation,
+ versions
+)
+
+
+__all__ = (
+ 'archive',
+ 'argparse',
+ 'caching',
+ 'collections',
+ 'console',
+ 'exceptions',
+ 'file',
+ 'formatting',
+ 'http',
+ 'imports',
+ 'openclose',
+ 'plugin',
+ 'process',
+ 'specification',
+ 'threading',
+ 'type',
+ 'uris',
+ 'uuid',
+ 'validation',
+ 'versions'
+)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/archive.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/archive.py
new file mode 100644
index 0000000..29efcb1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/archive.py
@@ -0,0 +1,66 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Archive utilities.
+"""
+
+import os
+import tarfile
+import zipfile
+import tempfile
+from contextlib import closing
+
+
+def is_archive(source):
+ return tarfile.is_tarfile(source) or zipfile.is_zipfile(source)
+
+
+def extract_archive(source):
+ if tarfile.is_tarfile(source):
+ return untar(source)
+ elif zipfile.is_zipfile(source):
+ return unzip(source)
+ raise ValueError(
+ 'Unsupported archive type provided or archive is not valid: {0}.'.format(source))
+
+
+def tar(source, destination):
+ with closing(tarfile.open(destination, 'w:gz')) as tar_archive:
+ tar_archive.add(source, arcname=os.path.basename(source))
+
+
+def untar(archive, destination=None):
+ if not destination:
+ destination = tempfile.mkdtemp()
+ with closing(tarfile.open(name=archive)) as tar_archive:
+ tar_archive.extractall(path=destination, members=tar_archive.getmembers())
+ return destination
+
+
+def zip(source, destination):
+ with closing(zipfile.ZipFile(destination, 'w')) as zip_file:
+ for root, _, files in os.walk(source):
+ for filename in files:
+ file_path = os.path.join(root, filename)
+ source_dir = os.path.dirname(source)
+ zip_file.write(
+ file_path, os.path.relpath(file_path, source_dir))
+ return destination
+
+
+def unzip(archive, destination=None):
+ if not destination:
+ destination = tempfile.mkdtemp()
+ with closing(zipfile.ZipFile(archive, 'r')) as zip_file:
+ zip_file.extractall(destination)
+ return destination
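For reference, a round-trip sketch of the helpers above, using only temporary paths:

    import os
    import tempfile

    from aria.utils import archive

    source = tempfile.mkdtemp()
    with open(os.path.join(source, 'hello.txt'), 'w') as f:
        f.write('hello')

    tarball = os.path.join(tempfile.mkdtemp(), 'hello.tar.gz')
    archive.tar(source, tarball)
    assert archive.is_archive(tarball)
    print(os.listdir(archive.extract_archive(tarball)))   # extracted to a fresh temp directory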
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/argparse.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/argparse.py
new file mode 100644
index 0000000..a05a841
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/argparse.py
@@ -0,0 +1,118 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Enhancements to Python's ``argparse`` module.
+"""
+
+from __future__ import absolute_import # so we can import standard 'argparse'
+
+from argparse import ArgumentParser as BaseArgumentParser
+
+
+class ArgumentParser(BaseArgumentParser):
+ """
+ Enhanced argument parser.
+
+ Applies a patch to fix `this issue <https://bugs.python.org/issue22433>`__.
+ """
+
+ def add_flag_argument(self, name, help_true=None, help_false=None, default=False):
+ """
+ Adds a flag argument as two arguments: ``--my-flag`` and ``--no-my-flag``.
+ """
+
+ dest = name.replace('-', '_')
+
+ if default:
+ if help_true is not None:
+ help_true += ' (default)'
+ else:
+ help_true = '(default)'
+ else:
+ if help_false is not None:
+ help_false += ' (default)'
+ else:
+ help_false = '(default)'
+
+ group = self.add_mutually_exclusive_group()
+ group.add_argument('--%s' % name, action='store_true', help=help_true)
+ group.add_argument('--no-%s' % name, dest=dest, action='store_false', help=help_false)
+
+ self.set_defaults(**{dest: default})
+
+ def _parse_optional(self, arg_string):
+
+ if self._is_positional(arg_string):
+ return None
+
+ # if the option string is present in the parser, return the action
+ if arg_string in self._option_string_actions:
+ action = self._option_string_actions[arg_string]
+ return action, arg_string, None
+
+ # if the option string before the "=" is present, return the action
+ if '=' in arg_string:
+ option_string, explicit_arg = arg_string.split('=', 1)
+ if option_string in self._option_string_actions:
+ action = self._option_string_actions[option_string]
+ return action, option_string, explicit_arg
+
+ # search through all possible prefixes of the option string
+ # and all actions in the parser for possible interpretations
+ option_tuples = self._get_option_tuples(arg_string)
+
+ # if multiple actions match, the option string was ambiguous
+ if len(option_tuples) > 1:
+ options = ', '.join(
+ [option_string for action, option_string, explicit_arg in option_tuples])
+ tup = arg_string, options
+ self.error('ambiguous option: %s could match %s' % tup)
+
+ # if exactly one action matched, this segmentation is good,
+ # so return the parsed action
+ elif len(option_tuples) == 1:
+ option_tuple, = option_tuples
+ return option_tuple
+
+ # if it was not found as an option, but it looks like a negative
+ # number, it was meant to be positional
+ # unless there are negative-number-like options
+ if self._negative_number_matcher.match(arg_string):
+ if not self._has_negative_number_optionals:
+ return None
+
+ # it was meant to be an optional but there is no such option
+ # in this parser (though it might be a valid option in a subparser)
+ return None, arg_string, None
+
+ def _is_positional(self, arg_string):
+ # if it's an empty string, it was meant to be a positional
+ if not arg_string:
+ return True
+
+ # if it doesn't start with a prefix, it was meant to be positional
+ if not arg_string[0] in self.prefix_chars:
+ return True
+
+ # if it's just a single character, it was meant to be positional
+ if len(arg_string) == 1:
+ return True
+
+ # if it contains a space, it was meant to be a positional
+ if ' ' in arg_string and arg_string[0] not in self.prefix_chars:
+ return True
+
+ return False
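A short sketch of ``add_flag_argument``, which expands one logical flag into a mutually exclusive ``--color``/``--no-color`` pair sharing a single destination (the flag name here is arbitrary):

    from aria.utils.argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_flag_argument('color',
                             help_true='enable colored output',
                             help_false='disable colored output',
                             default=True)

    assert parser.parse_args([]).color is True
    assert parser.parse_args(['--no-color']).color is False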
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/caching.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/caching.py
new file mode 100644
index 0000000..5f8cd88
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/caching.py
@@ -0,0 +1,137 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Caching utilities.
+"""
+
+from __future__ import absolute_import # so we can import standard 'collections' and 'threading'
+
+from threading import Lock
+from functools import partial
+
+from .collections import OrderedDict
+
+
+class cachedmethod(object): # pylint: disable=invalid-name
+ """
+ Decorator for caching method return values.
+
+ The implementation is thread-safe.
+
+ Supports ``cache_info`` to be compatible with Python 3's ``functools.lru_cache``. Note that the
+ statistics are combined for all instances of the class.
+
+    The cache is only used when the method is called bound to an instance; unbound calls bypass
+    it, which also gives callers a way to override the cache.
+
+ Adapted from `this solution
+ <http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/>`__.
+ """
+
+ ENABLED = True
+
+ def __init__(self, func):
+ self.__doc__ = func.__doc__
+ self.func = func
+ self.hits = 0
+ self.misses = 0
+ self.lock = Lock()
+
+ def cache_info(self):
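+        # mimics functools.lru_cache's cache_info tuple: (hits, misses,
+        # maxsize, currsize); there is no size bound, and currsize is
+        # approximated by the miss count, since each miss stores one entry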
+ with self.lock:
+ return (self.hits, self.misses, None, self.misses)
+
+ def reset_cache_info(self):
+ with self.lock:
+ self.hits = 0
+ self.misses = 0
+
+ def __get__(self, instance, owner):
+ if instance is None:
+ # Don't use cache if not bound to an object
+ # Note: This is also a way for callers to override the cache
+ return self.func
+ return partial(self, instance)
+
+ def __call__(self, *args, **kwargs):
+ if not self.ENABLED:
+ return self.func(*args, **kwargs)
+
+ instance = args[0]
+ if not hasattr(instance, '_method_cache'):
+ instance._method_cache = {}
+ method_cache = instance._method_cache
+
+ key = (self.func, args[1:], frozenset(kwargs.items()))
+
+ try:
+ with self.lock:
+ return_value = method_cache[key]
+ self.hits += 1
+ except KeyError:
+ return_value = self.func(*args, **kwargs)
+ with self.lock:
+ method_cache[key] = return_value
+ self.misses += 1
+ # Another thread may override our cache entry here, so we need to read
+ # it again to make sure all threads use the same return value
+ return_value = method_cache.get(key, return_value)
+
+ return return_value
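+
+# Illustrative usage sketch (assumption, not part of the original module):
+#
+#     class Fib(object):
+#         @cachedmethod
+#         def fib(self, n):
+#             return n if n < 2 else self.fib(n - 1) + self.fib(n - 2)
+#
+#     f = Fib()
+#     f.fib(30)  # each distinct n is computed once, stored in f._method_cache
+#     Fib.__dict__['fib'].cache_info()  # -> (hits, misses, None, ~entries)
+#     Fib.fib  # accessing via the class returns the raw, uncached function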
+
+
+class HasCachedMethods(object):
+ """
+ Provides convenience methods for working with :class:`cachedmethod`.
+ """
+
+ def __init__(self, method_cache=None):
+ self._method_cache = method_cache or {}
+
+ @property
+ def _method_cache_info(self):
+ """
+ The cache infos of all cached methods.
+
+ :rtype: dict of str, 4-tuple
+ """
+
+ cached_info = OrderedDict()
+ for k, v in self.__class__.__dict__.iteritems():
+ if isinstance(v, property):
+ # The property getter might be cached
+ v = v.fget
+ if hasattr(v, 'cache_info'):
+ cached_info[k] = v.cache_info()
+ return cached_info
+
+ def _reset_method_cache(self):
+ """
+ Resets the caches of all cached methods.
+ """
+
+ if hasattr(self, '_method_cache'):
+ self._method_cache = {}
+
+ # Note: Another thread may already be storing entries in the cache here.
+ # But it's not a big deal! It only means that our cache_info isn't
+ # guaranteed to be accurate.
+
+ for entry in self.__class__.__dict__.itervalues():
+ if isinstance(entry, property):
+ # The property getter might be cached
+ entry = entry.fget
+ if hasattr(entry, 'reset_cache_info'):
+ entry.reset_cache_info()
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/collections.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/collections.py
new file mode 100644
index 0000000..ccc37a1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/collections.py
@@ -0,0 +1,303 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Additional collection classes and collection utilities.
+"""
+
+from __future__ import absolute_import # so we can import standard 'collections'
+
+from copy import deepcopy
+try:
+ from collections import OrderedDict
+except ImportError:
+ from ordereddict import OrderedDict
+
+
+def cls_name(cls):
+ module = str(cls.__module__)
+ name = str(cls.__name__)
+ return name if module == '__builtin__' else '%s.%s' % (module, name)
+
+
+class FrozenList(list):
+ """
+ An immutable list.
+
+ After initialization it will raise :class:`~exceptions.TypeError` exceptions if modification is
+ attempted.
+
+ Note that objects stored in the list may not be immutable.
+ """
+ def __init__(self, *args, **kwargs):
+ self.locked = False
+ super(FrozenList, self).__init__(*args, **kwargs)
+ self.locked = True
+
+ def __setitem__(self, index, value):
+ if self.locked:
+ raise TypeError('frozen list')
+ return super(FrozenList, self).__setitem__(index, value)
+
+ def __delitem__(self, index):
+ if self.locked:
+ raise TypeError('frozen list')
+ return super(FrozenList, self).__delitem__(index)
+
+ def __iadd__(self, values):
+ if self.locked:
+ raise TypeError('frozen list')
+ return super(FrozenList, self).__iadd__(values)
+
+ def __deepcopy__(self, memo):
+ res = [deepcopy(v, memo) for v in self]
+ return FrozenList(res)
+
+ def append(self, value):
+ if self.locked:
+ raise TypeError('frozen list')
+ return super(FrozenList, self).append(value)
+
+    def extend(self, values):
+        if self.locked:
+            raise TypeError('frozen list')
+        return super(FrozenList, self).extend(values)
+
+ def insert(self, index, value):
+ if self.locked:
+ raise TypeError('frozen list')
+ return super(FrozenList, self).insert(index, value)
+
+EMPTY_READ_ONLY_LIST = FrozenList()
+
+
+class FrozenDict(OrderedDict):
+ """
+ An immutable ordered dict.
+
+ After initialization it will raise :class:`~exceptions.TypeError` exceptions if modification is
+ attempted.
+
+ Note that objects stored in the dict may not be immutable.
+ """
+
+ def __init__(self, *args, **kwargs):
+ self.locked = False
+ super(FrozenDict, self).__init__(*args, **kwargs)
+ self.locked = True
+
+ def __setitem__(self, key, value, **_):
+ if self.locked:
+ raise TypeError('frozen dict')
+ return super(FrozenDict, self).__setitem__(key, value)
+
+ def __delitem__(self, key, **_):
+ if self.locked:
+ raise TypeError('frozen dict')
+ return super(FrozenDict, self).__delitem__(key)
+
+ def __deepcopy__(self, memo):
+ res = [(deepcopy(k, memo), deepcopy(v, memo)) for k, v in self.iteritems()]
+ return FrozenDict(res)
+
+EMPTY_READ_ONLY_DICT = FrozenDict()
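+
+# Illustrative behavior sketch (assumption, not part of the original module):
+#
+#     frozen = FrozenDict({'key': 'value'})
+#     frozen['key'] = 'other'    # raises TypeError('frozen dict')
+#     copied = deepcopy(frozen)  # deep copies are FrozenDict instances too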
+
+
+class StrictList(list):
+ """
+ A list that raises :class:`~exceptions.TypeError` exceptions when objects of the wrong type are
+ inserted.
+ """
+
+ def __init__(self,
+ items=None,
+ value_class=None,
+ wrapper_function=None,
+ unwrapper_function=None):
+ super(StrictList, self).__init__()
+        if isinstance(items, StrictList):
+            # inherit constraints from the source, but let explicit arguments win
+            value_class = value_class or items.value_class
+            wrapper_function = wrapper_function or items.wrapper_function
+            unwrapper_function = unwrapper_function or items.unwrapper_function
+        self.value_class = value_class
+        self.wrapper_function = wrapper_function
+        self.unwrapper_function = unwrapper_function
+ if items:
+ for item in items:
+ self.append(item)
+
+ def _wrap(self, value):
+ if (self.value_class is not None) and (not isinstance(value, self.value_class)):
+ raise TypeError('value must be a "%s": %s' % (cls_name(self.value_class), repr(value)))
+ if self.wrapper_function is not None:
+ value = self.wrapper_function(value)
+ return value
+
+ def _unwrap(self, value):
+ if self.unwrapper_function is not None:
+ value = self.unwrapper_function(value)
+ return value
+
+ def __getitem__(self, index):
+ value = super(StrictList, self).__getitem__(index)
+ value = self._unwrap(value)
+ return value
+
+ def __setitem__(self, index, value):
+ value = self._wrap(value)
+ return super(StrictList, self).__setitem__(index, value)
+
+ def __iadd__(self, values):
+ values = [self._wrap(v) for v in values]
+ return super(StrictList, self).__iadd__(values)
+
+ def append(self, value):
+ value = self._wrap(value)
+ return super(StrictList, self).append(value)
+
+ def extend(self, values):
+ values = [self._wrap(v) for v in values]
+ return super(StrictList, self).extend(values)
+
+ def insert(self, index, value):
+ value = self._wrap(value)
+ return super(StrictList, self).insert(index, value)
+
+
+class StrictDict(OrderedDict):
+ """
+ An ordered dict that raises :class:`~exceptions.TypeError` exceptions when keys or values of the
+ wrong type are used.
+ """
+
+ def __init__(self,
+ items=None,
+ key_class=None,
+ value_class=None,
+ wrapper_function=None,
+ unwrapper_function=None):
+ super(StrictDict, self).__init__()
+        if isinstance(items, StrictDict):
+            # inherit constraints from the source, but let explicit arguments win
+            key_class = key_class or items.key_class
+            value_class = value_class or items.value_class
+            wrapper_function = wrapper_function or items.wrapper_function
+            unwrapper_function = unwrapper_function or items.unwrapper_function
+        self.key_class = key_class
+        self.value_class = value_class
+        self.wrapper_function = wrapper_function
+        self.unwrapper_function = unwrapper_function
+        if items:
+            # a plain dict yields keys when iterated directly, so iterate pairs
+            items = items.iteritems() if isinstance(items, dict) else items
+            for k, v in items:
+                self[k] = v
+
+ def __getitem__(self, key):
+ if (self.key_class is not None) and (not isinstance(key, self.key_class)):
+ raise TypeError('key must be a "%s": %s' % (cls_name(self.key_class), repr(key)))
+ value = super(StrictDict, self).__getitem__(key)
+ if self.unwrapper_function is not None:
+ value = self.unwrapper_function(value)
+ return value
+
+ def __setitem__(self, key, value, **_):
+ if (self.key_class is not None) and (not isinstance(key, self.key_class)):
+ raise TypeError('key must be a "%s": %s' % (cls_name(self.key_class), repr(key)))
+ if (self.value_class is not None) and (not isinstance(value, self.value_class)):
+ raise TypeError('value must be a "%s": %s' % (cls_name(self.value_class), repr(value)))
+ if self.wrapper_function is not None:
+ value = self.wrapper_function(value)
+ return super(StrictDict, self).__setitem__(key, value)
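+
+# Illustrative usage sketch (assumption, not part of the original module):
+#
+#     names = StrictList(value_class=basestring)
+#     names.append('aria')  # OK
+#     names.append(42)      # raises TypeError
+#
+#     ports = StrictDict(key_class=basestring, value_class=int)
+#     ports['http'] = 80    # OK
+#     ports[8080] = 'http'  # raises TypeError (bad key and value)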
+
+
+def merge(dict_a, dict_b, path=None, strict=False):
+ """
+ Merges dicts, recursively.
+ """
+
+ # TODO: a.add_yaml_merge(b), see https://bitbucket.org/ruamel/yaml/src/
+ # TODO: 86622a1408e0f171a12e140d53c4ffac4b6caaa3/comments.py?fileviewer=file-view-default
+
+ path = path or []
+ for key, value_b in dict_b.iteritems():
+ if key in dict_a:
+ value_a = dict_a[key]
+ if isinstance(value_a, dict) and isinstance(value_b, dict):
+ merge(value_a, value_b, path + [str(key)], strict)
+ elif value_a != value_b:
+ if strict:
+ raise ValueError('dict merge conflict at %s' % '.'.join(path + [str(key)]))
+ else:
+ dict_a[key] = value_b
+ else:
+ dict_a[key] = value_b
+ return dict_a
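+
+# Illustrative behavior sketch (assumption, not part of the original module):
+#
+#     a = {'x': 1, 'nested': {'y': 2}}
+#     merge(a, {'nested': {'z': 3}})
+#     # a is now {'x': 1, 'nested': {'y': 2, 'z': 3}}
+#     merge(a, {'x': 99}, strict=True)  # raises ValueError: conflict at 'x'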
+
+
+def is_removable(_container, _key, v):
+ return (v is None) or ((isinstance(v, dict) or isinstance(v, list)) and (len(v) == 0))
+
+
+def prune(value, is_removable_function=is_removable):
+ """
+ Deletes ``None`` and empty lists and dicts, recursively.
+ """
+
+    if isinstance(value, list):
+        # iterate in reverse so that deleting an entry doesn't skip its successor
+        for i in range(len(value) - 1, -1, -1):
+            v = value[i]
+            if is_removable_function(value, i, v):
+                del value[i]
+            else:
+                prune(v, is_removable_function)
+ elif isinstance(value, dict):
+ for k, v in value.items():
+ if is_removable_function(value, k, v):
+ del value[k]
+ else:
+ prune(v, is_removable_function)
+
+ return value
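+
+# Illustrative behavior sketch (assumption, not part of the original module):
+#
+#     prune({'a': None, 'b': {}, 'c': {'d': [], 'e': 1}})
+#     # -> {'c': {'e': 1}}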
+
+
+# TODO: Move the following two functions to some place parser-specific
+
+def deepcopy_with_locators(value):
+ """
+ Like :func:`~copy.deepcopy`, but also copies over locators.
+ """
+
+ res = deepcopy(value)
+ copy_locators(res, value)
+ return res
+
+
+def copy_locators(target, source):
+ """
+ Copies over ``_locator`` for all elements, recursively.
+
+ Assumes that target and source have exactly the same list/dict structure.
+ """
+
+ locator = getattr(source, '_locator', None)
+ if locator is not None:
+ try:
+ setattr(target, '_locator', locator)
+ except AttributeError:
+ pass
+
+ if isinstance(target, list) and isinstance(source, list):
+ for i, _ in enumerate(target):
+ copy_locators(target[i], source[i])
+ elif isinstance(target, dict) and isinstance(source, dict):
+ for k, v in target.iteritems():
+ copy_locators(v, source[k])
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/console.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/console.py
new file mode 100644
index 0000000..81e8cf8
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/console.py
@@ -0,0 +1,132 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Abstraction API over terminal color libraries.
+"""
+
+import os
+import sys
+from StringIO import StringIO
+
+from contextlib import contextmanager
+
+from ..cli import color
+from . import formatting
+
+
+_indent_string = ''
+
+
+class TopologyStylizer(object):
+ def __init__(self, indentation=0):
+ self._str = StringIO()
+ self._indentation = indentation
+
+ def write(self, string):
+ self._str.write(' ' * self._indentation)
+ self._str.write(string)
+ self._str.write(os.linesep)
+
+ @contextmanager
+ def indent(self, indentation=2):
+ self._indentation += indentation
+ yield
+ self._indentation -= indentation
+
+ @staticmethod
+ def type_style(value):
+ return Colored.blue(value, bold=True)
+
+ @staticmethod
+ def node_style(value):
+ return Colored.red(value, bold=True)
+
+ @staticmethod
+ def property_style(value):
+ return Colored.magenta(value, bold=True)
+
+ @staticmethod
+ def literal_style(value):
+ return Colored.magenta(formatting.safe_repr(value))
+
+ @staticmethod
+ def required_style(value):
+ return Colored.white(value)
+
+ @staticmethod
+ def meta_style(value):
+ return Colored.green(value)
+
+ def __str__(self):
+ return self._str.getvalue()
+
+
+def puts(string='', newline=True, stream=sys.stdout):
+ stream.write(_indent_string)
+ stream.write(formatting.safe_str(string))
+ if newline:
+ stream.write(os.linesep)
+
+
+@contextmanager
+def indent(size=4):
+ global _indent_string
+ original_indent_string = _indent_string
+ try:
+ _indent_string += ' ' * size
+ yield
+ finally:
+ _indent_string = original_indent_string
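+
+# Illustrative usage sketch (assumption, not part of the original module):
+#
+#     puts('section:')
+#     with indent(2):
+#         puts('indented two spaces')
+#     puts('back at column zero')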
+
+
+class Colored(object):
+ @staticmethod
+ def black(string, always=False, bold=False):
+ return Colored._color(string, color.Colors.Fore.BLACK, bold)
+
+ @staticmethod
+ def red(string, always=False, bold=False):
+ return Colored._color(string, color.Colors.Fore.RED, bold)
+
+ @staticmethod
+ def green(string, always=False, bold=False):
+ return Colored._color(string, color.Colors.Fore.GREEN, bold)
+
+ @staticmethod
+ def yellow(string, always=False, bold=False):
+ return Colored._color(string, color.Colors.Fore.YELLOW, bold)
+
+ @staticmethod
+ def blue(string, always=False, bold=False):
+ return Colored._color(string, color.Colors.Fore.BLUE, bold)
+
+ @staticmethod
+ def magenta(string, always=False, bold=False):
+ return Colored._color(string, color.Colors.Fore.MAGENTA, bold)
+
+ @staticmethod
+ def cyan(string, always=False, bold=False):
+ return Colored._color(string, color.Colors.Fore.CYAN, bold)
+
+ @staticmethod
+ def white(string, always=False, bold=False):
+ return Colored._color(string, color.Colors.Fore.WHITE, bold)
+
+ @staticmethod
+ def _color(string, fore, bold):
+ return color.StringStylizer(string, color.ColorSpec(
+ fore=fore,
+ style=color.Colors.Style.BRIGHT if bold else color.Colors.Style.NORMAL))
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/exceptions.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/exceptions.py
new file mode 100644
index 0000000..5bb0e6d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/exceptions.py
@@ -0,0 +1,120 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities for extracting and formatting Python exceptions.
+"""
+
+import sys
+import linecache
+import StringIO
+import traceback as tb
+
+import jsonpickle
+
+from .console import (puts, indent, Colored)
+
+
+ENTRY_FORMAT = 'File "{filename}", line {lineno}, in {name}'
+
+
+def print_exception(e, full=True, cause=False, traceback=None):
+ """
+ Prints the exception with nice colors and such.
+ """
+ def format_heading(e):
+ return '{0}{1}: {2}'.format(
+ Colored.red('Caused by ') if cause else '',
+ Colored.red(e.__class__.__name__, bold=True),
+ Colored.red(e))
+
+ puts(format_heading(e))
+ if full:
+ if cause:
+ if traceback:
+ print_traceback(traceback, True)
+ else:
+ print_traceback()
+ if hasattr(e, 'cause') and e.cause:
+ traceback = e.cause_traceback if hasattr(e, 'cause_traceback') else None
+ print_exception(e.cause, full=full, cause=True, traceback=traceback)
+
+
+def print_traceback(traceback=None, print_last_stack=False):
+ """
+ Prints the traceback with nice colors and such.
+ """
+
+ if traceback is None:
+ _, _, traceback = sys.exc_info()
+ while traceback is not None:
+ frame = traceback.tb_frame
+ code = frame.f_code
+ filename = code.co_filename
+ lineno = traceback.tb_lineno
+ name = code.co_name
+ with indent(2):
+ puts(ENTRY_FORMAT.format(filename=Colored.blue(filename),
+ lineno=Colored.cyan(lineno),
+ name=Colored.cyan(name)))
+ linecache.checkcache(filename)
+ line = linecache.getline(filename, lineno, frame.f_globals)
+ if line:
+ with indent(2):
+ puts(line.strip())
+ traceback = traceback.tb_next
+ if print_last_stack and (traceback is None):
+ # Print stack of *last* traceback
+ _print_stack(frame)
+
+
+def _print_stack(frame):
+ entries = tb.extract_stack(frame)
+ if not entries:
+ return
+ puts(Colored.red('Call stack:'))
+ with indent(2):
+ for filename, lineno, name, line in entries:
+ puts(ENTRY_FORMAT.format(filename=Colored.blue(filename),
+ lineno=Colored.cyan(lineno),
+ name=Colored.cyan(name)))
+ with indent(2):
+ puts(line)
+
+
+def get_exception_as_string(exc_type, exc_val, traceback):
+ s_traceback = StringIO.StringIO()
+ tb.print_exception(
+ etype=exc_type,
+ value=exc_val,
+ tb=traceback,
+ file=s_traceback)
+ return s_traceback.getvalue()
+
+
+class _WrappedException(Exception):
+
+ def __init__(self, exception_type, exception_str):
+ super(_WrappedException, self).__init__(exception_type, exception_str)
+ self.exception_type = exception_type
+ self.exception_str = exception_str
+
+
+def wrap_if_needed(exception):
+ try:
+ jsonpickle.loads(jsonpickle.dumps(exception))
+ return exception
+ except BaseException:
+ return _WrappedException(type(exception).__name__, str(exception))
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/file.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/file.py
new file mode 100644
index 0000000..75f2859
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/file.py
@@ -0,0 +1,46 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+File utilities.
+"""
+
+import errno
+import os
+import shutil
+
+
+def makedirs(path):
+ """
+ Enhancement of :func:`os.makedirs` that doesn't fail if the directory already exists.
+ """
+ if os.path.isdir(path):
+ return
+    try:
+        os.makedirs(path)
+    except OSError as e:  # os.makedirs raises OSError, not IOError
+        if e.errno != errno.EEXIST:
+            raise
+
+def remove_if_exists(path):
+ try:
+ if os.path.isfile(path):
+ os.remove(path)
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+
+ except OSError as e:
+ if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
+ raise # re-raise exception if a different error occurred
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/formatting.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/formatting.py
new file mode 100644
index 0000000..fa34b7d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/formatting.py
@@ -0,0 +1,235 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+String formatting and string-based format utilities.
+"""
+
+import json
+from types import MethodType
+
+from ruamel import yaml # @UnresolvedImport
+
+from .collections import FrozenList, FrozenDict, StrictList, StrictDict, OrderedDict
+
+
+PLURALIZE_EXCEPTIONS = {}
+
+
+# Add our types to ruamel.yaml (for round trips)
+yaml.representer.RoundTripRepresenter.add_representer(
+ FrozenList, yaml.representer.RoundTripRepresenter.represent_list)
+yaml.representer.RoundTripRepresenter.add_representer(
+ FrozenDict, yaml.representer.RoundTripRepresenter.represent_dict)
+yaml.representer.RoundTripRepresenter.add_representer(
+ StrictList, yaml.representer.RoundTripRepresenter.represent_list)
+yaml.representer.RoundTripRepresenter.add_representer(
+ StrictDict, yaml.representer.RoundTripRepresenter.represent_dict)
+
+# Without this, ruamel.yaml will output "!!omap" types, which is
+# technically correct but unnecessarily verbose for our uses
+yaml.representer.RoundTripRepresenter.add_representer(
+ OrderedDict, yaml.representer.RoundTripRepresenter.represent_dict)
+
+
+class JsonAsRawEncoder(json.JSONEncoder):
+ """
+ A :class:`JSONEncoder` that will use the ``as_raw`` property of objects if available.
+ """
+    def raw_encoder_default(self, obj):
+        try:
+            # a raw iterator is not JSON-serializable; materialize it as a list
+            return list(iter(obj))
+        except TypeError:
+            if hasattr(obj, 'as_raw'):
+                return as_raw(obj)
+            return str(obj)
+
+ def __init__(self, *args, **kwargs):
+ kwargs['default'] = self.raw_encoder_default
+ super(JsonAsRawEncoder, self).__init__(*args, **kwargs)
+
+
+class YamlAsRawDumper(yaml.dumper.RoundTripDumper): # pylint: disable=too-many-ancestors
+ """
+ A :class:`RoundTripDumper` that will use the ``as_raw`` property of objects if available.
+ """
+
+ def represent_data(self, data):
+ if hasattr(data, 'as_raw'):
+ data = as_raw(data)
+ return super(YamlAsRawDumper, self).represent_data(data)
+
+
+def decode_list(data):
+ decoded_list = []
+ for item in data:
+ if isinstance(item, unicode):
+ item = item.encode('utf-8')
+ elif isinstance(item, list):
+ item = decode_list(item)
+ elif isinstance(item, dict):
+ item = decode_dict(item)
+ decoded_list.append(item)
+ return decoded_list
+
+
+def decode_dict(data):
+ decoded_dict = {}
+ for key, value in data.iteritems():
+ if isinstance(key, unicode):
+ key = key.encode('utf-8')
+ if isinstance(value, unicode):
+ value = value.encode('utf-8')
+ elif isinstance(value, list):
+ value = decode_list(value)
+ elif isinstance(value, dict):
+ value = decode_dict(value)
+ decoded_dict[key] = value
+ return decoded_dict
+
+
+def safe_str(value):
+ """
+ Like :class:`str` coercion, but makes sure that Unicode strings are properly encoded, and will
+ never return ``None``.
+ """
+
+ try:
+ return str(value)
+ except UnicodeEncodeError:
+ return unicode(value).encode('utf8')
+
+
+def safe_repr(value):
+ """
+ Like :func:`repr`, but calls :func:`as_raw` and :func:`as_agnostic` first.
+ """
+
+ return repr(as_agnostic(as_raw(value)))
+
+
+def string_list_as_string(strings):
+ """
+ Nice representation of a list of strings.
+ """
+
+ if not strings:
+ return 'none'
+ return ', '.join('"{0}"'.format(safe_str(v)) for v in strings)
+
+
+def pluralize(noun):
+ plural = PLURALIZE_EXCEPTIONS.get(noun)
+ if plural is not None:
+ return plural
+ elif noun.endswith('s'):
+ return '{0}es'.format(noun)
+ elif noun.endswith('y'):
+ return '{0}ies'.format(noun[:-1])
+ else:
+ return '{0}s'.format(noun)
+
+
+def as_raw(value):
+ """
+ Converts values using their ``as_raw`` property, if it exists, recursively.
+ """
+
+ if hasattr(value, 'as_raw'):
+ value = value.as_raw
+ if isinstance(value, MethodType):
+ # Old-style Python classes don't support properties
+ value = value()
+ elif isinstance(value, list):
+ value = list(value)
+ for i, v in enumerate(value):
+ value[i] = as_raw(v)
+ elif isinstance(value, dict):
+ value = dict(value)
+ for k, v in value.iteritems():
+ value[k] = as_raw(v)
+ return value
+
+
+def as_raw_list(value):
+ """
+ Assuming value is a list, converts its values using :func:`as_raw`.
+ """
+
+ if value is None:
+ return []
+ if isinstance(value, dict):
+ value = value.itervalues()
+ return [as_raw(v) for v in value]
+
+
+def as_raw_dict(value):
+ """
+ Assuming value is a dict, converts its values using :func:`as_raw`. The keys are left as is.
+ """
+
+ if value is None:
+ return OrderedDict()
+ return OrderedDict((
+ (k, as_raw(v)) for k, v in value.iteritems()))
+
+
+def as_agnostic(value):
+ """
+ Converts subclasses of list and dict to standard lists and dicts, and Unicode strings to
+ non-Unicode if possible, recursively.
+
+ Useful for creating human-readable output of structures.
+ """
+
+ if isinstance(value, unicode):
+ try:
+ value = str(value)
+ except UnicodeEncodeError:
+ pass
+ elif isinstance(value, list):
+ value = list(value)
+ elif isinstance(value, dict):
+ value = dict(value)
+
+ if isinstance(value, list):
+ for i, _ in enumerate(value):
+ value[i] = as_agnostic(value[i])
+ elif isinstance(value, dict):
+ for k, v in value.iteritems():
+ value[k] = as_agnostic(v)
+
+ return value
+
+
+def json_dumps(value, indent=2):
+ """
+ JSON dumps that supports Unicode and the ``as_raw`` property of objects if available.
+ """
+
+ return json.dumps(value, indent=indent, ensure_ascii=False, cls=JsonAsRawEncoder)
+
+
+def yaml_dumps(value, indent=2):
+ """
+ YAML dumps that supports Unicode and the ``as_raw`` property of objects if available.
+ """
+
+ return yaml.dump(value, indent=indent, allow_unicode=True, Dumper=YamlAsRawDumper)
+
+
+def yaml_loads(value):
+ return yaml.load(value, Loader=yaml.SafeLoader)
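+
+# Illustrative usage sketch (assumption, not part of the original module):
+# any object exposing an ``as_raw`` property serializes transparently:
+#
+#     class Node(object):
+#         @property
+#         def as_raw(self):
+#             return OrderedDict((('name', 'node_1'), ('port', 8080)))
+#
+#     json_dumps(Node())  # -> '{\n  "name": "node_1",\n  "port": 8080\n}'
+#     yaml_dumps(Node())  # -> 'name: node_1\nport: 8080\n'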
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/http.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/http.py
new file mode 100644
index 0000000..c8357e9
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/http.py
@@ -0,0 +1,66 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+HTTP utilities.
+"""
+
+import os
+import tempfile
+
+import requests
+
+
+def download_file(url, destination=None, logger=None, progress_handler=None):
+ """
+ Download file.
+
+ :param url: URL from which to download
+ :type url: basestring
+ :param destination: path where the file should be saved or ``None`` to auto-generate
+ :type destination: basestring
+ :returns: path where the file was saved
+ :rtype: basestring
+ :raises exceptions.IOError:
+ :raises requests.exceptions.RequestException:
+ """
+ chunk_size = 1024
+
+ if not destination:
+ file_descriptor, destination = tempfile.mkstemp()
+ os.close(file_descriptor)
+ if logger:
+ logger.info('Downloading {0} to {1}...'.format(url, destination))
+
+ response = requests.get(url, stream=True)
+ final_url = response.url
+ if final_url != url and logger:
+ logger.debug('Redirected to {0}'.format(final_url))
+
+ read_bytes = 0
+ total_size = int(response.headers['Content-Length']) \
+ if 'Content-Length' in response.headers else None
+ try:
+ with open(destination, 'wb') as destination_file:
+ for chunk in response.iter_content(chunk_size):
+ destination_file.write(chunk)
+                if total_size and progress_handler:
+                    # Only show progress if we know the total content length;
+                    # count actual bytes, since the last chunk may be short
+                    read_bytes += len(chunk)
+                    progress_handler(read_bytes, total_size)
+ finally:
+ response.close()
+
+ return destination
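+
+# Illustrative usage sketch (assumption, not part of the original module):
+#
+#     def show_progress(read, total):
+#         print '{0}/{1} bytes'.format(read, total)
+#
+#     path = download_file('http://example.org/archive.tar.gz',
+#                          progress_handler=show_progress)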
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/imports.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/imports.py
new file mode 100644
index 0000000..14ad09e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/imports.py
@@ -0,0 +1,96 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities for dynamically loading Python code.
+"""
+
+import pkgutil
+import importlib
+
+
+def import_fullname(name, paths=None):
+ """
+ Imports a variable or class based on a full name, optionally searching for it in the paths.
+ """
+ paths = paths or []
+ if name is None:
+ return None
+
+ def do_import(name):
+ if name and ('.' in name):
+ module_name, name = name.rsplit('.', 1)
+ return getattr(__import__(module_name, fromlist=[name], level=0), name)
+ else:
+ raise ImportError('import not found: %s' % name)
+
+    try:
+        return do_import(name)
+    except ImportError:
+        for path in paths:
+            try:
+                return do_import('%s.%s' % (path, name))
+            except ImportError:
+                continue  # not found under this path; try the next one
+
+    raise ImportError('import not found: %s' % name)
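+
+# Illustrative usage sketch (assumption, not part of the original module):
+#
+#     OrderedDict = import_fullname('OrderedDict', paths=['collections'])
+#     # equivalent to: from collections import OrderedDict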
+
+
+def import_modules(name):
+ """
+ Imports a module and all its sub-modules, recursively. Relies on modules defining a ``MODULES``
+ attribute listing their sub-module names.
+ """
+
+ module = __import__(name, fromlist=['MODULES'], level=0)
+ if hasattr(module, 'MODULES'):
+ for module_ in module.MODULES:
+ import_modules('%s.%s' % (name, module_))
+
+
+# TODO merge with import_fullname
+def load_attribute(attribute_path):
+ """
+ Dynamically load an attribute based on the path to it. E.g.
+ ``some_package.some_module.some_attribute``, will load ``some_attribute`` from the
+ ``some_package.some_module`` module.
+ """
+ module_name, attribute_name = attribute_path.rsplit('.', 1)
+ try:
+ module = importlib.import_module(module_name)
+ return getattr(module, attribute_name)
+ except ImportError:
+ # TODO: handle
+ raise
+ except AttributeError:
+ # TODO: handle
+ raise
+
+
+def iter_modules():
+    # pkgutil had issues in Python 2.6: accessing any root-level directory
+    # failed, which made the entire import process fail. Since we only need
+    # aria_extension-related loading, in the meantime we try to import only
+    # those (and assume they are not located at the root level).
+    # [In Python 2.7 pkgutil does actually ignore any OSError.]
+ yielded = {}
+ for importer in pkgutil.iter_importers():
+ try:
+ for module_name, ispkg in pkgutil.iter_importer_modules(importer):
+ if module_name not in yielded:
+ yielded[module_name] = True
+ yield importer, module_name, ispkg
+ except OSError:
+ pass
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/openclose.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/openclose.py
new file mode 100644
index 0000000..722885c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/openclose.py
@@ -0,0 +1,36 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities for working with open/close patterns.
+"""
+
+class OpenClose(object):
+ """
+ Wraps an object that has ``open()`` and ``close()`` methods to support the ``with`` keyword.
+ """
+
+ def __init__(self, wrapped):
+ self.wrapped = wrapped
+
+ def __enter__(self):
+ if hasattr(self.wrapped, 'open'):
+ self.wrapped.open()
+ return self.wrapped
+
+ def __exit__(self, the_type, value, traceback):
+ if hasattr(self.wrapped, 'close'):
+ self.wrapped.close()
+ return False
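+
+# Illustrative usage sketch (assumption, not part of the original module):
+#
+#     with OpenClose(resource) as opened:
+#         opened.do_something()
+#     # resource.close() has been called here, if it exists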
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/plugin.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/plugin.py
new file mode 100644
index 0000000..4fb6a8e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/plugin.py
@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Plugin utilities.
+"""
+
+import wagon
+
+
+def create(source, destination_dir):
+ return wagon.create(source=source, archive_destination_dir=destination_dir)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/process.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/process.py
new file mode 100644
index 0000000..ec4a72d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/process.py
@@ -0,0 +1,51 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Process utilities.
+"""
+
+import os
+
+
+def append_to_path(*args, **kwargs):
+    """
+    Adds one or more paths to the system path of an environment.
+
+    The environment will be that of the current process unless another is passed using the
+    ``env`` keyword argument.
+
+    :param args: paths to add
+    :param kwargs: ``env`` may be used to pass a custom environment to use
+    """
+    _append_to_path('PATH', *args, **kwargs)
+
+
+def append_to_pythonpath(*args, **kwargs):
+    """
+    Adds one or more paths to the Python path of an environment.
+
+    The environment will be that of the current process unless another is passed using the
+    ``env`` keyword argument.
+
+    :param args: paths to add
+    :param kwargs: ``env`` may be used to pass a custom environment to use
+    """
+    _append_to_path('PYTHONPATH', *args, **kwargs)
+
+
+def _append_to_path(path, *args, **kwargs):
+    env = kwargs.get('env') or os.environ
+    # new paths are placed first, so they take precedence over existing ones
+    env[path] = '{0}{1}{2}'.format(
+        os.pathsep.join(args),
+        os.pathsep,
+        env.get(path, '')
+    )
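+
+# Illustrative usage sketch (assumption, not part of the original module):
+#
+#     env = dict(os.environ)
+#     append_to_pythonpath('/opt/plugins', env=env)
+#     # env['PYTHONPATH'] now starts with '/opt/plugins' + os.pathsep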
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/specification.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/specification.py
new file mode 100644
index 0000000..8c51134
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/specification.py
@@ -0,0 +1,57 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities for cross-referencing code with specification documents.
+"""
+
+from .collections import OrderedDict
+
+
+DSL_SPECIFICATIONS = {}
+
+
+def implements_specification(section, spec):
+ """
+ Decorator for specification implementations.
+
+ Used for documentation and standards compliance.
+ """
+
+ from .type import full_type_name
+
+ def decorator(obj):
+ specification = DSL_SPECIFICATIONS.get(spec)
+
+ if specification is None:
+ specification = {}
+ DSL_SPECIFICATIONS[spec] = specification
+
+ if section in specification:
+ raise Exception('you cannot specify the same @implements_specification twice, consider'
+ ' adding \'-1\', \'-2\', etc.: {0}, {1}'.format(spec, section))
+
+ specification[section] = OrderedDict((
+ ('code', full_type_name(obj)),
+ ('doc', obj.__doc__)))
+
+ try:
+ setattr(obj, '_dsl_specifications', {section: section, spec: spec})
+ except BaseException:
+ pass
+
+ return obj
+
+ return decorator
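+
+# Illustrative usage sketch (assumption, not part of the original module):
+#
+#     @implements_specification('3.5.2', 'tosca-simple-1.0')
+#     def parse_version(value):
+#         """Parses a TOSCA version string."""
+#         ...
+#
+# The decorated object is recorded in DSL_SPECIFICATIONS under the given
+# spec and section, for documentation and standards-compliance reports.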
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/threading.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/threading.py
new file mode 100644
index 0000000..f5ca302
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/threading.py
@@ -0,0 +1,286 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Threading utilities.
+"""
+
+from __future__ import absolute_import # so we can import standard 'threading'
+
+import sys
+import itertools
+import multiprocessing
+from threading import (Thread, Lock)
+from Queue import (Queue, Full, Empty)
+
+from .exceptions import print_exception
+
+class ExecutorException(Exception):
+ pass
+
+
+class DaemonThread(Thread):
+ def __init__(self, *args, **kwargs):
+ super(DaemonThread, self).__init__(*args, **kwargs)
+ self.daemon = True
+
+ def run(self):
+ """
+ We're overriding ``Thread.run`` in order to avoid annoying (but harmless) error messages
+ during shutdown. The problem is that CPython nullifies the global state _before_ shutting
+ down daemon threads, so that exceptions might happen, and then ``Thread.__bootstrap_inner``
+ prints them out.
+
+ Our solution is to swallow these exceptions here.
+
+ The side effect is that uncaught exceptions in our own thread code will _not_ be printed out
+ as usual, so it's our responsibility to catch them in our code.
+ """
+
+ try:
+ super(DaemonThread, self).run()
+ except SystemExit as e:
+ # This exception should be bubbled up
+ raise e
+ except BaseException:
+ # Exceptions might occur in daemon threads during interpreter shutdown
+ pass
+
+
+# https://gist.github.com/tliron/81dd915166b0bfc64be08b4f8e22c835
+class FixedThreadPoolExecutor(object):
+ """
+ Executes tasks in a fixed thread pool.
+
+ Makes sure to gather all returned results and thrown exceptions in one place, in order of task
+ submission.
+
+ Example::
+
+ def sum(arg1, arg2):
+ return arg1 + arg2
+
+ executor = FixedThreadPoolExecutor(10)
+ try:
+ for value in range(100):
+ executor.submit(sum, value, value)
+ executor.drain()
+ except:
+ executor.close()
+ executor.raise_first()
+ print executor.returns
+
+ You can also use it with the Python ``with`` keyword, in which case you don't need to call
+ ``close`` explicitly::
+
+ with FixedThreadPoolExecutor(10) as executor:
+ for value in range(100):
+ executor.submit(sum, value, value)
+ executor.drain()
+ executor.raise_first()
+ print executor.returns
+ """
+
+ _CYANIDE = object() # Special task marker used to kill worker threads.
+
+ def __init__(self,
+ size=None,
+ timeout=None,
+ print_exceptions=False):
+ """
+ :param size: number of threads in the pool; if ``None`` will use an optimal number for the
+ platform
+ :param timeout: timeout in seconds for all blocking operations (``None`` means no timeout)
+ :param print_exceptions: set to ``True`` in order to print exceptions from tasks
+ """
+ if not size:
+ try:
+ size = multiprocessing.cpu_count() * 2 + 1
+ except NotImplementedError:
+ size = 3
+
+ self.size = size
+ self.timeout = timeout
+ self.print_exceptions = print_exceptions
+
+ self._tasks = Queue()
+ self._returns = {}
+ self._exceptions = {}
+ self._id_creator = itertools.count()
+ self._lock = Lock() # for console output
+
+ self._workers = []
+ for index in range(size):
+ worker = DaemonThread(
+ name='%s%d' % (self.__class__.__name__, index),
+ target=self._thread_worker)
+ worker.start()
+ self._workers.append(worker)
+
+ def submit(self, func, *args, **kwargs):
+ """
+ Submit a task for execution.
+
+ The task will be called ASAP on the next available worker thread in the pool.
+
+ :raises ExecutorException: if cannot be submitted
+ """
+
+ try:
+ self._tasks.put((self._id_creator.next(), func, args, kwargs), timeout=self.timeout)
+ except Full:
+ raise ExecutorException('cannot submit task: queue is full')
+
+ def close(self):
+ """
+ Blocks until all current tasks finish execution and all worker threads are dead.
+
+ You cannot submit tasks anymore after calling this.
+
+ This is called automatically upon exit if you are using the ``with`` keyword.
+ """
+
+ self.drain()
+ while self.is_alive:
+ try:
+ self._tasks.put(self._CYANIDE, timeout=self.timeout)
+ except Full:
+ raise ExecutorException('cannot close executor: a thread seems to be hanging')
+ self._workers = None
+
+ def drain(self):
+ """
+ Blocks until all current tasks finish execution, but leaves the worker threads alive.
+ """
+
+ self._tasks.join() # oddly, the API does not support a timeout parameter
+
+ @property
+ def is_alive(self):
+ """
+ True if any of the worker threads are alive.
+ """
+
+ for worker in self._workers:
+ if worker.is_alive():
+ return True
+ return False
+
+ @property
+ def returns(self):
+ """
+ The returned values from all tasks, in order of submission.
+ """
+
+ return [self._returns[k] for k in sorted(self._returns)]
+
+ @property
+ def exceptions(self):
+ """
+ The raised exceptions from all tasks, in order of submission.
+ """
+
+ return [self._exceptions[k] for k in sorted(self._exceptions)]
+
+ def raise_first(self):
+ """
+ If exceptions were thrown by any task, then the first one will be raised.
+
+ This is rather arbitrary: proper handling would involve iterating all the exceptions.
+ However, if you want to use the "raise" mechanism, you are limited to raising only one of
+ them.
+ """
+
+ exceptions = self.exceptions
+ if exceptions:
+ raise exceptions[0]
+
+ def _thread_worker(self):
+ while True:
+ if not self._execute_next_task():
+ break
+
+ def _execute_next_task(self):
+ try:
+ task = self._tasks.get(timeout=self.timeout)
+ except Empty:
+ # Happens if timeout is reached
+ return True
+ if task == self._CYANIDE:
+ # Time to die :(
+ return False
+ self._execute_task(*task)
+ return True
+
+ def _execute_task(self, task_id, func, args, kwargs):
+ try:
+ result = func(*args, **kwargs)
+ self._returns[task_id] = result
+ except Exception as e:
+ self._exceptions[task_id] = e
+ if self.print_exceptions:
+ with self._lock:
+ print_exception(e)
+ self._tasks.task_done()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, the_type, value, traceback):
+ self.close()
+ return False
+
+
+class LockedList(list):
+ """
+ A list that supports the ``with`` keyword with a built-in lock.
+
+ Though Python lists are thread-safe in that they will not raise exceptions during concurrent
+ access, they do not guarantee atomicity. This class will let you gain atomicity when needed.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(LockedList, self).__init__(*args, **kwargs)
+ self.lock = Lock()
+
+ def __enter__(self):
+ return self.lock.__enter__()
+
+ def __exit__(self, the_type, value, traceback):
+ return self.lock.__exit__(the_type, value, traceback)
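+
+# Illustrative usage sketch (assumption): lock around a compound operation
+#
+#     locked = LockedList()
+#     with locked:
+#         if value not in locked:  # check-then-append becomes atomic
+#             locked.append(value)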
+
+
+class ExceptionThread(Thread):
+ """
+ A thread from which top level exceptions can be retrieved or re-raised.
+ """
+ def __init__(self, *args, **kwargs):
+ Thread.__init__(self, *args, **kwargs)
+ self.exception = None
+ self.daemon = True
+
+ def run(self):
+ try:
+ super(ExceptionThread, self).run()
+ except BaseException:
+ self.exception = sys.exc_info()
+
+ def is_error(self):
+ return self.exception is not None
+
+ def raise_error_if_exists(self):
+ if self.is_error():
+ type_, value, trace = self.exception
+ raise type_, value, trace
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/type.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/type.py
new file mode 100644
index 0000000..fe88a62
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/type.py
@@ -0,0 +1,156 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Type utilities.
+"""
+
+import datetime
+
+from .collections import OrderedDict
+from .specification import implements_specification
+
+
+BASE_TYPES_TO_CANONICAL_NAMES = OrderedDict((
+    # TOSCA aliases; ordered so that bool precedes int, because
+    # isinstance(True, int) is also true and canonical_type_name checks in order
+    (None.__class__, 'null'),
+    (basestring, 'string'),
+    (bool, 'boolean'),
+    (int, 'integer'),
+    (float, 'float'),
+    (list, 'list'),
+    (tuple, 'list'),
+    (dict, 'map'),
+    (datetime.datetime, 'timestamp')
+))
+
+NAMES_TO_CANONICAL_TYPES = {
+ # Python:
+ 'none': None.__class__,
+ 'basestring': unicode,
+ 'str': unicode,
+ 'unicode': unicode,
+ 'int': int,
+ 'float': float, # also a TOSCA alias
+ 'bool': bool,
+ 'list': list, # also a TOSCA alias
+ 'tuple': list,
+ 'dict': dict,
+ 'datetime': datetime.datetime,
+
+ # YAML 1.2:
+ 'tag:yaml.org,2002:null': None.__class__,
+ 'tag:yaml.org,2002:str': unicode,
+ 'tag:yaml.org,2002:integer': int,
+ 'tag:yaml.org,2002:float': float,
+ 'tag:yaml.org,2002:bool': bool,
+
+ # TOSCA aliases:
+ 'null': None.__class__,
+ 'string': unicode,
+ 'integer': int,
+ 'boolean': bool,
+
+ # TOSCA custom types:
+ 'map': dict,
+ 'timestamp': datetime.datetime
+}
+
+
+def full_type_name(value):
+ """
+ The full class name of a type or instance.
+ """
+
+ if not isinstance(value, type):
+ value = value.__class__
+ module = str(value.__module__)
+ name = str(value.__name__)
+ return name if module == '__builtin__' else '{0}.{1}'.format(module, name)
+
+
+@implements_specification('3.2.1-1', 'tosca-simple-1.0')
+def canonical_type_name(value):
+ """
+ Returns the canonical TOSCA type name of a primitive value, or ``None`` if unknown.
+
+ For a list of TOSCA type names, see the `TOSCA Simple Profile v1.0
+ cos01 specification <http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01
+ /TOSCA-Simple-Profile-YAML-v1.0-cos01.html#_Toc373867862>`__
+ """
+
+ for the_type, name in BASE_TYPES_TO_CANONICAL_NAMES.iteritems():
+ if isinstance(value, the_type):
+ return name
+ return None
+
+
+@implements_specification('3.2.1-2', 'tosca-simple-1.0')
+def canonical_type(type_name):
+ """
+ Return the canonical type for any Python, YAML, or TOSCA type name or alias, or ``None`` if
+ unsupported.
+
+ :param type_name: Type name (case insensitive)
+ """
+
+ return NAMES_TO_CANONICAL_TYPES.get(type_name.lower())
+
+
+def validate_value_type(value, type_name):
+ """
+ Validate that a value is of a specific type. Supports Python, YAML, and TOSCA type names and
+ aliases.
+
+ :param type_name: type name (case insensitive)
+ :raises ~exceptions.ValueError: on type mismatch
+ """
+
+ the_type = canonical_type(type_name)
+ if the_type is None:
+ raise RuntimeError('Unsupported type name: {0}'.format(type_name))
+
+ # The following Python types do not inherit from the canonical type, but are considered valid
+ if (the_type is unicode) and isinstance(value, str):
+ return
+ if (the_type is list) and isinstance(value, tuple):
+ return
+
+ if not isinstance(value, the_type):
+ raise ValueError('Value {0} is not of type {1}'.format(value, type_name))
+
+
+def convert_value_to_type(str_value, python_type_name):
+ """
+ Converts a value to a specific Python primitive type.
+
+ :param python_type_name: Python primitive type name (case insensitive)
+ :raises ~exceptions.ValueError: for unsupported types or conversion failure
+ """
+
+ python_type_name = python_type_name.lower()
+ try:
+ if python_type_name in ('str', 'unicode'):
+ return str_value.decode('utf-8')
+ elif python_type_name == 'int':
+ return int(str_value)
+ elif python_type_name == 'bool':
+ return bool(str_value)
+ elif python_type_name == 'float':
+ return float(str_value)
+ else:
+ raise ValueError('Unsupported Python type name: {0}'.format(python_type_name))
+ except ValueError:
+        raise ValueError('Failed to convert {0} to {1}'.format(str_value,
+                                                               python_type_name))
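+
+# Illustrative usage sketch (assumption, not part of the original module):
+#
+#     canonical_type_name(True)            # -> 'boolean'
+#     canonical_type('timestamp')          # -> datetime.datetime
+#     validate_value_type(3, 'integer')    # OK
+#     validate_value_type(3, 'string')     # raises ValueError
+#     convert_value_to_type('42', 'int')   # -> 42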
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/uris.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/uris.py
new file mode 100644
index 0000000..49881f2
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/uris.py
@@ -0,0 +1,48 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+URI utilities.
+"""
+
+import os
+import urlparse
+
+
+_IS_WINDOWS = (os.name == 'nt')
+
+
+def as_file(uri):
+ """
+ If the URI is a file (either the ``file`` scheme or no scheme), then returns the normalized
+ path. Otherwise, returns ``None``.
+ """
+
+ if _IS_WINDOWS:
+ # We need this extra check in Windows before urlparse because paths might have a drive
+ # prefix, e.g. "C:" which will be considered a scheme for urlparse below
+ path = uri.replace('/', '\\')
+ if os.path.exists(path):
+ return os.path.normpath(path)
+
+ url = urlparse.urlparse(uri)
+ scheme = url.scheme
+ if (not scheme) or (scheme == 'file'):
+ path = url.path
+ if _IS_WINDOWS:
+ path = path.replace('/', '\\')
+ return os.path.normpath(path)
+
+ return None
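+
+# Illustrative behavior sketch on POSIX (assumption, not part of the module):
+#
+#     as_file('/etc/hosts')            # -> '/etc/hosts' (no scheme)
+#     as_file('file:///etc/hosts')     # -> '/etc/hosts'
+#     as_file('http://example.org/x')  # -> None (not a file URI)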
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/uuid.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/uuid.py
new file mode 100644
index 0000000..d6c9ced
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/uuid.py
@@ -0,0 +1,70 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+UUID generation utilities.
+"""
+
+from __future__ import absolute_import # so we can import standard 'uuid'
+
+from random import randrange
+from uuid import uuid4
+
+from shortuuid import ShortUUID
+
+
+# Alphanumeric without visually ambiguous characters; default length is 22
+UUID_BASE57 = ShortUUID()
+
+# Lower-case alphanumeric; default length is 25
+UUID_LOWERCASE_ALPHANUMERIC = ShortUUID(alphabet='abcdefghijklmnopqrstuvwxyz0123456789')
+
+
+def generate_uuid(length=None, variant='base57'):
+ """
+    Generates a random string with varying degrees of guarantee of universal uniqueness.
+
+    :param length: string length; when ``None``, the variant's default length is used
+    :param variant:
+ * ``base57`` (the default) uses a mix of upper and lowercase alphanumerics ensuring no visually
+ ambiguous characters; default length 22
+ * ``alphanumeric`` uses lowercase alphanumeric; default length 25
+ * ``uuid`` uses lowercase hexadecimal in the classic UUID format, including dashes; length is
+ always 36
+ * ``hex`` uses lowercase hexadecimal characters but has no guarantee of uniqueness; default
+ length of 5
+ """
+
+ if variant == 'base57':
+ the_id = UUID_BASE57.uuid()
+ if length is not None:
+ the_id = the_id[:length]
+
+ elif variant == 'alphanumeric':
+ the_id = UUID_LOWERCASE_ALPHANUMERIC.uuid()
+ if length is not None:
+ the_id = the_id[:length]
+
+ elif variant == 'uuid':
+ the_id = str(uuid4())
+
+ elif variant == 'hex':
+ length = length or 5
+ # See: http://stackoverflow.com/a/2782859
+ the_id = ('%0' + str(length) + 'x') % randrange(16 ** length)
+
+ else:
+ raise ValueError('unsupported UUID variant: {0}'.format(variant))
+
+ return the_id
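A short sketch of the four variants (Python 2; import path assumed from the diff path):

    from aria.utils.uuid import generate_uuid  # assumed import path

    assert len(generate_uuid()) == 22                        # base57, default length
    assert len(generate_uuid(length=8)) == 8                 # truncated base57
    assert len(generate_uuid(variant='alphanumeric')) == 25  # lowercase alphanumeric
    assert len(generate_uuid(variant='uuid')) == 36          # classic UUID format with dashes
    assert len(generate_uuid(variant='hex')) == 5            # hex, default length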
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/validation.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/validation.py
new file mode 100644
index 0000000..06989a7
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/validation.py
@@ -0,0 +1,97 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Validation utilities.
+"""
+
+from .formatting import string_list_as_string
+
+
+class ValidatorMixin(object):
+ """
+    A mixin that should be added to classes that require validating user input.
+ """
+
+ _ARGUMENT_TYPE_MESSAGE = '{name} argument must be {type} based, got {arg!r}'
+ _ARGUMENT_CHOICE_MESSAGE = '{name} argument must be in {choices}, got {arg!r}'
+
+ @classmethod
+ def validate_in_choice(cls, name, argument, choices):
+ """
+ Validate ``argument`` is in ``choices``
+ """
+ if argument not in choices:
+ raise TypeError(cls._ARGUMENT_CHOICE_MESSAGE.format(
+ name=name, choices=choices, arg=argument))
+
+ @classmethod
+ def validate_type(cls, argument_name, argument, expected_type):
+ """
+ Validate ``argument`` is a subclass of ``expected_type``
+ """
+ if not issubclass(argument, expected_type):
+ raise TypeError(cls._ARGUMENT_TYPE_MESSAGE.format(
+ name=argument_name, type=expected_type, arg=argument))
+
+ @classmethod
+ def validate_instance(cls, argument_name, argument, expected_type):
+ """
+        Validate ``argument`` is an instance of ``expected_type``
+ """
+ if not isinstance(argument, expected_type):
+ raise TypeError(cls._ARGUMENT_TYPE_MESSAGE.format(
+ name=argument_name, type=expected_type, arg=argument))
+
+ @classmethod
+ def validate_callable(cls, argument_name, argument):
+ """
+ Validate ``argument`` is callable
+ """
+ if not callable(argument):
+ raise TypeError(cls._ARGUMENT_TYPE_MESSAGE.format(
+ name=argument_name, type='callable', arg=argument))
+
+
+def validate_function_arguments(func, func_kwargs):
+ """
+ Validates all required arguments are supplied to ``func`` and that no additional arguments are
+ supplied.
+ """
+
+    _kwargs_flags = 8  # CO_VARKEYWORDS: set when the function accepts **kwargs
+
+ has_kwargs = func.func_code.co_flags & _kwargs_flags != 0
+ args_count = func.func_code.co_argcount
+
+ # all args without the ones with default values
+ args = func.func_code.co_varnames[:args_count]
+ non_default_args = args[:len(args) - len(func.func_defaults)] if func.func_defaults else args
+
+    # Check whether any argument without a default value is missing from func_kwargs
+ for arg in non_default_args:
+ if arg not in func_kwargs:
+ raise ValueError(
+ 'The argument "{arg}" is not provided and does not have a default value for '
+ 'function "{func.__name__}"'.format(arg=arg, func=func))
+
+ # check if there are any extra kwargs
+ extra_kwargs = [arg for arg in func_kwargs.keys() if arg not in args]
+
+    # Raise if extra kwargs were supplied but the function does not accept **kwargs
+ if extra_kwargs and not has_kwargs:
+ raise ValueError("The following extra kwargs were supplied: {extra_kwargs}".format(
+ extra_kwargs=string_list_as_string(extra_kwargs)
+ ))
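A minimal sketch of the two failure modes, using a hypothetical ``deploy`` function (Python 2, since ``func_code`` is used above; import path assumed from the diff path):

    from aria.utils.validation import validate_function_arguments  # assumed import path

    def deploy(name, replicas=1):  # hypothetical function
        pass

    validate_function_arguments(deploy, {'name': 'web'})  # passes: 'replicas' has a default

    try:
        validate_function_arguments(deploy, {'replicas': 2})  # required 'name' is missing
    except ValueError:
        pass

    try:
        validate_function_arguments(deploy, {'name': 'web', 'debug': True})  # extra kwarg
    except ValueError:
        pass  # deploy() does not declare **kwargs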
diff --git a/azure/aria/aria-extension-cloudify/src/aria/aria/utils/versions.py b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/versions.py
new file mode 100644
index 0000000..521004c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/aria/utils/versions.py
@@ -0,0 +1,163 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Version string utilities.
+"""
+
+import re
+
+
+_INF = float('inf')
+
+_NULL = (), _INF
+
+_DIGITS_RE = re.compile(r'^\d+$')
+
+_PREFIXES = {
+ 'dev': 0.0001,
+ 'alpha': 0.001,
+ 'beta': 0.01,
+ 'rc': 0.1
+}
+
+
+class VersionString(unicode):
+ """
+ Version string that can be compared, sorted, made unique in a set, and used as a unique dict
+ key.
+
+ The primary part of the string is one or more dot-separated natural numbers. Trailing zeroes
+ are treated as redundant, e.g. "1.0.0" == "1.0" == "1".
+
+ An optional qualifier can be added after a "-". The qualifier can be a natural number or a
+ specially treated prefixed natural number, e.g. "1.1-beta1" > "1.1-alpha2". The case of the
+ prefix is ignored.
+
+ Numeric qualifiers will always be greater than prefixed integer qualifiers, e.g. "1.1-1" >
+ "1.1-beta1".
+
+ Versions without a qualifier will always be greater than their equivalents with a qualifier,
+    e.g. "1.1" > "1.1-1".
+
+    Any value that does not conform to this format will be treated as a zero version, which is
+    lower than any non-zero version.
+
+ For efficient list sorts use the ``key`` property, e.g.::
+
+ sorted(versions, key=lambda x: x.key)
+ """
+
+ NULL = None # initialized below
+
+ def __init__(self, value=None):
+ if value is not None:
+ super(VersionString, self).__init__(value)
+ self.key = parse_version_string(self)
+
+ def __eq__(self, version):
+ if not isinstance(version, VersionString):
+ version = VersionString(version)
+ return self.key == version.key
+
+ def __lt__(self, version):
+ if not isinstance(version, VersionString):
+ version = VersionString(version)
+ return self.key < version.key
+
+ def __hash__(self):
+ return self.key.__hash__()
+
+
+def parse_version_string(version): # pylint: disable=too-many-branches
+ """
+ Parses a version string.
+
+ :param version: version string
+ :returns: primary tuple and qualifier float
+ :rtype: ((:obj:`int`), :obj:`float`)
+ """
+
+ if version is None:
+ return _NULL
+ version = unicode(version)
+
+ # Split to primary and qualifier on '-'
+ split = version.split('-', 1)
+ if len(split) == 2:
+ primary, qualifier = split
+ else:
+ primary = split[0]
+ qualifier = None
+
+ # Parse primary
+ split = primary.split('.')
+ primary = []
+ for element in split:
+ if _DIGITS_RE.match(element) is None:
+ # Invalid version string
+ return _NULL
+ try:
+ element = int(element)
+ except ValueError:
+ # Invalid version string
+ return _NULL
+ primary.append(element)
+
+ # Remove redundant zeros
+ for element in reversed(primary):
+ if element == 0:
+ primary.pop()
+ else:
+ break
+ primary = tuple(primary)
+
+ # Parse qualifier
+ if qualifier is not None:
+ if _DIGITS_RE.match(qualifier) is not None:
+ # Integer qualifier
+ try:
+ qualifier = float(int(qualifier))
+ except ValueError:
+ # Invalid version string
+ return _NULL
+ else:
+ # Prefixed integer qualifier
+ value = None
+ qualifier = qualifier.lower()
+ for prefix, factor in _PREFIXES.iteritems():
+ if qualifier.startswith(prefix):
+ value = qualifier[len(prefix):]
+ if _DIGITS_RE.match(value) is None:
+ # Invalid version string
+ return _NULL
+ try:
+ value = float(int(value)) * factor
+ except ValueError:
+ # Invalid version string
+ return _NULL
+ break
+ if value is None:
+ # Invalid version string
+ return _NULL
+ qualifier = value
+ else:
+ # Version strings with no qualifiers are higher
+ qualifier = _INF
+
+ return primary, qualifier
+
+
+VersionString.NULL = VersionString()
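A behavior sketch matching the docstring above (Python 2; import path assumed from the diff path):

    from aria.utils.versions import VersionString  # assumed import path

    assert VersionString('1.0.0') == VersionString('1')         # trailing zeros are redundant
    assert VersionString('1.1-alpha2') < VersionString('1.1-beta1')
    assert VersionString('1.1-beta1') < VersionString('1.1-1')  # numeric qualifier is greater
    assert VersionString('1.1-1') < VersionString('1.1')        # no qualifier is greatest

    # For efficient sorting, use the precomputed key instead of re-parsing on each comparison
    versions = [VersionString(v) for v in ('1.1', '1.1-rc1', '1.0')]
    assert sorted(versions, key=lambda v: v.key) == ['1.0', '1.1-rc1', '1.1']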
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/.gitignore b/azure/aria/aria-extension-cloudify/src/aria/docs/.gitignore
new file mode 100644
index 0000000..5ccff1a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/.gitignore
@@ -0,0 +1 @@
+html/
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/_static/.gitkeep b/azure/aria/aria-extension-cloudify/src/aria/docs/_static/.gitkeep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/_static/.gitkeep
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.cli.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.cli.rst
new file mode 100644
index 0000000..c325cf0
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.cli.rst
@@ -0,0 +1,100 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+:mod:`aria.cli`
+===============
+
+.. automodule:: aria.cli
+
+:mod:`aria.cli.color`
+---------------------
+
+.. automodule:: aria.cli.color
+
+:mod:`aria.cli.csar`
+--------------------
+
+.. automodule:: aria.cli.csar
+
+:mod:`aria.cli.defaults`
+------------------------
+
+.. automodule:: aria.cli.defaults
+
+:mod:`aria.cli.exceptions`
+--------------------------
+
+.. automodule:: aria.cli.exceptions
+
+:mod:`aria.cli.execution_logging`
+---------------------------------
+
+.. automodule:: aria.cli.execution_logging
+
+:mod:`aria.cli.helptexts`
+-------------------------
+
+.. automodule:: aria.cli.helptexts
+
+:mod:`aria.cli.inputs`
+----------------------
+
+.. automodule:: aria.cli.inputs
+
+:mod:`aria.cli.logger`
+----------------------
+
+.. automodule:: aria.cli.logger
+
+:mod:`aria.cli.main`
+--------------------
+
+.. automodule:: aria.cli.main
+
+:mod:`aria.cli.service_template_utils`
+--------------------------------------
+
+.. automodule:: aria.cli.service_template_utils
+
+:mod:`aria.cli.table`
+---------------------
+
+.. automodule:: aria.cli.table
+
+:mod:`aria.cli.utils`
+---------------------
+
+.. automodule:: aria.cli.utils
+
+:mod:`aria.cli.config`
+----------------------
+
+.. automodule:: aria.cli.config
+
+:mod:`aria.cli.config.config`
+-----------------------------
+
+.. automodule:: aria.cli.config.config
+
+:mod:`aria.cli.core`
+--------------------
+
+.. automodule:: aria.cli.core
+
+:mod:`aria.cli.core.aria`
+-------------------------
+
+.. automodule:: aria.cli.core.aria
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.modeling.models.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.modeling.models.rst
new file mode 100644
index 0000000..6431780
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.modeling.models.rst
@@ -0,0 +1,21 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+:mod:`aria.modeling.models`
+===========================
+
+.. automodule:: aria.modeling.models
+ :no-show-inheritance:
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.modeling.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.modeling.rst
new file mode 100644
index 0000000..b85e22c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.modeling.rst
@@ -0,0 +1,56 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.modeling`
+====================
+
+.. automodule:: aria.modeling
+
+:mod:`aria.modeling.constraints`
+--------------------------------
+
+.. automodule:: aria.modeling.constraints
+
+:mod:`aria.modeling.exceptions`
+-------------------------------
+
+.. automodule:: aria.modeling.exceptions
+
+:mod:`aria.modeling.functions`
+------------------------------
+
+.. automodule:: aria.modeling.functions
+
+:mod:`aria.modeling.mixins`
+---------------------------
+
+.. automodule:: aria.modeling.mixins
+
+:mod:`aria.modeling.relationship`
+---------------------------------
+
+.. automodule:: aria.modeling.relationship
+
+:mod:`aria.modeling.types`
+--------------------------
+
+.. automodule:: aria.modeling.types
+
+:mod:`aria.modeling.utils`
+--------------------------
+
+.. automodule:: aria.modeling.utils
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.context.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.context.rst
new file mode 100644
index 0000000..395befc
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.context.rst
@@ -0,0 +1,46 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.orchestrator.context`
+================================
+
+.. automodule:: aria.orchestrator.context
+
+:mod:`aria.orchestrator.context.common`
+---------------------------------------
+
+.. automodule:: aria.orchestrator.context.common
+
+:mod:`aria.orchestrator.context.exceptions`
+-------------------------------------------
+
+.. automodule:: aria.orchestrator.context.exceptions
+
+:mod:`aria.orchestrator.context.operation`
+------------------------------------------
+
+.. automodule:: aria.orchestrator.context.operation
+
+:mod:`aria.orchestrator.context.toolbelt`
+-----------------------------------------
+
+.. automodule:: aria.orchestrator.context.toolbelt
+
+:mod:`aria.orchestrator.context.workflow`
+-----------------------------------------
+
+.. automodule:: aria.orchestrator.context.workflow
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.execution_plugin.ctx_proxy.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.execution_plugin.ctx_proxy.rst
new file mode 100644
index 0000000..47ed598
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.execution_plugin.ctx_proxy.rst
@@ -0,0 +1,31 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.orchestrator.execution_plugin.ctx_proxy`
+===================================================
+
+.. automodule:: aria.orchestrator.execution_plugin.ctx_proxy
+
+:mod:`aria.orchestrator.execution_plugin.ctx_proxy.client`
+----------------------------------------------------------
+
+.. automodule:: aria.orchestrator.execution_plugin.ctx_proxy.client
+
+:mod:`aria.orchestrator.execution_plugin.ctx_proxy.server`
+----------------------------------------------------------
+
+.. automodule:: aria.orchestrator.execution_plugin.ctx_proxy.server
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.execution_plugin.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.execution_plugin.rst
new file mode 100644
index 0000000..177a316
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.execution_plugin.rst
@@ -0,0 +1,56 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.orchestrator.execution_plugin`
+=========================================
+
+.. automodule:: aria.orchestrator.execution_plugin
+
+:mod:`aria.orchestrator.execution_plugin.common`
+------------------------------------------------
+
+.. automodule:: aria.orchestrator.execution_plugin.common
+
+:mod:`aria.orchestrator.execution_plugin.constants`
+---------------------------------------------------
+
+.. automodule:: aria.orchestrator.execution_plugin.constants
+
+:mod:`aria.orchestrator.execution_plugin.environment_globals`
+-------------------------------------------------------------
+
+.. automodule:: aria.orchestrator.execution_plugin.environment_globals
+
+:mod:`aria.orchestrator.execution_plugin.exceptions`
+----------------------------------------------------
+
+.. automodule:: aria.orchestrator.execution_plugin.exceptions
+
+:mod:`aria.orchestrator.execution_plugin.instantiation`
+-------------------------------------------------------
+
+.. automodule:: aria.orchestrator.execution_plugin.instantiation
+
+:mod:`aria.orchestrator.execution_plugin.local`
+-----------------------------------------------
+
+.. automodule:: aria.orchestrator.execution_plugin.local
+
+:mod:`aria.orchestrator.execution_plugin.operations`
+----------------------------------------------------
+
+.. automodule:: aria.orchestrator.execution_plugin.operations
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.execution_plugin.ssh.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.execution_plugin.ssh.rst
new file mode 100644
index 0000000..8bbaa57
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.execution_plugin.ssh.rst
@@ -0,0 +1,31 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.orchestrator.execution_plugin.ssh`
+=============================================
+
+.. automodule:: aria.orchestrator.execution_plugin.ssh
+
+:mod:`aria.orchestrator.execution_plugin.ssh.operations`
+--------------------------------------------------------
+
+.. automodule:: aria.orchestrator.execution_plugin.ssh.operations
+
+:mod:`aria.orchestrator.execution_plugin.ssh.tunnel`
+----------------------------------------------------
+
+.. automodule:: aria.orchestrator.execution_plugin.ssh.tunnel
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.rst
new file mode 100644
index 0000000..33454e6
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.rst
@@ -0,0 +1,46 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.orchestrator`
+========================
+
+.. automodule:: aria.orchestrator
+
+:mod:`aria.orchestrator.decorators`
+-----------------------------------
+
+.. automodule:: aria.orchestrator.decorators
+
+:mod:`aria.orchestrator.events`
+-------------------------------
+
+.. automodule:: aria.orchestrator.events
+
+:mod:`aria.orchestrator.exceptions`
+-----------------------------------
+
+.. automodule:: aria.orchestrator.exceptions
+
+:mod:`aria.orchestrator.plugin`
+-------------------------------
+
+.. automodule:: aria.orchestrator.plugin
+
+:mod:`aria.orchestrator.workflow_runner`
+----------------------------------------
+
+.. automodule:: aria.orchestrator.workflow_runner
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.workflows.api.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.workflows.api.rst
new file mode 100644
index 0000000..7ecac75
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.workflows.api.rst
@@ -0,0 +1,31 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.orchestrator.workflows.api`
+======================================
+
+.. automodule:: aria.orchestrator.workflows.api
+
+:mod:`aria.orchestrator.workflows.api.task_graph`
+-------------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.api.task_graph
+
+:mod:`aria.orchestrator.workflows.api.task`
+-------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.api.task
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.workflows.builtin.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.workflows.builtin.rst
new file mode 100644
index 0000000..de1a8f9
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.workflows.builtin.rst
@@ -0,0 +1,57 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+
+:mod:`aria.orchestrator.workflows.builtin`
+==========================================
+
+.. automodule:: aria.orchestrator.workflows.builtin
+
+:mod:`aria.orchestrator.workflows.builtin.execute_operation`
+------------------------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.builtin.execute_operation
+
+:mod:`aria.orchestrator.workflows.builtin.heal`
+-----------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.builtin.heal
+
+:mod:`aria.orchestrator.workflows.builtin.install`
+--------------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.builtin.install
+
+:mod:`aria.orchestrator.workflows.builtin.start`
+------------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.builtin.start
+
+:mod:`aria.orchestrator.workflows.builtin.stop`
+-----------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.builtin.stop
+
+:mod:`aria.orchestrator.workflows.builtin.uninstall`
+----------------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.builtin.uninstall
+
+:mod:`aria.orchestrator.workflows.builtin.workflows`
+----------------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.builtin.workflows
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.workflows.executor.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.workflows.executor.rst
new file mode 100644
index 0000000..cde0a77
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.workflows.executor.rst
@@ -0,0 +1,46 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.orchestrator.workflows.executor`
+===========================================
+
+.. automodule:: aria.orchestrator.workflows.executor
+
+:mod:`aria.orchestrator.workflows.executor.base`
+------------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.executor.base
+
+:mod:`aria.orchestrator.workflows.executor.celery`
+--------------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.executor.celery
+
+:mod:`aria.orchestrator.workflows.executor.dry`
+-----------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.executor.dry
+
+:mod:`aria.orchestrator.workflows.executor.process`
+---------------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.executor.process
+
+:mod:`aria.orchestrator.workflows.executor.thread`
+--------------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.executor.thread
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.workflows.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.workflows.rst
new file mode 100644
index 0000000..c0bc1c1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.orchestrator.workflows.rst
@@ -0,0 +1,51 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.orchestrator.workflows`
+==================================
+
+.. automodule:: aria.orchestrator.workflows
+
+:mod:`aria.orchestrator.workflows.events_logging`
+-------------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.events_logging
+
+:mod:`aria.orchestrator.workflows.exceptions`
+---------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.exceptions
+
+:mod:`aria.orchestrator.workflows.core`
+---------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.core
+
+:mod:`aria.orchestrator.workflows.core.graph_compiler`
+------------------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.core.graph_compiler
+
+:mod:`aria.orchestrator.workflows.core.engine`
+----------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.core.engine
+
+:mod:`aria.orchestrator.workflows.core.events_handler`
+------------------------------------------------------
+
+.. automodule:: aria.orchestrator.workflows.core.events_handler
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.consumption.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.consumption.rst
new file mode 100644
index 0000000..3d9fc6e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.consumption.rst
@@ -0,0 +1,21 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.parser.consumption`
+==============================
+
+.. automodule:: aria.parser.consumption
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.loading.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.loading.rst
new file mode 100644
index 0000000..0ae7565
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.loading.rst
@@ -0,0 +1,21 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.parser.loading`
+==========================
+
+.. automodule:: aria.parser.loading
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.modeling.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.modeling.rst
new file mode 100644
index 0000000..16c359c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.modeling.rst
@@ -0,0 +1,21 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.parser.modeling`
+===========================
+
+.. automodule:: aria.parser.modeling
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.presentation.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.presentation.rst
new file mode 100644
index 0000000..6c63b2e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.presentation.rst
@@ -0,0 +1,21 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.parser.presentation`
+===============================
+
+.. automodule:: aria.parser.presentation
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.reading.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.reading.rst
new file mode 100644
index 0000000..b1d4f6c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.reading.rst
@@ -0,0 +1,21 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.parser.reading`
+==========================
+
+.. automodule:: aria.parser.reading
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.rst
new file mode 100644
index 0000000..700f03d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.rst
@@ -0,0 +1,31 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.parser`
+==================
+
+.. automodule:: aria.parser
+
+:mod:`aria.parser.exceptions`
+-----------------------------
+
+.. automodule:: aria.parser.exceptions
+
+:mod:`aria.parser.specification`
+--------------------------------
+
+.. automodule:: aria.parser.specification
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.validation.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.validation.rst
new file mode 100644
index 0000000..621898b
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.parser.validation.rst
@@ -0,0 +1,21 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.parser.validation`
+=============================
+
+.. automodule:: aria.parser.validation
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.rst
new file mode 100644
index 0000000..1a0dae5
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.rst
@@ -0,0 +1,40 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+:mod:`aria`
+===========
+
+.. automodule:: aria
+
+:mod:`aria.core`
+----------------
+
+.. automodule:: aria.core
+
+:mod:`aria.exceptions`
+----------------------
+
+.. automodule:: aria.exceptions
+
+:mod:`aria.extension`
+---------------------
+
+.. automodule:: aria.extension
+
+:mod:`aria.logger`
+------------------
+
+.. automodule:: aria.logger
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.storage.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.storage.rst
new file mode 100644
index 0000000..7c51c2f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.storage.rst
@@ -0,0 +1,51 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.storage`
+===================
+
+.. automodule:: aria.storage
+
+:mod:`aria.storage.api`
+-----------------------
+
+.. automodule:: aria.storage.api
+
+:mod:`aria.storage.collection_instrumentation`
+----------------------------------------------
+
+.. automodule:: aria.storage.collection_instrumentation
+
+:mod:`aria.storage.core`
+------------------------
+
+.. automodule:: aria.storage.core
+
+:mod:`aria.storage.exceptions`
+------------------------------
+
+.. automodule:: aria.storage.exceptions
+
+:mod:`aria.storage.filesystem_rapi`
+-----------------------------------
+
+.. automodule:: aria.storage.filesystem_rapi
+
+:mod:`aria.storage.sql_mapi`
+----------------------------
+
+.. automodule:: aria.storage.sql_mapi
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria.utils.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.utils.rst
new file mode 100644
index 0000000..220c0cd
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria.utils.rst
@@ -0,0 +1,121 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+:mod:`aria.utils`
+=================
+
+.. automodule:: aria.utils
+
+:mod:`aria.utils.archive`
+-------------------------
+
+.. automodule:: aria.utils.archive
+
+:mod:`aria.utils.argparse`
+--------------------------
+
+.. automodule:: aria.utils.argparse
+
+:mod:`aria.utils.caching`
+-------------------------
+
+.. automodule:: aria.utils.caching
+
+:mod:`aria.utils.collections`
+-----------------------------
+
+.. automodule:: aria.utils.collections
+
+:mod:`aria.utils.console`
+-------------------------
+
+.. automodule:: aria.utils.console
+
+:mod:`aria.utils.exceptions`
+----------------------------
+
+.. automodule:: aria.utils.exceptions
+
+:mod:`aria.utils.file`
+----------------------
+
+.. automodule:: aria.utils.file
+
+:mod:`aria.utils.formatting`
+----------------------------
+
+.. automodule:: aria.utils.formatting
+
+:mod:`aria.utils.http`
+----------------------
+
+.. automodule:: aria.utils.http
+
+:mod:`aria.utils.imports`
+-------------------------
+
+.. automodule:: aria.utils.imports
+
+:mod:`aria.utils.openclose`
+---------------------------
+
+.. automodule:: aria.utils.openclose
+
+:mod:`aria.utils.plugin`
+------------------------
+
+.. automodule:: aria.utils.plugin
+
+:mod:`aria.utils.process`
+-------------------------
+
+.. automodule:: aria.utils.process
+
+:mod:`aria.utils.specification`
+-------------------------------
+
+.. automodule:: aria.utils.specification
+
+:mod:`aria.utils.threading`
+---------------------------
+
+.. automodule:: aria.utils.threading
+
+:mod:`aria.utils.type`
+----------------------
+
+.. automodule:: aria.utils.type
+
+:mod:`aria.utils.uris`
+----------------------
+
+.. automodule:: aria.utils.uris
+
+:mod:`aria.utils.uuid`
+----------------------
+
+.. automodule:: aria.utils.uuid
+
+:mod:`aria.utils.validation`
+----------------------------
+
+.. automodule:: aria.utils.validation
+
+:mod:`aria.utils.versions`
+--------------------------
+
+.. automodule:: aria.utils.versions
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria_extension_tosca.simple_nfv_v1_0.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria_extension_tosca.simple_nfv_v1_0.rst
new file mode 100644
index 0000000..6e7b6cd
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria_extension_tosca.simple_nfv_v1_0.rst
@@ -0,0 +1,20 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+:mod:`aria_extension_tosca.simple_nfv_v1_0`
+===========================================
+
+.. automodule:: aria_extension_tosca.simple_nfv_v1_0
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria_extension_tosca.simple_v1_0.modeling.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria_extension_tosca.simple_v1_0.modeling.rst
new file mode 100644
index 0000000..8bc5499
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria_extension_tosca.simple_v1_0.modeling.rst
@@ -0,0 +1,75 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+:mod:`aria_extension_tosca.simple_v1_0.modeling`
+================================================
+
+.. automodule:: aria_extension_tosca.simple_v1_0.modeling
+
+:mod:`aria_extension_tosca.simple_v1_0.modeling.artifacts`
+----------------------------------------------------------
+
+.. automodule:: aria_extension_tosca.simple_v1_0.modeling.artifacts
+
+:mod:`aria_extension_tosca.simple_v1_0.modeling.capabilities`
+-------------------------------------------------------------
+
+.. automodule:: aria_extension_tosca.simple_v1_0.modeling.capabilities
+
+:mod:`aria_extension_tosca.simple_v1_0.modeling.constraints`
+------------------------------------------------------------
+
+.. automodule:: aria_extension_tosca.simple_v1_0.modeling.constraints
+
+:mod:`aria_extension_tosca.simple_v1_0.modeling.copy`
+-----------------------------------------------------
+
+.. automodule:: aria_extension_tosca.simple_v1_0.modeling.copy
+
+:mod:`aria_extension_tosca.simple_v1_0.modeling.data_types`
+-----------------------------------------------------------
+
+.. automodule:: aria_extension_tosca.simple_v1_0.modeling.data_types
+
+:mod:`aria_extension_tosca.simple_v1_0.modeling.functions`
+----------------------------------------------------------
+
+.. automodule:: aria_extension_tosca.simple_v1_0.modeling.functions
+
+:mod:`aria_extension_tosca.simple_v1_0.modeling.interfaces`
+-----------------------------------------------------------
+
+.. automodule:: aria_extension_tosca.simple_v1_0.modeling.interfaces
+
+:mod:`aria_extension_tosca.simple_v1_0.modeling.parameters`
+-----------------------------------------------------------
+
+.. automodule:: aria_extension_tosca.simple_v1_0.modeling.parameters
+
+:mod:`aria_extension_tosca.simple_v1_0.modeling.policies`
+---------------------------------------------------------
+
+.. automodule:: aria_extension_tosca.simple_v1_0.modeling.policies
+
+:mod:`aria_extension_tosca.simple_v1_0.modeling.requirements`
+-------------------------------------------------------------
+
+.. automodule:: aria_extension_tosca.simple_v1_0.modeling.requirements
+
+:mod:`aria_extension_tosca.simple_v1_0.modeling.substitution_mappings`
+----------------------------------------------------------------------
+
+.. automodule:: aria_extension_tosca.simple_v1_0.modeling.substitution_mappings
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria_extension_tosca.simple_v1_0.presentation.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria_extension_tosca.simple_v1_0.presentation.rst
new file mode 100644
index 0000000..964c029
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria_extension_tosca.simple_v1_0.presentation.rst
@@ -0,0 +1,40 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+:mod:`aria_extension_tosca.simple_v1_0.presentation`
+====================================================
+
+.. automodule:: aria_extension_tosca.simple_v1_0.presentation
+
+:mod:`aria_extension_tosca.simple_v1_0.presentation.extensible`
+---------------------------------------------------------------
+
+.. automodule:: aria_extension_tosca.simple_v1_0.presentation.extensible
+
+:mod:`aria_extension_tosca.simple_v1_0.presentation.field_getters`
+------------------------------------------------------------------
+
+.. automodule:: aria_extension_tosca.simple_v1_0.presentation.field_getters
+
+:mod:`aria_extension_tosca.simple_v1_0.presentation.field_validators`
+---------------------------------------------------------------------
+
+.. automodule:: aria_extension_tosca.simple_v1_0.presentation.field_validators
+
+:mod:`aria_extension_tosca.simple_v1_0.presentation.types`
+----------------------------------------------------------
+
+.. automodule:: aria_extension_tosca.simple_v1_0.presentation.types
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/aria_extension_tosca.simple_v1_0.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/aria_extension_tosca.simple_v1_0.rst
new file mode 100644
index 0000000..bdae6ab
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/aria_extension_tosca.simple_v1_0.rst
@@ -0,0 +1,20 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+:mod:`aria_extension_tosca.simple_v1_0`
+=======================================
+
+.. automodule:: aria_extension_tosca.simple_v1_0
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/cli.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/cli.rst
new file mode 100644
index 0000000..ee51545
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/cli.rst
@@ -0,0 +1,57 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+CLI
+===
+
+.. click:: aria.cli.main:_aria
+ :prog: aria
+
+.. click:: aria.cli.commands.reset:reset
+ :prog: aria reset
+ :show-nested:
+
+.. click:: aria.cli.commands.plugins:plugins
+ :prog: aria plugins
+ :show-nested:
+
+.. click:: aria.cli.commands.service_templates:service_templates
+ :prog: aria service_templates
+ :show-nested:
+
+.. click:: aria.cli.commands.node_templates:node_templates
+ :prog: aria node_templates
+ :show-nested:
+
+.. click:: aria.cli.commands.services:services
+ :prog: aria services
+ :show-nested:
+
+.. click:: aria.cli.commands.nodes:nodes
+ :prog: aria nodes
+ :show-nested:
+
+.. click:: aria.cli.commands.workflows:workflows
+ :prog: aria workflows
+ :show-nested:
+
+.. click:: aria.cli.commands.executions:executions
+ :prog: aria executions
+ :show-nested:
+
+.. click:: aria.cli.commands.logs:logs
+ :prog: aria logs
+ :show-nested:
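
Each `click:` directive above renders the help text of one command group from `aria.cli.commands`, and `:show-nested:` expands that group's subcommands inline. As a rough orientation (a sketch; the exact subcommand sets depend on the installed ARIA version), the documented groups correspond to shell invocations like:

```bash
# Top-level entry point (aria.cli.main:_aria):
aria --help

# Command groups; --help on each lists the subcommands that
# :show-nested: expands in the generated page:
aria service_templates --help
aria services --help
aria executions --help
```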
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/conf.py b/azure/aria/aria-extension-cloudify/src/aria/docs/conf.py
new file mode 100644
index 0000000..fd1a066
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/conf.py
@@ -0,0 +1,441 @@
+# -*- coding: utf-8 -*-
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ARIA TOSCA documentation build configuration file.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, as shown here.
+#
+import os
+import sys
+
+sys.path.append(os.path.abspath('../aria'))
+sys.path.append(os.path.abspath('../extensions'))
+
+with open('../VERSION') as f:
+    version = f.readline().strip()  # strip the trailing newline
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.autosummary',
+ 'sphinx.ext.intersphinx',
+ 'sphinx_click.ext'
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffixes as a list of strings:
+#
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'ARIA TOSCA'
+copyright = u'2016-2017, Apache Software Foundation' # @ReservedAssignment
+author = u'Apache Software Foundation'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+#version = u'0.0'
+# The full version, including alpha/beta/rc tags.
+release = version # @UndefinedVariable
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#
+# today = ''
+#
+# Else, today_fmt is used as the format for a strftime call.
+#
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# These patterns also affect html_static_path and html_extra_path.
+exclude_patterns = []
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'sphinx_rtd_theme'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.
+# "<project> v<release> documentation" by default.
+#
+# html_title = u'ARIA TOSCA v0.1.0'
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#
+# html_logo = None
+
+# The name of an image file (relative to this directory) to use as a favicon of
+# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#
+# html_extra_path = []
+
+# If not None, a 'Last updated on:' timestamp is inserted at every page
+# bottom, using the given strftime format.
+# The empty string is equivalent to '%b %d, %Y'.
+#
+# html_last_updated_fmt = None
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+#
+# html_domain_indices = True
+
+# If false, no index is generated.
+#
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
+#
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# 'ja' uses this config value.
+# For 'zh', the user can customize the `jieba` dictionary path.
+#
+# html_search_options = {'type': 'default'}
+
+# The name of a JavaScript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#
+# html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'ARIATOSCAdoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ #
+ # 'papersize': 'letterpaper',
+
+ # The font size ('10pt', '11pt' or '12pt').
+ #
+ # 'pointsize': '10pt',
+
+ # Additional stuff for the LaTeX preamble.
+ #
+ # 'preamble': '',
+
+ # Latex figure (float) alignment
+ #
+ # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (master_doc, 'ARIATOSCA.tex', u'ARIA TOSCA',
+ u'Apache Software Foundation', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+#
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#
+# latex_appendices = []
+
+# If false, no module index is generated.
+#
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (master_doc, 'aria', u'ARIA TOSCA',
+ [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (master_doc, 'ARIATOSCA', u'ARIA TOSCA',
+ author, 'ARIA TOSCA', 'an open, light, CLI-driven library of orchestration tools that other '
+ 'open projects can consume to easily build TOSCA-based orchestration solutions.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+#
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#
+# texinfo_no_detailmenu = False
+
+
+# -- Options for InterSphinx
+
+intersphinx_mapping = {
+ 'python': ('https://docs.python.org/2.7', None)
+}
+
+# -- Options for Python domain
+
+# Append __init__ docstring into class docstring
+autoclass_content = 'both'
+
+# Default to everything important
+autodoc_default_flags = [
+ 'members',
+ 'undoc-members',
+ 'show-inheritance'
+]
+
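+# Names filtered out of (or forced into) the generated API documentation by
+# the autodoc-skip-member hook below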
+SKIP_MEMBERS = (
+ 'FIELDS',
+ 'ALLOW_UNKNOWN_FIELDS',
+ 'SHORT_FORM_FIELD',
+ 'INSTRUMENTATION_FIELDS'
+)
+
+SKIP_MEMBER_SUFFIXES = (
+ '_fk',
+)
+
+NEVER_SKIP_MEMBERS = (
+ '__evaluate__',
+)
+
+SKIP_DOCUMENTS = ()
+
+from sphinx import addnodes
+from sphinx.domains.python import PythonDomain
+
+try:
+ import fabric
+except ImportError:
+ # Note: "exclude_patterns" is not good enough for us, because we still have a TOC entry.
+ # Unfortunately, there is no way to conditionally exclude a TOC entry, and TOC entries without
+ # matching documents emit an error. So, we will have to manipulate the doctree directly!
+ SKIP_DOCUMENTS = ('aria.orchestrator.execution_plugin.ssh',)
+
+def on_autodoc_skip_member(app, what, name, obj, skip, options):
+ if name in NEVER_SKIP_MEMBERS:
+ return False
+ if name in SKIP_MEMBERS:
+ return True
+ for suffix in SKIP_MEMBER_SUFFIXES:
+ if name.endswith(suffix):
+ return True
+ return skip
+
+def on_source_read(app, docname, source):
+ # Empty out source
+ if docname in SKIP_DOCUMENTS:
+ source[0] = ''
+
+def on_doctree_read(app, doctree):
+ # Remove TOC entry (see: https://gist.github.com/kakawait/9215487)
+ for toctreenode in doctree.traverse(addnodes.toctree):
+        for e in list(toctreenode['entries']):  # iterate over a copy, since we remove entries
+            ref = str(e[1])
+            if ref in SKIP_DOCUMENTS:
+                toctreenode['entries'].remove(e)
+
+class PatchedPythonDomain(PythonDomain):
+ # See: https://github.com/sphinx-doc/sphinx/issues/3866
+ def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
+ if 'refspecific' in node:
+ del node['refspecific']
+ return super(PatchedPythonDomain, self).resolve_xref(
+ env, fromdocname, builder, typ, target, node, contnode)
+
+def setup(app):
+ app.connect('autodoc-skip-member', on_autodoc_skip_member)
+ app.connect('source-read', on_source_read)
+ app.connect('doctree-read', on_doctree_read)
+ app.override_domain(PatchedPythonDomain)
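
To exercise this configuration locally, a minimal sketch (assuming the commands run from the repository's `src/aria` directory, so that the relative paths above, `../VERSION`, `../aria` and `../extensions`, resolve):

```bash
pip install -r docs/requirements.txt        # Sphinx, theme and sphinx-click pins
sphinx-build -b html docs docs/_build/html  # build the HTML docs into docs/_build/html
```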
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/index.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/index.rst
new file mode 100644
index 0000000..f68769b
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/index.rst
@@ -0,0 +1,86 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+User Manual for ARIA TOSCA
+==========================
+
+`ARIA TOSCA <http://ariatosca.incubator.apache.org/>`__ is an open, light, CLI-driven library of
+orchestration tools that other open projects can consume to easily build
+`TOSCA <https://www.oasis-open.org/committees/tosca/>`__-based orchestration solutions. ARIA is now
+an incubation project at the Apache Software Foundation.
+
+Interfaces
+----------
+
+.. toctree::
+ :maxdepth: 1
+ :includehidden:
+
+ cli
+ rest
+
+SDK
+---
+
+Core
+####
+
+.. toctree::
+ :maxdepth: 1
+ :includehidden:
+
+ aria
+ aria.cli
+ aria.modeling
+ aria.modeling.models
+ aria.orchestrator
+ aria.orchestrator.context
+ aria.orchestrator.execution_plugin
+ aria.orchestrator.execution_plugin.ctx_proxy
+ aria.orchestrator.execution_plugin.ssh
+ aria.orchestrator.workflows
+ aria.orchestrator.workflows.api
+ aria.orchestrator.workflows.builtin
+ aria.orchestrator.workflows.executor
+ aria.parser
+ aria.parser.consumption
+ aria.parser.loading
+ aria.parser.modeling
+ aria.parser.presentation
+ aria.parser.reading
+ aria.parser.validation
+ aria.storage
+ aria.utils
+
+Extensions
+##########
+
+.. toctree::
+ :maxdepth: 1
+ :includehidden:
+
+ aria_extension_tosca.simple_v1_0
+ aria_extension_tosca.simple_v1_0.modeling
+ aria_extension_tosca.simple_v1_0.presentation
+ aria_extension_tosca.simple_nfv_v1_0
+
+
+Indices and Tables
+------------------
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/requirements.txt b/azure/aria/aria-extension-cloudify/src/aria/docs/requirements.txt
new file mode 100644
index 0000000..a49bb26
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/requirements.txt
@@ -0,0 +1,15 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+Sphinx>=1.6.2, <2.0.0
+sphinx_rtd_theme>=0.2.4, <2.0.0
+sphinx-click>=1.0.2, <1.1.0
diff --git a/azure/aria/aria-extension-cloudify/src/aria/docs/rest.rst b/azure/aria/aria-extension-cloudify/src/aria/docs/rest.rst
new file mode 100644
index 0000000..185837e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/docs/rest.rst
@@ -0,0 +1,20 @@
+..
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+REST
+====
+
+TODO
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/clearwater-live-test-existing.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/clearwater-live-test-existing.yaml
new file mode 100644
index 0000000..0e6a11c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/clearwater-live-test-existing.yaml
@@ -0,0 +1,54 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+ Project Clearwater is an open-source IMS core, developed by Metaswitch Networks and released under
+ the GNU GPLv3.
+
+metadata:
+ template_name: clearwater-live-test-existing
+ template_author: ARIA
+ template_version: '1.0'
+ aria_version: '0.2.0'
+
+imports:
+ - types/clearwater.yaml
+ - aria-1.0
+
+topology_template:
+
+ inputs:
+ hosts.ssh.user:
+ type: string
+ hosts.ssh.password:
+ type: string
+ existing_host.public_address:
+ type: string
+
+ node_templates:
+ live_test:
+ type: clearwater.LiveTest
+
+ existing_host:
+ type: clearwater.HostBase
+ attributes:
+ public_address: { get_input: existing_host.public_address }
+ capabilities:
+ host:
+ properties:
+ ssh.user: { get_input: hosts.ssh.user }
+ ssh.password: { get_input: hosts.ssh.password }
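
A hedged sketch of instantiating this template through the ARIA CLI (the command groups match `docs/cli.rst` above; the specific subcommand names and flags are assumptions, not taken from this patch):

```bash
# Store the template, create a service with the three declared inputs,
# then run the install workflow (input values illustrative):
aria service_templates store clearwater-live-test-existing.yaml clearwater-live-test
aria services create live-test-1 --service-template-name clearwater-live-test \
    --inputs hosts.ssh.user=ubuntu \
    --inputs hosts.ssh.password=secret \
    --inputs existing_host.public_address=192.168.1.171
aria executions start install --service-name live-test-1
```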
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/clearwater-single-existing.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/clearwater-single-existing.yaml
new file mode 100644
index 0000000..72b882a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/clearwater-single-existing.yaml
@@ -0,0 +1,147 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+ Project Clearwater is an open-source IMS core, developed by Metaswitch Networks and released under
+ the GNU GPLv3.
+
+metadata:
+ template_name: clearwater-single-existing
+ template_author: ARIA
+ template_version: '1.0'
+ aria_version: '0.2.0'
+
+imports:
+ - types/clearwater.yaml
+ - aria-1.0
+
+topology_template:
+
+ inputs:
+ hosts.ssh.user:
+ description: >-
+ Existing SSH user.
+ type: string
+ hosts.ssh.password:
+ description: >-
+ Existing SSH password.
+ type: string
+ existing_host.public_address:
+ description: >-
+ Existing IP address that can be accessed by ARIA.
+ type: string
+ existing_host.private_address:
+ description: >-
+ Existing IP address that can be accessed within the service.
+ type: string
+ default: { get_input: existing_host.public_address }
+ existing_host.hostname:
+ description: >-
+ The hostname will be changed to this.
+ type: string
+ default: aria-clearwater-single
+
+ node_templates:
+ bono:
+ type: clearwater.Bono
+ requirements:
+ - sip_downstream: clearwater.Sprout
+ - sip_secure_downstream: clearwater.Sprout
+ - ralf: clearwater.Ralf
+
+ sprout:
+ type: clearwater.Sprout
+ requirements:
+ - ralf: clearwater.Ralf
+# cyclical: see ARIA-327
+# - sip_upstream: clearwater.Bono
+
+ dime:
+ type: clearwater.Dime
+
+ homestead:
+ type: clearwater.Homestead
+
+ ralf:
+ type: clearwater.Ralf
+ description: >-
+ Optional, only required if you are using a CCF (Charging Collection Function).
+
+ homer:
+ type: clearwater.Homer
+
+ vellum:
+ type: clearwater.Vellum
+# requirements:
+# cyclical: see ARIA-327
+# - ralf: clearwater.Ralf
+
+ i-cscf:
+ type: clearwater.I-CSCF
+
+ s-cscf:
+ type: clearwater.S-CSCF
+
+ ellis:
+ type: clearwater.Ellis
+ description: >-
+ Optional, only required if you want a web frontend.
+ properties:
+ provision_numbers_count: 1000
+ requirements:
+ - ralf: clearwater.Ralf
+
+ existing_host:
+ type: clearwater.Host
+ attributes:
+ public_address: { get_input: existing_host.public_address }
+ private_address: { get_input: existing_host.private_address }
+ capabilities:
+ host:
+ properties:
+ hostname: { get_input: existing_host.hostname }
+ ssh.user: { get_input: hosts.ssh.user }
+ ssh.password: { get_input: hosts.ssh.password }
+ max_log_directory_size: 50 MiB
+ reduce_cassandra_mem_usage: true
+
+ smtp:
+ type: smtp.SMTP
+ properties:
+ address: 127.0.0.1
+ capabilities:
+ smtp:
+ properties:
+ username: username
+ password: password
+
+ policies:
+ configuration:
+ type: clearwater.Configuration
+ properties:
+ zone: example.com
+ secret: secret
+
+ substitution_mappings:
+ node_type: ims.nodes.IMS
+ capabilities:
+ p-cscf: [ bono, p-cscf ]
+ i-cscf: [ i-cscf, i-cscf ]
+ s-cscf: [ s-cscf, s-cscf ]
+ hss: [ homestead, hss ]
+ ctf: [ ralf, ctf ]
+ xdms: [ homer, xdms ]
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/bono/create.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/bono/create.sh
new file mode 100644
index 0000000..b2a3a68
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/bono/create.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+yes | aptdcon --hide-terminal --install bono
+yes | aptdcon --hide-terminal --install restund
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/bono/delete.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/bono/delete.sh
new file mode 100644
index 0000000..73485c3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/bono/delete.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: not yet implemented
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/dime/create.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/dime/create.sh
new file mode 100644
index 0000000..9b25876
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/dime/create.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Installs Homestead and Ralf
+yes | aptdcon --hide-terminal --install dime
+yes | aptdcon --hide-terminal --install clearwater-prov-tools
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/dime/delete.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/dime/delete.sh
new file mode 100644
index 0000000..73485c3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/dime/delete.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: not yet implemented
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ellis/configure.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ellis/configure.sh
new file mode 100644
index 0000000..b52cc08
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ellis/configure.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+PROVISION_NUMBERS_START=$(ctx node properties provision_numbers_start)
+PROVISION_NUMBERS_COUNT=$(ctx node properties provision_numbers_count)
+
+if [ "$PROVISION_NUMBERS_COUNT" != 0 ]; then
+ cd /usr/share/clearwater/ellis
+ . env/bin/activate
+ python src/metaswitch/ellis/tools/create_numbers.py \
+ --start "$PROVISION_NUMBERS_START" \
+ --count "$PROVISION_NUMBERS_COUNT"
+ deactivate
+fi
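
The `ctx` calls above go through ARIA's ctx proxy, which lets operation scripts read the service model by path. The same navigation style recurs throughout these scripts; for example (all three forms appear in this example's scripts):

```bash
ctx node properties provision_numbers_count    # a property declared on the node
ctx node attributes public_address             # a runtime attribute
ctx service get_policy_by_type [ clearwater.Configuration ] properties zone
```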
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ellis/create.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ellis/create.sh
new file mode 100644
index 0000000..bdd9341
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ellis/create.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+yes | aptdcon --hide-terminal --install ellis
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ellis/delete.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ellis/delete.sh
new file mode 100644
index 0000000..73485c3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ellis/delete.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: not yet implemented
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/homer/create.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/homer/create.sh
new file mode 100644
index 0000000..5f40960
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/homer/create.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+yes | aptdcon --hide-terminal --install homer
+
+# ARIA fix to avoid warnings from Twisted about the missing service_identity library
+# (Crest is used by both Homer and Homestead-prov)
+cd /usr/share/clearwater/crest
+. env/bin/activate
+pip install service_identity
+deactivate
+service homer restart
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/homer/delete.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/homer/delete.sh
new file mode 100644
index 0000000..73485c3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/homer/delete.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: not yet implemented
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/homestead/create.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/homestead/create.sh
new file mode 100644
index 0000000..d280033
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/homestead/create.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# ARIA fix to avoid warnings from Twisted about the missing service_identity library
+# (Crest is used by both Homer and Homestead-prov)
+cd /usr/share/clearwater/crest
+. env/bin/activate
+pip install service_identity
+deactivate
+service homer restart
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/homestead/delete.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/homestead/delete.sh
new file mode 100644
index 0000000..73485c3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/homestead/delete.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: not yet implemented
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/host-base/configure.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/host-base/configure.sh
new file mode 100644
index 0000000..c5b87d9
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/host-base/configure.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+if ! type aptdcon > /dev/null 2>&1; then
+ # This will allow us to do concurrent installs
+ apt update
+ apt install aptdaemon --yes
+fi
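
`aptdaemon`'s `aptdcon` is installed here because it queues package operations through a daemon, so scripts for several nodes hosted on the same machine can install packages without racing for the dpkg lock the way concurrent `apt install` calls would; `yes |` answers its confirmation prompt. The recurring pattern in the other scripts is:

```bash
# <package> is a placeholder for whatever Debian package a node script installs
yes | aptdcon --hide-terminal --install <package>
```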
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/host/configure.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/host/configure.sh
new file mode 100644
index 0000000..61cb835
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/host/configure.sh
@@ -0,0 +1,183 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+HOSTNAME=$(ctx node capabilities host properties hostname)
+
+# Change hostname
+OLD_HOSTNAME=$(hostname)
+if [ "$OLD_HOSTNAME" != "$HOSTNAME" ]; then
+ hostname "$HOSTNAME"
+ echo "$HOSTNAME" > /etc/hostname
+ sed --in-place --expression "s/127.0.1.1\s\+$OLD_HOSTNAME/127.0.1.1 $HOSTNAME/" /etc/hosts
+fi
+
+ZONE=$(ctx service get_policy_by_type [ clearwater.Configuration ] properties zone)
+GEOGRAPHICALLY_REDUNDANT=$(ctx service get_policy_by_type [ clearwater.Configuration ] properties geographically_redundant)
+SITE_NAME=$(ctx service get_policy_by_type [ clearwater.Configuration ] properties site_name)
+SECRET=$(ctx service get_policy_by_type [ clearwater.Configuration ] properties secret)
+
+SMTP_HOSTNAME=$(ctx service get_node_by_type [ clearwater.Ellis ] get_outbound_relationship_by_name [ smtp ] target_node properties address)
+SMTP_USERNAME=$(ctx service get_node_by_type [ clearwater.Ellis ] get_outbound_relationship_by_name [ smtp ] target_node capabilities smtp properties username)
+SMTP_PASSWORD=$(ctx service get_node_by_type [ clearwater.Ellis ] get_outbound_relationship_by_name [ smtp ] target_node capabilities smtp properties password)
+
+MAX_LOG_DIRECTORY_SIZE=$(ctx node capabilities host properties max_log_directory_size value)
+REDUCE_CASSANDRA_MEM_USAGE=$(ctx node capabilities host properties reduce_cassandra_mem_usage)
+
+PRIVATE_IP=$(ctx node attributes private_address)
+PUBLIC_IP=$(ctx node attributes public_address)
+PUBLIC_HOSTNAME=$(hostname)
+# TODO: comma-separated list of all private IP addresses in group
+ETCD_CLUSTER=$PRIVATE_IP
+
+REPO_FILE=/etc/apt/sources.list.d/clearwater.list
+REPO_LINE='deb http://repo.cw-ngv.com/stable binary/'
+KEY_URL=http://repo.cw-ngv.com/repo_key
+
+
+#
+# Repository
+#
+
+if [ ! -f "$REPO_FILE" ]; then
+ echo "$REPO_LINE" > "$REPO_FILE"
+ curl --location "$KEY_URL" | apt-key add -
+fi
+
+apt update
+
+if ! type aptdcon > /dev/null 2>&1; then
+ # This will allow us to do concurrent installs
+ apt install aptdaemon --yes
+fi
+
+yes | aptdcon --hide-terminal --install clearwater-management
+
+
+#
+# DNS
+#
+
+S_CSCF_HOST="$PRIVATE_IP scscf.$PUBLIC_HOSTNAME # ARIA"
+grep --quiet --fixed-strings "$S_CSCF_HOST" /etc/hosts || echo "$S_CSCF_HOST" >> /etc/hosts
+
+
+#
+# Local configuration
+#
+
+mkdir --parents /etc/clearwater
+CONFIG_FILE=/etc/clearwater/local_config
+echo "# Created by ARIA on $(date -u)" > "$CONFIG_FILE"
+
+echo >> "$CONFIG_FILE"
+echo "# Local IP configuration" >> "$CONFIG_FILE"
+echo "local_ip=$PRIVATE_IP" >> "$CONFIG_FILE"
+echo "public_ip=$PUBLIC_IP" >> "$CONFIG_FILE"
+echo "public_hostname=$PUBLIC_HOSTNAME" >> "$CONFIG_FILE"
+echo "etcd_cluster=$ETCD_CLUSTER" >> "$CONFIG_FILE"
+
+if [ "$MAX_LOG_DIRECTORY_SIZE" != 0 ]; then
+ echo >> "$CONFIG_FILE"
+ echo "max_log_directory_size=$MAX_LOG_DIRECTORY_SIZE" >> "$CONFIG_FILE"
+fi
+
+if [ "$GEOGRAPHICALLY_REDUNDANT" = True ]; then
+ echo >> "$CONFIG_FILE"
+ echo "# Geographically redundant" >> "$CONFIG_FILE"
+ echo "local_site_name=$SITE_NAME" >> "$CONFIG_FILE"
+
+ # On the first Vellum node in the second site, you should set remote_cassandra_seeds to the
+ # IP address of a Vellum node in the first site.
+ #echo "remote_cassandra_seeds=" >> "$CONFIG_FILE"
+fi
+
+
+#
+# Shared configuration
+#
+
+if [ "$GEOGRAPHICALLY_REDUNDANT" = True ]; then
+ SPROUT_HOSTNAME=sprout.$SITE_NAME.$ZONE
+ SPROUT_REGISTRATION_STORE=vellum.$SITE_NAME.$ZONE
+ HS_HOSTNAME=hs.$SITE_NAME.$ZONE:8888
+ HS_PROVISIONING_HOSTNAME=hs.$SITE_NAME.$ZONE:8889
+ RALF_HOSTNAME=ralf.$SITE_NAME.$ZONE:10888
+ RALF_SESSION_STORE=vellum.$ZONE
+ XDMS_HOSTNAME=homer.$SITE_NAME.$ZONE:7888
+ CHRONOS_HOSTNAME=vellum.$SITE_NAME.$ZONE
+ CASSANDRA_HOSTNAME=vellum.$SITE_NAME.$ZONE
+else
+ VELLUM_IP=$PRIVATE_IP
+ HOMESTEAD_IP=$PRIVATE_IP
+ HOMER_IP=$PRIVATE_IP
+
+ SPROUT_HOSTNAME=$PUBLIC_HOSTNAME
+ SPROUT_REGISTRATION_STORE=$VELLUM_IP
+ HS_HOSTNAME=$HOMESTEAD_IP:8888
+ HS_PROVISIONING_HOSTNAME=$HOMESTEAD_IP:8889
+ RALF_HOSTNAME=
+ RALF_SESSION_STORE=
+ XDMS_HOSTNAME=$HOMER_IP:7888
+ CHRONOS_HOSTNAME=
+ CASSANDRA_HOSTNAME=
+fi
+
+mkdir --parents /etc/clearwater
+CONFIG_FILE=/etc/clearwater/shared_config
+echo "# Created by ARIA on $(date -u)" > "$CONFIG_FILE"
+
+echo >> "$CONFIG_FILE"
+echo "# Deployment definitions" >> "$CONFIG_FILE"
+echo "home_domain=$ZONE" >> "$CONFIG_FILE"
+echo "sprout_hostname=$SPROUT_HOSTNAME" >> "$CONFIG_FILE"
+echo "sprout_registration_store=$SPROUT_REGISTRATION_STORE" >> "$CONFIG_FILE"
+echo "hs_hostname=$HS_HOSTNAME" >> "$CONFIG_FILE"
+echo "hs_provisioning_hostname=$HS_PROVISIONING_HOSTNAME" >> "$CONFIG_FILE"
+echo "ralf_hostname=$RALF_HOSTNAME" >> "$CONFIG_FILE"
+echo "ralf_session_store=$RALF_SESSION_STORE" >> "$CONFIG_FILE"
+echo "xdms_hostname=$XDMS_HOSTNAME" >> "$CONFIG_FILE"
+echo "chronos_hostname=$CHRONOS_HOSTNAME" >> "$CONFIG_FILE"
+echo "cassandra_hostname=$CASSANDRA_HOSTNAME" >> "$CONFIG_FILE"
+
+echo >> "$CONFIG_FILE"
+echo "# Email server configuration" >> "$CONFIG_FILE"
+echo "smtp_smarthost=$SMTP_HOSTNAME" >> "$CONFIG_FILE"
+echo "smtp_username=$SMTP_USERNAME" >> "$CONFIG_FILE"
+echo "smtp_password=$SMTP_PASSWORD" >> "$CONFIG_FILE"
+echo "email_recovery_sender=clearwater@$ZONE" >> "$CONFIG_FILE"
+
+echo >> "$CONFIG_FILE"
+echo "# I-CSCF/S-CSCF configuration (used by Bono to proxy to Sprout)" >> "$CONFIG_FILE"
+echo "upstream_hostname=scscf.$HOSTNAME" >> "$CONFIG_FILE"
+
+echo >> "$CONFIG_FILE"
+echo "# Keys" >> "$CONFIG_FILE"
+echo "signup_key=$SECRET" >> "$CONFIG_FILE"
+echo "turn_workaround=$SECRET" >> "$CONFIG_FILE"
+echo "ellis_api_key=$SECRET" >> "$CONFIG_FILE"
+echo "ellis_cookie_key=$SECRET" >> "$CONFIG_FILE"
+
+if [ "$REDUCE_CASSANDRA_MEM_USAGE" = True ]; then
+ echo >> "$CONFIG_FILE"
+ echo "# $REDUCE_CASSANDRA_MEM_USAGE" >> "$CONFIG_FILE"
+ echo "reduce_cassandra_mem_usage=Y" >> "$CONFIG_FILE"
+fi
+
+# Copy to other hosts in etcd group
+#yes | aptdcon --hide-terminal --install clearwater-config-manager
+#cw-upload_shared_config
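
After a successful run, the generated configuration can be inspected on the host; following the `echo` statements above, the local file has roughly this shape (addresses illustrative):

```bash
cat /etc/clearwater/local_config
#   # Created by ARIA on <date>
#
#   # Local IP configuration
#   local_ip=10.0.0.5
#   public_ip=203.0.113.5
#   public_hostname=aria-clearwater-single
#   etcd_cluster=10.0.0.5
```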
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/live-test/create.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/live-test/create.sh
new file mode 100644
index 0000000..f28bff3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/live-test/create.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+LIB=/opt/clearwater-live-test
+COMMAND=/usr/bin/clearwater-live-test
+RUBY_VERSION=1.9.3
+RVM=/usr/local/rvm
+QUAFF_OLD_URL=git@github.com:metaswitch/quaff.git
+QUAFF_NEW_URL=https://github.com/Metaswitch/quaff.git
+
+# Build requirements
+yes | aptdcon --hide-terminal --install build-essential
+yes | aptdcon --hide-terminal --install bundler
+yes | aptdcon --hide-terminal --install git
+
+# Required by nokogiri Ruby gem
+yes | aptdcon --hide-terminal --install zlib1g-dev
+
+# Install Ruby enVironment Manager
+if [ ! -d "$RVM" ]; then
+ # Install
+ curl --location https://get.rvm.io | bash -s stable
+fi
+
+# Install Ruby using RVM
+. "$RVM/scripts/rvm"
+rvm autolibs enable
+rvm install "$RUBY_VERSION"
+rvm use "$RUBY_VERSION@global"
+
+# Install Clearwater Live Test
+if [ ! -d "$LIB" ]; then
+ mkdir --parents /opt
+ cd /opt
+ git clone --depth 1 https://github.com/Metaswitch/clearwater-live-test.git
+ cd clearwater-live-test
+ chmod a+rw -R .
+
+ # Note: we must fix the URLs for Quaff
+ sed --in-place --expression "s,$QUAFF_OLD_URL,$QUAFF_NEW_URL,g" Gemfile Gemfile.lock
+
+ # Install required Ruby gems
+ bundle install
+fi
+
+# Create command
+echo "#!/bin/bash" > "$COMMAND"
+echo ". \"$RVM/scripts/rvm\"" >> "$COMMAND"
+echo "rvm use \"$RUBY_VERSION@global\"" >> "$COMMAND"
+echo "cd \"$LIB\"" >> "$COMMAND"
+echo "rake \"\$@\"" >> "$COMMAND"
+chmod a+x "$COMMAND"
+
+# clearwater-live-test test[example.com] SIGNUP_CODE=secret PROXY=192.168.1.171 ELLIS=192.168.1.171
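
The generated wrapper passes its arguments straight through to `rake` inside the RVM environment, so the invocation sketched in the comment above runs the live test suite against a deployment (values illustrative):

```bash
clearwater-live-test test[example.com] SIGNUP_CODE=secret \
    PROXY=192.168.1.171 ELLIS=192.168.1.171
```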
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/live-test/delete.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/live-test/delete.sh
new file mode 100644
index 0000000..079627c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/live-test/delete.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+LIB=/opt/clearwater-live-test
+COMMAND=/usr/bin/clearwater-live-test
+
+rm --recursive --force "$LIB"
+rm --force "$COMMAND"
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/memento/create.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/memento/create.sh
new file mode 100644
index 0000000..91ffd9f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/memento/create.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+yes | aptdcon --hide-terminal --install memento-as
+yes | aptdcon --hide-terminal --install memento-nginx
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/memento/delete.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/memento/delete.sh
new file mode 100644
index 0000000..73485c3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/memento/delete.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: not yet implemented
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ralf/create.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ralf/create.sh
new file mode 100644
index 0000000..5cae7ef
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ralf/create.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
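+
+set -e
+
+# Sketch: install the Ralf component; the package name "ralf" is assumed by
+# analogy with the sprout and vellum scripts (upstream leaves this file empty)
+yes | aptdcon --hide-terminal --install ralf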
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ralf/delete.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ralf/delete.sh
new file mode 100644
index 0000000..73485c3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/ralf/delete.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Sketch: remove the Ralf package (name assumed by analogy with the other
+# component scripts; upstream leaves this script as a TODO)
+yes | aptdcon --hide-terminal --remove ralf
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/sprout/create.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/sprout/create.sh
new file mode 100644
index 0000000..a9946b9
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/sprout/create.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+yes | aptdcon --hide-terminal --install sprout
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/sprout/delete.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/sprout/delete.sh
new file mode 100644
index 0000000..73485c3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/sprout/delete.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Sketch: undo create.sh by removing the sprout package; upstream leaves this
+# script as a TODO, and aptdcon's remove mode is assumed here
+yes | aptdcon --hide-terminal --remove sprout
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/vellum/create.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/vellum/create.sh
new file mode 100644
index 0000000..1d7b2db
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/vellum/create.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+yes | aptdcon --hide-terminal --install vellum
+
+# Memento
+# TODO: see if there is a Memento node
+#yes | aptdcon --hide-terminal --install memento-cassandra
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/vellum/delete.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/vellum/delete.sh
new file mode 100644
index 0000000..73485c3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/scripts/vellum/delete.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Sketch: undo create.sh by removing the vellum package; upstream leaves this
+# script as a TODO, and aptdcon's remove mode is assumed here
+yes | aptdcon --hide-terminal --remove vellum
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/types/cassandra.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/types/cassandra.yaml
new file mode 100644
index 0000000..cbb3a5d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/types/cassandra.yaml
@@ -0,0 +1,32 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+capability_types:
+
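+  # Port defaults follow Cassandra's conventions: 7000 is the inter-node
+  # (storage) port and 9160 is the legacy Thrift client port.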
+ cassandra.Endpoint:
+ derived_from: tosca.capabilities.Endpoint.Database
+ properties:
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 7000
+
+ cassandra.Endpoint.Thrift:
+ derived_from: tosca.capabilities.Endpoint.Database
+ properties:
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 9160
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/types/clearwater.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/types/clearwater.yaml
new file mode 100644
index 0000000..1021262
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/types/clearwater.yaml
@@ -0,0 +1,750 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+imports:
+ - ims.yaml
+ - smtp.yaml
+ - cassandra.yaml
+
+dsl_definitions:
+
+ clearwater_operation_dependencies: &CLEARWATER_OPERATION_DEPENDENCIES
+ - "ssh.user > { get_property: [ HOST, host, ssh.user ] }"
+ - "ssh.password > { get_property: [ HOST, host, ssh.password ] }"
+ - "ssh.address > { get_attribute: [ HOST, public_address ] }"
+ - "ssh.use_sudo > true"
+
+policy_types:
+
+ clearwater.Configuration:
+ derived_from: tosca.policies.Root
+ properties:
+ zone:
+ description: >-
+        The domain name for SIP addresses; for example, if it is "example.com" then a SIP
+        address could be "6505550243@example.com".
+ type: string
+ geographically_redundant:
+ description: >-
+ Enable a geographically redundant deployment.
+
+ See: http://clearwater.readthedocs.io/en/stable/Geographic_redundancy.html
+ type: boolean
+ default: false
+ site_name:
+ description: >-
+ Used by geographically redundant deployments.
+ type: string
+ required: false
+ secret:
+ description: >-
+ Used for registration via Ellis.
+ type: string
+
+capability_types:
+
+ clearwater.Container:
+ description: >-
+ Clearwater container capability.
+ derived_from: tosca.capabilities.Container
+ properties:
+ hostname:
+ type: string
+ ssh.user:
+ type: string
+ ssh.password:
+ type: string
+ max_log_directory_size:
+ type: scalar-unit.size
+ default: 0 B # 0 means no max size
+ reduce_cassandra_mem_usage:
+ type: boolean
+ default: false
+
+ # http://clearwater.readthedocs.io/en/stable/Clearwater_IP_Port_Usage.html
+
+ # SIP endpoints
+
+ clearwater.Endpoint.SIP.Upstream:
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ protocol: # override
+ type: string
+ default: sip
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 5052
+
+ clearwater.Endpoint.SIP.Upstream.Secure:
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ protocol: # override
+ type: string
+ default: sip
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 5054
+ secure: # override
+ type: boolean
+ default: true
+
+ clearwater.Endpoint.SIP.Proxy:
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ protocol: # override
+ type: string
+ default: sip
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 5058
+
+ clearwater.Endpoint.SIP.Public:
+ derived_from: tosca.capabilities.Endpoint.Public
+ properties:
+ protocol: # override
+ type: string
+ default: sip
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 5060
+
+ clearwater.Endpoint.SIP.Public.Secure:
+ derived_from: tosca.capabilities.Endpoint.Public
+ properties:
+ protocol: # override
+ type: string
+ default: sip
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 5062
+ secure: # override
+ type: boolean
+ default: true
+
+ # STUN endpoints
+
+ clearwater.Endpoint.STUN:
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ protocol: # override
+ type: string
+ default: stun
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 3478
+
+ # Diameter endpoints
+
+ clearwater.Endpoint.Diameter.HSS:
+ description: >-
+ In shared_config: hs_listen_port
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ protocol: # override
+ type: string
+ default: diameter
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 3868
+
+ clearwater.Endpoint.Diameter.CTF:
+ description: >-
+ In shared_config: ralf_listen_port
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ protocol: # override
+ type: string
+ default: diameter
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 3869
+
+ # Management endpoints
+
+ clearwater.Endpoint.Management.Homer:
+ derived_from: ims.interfaces.HTTP
+ properties:
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 7888
+
+ clearwater.Endpoint.Management.Homestead:
+ derived_from: ims.interfaces.HTTP
+ properties:
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 8888
+
+ clearwater.Endpoint.Management.Homestead.Provisioning:
+ description: >-
+ In shared_config: homestead_provisioning_port
+ derived_from: ims.interfaces.HTTP
+ properties:
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 8889
+
+ clearwater.Endpoint.Management.Sprout:
+ derived_from: ims.interfaces.HTTP
+ properties:
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 9886
+
+ clearwater.Endpoint.Management.Ralf:
+ derived_from: ims.interfaces.HTTP
+ properties:
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 9888 # note: some documentation shows 10888
+
+ # Web endpoints
+
+ clearwater.Endpoint.Public.Web:
+ derived_from: tosca.capabilities.Endpoint.Public
+ properties:
+ protocol: # override
+ type: string
+ default: http
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 80
+ url_path: # override
+ type: string
+ default: /
+
+ clearwater.Endpoint.Public.Web.Secure:
+ derived_from: tosca.capabilities.Endpoint.Public
+ properties:
+ protocol: # override
+ type: string
+ default: https
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 443
+ secure: # override
+ type: boolean
+ default: true
+ url_path: # override
+ type: string
+ default: /
+
+ # Other endpoints
+
+ clearwater.Endpoint.Chronos:
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 7253
+
+ clearwater.Endpoint.Memcached:
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 11211
+
+ clearwater.Endpoint.Astaire:
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ port: # override
+ type: tosca.datatypes.network.PortDef
+ default: 11311
+
+data_types:
+
+ clearwater.Number:
+ derived_from: string
+ constraints:
+ - pattern: '^\d{10}$'
+
+node_types:
+
+ # http://clearwater.readthedocs.io/en/stable/Clearwater_Architecture.html
+
+ clearwater.SoftwareComponent:
+ description: >-
+ Clearwater software components must be installed in a Clearwater-capable compute node.
+ derived_from: tosca.nodes.SoftwareComponent
+ requirements:
+ - host: # override
+ capability: clearwater.Container
+ relationship: tosca.relationships.HostedOn
+
+ clearwater.Bono:
+ description: >-
+ Clearwater edge proxy.
+
+ The Bono nodes form a horizontally scalable SIP edge proxy providing both a SIP IMS Gm
+ compliant interface and a WebRTC interface to clients. Client connections are load balanced
+ across the nodes. The Bono node provides the anchor point for the client's connection to the
+ Clearwater system, including support for various NAT traversal mechanisms. A client is
+ therefore anchored to a particular Bono node for the duration of its registration, but can
+ move to another Bono node if the connection or client fails.
+
+ Clients can connect to Bono using SIP/UDP or SIP/TCP. Bono supports any WebRTC client that
+ performs call setup signaling using SIP over WebSocket.
+
+ Alternatively, Clearwater can be deployed with a third party P-CSCF or Session Border
+ Controller implementing P-CSCF. In this case Bono nodes are not required.
+ derived_from: clearwater.SoftwareComponent
+ capabilities:
+ p-cscf: ims.functions.P-CSCF
+ gm: ims.interfaces.Gm
+ sip_endpoint: clearwater.Endpoint.SIP.Public
+ sip_secure_endpoint: clearwater.Endpoint.SIP.Public.Secure
+ sip_proxy: clearwater.Endpoint.SIP.Proxy # open to Sprout
+      stun_endpoint: clearwater.Endpoint.STUN
+ requirements:
+ - sip_downstream:
+ capability: clearwater.Endpoint.SIP.Upstream
+ occurrences: [ 0, UNBOUNDED ]
+ - sip_secure_downstream:
+ capability: clearwater.Endpoint.SIP.Upstream.Secure
+ occurrences: [ 0, UNBOUNDED ]
+ - ralf: # for billable events
+ capability: clearwater.Endpoint.Management.Ralf
+ occurrences: [ 0, 1 ]
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
+ create:
+ implementation:
+ primary: scripts/bono/create.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+ delete:
+ implementation:
+ primary: scripts/bono/delete.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+
+ clearwater.Sprout:
+ description: >-
+ Clearwater SIP router.
+
+ The Sprout nodes act as a horizontally scalable, combined SIP registrar and authoritative
+ routing proxy, and handle client authentication and the ISC interface to application servers.
+ The Sprout nodes also contain the in-built MMTEL application server. SIP transactions are load
+ balanced across the Sprout cluster, so there is no long-lived association between a client and
+      a particular Sprout node. Sprout does not store any long-lived data itself; instead it uses
+      web service interfaces to Homestead and Homer to retrieve HSS configuration (such as
+      authentication data/user profiles and MMTEL service settings), and APIs to Vellum for
+      storing subscriber registration data and for running timers.
+
+ Sprout is where the bulk of the I-CSCF and S-CSCF function resides, with the remainder
+ provided by Dime (and backed by the long-lived data stores on Vellum).
+ derived_from: clearwater.SoftwareComponent
+ capabilities:
+ sip_endpoint: clearwater.Endpoint.SIP.Upstream # open to Bono
+ sip_secure_endpoint: clearwater.Endpoint.SIP.Upstream.Secure # open to Bono
+ management_endpoint: clearwater.Endpoint.Management.Sprout
+ memento:
+ type: tosca.capabilities.Container
+ valid_source_types: [ clearwater.Memento ]
+ requirements:
+# cyclical: see ARIA-327
+# - sip_upstream:
+# capability: clearwater.Endpoint.SIP.Proxy
+# occurrences: [ 0, UNBOUNDED ]
+ - homer: # for subscriber profiles
+ capability: clearwater.Endpoint.Management.Homer
+ - ralf: # for billable events
+ capability: clearwater.Endpoint.Management.Ralf
+ occurrences: [ 0, 1 ]
+ - chronos:
+ capability: clearwater.Endpoint.Chronos
+ node: clearwater.Vellum
+ - astaire:
+ capability: clearwater.Endpoint.Astaire
+ node: clearwater.Vellum
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
+ create:
+ implementation:
+ primary: scripts/sprout/create.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+ delete:
+ implementation:
+ primary: scripts/sprout/delete.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+
+ clearwater.Memento:
+ derived_from: tosca.nodes.Root
+ capabilities:
+ sip-as: ims.functions.SIP-AS
+ web_secure_endpoint: clearwater.Endpoint.Public.Web.Secure
+ requirements:
+ - host:
+ capability: tosca.capabilities.Container
+ node: clearwater.Sprout
+ - cassandra_thrift:
+ capability: cassandra.Endpoint.Thrift
+ node: clearwater.Vellum
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
+ create:
+ implementation:
+ primary: scripts/memento/create.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+ delete:
+ implementation:
+ primary: scripts/memento/delete.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+
+ clearwater.Dime:
+ description: >-
+ Clearwater Diameter gateway.
+
+ Dime nodes run Clearwater's Homestead and Ralf components.
+ derived_from: clearwater.SoftwareComponent
+ capabilities:
+ host:
+ type: tosca.capabilities.Container
+ valid_source_types: [ clearwater.DimeSoftwareComponent ]
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
+ create:
+ implementation:
+ primary: scripts/dime/create.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+ delete:
+ implementation:
+ primary: scripts/dime/delete.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+
+ clearwater.DimeSoftwareComponent:
+ description: >-
+ Base type for Dime software components.
+ derived_from: clearwater.SoftwareComponent
+ requirements:
+ - host: # override
+ capability: tosca.capabilities.Container
+ node: clearwater.Dime
+
+ clearwater.Homestead:
+ description: >-
+ Clearwater HSS cache.
+
+ Homestead provides a web services interface to Sprout for retrieving authentication
+ credentials and user profile information. It can either master the data (in which case it
+ exposes a web services provisioning interface) or can pull the data from an IMS compliant HSS
+ over the Cx interface. The Homestead nodes themselves are stateless - the mastered / cached
+ subscriber data is all stored on Vellum (via Cassandra's Thrift interface).
+
+ In the IMS architecture, the HSS mirror function is considered to be part of the I-CSCF and
+ S-CSCF components, so in Clearwater I-CSCF and S-CSCF function is implemented with a
+ combination of Sprout and Dime clusters.
+ derived_from: clearwater.DimeSoftwareComponent
+ capabilities:
+ hss: ims.functions.HSS
+ cx: ims.interfaces.Cx
+ diameter_endpoint: clearwater.Endpoint.Diameter.HSS
+ management_endpoint: clearwater.Endpoint.Management.Homestead # open to Ellis
+ provisioning_management_endpoint: clearwater.Endpoint.Management.Homestead.Provisioning # open to Ellis
+ requirements:
+ - cassandra_thrift:
+ capability: cassandra.Endpoint.Thrift
+ node: clearwater.Vellum
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
+ create:
+ implementation:
+ primary: scripts/homestead/create.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+ delete:
+ implementation:
+ primary: scripts/homestead/delete.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+
+ clearwater.Ralf:
+ description: >-
+ Clearwater CTF.
+
+ Ralf provides an HTTP API that both Bono and Sprout can use to report billable events that
+ should be passed to the CDF (Charging Data Function) over the Rf billing interface. Ralf is
+ stateless, using Vellum to maintain the long lived session state and run the timers necessary
+ to enable it to conform to the Rf protocol.
+ derived_from: clearwater.DimeSoftwareComponent
+ capabilities:
+ ctf: ims.functions.CTF
+ rf: ims.interfaces.Rf
+ diameter_endpoint: clearwater.Endpoint.Diameter.CTF
+ management_endpoint: clearwater.Endpoint.Management.Ralf # open to Sprout, Bono, Vellum
+ requirements:
+ - chronos:
+ capability: clearwater.Endpoint.Chronos
+ node: clearwater.Vellum
+ - astaire:
+ capability: clearwater.Endpoint.Astaire
+ node: clearwater.Vellum
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
+ create:
+ implementation:
+ primary: scripts/ralf/create.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+ delete:
+ implementation:
+ primary: scripts/ralf/delete.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+
+ clearwater.Vellum:
+ description: >-
+ Clearwater state store.
+
+ Vellum is used to maintain all long-lived state in the deployment. It does this by running a
+ number of cloud optimized, distributed storage clusters.
+
+ - Cassandra. Cassandra is used by Homestead to store authentication credentials and profile
+ information, and is used by Homer to store MMTEL service settings. Vellum exposes Cassandra's
+ Thrift API.
+
+ - etcd. etcd is used by Vellum itself to share clustering information between Vellum nodes and
+ by other nodes in the deployment for shared configuration.
+
+ - Chronos. Chronos is a distributed, redundant, reliable timer service developed by
+ Clearwater. It is used by Sprout and Ralf nodes to enable timers to be run (e.g. for SIP
+ Registration expiry) without pinning operations to a specific node (one node can set the timer
+ and another act on it when it pops). Chronos is accessed via an HTTP API.
+
+ - Memcached / Astaire. Vellum also runs a Memcached cluster fronted by Astaire. Astaire is a
+      service developed by Clearwater that enables more rapid scale-up and scale-down of Memcached
+ clusters. This cluster is used by Sprout and Ralf for storing registration and session state.
+ derived_from: clearwater.SoftwareComponent
+ capabilities:
+ cassandra_endpoint: cassandra.Endpoint # open to other Vellum
+ cassandra_thrift_endpoint: cassandra.Endpoint.Thrift # open to Homer, Dime (Homestead), Sprout (Memento)
+ chronos_endpoint: clearwater.Endpoint.Chronos # open to other Vellum, Sprout, Dime (Ralf)
+ memcached_endpoint: clearwater.Endpoint.Memcached # open to other Vellum
+ astaire_endpoint: clearwater.Endpoint.Astaire # open to Sprout, Dime (Ralf)
+# cyclical: see ARIA-327
+# requirements:
+# - ralf:
+# capability: clearwater.Endpoint.Management.Ralf
+# occurrences: [ 0, 1 ]
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
+ create:
+ implementation:
+ primary: scripts/vellum/create.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+ delete:
+ implementation:
+ primary: scripts/vellum/delete.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+
+ clearwater.Homer:
+ description: >-
+ Clearwater XDMS.
+
+ Homer is a standard XDMS used to store MMTEL service settings documents for each user of the
+ system. Documents are created, read, updated and deleted using a standard XCAP interface. As
+ with Homestead, the Homer nodes use Vellum as the data store for all long lived data.
+ derived_from: clearwater.SoftwareComponent
+ capabilities:
+ xdms: ims.functions.XDMS
+ management_endpoint: clearwater.Endpoint.Management.Homer # open to Sprout, Ellis
+ requirements:
+ - cassandra_thrift:
+ capability: cassandra.Endpoint.Thrift
+ node: clearwater.Vellum
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
+ create:
+ implementation:
+ primary: scripts/homer/create.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+ delete:
+ implementation:
+ primary: scripts/homer/delete.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+
+ clearwater.Ellis:
+ description: >-
+ Ellis is a sample provisioning portal providing self sign-up, password management, line
+ management and control of MMTEL service settings. It is not intended to be a part of
+      production Clearwater deployments (for one thing, it is not easy to scale horizontally
+      because of its MySQL underpinnings) but to make the system easy to use out of the box.
+ derived_from: clearwater.SoftwareComponent
+ properties:
+ provision_numbers_start:
+ type: clearwater.Number
+ default: '6505550000'
+ provision_numbers_count:
+ type: integer
+ default: 0 # 0 means do not provision numbers
+ constraints:
+ - greater_or_equal: 0
+ capabilities:
+ web_endpoint: clearwater.Endpoint.Public.Web
+ web_secure_endpoint: clearwater.Endpoint.Public.Web.Secure
+ requirements:
+ - homer: # for subscriber profiles
+ capability: clearwater.Endpoint.Management.Homer
+ - homestead: # for subscriber authentication
+ capability: clearwater.Endpoint.Management.Homestead
+ - homestead_provisioning:
+ capability: clearwater.Endpoint.Management.Homestead.Provisioning
+ - ralf: # TODO: really?
+ capability: clearwater.Endpoint.Management.Ralf
+ occurrences: [ 0, 1 ]
+ - smtp:
+ capability: smtp.SMTP
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
+ create:
+ implementation:
+ primary: scripts/ellis/create.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+ configure:
+ implementation:
+ primary: scripts/ellis/configure.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+ delete:
+ implementation:
+ primary: scripts/ellis/delete.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+
+ clearwater.I-CSCF:
+ description: >-
+ Clearwater I-CSCF.
+
+ Logical node encompassing Sprout and Homestead. Required only if you need to expose the I-CSCF
+ function.
+ derived_from: tosca.nodes.Root
+ capabilities:
+ i-cscf: ims.functions.I-CSCF
+ requirements:
+ - sprout:
+ capability: tosca.capabilities.Node
+ node: clearwater.Sprout
+ - homestead:
+ capability: tosca.capabilities.Node
+ node: clearwater.Homestead
+
+ clearwater.S-CSCF:
+ description: >-
+ Clearwater S-CSCF.
+
+ Logical node encompassing Sprout and Homestead. Required only if you need to expose the S-CSCF
+ function.
+ derived_from: tosca.nodes.Root
+ capabilities:
+ s-cscf: ims.functions.S-CSCF
+ requirements:
+ - sprout:
+ capability: tosca.capabilities.Node
+ node: clearwater.Sprout
+ - homestead:
+ capability: tosca.capabilities.Node
+ node: clearwater.Homestead
+
+ clearwater.LiveTest:
+ derived_from: tosca.nodes.SoftwareComponent
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
+ create:
+ implementation:
+ primary: scripts/live-test/create.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+ delete:
+ implementation:
+ primary: scripts/live-test/delete.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+
+ clearwater.HostBase:
+ derived_from: tosca.nodes.Compute
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
+ configure:
+ implementation:
+ primary: scripts/host-base/configure.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
+ capabilities:
+ host: # override
+ type: clearwater.Container
+ valid_source_types: [ tosca.nodes.SoftwareComponent ]
+ os: # override
+ type: tosca.capabilities.OperatingSystem
+ properties:
+ architecture:
+ type: string
+ default: x86_64
+ type:
+ type: string
+ default: linux
+ distribution:
+ type: string
+ default: ubuntu
+ version:
+ type: version
+ default: 14.04
+
+ clearwater.Host:
+ description: >-
+ Default Clearwater host.
+
+ Note that any node can function as a Clearwater host as long as it has a clearwater.Container
+ capability.
+ derived_from: clearwater.HostBase
+ capabilities:
+ host: # override
+ type: clearwater.Container
+ valid_source_types: [ tosca.nodes.SoftwareComponent ]
+ properties:
+ mem_size:
+ type: scalar-unit.size
+ constraints:
+ - greater_or_equal: 0 MB
+ default: 4 GB # will run out of memory with less than this
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
+ configure:
+ implementation:
+ primary: scripts/host/configure.sh
+ dependencies: *CLEARWATER_OPERATION_DEPENDENCIES
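+
+# A minimal usage sketch (commented out; not part of the type definitions):
+# a Bono edge proxy on a default Clearwater host. Property values and input
+# names here are illustrative assumptions.
+#
+# topology_template:
+#   node_templates:
+#     bono_host:
+#       type: clearwater.Host
+#       capabilities:
+#         host:
+#           properties:
+#             hostname: bono-1
+#             ssh.user: { get_input: ssh_user }
+#             ssh.password: { get_input: ssh_password }
+#     bono:
+#       type: clearwater.Bono
+#       requirements:
+#         - host: bono_host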
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/types/ims.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/types/ims.yaml
new file mode 100644
index 0000000..687ad58
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/types/ims.yaml
@@ -0,0 +1,446 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+capability_types:
+
+ # https://en.wikipedia.org/wiki/IP_Multimedia_Subsystem#Core_network
+
+ ims.functions.Root:
+ derived_from: tosca.capabilities.Root
+
+ ims.functions.CTF: # not mentioned in Wikipedia
+ description: >-
+ Charging Trigger Function.
+ derived_from: ims.functions.Root
+
+ ims.functions.XDMS: # not mentioned in Wikipedia
+ description: >-
+ XML Document Management Server
+ derived_from: ims.functions.Root
+
+ ims.functions.HSS:
+ description: >-
+ The home subscriber server (HSS), or user profile server function (UPSF), is a master user
+ database that supports the IMS network entities that actually handle calls. It contains the
+ subscription-related information (subscriber profiles), performs authentication and
+ authorization of the user, and can provide information about the subscriber's location and IP
+ information. It is similar to the GSM home location register (HLR) and Authentication centre
+ (AuC).
+
+ A subscriber location function (SLF) is needed to map user addresses when multiple HSSs are
+ used.
+ derived_from: ims.functions.Root
+
+ ims.functions.CSCF:
+ description: >-
+ Several roles of SIP servers or proxies, collectively called Call Session Control Function
+ (CSCF), are used to process SIP signalling packets in the IMS.
+ derived_from: ims.functions.Root
+
+ ims.functions.P-CSCF:
+ description: >-
+ A Proxy-CSCF (P-CSCF) is a SIP proxy that is the first point of contact for the IMS terminal.
+ It can be located either in the visited network (in full IMS networks) or in the home network
+ (when the visited network is not IMS compliant yet). Some networks may use a Session Border
+ Controller (SBC) for this function. The P-CSCF is at its core a specialized SBC for the
+ User–network interface which not only protects the network, but also the IMS terminal. The use
+ of an additional SBC between the IMS terminal and the P-CSCF is unnecessary and infeasible due
+ to the signaling being encrypted on this leg. The terminal discovers its P-CSCF with either
+ DHCP, or it may be configured (e.g. during initial provisioning or via a 3GPP IMS Management
+ Object (MO)) or in the ISIM or assigned in the PDP Context (in General Packet Radio Service
+ (GPRS)).
+ derived_from: ims.functions.CSCF
+
+ ims.functions.I-CSCF:
+ description: >-
+ An Interrogating-CSCF (I-CSCF) is another SIP function located at the edge of an
+ administrative domain. Its IP address is published in the Domain Name System (DNS) of the
+ domain (using NAPTR and SRV type of DNS records), so that remote servers can find it, and use
+ it as a forwarding point (e.g., registering) for SIP packets to this domain.
+ derived_from: ims.functions.CSCF
+
+ ims.functions.S-CSCF:
+ description: >-
+ A Serving-CSCF (S-CSCF) is the central node of the signalling plane. It is a SIP server, but
+ performs session control too. It is always located in the home network. It uses Diameter Cx
+ and Dx interfaces to the HSS to download user profiles and upload user-to-S-CSCF associations
+      (the user profile is cached locally for processing reasons only and is not changed). All
+ necessary subscriber profile information is loaded from the HSS.
+ derived_from: ims.functions.CSCF
+
+ ims.functions.AS:
+ description: >-
+ SIP Application servers (AS) host and execute services, and interface with the S-CSCF using
+ SIP. An example of an application server that is being developed in 3GPP is the Voice call
+ continuity Function (VCC Server). Depending on the actual service, the AS can operate in SIP
+ proxy mode, SIP UA (user agent) mode or SIP B2BUA mode. An AS can be located in the home
+ network or in an external third-party network. If located in the home network, it can query
+ the HSS with the Diameter Sh or Si interfaces (for a SIP-AS).
+ derived_from: ims.functions.Root
+
+ ims.functions.SIP-AS:
+ description: >-
+ Host and execute IMS specific services.
+ derived_from: ims.functions.AS
+
+ ims.functions.IM-SSF:
+ description: >-
+ IP Multimedia Service Switching Function. Interfaces SIP to CAP to communicate with CAMEL
+ Application Servers.
+ derived_from: ims.functions.AS
+
+ ims.functions.OSA-SCS:
+ description: >-
+ OSA service capability server. Interfaces SIP to the OSA framework.
+ derived_from: ims.functions.AS
+
+ ims.functions.AS-ILCM:
+ description: >-
+ The AS-ILCM (Application Server - Incoming Leg Control Model) stores transaction state, and
+ may optionally store session state depending on the specific service being executed. The
+ AS-ILCM interfaces to the S-CSCF (ILCM) for an incoming leg. Application Logic provides the
+ service(s) and interacts between the AS-ILCM and AS-OLCM.
+ derived_from: ims.functions.AS
+
+ ims.functions.AS-OLCM:
+ description: >-
+ The AS-OLCM (Application Server - Outgoing Leg Control Model) stores transaction state, and
+ may optionally store session state depending on the specific service being executed. The
+ AS-OLCM interfaces to the S-CSCF (OLCM) for an outgoing leg. Application Logic provides the
+ service(s) and interacts between the AS-ILCM and AS-OLCM.
+ derived_from: ims.functions.AS
+
+ ims.functions.MRF:
+ description: >-
+ The Media Resource Function (MRF) provides media related functions such as media manipulation
+ (e.g. voice stream mixing) and playing of tones and announcements.
+
+ Each MRF is further divided into a media resource function controller (MRFC) and a media
+ resource function processor (MRFP).
+ derived_from: ims.functions.Root
+
+ ims.functions.MRFC:
+ description: >-
+ The MRFC is a signalling plane node that interprets information coming from an AS and S-CSCF
+ to control the MRFP.
+ derived_from: ims.functions.Root
+
+ ims.functions.MRFP:
+ description: >-
+ The MRFP is a media plane node used to mix, source or process media streams. It can also
+      manage access rights to shared resources.
+ derived_from: ims.functions.Root
+
+ ims.functions.MRB:
+ description: >-
+ The Media Resource Broker (MRB) is a functional entity that is responsible for both collection
+ of appropriate published MRF information and supplying of appropriate MRF information to
+ consuming entities such as the AS. MRB can be used in two modes:
+ * Query mode: AS queries the MRB for media and sets up the call using the response of MRB
+ * In-Line Mode: AS sends a SIP INVITE to the MRB. The MRB sets up the call
+ derived_from: ims.functions.Root
+
+ ims.functions.BGCF:
+ description: >-
+ A Breakout Gateway Control Function (BGCF) is a SIP proxy which processes requests for routing
+ from an S-CSCF when the S-CSCF has determined that the session cannot be routed using DNS or
+ ENUM/DNS. It includes routing functionality based on telephone numbers.
+ derived_from: ims.functions.Root
+
+  ims.functions.PSTNGateway:
+ description: >-
+ A PSTN/CS gateway interfaces with PSTN circuit switched (CS) networks. For signalling, CS
+ networks use ISDN User Part (ISUP) (or BICC) over Message Transfer Part (MTP), while IMS uses
+ SIP over IP. For media, CS networks use Pulse-code modulation (PCM), while IMS uses Real-time
+ Transport Protocol (RTP).
+ derived_from: ims.functions.Root
+
+ ims.functions.SGW:
+ description: >-
+      A signalling gateway (SGW) interfaces with the signalling plane of the CS. It transforms
+      lower layer protocols such as Stream Control Transmission Protocol (SCTP, an IP protocol)
+      into Message Transfer Part (MTP, a Signalling System 7 (SS7) protocol), to pass ISDN User
+      Part (ISUP) from the MGCF to the CS network.
+    derived_from: ims.functions.PSTNGateway
+
+ ims.functions.MGCF:
+ description: >-
+ A media gateway controller function (MGCF) is a SIP endpoint that does call control protocol
+ conversion between SIP and ISUP/BICC and interfaces with the SGW over SCTP. It also controls
+ the resources in a Media Gateway (MGW) across an H.248 interface.
+    derived_from: ims.functions.PSTNGateway
+
+ ims.functions.MGW:
+ description: >-
+ A media gateway (MGW) interfaces with the media plane of the CS network, by converting between
+ RTP and PCM. It can also transcode when the codecs don't match (e.g., IMS might use AMR, PSTN
+ might use G.711).
+    derived_from: ims.functions.PSTNGateway
+
+ # https://en.wikipedia.org/wiki/IP_Multimedia_Subsystem#Interfaces_description
+
+ ims.interfaces.Diameter:
+ derived_from: tosca.capabilities.Endpoint
+
+ ims.interfaces.TCP:
+ derived_from: tosca.capabilities.Endpoint
+
+ ims.interfaces.SIP:
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ protocol: # override
+ type: string
+ default: sip
+
+ ims.interfaces.RTP:
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ protocol: # override
+ type: string
+ default: rtp
+
+ ims.interfaces.H248:
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ protocol: # override
+ type: string
+ default: h248
+
+ ims.interfaces.HTTP:
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ protocol: # override
+ type: string
+ default: http
+
+ ims.interfaces.MAP:
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ protocol: # override
+ type: string
+ default: map
+
+ ims.interfaces.Cr:
+ description: >-
+ Used by MRFC to fetch documents (e.g. scripts, announcement files, and other resources) from
+ an AS. Also used for media control related commands.
+ derived_from: ims.interfaces.TCP
+
+ ims.interfaces.Cx:
+ description: >-
+ Used to send subscriber data to the S-CSCF; including filter criteria and their priority. Also
+ used to furnish CDF and/or OCF addresses.
+ derived_from: ims.interfaces.Diameter
+
+ ims.interfaces.Dh:
+ description: >-
+ Used by AS to find the HSS holding the user profile information in a multi-HSS environment.
+      DH_SLF_QUERY indicates an IMPU and DX_SLF_RESP returns the HSS name.
+ derived_from: ims.interfaces.Diameter
+
+ ims.interfaces.Dx:
+ description: >-
+ Used by I-CSCF or S-CSCF to find a correct HSS in a multi-HSS environment. DX_SLF_QUERY
+      indicates an IMPU and DX_SLF_RESP returns the HSS name.
+ derived_from: ims.interfaces.Diameter
+
+ ims.interfaces.Gm:
+ description: >-
+      Used to exchange messages between SIP user equipment (UE) or a VoIP gateway and the P-CSCF.
+ derived_from: ims.interfaces.SIP
+
+ ims.interfaces.Go:
+ description: >-
+ Allows operators to control QoS in a user plane and exchange charging correlation
+ information between IMS and GPRS network.
+ derived_from: ims.interfaces.Diameter
+
+ ims.interfaces.Gq:
+ description: >-
+ Used to exchange policy decisions-related information between P-CSCF and PDF.
+ derived_from: ims.interfaces.Diameter
+
+ ims.interfaces.Gx:
+ description: >-
+ Used to exchange policy decisions-related information between PCEF and PCRF.
+ derived_from: ims.interfaces.Diameter
+
+ ims.interfaces.Gy:
+ description: >-
+ Used for online flow-based bearer charging. Functionally equivalent to Ro interface.
+ derived_from: ims.interfaces.Diameter
+
+ ims.interfaces.ISC:
+ description: >-
+ Reference point between S-CSCF and AS. Main functions are to:
+ * Notify the AS of the registered IMPU, registration state and UE capabilities
+ * Supply the AS with information to allow it to execute multiple services
+ * Convey charging function addresses
+ derived_from: ims.interfaces.SIP
+
+ ims.interfaces.Ici:
+ description: >-
+ Used to exchange messages between an IBCF and another IBCF belonging to a different IMS
+ network.
+ derived_from: ims.interfaces.SIP
+
+ ims.interfaces.Izi:
+ description: >-
+ Used to forward media streams from a TrGW to another TrGW belonging to a different IMS
+ network.
+ derived_from: ims.interfaces.RTP
+
+ ims.interfaces.Ma:
+ description: >-
+ Main functions are to:
+ * Forward SIP requests which are destined to a public service identity hosted by the AS
+ * Originate a session on behalf of a user or public service identity, if the AS has no
+ knowledge of a S-CSCF assigned to that user or public service identity
+ * Convey charging function addresses
+ derived_from: ims.interfaces.SIP
+
+ ims.interfaces.Mg:
+ description: >-
+      Converts ISUP signalling to SIP signalling and forwards it to the I-CSCF.
+ derived_from: ims.interfaces.SIP
+
+ ims.interfaces.Mi:
+ description: >-
+ Used to exchange messages between S-CSCF and BGCF.
+ derived_from: ims.interfaces.SIP
+
+ ims.interfaces.Mj:
+ description: >-
+ Used for the interworking with the PSTN/CS domain, when the BGCF has determined that a
+ breakout should occur in the same IMS network to send SIP message from BGCF to MGCF.
+ derived_from: ims.interfaces.SIP
+
+ ims.interfaces.Mk:
+ description: >-
+ Used for the interworking with the PSTN/CS domain, when the BGCF has determined that a
+ breakout should occur in another IMS network to send SIP message from BGCF to the BGCF in the
+ other network.
+ derived_from: ims.interfaces.SIP
+
+ ims.interfaces.Mm:
+ description: >-
+ Used for exchanging messages between IMS and external IP networks.
+ derived_from: ims.interfaces.SIP
+
+ ims.interfaces.Mn:
+ description: >-
+ Allows control of user-plane resources.
+ derived_from: ims.interfaces.H248
+
+ ims.interfaces.Mp:
+ description: >-
+ Allows an MRFC to control media stream resources provided by an MRFP.
+ derived_from: ims.interfaces.H248
+
+ ims.interfaces.Mr:
+ description: >-
+ Used to exchange information between S-CSCF and MRFC.
+ derived_from: ims.interfaces.SIP
+
+ ims.interfaces.Mr2:
+ description: >-
+ Used to exchange session controls between AS and MRFC.
+ derived_from: ims.interfaces.SIP
+
+ ims.interfaces.Mw:
+ description: >-
+ Used to exchange messages between CSCFs. AGCF appears as a P-CSCF to the other CSCFs.
+ derived_from: ims.interfaces.SIP
+
+ ims.interfaces.Mx:
+ description: >-
+ Used for the interworking with another IMS network, when the BGCF has determined that a
+ breakout should occur in the other IMS network to send SIP message from BGCF to the IBCF in
+ the other network.
+ derived_from: ims.interfaces.SIP
+
+ ims.interfaces.P1:
+ description: >-
+ Used for call control services by AGCF to control H.248 A-MGW and residential gateways.
+ derived_from: ims.interfaces.H248
+
+ ims.interfaces.P2:
+ description: >-
+ Reference point between AGCF and CSCF.
+ derived_from: ims.interfaces.SIP
+
+ ims.interfaces.Rc:
+ description: >-
+ Used by the AS to request that media resources be assigned to a call when using MRB in-line
+ mode or in query mode.
+ derived_from: ims.interfaces.SIP
+
+ ims.interfaces.Rf:
+ description: >-
+ Used to exchange offline charging information with CDF.
+ derived_from: ims.interfaces.Diameter
+
+ ims.interfaces.Ro:
+ description: >-
+ Used to exchange online charging information with OCF.
+ derived_from: ims.interfaces.Diameter
+
+ ims.interfaces.Rx:
+ description: >-
+ Used to exchange policy and charging related information between P-CSCF and PCRF. Replacement
+ for the Gq reference point.
+ derived_from: ims.interfaces.Diameter
+
+ ims.interfaces.Sh:
+ description: >-
+ Used to exchange User Profile information (e.g., user-related data, group lists,
+ user-service-related information or user location information or charging function addresses
+ (used when the AS has not received the third-party REGISTER for a user)) between an AS (SIP
+ AS or OSA SCS) and HSS. Also allow AS to activate/deactivate filter criteria stored in the HSS
+ on a per-subscriber basis.
+ derived_from: ims.interfaces.Diameter
+
+ ims.interfaces.Si:
+ description: >-
+      Transports CAMEL subscription information, including triggers for use by CAMEL-based
+      application services.
+ derived_from: ims.interfaces.MAP
+
+ ims.interfaces.Sr:
+ description: >-
+ Used by MRFC to fetch documents (scripts and other resources) from an AS.
+ derived_from: ims.interfaces.HTTP
+
+ ims.interfaces.Ut:
+ description: >-
+ Facilitates the management of subscriber information related to services and settings.
+ derived_from: ims.interfaces.HTTP
+
+ ims.interfaces.Z:
+ description: >-
+ Conversion of POTS services to SIP messages.
+ derived_from: tosca.capabilities.Root
+
+node_types:
+
+ ims.nodes.IMS:
+ derived_from: tosca.nodes.Root
+ capabilities:
+ p-cscf: ims.functions.P-CSCF
+ i-cscf: ims.functions.I-CSCF
+ s-cscf: ims.functions.S-CSCF
+ hss: ims.functions.HSS
+ ctf: ims.functions.CTF
+ xdms: ims.functions.XDMS
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/types/smtp.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/types/smtp.yaml
new file mode 100644
index 0000000..14e0df1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/clearwater/types/smtp.yaml
@@ -0,0 +1,35 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+capability_types:
+
+ smtp.SMTP:
+ derived_from: tosca.capabilities.Root
+ properties:
+ username:
+ type: string
+ password:
+ type: string
+
+node_types:
+
+ smtp.SMTP:
+ derived_from: tosca.nodes.SoftwareComponent
+ properties:
+ address:
+ type: string
+ capabilities:
+ smtp:
+ type: smtp.SMTP
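+
+# Usage sketch (commented out): a node template for smtp.SMTP, whose "smtp"
+# capability satisfies the smtp requirement of clearwater.Ellis; the input
+# names are illustrative assumptions.
+#
+#   smtp_server:
+#     type: smtp.SMTP
+#     properties:
+#       address: { get_input: smtp_address }
+#     capabilities:
+#       smtp:
+#         properties:
+#           username: { get_input: smtp_username }
+#           password: { get_input: smtp_password }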
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/hello-world.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/hello-world.yaml
new file mode 100644
index 0000000..86e2ad0
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/hello-world.yaml
@@ -0,0 +1,38 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+node_types:
+
+ WebServer:
+ derived_from: tosca:Root
+ capabilities:
+ host:
+ type: tosca:Container
+
+ WebApp:
+ derived_from: tosca:WebApplication
+ properties:
+ port:
+ type: integer
+
+topology_template:
+
+ node_templates:
+ web_server:
+ type: WebServer
+
+ web_app:
+ type: WebApp
+ properties:
+ port: 9090
+ requirements:
+ - host: web_server
+ interfaces:
+ Standard:
+ configure: scripts/configure.sh
+ start: scripts/start.sh
+ stop: scripts/stop.sh
+
+ outputs:
+ port:
+ type: integer
+ value: { get_property: [ web_app, port ] }
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/images/aria-logo.png b/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/images/aria-logo.png
new file mode 100644
index 0000000..3505844
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/images/aria-logo.png
Binary files differ
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/index.html b/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/index.html
new file mode 100644
index 0000000..8d21c3a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/index.html
@@ -0,0 +1,14 @@
+<html>
+  <head>
+ <title>ARIA Hello World</title>
+  </head>
+<body>
+ <h1>Hello, World!</h1>
+ <p>
+ blueprint_id = {{ ctx.service_template.name }}<br/>
+ deployment_id = {{ ctx.service.name }}<br/>
+ node_id = {{ ctx.node.name }}
+ </p>
+ <img src="aria-logo.png">
+</body>
+</html> \ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/scripts/configure.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/scripts/configure.sh
new file mode 100644
index 0000000..dc7c09f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/scripts/configure.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+TEMP_DIR=/tmp
+PYTHON_FILE_SERVER_ROOT="$TEMP_DIR/python-simple-http-webserver"
+INDEX_PATH=index.html
+IMAGE_PATH=images/aria-logo.png
+
+if [ -d "$PYTHON_FILE_SERVER_ROOT" ]; then
+ ctx logger info [ "Removing old web server root folder: $PYTHON_FILE_SERVER_ROOT." ]
+ rm -rf "$PYTHON_FILE_SERVER_ROOT"
+fi
+
+ctx logger info [ "Creating web server root folder: $PYTHON_FILE_SERVER_ROOT." ]
+
+mkdir -p "$PYTHON_FILE_SERVER_ROOT"
+cd "$PYTHON_FILE_SERVER_ROOT"
+
+ctx logger info [ "Downloading service template resources..." ]
+ctx download-resource-and-render [ "$PYTHON_FILE_SERVER_ROOT/index.html" "$INDEX_PATH" ]
+ctx download-resource [ "$PYTHON_FILE_SERVER_ROOT/aria-logo.png" "$IMAGE_PATH" ]
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/scripts/start.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/scripts/start.sh
new file mode 100644
index 0000000..1525f30
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/scripts/start.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+TEMP_DIR=/tmp
+PYTHON_FILE_SERVER_ROOT="$TEMP_DIR/python-simple-http-webserver"
+PID_FILE=server.pid
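+# "ctx" is the operation context CLI that ARIA exposes to lifecycle scripts;
+# "ctx node properties port" reads the node's "port" property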
+PORT=$(ctx node properties port)
+URL="http://localhost:$PORT"
+
+ctx logger info [ "Starting web server at: $PYTHON_FILE_SERVER_ROOT." ]
+
+cd "$PYTHON_FILE_SERVER_ROOT"
+nohup python -m SimpleHTTPServer "$PORT" > /dev/null 2>&1 &
+echo $! > "$PID_FILE"
+
+server_is_up() {
+ if which wget >/dev/null; then
+ if wget "$URL" >/dev/null; then
+ return 0
+ fi
+ elif which curl >/dev/null; then
+ if curl "$URL" >/dev/null; then
+ return 0
+ fi
+ else
+ ctx logger error [ "Both curl and wget were not found in path." ]
+ exit 1
+ fi
+ return 1
+}
+
+ctx logger info [ "Waiting for web server to launch on port $PORT..." ]
+STARTED=false
+for i in $(seq 1 15)
+do
+ if server_is_up; then
+ ctx logger info [ "Web server is up." ]
+ STARTED=true
+ break
+ else
+ ctx logger info [ "Web server not up. waiting 1 second." ]
+ sleep 1
+ fi
+done
+
+if [ "$STARTED" = false ]; then
+ ctx logger error [ "Web server did not start within 15 seconds." ]
+ exit 1
+fi
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/scripts/stop.sh b/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/scripts/stop.sh
new file mode 100644
index 0000000..be4d68e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/hello-world/scripts/stop.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+TEMP_DIR=/tmp
+PYTHON_FILE_SERVER_ROOT="${TEMP_DIR}/python-simple-http-webserver"
+PID_FILE=server.pid
+PID=$(cat "$PYTHON_FILE_SERVER_ROOT/$PID_FILE")
+
+ctx logger info [ "Shutting down web server, pid = ${PID}." ]
+kill -9 "$PID" || exit $?
+
+ctx logger info [ "Removing web server root folder: $PYTHON_FILE_SERVER_ROOT." ]
+rm -rf "$PYTHON_FILE_SERVER_ROOT"
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-1/block-storage-1.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-1/block-storage-1.yaml
new file mode 100644
index 0000000..8b3bab3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-1/block-storage-1.yaml
@@ -0,0 +1,68 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+ TOSCA simple profile with server and attached block storage using the normative AttachesTo
+ Relationship Type.
+
+metadata:
+ template_name: block-storage-1
+ template_author: TOSCA Simple Profile in YAML
+ template_version: '1.0'
+
+topology_template:
+
+ inputs:
+ cpus:
+ type: integer
+ description: Number of CPUs for the server.
+ constraints:
+ - valid_values: [ 1, 2, 4, 8 ]
+ storage_size:
+ type: scalar-unit.size
+ description: Size of the storage to be created.
+ default: 1 GB
+ storage_snapshot_id:
+ type: string
+ description: >-
+ Optional identifier for an existing snapshot to use when creating storage.
+ storage_location:
+ type: string
+ description: Block storage mount point (filesystem path).
+
+ node_templates:
+
+ my_server:
+ type: Compute
+ capabilities:
+ host:
+ properties:
+ disk_size: 10 GB
+ num_cpus: { get_input: cpus }
+ mem_size: 1 GB
+ os:
+ properties:
+ architecture: x86_64
+ type: linux
+ distribution: fedora
+ version: 18.0
+ requirements:
+ - local_storage:
+ node: my_storage
+ relationship:
+ type: AttachesTo
+ properties:
+ location: { get_input: storage_location }
+
+ my_storage:
+ type: BlockStorage
+ properties:
+ size: { get_input: storage_size }
+ snapshot_id: { get_input: storage_snapshot_id }
+
+ outputs:
+ private_ip:
+ description: The private IP address of the newly created compute instance.
+ value: { get_attribute: [ my_server, private_address ] }
+ volume_id:
+ description: The volume id of the block storage instance.
+ value: { get_property: [ my_storage, volume_id ] } # ARIA NOTE: wrong in spec
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-1/inputs.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-1/inputs.yaml
new file mode 100644
index 0000000..d0b0854
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-1/inputs.yaml
@@ -0,0 +1,3 @@
+storage_snapshot_id: "snapshot-id"
+storage_location: /mnt
+cpus: 4
\ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-2/block-storage-2.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-2/block-storage-2.yaml
new file mode 100644
index 0000000..9a2c0b0
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-2/block-storage-2.yaml
@@ -0,0 +1,75 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+ TOSCA simple profile with server and attached block storage using a custom AttachesTo Relationship
+ Type.
+
+metadata:
+ template_name: block-storage-2
+ template_author: TOSCA Simple Profile in YAML
+ template_version: '1.0'
+
+relationship_types:
+
+ MyCustomAttachesTo:
+ derived_from: AttachesTo
+
+topology_template:
+
+ inputs:
+ cpus:
+ type: integer
+ description: Number of CPUs for the server.
+ constraints:
+ - valid_values: [ 1, 2, 4, 8 ]
+ storage_size:
+ type: scalar-unit.size
+ description: Size of the storage to be created.
+ default: 1 GB
+ storage_snapshot_id:
+ type: string
+ description: >-
+ Optional identifier for an existing snapshot to use when creating storage.
+ storage_location:
+ type: string
+ description: Block storage mount point (filesystem path).
+
+ node_templates:
+
+ my_server:
+ type: Compute
+ capabilities:
+ host:
+ properties:
+ disk_size: 10 GB
+ num_cpus: { get_input: cpus }
+ mem_size: 4 GB
+ os:
+ properties:
+ architecture: x86_64
+ type: Linux
+ distribution: Fedora
+ version: 18.0
+ requirements:
+ - local_storage:
+ node: my_storage
+ # Declare custom AttachesTo type using the 'relationship' keyword
+ relationship:
+ type: MyCustomAttachesTo
+ properties:
+ location: { get_input: storage_location }
+
+ my_storage:
+ type: BlockStorage
+ properties:
+ size: { get_input: storage_size }
+ snapshot_id: { get_input: storage_snapshot_id }
+
+ outputs:
+ private_ip:
+ description: The private IP address of the newly created compute instance.
+ value: { get_attribute: [ my_server, private_address ] }
+
+ volume_id:
+ description: The volume id of the block storage instance.
+ value: { get_property: [ my_storage, volume_id ] } # ARIA NOTE: wrong in spec
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-2/inputs.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-2/inputs.yaml
new file mode 100644
index 0000000..d0b0854
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-2/inputs.yaml
@@ -0,0 +1,3 @@
+storage_snapshot_id: "snapshot-id"
+storage_location: /mnt
+cpus: 4
\ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-3/block-storage-3.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-3/block-storage-3.yaml
new file mode 100644
index 0000000..0b09b34
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-3/block-storage-3.yaml
@@ -0,0 +1,68 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+ TOSCA simple profile with server and attached block storage using a named Relationship Template
+ for the storage attachment.
+
+metadata:
+ template_name: block-storage-3
+ template_author: TOSCA Simple Profile in YAML
+ template_version: '1.0'
+
+topology_template:
+
+ inputs:
+ cpus:
+ type: integer
+ description: Number of CPUs for the server.
+ constraints:
+ - valid_values: [ 1, 2, 4, 8 ]
+ storage_size:
+ type: scalar-unit.size
+ description: Size of the storage to be created.
+ default: 1 GB
+ storage_location:
+ type: string
+ description: Block storage mount point (filesystem path).
+
+ node_templates:
+
+ my_server:
+ type: Compute
+ capabilities:
+ host:
+ properties:
+ disk_size: 10 GB
+ num_cpus: { get_input: cpus }
+ mem_size: 4 GB
+ os:
+ properties:
+ architecture: x86_64
+ type: Linux
+ distribution: Fedora
+ version: 18.0
+ requirements:
+ - local_storage:
+ node: my_storage
+ # Declare template to use with 'relationship' keyword
+ relationship: storage_attachment
+
+ my_storage:
+ type: BlockStorage
+ properties:
+ size: { get_input: storage_size }
+
+ relationship_templates:
+
+ storage_attachment:
+ type: AttachesTo
+ properties:
+ location: { get_input: storage_location }
+
+ outputs:
+ private_ip:
+ description: The private IP address of the newly created compute instance.
+ value: { get_attribute: [ my_server, private_address ] }
+ volume_id:
+ description: The volume id of the block storage instance.
+ value: { get_property: [ my_storage, volume_id ] } # ARIA NOTE: wrong in spec
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-3/inputs.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-3/inputs.yaml
new file mode 100644
index 0000000..daca041
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-3/inputs.yaml
@@ -0,0 +1,2 @@
+storage_location: /mnt
+cpus: 4
\ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-4/block-storage-4.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-4/block-storage-4.yaml
new file mode 100644
index 0000000..d3c2614
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-4/block-storage-4.yaml
@@ -0,0 +1,96 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+  TOSCA simple profile with a single Block Storage node shared by a 2-tier application, with a
+  custom AttachesTo Type and implied relationships.
+
+metadata:
+ template_name: block-storage-4
+ template_author: TOSCA Simple Profile in YAML
+ template_version: '1.0'
+
+relationship_types:
+
+ MyAttachesTo:
+ derived_from: tosca.relationships.AttachesTo
+ properties:
+ location:
+ type: string
+ default: /default_location
+
+topology_template:
+
+ inputs:
+ cpus:
+ type: integer
+ description: Number of CPUs for the server.
+ constraints:
+ - valid_values: [ 1, 2, 4, 8 ]
+ storage_size:
+ type: scalar-unit.size
+ default: 1 GB
+ description: Size of the storage to be created.
+ storage_snapshot_id:
+ type: string
+ description: >-
+ Optional identifier for an existing snapshot to use when creating storage.
+
+ node_templates:
+
+ my_web_app_tier_1:
+ type: tosca.nodes.Compute
+ capabilities:
+ host:
+ properties:
+ disk_size: 10 GB
+ num_cpus: { get_input: cpus }
+ mem_size: 4096 MB
+ os:
+ properties:
+ architecture: x86_64
+ type: Linux
+ distribution: Fedora
+ version: 18.0
+ requirements:
+ - local_storage:
+ node: my_storage
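+            # Only the relationship type is named here, so the type's default
+            # 'location' (/default_location) applies.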
+ relationship: MyAttachesTo
+
+ my_web_app_tier_2:
+ type: tosca.nodes.Compute
+ capabilities:
+ host:
+ properties:
+ disk_size: 10 GB
+ num_cpus: { get_input: cpus }
+ mem_size: 4096 MB
+ os:
+ properties:
+ architecture: x86_64
+ type: Linux
+ distribution: Fedora
+ version: 18.0
+ requirements:
+ - local_storage:
+ node: my_storage
+ relationship:
+ type: MyAttachesTo
+ properties:
+ location: /some_other_data_location
+
+ my_storage:
+ type: tosca.nodes.BlockStorage
+ properties:
+ size: { get_input: storage_size }
+ snapshot_id: { get_input: storage_snapshot_id }
+
+ outputs:
+ private_ip_1:
+ description: The private IP address of the application's first tier.
+ value: { get_attribute: [ my_web_app_tier_1, private_address ] }
+ private_ip_2:
+ description: The private IP address of the application's second tier.
+ value: { get_attribute: [ my_web_app_tier_2, private_address ] }
+ volume_id:
+ description: The volume id of the block storage instance.
+ value: { get_property: [ my_storage, volume_id ] } # ARIA NOTE: wrong in spec
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-4/inputs.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-4/inputs.yaml
new file mode 100644
index 0000000..18e457d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-4/inputs.yaml
@@ -0,0 +1,2 @@
+storage_snapshot_id: "snapshot-id"
+cpus: 4
\ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-5/block-storage-5.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-5/block-storage-5.yaml
new file mode 100644
index 0000000..a0c2229
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-5/block-storage-5.yaml
@@ -0,0 +1,109 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+  TOSCA simple profile with a single Block Storage node shared by a 2-tier application, with a
+  custom AttachesTo Type and explicit Relationship Templates.
+
+metadata:
+ template_name: block-storage-5
+ template_author: TOSCA Simple Profile in YAML
+ template_version: '1.0'
+
+relationship_types:
+
+ MyAttachesTo:
+ derived_from: tosca.relationships.AttachesTo
+ properties:
+ location:
+ type: string
+ default: /default_location
+
+topology_template:
+
+ inputs:
+ cpus:
+ type: integer
+ description: Number of CPUs for the server.
+ constraints:
+ - valid_values: [ 1, 2, 4, 8 ]
+ storage_size:
+ type: scalar-unit.size
+ default: 1 GB
+ description: Size of the storage to be created.
+ storage_snapshot_id:
+ type: string
+ description: >-
+ Optional identifier for an existing snapshot to use when creating storage.
+ storage_location:
+ type: string
+ description: >-
+ Block storage mount point (filesystem path).
+
+ node_templates:
+
+ my_web_app_tier_1:
+ type: tosca.nodes.Compute
+ capabilities:
+ host:
+ properties:
+ disk_size: 10 GB
+ num_cpus: { get_input: cpus }
+ mem_size: 4096 MB
+ os:
+ properties:
+ architecture: x86_64
+ type: Linux
+ distribution: Fedora
+ version: 18.0
+ requirements:
+ - local_storage:
+ node: my_storage
+ relationship: storage_attachesto_1
+
+ my_web_app_tier_2:
+ type: tosca.nodes.Compute
+ capabilities:
+ host:
+ properties:
+ disk_size: 10 GB
+ num_cpus: { get_input: cpus }
+ mem_size: 4096 MB
+ os:
+ properties:
+ architecture: x86_64
+ type: Linux
+ distribution: Fedora
+ version: 18.0
+ requirements:
+ - local_storage:
+ node: my_storage
+ relationship: storage_attachesto_2
+
+ my_storage:
+ type: tosca.nodes.BlockStorage
+ properties:
+ size: { get_input: storage_size }
+ snapshot_id: { get_input: storage_snapshot_id }
+
+ relationship_templates:
+
+ storage_attachesto_1:
+ type: MyAttachesTo
+ properties:
+ location: /my_data_location
+
+ storage_attachesto_2:
+ type: MyAttachesTo
+ properties:
+ location: /some_other_data_location
+
+ outputs:
+ private_ip_1:
+ description: The private IP address of the application's first tier.
+ value: { get_attribute: [ my_web_app_tier_1, private_address ] }
+ private_ip_2:
+ description: The private IP address of the application's second tier.
+ value: { get_attribute: [ my_web_app_tier_2, private_address ] }
+ volume_id:
+ description: The volume id of the block storage instance.
+ value: { get_property: [ my_storage, volume_id ] } # ARIA NOTE: wrong in spec
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-5/inputs.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-5/inputs.yaml
new file mode 100644
index 0000000..d0b0854
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-5/inputs.yaml
@@ -0,0 +1,3 @@
+storage_snapshot_id: "snapshot-id"
+storage_location: /mnt
+cpus: 4
\ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-6/block-storage-6.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-6/block-storage-6.yaml
new file mode 100644
index 0000000..534884a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-6/block-storage-6.yaml
@@ -0,0 +1,102 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+  TOSCA simple profile with 2 servers, each with its own attached block storage.
+
+metadata:
+ template_name: block-storage-6
+ template_author: TOSCA Simple Profile in YAML
+ template_version: '1.0'
+
+topology_template:
+
+ inputs:
+ cpus:
+ type: integer
+ description: Number of CPUs for the server.
+ constraints:
+ - valid_values: [ 1, 2, 4, 8 ]
+ storage_size:
+ type: scalar-unit.size
+ default: 1 GB
+ description: Size of the storage to be created.
+ storage_snapshot_id:
+ type: string
+ description: >-
+ Optional identifier for an existing snapshot to use when creating storage.
+ storage_location:
+ type: string
+ description: >-
+ Block storage mount point (filesystem path).
+
+ node_templates:
+
+ my_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ host:
+ properties:
+ disk_size: 10 GB
+ num_cpus: { get_input: cpus }
+ mem_size: 4096 MB
+ os:
+ properties:
+ architecture: x86_64
+ type: Linux
+ distribution: Fedora
+ version: 18.0
+ requirements:
+ - local_storage:
+ node: my_storage
+ relationship:
+ type: AttachesTo
+ properties:
+ location: { get_input: storage_location }
+
+ my_storage:
+ type: tosca.nodes.BlockStorage
+ properties:
+ size: { get_input: storage_size }
+ snapshot_id: { get_input: storage_snapshot_id }
+
+ my_server2:
+ type: tosca.nodes.Compute
+ capabilities:
+ host:
+ properties:
+ disk_size: 10 GB
+ num_cpus: { get_input: cpus }
+ mem_size: 4096 MB
+ os:
+ properties:
+ architecture: x86_64
+ type: Linux
+ distribution: Fedora
+ version: 18.0
+ requirements:
+ - local_storage:
+ node: my_storage2
+ relationship:
+ type: AttachesTo
+ properties:
+ location: { get_input: storage_location }
+
+ my_storage2:
+ type: tosca.nodes.BlockStorage
+ properties:
+ size: { get_input: storage_size }
+ snapshot_id: { get_input: storage_snapshot_id }
+
+ outputs:
+ server_ip_1:
+ description: The private IP address of the application's first server.
+ value: { get_attribute: [ my_server, private_address ] }
+ server_ip_2:
+ description: The private IP address of the application's second server.
+ value: { get_attribute: [ my_server2, private_address ] }
+ volume_id_1:
+ description: The volume id of the first block storage instance.
+ value: { get_property: [ my_storage, volume_id ] } # ARIA NOTE: wrong in spec
+ volume_id_2:
+ description: The volume id of the second block storage instance.
+ value: { get_property: [ my_storage2, volume_id ] } # ARIA NOTE: wrong in spec
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-6/inputs.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-6/inputs.yaml
new file mode 100644
index 0000000..d0b0854
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/block-storage-6/inputs.yaml
@@ -0,0 +1,3 @@
+storage_snapshot_id: "snapshot-id"
+storage_location: /mnt
+cpus: 4
\ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/compute-1/compute-1.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/compute-1/compute-1.yaml
new file mode 100644
index 0000000..254d2b6
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/compute-1/compute-1.yaml
@@ -0,0 +1,42 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+  TOSCA simple profile that just defines a single compute instance and selects a (guest) host
+  operating system from the Compute node's properties. Note that this example does not include
+  default values for input properties.
+
+metadata:
+ template_name: compute-1
+ template_author: TOSCA Simple Profile in YAML
+ template_version: '1.0'
+
+topology_template:
+
+ inputs:
+ cpus:
+ type: integer
+ description: Number of CPUs for the server.
+ constraints:
+ - valid_values: [ 1, 2, 4, 8 ]
+
+ node_templates:
+
+ my_server:
+ type: Compute
+ capabilities:
+ host:
+ properties:
+ disk_size: 10 GB
+ num_cpus: { get_input: cpus }
+ mem_size: 1 GB
+ os:
+ properties:
+ architecture: x86_64
+ type: Linux
+ distribution: ubuntu
+ version: 12.04
+
+ outputs:
+ private_ip:
+ description: The private IP address of the deployed server instance.
+ value: { get_attribute: [ my_server, private_address ] }
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/compute-1/inputs.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/compute-1/inputs.yaml
new file mode 100644
index 0000000..c1ee88a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/compute-1/inputs.yaml
@@ -0,0 +1 @@
+cpus: 4
\ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/container-1/container-1.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/container-1/container-1.yaml
new file mode 100644
index 0000000..f6f69fc
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/container-1/container-1.yaml
@@ -0,0 +1,68 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+  TOSCA simple profile with WordPress, a web server and MySQL on the same server.
+
+metadata:
+  template_name: container-1
+ template_author: TOSCA Simple Profile in YAML
+ template_version: '1.0'
+
+imports:
+ - ../non-normative-types.yaml
+
+# Repositories to retrieve code artifacts from
+
+repositories:
+
+ docker_hub: https://registry.hub.docker.com/
+
+topology_template:
+
+ inputs:
+ wp_host_port:
+ type: integer
+ description: The host port that maps to port 80 of the WordPress container.
+ db_root_pwd:
+ type: string
+ description: Root password for MySQL.
+
+ node_templates:
+
+  # The MySQL container, based on the official MySQL image in Docker Hub
+
+ mysql_container:
+ type: tosca.nodes.Container.Application.Docker
+ # ARIA NOTE: moved to a requirement in the node type
+ #capabilities:
+  # # This is a capability that would mimic the Docker --link feature
+ # database_link: tosca.capabilities.Docker.Link
+ artifacts:
+ my_image:
+ file: mysql
+ type: tosca.artifacts.Deployment.Image.Container.Docker
+ repository: docker_hub
+ interfaces:
+ Standard:
+ create:
+ implementation: my_image
+ inputs:
+ db_root_password: { get_input: db_root_pwd }
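+      # Note: the 'create' implementation refers to the 'my_image' artifact
+      # above, so deployment of the Docker image is handled by the orchestrator.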
+
+  # The WordPress container, based on the official WordPress image in Docker Hub
+
+ wordpress_container:
+ type: tosca.nodes.Container.Application.Docker
+ requirements:
+ - database_link: mysql_container
+ artifacts:
+ my_image:
+ file: wordpress
+ type: tosca.artifacts.Deployment.Image.Container.Docker
+ repository: docker_hub
+ interfaces:
+ Standard:
+ create:
+ implementation: my_image
+ inputs:
+ host_port: { get_input: wp_host_port }
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/collectd.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/collectd.yaml
new file mode 100644
index 0000000..6d28899
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/collectd.yaml
@@ -0,0 +1,10 @@
+# ARIA NOTE: missing in spec
+
+node_types:
+
+ tosca.nodes.SoftwareComponent.Collectd:
+ derived_from: tosca.nodes.SoftwareComponent
+ requirements:
+ - collectd_endpoint:
+ capability: tosca.capabilities.Endpoint
+ relationship: tosca.relationships.ConnectsTo
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/elasticsearch.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/elasticsearch.yaml
new file mode 100644
index 0000000..72b210a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/elasticsearch.yaml
@@ -0,0 +1,8 @@
+# ARIA NOTE: missing in spec
+
+node_types:
+
+ tosca.nodes.SoftwareComponent.Elasticsearch:
+ derived_from: tosca.nodes.SoftwareComponent
+ capabilities:
+ app: tosca.capabilities.Endpoint
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/kibana.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/kibana.yaml
new file mode 100644
index 0000000..4ee8700
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/kibana.yaml
@@ -0,0 +1,12 @@
+# ARIA NOTE: missing in spec
+
+node_types:
+
+ tosca.nodes.SoftwareComponent.Kibana:
+ derived_from: tosca.nodes.SoftwareComponent
+ requirements:
+ - search_endpoint:
+ capability: tosca.capabilities.Endpoint
+ relationship: tosca.relationships.ConnectsTo
+ capabilities:
+ app: tosca.capabilities.Endpoint
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/logstash.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/logstash.yaml
new file mode 100644
index 0000000..ea74c7e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/logstash.yaml
@@ -0,0 +1,12 @@
+# ARIA NOTE: missing in spec
+
+node_types:
+
+ tosca.nodes.SoftwareComponent.Logstash:
+ derived_from: tosca.nodes.SoftwareComponent
+ requirements:
+ - search_endpoint:
+ capability: tosca.capabilities.Endpoint
+ relationship: tosca.relationships.ConnectsTo
+ capabilities:
+ app: tosca.capabilities.Endpoint
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/rsyslog.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/rsyslog.yaml
new file mode 100644
index 0000000..3bd7c2b
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/custom_types/rsyslog.yaml
@@ -0,0 +1,10 @@
+# ARIA NOTE: missing in spec
+
+node_types:
+
+ tosca.nodes.SoftwareComponent.Rsyslog:
+ derived_from: tosca.nodes.SoftwareComponent
+ requirements:
+ - rsyslog_endpoint:
+ capability: tosca.capabilities.Endpoint
+ relationship: tosca.relationships.ConnectsTo
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/inputs.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/inputs.yaml
new file mode 100644
index 0000000..5302bbf
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/inputs.yaml
@@ -0,0 +1 @@
+my_cpus: 8
\ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/multi-tier-1.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/multi-tier-1.yaml
new file mode 100644
index 0000000..50401ec
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/multi-tier-1/multi-tier-1.yaml
@@ -0,0 +1,237 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+  This TOSCA simple profile deploys nodejs, mongodb, elasticsearch, logstash and kibana, each on a
+  separate server, with monitoring enabled for the nodejs server, on which a sample nodejs
+  application runs. Rsyslog and collectd are installed on the nodejs server.
+
+metadata:
+ template_name: multi-tier-1
+ template_author: TOSCA Simple Profile in YAML
+ template_version: '1.0'
+
+imports:
+ - ../webserver-dbms-2/custom_types/paypalpizzastore_nodejs_app.yaml # ARIA NOTE: moved
+ - custom_types/elasticsearch.yaml
+ - custom_types/logstash.yaml
+ - custom_types/kibana.yaml
+ - custom_types/collectd.yaml
+ - custom_types/rsyslog.yaml
+
+dsl_definitions:
+
+ host_capabilities: &host_capabilities
+ # container properties (flavor)
+ disk_size: 10 GB
+ num_cpus: { get_input: my_cpus }
+ mem_size: 4096 MB
+ os_capabilities: &os_capabilities
+ architecture: x86_64
+ type: Linux
+ distribution: Ubuntu
+ version: 14.04
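+  # The node templates below reuse these definitions via the *host_capabilities
+  # and *os_capabilities aliases.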
+
+topology_template:
+
+ inputs:
+ my_cpus:
+ type: integer
+ description: Number of CPUs for the server.
+ constraints:
+ - valid_values: [ 1, 2, 4, 8 ]
+ github_url:
+ type: string
+      description: The URL from which to download nodejs.
+ default: https://github.com/sample.git
+
+ node_templates:
+
+ paypal_pizzastore:
+ type: tosca.nodes.WebApplication.PayPalPizzaStore
+ properties:
+ github_url: { get_input: github_url }
+ requirements:
+ - host: nodejs
+ - database_connection: mongo_db
+ interfaces:
+ Standard:
+ configure:
+ implementation: scripts/nodejs/configure.sh
+ inputs:
+ github_url: { get_property: [ SELF, github_url ] }
+ mongodb_ip: { get_attribute: [ mongo_server, private_address ] }
+ start: scripts/nodejs/start.sh
+
+ nodejs:
+ type: tosca.nodes.WebServer.Nodejs
+ requirements:
+ - host: app_server
+ interfaces:
+ Standard:
+ create: scripts/nodejs/create.sh
+
+ mongo_db:
+ type: tosca.nodes.Database
+ properties:
+ name: 'pizzastore' # ARIA NOTE: missing in spec
+ requirements:
+ - host: mongo_dbms
+ interfaces:
+ Standard:
+ create: create_database.sh
+
+ mongo_dbms:
+ type: tosca.nodes.DBMS
+ requirements:
+ - host: mongo_server
+ interfaces:
+ Standard: # ARIA NOTE: wrong in spec
+ create: scripts/mongodb/create.sh
+ configure:
+ implementation: scripts/mongodb/config.sh
+ inputs:
+ mongodb_ip: { get_attribute: [ mongo_server, private_address ] } # ARIA NOTE: wrong in spec
+ start: scripts/mongodb/start.sh
+
+ elasticsearch:
+ type: tosca.nodes.SoftwareComponent.Elasticsearch
+ requirements:
+ - host: elasticsearch_server
+ interfaces:
+ Standard: # ARIA NOTE: wrong in spec
+ create: scripts/elasticsearch/create.sh
+ start: scripts/elasticsearch/start.sh
+
+ logstash:
+ type: tosca.nodes.SoftwareComponent.Logstash
+ requirements:
+ - host: logstash_server
+ # ARIA NOTE: mangled in the spec
+ - search_endpoint:
+ node: elasticsearch
+ relationship:
+ interfaces:
+ Configure:
+ pre_configure_source:
+ implementation: python/logstash/configure_elasticsearch.py
+ inputs:
+ elasticsearch_ip: { get_attribute: [ elasticsearch_server, private_address ] } # ARIA NOTE: wrong in spec
+ interfaces:
+ Standard: # ARIA NOTE: wrong in spec
+          create: scripts/logstash/create.sh
+ configure: scripts/logstash/config.sh
+ start: scripts/logstash/start.sh
+
+ kibana:
+ type: tosca.nodes.SoftwareComponent.Kibana
+ requirements:
+ - host: kibana_server
+ - search_endpoint: elasticsearch
+ interfaces:
+ Standard: # ARIA NOTE: wrong in spec
+ create: scripts/kibana/create.sh
+ configure:
+ implementation: scripts/kibana/config.sh
+ inputs:
+ elasticsearch_ip: { get_attribute: [ elasticsearch_server, private_address ] } # ARIA NOTE: wrong in spec
+ kibana_ip: { get_attribute: [ kibana_server, private_address ] } # ARIA NOTE: wrong in spec
+ start: scripts/kibana/start.sh
+
+ app_collectd:
+ type: tosca.nodes.SoftwareComponent.Collectd
+ requirements:
+ - host: app_server
+ # ARIA NOTE: mangled in the spec
+ - collectd_endpoint:
+ node: logstash
+ relationship:
+ interfaces:
+ Configure:
+ pre_configure_target:
+ implementation: python/logstash/configure_collectd.py
+ interfaces:
+ Standard: # ARIA NOTE: wrong in spec
+ create: scripts/collectd/create.sh
+ configure:
+ implementation: python/collectd/config.py
+ inputs:
+ logstash_ip: { get_attribute: [ logstash_server, private_address ] } # ARIA NOTE: wrong in spec
+ start: scripts/collectd/start.sh
+
+ app_rsyslog:
+ type: tosca.nodes.SoftwareComponent.Rsyslog
+ requirements:
+ - host: app_server
+ # ARIA NOTE: mangled in the spec
+ - rsyslog_endpoint:
+ node: logstash
+ relationship:
+ interfaces:
+ Configure:
+ pre_configure_target:
+ implementation: python/logstash/configure_rsyslog.py
+ interfaces:
+ Standard: # ARIA NOTE: wrong in spec
+ create: scripts/rsyslog/create.sh
+ configure:
+ implementation: scripts/rsyslog/config.sh
+ inputs:
+ logstash_ip: { get_attribute: [ logstash_server, private_address ] } # ARIA NOTE: wrong in spec
+ start: scripts/rsyslog/start.sh
+
+ app_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ host:
+ properties: *host_capabilities
+ os:
+ properties: *os_capabilities
+
+ mongo_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ host:
+ properties: *host_capabilities
+ os:
+ properties: *os_capabilities
+
+ elasticsearch_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ host:
+ properties: *host_capabilities
+ os:
+ properties: *os_capabilities
+
+ logstash_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ host:
+ properties: *host_capabilities
+ os:
+ properties: *os_capabilities
+
+ kibana_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ host:
+ properties: *host_capabilities
+ os:
+ properties: *os_capabilities
+
+ outputs:
+ nodejs_url:
+ description: URL for the nodejs server.
+ value: { get_attribute: [ app_server, private_address ] }
+ mongodb_url:
+ description: URL for the mongodb server.
+ value: { get_attribute: [ mongo_server, private_address ] }
+ elasticsearch_url:
+ description: URL for the elasticsearch server.
+ value: { get_attribute: [ elasticsearch_server, private_address ] }
+ logstash_url:
+ description: URL for the logstash server.
+ value: { get_attribute: [ logstash_server, private_address ] }
+ kibana_url:
+ description: URL for the kibana server.
+ value: { get_attribute: [ kibana_server, private_address ] }
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-1/inputs.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-1/inputs.yaml
new file mode 100644
index 0000000..9687bb0
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-1/inputs.yaml
@@ -0,0 +1 @@
+network_name: "network"
\ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-1/network-1.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-1/network-1.yaml
new file mode 100644
index 0000000..c66964f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-1/network-1.yaml
@@ -0,0 +1,49 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+ TOSCA simple profile with 1 server bound to a new network.
+
+metadata:
+ template_name: network-1
+ template_author: TOSCA Simple Profile in YAML
+ template_version: '1.0'
+
+topology_template:
+
+ inputs:
+ network_name:
+ type: string
+ description: Network name
+
+ node_templates:
+
+ my_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ host:
+ properties:
+ disk_size: 10 GB
+ num_cpus: 1
+ mem_size: 4096 MB
+ os:
+ properties:
+ architecture: x86_64
+ type: Linux
+ distribution: CirrOS
+ version: 0.3.2
+
+ my_network:
+ type: tosca.nodes.network.Network
+ properties:
+ network_name: { get_input: network_name }
+ ip_version: 4
+ cidr: '192.168.0.0/24'
+ start_ip: '192.168.0.50'
+ end_ip: '192.168.0.200'
+ gateway_ip: '192.168.0.1'
+
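+    # The Port connects the server (its 'binding' requirement) to the network
+    # (its 'link' requirement).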
+ my_port:
+ type: tosca.nodes.network.Port
+ requirements:
+ - binding: my_server
+ - link: my_network
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-2/inputs.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-2/inputs.yaml
new file mode 100644
index 0000000..9687bb0
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-2/inputs.yaml
@@ -0,0 +1 @@
+network_name: "network"
\ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-2/network-2.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-2/network-2.yaml
new file mode 100644
index 0000000..017950d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-2/network-2.yaml
@@ -0,0 +1,46 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+ TOSCA simple profile with 1 server bound to an existing network.
+
+metadata:
+ template_name: network-2
+ template_author: TOSCA Simple Profile in YAML
+ template_version: '1.0'
+
+topology_template:
+
+ inputs:
+ network_name:
+ type: string
+ description: Network name
+
+ node_templates:
+
+ my_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ host:
+ properties:
+ disk_size: 10 GB
+ num_cpus: 1
+ mem_size: 4096 MB
+ os:
+ properties:
+ architecture: x86_64
+ type: Linux
+ distribution: CirrOS
+ version: 0.3.2
+
+ my_network:
+ type: tosca.nodes.network.Network
+ properties:
+ network_name: { get_input: network_name }
+
+ my_port:
+ type: tosca.nodes.network.Port
+ requirements:
+ - binding:
+ node: my_server
+ - link:
+ node: my_network
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-3/inputs.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-3/inputs.yaml
new file mode 100644
index 0000000..9687bb0
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-3/inputs.yaml
@@ -0,0 +1 @@
+network_name: "network"
\ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-3/network-3.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-3/network-3.yaml
new file mode 100644
index 0000000..5fa40b7
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-3/network-3.yaml
@@ -0,0 +1,81 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+  TOSCA simple profile with 2 servers bound to a single network.
+
+metadata:
+ template_name: network-3
+ template_author: TOSCA Simple Profile in YAML
+ template_version: '1.0'
+
+topology_template:
+
+ inputs:
+ network_name:
+ type: string
+ description: Network name
+ network_cidr:
+ type: string
+ default: 10.0.0.0/24
+ description: CIDR for the network
+ network_start_ip:
+ type: string
+ default: 10.0.0.100
+ description: Start IP for the allocation pool
+ network_end_ip:
+ type: string
+ default: 10.0.0.150
+ description: End IP for the allocation pool
+
+ node_templates:
+
+ my_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ host:
+ properties:
+ disk_size: 10 GB
+ num_cpus: 1
+ mem_size: 4096 MB
+ os:
+ properties:
+ architecture: x86_64
+ type: Linux
+ distribution: CirrOS
+ version: 0.3.2
+
+ my_server2:
+ type: tosca.nodes.Compute
+ capabilities:
+ host:
+ properties:
+ disk_size: 10 GB
+ num_cpus: 1
+ mem_size: 4096 MB
+ os:
+ properties:
+ architecture: x86_64
+ type: Linux
+ distribution: CirrOS
+ version: 0.3.2
+
+ my_network:
+ type: tosca.nodes.network.Network
+ properties:
+ ip_version: 4
+ cidr: { get_input: network_cidr }
+ network_name: { get_input: network_name }
+ start_ip: { get_input: network_start_ip }
+ end_ip: { get_input: network_end_ip }
+
+ my_port:
+ type: tosca.nodes.network.Port
+ requirements:
+ - binding: my_server
+ - link: my_network
+
+ my_port2:
+ type: tosca.nodes.network.Port
+ requirements:
+ - binding: my_server2
+ - link: my_network
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-4/network-4.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-4/network-4.yaml
new file mode 100644
index 0000000..5b51117
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/network-4/network-4.yaml
@@ -0,0 +1,70 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+ TOSCA simple profile with 1 server bound to 3 networks.
+
+metadata:
+ template_name: network-4
+ template_author: TOSCA Simple Profile in YAML
+ template_version: '1.0'
+
+topology_template:
+
+ node_templates:
+
+ my_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ host:
+ properties:
+ disk_size: 10 GB
+ num_cpus: 1
+ mem_size: 4096 MB
+ os:
+ properties:
+ architecture: x86_64
+ type: Linux
+ distribution: CirrOS
+ version: 0.3.2
+
+ my_network1:
+ type: tosca.nodes.network.Network
+ properties:
+ cidr: '192.168.1.0/24'
+ network_name: net1
+
+ my_network2:
+ type: tosca.nodes.network.Network
+ properties:
+ cidr: '192.168.2.0/24'
+ network_name: net2
+
+ my_network3:
+ type: tosca.nodes.network.Network
+ properties:
+ cidr: '192.168.3.0/24'
+ network_name: net3
+
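+    # The 'order' property determines the order of NICs on the compute instance.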
+ my_port1:
+ type: tosca.nodes.network.Port
+ properties:
+ order: 0
+ requirements:
+ - binding: my_server
+ - link: my_network1
+
+ my_port2:
+ type: tosca.nodes.network.Port
+ properties:
+ order: 1
+ requirements:
+ - binding: my_server
+ - link: my_network2
+
+ my_port3:
+ type: tosca.nodes.network.Port
+ properties:
+ order: 2
+ requirements:
+ - binding: my_server
+ - link: my_network3
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/non-normative-types.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/non-normative-types.yaml
new file mode 100644
index 0000000..da89dcb
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/non-normative-types.yaml
@@ -0,0 +1,177 @@
+
+artifact_types:
+
+ tosca.artifacts.Deployment.Image.Container.Docker:
+ _extensions:
+ specification: tosca-simple-1.0
+ specification_section: 8.1.1
+ derived_from: tosca.artifacts.Deployment.Image
+ description: Docker Container Image
+
+ tosca.artifacts.Deployment.Image.VM.ISO:
+ _extensions:
+ specification: tosca-simple-1.0
+ specification_section: 8.1.2
+ derived_from: tosca.artifacts.Deployment.Image.VM
+ description: Virtual Machine (VM) image in ISO disk format
+ mime_type: application/octet-stream
+ file_ext: [ iso ]
+
+ tosca.artifacts.Deployment.Image.VM.QCOW2:
+ _extensions:
+ specification: tosca-simple-1.0
+ specification_section: 8.1.3
+ derived_from: tosca.artifacts.Deployment.Image.VM
+ description: Virtual Machine (VM) image in QCOW v2 standard disk format
+ mime_type: application/octet-stream
+ file_ext: [ qcow2 ]
+
+capability_types:
+
+ tosca.capabilities.Container.Docker:
+ _extensions:
+ specification: tosca-simple-1.0
+ specification_section: 8.2.1
+ derived_from: tosca.capabilities.Container
+ description: The type indicates capabilities of a Docker runtime environment (client).
+ properties:
+ version:
+ description: >-
+ The Docker version capability (i.e., the versions supported by the capability).
+ type: list
+ required: false
+ entry_schema: version
+ publish_all:
+ description: >-
+          Indicates that all ports (ranges) listed in the dockerfile using the EXPOSE keyword
+          should be published.
+ type: boolean
+ default: false
+ required: false
+ publish_ports:
+ description: >-
+          List of port mappings to publish, from source (Docker container) to target (host) ports.
+ type: list
+ entry_schema: PortSpec
+ required: false
+ expose_ports:
+ description: >-
+          List of port mappings from the source (Docker container) to expose to other Docker
+          containers (not accessible outside the host).
+ type: list
+ entry_schema: PortSpec
+ required: false
+ volumes:
+ description: >-
+          The dockerfile VOLUME command, used to enable access from the Docker container to a
+          directory on the host machine.
+ type: list
+ entry_schema: string
+ required: false
+ # ARIA NOTE: these are missing in the spec
+ host_id:
+ description: >-
+          The optional identifier of an existing host resource on which to run this container.
+ type: string
+ required: false
+ volume_id:
+ description: >-
+          The optional identifier of an existing storage volume (resource) on which to create
+          the container's mount point(s).
+ type: string
+ required: false
+
+ # ARIA NOTE: missing in spec
+ tosca.capabilities.Docker.Link:
+ derived_from: tosca.capabilities.Root
+    description: This is a capability that would mimic the Docker --link feature
+
+node_types:
+
+ tosca.nodes.Database.MySQL:
+ _extensions:
+ specification: tosca-simple-1.0
+ specification_section: 8.3.1
+ derived_from: tosca.nodes.Database
+ requirements:
+ - host:
+ capability: tosca.capabilities.Container # ARIA NOTE: missing in spec
+ node: tosca.nodes.DBMS.MySQL
+
+ tosca.nodes.DBMS.MySQL:
+ _extensions:
+ specification: tosca-simple-1.0
+ specification_section: 8.3.2
+ derived_from: tosca.nodes.DBMS
+ properties:
+ port:
+        description: Reflects the default MySQL server port.
+        type: integer # ARIA NOTE: missing in spec
+ default: 3306
+ root_password:
+ # MySQL requires a root_password for configuration
+ # Override parent DBMS definition to make this property required
+        type: string # ARIA NOTE: missing in spec
+ required: true
+ capabilities:
+ # Further constrain the 'host' capability to only allow MySQL databases
+ host:
+ type: tosca.capabilities.Container # ARIA NOTE: missing in spec
+ valid_source_types: [ tosca.nodes.Database.MySQL ]
+
+ tosca.nodes.WebServer.Apache:
+ _extensions:
+ specification: tosca-simple-1.0
+ specification_section: 8.3.3
+ derived_from: tosca.nodes.WebServer
+
+ tosca.nodes.WebApplication.WordPress:
+ _extensions:
+ specification: tosca-simple-1.0
+ specification_section: 8.3.4
+ derived_from: tosca.nodes.WebApplication
+ properties:
+ admin_user:
+ type: string
+ required: false # ARIA NOTE: missing in spec
+ admin_password:
+ type: string
+ required: false # ARIA NOTE: missing in spec
+ db_host:
+ type: string
+ required: false # ARIA NOTE: missing in spec
+ requirements:
+ - database_endpoint:
+ capability: tosca.capabilities.Endpoint.Database
+ node: tosca.nodes.Database
+ relationship: tosca.relationships.ConnectsTo
+
+ tosca.nodes.WebServer.Nodejs:
+ _extensions:
+ specification: tosca-simple-1.0
+ specification_section: 8.3.5
+ derived_from: tosca.nodes.WebServer
+ properties:
+ # Property to supply the desired implementation in the Github repository
+ github_url:
+ required: false
+ type: string
+        description: Location of the application on GitHub.
+ default: https://github.com/mmm/testnode.git
+ interfaces:
+ Standard:
+ inputs:
+ github_url:
+ type: string
+
+ tosca.nodes.Container.Application.Docker:
+ _extensions:
+ specification: tosca-simple-1.0
+ specification_section: 8.3.6
+ derived_from: tosca.nodes.Container.Application
+ requirements:
+ - host:
+ capability: tosca.capabilities.Container.Docker
+ - database_link: # ARIA NOTE: missing in spec
+ capability: tosca.capabilities.Docker.Link
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/object-storage-1/inputs.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/object-storage-1/inputs.yaml
new file mode 100644
index 0000000..57f99a3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/object-storage-1/inputs.yaml
@@ -0,0 +1 @@
+objectstore_name: "objectstore"
\ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/object-storage-1/object-storage-1.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/object-storage-1/object-storage-1.yaml
new file mode 100644
index 0000000..c55a4db
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/object-storage-1/object-storage-1.yaml
@@ -0,0 +1,24 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+ TOSCA template for creating an object storage service.
+
+metadata:
+ template_name: object-storage-1
+ template_author: TOSCA Simple Profile in YAML
+ template_version: '1.0'
+
+topology_template:
+
+ inputs:
+ objectstore_name:
+ type: string
+
+ node_templates:
+
+ obj_store_server:
+ type: tosca.nodes.ObjectStorage
+ properties:
+ name: { get_input: objectstore_name }
+ size: 4096 MB
+ maxsize: 20 GB
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/software-component-1/inputs.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/software-component-1/inputs.yaml
new file mode 100644
index 0000000..c1ee88a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/software-component-1/inputs.yaml
@@ -0,0 +1 @@
+cpus: 4
\ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/software-component-1/software-component-1.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/software-component-1/software-component-1.yaml
new file mode 100644
index 0000000..fc1cfd7
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/software-component-1/software-component-1.yaml
@@ -0,0 +1,54 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+ TOSCA Simple Profile with a SoftwareComponent node with a declared Virtual machine (VM) deployment
+ artifact that automatically deploys to its host Compute node.
+
+metadata:
+ template_name: software-component-1
+ template_author: TOSCA Simple Profile in YAML
+ template_version: '1.0'
+
+imports:
+ - ../non-normative-types.yaml
+
+topology_template:
+
+ # ARIA NOTE: missing in spec
+ inputs:
+ cpus:
+ type: integer
+ description: Number of CPUs for the server.
+ constraints:
+ - valid_values: [ 1, 2, 4, 8 ]
+
+ node_templates:
+
+ my_virtual_machine:
+ type: SoftwareComponent
+ artifacts:
+ my_vm_image:
+ file: images/fedora-18-x86_64.qcow2
+ type: tosca.artifacts.Deployment.Image.VM.QCOW2
+ requirements:
+ - host: my_server
+ # Automatically deploy the VM image referenced on the create operation
+ interfaces:
+ Standard:
+ create: my_vm_image
+
+ # Compute instance with no Operating System guest host
+ my_server:
+ type: Compute
+ capabilities:
+ # Note: no guest OperatingSystem requirements as these are in the image.
+ host:
+ properties:
+ disk_size: 10 GB
+ num_cpus: { get_input: cpus }
+ mem_size: 4 GB
+
+ outputs:
+ private_ip:
+ description: The private IP address of the deployed server instance.
+ value: { get_attribute: [ my_server, private_address ] }
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/webserver-dbms-1/webserver-dbms-1.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/webserver-dbms-1/webserver-dbms-1.yaml
new file mode 100644
index 0000000..daa24df
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/webserver-dbms-1/webserver-dbms-1.yaml
@@ -0,0 +1,122 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+  TOSCA simple profile with WordPress, a web server and a MySQL DBMS hosting the application's
+  database content, all on the same server. Does not include input defaults or constraints.
+
+metadata:
+ template_name: webserver-dbms-1
+ template_author: TOSCA Simple Profile in YAML
+ template_version: '1.0'
+
+imports:
+ - ../non-normative-types.yaml
+
+topology_template:
+
+ inputs:
+ cpus:
+ type: integer
+ description: Number of CPUs for the server.
+ db_name:
+ type: string
+ description: The name of the database.
+ db_user:
+ type: string
+ description: The username of the DB user.
+ db_pwd:
+ type: string
+ description: The WordPress database admin account password.
+ db_root_pwd:
+ type: string
+ description: Root password for MySQL.
+ db_port:
+ type: PortDef
+ description: Port for the MySQL database
+ # ARIA NOTE: missing in spec
+ context_root:
+ type: string
+ description: Context root for WordPress.
+
+ node_templates:
+
+ wordpress:
+ type: tosca.nodes.WebApplication.WordPress
+ properties:
+ context_root: { get_input: context_root }
+ requirements:
+ - host: webserver
+ - database_endpoint: mysql_database
+ interfaces:
+ Standard:
+ create: wordpress_install.sh
+ configure:
+ implementation: wordpress_configure.sh
+ inputs:
+ wp_db_name: { get_property: [ mysql_database, name ] }
+ wp_db_user: { get_property: [ mysql_database, user ] }
+ wp_db_password: { get_property: [ mysql_database, password ] }
+ # In my own template, find requirement/capability, find port property
+ wp_db_port: { get_property: [ SELF, database_endpoint, port ] }
+
+ mysql_database:
+ type: Database
+ properties:
+ name: { get_input: db_name }
+ user: { get_input: db_user }
+ password: { get_input: db_pwd }
+ port: { get_input: db_port }
+ capabilities:
+ database_endpoint:
+ properties:
+ port: { get_input: db_port }
+ requirements:
+ - host: mysql_dbms
+ interfaces:
+ Standard:
+ configure: mysql_database_configure.sh
+
+ mysql_dbms:
+ type: DBMS
+ properties:
+ root_password: { get_input: db_root_pwd }
+ port: { get_input: db_port }
+ requirements:
+ - host: server
+ interfaces:
+ Standard:
+ # ARIA NOTE: not declared in spec
+ #inputs:
+ # db_root_password: { get_property: [ mysql_dbms, root_password ] }
+ create: mysql_dbms_install.sh
+ start: mysql_dbms_start.sh
+ configure: mysql_dbms_configure.sh
+
+ webserver:
+ type: WebServer
+ requirements:
+ - host: server
+ interfaces:
+ Standard:
+ create: webserver_install.sh
+ start: webserver_start.sh
+
+ server:
+ type: Compute
+ capabilities:
+ host:
+ properties:
+ disk_size: 10 GB
+ num_cpus: { get_input: cpus }
+ mem_size: 4096 MB
+ os:
+ properties:
+ architecture: x86_64
+ type: linux
+ distribution: fedora
+ version: 17.0
+
+ outputs:
+ website_url:
+      description: URL for the WordPress wiki.
+ value: { get_attribute: [ server, public_address ] }
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/webserver-dbms-2/custom_types/paypalpizzastore_nodejs_app.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/webserver-dbms-2/custom_types/paypalpizzastore_nodejs_app.yaml
new file mode 100644
index 0000000..02bb399
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/webserver-dbms-2/custom_types/paypalpizzastore_nodejs_app.yaml
@@ -0,0 +1,15 @@
+# ARIA NOTE: missing in spec
+
+node_types:
+
+ tosca.nodes.WebApplication.PayPalPizzaStore:
+ derived_from: tosca.nodes.WebApplication
+ properties:
+ github_url:
+ type: string
+ requirements:
+ - database_connection:
+ capability: tosca.capabilities.Node
+
+ tosca.nodes.WebServer.Nodejs:
+ derived_from: tosca.nodes.WebServer
diff --git a/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/webserver-dbms-2/webserver-dbms-2.yaml b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/webserver-dbms-2/webserver-dbms-2.yaml
new file mode 100644
index 0000000..91f0b35
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/examples/tosca-simple-1.0/use-cases/webserver-dbms-2/webserver-dbms-2.yaml
@@ -0,0 +1,115 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+ TOSCA simple profile with a nodejs web server hosting a PayPal sample application which connects
+ to a mongodb database.
+
+metadata:
+ template_name: webserver-dbms-2
+ template_author: TOSCA Simple Profile in YAML
+ template_version: '1.0'
+
+imports:
+ - custom_types/paypalpizzastore_nodejs_app.yaml
+
+dsl_definitions:
+
+ ubuntu_node: &ubuntu_node
+ disk_size: 10 GB
+ num_cpus: { get_input: my_cpus }
+ mem_size: 4096 MB
+ os_capabilities: &os_capabilities
+ architecture: x86_64
+ type: Linux
+ distribution: Ubuntu
+ version: 14.04
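+  # The two Compute nodes below reuse these definitions via the *ubuntu_node
+  # and *os_capabilities aliases.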
+
+topology_template:
+
+ inputs:
+ my_cpus:
+ type: integer
+ description: Number of CPUs for the server.
+ constraints:
+ - valid_values: [ 1, 2, 4, 8 ]
+ default: 1
+ github_url:
+ type: string
+      description: The URL from which to download nodejs.
+ default: https://github.com/sample.git
+
+ node_templates:
+
+ paypal_pizzastore:
+ type: tosca.nodes.WebApplication.PayPalPizzaStore
+ properties:
+ github_url: { get_input: github_url }
+ requirements:
+ - host: nodejs
+ - database_connection: mongo_db
+ interfaces:
+ Standard:
+ configure:
+ implementation: scripts/nodejs/configure.sh
+ inputs:
+ github_url: { get_property: [ SELF, github_url ] }
+ mongodb_ip: { get_attribute: [ mongo_server, private_address ] }
+ start: scripts/nodejs/start.sh
+
+ nodejs:
+ type: tosca.nodes.WebServer.Nodejs
+ requirements:
+ - host: app_server
+ interfaces:
+ Standard:
+ create: scripts/nodejs/create.sh
+
+ mongo_db:
+ type: tosca.nodes.Database
+ properties:
+ name: 'pizzastore' # ARIA NOTE: missing in spec
+ requirements:
+ - host: mongo_dbms
+ interfaces:
+ Standard:
+ create: create_database.sh
+
+ mongo_dbms:
+ type: tosca.nodes.DBMS
+ requirements:
+ - host: mongo_server
+ properties:
+ port: 27017
+ interfaces:
+ Standard: # ARIA NOTE: mistaken in spec
+ create: mongodb/create.sh
+ configure:
+ implementation: mongodb/config.sh
+ inputs:
+ mongodb_ip: { get_attribute: [ mongo_server, private_address ] }
+ start: mongodb/start.sh
+
+ mongo_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ os:
+ properties: *os_capabilities
+ host:
+ properties: *ubuntu_node
+
+ app_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ os:
+ properties: *os_capabilities
+ host:
+ properties: *ubuntu_node
+
+ outputs:
+
+ nodejs_url:
+ description: URL for the nodejs server, http://<IP>:3000
+ value: { get_attribute: [ app_server, private_address ] }
+ mongodb_url:
+ description: URL for the mongodb server.
+ value: { get_attribute: [ mongo_server, private_address ] }
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/__init__.py
new file mode 100644
index 0000000..ff4fa7d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/__init__.py
@@ -0,0 +1,56 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os.path
+
+from aria import extension
+
+from .simple_v1_0 import ToscaSimplePresenter1_0
+from .simple_nfv_v1_0 import ToscaSimpleNfvPresenter1_0
+
+
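+# Parser extension hooks: ARIA's extension mechanism collects these static
+# methods to discover the available TOSCA presenter classes, the package and
+# reference URLs of the specifications they implement, and the directory from
+# which the bundled profiles are served to the URI loader.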
+@extension.parser
+class ParserExtensions(object):
+
+ @staticmethod
+ def presenter_class():
+ return ToscaSimplePresenter1_0, ToscaSimpleNfvPresenter1_0
+
+ @staticmethod
+ def specification_package():
+ return 'aria_extension_tosca'
+
+ @staticmethod
+ def specification_url():
+ return {
+ 'yaml-1.1': 'http://yaml.org',
+ 'tosca-simple-1.0': 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/'
+ 'cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html',
+ 'tosca-simple-nfv-1.0': 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/'
+ 'tosca-nfv-v1.0.html'
+ }
+
+ @staticmethod
+ def uri_loader_prefix():
+ the_dir = os.path.dirname(__file__)
+ return os.path.join(the_dir, 'profiles')
+
+
+MODULES = (
+ 'simple_v1_0',
+ 'simple_nfv_v1_0')
+
+__all__ = (
+ 'MODULES',)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/aria-1.0/aria-1.0.yaml b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/aria-1.0/aria-1.0.yaml
new file mode 100644
index 0000000..e421150
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/aria-1.0/aria-1.0.yaml
@@ -0,0 +1,97 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+policy_types:
+
+ aria.Plugin:
+ _extensions:
+ shorthand_name: Plugin
+ type_qualified_name: aria:Plugin
+ role: plugin
+ description: >-
+ Policy used to specify plugins used by services. For an operation to be able to use a plugin,
+ it must have a matching policy. The name of the policy must be the name of the plugin. The
+ optional properties can be used to further specify plugin selection by the orchestrator.
+ derived_from: tosca.policies.Root
+ properties:
+ version:
+ description: >-
+ Minimum plugin version.
+ type: version
+ required: false
+ enabled:
+ description: >-
+ If set to false, the plugin will be ignored, and all operations and
+ workflows depending on it will also be disabled.
+ type: boolean
+ default: true
+
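+# A usage sketch (illustrative only; the plugin name and version are
+# examples): a service template selects a plugin by declaring a policy whose
+# name matches the plugin name.
+#
+#   topology_template:
+#     policies:
+#       cloudify-azure-plugin:
+#         type: aria.Plugin
+#         properties:
+#           version: 1.4.2
+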
+ aria.Workflow:
+ _extensions:
+ shorthand_name: Workflow
+ type_qualified_name: aria:Workflow
+ role: workflow
+ description: >-
+ Policy used to specify custom workflows. A workflow is usually a workload of interconnected
+ calls to operations on nodes and relationships in the service topology. The name of the policy
+ is used as the name of the workflow. Note that it can be the same name as one of the normative
+ lifecycle workflows ("install", "uninstall", etc.), in which case it would be considered an
+ override of the default behavior. If the workflow requires parameters then this base type
+ should be inherited and extended with additional properties.
+ derived_from: tosca.policies.Root
+ properties:
+ implementation:
+ description: >-
+ The interpretation of the implementation string depends on the orchestrator. In ARIA it is
+ the full path to a Python @workflow function that generates a task graph based on the
+ service topology.
+ type: string
+
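+# A usage sketch (all names are assumptions): overriding the normative
+# "install" workflow with a custom Python @workflow function.
+#
+#   topology_template:
+#     policies:
+#       install:
+#         type: aria.Workflow
+#         properties:
+#           implementation: my_package.my_workflows.install
+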
+ aria.Scaling:
+ _extensions:
+ type_qualified_name: aria:Scaling
+ role: scaling
+ description: >-
+ Policy specifying the number of instances an orchestrator should create for
+ the targeted node templates.
+ derived_from: tosca.policies.Scaling
+ properties:
+ min_instances:
+ description: >-
+ This property is used to indicate the minimum number of instances that should be created
+ for the associated TOSCA Node Template by a TOSCA orchestrator.
+ type: integer
+ default: 1
+ constraints:
+ - greater_or_equal: 0
+ max_instances:
+ description: >-
+ This property is used to indicate the maximum number of instances that should be created
+ for the associated TOSCA Node Template by a TOSCA orchestrator.
+ type: integer
+ default: 1
+ constraints:
+ - greater_or_equal: 0
+ default_instances:
+ description: >-
+ An optional property that indicates the requested default number of instances that should
+ be the starting number of instances a TOSCA orchestrator should attempt to allocate. Note:
+ The value for this property MUST be in the range between the values set for
+ "min_instances" and "max_instances" properties.
+ type: integer
+ constraints:
+ - greater_or_equal: 0
+ required: false
+ targets:
+ - tosca.nodes.Root
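+
+# A usage sketch (names and values are assumptions): scaling a hypothetical
+# "app" node template between 1 and 5 instances, starting at 2.
+#
+#   topology_template:
+#     policies:
+#       app_scaling:
+#         type: aria.Scaling
+#         properties:
+#           min_instances: 1
+#           max_instances: 5
+#           default_instances: 2
+#         targets: [ app ]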
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/azure-plugin/azureplugin.yaml b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/azure-plugin/azureplugin.yaml
new file mode 100644
index 0000000..77b61b5
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/azure-plugin/azureplugin.yaml
@@ -0,0 +1,1981 @@
+#
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+
+topology_template:
+ policies:
+ cloudify-azure-plugin:
+ description: >-
+ The Azure plugin that executes this profile's operations.
+ type: aria.Plugin
+ properties:
+ version: 1.4.2
+
+
+data_types:
+
+ aria.azure.datatypes.Config:
+ description: >-
+ azure configuration
+ properties:
+ subscription_id:
+ description: >
+ A Microsoft Azure subscription ID. This is a unique
+ user account in Azure. This can be found in the
+ Subscriptions tab on your dashboard.
+ type: string
+ required: false
+ tenant_id:
+ description: >
+ A Microsoft Azure tenant ID. This can be found in
+ the Azure Active Directory dashboard by accessing
+ your directory. Open the Application Endpoints
+ dialog and your tenant ID will be in the URL for
+ the OAUTH2.0 TOKEN ENDPOINT.
+ type: string
+ required: false
+ client_id:
+ description: >
+ A Microsoft Azure client ID. This can be found in
+ the Azure Active Directory dashboard by accessing
+ your directory. View the Applications tab and select
+ the application used to access Azure APIs. Your
+ client ID can be found by expanding the ACCESS WEB
+ APIS IN OTHER APPLICATIONS tab.
+ type: string
+ required: false
+ client_secret:
+ description: >
+ A Microsoft Azure client secret key. This can be found
+ or generated in the same location as your client ID.
+ type: string
+ required: false
+ scale_name_separator:
+ description: >
+ When scaling resources, a unique name must be sent to
+ the Azure API. Since names are left to the user to
+ manage (the service does not generate unique IDs),
+ this plugin will attempt to append characters or
+ numbers to the end of the resource name when resources
+ are scaled out. This value should be a character, or
+ characters, that will separate the base name from the
+ generated unique characters. For instance, if the
+ base name of a resource is "myvm", the separator is
+ set to "_", and a scale workflow attempts to create
+ another resource, the resulting name could be
+ something like "myvm_1". This field can be left blank.
+ type: string
+ required: false
+ default: "_"
+ scale_name_suffix_chars:
+ description: >
+ A string of characters (ASCII) to be used when
+ generating unique suffix data when scaling resources.
+ See "scale_name_separator" for more information.
+ type: string
+ required: true
+ default: "1234567890"
+
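+# An illustrative azure_config value (all values are placeholders, not real
+# credentials):
+#
+#   azure_config:
+#     subscription_id: 00000000-0000-0000-0000-000000000000
+#     tenant_id: 00000000-0000-0000-0000-000000000000
+#     client_id: 00000000-0000-0000-0000-000000000000
+#     client_secret: not-a-real-secret
+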
+ aria.azure.datatypes.AgentConfig:
+ properties:
+ install_method:
+ type: string
+ required: false
+ port:
+ type: integer
+ required: false
+
+
+ aria.azure.datatypes.StorageAccountConfig:
+ description: >
+ See https://msdn.microsoft.com/en-us/library/mt163564.aspx
+ properties:
+ accountType:
+ description: >
+ A storage account type (case sensitive)
+ type: string
+ default: Standard_LRS
+ required: true
+
+ aria.azure.datatypes.DataDiskConfig:
+ properties:
+ container_name:
+ type: string
+ description: >
+ Container for the resource.
+ default: vhds
+ size:
+ description: >
+ Size of the Page Blob (Azure disk) in GiB. Maximum of 1023 GiB allowed.
+ type: integer
+ default: 20
+ required: true
+ force_delete:
+ description: >
+ If set to true, the resource's "delete" lifecycle will purge the
+ Azure Data Disk from the Azure Storage Account. If false, the
+ Data Disk is left as-is in the Storage Account.
+ type: boolean
+ default: false
+
+
+ aria.azure.datatypes.FileShareConfig:
+ properties:
+ metadata:
+ description: >
+ Metadata (dict) for the File Share
+ required: false
+ type: string
+ quota:
+ description: >
+ Quota, in GiB, for the maximum size of the file share
+ required: false
+ type: integer
+ fail_on_exist:
+ description: >
+ If true, causes the operation to raise a NonRecoverableError if
+ the file share already exists. If false, issues a warning and
+ continues execution.
+ default: false
+ type: boolean
+
+ aria.azure.datatypes.AddressSpace:
+ description: >
+ Contains the address prefixes, typically network CIDRs
+ properties:
+ addressPrefixes:
+ required: true
+ type: list
+ entry_schema: string
+
+ aria.azure.datatypes.VirtualNetworkConfig:
+ description: >
+ See https://msdn.microsoft.com/en-us/library/mt163661.aspx
+ properties:
+ addressSpace:
+ default:
+ addressPrefixes:
+ - 172.16.0.0/16
+ required: true
+ type: aria.azure.datatypes.AddressSpace
+ dhcpOptions:
+ required: false
+ type: string
+ subnets:
+ required: false
+ type: string
+
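+# A sketch of overriding the default address space above (the prefix value is
+# an example):
+#
+#   resource_config:
+#     addressSpace:
+#       addressPrefixes:
+#         - 10.10.0.0/16
+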
+ aria.azure.datatypes.NetworkSecurityGroupConfig:
+ description: >
+ See https://msdn.microsoft.com/en-us/library/mt163656.aspx
+ properties:
+ securityRules:
+ required: false
+ type: list
+ entry_schema: string
+
+
+
+ aria.azure.datatypes.NetworkSecurityRuleConfig:
+ description: >
+ See https://msdn.microsoft.com/en-us/library/mt163645.aspx
+ properties:
+ description:
+ type: string
+ required: false
+ protocol:
+ type: string
+ required: true
+ sourcePortRange:
+ type: string
+ required: false
+ destinationPortRange:
+ type: string
+ required: false
+ sourceAddressPrefix:
+ type: string
+ required: true
+ destinationAddressPrefix:
+ type: string
+ required: true
+ access:
+ type: string
+ required: true
+ priority:
+ type: integer
+ required: true
+ direction:
+ type: string
+ required: true
+
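+# An illustrative value of this datatype (all values are examples): an
+# inbound rule allowing SSH from anywhere.
+#
+#   resource_config:
+#     description: Allow inbound SSH
+#     protocol: Tcp
+#     sourcePortRange: '*'
+#     destinationPortRange: '22'
+#     sourceAddressPrefix: '*'
+#     destinationAddressPrefix: '*'
+#     access: Allow
+#     priority: 100
+#     direction: Inbound
+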
+ aria.azure.datatypes.SubnetConfig:
+ description: >
+ See https://msdn.microsoft.com/en-us/library/mt163621.aspx
+ properties:
+ addressPrefix:
+ type: string
+ required: false
+ networkSecurityGroup:
+ type: string
+ required: false
+ routeTable:
+ type: string
+ required: false
+
+
+ aria.azure.datatypes.RouteTableConfig:
+ description: >
+ See https://msdn.microsoft.com/en-us/library/mt502548.aspx
+ properties:
+ routes:
+ type: string
+ required: false
+
+
+ aria.azure.datatypes.RouteConfig:
+ description: >
+ See https://msdn.microsoft.com/en-us/library/mt459110.aspx
+ properties:
+ addressPrefix:
+ type: string
+ required: true
+ nextHopType:
+ type: string
+ required: true
+ nextHopIpAddress:
+ type: string
+ required: false
+
+
+ aria.azure.datatype.NetworkInterfaceDnsSettings:
+ description: >
+ See https://msdn.microsoft.com/en-us/library/mt163668.aspx
+ properties:
+ appliedDnsServers:
+ type: list
+ entry_schema: string
+ required: false
+ dnsServers:
+ type: list
+ entry_schema: string
+ required: false
+ internalDnsNameLabel:
+ type: string
+ required: false
+ internalDomainNameSuffix:
+ type: string
+ required: false
+ internalFqdn:
+ type: string
+ required: false
+
+
+ aria.azure.datatypes.NetworkInterfaceCardConfig:
+ description: >
+ See https://msdn.microsoft.com/en-us/library/mt163668.aspx
+ properties:
+ networkSecurityGroups:
+ required: false
+ type: string
+ enableIPForwarding:
+ required: false
+ type: boolean
+ ipConfigurations:
+ required: false
+ type: string
+ dnsSettings:
+ required: false
+ type: aria.azure.datatype.NetworkInterfaceDnsSettings
+
+
+ aria.azure.datatypes.IPConfigurationConfig:
+ properties:
+ privateIPAddress:
+ type: string
+ description: >
+ Static, private IP Address
+ required: false
+ privateIPAllocationMethod:
+ type: string
+ description: >
+ Defines how a private IP address is assigned. Options
+ are Static or Dynamic
+ required: true
+ privateIPAddressVersion:
+ type: string
+ description: >
+ Defines the version of the IP protocol
+ required: false
+
+
+ aria.azure.datatypes.PublicIPAddressDnsSettings:
+ description: >
+ See https://docs.microsoft.com/en-gb/rest/api/virtualnetwork/PublicIPAddresses/CreateOrUpdate#definitions_publicipaddressdnssettings
+ properties:
+ domainNameLabel:
+ type: string
+ description: >
+ Domain name label that refers to the VM
+ required: false
+ fqdn:
+ type: string
+ required: false
+ reverseFqdn:
+ type: string
+ required: false
+
+ aria.azure.datatypes.PublicIPAddressConfig:
+ description: >
+ See https://msdn.microsoft.com/en-us/library/mt163590.aspx
+ properties:
+ publicIPAllocationMethod:
+ type: string
+ description: >
+ Defines whether the IP address is stable or dynamic.
+ Options are Static or Dynamic
+ required: true
+ publicIPAddressVersion:
+ type: string
+ description: >
+ Defines the version of the public IP.
+ required: false
+ idleTimeoutInMinutes:
+ type: integer
+ description: >
+ Specifies the timeout (in minutes) for the TCP idle connection.
+ The value can be set between 4 and 30 minutes
+ required: false
+ dnsSettings:
+ type: aria.azure.datatypes.PublicIPAddressDnsSettings
+ required: false
+# domainNameLabel:
+# type: string
+# description: >
+# The concatenation of the domain name label and the regionalized
+# DNS zone make up the fully qualified domain name associated
+# with the public IP address.
+# required: false
+# reverseFqdn:
+# type: string
+# description: >
+# A fully qualified domain name that resolves to this
+# public IP address.
+# required: false
+
+
+ aria.azure.datatypes.AvailabilitySetConfig:
+ description: >
+ See https://msdn.microsoft.com/en-us/library/mt163607.aspx
+ properties:
+ platformUpdateDomainCount:
+ type: integer
+ required: false
+ platformFaultDomainCount:
+ type: integer
+ required: false
+
+
+
+ aria.azure.datatypes.HardwareProfile:
+ properties:
+ vmSize:
+ required: true
+ type: string
+
+
+ aria.azure.datatypes.ImageReference:
+ properties:
+ publisher:
+ required: true
+ type: string
+ offer:
+ required: true
+ type: string
+ sku:
+ required: true
+ type: string
+ version:
+ required: true
+ type: string
+
+
+ aria.azure.datatypes.StorageProfile:
+ properties:
+ imageReference:
+ required: true
+ type: aria.azure.datatypes.ImageReference
+
+
+
+
+ aria.azure.datatypes.PublicKey:
+ properties:
+ keydata:
+ required: true
+ type: string
+ path:
+ required: true
+ type: string
+
+ aria.azure.datatypes.SSH:
+ properties:
+ publicKeys:
+ required: false
+ type: list
+ entry_schema: aria.azure.datatypes.PublicKey
+
+
+ aria.azure.datatypes.LinuxConfiguration:
+ properties:
+ ssh:
+ required: false
+ type: aria.azure.datatypes.SSH
+ disablePasswordAuthentication:
+ required: false
+ default: true
+ type: boolean
+
+
+ aria.azure.datatypes.OSProfile:
+ properties:
+ computerName:
+ required: true
+ type: string
+ adminUserName:
+ required: true
+ type: string
+ adminPassword:
+ required: true
+ type: string
+ linuxConfiguration:
+ required: false
+ type: aria.azure.datatypes.LinuxConfiguration
+
+
+
+ aria.azure.datatypes.VirtualMachineConfig:
+ description: >
+ https://msdn.microsoft.com/en-us/library/azure/mt163591.aspx
+ properties:
+ hardwareProfile:
+ required: true
+ type: aria.azure.datatypes.HardwareProfile
+ storageProfile:
+ required: true
+ type: aria.azure.datatypes.StorageProfile
+ osProfile:
+ required: true
+ type: aria.azure.datatypes.OSProfile
+
+ aria.azure.datatypes.LoadBalancerConfig:
+ description: >
+ See https://msdn.microsoft.com/en-us/library/mt163574.aspx
+ properties:
+ frontendIPConfigurations:
+ required: false
+ type: string
+ backendAddressPools:
+ required: false
+ type: string
+ loadBalancingRules:
+ required: false
+ type: string
+ probes:
+ required: false
+ type: string
+ inboundNatRules:
+ required: false
+ type: string
+
+
+
+ aria.azure.datatypes.LoadBalancerProbeConfig:
+ description: >
+ See https://msdn.microsoft.com/en-us/library/mt163574.aspx
+ properties:
+ protocol:
+ type: string
+ default: Tcp
+ required: true
+ port:
+ type: integer
+ required: true
+ requestPath:
+ type: string
+ required: false
+ intervalInSeconds:
+ type: integer
+ default: 5
+ required: true
+ numberOfProbes:
+ type: integer
+ default: 16
+ required: true
+
+ aria.azure.datatypes.LoadBalancerIncomingNATRuleConfig:
+ description: >
+ See https://msdn.microsoft.com/en-us/library/mt163574.aspx
+ properties:
+ protocol:
+ type: string
+ default: Tcp
+ required: true
+ frontendPort:
+ type: integer
+ required: true
+ backendPort:
+ type: integer
+ required: true
+
+
+ aria.azure.datatypes.LoadBalancerRuleConfig:
+ description: >
+ See https://msdn.microsoft.com/en-us/library/mt163574.aspx
+ properties:
+ protocol:
+ type: string
+ default: Tcp
+ required: true
+ frontendPort:
+ type: integer
+ required: true
+ backendPort:
+ type: integer
+ required: true
+ enableFloatingIP:
+ type: boolean
+ required: false
+ default: false
+ idleTimeoutInMinutes:
+ type: integer
+ required: false
+ default: 5
+ loadDistribution:
+ type: string
+ required: false
+ default: Default
+
+
+
+
+
+interface_types:
+
+ aria.azure.interfaces.validation:
+ derived_from: tosca.interfaces.Root
+ creation:
+ description: >-
+ creation operation for the azure validation interface
+ deletion:
+ description: >-
+ deletion operation for the azure validation interface
+ aria.azure.interfaces.network:
+ derived_from: tosca.interfaces.Root
+ preconfigure:
+ establish:
+ unlink:
+
+node_types:
+
+ aria.azure.nodes.ResourceGroup:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource. Resource group name must be no longer than
+ 80 characters long. It can contain only alphanumeric characters,
+ dash, underscore, opening parenthesis, closing parenthesis,
+ and period. The name cannot end with a period.
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.resourcegroup.create
+ delete: cloudify-azure-plugin > cloudify_azure.resources.resourcegroup.delete
+
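+# A minimal node template sketch for this type (the template name, location,
+# and the azure_config input are assumptions):
+#
+#   node_templates:
+#     my_resource_group:
+#       type: aria.azure.nodes.ResourceGroup
+#       properties:
+#         name: my-resource-group
+#         location: eastus
+#         azure_config: { get_input: azure_config }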
+
+ aria.azure.nodes.storage.StorageAccount:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource. Storage account name must be between
+ 3 and 24 characters in length and use numbers and lower-case
+ letters only.
+ required: false
+ resource_group_name:
+ type: string
+ description: >
+ Name of the Resource Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Resource Group node)
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.StorageAccountConfig
+ required: true
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.storage.storageaccount.create
+ delete: cloudify-azure-plugin > cloudify_azure.resources.storage.storageaccount.delete
+ requirements:
+ - resource_group:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.ResourceGroup
+ relationship: cloudify.azure.relationships.contained_in_resource_group
+ occurrences: [ 0, UNBOUNDED ]
+
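+# A sketch of satisfying the resource_group requirement above with an
+# explicit relationship (template names are assumptions):
+#
+#   node_templates:
+#     my_storage_account:
+#       type: aria.azure.nodes.storage.StorageAccount
+#       properties:
+#         resource_config:
+#           accountType: Standard_LRS
+#       requirements:
+#         - resource_group: my_resource_group
+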
+
+
+ aria.azure.nodes.storage.DataDisk:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource (include any extension, such as .vhd).
+ Can be up to 80 characters in length and
+ contain lowercase letters, numbers, ".", and "_". Must start
+ with a number or lowercase letter and cannot end with
+ either "_" or "."
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.DataDiskConfig
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.storage.disk.create_data_disk
+ delete: cloudify-azure-plugin > cloudify_azure.resources.storage.disk.delete_data_disk
+ requirements:
+ - storage_account:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.storage.StorageAccount
+ relationship: cloudify.azure.relationships.contained_in_storage_account
+ occurrences: [ 0, UNBOUNDED ]
+
+ aria.azure.nodes.storage.FileShare:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource. Can be up to 63 characters in length and
+ contain lowercase letters, numbers, and dashes. Must start
+ with a number or lowercase letter and cannot contain
+ two consecutive dashes.
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.FileShareConfig
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.storage.file.create_file_share
+
+
+
+
+ aria.azure.nodes.network.VirtualNetwork:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource.
+ required: false
+ resource_group_name:
+ type: string
+ description: >
+ Name of the Resource Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Resource Group node)
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.VirtualNetworkConfig
+ required: false
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.network.virtualnetwork.create
+ delete: cloudify-azure-plugin > cloudify_azure.resources.network.virtualnetwork.delete
+ requirements:
+ - resource_group:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.ResourceGroup
+ relationship: cloudify.azure.relationships.contained_in_resource_group
+ occurrences: [ 0, UNBOUNDED ]
+ - storage_account:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.storage.StorageAccount
+ relationship: cloudify.azure.relationships.virtual_network_depends_on_storage
+ occurrences: [ 0, UNBOUNDED ]
+
+
+
+ aria.azure.nodes.network.NetworkSecurityGroup:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource.
+ required: false
+ resource_group_name:
+ type: string
+ description: >
+ Name of the Resource Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Resource Group node)
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.NetworkSecurityGroupConfig
+ required: false
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.network.networksecuritygroup.create
+ delete: cloudify-azure-plugin > cloudify_azure.resources.network.networksecuritygroup.delete
+ requirements:
+ - resource_group:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.ResourceGroup
+ relationship: cloudify.azure.relationships.contained_in_resource_group
+ occurrences: [ 0, UNBOUNDED ]
+
+
+
+ aria.azure.nodes.network.NetworkSecurityRule:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource.
+ required: false
+ resource_group_name:
+ type: string
+ description: >
+ Name of the Resource Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Resource Group node)
+ required: false
+ network_security_group_name:
+ type: string
+ description: >
+ Name of the Network Security Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Network Security Group node)
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.NetworkSecurityRuleConfig
+ required: false
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.network.networksecurityrule.create
+ delete: cloudify-azure-plugin > cloudify_azure.resources.network.networksecurityrule.delete
+ requirements:
+ - resource_group:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.ResourceGroup
+ relationship: cloudify.azure.relationships.contained_in_resource_group
+ occurrences: [ 0, UNBOUNDED ]
+ - network_security_group:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.network.NetworkSecurityGroup
+ relationship: cloudify.azure.relationships.nic_connected_to_network_security_group
+ occurrences: [ 0, UNBOUNDED ]
+
+
+
+ aria.azure.nodes.network.Subnet:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource.
+ required: false
+ resource_group_name:
+ type: string
+ description: >
+ Name of the Resource Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Resource Group node)
+ required: false
+ virtual_network_name:
+ type: string
+ description: >
+ Name of the Virtual Network that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Virtual Network node)
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.SubnetConfig
+ required: false
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.network.subnet.create
+ delete: cloudify-azure-plugin > cloudify_azure.resources.network.subnet.delete
+ requirements:
+ - virtual_network:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.network.VirtualNetwork
+ relationship: cloudify.azure.relationships.contained_in_virtual_network
+ occurrences: [ 0, UNBOUNDED ]
+ - subnet_dependency:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.network.Subnet
+ relationship: cloudify.azure.relationships.depends_on_subnet
+ occurrences: [ 0, UNBOUNDED ]
+
+
+
+ aria.azure.nodes.network.RouteTable:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource.
+ required: false
+ resource_group_name:
+ type: string
+ description: >
+ Name of the Resource Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Resource Group node)
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.RouteTableConfig
+ required: false
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.network.routetable.create
+ delete: cloudify-azure-plugin > cloudify_azure.resources.network.routetable.delete
+ requirements:
+ - virtual_subnet:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.network.Subnet
+ relationship: cloudify.azure.relationships.route_table_attached_to_subnet
+ occurrences: [ 0, UNBOUNDED ]
+
+ aria.azure.nodes.network.Route:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource.
+ required: false
+ resource_group_name:
+ type: string
+ description: >
+ Name of the Resource Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Resource Group node)
+ required: false
+ route_table_name:
+ type: string
+ description: >
+ Name of the Route Table that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Route Table node)
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.RouteConfig
+ required: false
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.network.route.create
+ delete: cloudify-azure-plugin > cloudify_azure.resources.network.route.delete
+ requirements:
+ - route_table:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.network.RouteTable
+ relationship: cloudify.azure.relationships.depends_on_route_table
+ occurrences: [ 0, UNBOUNDED ]
+
+
+ aria.azure.nodes.network.NetworkInterfaceCard:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource.
+ required: false
+ resource_group_name:
+ type: string
+ description: >
+ Name of the Resource Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Resource Group node)
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ primary:
+ description: >
+ When using multiple Network Interfaces, a primary must be set
+ required: false
+ default: false
+ type: boolean
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.NetworkInterfaceCardConfig
+ required: false
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.network.networkinterfacecard.create
+ configure: cloudify-azure-plugin > cloudify_azure.resources.network.networkinterfacecard.configure
+ delete: cloudify-azure-plugin > cloudify_azure.resources.network.networkinterfacecard.delete
+ requirements:
+ - resource_group:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.ResourceGroup
+ relationship: cloudify.azure.relationships.contained_in_resource_group
+ occurrences: [ 0, UNBOUNDED ]
+ - ip_config:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.network.IPConfiguration
+ relationship: cloudify.azure.relationships.nic_connected_to_ip_configuration
+ occurrences: [ 0, UNBOUNDED ]
+ - security_group:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.network.NetworkSecurityGroup
+ relationship: cloudify.azure.relationships.nic_connected_to_network_security_group
+ occurrences: [ 0, UNBOUNDED ]
+
+
+
+ aria.azure.nodes.network.IPConfiguration:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource.
+ required: false
+ resource_group_name:
+ type: string
+ description: >
+ Name of the Resource Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Resource Group node)
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.IPConfigurationConfig
+ required: true
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ requirements:
+ - subnet:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.network.Subnet
+ relationship: cloudify.azure.relationships.ip_configuration_connected_to_subnet
+ occurrences: [ 0, UNBOUNDED ]
+ - ipaddress:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.network.PublicIPAddress
+ relationship: cloudify.azure.relationships.ip_configuration_connected_to_public_ip
+ occurrences: [ 0, UNBOUNDED ]
+
+ aria.azure.nodes.network.PublicIPAddress:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource.
+ required: false
+ resource_group_name:
+ type: string
+ description: >
+ Name of the Resource Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Resource Group node)
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.PublicIPAddressConfig
+ required: false
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.network.publicipaddress.create
+ delete: cloudify-azure-plugin > cloudify_azure.resources.network.publicipaddress.delete
+ requirements:
+ - resource_group:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.ResourceGroup
+ relationship: cloudify.azure.relationships.contained_in_resource_group
+ occurrences: [ 0, UNBOUNDED ]
+
+ aria.azure.nodes.compute.AvailabilitySet:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource.
+ required: false
+ resource_group_name:
+ type: string
+ description: >
+ Name of the Resource Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Resource Group node)
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.AvailabilitySetConfig
+ required: true
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.compute.availabilityset.create
+ delete: cloudify-azure-plugin > cloudify_azure.resources.compute.availabilityset.delete
+
+
+ aria.azure.nodes.compute.VirtualMachine:
+ derived_from: tosca.nodes.Compute
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource.
+ required: false
+ resource_group_name:
+ type: string
+ description: >
+ Name of the Resource Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Resource Group node)
+ required: false
+ use_public_ip:
+ type: boolean
+ description: >
+ Tells the deployment to use the public IP (if available) of the resource
+ for Cloudify Agent connections
+ default: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ plan:
+ description: >
+ Specifies information about the marketplace image used to create the virtual
+ machine. This element is only used for marketplace images.
+ required: false
+ type: string
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.VirtualMachineConfig
+ required: true
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ ip:
+ description: |
+ Property specifying the IP address of the resource to
+ use for the agent installer.
+ type: string
+ required: false
+ os_family:
+ description: |
+ Property specifying what type of operating system family
+ this compute node will run.
+ default: windows
+ type: string
+ agent_config:
+ type: aria.azure.datatypes.AgentConfig
+ default:
+ install_method: remote
+ port: 5985
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.compute.virtualmachine.create
+# configure:
+# implementation: cloudify-azure-plugin > cloudify_azure.resources.compute.virtualmachine.configure
+# inputs:
+# command_to_execute:
+# description: >
+# This is the command that the CustomScriptExtension extension will
+# execute. The file_uris below will be downloaded and this property
+# should specify a command to start the execution of one of them.
+# default: powershell -ExecutionPolicy Unrestricted -file ps_enable_winrm_http.ps1
+# file_uris:
+# default:
+# - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-azure-plugin/1.4/scripts/ps_enable_winrm_http.ps1
+ delete: cloudify-azure-plugin > cloudify_azure.resources.compute.virtualmachine.delete
+ requirements:
+ - resource_group:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.ResourceGroup
+ relationship: cloudify.azure.relationships.contained_in_resource_group
+ occurrences: [ 0, UNBOUNDED ]
+ - nic:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.network.NetworkInterfaceCard
+ relationship: cloudify.azure.relationships.connected_to_nic
+ occurrences: [ 0, UNBOUNDED ]
+ - storage_account:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.storage.StorageAccount
+ relationship: cloudify.azure.relationships.connected_to_storage_account
+ - data_disk:
+ capability: tosca.capabilities.Node
+ node: aria.azure.nodes.storage.DataDisk
+ relationship: cloudify.azure.relationships.vm_connected_to_datadisk
+ occurrences: [ 0, UNBOUNDED ]
+
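+# A sketch of a minimal resource_config for this type (all values are
+# illustrative; the datatypes above define the full schema):
+#
+#   resource_config:
+#     hardwareProfile:
+#       vmSize: Standard_A2
+#     storageProfile:
+#       imageReference:
+#         publisher: Canonical
+#         offer: UbuntuServer
+#         sku: 14.04.5-LTS
+#         version: latest
+#     osProfile:
+#       computerName: myvm
+#       adminUserName: ubuntu
+#       adminPassword: '********'
+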
+
+
+ aria.azure.nodes.network.LoadBalancer:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource.
+ required: false
+ resource_group_name:
+ type: string
+ description: >
+ Name of the Resource Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Resource Group node)
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.LoadBalancerConfig
+ required: false
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.network.loadbalancer.create
+ configure: cloudify-azure-plugin > cloudify_azure.resources.network.loadbalancer.configure
+ delete: cloudify-azure-plugin > cloudify_azure.resources.network.loadbalancer.delete
+
+
+
+
+ aria.azure.nodes.network.LoadBalancer.BackendAddressPool:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource.
+ required: false
+ resource_group_name:
+ type: string
+ description: >
+ Name of the Resource Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Resource Group node)
+ required: false
+ load_balancer_name:
+ type: string
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.network.loadbalancer.create_backend_pool
+ delete: cloudify-azure-plugin > cloudify_azure.resources.network.loadbalancer.delete_backend_pool
+
+
+
+ aria.azure.nodes.network.LoadBalancer.Probe:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource.
+ required: false
+ resource_group_name:
+ type: string
+ description: >
+ Name of the Resource Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Resource Group node)
+ required: false
+ load_balancer_name:
+ type: string
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.LoadBalancerProbeConfig
+ required: false
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.network.loadbalancer.create_probe
+ delete: cloudify-azure-plugin > cloudify_azure.resources.network.loadbalancer.delete_probe
+
+
+ aria.azure.nodes.network.LoadBalancer.IncomingNATRule:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource.
+ required: false
+ resource_group_name:
+ type: string
+ description: >
+ Name of the Resource Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Resource Group node)
+ required: false
+ load_balancer_name:
+ type: string
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.LoadBalancerIncomingNATRuleConfig
+ required: false
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.network.loadbalancer.create_incoming_nat_rule
+ delete: cloudify-azure-plugin > cloudify_azure.resources.network.loadbalancer.delete_incoming_nat_rule
+
+
+
+ aria.azure.nodes.network.LoadBalancer.Rule:
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name for the resource.
+ required: false
+ resource_group_name:
+ type: string
+ description: >
+ Name of the Resource Group that the existing resource belongs to
+ (this is only useful when not using a relationship between a resource
+ node and a Resource Group node)
+ required: false
+ load_balancer_name:
+ type: string
+ required: false
+ location:
+ type: string
+ description: >
+ Specifies the supported Azure location for the resource
+ required: false
+ tags:
+ description: >
+ Specifies a dictionary of one or more name and value pairs that describe a tag
+ required: false
+ type: string
+ resource_config:
+ description: >
+ A dictionary of values to pass as properties when creating the resource
+ type: aria.azure.datatypes.LoadBalancerRuleConfig
+ required: false
+ use_external_resource:
+ description: >
+ Indicate whether the resource exists or if Cloudify should create the resource
+ type: boolean
+ default: false
+ required: true
+ retry_after:
+ description: >
+ Overrides the Azure-specified "retry_after" response. This property
+ will set the number of seconds for each task retry interval (in the
+ case of iteratively checking the status of an asynchronous operation)
+ type: integer
+ required: false
+ azure_config:
+ description: >
+ A dictionary of values to pass to authenticate with the Azure API
+ type: aria.azure.datatypes.Config
+ required: false
+ interfaces:
+ Standard:
+ create: cloudify-azure-plugin > cloudify_azure.resources.network.loadbalancer.create_rule
+ delete: cloudify-azure-plugin > cloudify_azure.resources.network.loadbalancer.delete_rule
+
+
+
+
+
+relationship_types:
+ cloudify.azure.relationships.contained_in_resource_group:
+ derived_from: tosca.relationships.HostedOn
+
+ cloudify.azure.relationships.contained_in_storage_account:
+ derived_from: tosca.relationships.HostedOn
+
+ cloudify.azure.relationships.contained_in_virtual_network:
+ derived_from: tosca.relationships.HostedOn
+
+ cloudify.azure.relationships.contained_in_network_security_group:
+ derived_from: tosca.relationships.HostedOn
+
+ cloudify.azure.relationships.contained_in_route_table:
+ derived_from: tosca.relationships.HostedOn
+
+ cloudify.azure.relationships.contained_in_load_balancer:
+ derived_from: tosca.relationships.HostedOn
+
+ cloudify.azure.relationships.depends_on_route_table:
+ derived_from: tosca.relationships.DependsOn
+
+ cloudify.azure.relationships.depends_on_subnet:
+ derived_from: tosca.relationships.DependsOn
+
+ cloudify.azure.relationships.virtual_network_depends_on_storage:
+ derived_from: tosca.relationships.DependsOn
+
+ cloudify.azure.relationships.network_security_group_attached_to_subnet:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ add_source: cloudify-azure-plugin > cloudify_azure.resources.network.subnet.attach_network_security_group
+ remove_source: cloudify-azure-plugin > cloudify_azure.resources.network.subnet.detach_network_security_group
+
+ cloudify.azure.relationships.route_table_attached_to_subnet:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ add_source: cloudify-azure-plugin > cloudify_azure.resources.network.subnet.attach_route_table
+ remove_source: cloudify-azure-plugin > cloudify_azure.resources.network.subnet.detach_route_table
+
+ cloudify.azure.relationships.nic_connected_to_network_security_group:
+ derived_from: tosca.relationships.ConnectsTo
+
+ cloudify.azure.relationships.nic_connected_to_ip_configuration:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ pre_configure_source: cloudify-azure-plugin > cloudify_azure.resources.network.networkinterfacecard.attach_ip_configuration
+
+ cloudify.azure.relationships.lb_connected_to_ip_configuration:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ preconfigure: cloudify-azure-plugin > cloudify_azure.resources.network.loadbalancer.attach_ip_configuration
+
+ cloudify.azure.relationships.ip_configuration_connected_to_subnet:
+ derived_from: tosca.relationships.ConnectsTo
+
+ cloudify.azure.relationships.ip_configuration_connected_to_public_ip:
+ derived_from: tosca.relationships.ConnectsTo
+
+ cloudify.azure.relationships.connected_to_storage_account:
+ derived_from: tosca.relationships.ConnectsTo
+
+ cloudify.azure.relationships.connected_to_data_disk:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ add_target: cloudify-azure-plugin > cloudify_azure.resources.compute.virtualmachine.attach_disk
+ remove_target: cloudify-azure-plugin > cloudify_azure.resources.compute.virtualmachine.detach_disk
+
+
+ cloudify.azure.relationships.connected_to_nic:
+ derived_from: tosca.relationships.ConnectsTo
+
+
+ cloudify.azure.relationships.connected_to_availability_set:
+ derived_from: tosca.relationships.ConnectsTo
+
+ cloudify.azure.relationships.connected_to_ip_configuration:
+ derived_from: tosca.relationships.ConnectsTo
+
+ cloudify.azure.relationships.connected_to_lb_be_pool:
+ derived_from: tosca.relationships.ConnectsTo
+
+ cloudify.azure.relationships.connected_to_lb_probe:
+ derived_from: tosca.relationships.ConnectsTo
+
+ cloudify.azure.relationships.vmx_contained_in_vm:
+ derived_from: tosca.relationships.HostedOn
+
+ cloudify.azure.relationships.nic_connected_to_lb_be_pool:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ add_target: cloudify-azure-plugin > cloudify_azure.resources.network.loadbalancer.attach_nic_to_backend_pool
+ remove_target: cloudify-azure-plugin > cloudify_azure.resources.network.loadbalancer.detach_nic_from_backend_pool
+
+ cloudify.azure.relationships.vm_connected_to_datadisk:
+ derived_from: tosca.relationships.ConnectsTo
+ interfaces:
+ Configure:
+ add_target:
+ implementation: cloudify-azure-plugin > cloudify_azure.resources.compute.virtualmachine.attach_data_disk
+ inputs:
+ lun:
+ description: >
+ Specifies the logical unit number of the data disk in the VM
+ default: 0
+ required: true
+ type: integer
+ remove_target: cloudify-azure-plugin > cloudify_azure.resources.compute.virtualmachine.detach_data_disk
+
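+ # Illustrative sketch (not part of the plugin): how the load balancer node
+ # types and relationship types above might be wired together in a service
+ # template. All node names and property values below are hypothetical.
+ #
+ # node_templates:
+ #   backend_pool:
+ #     type: aria.azure.nodes.network.LoadBalancer.BackendPool
+ #     properties:
+ #       name: lb-be-pool
+ #   lb_rule:
+ #     type: aria.azure.nodes.network.LoadBalancer.Rule
+ #     properties:
+ #       name: lb-http-rule
+ #     requirements:
+ #       - dependency:
+ #           node: backend_pool
+ #           relationship: cloudify.azure.relationships.connected_to_lb_be_pool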
+
+
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/artifacts.yaml b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/artifacts.yaml
new file mode 100644
index 0000000..945622f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/artifacts.yaml
@@ -0,0 +1,121 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+artifact_types:
+
+ tosca.artifacts.Root:
+ _extensions:
+ shorthand_name: Root # ARIA NOTE: omitted in the spec
+ type_qualified_name: tosca:Root
+ specification: tosca-simple-1.0
+ specification_section: 5.3.1
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_ARTIFACTS_ROOT'
+ description: >-
+ This is the default (root) TOSCA Artifact Type definition that all other TOSCA base Artifact Types derive from.
+
+ tosca.artifacts.File:
+ _extensions:
+ shorthand_name: File
+ type_qualified_name: tosca:File
+ specification: tosca-simple-1.0
+ specification_section: 5.3.2
+ description: >-
+ This artifact type is used when an artifact definition needs to have its associated file simply treated as a file and no special handling/handlers are invoked (i.e., it is not treated as either an implementation or deployment artifact type).
+ derived_from: tosca.artifacts.Root
+
+ #
+ # Deployments
+ #
+
+ tosca.artifacts.Deployment:
+ _extensions:
+ shorthand_name: Deployment # ARIA NOTE: omitted in the spec
+ type_qualified_name: tosca:Deployment
+ specification: tosca-simple-1.0
+ specification_section: 5.3.3.1
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_ARTIFACTS_DEPLOYMENT'
+ description: >-
+ This artifact type represents the parent type for all deployment artifacts in TOSCA. This class of artifacts typically
+ represents a binary packaging of an application or service that is used to install/create or deploy it as part of a node's
+ lifecycle.
+ derived_from: tosca.artifacts.Root
+
+ tosca.artifacts.Deployment.Image:
+ _extensions:
+ shorthand_name: Deployment.Image
+ type_qualified_name: tosca:Deployment.Image
+ specification: tosca-simple-1.0
+ specification_section: 5.3.3.3
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_ARTIFACTS_DEPLOYMENT_IMAGE'
+ description: >-
+ This artifact type represents a parent type for any "image" which is an opaque packaging of a TOSCA Node's deployment
+ (whether real or virtual) whose contents are typically already installed and pre-configured (i.e., "stateful") and prepared
+ to be run on a known target container.
+ derived_from: tosca.artifacts.Deployment
+
+ tosca.artifacts.Deployment.Image.VM:
+ _extensions:
+ shorthand_name: Deployment.VM # ARIA NOTE: omitted in the spec
+ type_qualified_name: tosca:Deployment.VM
+ specification: tosca-simple-1.0
+ specification_section: 5.3.3.4
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_ARTIFACTS_DEPLOY_IMAGE_VM'
+ description: >-
+ This artifact represents the parent type for all Virtual Machine (VM) image and container formatted deployment artifacts.
+ These images contain a stateful capture of a machine (e.g., server) including operating system and installed software along
+ with any configurations and can be run on another machine using a hypervisor which virtualizes typical server (i.e.,
+ hardware) resources.
+ derived_from: tosca.artifacts.Deployment
+
+ #
+ # Implementations
+ #
+
+ tosca.artifacts.Implementation:
+ _extensions:
+ shorthand_name: Implementation # ARIA NOTE: omitted in the spec
+ type_qualified_name: tosca:Implementation
+ specification: tosca-simple-1.0
+ specification_section: 5.3.4.1
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_ARTIFACTS_IMPLEMENTATION'
+ description: >-
+ This artifact type represents the parent type for all implementation artifacts in TOSCA. These artifacts are used to
+ implement operations of TOSCA interfaces either directly (e.g., scripts) or indirectly (e.g., config. files).
+ derived_from: tosca.artifacts.Root
+
+ tosca.artifacts.Implementation.Bash:
+ _extensions:
+ shorthand_name: Implementation.Bash # ARIA NOTE: mistake in spec? shouldn't we have "Implementation." as prefix?
+ type_qualified_name: tosca:Implementation.Bash
+ specification: tosca-simple-1.0
+ specification_section: 5.3.4.3
+ description: >-
+ This artifact type represents a Bash script type that contains Bash commands that can be executed on the Unix Bash shell.
+ derived_from: tosca.artifacts.Implementation
+ mime_type: application/x-sh
+ file_ext: [ sh ]
+
+ tosca.artifacts.Implementation.Python:
+ _extensions:
+ shorthand_name: Implementation.Python # ARIA NOTE: mistake in spec? shouldn't we have "Implementation." as prefix?
+ type_qualified_name: tosca:Implementation.Python
+ specification: tosca-simple-1.0
+ specification_section: 5.3.4.4
+ description: >-
+ This artifact type represents a Python file that contains Python language constructs that can be executed within a Python
+ interpreter.
+ derived_from: tosca.artifacts.Implementation
+ mime_type: application/x-python
+ file_ext: [ py ]
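+
+ # Illustrative sketch (not part of this profile): an operation implementation
+ # referencing a Bash artifact. The script path is hypothetical; an orchestrator
+ # can infer tosca.artifacts.Implementation.Bash from the ".sh" file extension
+ # declared above.
+ #
+ # node_templates:
+ #   my_component:
+ #     type: tosca.nodes.SoftwareComponent
+ #     interfaces:
+ #       Standard:
+ #         create: scripts/create.sh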
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/capabilities.yaml b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/capabilities.yaml
new file mode 100644
index 0000000..66a4046
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/capabilities.yaml
@@ -0,0 +1,322 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+capability_types:
+
+ tosca.capabilities.Root:
+ _extensions:
+ shorthand_name: Root # ARIA NOTE: omitted in the spec
+ type_qualified_name: tosca:Root
+ specification: tosca-simple-1.0
+ specification_section: 5.4.1
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_CAPABILITIES_ROOT'
+ description: >-
+ This is the default (root) TOSCA Capability Type definition that all other TOSCA Capability Types derive from.
+
+ tosca.capabilities.Node:
+ _extensions:
+ shorthand_name: Node
+ type_qualified_name: tosca:Node
+ specification: tosca-simple-1.0
+ specification_section: 5.4.2
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_CAPABILITIES_NODE'
+ role: feature
+ description: >-
+ The Node capability indicates the base capabilities of a TOSCA Node Type.
+ derived_from: tosca.capabilities.Root
+
+ tosca.capabilities.Container:
+ _extensions:
+ shorthand_name: Container
+ type_qualified_name: tosca:Container
+ specification: tosca-simple-1.0
+ specification_section: 5.4.3
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_CAPABILITIES_CONTAINER'
+ role: host
+ description: >-
+ The Container capability, when included on a Node Type or Template definition, indicates that the node can act as a container
+ for (or a host for) one or more other declared Node Types.
+ derived_from: tosca.capabilities.Root
+ properties:
+ num_cpus:
+ description: >-
+ Number of (actual or virtual) CPUs associated with the Compute node.
+ type: integer
+ constraints:
+ - greater_or_equal: 1
+ required: false
+ cpu_frequency:
+ description: >-
+ Specifies the operating frequency of the CPU's cores. This property expresses the expected frequency of one (1) CPU as
+ provided by the property "num_cpus".
+ type: scalar-unit.frequency
+ constraints:
+ - greater_or_equal: 0.1 GHz
+ required: false
+ disk_size:
+ description: >-
+ Size of the local disk available to applications running on the Compute node (default unit is MB).
+ type: scalar-unit.size
+ constraints:
+ - greater_or_equal: 0 MB
+ required: false
+ mem_size:
+ description: >-
+ Size of memory available to applications running on the Compute node (default unit is MB).
+ type: scalar-unit.size
+ constraints:
+ - greater_or_equal: 0 MB
+ required: false
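+
+ # Illustrative sketch: a node template may assign values to the Container
+ # capability's properties on its host. The values below are hypothetical.
+ #
+ # node_templates:
+ #   my_host:
+ #     type: tosca.nodes.Compute
+ #     capabilities:
+ #       host:
+ #         properties:
+ #           num_cpus: 2
+ #           cpu_frequency: 2.4 GHz
+ #           mem_size: 4096 MB
+ #           disk_size: 20 GB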
+
+ tosca.capabilities.Attachment:
+ _extensions:
+ shorthand_name: Attachment
+ type_qualified_name: tosca:Attachment
+ specification: tosca-simple-1.0
+ specification_section: 5.4.8
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_CAPABILITIES_ATTACHMENT'
+ description: >-
+ This is the default TOSCA type that should be used or extended to define an attachment capability of a (logical)
+ infrastructure device node (e.g., BlockStorage node).
+ derived_from: tosca.capabilities.Root
+
+ tosca.capabilities.OperatingSystem:
+ _extensions:
+ shorthand_name: OperatingSystem
+ type_qualified_name: tosca:OperatingSystem
+ specification: tosca-simple-1.0
+ specification_section: 5.4.9
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_CAPABILITIES_OPSYS'
+ description: >-
+ This is the default TOSCA type that should be used to express an Operating System capability for a node.
+ derived_from: tosca.capabilities.Root
+ properties:
+ architecture:
+ description: >-
+ The Operating System (OS) architecture. Examples of valid values include: x86_32, x86_64, etc.
+ type: string
+ required: false
+ type:
+ description: >-
+ The Operating System (OS) type. Examples of valid values include: linux, aix, mac, windows, etc.
+ type: string
+ required: false
+ distribution:
+ description: >-
+ The Operating System (OS) distribution. Examples of valid values for a "type" of "Linux" would include: debian, fedora,
+ rhel and ubuntu.
+ type: string
+ required: false
+ version:
+ description: >-
+ The Operating System version.
+ type: version
+ required: false
+
+ tosca.capabilities.Scalable:
+ _extensions:
+ shorthand_name: Scalable
+ type_qualified_name: tosca:Scalable
+ specification: tosca-simple-1.0
+ specification_section: 5.4.10
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_CAPABILITIES_SCALABLE'
+ role: scaling
+ description: >-
+ This is the default TOSCA type that should be used to express a scalability capability for a node.
+ derived_from: tosca.capabilities.Root
+ properties:
+ min_instances:
+ description: >-
+ This property is used to indicate the minimum number of instances that should be created for the associated TOSCA Node
+ Template by a TOSCA orchestrator.
+ type: integer
+ default: 1
+ max_instances:
+ description: >-
+ This property is used to indicate the maximum number of instances that should be created for the associated TOSCA Node
+ Template by a TOSCA orchestrator.
+ type: integer
+ default: 1
+ default_instances:
+ description: >-
+ An optional property that indicates the requested default number of instances that should be the starting number of
+ instances a TOSCA orchestrator should attempt to allocate. Note: The value for this property MUST be in the range between
+ the values set for "min_instances" and "max_instances" properties.
+ type: integer
+ required: false
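+
+ # Illustrative sketch: assigning Scalable properties on a node template's
+ # capability; note that default_instances must fall between min_instances
+ # and max_instances. Values are hypothetical.
+ #
+ # capabilities:
+ #   scalable:
+ #     properties:
+ #       min_instances: 1
+ #       max_instances: 10
+ #       default_instances: 3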
+
+ #
+ # Endpoints
+ #
+
+ tosca.capabilities.Endpoint:
+ _extensions:
+ shorthand_name: Endpoint
+ type_qualified_name: tosca:Endpoint
+ specification: tosca-simple-1.0
+ specification_section: 5.4.4
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_CAPABILITIES_ENDPOINT'
+ description: >-
+ This is the default TOSCA type that should be used or extended to define a network endpoint capability. This includes the information to express a basic endpoint with a single port or a complex endpoint with multiple ports. By default the Endpoint is assumed to represent an address on a private network unless otherwise specified.
+ derived_from: tosca.capabilities.Root
+ properties:
+ protocol:
+ description: >-
+ The name of the protocol (i.e., the protocol prefix) that the endpoint accepts (any OSI Layer 4-7 protocols). Examples:
+ http, https, ftp, tcp, udp, etc.
+ type: string
+ default: tcp
+ required: true
+ port:
+ description: >-
+ The optional port of the endpoint.
+ type: tosca.datatypes.network.PortDef
+ required: false
+ secure:
+ description: >-
+ Requests for the endpoint to be secure and use credentials supplied on the ConnectsTo relationship.
+ type: boolean
+ default: false
+ required: false
+ url_path:
+ description: >-
+ The optional URL path of the endpoint's address if applicable for the protocol.
+ type: string
+ required: false
+ port_name:
+ description: >-
+ The optional name (or ID) of the network port this endpoint should be bound to.
+ type: string
+ required: false
+ network_name:
+ description: >-
+ The optional name (or ID) of the network this endpoint should be bound to. network_name: PRIVATE | PUBLIC |
+ <network_name> | <network_id>.
+ type: string
+ default: PRIVATE
+ required: false
+ initiator:
+ description: >-
+ The optional indicator of the direction of the connection.
+ type: string
+ constraints:
+ - valid_values: [ source, target, peer ]
+ default: source
+ required: false
+ ports:
+ description: >-
+ The optional map of ports the Endpoint supports (if more than one).
+ type: map
+ entry_schema:
+ type: tosca.datatypes.network.PortSpec
+ constraints:
+ - min_length: 1
+ required: false
+ attributes:
+ ip_address:
+ description: >-
+ Note: This is the IP address as propagated up by the associated node's host (Compute) container.
+ type: string
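+
+ # Illustrative sketch: assigning Endpoint properties on a node template's
+ # capability. The capability name and values are hypothetical.
+ #
+ # capabilities:
+ #   data_endpoint:
+ #     properties:
+ #       protocol: http
+ #       port: 8080
+ #       url_path: /api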
+
+ tosca.capabilities.Endpoint.Public:
+ _extensions:
+ shorthand_name: Endpoint.Public
+ type_qualified_name: tosca:Endpoint.Public
+ specification: tosca-simple-1.0
+ specification_section: 5.4.5
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_CAPABILITIES_ENDPOINT_PUBLIC'
+ description: >-
+ This capability represents a public endpoint which is accessible to the general internet (and its public IP address ranges).
+
+ This public endpoint capability also can be used to create a floating (IP) address that the underlying network assigns from a
+ pool allocated from the application's underlying public network. This floating address is managed by the underlying network
+ such that it can be routed to an application's private address and remains reliable to internet clients.
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ network_name:
+ type: string
+ constraints:
+ - equal: PUBLIC
+ default: PUBLIC
+ floating:
+ description: >-
+ Indicates that the public address should be allocated from a pool of floating IPs that are associated with the network.
+ type: boolean
+ default: false
+ status: experimental
+ dns_name:
+ description: >-
+ The optional name to register with DNS.
+ type: string
+ required: false
+ status: experimental
+
+ tosca.capabilities.Endpoint.Admin:
+ _extensions:
+ shorthand_name: Endpoint.Admin
+ type_qualified_name: tosca:Endpoint.Admin
+ specification: tosca-simple-1.0
+ specification_section: 5.4.6
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_CAPABILITIES_ENDPOINT_ADMIN'
+ description: >-
+ This is the default TOSCA type that should be used or extended to define a specialized administrator endpoint capability.
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ secure:
+ description: >-
+ Requests for the endpoint to be secure and use credentials supplied on the ConnectsTo relationship.
+ type: boolean
+ constraints:
+ - equal: true
+ default: true
+
+ tosca.capabilities.Endpoint.Database:
+ _extensions:
+ shorthand_name: Endpoint.Database
+ type_qualified_name: tosca:Endpoint.Database
+ specification: tosca-simple-1.0
+ specification_section: 5.4.7
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_CAPABILITIES_ENDPOINT_DATABASE'
+ description: >-
+ This is the default TOSCA type that should be used or extended to define a specialized database endpoint capability.
+ derived_from: tosca.capabilities.Endpoint
+
+ #
+ # Network
+ #
+
+ tosca.capabilities.network.Bindable:
+ _extensions:
+ shorthand_name: Bindable # ARIA NOTE: mistake in spec? has "network." as a prefix
+ type_qualified_name: tosca:Bindable
+ specification: tosca-simple-1.0
+ specification_section: 5.4.11
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_CAPABILITIES_NETWORK_BINDABLE'
+ description: >-
+ A node type that includes the Bindable capability indicates that it can be bound to a logical network association via a
+ network port.
+ derived_from: tosca.capabilities.Node
+
+ tosca.capabilities.network.Linkable:
+ _extensions:
+ shorthand_name: Linkable
+ type_qualified_name: tosca:Linkable
+ specification: tosca-simple-1.0
+ specification_section: 7.5.3
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_CAPABILITIES_NETWORK_LINKABLE'
+ description: >-
+ A node type that includes the Linkable capability indicates that it can be pointed to by the
+ tosca.relationships.network.LinksTo relationship type.
+ derived_from: tosca.capabilities.Node
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/data.yaml b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/data.yaml
new file mode 100644
index 0000000..61d4186
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/data.yaml
@@ -0,0 +1,268 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+data_types:
+
+ #
+ # Primitive
+ #
+
+ timestamp:
+ _extensions:
+ coerce_value: aria_extension_tosca.simple_v1_0.data_types.coerce_timestamp
+
+ version:
+ _extensions:
+ coerce_value: aria_extension_tosca.simple_v1_0.data_types.coerce_version
+ type_qualified_name: tosca:version
+ specification: tosca-simple-1.0
+ specification_section: 3.2.2
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#TYPE_TOSCA_VERSION'
+
+ range:
+ _extensions:
+ coerce_value: aria_extension_tosca.simple_v1_0.data_types.coerce_range
+ type_qualified_name: tosca:range
+ specification: tosca-simple-1.0
+ specification_section: 3.2.3
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#TYPE_TOSCA_RANGE'
+
+ #
+ # With entry schema
+ #
+
+ list:
+ _extensions:
+ use_entry_schema: true
+ coerce_value: aria_extension_tosca.simple_v1_0.data_types.coerce_list
+ type_qualified_name: tosca:list
+ specification: tosca-simple-1.0
+ specification_section: 3.2.4
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#TYPE_TOSCA_LIST'
+
+ map:
+ _extensions:
+ use_entry_schema: true
+ coerce_value: aria_extension_tosca.simple_v1_0.data_types.coerce_map_value
+ type_qualified_name: tosca:map
+ specification: tosca-simple-1.0
+ specification_section: 3.2.5
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#TYPE_TOSCA_MAP'
+
+ #
+ # Scalar
+ #
+
+ scalar-unit.size:
+ _extensions:
+ coerce_value: aria_extension_tosca.simple_v1_0.data_types.coerce_scalar_unit_size
+ type_qualified_name: tosca:scalar-unit.size
+ specification: tosca-simple-1.0
+ specification_section: 3.2.6.4
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#TYPE_TOSCA_SCALAR_UNIT_SIZE'
+
+ scalar-unit.time:
+ _extensions:
+ coerce_value: aria_extension_tosca.simple_v1_0.data_types.coerce_scalar_unit_time
+ type_qualified_name: tosca:scalar-unit.time
+ specification: tosca-simple-1.0
+ specification_section: 3.2.6.5
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#TYPE_TOSCA_SCALAR_UNIT_TIME'
+
+ scalar-unit.frequency:
+ _extensions:
+ coerce_value: aria_extension_tosca.simple_v1_0.data_types.coerce_scalar_unit_frequency
+ type_qualified_name: tosca:scalar-unit.frequency
+ specification: tosca-simple-1.0
+ specification_section: 3.2.6.6
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#TYPE_TOSCA_SCALAR_UNIT_FREQUENCY'
+
+ #
+ # Complex
+ #
+
+ tosca.datatypes.Root:
+ _extensions:
+ shorthand_name: Root # ARIA NOTE: omitted in the spec
+ type_qualified_name: tosca:Root
+ specification: tosca-simple-1.0
+ specification_section: 5.2.1
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#TYPE_TOSCA_DATA_ROOT'
+ description: >-
+ This is the default (root) TOSCA data Type definition that all complex TOSCA Data Types derive from.
+
+ tosca.datatypes.Credential:
+ _extensions:
+ shorthand_name: Credential
+ type_qualified_name: tosca:Credential
+ specification: tosca-simple-1.0
+ specification_section: 5.2.2
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#TYPE_TOSCA_DATA_CREDENTIAL'
+ description: >-
+ The Credential type is a complex TOSCA data Type used when describing authorization credentials used to access network
+ accessible resources.
+ derived_from: tosca.datatypes.Root
+ properties:
+ protocol:
+ description: >-
+ The optional protocol name.
+ type: string
+ required: false
+ token_type:
+ description: >-
+ The required token type.
+ type: string
+ default: password
+ token:
+ description: >-
+ The required token used as a credential for authorization or access to a networked resource.
+ type: string
+ required: false
+ keys:
+ description: >-
+ The optional list of protocol-specific keys or assertions.
+ type: map
+ entry_schema:
+ type: string
+ required: false
+ user:
+ description: >-
+ The optional user (name or ID) used for non-token based credentials.
+ type: string
+ required: false
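+
+ # Illustrative sketch: a Credential value as it might be assigned to a
+ # property of this type. All values are placeholders.
+ #
+ # admin_credential:
+ #   protocol: ssh
+ #   token_type: password
+ #   token: <secret>
+ #   user: admin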
+
+ tosca.datatypes.network.NetworkInfo:
+ _extensions:
+ shorthand_name: NetworkInfo
+ type_qualified_name: tosca:NetworkInfo
+ specification: tosca-simple-1.0
+ specification_section: 5.2.3
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#TYPE_TOSCA_DATA_NETWORKINFO'
+ description: >-
+ The Network type is a complex TOSCA data type used to describe logical network information.
+ derived_from: tosca.datatypes.Root
+ properties:
+ network_name:
+ description: >-
+ The name of the logical network (e.g., "public", "private", "admin", etc.).
+ type: string
+ required: false
+ network_id:
+ description: >-
+ The unique ID for the network generated by the network provider.
+ type: string
+ required: false
+ addresses:
+ description: >-
+ The list of IP addresses assigned from the underlying network.
+ type: list
+ entry_schema:
+ type: string
+ required: false
+
+ tosca.datatypes.network.PortInfo:
+ _extensions:
+ shorthand_name: PortInfo
+ type_qualified_name: tosca:PortInfo
+ specification: tosca-simple-1.0
+ specification_section: 5.2.4
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#TYPE_TOSCA_DATA_PORTINFO'
+ description: >-
+ The PortInfo type is a complex TOSCA data type used to describe network port information.
+ derived_from: tosca.datatypes.Root
+ properties:
+ port_name:
+ description: >-
+ The logical network port name.
+ type: string
+ required: false
+ port_id:
+ description: >-
+ The unique ID for the network port generated by the network provider.
+ type: string
+ required: false
+ network_id:
+ description: >-
+ The unique ID for the network.
+ type: string
+ required: false
+ mac_address:
+ description: >-
+ The unique media access control address (MAC address) assigned to the port.
+ type: string
+ required: false
+ addresses:
+ description: >-
+ The list of IP address(es) assigned to the port.
+ type: list
+ entry_schema:
+ type: string
+ required: false
+
+ tosca.datatypes.network.PortDef:
+ _extensions:
+ shorthand_name: PortDef
+ type_qualified_name: tosca:PortDef
+ specification: tosca-simple-1.0
+ specification_section: 5.2.5
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#TYPE_TOSCA_DATA_PORTDEF'
+ description: >-
+ The PortDef type is a TOSCA data Type used to define a network port.
+ derived_from: integer # ARIA NOTE: we allow deriving from primitives
+ constraints:
+ - in_range: [ 1, 65535 ]
+
+ tosca.datatypes.network.PortSpec:
+ _extensions:
+ shorthand_name: PortSpec
+ type_qualified_name: tosca:PortSpec
+ specification: tosca-simple-1.0
+ specification_section: 5.2.6
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#TYPE_TOSCA_DATA_PORTSPEC'
+ description: >-
+ The PortSpec type is a complex TOSCA data Type used when describing port specifications for a network connection.
+ derived_from: tosca.datatypes.Root
+ properties:
+ protocol:
+ description: >-
+ The required protocol used on the port.
+ type: string
+ constraints:
+ - valid_values: [ udp, tcp, igmp ]
+ default: tcp
+ source:
+ description: >-
+ The optional source port.
+ type: tosca.datatypes.network.PortDef
+ required: false
+ source_range:
+ description: >-
+ The optional range for source port.
+ type: range
+ constraints:
+ - in_range: [ 1, 65535 ]
+ required: false
+ target:
+ description: >-
+ The optional target port.
+ type: tosca.datatypes.network.PortDef
+ required: false
+ target_range:
+ description: >-
+ The optional range for target port.
+ type: range
+ constraints:
+ - in_range: [ 1, 65535 ]
+ required: false
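+
+ # Illustrative sketch: a PortSpec value as it might be assigned to a property
+ # of this type; "source" takes a single PortDef while "target_range" takes a
+ # range. Values are hypothetical.
+ #
+ # user_port:
+ #   protocol: tcp
+ #   source: 8080
+ #   target_range: [ 8000, 8999 ]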
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/groups.yaml b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/groups.yaml
new file mode 100644
index 0000000..66cc25f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/groups.yaml
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+group_types:
+
+ tosca.groups.Root:
+ _extensions:
+ shorthand_name: Root # ARIA NOTE: omitted in the spec
+ type_qualified_name: tosca:Root
+ specification: tosca-simple-1.0
+ specification_section: 5.9.1
+ description: >-
+ This is the default (root) TOSCA Group Type definition that all other TOSCA base Group Types derive from.
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/interfaces.yaml b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/interfaces.yaml
new file mode 100644
index 0000000..29cc8dd
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/interfaces.yaml
@@ -0,0 +1,107 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+interface_types:
+
+ tosca.interfaces.Root:
+ _extensions:
+ shorthand_name: Root # ARIA NOTE: omitted in the spec
+ type_qualified_name: tosca:Root
+ specification: tosca-simple-1.0
+ specification_section: 5.7.3
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#_Ref384391055'
+ description: >-
+ This is the default (root) TOSCA Interface Type definition that all other TOSCA Interface Types derive from.
+
+ tosca.interfaces.node.lifecycle.Standard:
+ _extensions:
+ shorthand_name: Standard
+ type_qualified_name: tosca:Standard
+ specification: tosca-simple-1.0
+ specification_section: 5.7.4
+ description: >-
+ This lifecycle interface defines the essential, normative operations that TOSCA nodes may support.
+ derived_from: tosca.interfaces.Root
+ create:
+ description: >-
+ Standard lifecycle create operation.
+ configure:
+ description: >-
+ Standard lifecycle configure operation.
+ start:
+ description: >-
+ Standard lifecycle start operation.
+ stop:
+ description: >-
+ Standard lifecycle stop operation.
+ delete:
+ description: >-
+ Standard lifecycle delete operation.
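+
+ # Illustrative sketch: a node template implementing Standard lifecycle
+ # operations with scripts; the paths and operation input are hypothetical.
+ #
+ # interfaces:
+ #   Standard:
+ #     create: scripts/install.sh
+ #     start:
+ #       implementation: scripts/start.sh
+ #       inputs:
+ #         port: 8080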
+
+ tosca.interfaces.relationship.Configure:
+ _extensions:
+ shorthand_name: Configure
+ type_qualified_name: tosca:Configure
+ specification: tosca-simple-1.0
+ specification_section: 5.7.5
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_ITFC_RELATIONSHIP_CONFIGURE'
+ description: >-
+ The lifecycle interfaces define the essential, normative operations that each TOSCA Relationship Type may support.
+ derived_from: tosca.interfaces.Root
+ pre_configure_source:
+ description: >-
+ Operation to pre-configure the source endpoint.
+ _extensions:
+ relationship_edge: source
+ pre_configure_target:
+ description: >-
+ Operation to pre-configure the target endpoint.
+ _extensions:
+ relationship_edge: target
+ post_configure_source:
+ description: >-
+ Operation to post-configure the source endpoint.
+ _extensions:
+ relationship_edge: source
+ post_configure_target:
+ description: >-
+ Operation to post-configure the target endpoint.
+ _extensions:
+ relationship_edge: target
+ add_target:
+ description: >-
+ Operation to notify the source node of a target node being added via a relationship.
+ _extensions:
+ relationship_edge: source
+ add_source:
+ description: >-
+ Operation to notify the target node of a source node which is now available via a relationship.
+ _extensions:
+ relationship_edge: target
+ target_changed:
+ description: >-
+ Operation to notify the source node that some property or attribute of the target node has changed.
+ _extensions:
+ relationship_edge: source
+ remove_target:
+ description: >-
+ Operation to remove a target node.
+ _extensions:
+ relationship_edge: source
+ remove_source:
+ description: >-
+ Operation to remove the source node.
+ _extensions:
+ relationship_edge: target
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/nodes.yaml b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/nodes.yaml
new file mode 100644
index 0000000..05963b7
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/nodes.yaml
@@ -0,0 +1,525 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+node_types:
+
+ tosca.nodes.Root:
+ _extensions:
+ shorthand_name: Root
+ type_qualified_name: tosca:Root
+ specification: tosca-simple-1.0
+ specification_section: 5.8.1
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_NODES_ROOT'
+ description: >-
+ The TOSCA Root Node Type is the default type that all other TOSCA base Node Types derive from. This allows for all TOSCA nodes to have a consistent set of features for modeling and management (e.g., consistent definitions for requirements, capabilities and lifecycle interfaces).
+ attributes:
+ tosca_id:
+ description: >-
+ A unique identifier of the realized instance of a Node Template that derives from any TOSCA normative type.
+ type: string
+ tosca_name:
+ description: >-
+ This attribute reflects the name of the Node Template as defined in the TOSCA service template. This name is not unique
+ to the realized instance model of the corresponding deployed application, as each template in the model can result in one or
+ more instances (e.g., scaled) when orchestrated to a provider environment.
+ type: string
+ state:
+ description: >-
+ The state of the node instance.
+ type: string
+ default: initial
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
+ capabilities:
+ feature:
+ type: tosca.capabilities.Node
+ requirements:
+ - dependency:
+ capability: tosca.capabilities.Node
+ node: tosca.nodes.Root
+ relationship: tosca.relationships.DependsOn
+ occurrences: [ 0, UNBOUNDED ]
+
+ tosca.nodes.Compute:
+ _extensions:
+ shorthand_name: Compute
+ type_qualified_name: tosca:Compute
+ specification: tosca-simple-1.0
+ specification_section: 5.8.2
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_NODES_COMPUTE'
+ role: host
+ description: >-
+ The TOSCA Compute node represents one or more real or virtual processors of software applications or services along with
+ other essential local resources. Collectively, the resources the compute node represents can logically be viewed as a (real
+ or virtual) "server".
+ derived_from: tosca.nodes.Root
+ attributes:
+ private_address:
+ description: >-
+ The primary private IP address assigned by the cloud provider that applications may use to access the Compute node.
+ type: string
+ public_address:
+ description: >-
+ The primary public IP address assigned by the cloud provider that applications may use to access the Compute node.
+ type: string
+ networks:
+ description: >-
+ The list of logical networks assigned to the compute host instance and information about them.
+ type: map
+ entry_schema:
+ type: tosca.datatypes.network.NetworkInfo
+ ports:
+ description: >-
+ The list of logical ports assigned to the compute host instance and information about them.
+ type: map
+ entry_schema:
+ type: tosca.datatypes.network.PortInfo
+ capabilities:
+ host:
+ type: tosca.capabilities.Container
+ valid_source_types: [ tosca.nodes.SoftwareComponent ]
+ binding:
+ type: tosca.capabilities.network.Bindable
+ os:
+ type: tosca.capabilities.OperatingSystem
+ scalable:
+ type: tosca.capabilities.Scalable
+ requirements:
+ - local_storage:
+ capability: tosca.capabilities.Attachment
+ node: tosca.nodes.BlockStorage
+ relationship: tosca.relationships.AttachesTo
+ occurrences: [ 0, UNBOUNDED ]
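+
+ # Illustrative sketch: a Compute node template attaching local block storage
+ # through the local_storage requirement. Names and the mount location are
+ # hypothetical.
+ #
+ # node_templates:
+ #   my_server:
+ #     type: tosca.nodes.Compute
+ #     requirements:
+ #       - local_storage:
+ #           node: my_volume
+ #           relationship:
+ #             type: tosca.relationships.AttachesTo
+ #             properties:
+ #               location: /mnt/data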
+
+ tosca.nodes.LoadBalancer:
+ _extensions:
+ shorthand_name: LoadBalancer
+ type_qualified_name: tosca:LoadBalancer
+ specification: tosca-simple-1.0
+ specification_section: 5.8.12
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#_Toc379548332'
+ description: >-
+ The TOSCA Load Balancer node represents a logical function that can be used in conjunction with a Floating Address to distribute an
+ application's traffic (load) across a number of instances of the application (e.g., for a clustered or scaled application).
+ derived_from: tosca.nodes.Root
+ properties:
+ algorithm:
+ description: >-
+ No description in spec.
+ type: string
+ required: false
+ status: experimental
+ capabilities:
+ client:
+ description: >-
+ The Floating (IP) address that clients on the public network can connect to.
+ type: tosca.capabilities.Endpoint.Public
+ occurrences: [ 0, UNBOUNDED ] # ARIA NOTE: it seems unnecessary to specify this, as it is the implied default
+ requirements:
+ - application:
+ capability: tosca.capabilities.Endpoint
+ relationship: tosca.relationships.RoutesTo
+ occurrences: [ 0, UNBOUNDED ]
+
+ #
+ # Software
+ #
+
+ tosca.nodes.SoftwareComponent:
+ _extensions:
+ shorthand_name: SoftwareComponent
+ type_qualified_name: tosca:SoftwareComponent
+ specification: tosca-simple-1.0
+ specification_section: 5.8.3
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_NODES_SOFTWARE_COMPONENT'
+ description: >-
+ The TOSCA SoftwareComponent node represents a generic software component that can be managed and run by a TOSCA Compute Node
+ Type.
+ derived_from: tosca.nodes.Root
+ properties:
+ component_version:
+ description: >-
+ The optional software component's version.
+ type: version
+ required: false
+ admin_credential:
+ description: >-
+ The optional credential that can be used to authenticate to the software component.
+ type: tosca.datatypes.Credential
+ required: false
+ requirements:
+ - host:
+ capability: tosca.capabilities.Container
+ node: tosca.nodes.Compute
+ relationship: tosca.relationships.HostedOn
+
+ tosca.nodes.WebServer:
+ _extensions:
+ shorthand_name: WebServer
+ type_qualified_name: tosca:WebServer
+ specification: tosca-simple-1.0
+ specification_section: 5.8.4
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_NODES_WEBSERVER'
+ description: >-
+ This TOSCA WebServer Node Type represents an abstract software component or service that is capable of hosting and providing
+ management operations for one or more WebApplication nodes.
+ derived_from: tosca.nodes.SoftwareComponent
+ capabilities:
+ data_endpoint:
+ type: tosca.capabilities.Endpoint
+ admin_endpoint:
+ type: tosca.capabilities.Endpoint.Admin
+ host:
+ type: tosca.capabilities.Container
+ valid_source_types: [ tosca.nodes.WebApplication ]
+
+ tosca.nodes.WebApplication:
+ _extensions:
+ shorthand_name: WebApplication
+ type_qualified_name: tosca:WebApplication
+ specification: tosca-simple-1.0
+ specification_section: 5.8.5
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_NODES_WEBAPPLICATION'
+ description: >-
+ The TOSCA WebApplication node represents a software application that can be managed and run by a TOSCA WebServer node.
+ Specific types of web applications such as Java, etc. could be derived from this type.
+ derived_from: tosca.nodes.SoftwareComponent # ARIA NOTE: the spec says tosca.nodes.Root
+ properties:
+ context_root:
+ description: >-
+ The web application's context root which designates the application's URL path within the web server it is hosted on.
+ type: string
+ required: false
+ capabilities:
+ app_endpoint:
+ type: tosca.capabilities.Endpoint
+ requirements:
+ - host:
+ capability: tosca.capabilities.Container
+ node: tosca.nodes.WebServer
+ relationship: tosca.relationships.HostedOn
+
+ tosca.nodes.DBMS:
+ _extensions:
+ shorthand_name: DBMS # ARIA NOTE: omitted in the spec
+ type_qualified_name: tosca:DBMS
+ specification: tosca-simple-1.0
+ specification_section: 5.8.6
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_NODES_DBMS'
+ description: >-
+ The TOSCA DBMS node represents a typical relational, SQL Database Management System software component or service.
+ derived_from: tosca.nodes.SoftwareComponent
+ properties:
+ root_password:
+ description: >-
+ The optional root password for the DBMS server.
+ type: string
+ required: false
+ port:
+ description: >-
+ The DBMS server's port.
+ type: integer
+ required: false
+ capabilities:
+ host:
+ type: tosca.capabilities.Container
+ valid_source_types: [ tosca.nodes.Database ]
+
+ tosca.nodes.Database:
+ _extensions:
+ shorthand_name: Database
+ type_qualified_name: tosca:Database
+ specification: tosca-simple-1.0
+ specification_section: 5.8.7
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_NODES_DATABASE'
+ description: >-
+ The TOSCA Database node represents a logical database that can be managed and hosted by a TOSCA DBMS node.
+ derived_from: tosca.nodes.Root # ARIA NOTE: it's *not* a SoftwareComponent
+ properties:
+ name:
+ description: >-
+ The logical database Name.
+ type: string
+ port:
+ description: >-
+ The port the database service will use to listen for incoming data and requests.
+ type: integer
+ required: false
+ user:
+ description: >-
+ The special user account used for database administration.
+ type: string
+ required: false
+ password:
+ description: >-
+ The password associated with the user account provided in the 'user' property.
+ type: string
+ required: false
+ capabilities:
+ database_endpoint:
+ type: tosca.capabilities.Endpoint.Database
+ requirements:
+ - host:
+ capability: tosca.capabilities.Container
+ node: tosca.nodes.DBMS
+ relationship: tosca.relationships.HostedOn
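+
+ # Illustrative sketch: a Database hosted on a DBMS, itself hosted on a
+ # Compute node. Names and values are hypothetical.
+ #
+ # node_templates:
+ #   my_database:
+ #     type: tosca.nodes.Database
+ #     properties:
+ #       name: app_db
+ #       port: 3306
+ #     requirements:
+ #       - host: my_dbms
+ #   my_dbms:
+ #     type: tosca.nodes.DBMS
+ #     requirements:
+ #       - host: my_server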
+
+ #
+ # Container
+ #
+
+ tosca.nodes.Container.Runtime:
+ _extensions:
+ shorthand_name: Container.Runtime
+ type_qualified_name: tosca:Container.Runtime
+ specification: tosca-simple-1.0
+ specification_section: 5.8.10
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_NODES_CONTAINER_RUNTIME'
+ description: >-
+ The TOSCA Container Runtime node represents operating system-level virtualization technology used to run multiple application
+ services on a single Compute host.
+ derived_from: tosca.nodes.SoftwareComponent
+ capabilities:
+ host:
+ type: tosca.capabilities.Container
+ scalable:
+ type: tosca.capabilities.Scalable
+
+ tosca.nodes.Container.Application:
+ _extensions:
+ shorthand_name: Container.Application
+ type_qualified_name: tosca:Container.Application
+ specification: tosca-simple-1.0
+ specification_section: 5.8.11
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_NODES_CONTAINER_APP'
+ description: >-
+ The TOSCA Container Application node represents an application that requires Container-level virtualization technology.
+ derived_from: tosca.nodes.Root
+ requirements:
+ - host:
+ capability: tosca.capabilities.Container
+ # ARIA NOTE: seems a mistake in the spec
+ #node: tosca.nodes.Container
+ relationship: tosca.relationships.HostedOn
+
+ #
+ # Storage
+ #
+
+ tosca.nodes.ObjectStorage:
+ _extensions:
+ shorthand_name: ObjectStorage
+ type_qualified_name: tosca:ObjectStorage
+ specification: tosca-simple-1.0
+ specification_section: 5.8.8
+ description: >-
+ The TOSCA ObjectStorage node represents storage that provides the ability to store data as objects (or BLOBs of data) without
+ consideration for the underlying filesystem or devices.
+ derived_from: tosca.nodes.Root
+ properties:
+ name:
+ description: >-
+ The logical name of the object store (or container).
+ type: string
+ size:
+ description: >-
+ The requested initial storage size (default unit is in Gigabytes).
+ type: scalar-unit.size
+ constraints:
+ - greater_or_equal: 0 GB
+ required: false
+ maxsize:
+ description: >-
+ The requested maximum storage size (default unit is in Gigabytes).
+ type: scalar-unit.size
+ constraints:
+ - greater_or_equal: 0 GB
+ required: false
+ capabilities:
+ storage_endpoint:
+ type: tosca.capabilities.Endpoint
+
+ tosca.nodes.BlockStorage:
+ _extensions:
+ shorthand_name: BlockStorage
+ type_qualified_name: tosca:BlockStorage
+ specification: tosca-simple-1.0
+ specification_section: 5.8.9
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_NODES_BLOCK_STORAGE'
+ description: >-
+ The TOSCA BlockStorage node currently represents a server-local block storage device (i.e., not shared) offering evenly
+ sized blocks of data from which raw storage volumes can be created.
+ derived_from: tosca.nodes.Root
+ properties:
+ size:
+ description: >-
+ The requested storage size (default unit is MB).
+ type: scalar-unit.size
+ constraints:
+ - greater_or_equal: 1 MB
+ volume_id:
+ description: >-
+ ID of an existing volume (that is in the accessible scope of the requesting application).
+ type: string
+ required: false
+ snapshot_id:
+ description: >-
+ Some identifier that represents an existing snapshot that should be used when creating the block storage (volume).
+ type: string
+ required: false
+ capabilities:
+ attachment:
+ type: tosca.capabilities.Attachment
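+
+ # Illustrative sketch: a BlockStorage template reusing an existing volume;
+ # the size and volume_id values are placeholders.
+ #
+ # node_templates:
+ #   my_volume:
+ #     type: tosca.nodes.BlockStorage
+ #     properties:
+ #       size: 10 GB
+ #       volume_id: <existing-volume-id>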
+
+ #
+ # Network
+ #
+
+ tosca.nodes.network.Network:
+ _extensions:
+ shorthand_name: Network
+ type_qualified_name: tosca:Network
+ specification: tosca-simple-1.0
+ specification_section: 7.5.1
+ description: >-
+ The TOSCA Network node represents a simple, logical network service.
+ derived_from: tosca.nodes.Root
+ properties:
+ ip_version:
+ description: >-
+ The IP version of the requested network.
+ type: integer
+ constraints:
+ - valid_values: [ 4, 6 ]
+ default: 4
+ required: false
+ cidr:
+ description: >-
+ The cidr block of the requested network.
+ type: string
+ required: false
+ start_ip:
+ description: >-
+ The IP address to be used as the first one in a pool of addresses derived from the cidr block full IP range.
+ type: string
+ required: false
+ end_ip:
+ description: >-
+ The IP address to be used as the last one in a pool of addresses derived from the cidr block full IP range.
+ type: string
+ required: false
+ gateway_ip:
+ description: >-
+ The gateway IP address.
+ type: string
+ required: false
+ network_name:
+ description: >-
+ An identifier that represents an existing Network instance in the underlying cloud infrastructure, OR the name to be
+ used for a newly created network.
+ type: string
+ required: false
+ network_id:
+ description: >-
+ An identifier that represents an existing Network instance in the underlying cloud infrastructure. This property is
+ mutually exclusive with all other properties except network_name.
+ type: string
+ required: false
+ segmentation_id:
+ description: >-
+ A segmentation identifier in the underlying cloud infrastructure (e.g., VLAN id, GRE tunnel id). If the segmentation_id
+ is specified, the network_type or physical_network properties should be provided as well.
+ type: string
+ required: false
+ network_type:
+ description: >-
+ Optionally, specifies the nature of the physical network in the underlying cloud infrastructure. Examples are flat, vlan,
+ gre or vxlan. For flat and vlan types, physical_network should be provided too.
+ type: string
+ required: false
+ physical_network:
+ description: >-
+ Optionally, identifies the physical network on top of which the network is implemented, e.g. physnet1. This property is
+ required if network_type is flat or vlan.
+ type: string
+ required: false
+ dhcp_enabled:
+ description: >-
+ Indicates whether the TOSCA container should create a virtual network instance with or without a DHCP service.
+ type: boolean
+ default: true
+ required: false
+ capabilities:
+ link:
+ type: tosca.capabilities.network.Linkable
+
+ tosca.nodes.network.Port:
+ _extensions:
+ shorthand_name: Port
+ type_qualified_name: tosca:Port
+ specification: tosca-simple-1.0
+ specification_section: 7.5.2
+ description: >-
+ The TOSCA Port node represents a logical entity that associates between Compute and Network normative types.
+
+ The Port node type effectively represents a single virtual NIC on the Compute node instance.
+ derived_from: tosca.nodes.Root
+ properties:
+ ip_address:
+ description: >-
+ Allows the user to set a fixed IP address. Note that this address is a request that the provider will attempt to fulfill
+ but may not be able to, depending on the network the port is associated with.
+ type: string
+ required: false
+ order:
+ description: >-
+ The order of the NIC on the compute instance (e.g. eth2). Note: when binding more than one port to a single compute (aka
+ multi vNICs) and ordering is desired, it is *mandatory* that all ports be set with an order value. The order values must
+ represent a positive arithmetic progression that starts with 0 (e.g. 0, 1, 2, ..., n).
+ type: integer
+ constraints:
+ - greater_or_equal: 0
+ default: 0
+ required: false
+ is_default:
+ description: >-
+ Set is_default=true to apply a default gateway route on the running compute instance to the associated network gateway.
+ Only one port associated with a single compute node can be set as default=true.
+ type: boolean
+ default: false
+ required: false
+ ip_range_start:
+ description: >-
+ Defines the starting IP of a range to be allocated for the compute instances that are associated by this Port. Without
+ setting this property the IP allocation is done from the entire CIDR block of the network.
+ type: string
+ required: false
+ ip_range_end:
+ description: >-
+ Defines the ending IP of a range to be allocated for the compute instances that are associated by this Port. Without
+ setting this property the IP allocation is done from the entire CIDR block of the network.
+ type: string
+ required: false
+ attributes:
+ ip_address:
+ description: >-
+ The IP address that would be assigned to the associated compute instance.
+ type: string
+ requirements:
+ - link:
+ capability: tosca.capabilities.network.Linkable
+ relationship: tosca.relationships.network.LinksTo
+ - binding:
+ capability: tosca.capabilities.network.Bindable
+ relationship: tosca.relationships.network.BindsTo
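+
+ # Example (editorial, illustrative): a sketch of a Port node associating a Compute
+ # node with a Network via the link and binding requirements above; all template
+ # names and the CIDR are hypothetical.
+ #
+ #   node_templates:
+ #     my_server:
+ #       type: tosca.nodes.Compute
+ #     my_network:
+ #       type: tosca.nodes.network.Network
+ #       properties:
+ #         cidr: '192.168.0.0/24'
+ #     my_port:
+ #       type: tosca.nodes.network.Port
+ #       properties:
+ #         order: 0
+ #       requirements:
+ #         - link: my_network
+ #         - binding: my_server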
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/policies.yaml b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/policies.yaml
new file mode 100644
index 0000000..7b35bb9
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/policies.yaml
@@ -0,0 +1,71 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+policy_types:
+
+ tosca.policies.Root:
+ _extensions:
+ shorthand_name: Root # ARIA NOTE: omitted in the spec
+ type_qualified_name: tosca:Root
+ specification: tosca-simple-1.0
+ specification_section: 5.10.1
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_POLICIES_ROOT'
+ description: >-
+ This is the default (root) TOSCA Policy Type definition that all other TOSCA base Policy Types derive from.
+
+ tosca.policies.Placement:
+ _extensions:
+ shorthand_name: Placement # ARIA NOTE: omitted in the spec
+ type_qualified_name: tosca:Placement
+ specification: tosca-simple-1.0
+ specification_section: 5.10.2
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_POLICIES_PLACEMENT'
+ description: >-
+ This is the default (root) TOSCA Policy Type definition that is used to govern placement of TOSCA nodes or groups of nodes.
+ derived_from: tosca.policies.Root
+
+ tosca.policies.Scaling:
+ _extensions:
+ shorthand_name: Scaling # ARIA NOTE: omitted in the spec
+ type_qualified_name: tosca:Scaling
+ specification: tosca-simple-1.0
+ specification_section: 5.10.3
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_POLICIES_SCALING'
+ description: >-
+ This is the default (root) TOSCA Policy Type definition that is used to govern scaling of TOSCA nodes or groups of nodes.
+ derived_from: tosca.policies.Root
+
+ tosca.policies.Update:
+ _extensions:
+ shorthand_name: Update # ARIA NOTE: omitted in the spec
+ type_qualified_name: tosca:Update
+ specification: tosca-simple-1.0
+ specification_section: 5.10.4
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_POLICIES_UPDATE'
+ description: >-
+ This is the default (root) TOSCA Policy Type definition that is used to govern update of TOSCA nodes or groups of nodes.
+ derived_from: tosca.policies.Root
+
+ tosca.policies.Performance:
+ _extensions:
+ shorthand_name: Performance # ARIA NOTE: omitted in the spec
+ type_qualified_name: tosca:Performance
+ specification: tosca-simple-1.0
+ specification_section: 5.10.5
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_POLICIES_PERFORMANCE'
+ description: >-
+ This is the default (root) TOSCA Policy Type definition that is used to declare performance requirements for TOSCA nodes or
+ groups of nodes.
+ derived_from: tosca.policies.Root
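+
+ # Example (editorial, illustrative): a sketch of applying one of these policy types
+ # to node templates in a topology_template; the policy name and target are
+ # hypothetical.
+ #
+ #   topology_template:
+ #     policies:
+ #       - scaling_policy:
+ #           type: tosca.policies.Scaling
+ #           targets: [ web_server ]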
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/relationships.yaml b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/relationships.yaml
new file mode 100644
index 0000000..9f2c32c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/relationships.yaml
@@ -0,0 +1,158 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+relationship_types:
+
+ tosca.relationships.Root:
+ _extensions:
+ shorthand_name: Root # ARIA NOTE: omitted in the spec
+ type_qualified_name: tosca:Root
+ specification: tosca-simple-1.0
+ specification_section: 5.6.1
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_RELATIONSHIPS_ROOT'
+ description: >-
+ This is the default (root) TOSCA Relationship Type definition that all other TOSCA Relationship Types derive from.
+ attributes:
+ tosca_id:
+ description: >-
+ A unique identifier of the realized instance of a Relationship Template that derives from any TOSCA normative type.
+ type: string
+ tosca_name:
+ description: >-
+ This attribute reflects the name of the Relationship Template as defined in the TOSCA service template. This name is not
+ unique to the realized instance model of corresponding deployed application as each template in the model can result in
+ one or more instances (e.g., scaled) when orchestrated to a provider environment.
+ type: string
+ state:
+ description: >-
+ The state of the relationship instance.
+ type: string
+ default: initial
+ interfaces:
+ Configure:
+ type: tosca.interfaces.relationship.Configure
+
+ tosca.relationships.DependsOn:
+ _extensions:
+ shorthand_name: DependsOn
+ type_qualified_name: tosca:DependsOn
+ specification: tosca-simple-1.0
+ specification_section: 5.6.2
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_RELATIONSHIPS_DEPENDSON'
+ description: >-
+ This type represents a general dependency relationship between two nodes.
+ derived_from: tosca.relationships.Root
+ valid_target_types: [ tosca.capabilities.Node ]
+
+ tosca.relationships.HostedOn:
+ _extensions:
+ shorthand_name: HostedOn
+ type_qualified_name: tosca:HostedOn
+ specification: tosca-simple-1.0
+ specification_section: 5.6.3
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_RELATIONSHIPS_HOSTEDON'
+ description: >-
+ This type represents a hosting relationship between two nodes.
+ derived_from: tosca.relationships.Root
+ valid_target_types: [ tosca.capabilities.Container ]
+
+ tosca.relationships.ConnectsTo:
+ _extensions:
+ shorthand_name: ConnectsTo
+ type_qualified_name: tosca:ConnectsTo
+ specification: tosca-simple-1.0
+ specification_section: 5.6.4
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_RELATIONSHIPS_CONNECTSTO'
+ description: >-
+ This type represents a network connection relationship between two nodes.
+ derived_from: tosca.relationships.Root
+ valid_target_types: [ tosca.capabilities.Endpoint ]
+ properties:
+ credential:
+ type: tosca.datatypes.Credential
+ required: false
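+
+ # Example (editorial, illustrative): a sketch of a requirement assignment using
+ # ConnectsTo with its optional credential property; the node name and credential
+ # values are hypothetical.
+ #
+ #   requirements:
+ #     - database_endpoint:
+ #         node: my_database
+ #         relationship:
+ #           type: tosca.relationships.ConnectsTo
+ #           properties:
+ #             credential:
+ #               user: app_user
+ #               token: app_password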
+
+ tosca.relationships.AttachesTo:
+ _extensions:
+ shorthand_name: AttachesTo
+ type_qualified_name: tosca:AttachesTo
+ specification: tosca-simple-1.0
+ specification_section: 5.6.5
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_RELATIONSHIPS_ATTACHTO'
+ description: >-
+ This type represents an attachment relationship between two nodes. For example, an AttachesTo relationship type would be used
+ for attaching a storage node to a Compute node.
+ derived_from: tosca.relationships.Root
+ valid_target_types: [ tosca.capabilities.Attachment ]
+ properties:
+ location:
+ description: >-
+ The relative location (e.g., path on the file system) that provides the root location to address an attached node,
+ e.g., a mount point/path such as '/usr/data'. Note: The user must provide it and it cannot be "root".
+ type: string
+ constraints:
+ - min_length: 1
+ device:
+ description: >-
+ The logical device name for the attached device (which is represented by the target node in the model), e.g.,
+ '/dev/hda1'.
+ type: string
+ required: false
+ attributes:
+ device:
+ description: >-
+ The logical name of the device as exposed to the instance.
+ Note: A runtime property that gets set when the model gets instantiated by the orchestrator.
+ type: string
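+
+ # Example (editorial, illustrative): a sketch of AttachesTo attaching a
+ # BlockStorage node to a Compute node; all names and values are hypothetical.
+ #
+ #   my_server:
+ #     type: tosca.nodes.Compute
+ #     requirements:
+ #       - local_storage:
+ #           node: my_volume
+ #           relationship:
+ #             type: tosca.relationships.AttachesTo
+ #             properties:
+ #               location: /data
+ #   my_volume:
+ #     type: tosca.nodes.BlockStorage
+ #     properties:
+ #       size: 10 GB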
+
+ tosca.relationships.RoutesTo:
+ _extensions:
+ shorthand_name: RoutesTo
+ type_qualified_name: tosca:RoutesTo
+ specification: tosca-simple-1.0
+ specification_section: 5.6.6
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#_Toc397688815'
+ description: >-
+ This type represents an intentional network routing between two Endpoints in different networks.
+ derived_from: tosca.relationships.ConnectsTo
+ valid_target_types: [ tosca.capabilities.Endpoint ]
+
+ #
+ # Network
+ #
+
+ tosca.relationships.network.LinksTo:
+ _extensions:
+ shorthand_name: LinksTo
+ type_qualified_name: tosca:LinksTo
+ specification: tosca-simple-1.0
+ specification_section: 7.5.4
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_RELATIONSHIPS_NETWORK_LINKSTO'
+ description: >-
+ This relationship type represents an association relationship between Port and Network node types.
+ derived_from: tosca.relationships.DependsOn
+ valid_target_types: [ tosca.capabilities.network.Linkable ]
+
+ tosca.relationships.network.BindsTo:
+ _extensions:
+ shorthand_name: BindsTo # ARIA NOTE: the spec says "network.BindsTo" which seems wrong
+ type_qualified_name: tosca:BindsTo
+ specification: tosca-simple-1.0
+ specification_section: 7.5.5
+ specification_url: 'http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html#DEFN_TYPE_RELATIONSHIPS_NETWORK_BINDTO'
+ description: >-
+ This type represents a network association relationship between Port and Compute node types.
+ derived_from: tosca.relationships.DependsOn
+ valid_target_types: [ tosca.capabilities.network.Bindable ]
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/tosca-simple-1.0.yaml b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/tosca-simple-1.0.yaml
new file mode 100644
index 0000000..f8cc520
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-1.0/tosca-simple-1.0.yaml
@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+imports:
+ - artifacts.yaml
+ - capabilities.yaml
+ - data.yaml
+ - groups.yaml
+ - interfaces.yaml
+ - nodes.yaml
+ - policies.yaml
+ - relationships.yaml
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/artifacts.yaml b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/artifacts.yaml
new file mode 100644
index 0000000..2427d9f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/artifacts.yaml
@@ -0,0 +1,84 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+artifact_types:
+
+ tosca.artifacts.nfv.SwImage:
+ _extensions:
+ shorthand_name: SwImage
+ type_qualified_name: tosca:SwImage
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.4.1
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896067'
+ derived_from: tosca.artifacts.Deployment.Image
+ properties:
+ name:
+ description: >-
+ Name of this software image.
+ type: string
+ required: true
+ version:
+ description: >-
+ Version of this software image.
+ type: string
+ required: true
+ checksum:
+ description: >-
+ Checksum of the software image file.
+ type: string
+ container_format:
+ description: >-
+ The container format describes the container file format in which the software image is
+ provided.
+ type: string
+ required: true
+ disk_format:
+ description: >-
+ The disk format of a software image is the format of the underlying disk image.
+ type: string
+ required: true
+ min_disk:
+ description: >-
+ The minimal disk size requirement for this software image.
+ type: scalar-unit.size
+ required: true
+ min_ram:
+ description: >-
+ The minimal RAM requirement for this software image.
+ type: scalar-unit.size
+ required: false
+ size: # ARIA NOTE: section [5.4.1.1 Properties] calls this field 'Size'
+ description: >-
+ The size of this software image.
+ type: scalar-unit.size
+ required: true
+ sw_image:
+ description: >-
+ A reference to the actual software image within the VNF Package, or a URL.
+ type: string
+ required: true
+ operating_system:
+ description: >-
+ Identifies the operating system used in the software image.
+ type: string
+ required: false
+ supported_virtualization_environment:
+ description: >-
+ Identifies the virtualization environments (e.g. hypervisor) compatible with this software
+ image.
+ type: list
+ entry_schema:
+ type: string
+ required: false
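+
+ # Example (editorial, illustrative): a sketch of declaring a SwImage artifact on a
+ # node template, with property assignments following the style of the NFV spec's
+ # VNFD examples; the file name and all values are hypothetical.
+ #
+ #   artifacts:
+ #     sw_image:
+ #       type: tosca.artifacts.nfv.SwImage
+ #       file: images/ubuntu-16.04.qcow2
+ #       properties:
+ #         name: ubuntu-16.04
+ #         version: '16.04'
+ #         checksum: 9af30fce37a4c5c831e095745744d6d2
+ #         container_format: bare
+ #         disk_format: qcow2
+ #         min_disk: 1 GB
+ #         size: 2 GB
+ #         sw_image: images/ubuntu-16.04.qcow2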
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/capabilities.yaml b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/capabilities.yaml
new file mode 100644
index 0000000..7b6363f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/capabilities.yaml
@@ -0,0 +1,70 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+capability_types:
+
+ tosca.capabilities.nfv.VirtualBindable:
+ _extensions:
+ shorthand_name: VirtualBindable
+ type_qualified_name: tosca:VirtualBindable
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.5.1
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896069'
+ description: >-
+ A node type that includes the VirtualBindable capability indicates that it can be targeted by the
+ tosca.relationships.nfv.VirtualBindsTo relationship type.
+ derived_from: tosca.capabilities.Node
+
+ tosca.capabilities.nfv.Metric:
+ _extensions:
+ shorthand_name: Metric
+ type_qualified_name: tosca:Metric
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.5.2
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896070'
+ description: >-
+ A node type that includes the Metric capability indicates that it can be monitored using a
+ tosca.relationships.nfv.Monitor relationship type.
+ derived_from: tosca.capabilities.Endpoint
+
+ tosca.capabilities.nfv.VirtualCompute:
+ _extensions:
+ shorthand_name: VirtualCompute
+ type_qualified_name: tosca:VirtualCompute
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.5.3
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896071'
+ derived_from: tosca.capabilities.Root
+ properties:
+ requested_additional_capabilities:
+ # ARIA NOTE: in section [5.5.3.1 Properties] the name of this property is
+ # "request_additional_capabilities", and its type is not a map, but
+ # tosca.datatypes.nfv.RequestedAdditionalCapability
+ description: >-
+ Describes additional capability for a particular VDU.
+ type: map
+ entry_schema:
+ type: tosca.datatypes.nfv.RequestedAdditionalCapability
+ required: false
+ virtual_memory:
+ description: >-
+ Describes virtual memory of the virtualized compute.
+ type: tosca.datatypes.nfv.VirtualMemory
+ required: true
+ virtual_cpu:
+ description: >-
+ Describes virtual CPU(s) of the virtualized compute.
+ type: tosca.datatypes.nfv.VirtualCpu
+ required: true
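+
+ # Example (editorial, illustrative): a sketch of assigning VirtualCompute
+ # capability properties on a VDU node template; the values are hypothetical.
+ #
+ #   capabilities:
+ #     virtual_compute:
+ #       properties:
+ #         virtual_memory:
+ #           virtual_mem_size: 8192 MB
+ #         virtual_cpu:
+ #           cpu_architecture: x86
+ #           num_virtual_cpu: 4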
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/data.yaml b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/data.yaml
new file mode 100644
index 0000000..889dcf7
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/data.yaml
@@ -0,0 +1,318 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+data_types:
+
+ tosca.datatypes.nfv.L2AddressData:
+ # TBD
+ _extensions:
+ shorthand_name: L2AddressData
+ type_qualified_name: tosca:L2AddressData
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.3.1
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896055'
+
+ tosca.datatypes.nfv.L3AddressData:
+ _extensions:
+ shorthand_name: L3AddressData
+ type_qualified_name: tosca:L3AddressData
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.3.2
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896056'
+ description: >-
+ The L3AddressData type is a complex TOSCA data type used to describe the L3AddressData
+ information element as defined in [ETSI GS NFV-IFA 011]; it provides the information on the IP
+ addresses to be assigned to the connection point instantiated from the parent Connection Point
+ Descriptor.
+ derived_from: tosca.datatypes.Root
+ properties:
+ ip_address_assignment:
+ description: >-
+ Specifies whether the address assignment is the responsibility of the management and
+ orchestration function. If set to True, it is the management and orchestration function's
+ responsibility.
+ type: boolean
+ required: true
+ floating_ip_activated:
+ description: Specifies whether the floating IP scheme is activated on the Connection Point.
+ type: boolean
+ required: true
+ ip_address_type:
+ description: >-
+ Defines the address type. The address type should be aligned with the address type supported by
+ the layer_protocol properties of the parent VnfExtCpd.
+ type: string
+ required: false
+ constraints:
+ - valid_values: [ ipv4, ipv6 ]
+ number_of_ip_address:
+ description: >-
+ Minimum number of IP addresses to be assigned.
+ type: integer
+ required: false
+
+ tosca.datatypes.nfv.AddressData:
+ _extensions:
+ shorthand_name: AddressData
+ type_qualified_name: tosca:AddressData
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.3.3
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896057'
+ description: >-
+ The AddressData type is a complex TOSCA data type used to describe the AddressData information
+ element as defined in [ETSI GS NFV-IFA 011]; it provides information on the addresses to be
+ assigned to the connection point(s) instantiated from a Connection Point Descriptor.
+ derived_from: tosca.datatypes.Root
+ properties:
+ address_type:
+ description: >-
+ Describes the type of the address to be assigned to the connection point instantiated from
+ the parent Connection Point Descriptor. The content type shall be aligned with the address
+ type supported by the layerProtocol property of the parent Connection Point Descriptor.
+ type: string
+ required: true
+ constraints:
+ - valid_values: [ mac_address, ip_address ]
+ l2_address_data:
+ # Shall be present when the addressType is mac_address.
+ description: >-
+ Provides the information on the MAC addresses to be assigned to the connection point(s)
+ instantiated from the parent Connection Point Descriptor.
+ type: tosca.datatypes.nfv.L2AddressData # Empty in "GS NFV IFA011 V0.7.3"
+ required: false
+ l3_address_data:
+ # Shall be present when the addressType is ip_address.
+ description: >-
+ Provides the information on the IP addresses to be assigned to the connection point
+ instantiated from the parent Connection Point Descriptor.
+ type: tosca.datatypes.nfv.L3AddressData
+ required: false
+
+ tosca.datatypes.nfv.VirtualNetworkInterfaceRequirements:
+ _extensions:
+ shorthand_name: VirtualNetworkInterfaceRequirements
+ type_qualified_name: tosca:VirtualNetworkInterfaceRequirements
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.3.4
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896058'
+ description: >-
+ The VirtualNetworkInterfaceRequirements type is a complex TOSCA data type used to describe the
+ VirtualNetworkInterfaceRequirements information element as defined in [ETSI GS NFV-IFA 011];
+ it provides the information to specify requirements on a virtual network interface realising
+ the CPs instantiated from this CPD.
+ derived_from: tosca.datatypes.Root
+ properties:
+ name:
+ description: >-
+ Provides a human readable name for the requirement.
+ type: string
+ required: false
+ description:
+ description: >-
+ Provides a human readable description for the requirement.
+ type: string
+ required: false
+ support_mandatory:
+ description: >-
+ Indicates whether fulfilling the constraint is mandatory (TRUE) for successful operation
+ or desirable (FALSE).
+ type: boolean
+ required: false
+ requirement:
+ description: >-
+ Specifies a requirement such as the support of SR-IOV, a particular data plane
+ acceleration library, an API to be exposed by a NIC, etc.
+ type: string # ARIA NOTE: the spec says "not specified", but TOSCA requires a type
+ required: true
+
+ tosca.datatypes.nfv.ConnectivityType:
+ _extensions:
+ shorthand_name: ConnectivityType
+ type_qualified_name: tosca:ConnectivityType
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.3.5
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896059'
+ description: >-
+ The TOSCA ConnectivityType type is a complex TOSCA data type used to describe ConnectivityType
+ information element as defined in [ETSI GS NFV-IFA 011].
+ derived_from: tosca.datatypes.Root
+ properties:
+ layer_protocol:
+ description: >-
+ Identifies the protocol this VL gives access to (ethernet, mpls, odu2, ipv4, ipv6,
+ pseudo_wire).
+ type: string
+ required: true
+ constraints:
+ - valid_values: [ ethernet, mpls, odu2, ipv4, ipv6, pseudo_wire ]
+ flow_pattern:
+ description: >-
+ Identifies the flow pattern of the connectivity (Line, Tree, Mesh).
+ type: string
+ required: false
+
+ tosca.datatypes.nfv.RequestedAdditionalCapability:
+ _extensions:
+ shorthand_name: RequestedAdditionalCapability
+ type_qualified_name: tosca:RequestedAdditionalCapability
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.3.6
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896060'
+ description: >-
+ RequestedAdditionalCapability describes an additional capability requested for a particular VDU.
+ derived_from: tosca.datatypes.Root
+ properties:
+ request_additional_capability_name:
+ description: >-
+ Identifies a requested additional capability for the VDU.
+ type: string
+ required: true
+ support_mandatory:
+ description: >-
+ Indicates whether the requested additional capability is mandatory for successful
+ operation.
+ type: string
+ required: true
+ min_requested_additional_capability_version:
+ description: >-
+ Identifies the minimum version of the requested additional capability.
+ type: string
+ required: false
+ preferred_requested_additional_capability_version:
+ description: >-
+ Identifies the preferred version of the requested additional capability.
+ type: string
+ required: false
+ target_performance_parameters:
+ description: >-
+ Identifies specific attributes, dependent on the requested additional capability type.
+ type: map
+ entry_schema:
+ type: string
+ required: true
+
+ tosca.datatypes.nfv.VirtualMemory:
+ _extensions:
+ shorthand_name: VirtualMemory
+ type_qualified_name: tosca:VirtualMemory
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.3.7
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896061'
+ description: >-
+ VirtualMemory describes virtual memory for a particular VDU.
+ derived_from: tosca.datatypes.Root
+ properties:
+ virtual_mem_size:
+ description: Amount of virtual memory.
+ type: scalar-unit.size
+ required: true
+ virtual_mem_oversubscription_policy:
+ description: >-
+ The memory core oversubscription policy in terms of virtual memory to physical memory on
+ the platform. The cardinality can be 0 during the allocation request, if no particular
+ value is requested.
+ type: string
+ required: false
+ numa_enabled:
+ description: >-
+ It specifies the memory allocation to be cognisant of the relevant process/core
+ allocation. The cardinality can be 0 during the allocation request, if no particular value
+ is requested.
+ type: boolean
+ required: false
+
+ tosca.datatypes.nfv.VirtualCpu:
+ _extensions:
+ shorthand_name: VirtualCpu
+ type_qualified_name: tosca:VirtualCpu
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.3.8
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896062'
+ description: >-
+ VirtualCpu describes the virtual CPU(s) for a particular VDU.
+ derived_from: tosca.datatypes.Root
+ properties:
+ cpu_architecture:
+ description: >-
+ CPU architecture type. Examples are x86, ARM.
+ type: string
+ required: false
+ num_virtual_cpu:
+ description: >-
+ Number of virtual CPUs.
+ type: integer
+ required: true
+ virtual_cpu_clock:
+ description: >-
+ Minimum virtual CPU clock rate.
+ type: scalar-unit.frequency
+ required: false
+ virtual_cpu_oversubscription_policy:
+ description: >-
+ CPU core oversubscription policy.
+ type: string
+ required: false
+ virtual_cpu_pinning:
+ description: >-
+ The virtual CPU pinning configuration for the virtualized compute resource.
+ type: tosca.datatypes.nfv.VirtualCpuPinning
+ required: false
+
+ tosca.datatypes.nfv.VirtualCpuPinning:
+ _extensions:
+ shorthand_name: VirtualCpuPinning
+ type_qualified_name: tosca:VirtualCpuPinning
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.3.9
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896064'
+ description: >-
+ VirtualCpuPinning describes CPU pinning configuration for a particular CPU.
+ derived_from: tosca.datatypes.Root
+ properties:
+ cpu_pinning_policy:
+ description: >-
+ Indicates the policy for CPU pinning.
+ type: string
+ constraints:
+ - valid_values: [ static, dynamic ]
+ required: false
+ cpu_pinning_map:
+ description: >-
+ If cpuPinningPolicy is defined as "static", the cpuPinningMap provides the map of pinning
+ virtual CPU cores to physical CPU cores/threads.
+ type: map
+ entry_schema:
+ type: string
+ required: false
+
+ tosca.datatypes.nfv.VnfcConfigurableProperties:
+ _extensions:
+ shorthand_name: VnfcConfigurableProperties
+ type_qualified_name: tosca:VnfcConfigurableProperties
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.3.10
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896065'
+ # ARIA NOTE: description is mangled in spec
+ description: >-
+ VnfcConfigurableProperties describes additional configurable properties of a VNFC.
+ derived_from: tosca.datatypes.Root
+ properties:
+ additional_vnfc_configurable_properties:
+ description: >-
+ Describes additional configuration for VNFC.
+ type: map
+ entry_schema:
+ type: string
+ required: false
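+
+ # Example (editorial, illustrative): a sketch of an AddressData value as it might
+ # appear in a Cpd node's address_data property (defined in nodes.yaml); all values
+ # are hypothetical.
+ #
+ #   address_data:
+ #     - address_type: ip_address
+ #       l3_address_data:
+ #         ip_address_assignment: true
+ #         floating_ip_activated: false
+ #         ip_address_type: ipv4
+ #         number_of_ip_address: 1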
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/nodes.yaml b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/nodes.yaml
new file mode 100644
index 0000000..8d1f0a2
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/nodes.yaml
@@ -0,0 +1,260 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+node_types:
+
+ tosca.nodes.nfv.VDU.Compute:
+ _extensions:
+ shorthand_name: VDU.Compute
+ type_qualified_name: tosca:VDU.Compute
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.9.2
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896079'
+ description: >-
+ The TOSCA nfv.VDU.Compute node type represents the virtual compute part of a VDU entity, which
+ mainly describes the deployment and operational behavior of a VNF component (VNFC), as
+ defined by [ETSI NFV IFA011].
+ derived_from: tosca.nodes.Compute
+ properties:
+ name:
+ description: >-
+ Human readable name of the VDU.
+ type: string
+ required: true
+ description:
+ description: >-
+ Human readable description of the VDU.
+ type: string
+ required: true
+ boot_order:
+ description: >-
+ The key indicates the boot index (lowest index defines highest boot priority).
+ The value references a descriptor from which a valid boot device is created, e.g.
+ VirtualStorageDescriptor from which a VirtualStorage instance is created. If no boot order
+ is defined, the default boot order defined in the VIM or NFVI shall be used.
+ type: list # ARIA NOTE: an explicit index (boot index) is unnecessary, contrary to IFA011
+ entry_schema:
+ type: string
+ required: false
+ nfvi_constraints:
+ description: >-
+ Describes constraints on the NFVI for the VNFC instance(s) created from this VDU.
+ For example, aspects of a secure hosting environment for the VNFC instance that involve
+ additional entities or processes. More software images can be attached to the
+ virtualization container using virtual_storage.
+ type: list
+ entry_schema:
+ type: string
+ required: false
+ configurable_properties:
+ description: >-
+ Describes the configurable properties of all VNFC instances based on this VDU.
+ type: map
+ entry_schema:
+ type: tosca.datatypes.nfv.VnfcConfigurableProperties
+ required: true
+ attributes:
+ # ARIA NOTE: The attributes are only described in section [5.9.2.5 Definition], but are not
+ # mentioned in section [5.9.2.2 Attributes]. Additionally, it does not seem to make sense to
+ # deprecate inherited attributes, as it breaks the inheritance contract.
+ private_address:
+ type: string
+ status: deprecated
+ public_address:
+ type: string
+ status: deprecated
+ networks:
+ type: map
+ entry_schema:
+ type: tosca.datatypes.network.NetworkInfo
+ status: deprecated
+ ports:
+ type: map
+ entry_schema:
+ type: tosca.datatypes.network.PortInfo
+ status: deprecated
+ capabilities:
+ virtual_compute:
+ description: >-
+ Describes virtual compute resources capabilities.
+ type: tosca.capabilities.nfv.VirtualCompute
+ virtual_binding:
+ description: >-
+ Defines ability of VirtualBindable.
+ type: tosca.capabilities.nfv.VirtualBindable
+ monitoring_parameter:
+ # ARIA NOTE: commented out in 5.9.2.5
+ description: >-
+ Monitoring parameter, which can be tracked for a VNFC based on this VDU. Examples include:
+ memory-consumption, CPU-utilisation, bandwidth-consumption, VNFC downtime, etc.
+ type: tosca.capabilities.nfv.Metric
+ #requirements:
+ # ARIA NOTE: virtual_storage is TBD
+
+ # ARIA NOTE: csd04 attempts to deprecate the inherited local_storage requirement, but this
+ # is not possible in TOSCA
+ artifacts:
+ sw_image:
+ description: >-
+ Describes the software image which is directly loaded on the virtualization container
+ realizing this VDU. # ARIA NOTE: the spec says "virtual storage" here, apparently a copy/paste error
+ file: '' # ARIA NOTE: missing value even though it is required in TOSCA
+ type: tosca.artifacts.nfv.SwImage
+
+ tosca.nodes.nfv.VDU.VirtualStorage:
+ _extensions:
+ shorthand_name: VirtualStorage # ARIA NOTE: seems wrong in spec
+ type_qualified_name: tosca:VirtualStorage # ARIA NOTE: seems wrong in spec
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.9.3
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896080'
+ description: >-
+ The NFV VirtualStorage node type represents a virtual storage entity, which describes the
+ deployment and operational behavior of virtual storage resources, as defined by
+ [ETSI NFV IFA011].
+ derived_from: tosca.nodes.Root
+ properties:
+ type_of_storage:
+ description: >-
+ Type of virtualized storage resource.
+ type: string
+ required: true
+ size_of_storage:
+ description: >-
+ Size of virtualized storage resource (in GB).
+ type: scalar-unit.size
+ required: true
+ rdma_enabled:
+ description: >-
+ Indicates whether the storage supports RDMA.
+ type: boolean
+ required: false
+ artifacts:
+ sw_image:
+ description: >-
+ Describes the software image which is directly loaded on the virtualization container
+ realizing this virtual storage.
+ file: '' # ARIA NOTE: missing in spec
+ type: tosca.artifacts.nfv.SwImage
+
+ tosca.nodes.nfv.Cpd:
+ _extensions:
+ shorthand_name: Cpd
+ type_qualified_name: tosca:Cpd
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.9.4
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896081'
+ description: >-
+ The TOSCA nfv.Cpd node represents network connectivity to a compute resource or a VL as defined
+ by [ETSI GS NFV-IFA 011]. This is an abstract type used as parent for the various Cpd types.
+ derived_from: tosca.nodes.Root
+ properties:
+ layer_protocol:
+ description: >-
+ Identifies which protocol the connection point uses for connectivity purposes.
+ type: string
+ constraints:
+ - valid_values: [ ethernet, mpls, odu2, ipv4, ipv6, pseudo_wire ]
+ required: false
+ role: # Name in ETSI NFV IFA011 v0.7.3 cpRole
+ description: >-
+ Identifies the role of the port in the context of the traffic flow patterns in the VNF or
+ parent NS. For example, a VNF with a tree flow pattern within the VNF will have legal
+ cpRoles of ROOT and LEAF.
+ type: string
+ constraints:
+ - valid_values: [ root, leaf ]
+ required: false
+ description:
+ description: >-
+ Provides human-readable information on the purpose of the connection point
+ (e.g. connection point for control plane traffic).
+ type: string
+ required: false
+ address_data:
+ description: >-
+ Provides information on the addresses to be assigned to the connection point(s) instantiated
+ from this Connection Point Descriptor.
+ type: list
+ entry_schema:
+ type: tosca.datatypes.nfv.AddressData
+ required: false
+
+ tosca.nodes.nfv.VduCpd:
+ _extensions:
+ shorthand_name: VduCpd
+ type_qualified_name: tosca:VduCpd
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.9.5
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896082'
+ description: >-
+ The TOSCA nfv.VduCpd node type represents a type of TOSCA Cpd node and describes network
+ connectivity between a VNFC instance (based on this VDU) and an internal VL as defined by
+ [ETSI GS NFV-IFA 011].
+ derived_from: tosca.nodes.nfv.Cpd
+ properties:
+ bitrate_requirement:
+ description: >-
+ Bitrate requirement on this connection point.
+ type: integer
+ required: false
+ virtual_network_interface_requirements:
+ description: >-
+ Specifies requirements on a virtual network interface realising the CPs instantiated from
+ this CPD.
+ type: list
+ entry_schema:
+ type: tosca.datatypes.nfv.VirtualNetworkInterfaceRequirements
+ required: false
+ requirements:
+ # ARIA NOTE: seems to be a leftover from csd03
+ # - virtual_link:
+ # description: Describes the requirements for linking to virtual link
+ # capability: tosca.capabilities.nfv.VirtualLinkable
+ # relationship: tosca.relationships.nfv.VirtualLinksTo
+ # node: tosca.nodes.nfv.VnfVirtualLinkDesc
+ - virtual_binding:
+ capability: tosca.capabilities.nfv.VirtualBindable
+ relationship: tosca.relationships.nfv.VirtualBindsTo
+ node: tosca.nodes.nfv.VDU.Compute # ARIA NOTE: seems wrong in spec
+
+ tosca.nodes.nfv.VnfVirtualLinkDesc:
+ _extensions:
+ shorthand_name: VnfVirtualLinkDesc
+ type_qualified_name: tosca:VnfVirtualLinkDesc
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.9.6
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896083'
+ description: >-
+ The TOSCA nfv.VnfVirtualLinkDesc node type represents a logical internal virtual link as
+ defined by [ETSI GS NFV-IFA 011].
+ derived_from: tosca.nodes.Root
+ properties:
+ connectivity_type:
+ description: >-
+ Specifies the protocol exposed by the VL and the flow pattern supported by the VL.
+ type: tosca.datatypes.nfv.ConnectivityType
+ required: true
+ description:
+ description: >-
+ Provides human-readable information on the purpose of the VL (e.g. control plane traffic).
+ type: string
+ required: false
+ test_access:
+ description: >-
+ Test access facilities available on the VL (e.g. none, passive monitoring, or active
+ (intrusive) loopbacks at endpoints).
+ type: string
+ required: false
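+
+ # Example (editorial, illustrative): a sketch tying these NFV node types together,
+ # with a VduCpd connecting a VDU.Compute to a VnfVirtualLinkDesc; all template
+ # names and values are hypothetical, and required properties not shown here are
+ # elided for brevity.
+ #
+ #   node_templates:
+ #     my_vdu:
+ #       type: tosca.nodes.nfv.VDU.Compute
+ #       properties:
+ #         name: my-vdu
+ #         description: An example VDU
+ #     my_vl:
+ #       type: tosca.nodes.nfv.VnfVirtualLinkDesc
+ #       properties:
+ #         connectivity_type:
+ #           layer_protocol: ipv4
+ #     my_cp:
+ #       type: tosca.nodes.nfv.VduCpd
+ #       properties:
+ #         layer_protocol: ipv4
+ #       requirements:
+ #         - virtual_binding: my_vdu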
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/relationships.yaml b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/relationships.yaml
new file mode 100644
index 0000000..4cf99a2
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/relationships.yaml
@@ -0,0 +1,43 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+relationship_types:
+
+ tosca.relationships.nfv.VirtualBindsTo:
+ _extensions:
+ shorthand_name: VirtualBindsTo
+ type_qualified_name: tosca:VirtualBindsTo
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.7.1
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896074'
+ description: >-
+ This relationship type represents an association relationship between VDU and CP node types.
+ derived_from: tosca.relationships.DependsOn
+ valid_target_types: [ tosca.capabilities.nfv.VirtualBindable ]
+
+ # ARIA NOTE: csd04 lacks the definition of tosca.relationships.nfv.Monitor (the derived_from and
+ # valid_target_types), so we are using the definition in csd03 section 8.4.2.
+ tosca.relationships.nfv.Monitor:
+ _extensions:
+ shorthand_name: Monitor
+ type_qualified_name: tosca:Monitor
+ specification: tosca-simple-nfv-1.0
+ specification_section: 5.7.2
+ specification_url: 'http://docs.oasis-open.org/tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html#_Toc482896075'
+ description: >-
+ This relationship type represents an association relationship to the Metric capability of VDU
+ node types.
+ derived_from: tosca.relationships.ConnectsTo
+ valid_target_types: [ tosca.capabilities.nfv.Metric ]
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/tosca-simple-nfv-1.0.yaml b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/tosca-simple-nfv-1.0.yaml
new file mode 100644
index 0000000..764c739
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/profiles/tosca-simple-nfv-1.0/tosca-simple-nfv-1.0.yaml
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+imports:
+ - artifacts.yaml
+ - capabilities.yaml
+ - data.yaml
+ - nodes.yaml
+ - relationships.yaml
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_nfv_v1_0/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_nfv_v1_0/__init__.py
new file mode 100644
index 0000000..313e3ef
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_nfv_v1_0/__init__.py
@@ -0,0 +1,19 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .presenter import ToscaSimpleNfvPresenter1_0
+
+__all__ = (
+ 'ToscaSimpleNfvPresenter1_0',)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_nfv_v1_0/presenter.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_nfv_v1_0/presenter.py
new file mode 100644
index 0000000..64178aa
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_nfv_v1_0/presenter.py
@@ -0,0 +1,43 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.collections import FrozenList
+from aria.utils.caching import cachedmethod
+
+from ..simple_v1_0 import ToscaSimplePresenter1_0
+
+
+class ToscaSimpleNfvPresenter1_0(ToscaSimplePresenter1_0): # pylint: disable=invalid-name,abstract-method
+ """
+ ARIA presenter for the `TOSCA Simple Profile for NFV v1.0 csd04 <http://docs.oasis-open.org
+ /tosca/tosca-nfv/v1.0/csd04/tosca-nfv-v1.0-csd04.html>`__.
+
+ Supported ``tosca_definitions_version`` values:
+
+ * ``tosca_simple_profile_for_nfv_1_0``
+ """
+
+ DSL_VERSIONS = ('tosca_simple_profile_for_nfv_1_0',)
+ ALLOWED_IMPORTED_DSL_VERSIONS = ('tosca_simple_yaml_1_0', 'tosca_simple_profile_for_nfv_1_0')
+ SIMPLE_PROFILE_FOR_NFV_LOCATION = 'tosca-simple-nfv-1.0/tosca-simple-nfv-1.0.yaml'
+
+ # Presenter
+
+ @cachedmethod
+ def _get_import_locations(self, context):
+ import_locations = super(ToscaSimpleNfvPresenter1_0, self)._get_import_locations(context)
+ if context.presentation.import_profile:
+ return FrozenList([self.SIMPLE_PROFILE_FOR_NFV_LOCATION] + import_locations)
+ return import_locations
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/__init__.py
new file mode 100644
index 0000000..61995db
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/__init__.py
@@ -0,0 +1,199 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Parser implementation of `TOSCA Simple Profile v1.0 cos01 <http://docs.oasis-open.org/tosca
+/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html>`__.
+
+.. autosummary::
+ :nosignatures:
+
+ aria_extension_tosca.simple_v1_0.ToscaSimplePresenter1_0
+
+Assignments
+-----------
+
+.. autosummary::
+ :nosignatures:
+
+ aria_extension_tosca.simple_v1_0.PropertyAssignment
+ aria_extension_tosca.simple_v1_0.OperationAssignment
+ aria_extension_tosca.simple_v1_0.InterfaceAssignment
+ aria_extension_tosca.simple_v1_0.RelationshipAssignment
+ aria_extension_tosca.simple_v1_0.RequirementAssignment
+ aria_extension_tosca.simple_v1_0.AttributeAssignment
+ aria_extension_tosca.simple_v1_0.CapabilityAssignment
+ aria_extension_tosca.simple_v1_0.ArtifactAssignment
+
+Definitions
+-----------
+
+.. autosummary::
+ :nosignatures:
+
+ aria_extension_tosca.simple_v1_0.PropertyDefinition
+ aria_extension_tosca.simple_v1_0.AttributeDefinition
+ aria_extension_tosca.simple_v1_0.ParameterDefinition
+ aria_extension_tosca.simple_v1_0.OperationDefinition
+ aria_extension_tosca.simple_v1_0.InterfaceDefinition
+ aria_extension_tosca.simple_v1_0.RelationshipDefinition
+ aria_extension_tosca.simple_v1_0.RequirementDefinition
+ aria_extension_tosca.simple_v1_0.CapabilityDefinition
+
+Filters
+-------
+
+.. autosummary::
+ :nosignatures:
+
+ aria_extension_tosca.simple_v1_0.CapabilityFilter
+ aria_extension_tosca.simple_v1_0.NodeFilter
+
+Miscellaneous
+-------------
+
+.. autosummary::
+ :nosignatures:
+
+ aria_extension_tosca.simple_v1_0.Description
+ aria_extension_tosca.simple_v1_0.MetaData
+ aria_extension_tosca.simple_v1_0.Repository
+ aria_extension_tosca.simple_v1_0.Import
+ aria_extension_tosca.simple_v1_0.ConstraintClause
+ aria_extension_tosca.simple_v1_0.EntrySchema
+ aria_extension_tosca.simple_v1_0.OperationImplementation
+ aria_extension_tosca.simple_v1_0.SubstitutionMappingsRequirement
+ aria_extension_tosca.simple_v1_0.SubstitutionMappingsCapability
+ aria_extension_tosca.simple_v1_0.SubstitutionMappings
+
+Templates
+---------
+
+.. autosummary::
+ :nosignatures:
+
+ aria_extension_tosca.simple_v1_0.NodeTemplate
+ aria_extension_tosca.simple_v1_0.RelationshipTemplate
+ aria_extension_tosca.simple_v1_0.GroupTemplate
+ aria_extension_tosca.simple_v1_0.PolicyTemplate
+ aria_extension_tosca.simple_v1_0.TopologyTemplate
+ aria_extension_tosca.simple_v1_0.ServiceTemplate
+
+Types
+-----
+
+.. autosummary::
+ :nosignatures:
+
+ aria_extension_tosca.simple_v1_0.ArtifactType
+ aria_extension_tosca.simple_v1_0.DataType
+ aria_extension_tosca.simple_v1_0.CapabilityType
+ aria_extension_tosca.simple_v1_0.InterfaceType
+ aria_extension_tosca.simple_v1_0.RelationshipType
+ aria_extension_tosca.simple_v1_0.NodeType
+ aria_extension_tosca.simple_v1_0.GroupType
+ aria_extension_tosca.simple_v1_0.PolicyType
+
+Data types
+----------
+
+.. autosummary::
+ :nosignatures:
+
+ aria_extension_tosca.simple_v1_0.Timestamp
+ aria_extension_tosca.simple_v1_0.Version
+ aria_extension_tosca.simple_v1_0.Range
+ aria_extension_tosca.simple_v1_0.List
+ aria_extension_tosca.simple_v1_0.Map
+ aria_extension_tosca.simple_v1_0.ScalarSize
+ aria_extension_tosca.simple_v1_0.ScalarTime
+ aria_extension_tosca.simple_v1_0.ScalarFrequency
+"""
+
+from .presenter import ToscaSimplePresenter1_0
+from .assignments import (PropertyAssignment, OperationAssignment, InterfaceAssignment,
+ RelationshipAssignment, RequirementAssignment, AttributeAssignment,
+ CapabilityAssignment, ArtifactAssignment)
+from .definitions import (PropertyDefinition, AttributeDefinition, ParameterDefinition,
+ OperationDefinition, InterfaceDefinition, RelationshipDefinition,
+ RequirementDefinition, CapabilityDefinition)
+from .filters import CapabilityFilter, NodeFilter
+from .misc import (Description, MetaData, Repository, Import, ConstraintClause, EntrySchema,
+ OperationImplementation, SubstitutionMappingsRequirement,
+ SubstitutionMappingsCapability, SubstitutionMappings)
+from .templates import (NodeTemplate, RelationshipTemplate, GroupTemplate, PolicyTemplate,
+ TopologyTemplate, ServiceTemplate)
+from .types import (ArtifactType, DataType, CapabilityType, InterfaceType, RelationshipType,
+ NodeType, GroupType, PolicyType)
+from .data_types import (Timestamp, Version, Range, List, Map, ScalarSize, ScalarTime,
+ ScalarFrequency)
+
+MODULES = (
+ 'modeling',
+ 'presentation')
+
+__all__ = (
+ 'MODULES',
+ 'ToscaSimplePresenter1_0',
+ 'PropertyAssignment',
+ 'OperationAssignment',
+ 'InterfaceAssignment',
+ 'RelationshipAssignment',
+ 'RequirementAssignment',
+ 'AttributeAssignment',
+ 'CapabilityAssignment',
+ 'ArtifactAssignment',
+ 'PropertyDefinition',
+ 'AttributeDefinition',
+ 'ParameterDefinition',
+ 'OperationDefinition',
+ 'InterfaceDefinition',
+ 'RelationshipDefinition',
+ 'RequirementDefinition',
+ 'CapabilityDefinition',
+ 'CapabilityFilter',
+ 'NodeFilter',
+ 'Description',
+ 'MetaData',
+ 'Repository',
+ 'Import',
+ 'ConstraintClause',
+ 'EntrySchema',
+ 'OperationImplementation',
+ 'SubstitutionMappingsRequirement',
+ 'SubstitutionMappingsCapability',
+ 'SubstitutionMappings',
+ 'NodeTemplate',
+ 'RelationshipTemplate',
+ 'GroupTemplate',
+ 'PolicyTemplate',
+ 'TopologyTemplate',
+ 'ServiceTemplate',
+ 'ArtifactType',
+ 'DataType',
+ 'CapabilityType',
+ 'InterfaceType',
+ 'RelationshipType',
+ 'NodeType',
+ 'GroupType',
+ 'PolicyType',
+ 'Timestamp',
+ 'Version',
+ 'Range',
+ 'List',
+ 'Map',
+ 'ScalarSize',
+ 'ScalarTime',
+ 'ScalarFrequency')
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/assignments.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/assignments.py
new file mode 100644
index 0000000..7b48ed0
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/assignments.py
@@ -0,0 +1,453 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.collections import FrozenDict
+from aria.utils.caching import cachedmethod
+from aria.parser import implements_specification
+from aria.parser.presentation import (AsIsPresentation, has_fields, allow_unknown_fields,
+ short_form_field, primitive_field, object_field,
+ object_dict_field, object_dict_unknown_fields,
+ field_validator, type_validator)
+
+from .filters import NodeFilter
+from .misc import Description, OperationImplementation
+from .modeling.parameters import get_assigned_and_defined_parameter_values
+from .presentation.extensible import ExtensiblePresentation
+from .presentation.field_validators import (node_template_or_type_validator,
+ relationship_template_or_type_validator,
+ capability_definition_or_type_validator,
+ node_filter_validator)
+from .presentation.types import (convert_name_to_full_type_name, get_type_by_name)
+
+
+@implements_specification('3.5.9', 'tosca-simple-1.0')
+class PropertyAssignment(AsIsPresentation):
+ """
+ This section defines the grammar for assigning values to named properties within TOSCA Node and
+ Relationship templates that are defined in their corresponding named types.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_PROPERTY_VALUE_ASSIGNMENT>`__
+ """
+
+
+@short_form_field('implementation')
+@has_fields
+@implements_specification('3.5.13-2', 'tosca-simple-1.0')
+class OperationAssignment(ExtensiblePresentation):
+ """
+ An operation definition defines a named function or procedure that can be bound to an
+ implementation artifact (e.g., a script).
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_OPERATION_DEF>`__
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ The optional description string for the associated named operation.
+
+ :type: :class:`Description`
+ """
+
+ @object_field(OperationImplementation)
+ def implementation(self):
+ """
+ The optional implementation artifact name (e.g., a script file name within a TOSCA CSAR
+ file).
+
+ :type: :class:`OperationImplementation`
+ """
+
+ @object_dict_field(PropertyAssignment)
+ def inputs(self):
+ """
+        The optional list of input property assignments (i.e., parameter assignments) for operation
+ definitions that are within TOSCA Node or Relationship Template definitions. This includes
+ when operation definitions are included as part of a Requirement assignment in a Node
+ Template.
+
+ :type: {:obj:`basestring`: :class:`PropertyAssignment`}
+ """
+
+ @cachedmethod
+ def _get_extensions(self, context):
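+        # Merge order matters here: extensions inherited from the interface
+        # type hierarchy are folded in first (root ancestor first), then the
+        # container's extensions, then our own, so that the most specific
+        # declaration always wins.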
+ def update_inherited_extensions(extensions, interface_type):
+ parent = interface_type._get_parent(context)
+ if parent is not None:
+ update_inherited_extensions(extensions, parent)
+ operation_definition = interface_type.operations.get(self._name)
+ if operation_definition is not None:
+ if operation_definition._extensions:
+ extensions.update(operation_definition._extensions)
+
+ extensions = {}
+ update_inherited_extensions(extensions, self._container._get_type(context))
+ if self._container._extensions:
+ extensions.update(self._container._extensions)
+ if self._extensions:
+ extensions.update(self._extensions)
+ return extensions
+
+
+@allow_unknown_fields
+@has_fields
+@implements_specification('3.5.14-2', 'tosca-simple-1.0')
+class InterfaceAssignment(ExtensiblePresentation):
+ """
+ An interface definition defines a named interface that can be associated with a Node or
+ Relationship Type.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_INTERFACE_DEF>`__
+ """
+
+ @object_dict_field(PropertyAssignment)
+ def inputs(self):
+ """
+        The optional list of input property assignments (i.e., parameter assignments) for interface
+ definitions that are within TOSCA Node or Relationship Template definitions. This includes
+ when interface definitions are referenced as part of a Requirement assignment in a Node
+ Template.
+
+ :type: {:obj:`basestring`: :class:`PropertyAssignment`}
+ """
+
+ @object_dict_unknown_fields(OperationAssignment)
+ def operations(self):
+ """
+ :type: {:obj:`basestring`: :class:`OperationAssignment`}
+ """
+
+ @cachedmethod
+ def _get_type(self, context):
+ the_type = self._container._get_type(context)
+
+ if isinstance(the_type, tuple):
+ # In RelationshipAssignment
+ the_type = the_type[0] # This could be a RelationshipTemplate
+
+ interface_definitions = the_type._get_interfaces(context) \
+ if the_type is not None else None
+ interface_definition = interface_definitions.get(self._name) \
+ if interface_definitions is not None else None
+ return interface_definition._get_type(context) \
+ if interface_definition is not None else None
+
+ def _validate(self, context):
+ super(InterfaceAssignment, self)._validate(context)
+ if self.operations:
+ for operation in self.operations.itervalues(): # pylint: disable=no-member
+ operation._validate(context)
+
+
+@short_form_field('type')
+@has_fields
+class RelationshipAssignment(ExtensiblePresentation):
+ """
+ Relationship assignment.
+ """
+
+ @field_validator(relationship_template_or_type_validator)
+ @primitive_field(str)
+ def type(self):
+ """
+ The optional reserved keyname used to provide the name of the Relationship Type for the
+ requirement assignment's relationship keyname.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_dict_field(PropertyAssignment)
+ def properties(self):
+ """
+ ARIA NOTE: This field is not mentioned in the spec, but is implied.
+
+ :type: {:obj:`basestring`: :class:`PropertyAssignment`}
+ """
+
+ @object_dict_field(InterfaceAssignment)
+ def interfaces(self):
+ """
+ The optional reserved keyname used to reference declared (named) interface definitions of
+ the corresponding Relationship Type in order to provide Property assignments for these
+ interfaces or operations of these interfaces.
+
+ :type: {:obj:`basestring`: :class:`InterfaceAssignment`}
+ """
+
+ @cachedmethod
+ def _get_type(self, context):
+ type_name = self.type
+ if type_name is not None:
+ the_type = context.presentation.get_from_dict('service_template', 'topology_template',
+ 'relationship_templates', type_name)
+ if the_type is not None:
+ return the_type, 'relationship_template'
+ the_type = get_type_by_name(context, type_name, 'relationship_types')
+ if the_type is not None:
+ return the_type, 'relationship_type'
+ return None, None
+
+
+@short_form_field('node')
+@has_fields
+@implements_specification('3.7.2', 'tosca-simple-1.0')
+class RequirementAssignment(ExtensiblePresentation):
+ """
+ A Requirement assignment allows template authors to provide either concrete names of TOSCA
+ templates or provide abstract selection criteria for providers to use to find matching TOSCA
+ templates that are used to fulfill a named requirement's declared TOSCA Node Type.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_REQUIREMENT_ASSIGNMENT>`__
+ """
+
+    # ARIA NOTE: the example in spec section 3.7.2.2.2 shows unknown fields in addition to these;
+    # it is unclear whether that is intentional or a mistake in the spec.
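+
+    # Illustrative (non-normative) sketch of the grammar this presentation parses;
+    # "my_server" and the type names below are placeholders:
+    #
+    #   requirements:
+    #     - host:
+    #         node: my_server                            # node template or Node Type name
+    #         capability: tosca.capabilities.Container
+    #         relationship: tosca.relationships.HostedOn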
+
+ @field_validator(capability_definition_or_type_validator)
+ @primitive_field(str)
+ def capability(self):
+ """
+ The optional reserved keyname used to provide the name of either a:
+
+ * Capability definition within a target node template that can fulfill the requirement.
+ * Capability Type that the provider will use to select a type-compatible target node
+ template to fulfill the requirement at runtime.
+
+ :type: :obj:`basestring`
+ """
+
+ @field_validator(node_template_or_type_validator)
+ @primitive_field(str)
+ def node(self):
+ """
+ The optional reserved keyname used to identify the target node of a relationship.
+ Specifically, it is used to provide either a:
+
+ * Node Template name that can fulfill the target node requirement.
+ * Node Type name that the provider will use to select a type-compatible node template to
+ fulfill the requirement at runtime.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(RelationshipAssignment)
+ def relationship(self):
+ """
+ The optional reserved keyname used to provide the name of either a:
+
+ * Relationship Template to use to relate the source node to the (capability in the) target
+ node when fulfilling the requirement.
+ * Relationship Type that the provider will use to select a type-compatible relationship
+ template to relate the source node to the target node at runtime.
+
+ :type: :class:`RelationshipAssignment`
+ """
+
+ @field_validator(node_filter_validator)
+ @object_field(NodeFilter)
+ def node_filter(self):
+ """
+ The optional filter definition that TOSCA orchestrators or providers would use to select a
+ type-compatible target node that can fulfill the associated abstract requirement at runtime.
+
+ :type: :class:`NodeFilter`
+ """
+
+ @cachedmethod
+ def _get_node(self, context):
+ node = self.node
+
+ if node is not None:
+ node_template = context.presentation.get_from_dict('service_template',
+ 'topology_template',
+ 'node_templates', node)
+ if node_template is not None:
+ return node_template, 'node_template'
+ node_type = get_type_by_name(context, node, 'node_types')
+ if node_type is not None:
+ return node_type, 'node_type'
+
+ return None, None
+
+ @cachedmethod
+ def _get_capability(self, context):
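+        # Two-stage resolution: prefer a capability assignment on a concrete
+        # target node template; otherwise fall back to interpreting the name
+        # as a Capability Type.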
+ capability = self.capability
+
+ if capability is not None:
+ node, node_variant = self._get_node(context)
+ if node_variant == 'node_template':
+ capabilities = node._get_capabilities(context)
+ if capability in capabilities:
+ return capabilities[capability], 'capability_assignment'
+ capability_type = get_type_by_name(context, capability, 'capability_types')
+ if capability_type is not None:
+ return capability_type, 'capability_type'
+
+ return None, None
+
+
+@implements_specification('3.5.11', 'tosca-simple-1.0')
+class AttributeAssignment(AsIsPresentation):
+ """
+ This section defines the grammar for assigning values to named attributes within TOSCA Node and
+ Relationship templates which are defined in their corresponding named types.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_ATTRIBUTE_VALUE_ASSIGNMENT>`__
+ """
+
+
+@has_fields
+@implements_specification('3.7.1', 'tosca-simple-1.0')
+class CapabilityAssignment(ExtensiblePresentation):
+ """
+ A capability assignment allows node template authors to assign values to properties and
+ attributes for a named capability definition that is part of a Node Template's type definition.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_CAPABILITY_ASSIGNMENT>`__
+ """
+
+ @object_dict_field(PropertyAssignment)
+ def properties(self):
+ """
+        An optional list of property assignments for the Capability definition.
+
+ :type: {:obj:`basestring`: :class:`PropertyAssignment`}
+ """
+
+ @object_dict_field(AttributeAssignment)
+ def attributes(self):
+ """
+        An optional list of attribute assignments for the Capability definition.
+
+ :type: {:obj:`basestring`: :class:`AttributeAssignment`}
+ """
+
+ @cachedmethod
+ def _get_definition(self, context):
+ node_type = self._container._get_type(context)
+ capability_definitions = node_type._get_capabilities(context) \
+ if node_type is not None else None
+ return capability_definitions.get(self._name) \
+ if capability_definitions is not None else None
+
+ @cachedmethod
+ def _get_type(self, context):
+ capability_definition = self._get_definition(context)
+ return capability_definition._get_type(context) \
+ if capability_definition is not None else None
+
+
+@has_fields
+@implements_specification('3.5.6', 'tosca-simple-1.0')
+class ArtifactAssignmentForType(ExtensiblePresentation):
+ """
+    An artifact definition defines a named, typed file that can be associated with a Node Type or
+    Node Template and used by an orchestration engine to facilitate deployment and implementation
+    of interface operations.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ENTITY_ARTIFACT_DEF>`__
+ """
+
+ @field_validator(type_validator('artifact type', convert_name_to_full_type_name,
+ 'artifact_types'))
+ @primitive_field(str, required=True)
+ def type(self):
+ """
+ The required artifact type for the artifact definition.
+
+ :type: :obj:`basestring`
+ """
+
+ @primitive_field(str, required=True)
+ def file(self):
+ """
+ The required URI string (relative or absolute) which can be used to locate the artifact's
+ file.
+
+ :type: :obj:`basestring`
+ """
+
+ @field_validator(type_validator('repository', 'repositories'))
+ @primitive_field(str)
+ def repository(self):
+ """
+ The optional name of the repository definition which contains the location of the external
+ repository that contains the artifact. The artifact is expected to be referenceable by its
+ file URI within the repository.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ The optional description for the artifact definition.
+
+ :type: :class:`Description`
+ """
+
+ @primitive_field(str)
+ def deploy_path(self):
+ """
+ The file path the associated file would be deployed into within the target node's container.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_dict_field(PropertyAssignment)
+ def properties(self):
+ """
+ ARIA NOTE: This field is not mentioned in the spec, but is implied.
+
+ :type: {:obj:`basestring`: :class:`PropertyAssignment`}
+ """
+
+ @cachedmethod
+ def _get_type(self, context):
+ return get_type_by_name(context, self.type, 'artifact_types')
+
+ @cachedmethod
+ def _get_repository(self, context):
+ return context.presentation.get_from_dict('service_template', 'repositories',
+ self.repository)
+
+ @cachedmethod
+ def _get_property_values(self, context):
+ return FrozenDict(get_assigned_and_defined_parameter_values(context, self, 'property'))
+
+ @cachedmethod
+ def _validate(self, context):
+ super(ArtifactAssignmentForType, self)._validate(context)
+
+
+class ArtifactAssignment(ArtifactAssignmentForType):
+ @cachedmethod
+ def _validate(self, context):
+ super(ArtifactAssignment, self)._validate(context)
+ self._get_property_values(context)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/data_types.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/data_types.py
new file mode 100644
index 0000000..216f1e4
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/data_types.py
@@ -0,0 +1,561 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+from datetime import (datetime, tzinfo, timedelta)
+try:
+ from functools import total_ordering
+except ImportError:
+ from total_ordering import total_ordering
+
+from aria.parser import implements_specification
+from aria.utils.collections import (StrictDict, OrderedDict)
+from aria.utils.formatting import safe_repr
+
+from .modeling.data_types import (coerce_to_data_type_class, report_issue_for_bad_format,
+ coerce_value)
+
+
+class Timezone(tzinfo):
+ """
+ Timezone as fixed offset in hours and minutes east of UTC.
+ """
+
+ def __init__(self, hours=0, minutes=0):
+ super(Timezone, self).__init__()
+ self._offset = timedelta(hours=hours, minutes=minutes)
+
+ def utcoffset(self, dt): # pylint: disable=unused-argument
+ return self._offset
+
+ def tzname(self, dt): # pylint: disable=unused-argument
+ return str(self._offset)
+
+ def dst(self, dt): # pylint: disable=unused-argument
+ return Timezone._ZERO
+
+ _ZERO = timedelta(0)
+
+
+UTC = Timezone()
+
+
+@total_ordering
+@implements_specification('timestamp', 'yaml-1.1')
+class Timestamp(object):
+    """
+ TOSCA timestamps follow the YAML specification, which in turn is a variant of ISO8601.
+
+ Long forms and short forms (without time of day and assuming UTC timezone) are supported for
+ parsing. The canonical form (for rendering) matches the long form at the UTC timezone.
+
+ See the `Timestamp Language-Independent Type for YAML Version 1.1 (Working Draft 2005-01-18)
+ <http://yaml.org/type/timestamp.html>`__
+    """
+
+ REGULAR_SHORT = r'^(?P<year>[0-9][0-9][0-9][0-9])-(?P<month>[0-9][0-9])-(?P<day>[0-9][0-9])$'
+ REGULAR_LONG = \
+ r'^(?P<year>[0-9][0-9][0-9][0-9])-(?P<month>[0-9][0-9]?)-(?P<day>[0-9][0-9]?)' + \
+        r'([Tt]|[ \t]+)' + \
+ r'(?P<hour>[0-9][0-9]?):(?P<minute>[0-9][0-9]):(?P<second>[0-9][0-9])' + \
+ r'(?P<fraction>\.[0-9]*)?' + \
+ r'(([ \t]*)Z|(?P<tzhour>[-+][0-9][0-9])?(:(?P<tzminute>[0-9][0-9])?)?)?$'
+ CANONICAL = '%Y-%m-%dT%H:%M:%S'
+
+ def __init__(self, entry_schema, constraints, value, aspect): # pylint: disable=unused-argument
+ value = str(value)
+ match = re.match(Timestamp.REGULAR_SHORT, value)
+ if match is not None:
+ # Parse short form
+ year = int(match.group('year'))
+ month = int(match.group('month'))
+ day = int(match.group('day'))
+ self.value = datetime(year, month, day, tzinfo=UTC)
+ else:
+ match = re.match(Timestamp.REGULAR_LONG, value)
+ if match is not None:
+ # Parse long form
+ year = int(match.group('year'))
+ month = int(match.group('month'))
+ day = int(match.group('day'))
+ hour = match.group('hour')
+ if hour is not None:
+ hour = int(hour)
+ minute = match.group('minute')
+ if minute is not None:
+ minute = int(minute)
+ second = match.group('second')
+ if second is not None:
+ second = int(second)
+ fraction = match.group('fraction')
+ if fraction is not None:
+ fraction = int(float(fraction) * 1000000.0) # convert to microseconds
+ tzhour = match.group('tzhour')
+ if tzhour is not None:
+ tzhour = int(tzhour)
+ else:
+ tzhour = 0
+ tzminute = match.group('tzminute')
+ if tzminute is not None:
+ tzminute = int(tzminute)
+ else:
+ tzminute = 0
+ self.value = datetime(year, month, day, hour, minute, second, fraction,
+ Timezone(tzhour, tzminute))
+ else:
+ raise ValueError(
+ 'timestamp must be formatted as YAML ISO8601 variant or "YYYY-MM-DD": %s'
+ % safe_repr(value))
+
+ @property
+ def as_datetime_utc(self):
+ return self.value.astimezone(UTC)
+
+ @property
+ def as_raw(self):
+ return self.__str__()
+
+ def __str__(self):
+ the_datetime = self.as_datetime_utc
+ return '%s%sZ' \
+ % (the_datetime.strftime(Timestamp.CANONICAL), Timestamp._fraction_as_str(the_datetime))
+
+ def __repr__(self):
+ return repr(self.__str__())
+
+ def __eq__(self, timestamp):
+ if not isinstance(timestamp, Timestamp):
+ return False
+ return self.value == timestamp.value
+
+ def __lt__(self, timestamp):
+ return self.value < timestamp.value
+
+ @staticmethod
+ def _fraction_as_str(the_datetime):
+ return '{0:g}'.format(the_datetime.microsecond / 1000000.0).lstrip('0')
+
+
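+# Illustrative sketch (not part of the upstream module) of how Timestamp behaves,
+# assuming the (entry_schema, constraints, value, aspect) constructor signature:
+#
+#   str(Timestamp(None, None, '2001-12-14t21:59:43.10-05:00', None))
+#     -> '2001-12-15T02:59:43.1Z'   (long form, normalized to UTC)
+#   str(Timestamp(None, None, '2002-12-14', None))
+#     -> '2002-12-14T00:00:00Z'     (short form, assumed to be UTC midnight)
+
+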
+@total_ordering
+@implements_specification('3.2.2', 'tosca-simple-1.0')
+class Version(object):
+ """
+ TOSCA supports the concept of "reuse" of type definitions, as well as template definitions which
+ could be version and change over time. It is important to provide a reliable, normative means to
+ represent a version string which enables the comparison and management of types and templates
+ over time. Therefore, the TOSCA TC intends to provide a normative version type (string) for this
+ purpose in future Working Drafts of this specification.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #TYPE_TOSCA_VERSION>`__
+ """
+
+ REGEX = \
+ r'^(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<fix>\d+)' + \
+ r'((\.(?P<qualifier>\d+))(\-(?P<build>\d+))?)?)?$'
+
+ @staticmethod
+ def key(version):
+ """
+ Key method for fast sorting.
+ """
+ return (version.major, version.minor, version.fix, version.qualifier, version.build)
+
+ def __init__(self, entry_schema, constraints, value, aspect): # pylint: disable=unused-argument
+ str_value = str(value)
+ match = re.match(Version.REGEX, str_value)
+ if match is None:
+ raise ValueError(
+ 'version must be formatted as <major_version>.<minor_version>'
+ '[.<fix_version>[.<qualifier>[-<build_version]]]: %s'
+ % safe_repr(value))
+
+ self.value = str_value
+
+ self.major = match.group('major')
+ self.major = int(self.major)
+ self.minor = match.group('minor')
+ self.minor = int(self.minor)
+ self.fix = match.group('fix')
+ if self.fix is not None:
+ self.fix = int(self.fix)
+ self.qualifier = match.group('qualifier')
+ if self.qualifier is not None:
+ self.qualifier = int(self.qualifier)
+ self.build = match.group('build')
+ if self.build is not None:
+ self.build = int(self.build)
+
+ @property
+ def as_raw(self):
+ return self.value
+
+ def __str__(self):
+ return self.value
+
+ def __repr__(self):
+ return repr(self.__str__())
+
+ def __eq__(self, version):
+ if not isinstance(version, Version):
+ return False
+ return (self.major, self.minor, self.fix, self.qualifier, self.build) == \
+ (version.major, version.minor, version.fix, version.qualifier, version.build)
+
+ def __lt__(self, version):
+ if self.major < version.major:
+ return True
+ elif self.major == version.major:
+ if self.minor < version.minor:
+ return True
+ elif self.minor == version.minor:
+ if self.fix < version.fix:
+ return True
+ elif self.fix == version.fix:
+ if self.qualifier < version.qualifier:
+ return True
+ elif self.qualifier == version.qualifier:
+ if self.build < version.build:
+ return True
+ return False
+
+
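+# Illustrative sketch (not in the upstream file) of Version parsing and sorting:
+#
+#   v = Version(None, None, '1.2.3.4-5', None)
+#   (v.major, v.minor, v.fix, v.qualifier, v.build)  ->  (1, 2, 3, 4, 5)
+#   sorted(versions, key=Version.key)   # tuple key; avoids the __lt__ cascade
+
+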
+@implements_specification('3.2.3', 'tosca-simple-1.0')
+class Range(object):
+ """
+ The range type can be used to define numeric ranges with a lower and upper boundary. For
+ example, this allows for specifying a range of ports to be opened in a firewall.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #TYPE_TOSCA_RANGE>`__
+ """
+
+ def __init__(self, entry_schema, constraints, value, aspect): # pylint: disable=unused-argument
+ if not isinstance(value, list):
+ raise ValueError('range value is not a list: %s' % safe_repr(value))
+ if len(value) != 2:
+ raise ValueError('range value does not have exactly 2 elements: %s' % safe_repr(value))
+
+ def is_int(v):
+ return isinstance(v, int) and (not isinstance(v, bool)) # In Python bool is an int
+
+ if not is_int(value[0]):
+ raise ValueError('lower bound of range is not a valid integer: %s'
+ % safe_repr(value[0]))
+
+ if value[1] != 'UNBOUNDED':
+ if not is_int(value[1]):
+ raise ValueError('upper bound of range is not a valid integer or "UNBOUNDED": %s'
+                                 % safe_repr(value[1]))
+
+ if value[0] >= value[1]:
+ raise ValueError(
+ 'upper bound of range is not greater than the lower bound: %s >= %s'
+ % (safe_repr(value[0]), safe_repr(value[1])))
+
+ self.value = value
+
+ def is_in(self, value):
+ if value < self.value[0]:
+ return False
+ if (self.value[1] != 'UNBOUNDED') and (value > self.value[1]):
+ return False
+ return True
+
+ @property
+ def as_raw(self):
+ return list(self.value)
+
+
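+# Illustrative sketch (not in the upstream file) of Range bounds checking:
+#
+#   r = Range(None, None, [1024, 'UNBOUNDED'], None)
+#   r.is_in(8080)  ->  True
+#   r.is_in(80)    ->  False
+
+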
+@implements_specification('3.2.4', 'tosca-simple-1.0')
+class List(list):
+ """
+    The list type allows for specifying multiple values for a parameter or property. For example, if
+ an application allows for being configured to listen on multiple ports, a list of ports could be
+ configured using the list data type.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #TYPE_TOSCA_LIST>`__
+ """
+
+ @staticmethod
+ def _create(context, presentation, entry_schema, constraints, value, aspect): # pylint: disable=unused-argument
+ if not isinstance(value, list):
+ raise ValueError('"list" data type value is not a list: %s' % safe_repr(value))
+
+ entry_schema_type = entry_schema._get_type(context)
+ entry_schema_constraints = entry_schema.constraints
+
+ the_list = List()
+ for v in value:
+ v = coerce_value(context, presentation, entry_schema_type, None,
+ entry_schema_constraints, v, aspect)
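+            # entries that coerce to None (e.g., on coercion failure) are
+            # skipped rather than failing the whole list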
+ if v is not None:
+ the_list.append(v)
+
+ return the_list
+
+ # Can't define as property because it's old-style Python class
+ def as_raw(self):
+ return list(self)
+
+
+@implements_specification('3.2.5', 'tosca-simple-1.0')
+class Map(StrictDict):
+ """
+    The map type allows for specifying multiple values for a parameter or property as a map. In
+ contrast to the list type, where each entry can only be addressed by its index in the list,
+ entries in a map are named elements that can be addressed by their keys.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #TYPE_TOSCA_MAP>`__
+ """
+
+ @staticmethod
+ def _create(context, presentation, entry_schema, constraints, value, aspect): # pylint: disable=unused-argument
+ if not isinstance(value, dict):
+ raise ValueError('"map" data type value is not a dict: %s' % safe_repr(value))
+
+ if entry_schema is None:
+ raise ValueError('"map" data type does not define "entry_schema"')
+
+ entry_schema_type = entry_schema._get_type(context)
+ entry_schema_constraints = entry_schema.constraints
+
+ the_map = Map()
+ for k, v in value.iteritems():
+ v = coerce_value(context, presentation, entry_schema_type, None,
+ entry_schema_constraints, v, aspect)
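+            # as with List above, entries that coerce to None are dropped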
+ if v is not None:
+ the_map[k] = v
+
+ return the_map
+
+ def __init__(self, items=None):
+ super(Map, self).__init__(items, key_class=str)
+
+ # Can't define as property because it's old-style Python class
+ def as_raw(self):
+ return OrderedDict(self)
+
+
+@total_ordering
+@implements_specification('3.2.6', 'tosca-simple-1.0')
+class Scalar(object):
+ """
+ The scalar-unit type can be used to define scalar values along with a unit from the list of
+ recognized units.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #TYPE_TOSCA_SCALAR_UNIT>`__
+ """
+
+ @staticmethod
+ def key(scalar):
+ """
+ Key method for fast sorting.
+ """
+ return scalar.value
+
+ def __init__(self, entry_schema, constraints, value, aspect): # pylint: disable=unused-argument
+ str_value = str(value)
+ match = re.match(self.REGEX, str_value) # pylint: disable=no-member
+ if match is None:
+ raise ValueError('scalar must be formatted as <scalar> <unit>: %s' % safe_repr(value))
+
+ self.factor = float(match.group('scalar'))
+ if self.factor < 0:
+ raise ValueError('scalar is negative: %s' % safe_repr(self.factor))
+
+ self.unit = match.group('unit')
+
+ unit_lower = self.unit.lower()
+ unit_size = None
+ for k, v in self.UNITS.iteritems(): # pylint: disable=no-member
+ if k.lower() == unit_lower:
+ self.unit = k
+ unit_size = v
+ break
+ if unit_size is None:
+ raise ValueError('scalar specified with unsupported unit: %s' % safe_repr(self.unit))
+
+ self.value = self.TYPE(self.factor * unit_size) # pylint: disable=no-member
+
+ @property
+ def as_raw(self):
+ return OrderedDict((
+ ('value', self.value),
+ ('factor', self.factor),
+ ('unit', self.unit),
+ ('unit_size', self.UNITS[self.unit]))) # pylint: disable=no-member
+
+ def __str__(self):
+ return '%s %s' % (self.value, self.UNIT) # pylint: disable=no-member
+
+ def __repr__(self):
+ return repr(self.__str__())
+
+ def __eq__(self, scalar):
+ if isinstance(scalar, Scalar):
+ value = scalar.value
+ else:
+ value = self.TYPE(scalar) # pylint: disable=no-member
+ return self.value == value
+
+ def __lt__(self, scalar):
+ if isinstance(scalar, Scalar):
+ value = scalar.value
+ else:
+ value = self.TYPE(scalar) # pylint: disable=no-member
+ return self.value < value
+
+
+@implements_specification('3.2.6.4', 'tosca-simple-1.0')
+class ScalarSize(Scalar):
+ """
+ Integer scalar for counting bytes.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #TYPE_TOSCA_SCALAR_UNIT_SIZE>`__
+ """
+
+ # See: http://www.regular-expressions.info/floatingpoint.html
+ REGEX = \
+ r'^(?P<scalar>[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)\s*(?P<unit>B|kB|KiB|MB|MiB|GB|GiB|TB|TiB)$'
+
+ UNITS = {
+ 'B': 1,
+ 'kB': 1000,
+ 'KiB': 1024,
+ 'MB': 1000000,
+ 'MiB': 1048576,
+ 'GB': 1000000000,
+ 'GiB': 1073741824,
+ 'TB': 1000000000000,
+ 'TiB': 1099511627776}
+
+ TYPE = int
+ UNIT = 'bytes'
+
+
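+# Illustrative sketch (not in the upstream file) of the unit normalization done
+# by the Scalar machinery above:
+#
+#   ScalarSize(None, None, '1.5 GB', None).value   ->  1500000000
+#   str(ScalarSize(None, None, '2 MiB', None))     ->  '2097152 bytes'
+
+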
+@implements_specification('3.2.6.5', 'tosca-simple-1.0')
+class ScalarTime(Scalar):
+ """
+ Floating point scalar for counting seconds.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #TYPE_TOSCA_SCALAR_UNIT_TIME>`__
+ """
+
+ # See: http://www.regular-expressions.info/floatingpoint.html
+ REGEX = r'^(?P<scalar>[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)\s*(?P<unit>ns|us|ms|s|m|h|d)$'
+
+ UNITS = {
+ 'ns': 0.000000001,
+ 'us': 0.000001,
+ 'ms': 0.001,
+ 's': 1.0,
+ 'm': 60.0,
+ 'h': 3600.0,
+ 'd': 86400.0}
+
+ TYPE = float
+ UNIT = 'seconds'
+
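+# e.g. ScalarTime(None, None, '2 m', None).value  ->  120.0  (minutes -> seconds)
+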
+
+@implements_specification('3.2.6.6', 'tosca-simple-1.0')
+class ScalarFrequency(Scalar):
+ """
+ Floating point scalar for counting cycles per second (Hz).
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #TYPE_TOSCA_SCALAR_UNIT_FREQUENCY>`__
+ """
+
+ # See: http://www.regular-expressions.info/floatingpoint.html
+ REGEX = r'^(?P<scalar>[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)\s*(?P<unit>Hz|kHz|MHz|GHz)$'
+
+ UNITS = {
+ 'Hz': 1.0,
+ 'kHz': 1000.0,
+ 'MHz': 1000000.0,
+ 'GHz': 1000000000.0}
+
+ TYPE = float
+ UNIT = 'Hz'
+
+
+#
+# The following are hooked in the YAML as 'coerce_value' extensions
+#
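+# For example, the profile YAML is assumed to bind them roughly like this
+# (illustrative, not a verbatim excerpt):
+#
+#   scalar-unit.size:
+#     _extensions:
+#       coerce_value: aria_extension_tosca.simple_v1_0.data_types.coerce_scalar_unit_size
+#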
+
+def coerce_timestamp(context, presentation, the_type, entry_schema, constraints, value, aspect): # pylint: disable=unused-argument
+ return coerce_to_data_type_class(context, presentation, Timestamp, entry_schema, constraints,
+ value, aspect)
+
+
+def coerce_version(context, presentation, the_type, entry_schema, constraints, value, aspect): # pylint: disable=unused-argument
+ return coerce_to_data_type_class(context, presentation, Version, entry_schema, constraints,
+ value, aspect)
+
+
+def coerce_range(context, presentation, the_type, entry_schema, constraints, value, aspect):
+ if aspect == 'in_range':
+ # When we're in a "in_range" constraint, the values are *not* themselves ranges, but numbers
+ try:
+ return float(value)
+        except (ValueError, TypeError) as e:
+            report_issue_for_bad_format(context, presentation, the_type, value, aspect, e)
+ else:
+ return coerce_to_data_type_class(context, presentation, Range, entry_schema, constraints,
+ value, aspect)
+
+
+def coerce_list(context, presentation, the_type, entry_schema, constraints, value, aspect): # pylint: disable=unused-argument
+ return coerce_to_data_type_class(context, presentation, List, entry_schema, constraints,
+ value, aspect)
+
+
+def coerce_map_value(context, presentation, the_type, entry_schema, constraints, value, aspect): # pylint: disable=unused-argument
+ return coerce_to_data_type_class(context, presentation, Map, entry_schema, constraints, value,
+ aspect)
+
+
+def coerce_scalar_unit_size(context, presentation, the_type, entry_schema, constraints, value, # pylint: disable=unused-argument
+ aspect):
+ return coerce_to_data_type_class(context, presentation, ScalarSize, entry_schema, constraints,
+ value, aspect)
+
+
+def coerce_scalar_unit_time(context, presentation, the_type, entry_schema, constraints, value, # pylint: disable=unused-argument
+ aspect):
+ return coerce_to_data_type_class(context, presentation, ScalarTime, entry_schema, constraints,
+ value, aspect)
+
+
+def coerce_scalar_unit_frequency(context, presentation, the_type, entry_schema, constraints, value, # pylint: disable=unused-argument
+ aspect):
+ return coerce_to_data_type_class(context, presentation, ScalarFrequency, entry_schema,
+ constraints, value, aspect)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/definitions.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/definitions.py
new file mode 100644
index 0000000..9158776
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/definitions.py
@@ -0,0 +1,518 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.collections import FrozenDict
+from aria.utils.caching import cachedmethod
+from aria.parser import implements_specification
+from aria.parser.presentation import (has_fields, short_form_field, allow_unknown_fields,
+ primitive_field, primitive_list_field, object_field,
+ object_list_field, object_dict_field,
+ object_dict_unknown_fields, field_validator,
+ field_getter, type_validator, list_type_validator)
+
+from .data_types import Range
+from .misc import (Description, ConstraintClause, OperationImplementation, EntrySchema)
+from .presentation.extensible import ExtensiblePresentation
+from .presentation.field_getters import data_type_class_getter
+from .presentation.field_validators import (data_type_validator, data_value_validator,
+ entry_schema_validator)
+from .presentation.types import (convert_name_to_full_type_name, get_type_by_name)
+from .modeling.data_types import get_data_type, get_property_constraints
+from .modeling.interfaces import (get_and_override_input_definitions_from_type,
+ get_and_override_operation_definitions_from_type)
+
+
+@has_fields
+@implements_specification('3.5.8', 'tosca-simple-1.0')
+class PropertyDefinition(ExtensiblePresentation):
+ """
+ A property definition defines a named, typed value and related data that can be associated with
+ an entity defined in this specification (e.g., Node Types, Relationship Types, Capability Types,
+ etc.). Properties are used by template authors to provide input values to TOSCA entities which
+ indicate their "desired state" when they are instantiated. The value of a property can be
+ retrieved using the ``get_property`` function within TOSCA Service Templates.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_PROPERTY_DEFN>`__
+ """
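+
+    # Illustrative (non-normative) sketch of the grammar this presentation parses;
+    # "port" is a placeholder property name:
+    #
+    #   properties:
+    #     port:
+    #       type: integer
+    #       description: the listen port
+    #       default: 8080
+    #       constraints:
+    #         - in_range: [ 1, 65535 ]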
+
+ @field_validator(data_type_validator())
+ @primitive_field(str, required=True)
+ def type(self):
+ """
+ The required data type for the property.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ The optional description for the property.
+
+ :type: :class:`Description`
+ """
+
+ @primitive_field(bool, default=True)
+ def required(self):
+ """
+ An optional key that declares a property as required (true) or not (false).
+
+ :type: bool
+ """
+
+ @field_validator(data_value_validator)
+ @primitive_field()
+ def default(self):
+ """
+ An optional key that may provide a value to be used as a default if not provided by another
+ means.
+
+ :type: :obj:`basestring`
+ """
+
+ @primitive_field(str, default='supported', allowed=('supported', 'unsupported', 'experimental',
+ 'deprecated'))
+ @implements_specification(section='3.5.8.3', spec='tosca-simple-1.0')
+ def status(self):
+ """
+ The optional status of the property relative to the specification or implementation.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_list_field(ConstraintClause)
+ def constraints(self):
+ """
+ The optional list of sequenced constraint clauses for the property.
+
+ :type: list of (str, :class:`ConstraintClause`)
+ """
+
+ @field_validator(entry_schema_validator)
+ @object_field(EntrySchema)
+ def entry_schema(self):
+ """
+ The optional key that is used to declare the name of the Datatype definition for entries of
+ set types such as the TOSCA list or map.
+
+        :type: :class:`EntrySchema`
+ """
+
+ @cachedmethod
+ def _get_type(self, context):
+ return get_data_type(context, self, 'type')
+
+ @cachedmethod
+ def _get_constraints(self, context):
+ return get_property_constraints(context, self)
+
+
+@has_fields
+@implements_specification('3.5.10', 'tosca-simple-1.0')
+class AttributeDefinition(ExtensiblePresentation):
+ """
+ An attribute definition defines a named, typed value that can be associated with an entity
+ defined in this specification (e.g., a Node, Relationship or Capability Type). Specifically, it
+ is used to expose the "actual state" of some property of a TOSCA entity after it has been
+ deployed and instantiated (as set by the TOSCA orchestrator). Attribute values can be retrieved
+ via the ``get_attribute`` function from the instance model and used as values to other
+ entities within TOSCA Service Templates.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_ATTRIBUTE_DEFN>`__
+ """
+
+ @field_validator(data_type_validator())
+ @primitive_field(str, required=True)
+ def type(self):
+ """
+ The required data type for the attribute.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ The optional description for the attribute.
+
+ :type: :class:`Description`
+ """
+
+ @field_validator(data_value_validator)
+ @primitive_field()
+ def default(self):
+ """
+ An optional key that may provide a value to be used as a default if not provided by another
+ means.
+
+ This value SHALL be type compatible with the type declared by the property definition's type
+ keyname.
+
+ :type: :obj:`basestring`
+ """
+
+ @primitive_field(str, default='supported', allowed=('supported', 'unsupported', 'experimental',
+ 'deprecated'))
+ def status(self):
+ """
+ The optional status of the attribute relative to the specification or implementation.
+
+ :type: :obj:`basestring`
+ """
+
+ @field_validator(entry_schema_validator)
+ @object_field(EntrySchema)
+ def entry_schema(self):
+ """
+ The optional key that is used to declare the name of the Datatype definition for entries of
+ set types such as the TOSCA list or map.
+
+        :type: :class:`EntrySchema`
+ """
+
+ @cachedmethod
+ def _get_type(self, context):
+ return get_data_type(context, self, 'type')
+
+
+@has_fields
+@implements_specification('3.5.12', 'tosca-simple-1.0')
+class ParameterDefinition(PropertyDefinition):
+ """
+ A parameter definition is essentially a TOSCA property definition; however, it also allows a
+ value to be assigned to it (as for a TOSCA property assignment). In addition, in the case of
+ output parameters, it can optionally inherit the data type of the value assigned to it rather
+ than have an explicit data type defined for it.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_PARAMETER_DEF>`__
+ """
+
+ @field_validator(data_type_validator())
+ @primitive_field(str)
+ def type(self):
+ """
+ The required data type for the parameter.
+
+        Note: this keyname is required for a TOSCA Property definition, but not for a TOSCA
+        Parameter definition.
+
+ :type: :obj:`basestring`
+ """
+
+ @field_validator(data_value_validator)
+ @primitive_field()
+ def value(self):
+ """
+ The type-compatible value to assign to the named parameter. Parameter values may be provided
+ as the result from the evaluation of an expression or a function.
+ """
+
+
+@short_form_field('implementation')
+@has_fields
+@implements_specification('3.5.13-1', 'tosca-simple-1.0')
+class OperationDefinition(ExtensiblePresentation):
+ """
+ An operation definition defines a named function or procedure that can be bound to an
+ implementation artifact (e.g., a script).
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_OPERATION_DEF>`__
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ The optional description string for the associated named operation.
+
+ :type: :class:`Description`
+ """
+
+ @object_field(OperationImplementation)
+ def implementation(self):
+ """
+ The optional implementation artifact name (e.g., a script file name within a TOSCA CSAR
+ file).
+
+ :type: :class:`OperationImplementation`
+ """
+
+ @object_dict_field(PropertyDefinition)
+ def inputs(self):
+ """
+ The optional list of input property definitions available to all defined operations for
+ interface definitions that are within TOSCA Node or Relationship Type definitions. This
+ includes when interface definitions are included as part of a Requirement definition in a
+ Node Type.
+
+ :type: {:obj:`basestring`: :class:`PropertyDefinition`}
+ """
+
+
+@allow_unknown_fields
+@has_fields
+@implements_specification('3.5.14-1', 'tosca-simple-1.0')
+class InterfaceDefinition(ExtensiblePresentation):
+ """
+ An interface definition defines a named interface that can be associated with a Node or
+ Relationship Type.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_INTERFACE_DEF>`__
+ """
+
+ @field_validator(type_validator('interface type', convert_name_to_full_type_name,
+ 'interface_types'))
+ @primitive_field(str)
+ def type(self):
+ """
+ ARIA NOTE: This field is not mentioned in the spec, but is implied.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_dict_field(PropertyDefinition)
+ def inputs(self):
+ """
+ The optional list of input property definitions available to all defined operations for
+ interface definitions that are within TOSCA Node or Relationship Type definitions. This
+ includes when interface definitions are included as part of a Requirement definition in a
+ Node Type.
+
+ :type: {:obj:`basestring`: :class:`PropertyDefinition`}
+ """
+
+ @object_dict_unknown_fields(OperationDefinition)
+ def operations(self):
+ """
+ :type: {:obj:`basestring`: :class:`OperationDefinition`}
+ """
+
+ @cachedmethod
+ def _get_type(self, context):
+ return get_type_by_name(context, self.type, 'interface_types')
+
+ @cachedmethod
+ def _get_inputs(self, context):
+ return FrozenDict(get_and_override_input_definitions_from_type(context, self))
+
+ @cachedmethod
+ def _get_operations(self, context):
+ return FrozenDict(get_and_override_operation_definitions_from_type(context, self))
+
+ def _validate(self, context):
+ super(InterfaceDefinition, self)._validate(context)
+ if self.operations:
+ for operation in self.operations.itervalues(): # pylint: disable=no-member
+ operation._validate(context)
+
+
+@short_form_field('type')
+@has_fields
+class RelationshipDefinition(ExtensiblePresentation):
+ """
+ Relationship definition.
+ """
+
+ @field_validator(type_validator('relationship type', convert_name_to_full_type_name,
+ 'relationship_types'))
+ @primitive_field(str, required=True)
+ def type(self):
+ """
+ The optional reserved keyname used to provide the name of the Relationship Type for the
+ requirement definition's relationship keyname.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_dict_field(InterfaceDefinition)
+ def interfaces(self):
+ """
+ The optional reserved keyname used to reference declared (named) interface definitions of
+ the corresponding Relationship Type in order to declare additional Property definitions for
+ these interfaces or operations of these interfaces.
+
+        :type: {:obj:`basestring`: :class:`InterfaceDefinition`}
+ """
+
+ @cachedmethod
+ def _get_type(self, context):
+ return get_type_by_name(context, self.type, 'relationship_types')
+
+
+@short_form_field('capability')
+@has_fields
+@implements_specification('3.6.2', 'tosca-simple-1.0')
+class RequirementDefinition(ExtensiblePresentation):
+ """
+    The Requirement definition describes a named requirement (dependency) of a TOSCA Node Type or
+ Node template which needs to be fulfilled by a matching Capability definition declared by
+ another TOSCA modelable entity. The requirement definition may itself include the specific name
+ of the fulfilling entity (explicitly) or provide an abstract type, along with additional
+ filtering characteristics, that a TOSCA orchestrator can use to fulfill the capability at
+ runtime (implicitly).
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_REQUIREMENT_DEF>`__
+ """
+
+ @field_validator(type_validator('capability type', convert_name_to_full_type_name,
+ 'capability_types'))
+ @primitive_field(str, required=True)
+ def capability(self):
+ """
+        The required reserved keyname that provides the name of a valid Capability Type that can
+        fulfill the requirement.
+
+ :type: :obj:`basestring`
+ """
+
+ @field_validator(type_validator('node type', convert_name_to_full_type_name,
+ 'node_types'))
+ @primitive_field(str)
+ def node(self):
+ """
+ The optional reserved keyname used to provide the name of a valid Node Type that contains
+ the capability definition that can be used to fulfill the requirement.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(RelationshipDefinition)
+ def relationship(self):
+ """
+ The optional reserved keyname used to provide the name of a valid Relationship Type to
+ construct when fulfilling the requirement.
+
+ :type: :class:`RelationshipDefinition`
+ """
+
+ @field_getter(data_type_class_getter(Range))
+ @primitive_field()
+ def occurrences(self):
+ """
+ The optional minimum and maximum occurrences for the requirement.
+
+ Note: the keyword UNBOUNDED is also supported to represent any positive integer.
+
+ :type: :class:`Range`
+ """
+
+ @cachedmethod
+ def _get_capability_type(self, context):
+ return get_type_by_name(context, self.capability, 'capability_types')
+
+ @cachedmethod
+ def _get_node_type(self, context):
+ return context.presentation.get_from_dict('service_template', 'node_types', self.node)
+
+
+@short_form_field('type')
+@has_fields
+@implements_specification('3.6.1', 'tosca-simple-1.0')
+class CapabilityDefinition(ExtensiblePresentation):
+ """
+ A capability definition defines a named, typed set of data that can be associated with Node Type
+ or Node Template to describe a transparent capability or feature of the software component the
+ node describes.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_CAPABILITY_DEFN>`__
+ """
+
+ @field_validator(type_validator('capability type', convert_name_to_full_type_name,
+ 'capability_types'))
+ @primitive_field(str, required=True)
+ def type(self):
+ """
+ The required name of the Capability Type the capability definition is based upon.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ The optional description of the Capability definition.
+
+ :type: :class:`Description`
+ """
+
+ @object_dict_field(PropertyDefinition)
+ def properties(self):
+ """
+ An optional list of property definitions for the Capability definition.
+
+ :type: {:obj:`basestring`: :class:`PropertyDefinition`}
+ """
+
+ @object_dict_field(AttributeDefinition)
+ def attributes(self):
+ """
+ An optional list of attribute definitions for the Capability definition.
+
+ :type: {:obj:`basestring`: :class:`AttributeDefinition`}
+ """
+
+ @field_validator(list_type_validator('node type', convert_name_to_full_type_name,
+ 'node_types'))
+ @primitive_list_field(str)
+ def valid_source_types(self):
+ """
+ An optional list of one or more valid names of Node Types that are supported as valid
+ sources of any relationship established to the declared Capability Type.
+
+ :type: [:obj:`basestring`]
+ """
+
+ @field_getter(data_type_class_getter(Range))
+ @primitive_field()
+ def occurrences(self):
+ """
+ The optional minimum and maximum occurrences for the capability. By default, an exported
+ Capability should allow at least one relationship to be formed with it with a maximum of
+ ``UNBOUNDED`` relationships.
+
+ Note: the keyword ``UNBOUNDED`` is also supported to represent any positive integer.
+
+ ARIA NOTE: The spec seems wrong here: the implied default should be ``[0,UNBOUNDED]``, not
+    ``[1,UNBOUNDED]``, otherwise it would imply that at least one relationship *must* be
+ formed.
+
+ :type: :class:`Range`
+ """
+
+ @cachedmethod
+ def _get_type(self, context):
+ return get_type_by_name(context, self.type, 'capability_types')
+
+ @cachedmethod
+ def _get_parent(self, context):
+ container_parent = self._container._get_parent(context)
+ container_parent_capabilities = container_parent._get_capabilities(context) \
+ if container_parent is not None else None
+ return container_parent_capabilities.get(self._name) \
+ if container_parent_capabilities is not None else None
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/filters.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/filters.py
new file mode 100644
index 0000000..95d84b2
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/filters.py
@@ -0,0 +1,107 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.caching import cachedmethod
+from aria.parser import implements_specification
+from aria.parser.presentation import (has_fields, object_sequenced_list_field, field_validator)
+
+from .misc import ConstraintClause
+from .presentation.extensible import ExtensiblePresentation
+from .presentation.field_validators import (node_filter_properties_validator,
+ node_filter_capabilities_validator)
+
+
+@has_fields
+class CapabilityFilter(ExtensiblePresentation):
+ """
+ Capability filter.
+ """
+
+ @object_sequenced_list_field(ConstraintClause)
+ def properties(self):
+ pass
+
+ @cachedmethod
+ def _get_node_type(self, context):
+ return self._container._get_node_type(context)
+
+ @cachedmethod
+ def _get_type_for_name(self, context, name):
+ node_type = self._get_node_type(context)
+ if node_type is not None:
+ capabilities = node_type._get_capabilities(context)
+ capability = capabilities.get(self._name)
+ properties = capability.properties if capability is not None else None
+ prop = properties.get(name) if properties is not None else None
+ return prop._get_type(context) if prop is not None else None
+
+ return None
+
+
+@has_fields
+@implements_specification('3.5.4', 'tosca-simple-1.0')
+class NodeFilter(ExtensiblePresentation):
+ """
+ A node filter definition defines criteria for selection of a TOSCA Node Template based upon the
+ template's property values, capabilities and capability properties.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_NODE_FILTER_DEFN>`__
+ """
+
+ @field_validator(node_filter_properties_validator)
+ @object_sequenced_list_field(ConstraintClause)
+ @implements_specification('3.5.3', 'tosca-simple-1.0')
+ def properties(self):
+ """
+ An optional sequenced list of property filters that would be used to select (filter)
+ matching TOSCA entities (e.g., Node Template, Node Type, Capability Types, etc.) based upon
+ their property definitions' values.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_PROPERTY_FILTER_DEFN>`__
+
+ :type: list of (str, :class:`ConstraintClause`)
+ """
+
+ @field_validator(node_filter_capabilities_validator)
+ @object_sequenced_list_field(CapabilityFilter)
+ def capabilities(self):
+ """
+ An optional sequenced list of capability filters that would be used to select (filter)
+ matching TOSCA entities (e.g., Node Template, Node Type, Capability Types, etc.) based upon
+ their capabilities' property definitions' values.
+
+ :type: list of (str, :class:`CapabilityFilter`)
+ """
+
+ @cachedmethod
+ def _get_node_type(self, context):
+ if hasattr(self._container, '_get_node'):
+ node_type, node_type_variant = self._container._get_node(context)
+ return node_type if node_type_variant == 'node_type' else None
+ return None
+
+ @cachedmethod
+ def _get_type_for_name(self, context, name):
+ node_type = self._get_node_type(context)
+ if node_type is not None:
+ properties = node_type._get_properties(context)
+ prop = properties.get(name)
+ return prop._get_type(context) if prop is not None else None
+
+ return None
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/misc.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/misc.py
new file mode 100644
index 0000000..221163c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/misc.py
@@ -0,0 +1,444 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.caching import cachedmethod
+from aria.utils.console import puts
+from aria.utils.formatting import as_raw
+from aria.parser import implements_specification
+from aria.parser.presentation import (AsIsPresentation, has_fields, allow_unknown_fields,
+ short_form_field, primitive_field, primitive_list_field,
+ primitive_dict_unknown_fields, object_field,
+ object_list_field, object_dict_field, field_validator,
+ type_validator)
+
+from .modeling.data_types import (get_data_type, get_data_type_value, get_property_constraints,
+ apply_constraint_to_value)
+from .modeling.substitution_mappings import (validate_substitution_mappings_requirement,
+ validate_substitution_mappings_capability)
+from .presentation.extensible import ExtensiblePresentation
+from .presentation.field_validators import (constraint_clause_field_validator,
+ constraint_clause_in_range_validator,
+ constraint_clause_valid_values_validator,
+ constraint_clause_pattern_validator,
+ data_type_validator)
+from .presentation.types import (convert_name_to_full_type_name, get_type_by_name)
+
+
+
+@implements_specification('3.5.1', 'tosca-simple-1.0')
+class Description(AsIsPresentation):
+ """
+ Human-readable description.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_DESCRIPTION>`__
+ """
+
+ def __init__(self, name=None, raw=None, container=None, cls=None): # pylint: disable=unused-argument
+ super(Description, self).__init__(name, raw, container, cls=unicode)
+
+ def _dump(self, context):
+ value = as_raw(self.value)
+ puts(context.style.meta_style(value))
+
+
+@allow_unknown_fields
+@has_fields
+@implements_specification('3.9.3.2', 'tosca-simple-1.0')
+class MetaData(ExtensiblePresentation):
+ """
+ Meta data.
+ """
+
+ @primitive_field(str)
+ @implements_specification('3.9.3.3', 'tosca-simple-1.0')
+ def template_name(self):
+ """
+ This optional metadata keyname can be used to declare the name of the service template as a
+ single-line string value.
+ """
+
+ @primitive_field(str)
+ @implements_specification('3.9.3.4', 'tosca-simple-1.0')
+ def template_author(self):
+ """
+ This optional metadata keyname can be used to declare the author(s) of the service template
+ as a single-line string value.
+ """
+
+ @primitive_field(str)
+ @implements_specification('3.9.3.5', 'tosca-simple-1.0')
+ def template_version(self):
+ """
+ This optional metadata keyname can be used to declare a domain specific version of the
+ service template as a single-line string value.
+ """
+
+ @primitive_dict_unknown_fields()
+ def custom(self):
+ """
+ :type: dict
+ """
+
+
+@short_form_field('url')
+@has_fields
+@implements_specification('3.5.5', 'tosca-simple-1.0')
+class Repository(ExtensiblePresentation):
+ """
+ A repository definition defines a named external repository which contains deployment and
+ implementation artifacts that are referenced within the TOSCA Service Template.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_REPOSITORY_DEF>`__
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ The optional description for the repository.
+
+ :type: :class:`Description`
+ """
+
+ @primitive_field(str, required=True)
+ def url(self):
+ """
+ The required URL or network address used to access the repository.
+
+ :type: :obj:`basestring`
+ """
+
+ @primitive_field()
+ def credential(self):
+ """
+ The optional Credential used to authorize access to the repository.
+
+ :type: tosca.datatypes.Credential
+ """
+
+ @cachedmethod
+ def _get_credential(self, context):
+ return get_data_type_value(context, self, 'credential', 'tosca.datatypes.Credential')
+
+
+@short_form_field('file')
+@has_fields
+@implements_specification('3.5.7', 'tosca-simple-1.0')
+class Import(ExtensiblePresentation):
+ """
+ An import definition is used within a TOSCA Service Template to locate and uniquely name another
+ TOSCA Service Template file which has type and template definitions to be imported (included)
+ and referenced within another Service Template.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_IMPORT_DEF>`__
+ """
+
+ @primitive_field(str, required=True)
+ def file(self):
+ """
+ The required symbolic name for the imported file.
+
+ :type: :obj:`basestring`
+ """
+
+ @primitive_field(str)
+ def repository(self):
+ """
+ The optional symbolic name of the repository definition where the imported file can be found
+ as a string.
+
+ :type: :obj:`basestring`
+ """
+
+ @primitive_field(str)
+ def namespace_uri(self):
+ """
+ The optional namespace URI that will be applied to type definitions found within the
+ imported file as a string.
+
+ :type: :obj:`basestring`
+ """
+
+ @primitive_field(str)
+ def namespace_prefix(self):
+ """
+ The optional namespace prefix (alias) that will be used to indicate the namespace_uri when
+ forming a qualified name (i.e., qname) when referencing type definitions from the imported
+ file.
+
+ :type: :obj:`basestring`
+ """
+
+
+@has_fields
+@implements_specification('3.5.2-1', 'tosca-simple-1.0')
+class ConstraintClause(ExtensiblePresentation):
+ """
+ A constraint clause defines an operation along with one or more compatible values that can be
+ used to define a constraint on a property or parameter's allowed values when it is defined in a
+ TOSCA Service Template or one of its entities.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_CONSTRAINTS_CLAUSE>`__
+ """
+
+ @field_validator(constraint_clause_field_validator)
+ @primitive_field()
+ def equal(self):
+ """
+ Constrains a property or parameter to a value equal to ('=') the value declared.
+ """
+
+ @field_validator(constraint_clause_field_validator)
+ @primitive_field()
+ def greater_than(self):
+ """
+ Constrains a property or parameter to a value greater than ('>') the value declared.
+ """
+
+ @field_validator(constraint_clause_field_validator)
+ @primitive_field()
+ def greater_or_equal(self):
+ """
+ Constrains a property or parameter to a value greater than or equal to ('>=') the value
+ declared.
+ """
+
+ @field_validator(constraint_clause_field_validator)
+ @primitive_field()
+ def less_than(self):
+ """
+ Constrains a property or parameter to a value less than ('<') the value declared.
+ """
+
+ @field_validator(constraint_clause_field_validator)
+ @primitive_field()
+ def less_or_equal(self):
+ """
+ Constrains a property or parameter to a value less than or equal to ('<=') the value
+ declared.
+ """
+
+ @field_validator(constraint_clause_in_range_validator)
+ @primitive_list_field()
+ def in_range(self):
+ """
+ Constrains a property or parameter to a value in range of (inclusive) the two values
+ declared.
+
+ Note: subclasses or templates of types that declare a property with the ``in_range``
+ constraint MAY only further restrict the range specified by the parent type.
+ """
+
+ @field_validator(constraint_clause_valid_values_validator)
+ @primitive_list_field()
+ def valid_values(self):
+ """
+ Constrains a property or parameter to a value that is in the list of declared values.
+ """
+
+ @primitive_field(int)
+ def length(self):
+ """
+ Constrains the property or parameter to a value of a given length.
+ """
+
+ @primitive_field(int)
+ def min_length(self):
+ """
+ Constrains the property or parameter to a value of a minimum length.
+ """
+
+ @primitive_field(int)
+ def max_length(self):
+ """
+ Constrains the property or parameter to a value of a maximum length.
+ """
+
+ @field_validator(constraint_clause_pattern_validator)
+ @primitive_field(str)
+ def pattern(self):
+ """
+ Constrains the property or parameter to a value that is allowed by the provided regular
+ expression.
+
+ Note: Future drafts of this specification will detail the use of regular expressions and
+ reference an appropriate standardized grammar.
+ """
+
+ @cachedmethod
+ def _get_type(self, context):
+ if hasattr(self._container, '_get_type_for_name'):
+ # NodeFilter or CapabilityFilter
+ return self._container._get_type_for_name(context, self._name)
+ elif hasattr(self._container, '_get_type'):
+ # Properties
+ return self._container._get_type(context)
+ else:
+ # DataType (the DataType itself is our type)
+ return self._container
+
+ def _apply_to_value(self, context, presentation, value):
+ return apply_constraint_to_value(context, presentation, self, value)
+
+
+@short_form_field('type')
+@has_fields
+class EntrySchema(ExtensiblePresentation):
+ """
+ ARIA NOTE: The specification does not properly explain this type; however, it is implied by
+ examples.
+ """
+
+ @field_validator(data_type_validator('entry schema data type'))
+ @primitive_field(str, required=True)
+ def type(self):
+ """
+ :type: :obj:`basestring`
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ :type: :class:`Description`
+ """
+
+ @object_list_field(ConstraintClause)
+ def constraints(self):
+ """
+ :type: list of (str, :class:`ConstraintClause`)
+ """
+
+ @cachedmethod
+ def _get_type(self, context):
+ return get_data_type(context, self, 'type')
+
+ @cachedmethod
+ def _get_constraints(self, context):
+ return get_property_constraints(context, self)
+
+
+@short_form_field('primary')
+@has_fields
+class OperationImplementation(ExtensiblePresentation):
+ """
+ Operation implementation.
+ """
+
+ @primitive_field(str)
+ def primary(self):
+ """
+ The optional implementation artifact name (i.e., the primary script file name within a
+ TOSCA CSAR file).
+
+ :type: :obj:`basestring`
+ """
+
+ @primitive_list_field(str)
+ def dependencies(self):
+ """
+ The optional ordered list of one or more dependent or secondary implementation artifact
+ names which are referenced by the primary implementation artifact (e.g., a library the
+ script installs or a secondary script).
+
+ :type: [:obj:`basestring`]
+ """
+
+
+class SubstitutionMappingsRequirement(AsIsPresentation):
+ """
+ Substitution mapping for requirement.
+ """
+
+ @property
+ @cachedmethod
+ def node_template(self):
+ return str(self._raw[0])
+
+ @property
+ @cachedmethod
+ def requirement(self):
+ return str(self._raw[1])
+
+ def _validate(self, context):
+ super(SubstitutionMappingsRequirement, self)._validate(context)
+ validate_substitution_mappings_requirement(context, self)
+
+
+class SubstitutionMappingsCapability(AsIsPresentation):
+ """
+ Substitution mapping for capability.
+ """
+
+ @property
+ @cachedmethod
+ def node_template(self):
+ return str(self._raw[0])
+
+ @property
+ @cachedmethod
+ def capability(self):
+ return str(self._raw[1])
+
+ def _validate(self, context):
+ super(SubstitutionMappingsCapability, self)._validate(context)
+ validate_substitution_mappings_capability(context, self)
+
+
+@has_fields
+@implements_specification('2.10', 'tosca-simple-1.0')
+class SubstitutionMappings(ExtensiblePresentation):
+ """
+ Substitution mappings.
+ """
+
+ @field_validator(type_validator('node type', convert_name_to_full_type_name, 'node_types'))
+ @primitive_field(str, required=True)
+ def node_type(self):
+ """
+ :type: :obj:`basestring`
+ """
+
+ @object_dict_field(SubstitutionMappingsRequirement)
+ def requirements(self):
+ """
+ :type: {:obj:`basestring`: :class:`SubstitutionMappingsRequirement`}
+ """
+
+ @object_dict_field(SubstitutionMappingsCapability)
+ def capabilities(self):
+ """
+ :type: {:obj:`basestring`: :class:`SubstitutionMappingsCapability`}
+ """
+
+ @cachedmethod
+ def _get_type(self, context):
+ return get_type_by_name(context, self.node_type, 'node_types')
+
+ def _validate(self, context):
+ super(SubstitutionMappings, self)._validate(context)
+ self._get_type(context)
+
+ def _dump(self, context):
+ self._dump_content(context, (
+ 'node_type',
+ 'requirements',
+ 'capabilities'))
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/__init__.py
new file mode 100644
index 0000000..d960e05
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/__init__.py
@@ -0,0 +1,750 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Creates ARIA service template models based on the TOSCA presentation.
+
+Relies on many helper methods in the presentation classes.
+"""
+
+#pylint: disable=unsubscriptable-object
+
+import os
+import re
+from types import FunctionType
+from datetime import datetime
+
+from ruamel import yaml
+
+from aria.parser.validation import Issue
+from aria.utils.formatting import string_list_as_string
+from aria.utils.collections import (StrictDict, OrderedDict)
+from aria.orchestrator import WORKFLOW_DECORATOR_RESERVED_ARGUMENTS
+from aria.modeling.models import (Type, ServiceTemplate, NodeTemplate,
+ RequirementTemplate, RelationshipTemplate, CapabilityTemplate,
+ GroupTemplate, PolicyTemplate, SubstitutionTemplate,
+ SubstitutionTemplateMapping, InterfaceTemplate, OperationTemplate,
+ ArtifactTemplate, Metadata, Input, Output, Property,
+ Attribute, Configuration, PluginSpecification)
+
+from .parameters import coerce_parameter_value
+from .constraints import (Equal, GreaterThan, GreaterOrEqual, LessThan, LessOrEqual, InRange,
+ ValidValues, Length, MinLength, MaxLength, Pattern)
+from ..data_types import coerce_value
+
+
+# This matches the first un-escaped ">"
+# See: http://stackoverflow.com/a/11819111/849021
+IMPLEMENTATION_PREFIX_REGEX = re.compile(r'(?<!\\)(?:\\\\)*>')
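+# For example, 'my-plugin > my.module.task' splits into 'my-plugin ' and ' my.module.task'
+# (split_prefix() below strips the whitespace), while an escaped '\>' is not treated as a
+# separator.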
+
+
+def create_service_template_model(context): # pylint: disable=too-many-locals,too-many-branches
+ model = ServiceTemplate(created_at=datetime.now(),
+ main_file_name=os.path.basename(str(context.presentation.location)))
+
+ model.description = context.presentation.get('service_template', 'description', 'value')
+
+ # Metadata
+ metadata = context.presentation.get('service_template', 'metadata')
+ if metadata is not None:
+ create_metadata_models(context, model, metadata)
+
+ # Types
+ model.node_types = Type(variant='node')
+ create_types(context,
+ model.node_types,
+ context.presentation.get('service_template', 'node_types'))
+ model.group_types = Type(variant='group')
+ create_types(context,
+ model.group_types,
+ context.presentation.get('service_template', 'group_types'))
+ model.policy_types = Type(variant='policy')
+ create_types(context,
+ model.policy_types,
+ context.presentation.get('service_template', 'policy_types'))
+ model.relationship_types = Type(variant='relationship')
+ create_types(context,
+ model.relationship_types,
+ context.presentation.get('service_template', 'relationship_types'))
+ model.capability_types = Type(variant='capability')
+ create_types(context,
+ model.capability_types,
+ context.presentation.get('service_template', 'capability_types'))
+ model.interface_types = Type(variant='interface')
+ create_types(context,
+ model.interface_types,
+ context.presentation.get('service_template', 'interface_types'))
+ model.artifact_types = Type(variant='artifact')
+ create_types(context,
+ model.artifact_types,
+ context.presentation.get('service_template', 'artifact_types'))
+
+ # Topology template
+ topology_template = context.presentation.get('service_template', 'topology_template')
+ if topology_template is not None:
+ model.inputs.update(
+ create_input_models_from_values(topology_template._get_input_values(context)))
+ model.outputs.update(
+ create_output_models_from_values(topology_template._get_output_values(context)))
+
+ # Plugin specifications
+ policies = context.presentation.get('service_template', 'topology_template', 'policies')
+ if policies:
+ for policy in policies.itervalues():
+ role = model.policy_types.get_descendant(policy.type).role
+ if role == 'plugin':
+ plugin_specification = create_plugin_specification_model(context, policy)
+ model.plugin_specifications[plugin_specification.name] = plugin_specification
+ elif role == 'workflow':
+ operation_template = create_workflow_operation_template_model(context,
+ model, policy)
+ model.workflow_templates[operation_template.name] = operation_template
+
+ # Node templates
+ node_templates = context.presentation.get('service_template', 'topology_template',
+ 'node_templates')
+ if node_templates:
+ for node_template in node_templates.itervalues():
+ node_template_model = create_node_template_model(context, model, node_template)
+ model.node_templates[node_template_model.name] = node_template_model
+ for node_template in node_templates.itervalues():
+ fix_node_template_model(context, model, node_template)
+
+ # Group templates
+ groups = context.presentation.get('service_template', 'topology_template', 'groups')
+ if groups:
+ for group in groups.itervalues():
+ group_template_model = create_group_template_model(context, model, group)
+ model.group_templates[group_template_model.name] = group_template_model
+
+ # Policy templates
+ policies = context.presentation.get('service_template', 'topology_template', 'policies')
+ if policies:
+ for policy in policies.itervalues():
+ policy_template_model = create_policy_template_model(context, model, policy)
+ model.policy_templates[policy_template_model.name] = policy_template_model
+
+ # Substitution template
+ substitution_mappings = context.presentation.get('service_template', 'topology_template',
+ 'substitution_mappings')
+ if substitution_mappings:
+ model.substitution_template = create_substitution_template_model(context, model,
+ substitution_mappings)
+
+ return model
+
+
+def create_metadata_models(context, service_template, metadata):
+ service_template.meta_data['template_name'] = Metadata(name='template_name',
+ value=metadata.template_name)
+ service_template.meta_data['template_author'] = Metadata(name='template_author',
+ value=metadata.template_author)
+ service_template.meta_data['template_version'] = Metadata(name='template_version',
+ value=metadata.template_version)
+ custom = metadata.custom
+ if custom:
+ for name, value in custom.iteritems():
+ service_template.meta_data[name] = Metadata(name=name,
+ value=value)
+
+
+def create_node_template_model(context, service_template, node_template):
+ node_type = node_template._get_type(context)
+ node_type = service_template.node_types.get_descendant(node_type._name)
+ model = NodeTemplate(name=node_template._name, type=node_type)
+
+ if node_template.description:
+ model.description = node_template.description.value
+
+ if node_template.directives:
+ model.directives = node_template.directives
+
+ model.properties.update(create_property_models_from_values(
+ template_properties=node_template._get_property_values(context)))
+ model.attributes.update(create_attribute_models_from_values(
+ template_attributes=node_template._get_attribute_default_values(context)))
+
+ create_interface_template_models(context, service_template, model.interface_templates,
+ node_template._get_interfaces(context))
+
+ artifacts = node_template._get_artifacts(context)
+ if artifacts:
+ for artifact_name, artifact in artifacts.iteritems():
+ model.artifact_templates[artifact_name] = \
+ create_artifact_template_model(context, service_template, artifact)
+
+ capabilities = node_template._get_capabilities(context)
+ if capabilities:
+ for capability_name, capability in capabilities.iteritems():
+ model.capability_templates[capability_name] = \
+ create_capability_template_model(context, service_template, capability)
+
+ if node_template.node_filter:
+ model.target_node_template_constraints = []
+ create_node_filter_constraints(context, node_template.node_filter,
+ model.target_node_template_constraints)
+
+ return model
+
+
+def fix_node_template_model(context, service_template, node_template):
+ # Requirements have to be created after all node templates have been created, because
+ # requirements might reference another node template
+ model = service_template.node_templates[node_template._name]
+ requirements = node_template._get_requirements(context)
+ if requirements:
+ for _, requirement in requirements:
+ model.requirement_templates.append(create_requirement_template_model(context,
+ service_template,
+ requirement))
+
+
+def create_group_template_model(context, service_template, group):
+ group_type = group._get_type(context)
+ group_type = service_template.group_types.get_descendant(group_type._name)
+ model = GroupTemplate(name=group._name,
+ type=group_type)
+
+ if group.description:
+ model.description = group.description.value
+
+ model.properties.update(create_property_models_from_values(group._get_property_values(context)))
+
+ create_interface_template_models(context, service_template, model.interface_templates,
+ group._get_interfaces(context))
+ members = group.members
+ if members:
+ for member in members:
+ node_template = service_template.node_templates[member]
+ assert node_template
+ model.node_templates.append(node_template)
+
+ return model
+
+
+def create_policy_template_model(context, service_template, policy):
+ policy_type = policy._get_type(context)
+ policy_type = service_template.policy_types.get_descendant(policy_type._name)
+ model = PolicyTemplate(name=policy._name,
+ type=policy_type)
+
+ if policy.description:
+ model.description = policy.description.value
+
+ model.properties.update(
+ create_property_models_from_values(policy._get_property_values(context)))
+
+ node_templates, groups = policy._get_targets(context)
+ if node_templates:
+ for target in node_templates:
+ node_template = service_template.node_templates[target._name]
+ assert node_template
+ model.node_templates.append(node_template)
+ if groups:
+ for target in groups:
+ group_template = service_template.group_templates[target._name]
+ assert group_template
+ model.group_templates.append(group_template)
+
+ return model
+
+
+def create_requirement_template_model(context, service_template, requirement):
+ model = {'name': requirement._name}
+
+ node, node_variant = requirement._get_node(context)
+ if node is not None:
+ if node_variant == 'node_type':
+ node_type = service_template.node_types.get_descendant(node._name)
+ model['target_node_type'] = node_type
+ else:
+ node_template = service_template.node_templates[node._name]
+ model['target_node_template'] = node_template
+
+ capability, capability_variant = requirement._get_capability(context)
+ if capability is not None:
+ if capability_variant == 'capability_type':
+ capability_type = \
+ service_template.capability_types.get_descendant(capability._name)
+ model['target_capability_type'] = capability_type
+ else:
+ model['target_capability_name'] = capability._name
+
+ model = RequirementTemplate(**model)
+
+ if requirement.node_filter:
+ model.target_node_template_constraints = []
+ create_node_filter_constraints(context, requirement.node_filter,
+ model.target_node_template_constraints)
+
+ relationship = requirement.relationship
+ if relationship is not None:
+ model.relationship_template = \
+ create_relationship_template_model(context, service_template, relationship)
+ model.relationship_template.name = requirement._name
+
+ return model
+
+
+def create_relationship_template_model(context, service_template, relationship):
+ relationship_type, relationship_type_variant = relationship._get_type(context)
+ if relationship_type_variant == 'relationship_type':
+ relationship_type = service_template.relationship_types.get_descendant(
+ relationship_type._name)
+ model = RelationshipTemplate(type=relationship_type)
+ else:
+ relationship_template = relationship_type
+ relationship_type = relationship_template._get_type(context)
+ relationship_type = service_template.relationship_types.get_descendant(
+ relationship_type._name)
+ model = RelationshipTemplate(type=relationship_type)
+ if relationship_template.description:
+ model.description = relationship_template.description.value
+
+ create_parameter_models_from_assignments(model.properties,
+ relationship.properties,
+ model_cls=Property)
+ create_interface_template_models(context, service_template, model.interface_templates,
+ relationship.interfaces)
+
+ return model
+
+
+def create_capability_template_model(context, service_template, capability):
+ capability_type = capability._get_type(context)
+ capability_type = service_template.capability_types.get_descendant(capability_type._name)
+ model = CapabilityTemplate(name=capability._name,
+ type=capability_type)
+
+ capability_definition = capability._get_definition(context)
+ if capability_definition.description:
+ model.description = capability_definition.description.value
+ occurrences = capability_definition.occurrences
+ if occurrences is not None:
+ model.min_occurrences = occurrences.value[0]
+ if occurrences.value[1] != 'UNBOUNDED':
+ model.max_occurrences = occurrences.value[1]
+
+ valid_source_types = capability_definition.valid_source_types
+ if valid_source_types:
+ for valid_source_type in valid_source_types:
+ # TODO: handle shortcut type names
+ node_type = service_template.node_types.get_descendant(valid_source_type)
+ model.valid_source_node_types.append(node_type)
+
+ create_parameter_models_from_assignments(model.properties,
+ capability.properties,
+ model_cls=Property)
+
+ return model
+
+
+def create_interface_template_model(context, service_template, interface):
+ interface_type = interface._get_type(context)
+ interface_type = service_template.interface_types.get_descendant(interface_type._name)
+ model = InterfaceTemplate(name=interface._name, type=interface_type)
+
+ if interface_type.description:
+ model.description = interface_type.description
+
+ create_parameter_models_from_assignments(model.inputs, interface.inputs, model_cls=Input)
+
+ operations = interface.operations
+ if operations:
+ for operation_name, operation in operations.iteritems():
+ model.operation_templates[operation_name] = \
+ create_operation_template_model(context, service_template, operation)
+
+ return model if model.operation_templates else None
+
+
+def create_operation_template_model(context, service_template, operation):
+ model = OperationTemplate(name=operation._name)
+
+ if operation.description:
+ model.description = operation.description.value
+
+ implementation = operation.implementation
+ if implementation is not None:
+ primary = implementation.primary
+ extract_implementation_primary(context, service_template, operation, model, primary)
+ relationship_edge = operation._get_extensions(context).get('relationship_edge')
+ if relationship_edge is not None:
+ if relationship_edge == 'source':
+ model.relationship_edge = False
+ elif relationship_edge == 'target':
+ model.relationship_edge = True
+
+ dependencies = implementation.dependencies
+ configuration = OrderedDict()
+ if dependencies:
+ for dependency in dependencies:
+ key, value = split_prefix(dependency)
+ if key is not None:
+ # Special ARIA prefix: signifies configuration parameters
+
+ # Parse as YAML
+ try:
+ value = yaml.load(value)
+ except yaml.parser.MarkedYAMLError as e:
+ context.validation.report(
+ 'YAML parser {0} in operation configuration: {1}'
+ .format(e.problem, value),
+ locator=implementation._locator,
+ level=Issue.FIELD)
+ continue
+
+ # Coerce to intrinsic functions, if there are any
+ value = coerce_parameter_value(context, implementation, None, value).value
+
+ # Support dot-notation nesting
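+ # (e.g. a hypothetical dependency 'ssh.user > admin' would yield
+ # configuration['ssh']['user'] = 'admin')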
+ set_nested(configuration, key.split('.'), value)
+ else:
+ if model.dependencies is None:
+ model.dependencies = []
+ model.dependencies.append(dependency)
+
+ # Convert configuration to Configuration models
+ for key, value in configuration.iteritems():
+ model.configurations[key] = Configuration.wrap(key, value,
+ description='Operation configuration.')
+
+ create_parameter_models_from_assignments(model.inputs, operation.inputs, model_cls=Input)
+ return model
+
+
+def create_artifact_template_model(context, service_template, artifact):
+ artifact_type = artifact._get_type(context)
+ artifact_type = service_template.artifact_types.get_descendant(artifact_type._name)
+ model = ArtifactTemplate(name=artifact._name,
+ type=artifact_type,
+ source_path=artifact.file)
+
+ if artifact.description:
+ model.description = artifact.description.value
+
+ model.target_path = artifact.deploy_path
+
+ repository = artifact._get_repository(context)
+ if repository is not None:
+ model.repository_url = repository.url
+ credential = repository._get_credential(context)
+ if credential:
+ model.repository_credential = {}
+ for k, v in credential.iteritems():
+ model.repository_credential[k] = v
+
+ model.properties.update(
+ create_property_models_from_values(artifact._get_property_values(context)))
+
+ return model
+
+
+def create_substitution_template_model(context, service_template, substitution_mappings):
+ node_type = service_template.node_types.get_descendant(substitution_mappings.node_type)
+ model = SubstitutionTemplate(node_type=node_type)
+
+ capabilities = substitution_mappings.capabilities
+ if capabilities:
+ for mapped_capability_name, capability in capabilities.iteritems():
+ name = 'capability.' + mapped_capability_name
+ node_template_model = service_template.node_templates[capability.node_template]
+ capability_template_model = \
+ node_template_model.capability_templates[capability.capability]
+ model.mappings[name] = \
+ SubstitutionTemplateMapping(name=name,
+ capability_template=capability_template_model)
+
+ requirements = substitution_mappings.requirements
+ if requirements:
+ for mapped_requirement_name, requirement in requirements.iteritems():
+ name = 'requirement.' + mapped_requirement_name
+ node_template_model = service_template.node_templates[requirement.node_template]
+ requirement_template_model = None
+ for a_model in node_template_model.requirement_templates:
+ if a_model.name == requirement.requirement:
+ requirement_template_model = a_model
+ break
+ model.mappings[name] = \
+ SubstitutionTemplateMapping(name=name,
+ requirement_template=requirement_template_model)
+
+ return model
+
+
+def create_plugin_specification_model(context, policy):
+ properties = policy.properties
+
+ def get(name, default=None):
+ prop = properties.get(name)
+ return prop.value if prop is not None else default
+
+ model = PluginSpecification(name=policy._name,
+ version=get('version'),
+ enabled=get('enabled', True))
+
+ return model
+
+
+def create_workflow_operation_template_model(context, service_template, policy):
+ model = OperationTemplate(name=policy._name)
+ # Since these fields use back-populates, they are only populated upon commit; without this
+ # assignment we would get a weird (temporary) behavior where
+ # service_template.workflow_templates is a dict that has None as the key for this model.
+ service_template.workflow_templates[model.name] = model
+
+ if policy.description:
+ model.description = policy.description.value
+
+ properties = policy._get_property_values(context)
+ for prop_name, prop in properties.iteritems():
+ if prop_name == 'implementation':
+ model.function = prop.value
+ else:
+ input_model = create_parameter_model_from_value(prop, prop_name, model_cls=Input)
+ input_model.required = prop.required
+ model.inputs[prop_name] = input_model
+
+ used_reserved_names = WORKFLOW_DECORATOR_RESERVED_ARGUMENTS.intersection(model.inputs.keys())
+ if used_reserved_names:
+ context.validation.report('using reserved arguments in workflow policy "{0}": {1}'
+ .format(
+ policy._name,
+ string_list_as_string(used_reserved_names)),
+ locator=policy._locator,
+ level=Issue.EXTERNAL)
+ return model
+
+
+#
+# Utils
+#
+
+def create_types(context, root, types):
+ if types is None:
+ return
+
+ def added_all():
+ for name in types:
+ if root.get_descendant(name) is None:
+ return False
+ return True
+
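+ # Iterate until the whole hierarchy is in place: each pass attaches only types whose
+ # parent has already been added, so declaration order does not matter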
+ while not added_all():
+ for name, the_type in types.iteritems():
+ if root.get_descendant(name) is None:
+ parent_type = the_type._get_parent(context)
+ model = Type(name=the_type._name,
+ role=the_type._get_extension('role'))
+ if the_type.description:
+ model.description = the_type.description.value
+ if parent_type is None:
+ model.parent = root
+ model.variant = root.variant
+ root.children.append(model)
+ else:
+ container = root.get_descendant(parent_type._name)
+ if container is not None:
+ model.parent = container
+ model.variant = container.variant
+ container.children.append(model)
+
+
+def create_input_models_from_values(template_inputs):
+ model_inputs = {}
+ if template_inputs:
+ for template_input_name, template_input in template_inputs.iteritems():
+ model_input = create_parameter_model_from_value(template_input, template_input_name,
+ model_cls=Input)
+ model_input.required = template_input.required
+ model_inputs[model_input.name] = model_input
+ return model_inputs
+
+def create_output_models_from_values(template_outputs):
+ model_outputs = {}
+ for template_output_name, template_output in template_outputs.iteritems():
+ model_outputs[template_output_name] = \
+ create_parameter_model_from_value(template_output,
+ template_output_name,
+ model_cls=Output)
+ return model_outputs
+
+
+def create_property_models_from_values(template_properties):
+ model_properties = {}
+ for template_property_name, template_property in template_properties.iteritems():
+ model_properties[template_property_name] = \
+ create_parameter_model_from_value(template_property,
+ template_property_name,
+ model_cls=Property)
+ return model_properties
+
+def create_attribute_models_from_values(template_attributes):
+ model_attributes = {}
+ for template_attribute_name, template_attribute in template_attributes.iteritems():
+ model_attributes[template_attribute_name] = \
+ create_parameter_model_from_value(template_attribute,
+ template_attribute_name,
+ model_cls=Attribute)
+ return model_attributes
+
+
+def create_parameter_model_from_value(template_parameter, template_parameter_name, model_cls):
+ return model_cls(name=template_parameter_name,
+ type_name=template_parameter.type,
+ value=template_parameter.value,
+ description=template_parameter.description)
+
+
+def create_parameter_models_from_assignments(properties, source_properties, model_cls):
+ if source_properties:
+ for property_name, prop in source_properties.iteritems():
+ properties[property_name] = model_cls(name=property_name, # pylint: disable=unexpected-keyword-arg
+ type_name=prop.value.type,
+ value=prop.value.value,
+ description=prop.value.description)
+
+
+def create_interface_template_models(context, service_template, interfaces, source_interfaces):
+ if source_interfaces:
+ for interface_name, interface in source_interfaces.iteritems():
+ interface = create_interface_template_model(context, service_template, interface)
+ if interface is not None:
+ interfaces[interface_name] = interface
+
+
+def create_node_filter_constraints(context, node_filter, target_node_template_constraints):
+ properties = node_filter.properties
+ if properties is not None:
+ for property_name, constraint_clause in properties:
+ constraint = create_constraint(context, node_filter, constraint_clause, property_name,
+ None)
+ target_node_template_constraints.append(constraint)
+
+ capabilities = node_filter.capabilities
+ if capabilities is not None:
+ for capability_name, capability in capabilities:
+ properties = capability.properties
+ if properties is not None:
+ for property_name, constraint_clause in properties:
+ constraint = create_constraint(context, node_filter, constraint_clause,
+ property_name, capability_name)
+ target_node_template_constraints.append(constraint)
+
+
+def create_constraint(context, node_filter, constraint_clause, property_name, capability_name): # pylint: disable=too-many-return-statements
+ constraint_key = constraint_clause._raw.keys()[0]
+
+ the_type = constraint_clause._get_type(context)
+
+ def coerce_constraint(constraint):
+ if the_type is not None:
+ return coerce_value(context, node_filter, the_type, None, None, constraint,
+ constraint_key)
+ else:
+ return constraint
+
+ def coerce_constraints(constraints):
+ if the_type is not None:
+ return tuple(coerce_constraint(constraint) for constraint in constraints)
+ else:
+ return constraints
+
+ if constraint_key == 'equal':
+ return Equal(property_name, capability_name,
+ coerce_constraint(constraint_clause.equal))
+ elif constraint_key == 'greater_than':
+ return GreaterThan(property_name, capability_name,
+ coerce_constraint(constraint_clause.greater_than))
+ elif constraint_key == 'greater_or_equal':
+ return GreaterOrEqual(property_name, capability_name,
+ coerce_constraint(constraint_clause.greater_or_equal))
+ elif constraint_key == 'less_than':
+ return LessThan(property_name, capability_name,
+ coerce_constraint(constraint_clause.less_than))
+ elif constraint_key == 'less_or_equal':
+ return LessOrEqual(property_name, capability_name,
+ coerce_constraint(constraint_clause.less_or_equal))
+ elif constraint_key == 'in_range':
+ return InRange(property_name, capability_name,
+ coerce_constraints(constraint_clause.in_range))
+ elif constraint_key == 'valid_values':
+ return ValidValues(property_name, capability_name,
+ coerce_constraints(constraint_clause.valid_values))
+ elif constraint_key == 'length':
+ return Length(property_name, capability_name,
+ coerce_constraint(constraint_clause.length))
+ elif constraint_key == 'min_length':
+ return MinLength(property_name, capability_name,
+ coerce_constraint(constraint_clause.min_length))
+ elif constraint_key == 'max_length':
+ return MaxLength(property_name, capability_name,
+ coerce_constraint(constraint_clause.max_length))
+ elif constraint_key == 'pattern':
+ return Pattern(property_name, capability_name,
+ coerce_constraint(constraint_clause.pattern))
+ else:
+ raise ValueError('malformed node_filter: {0}'.format(constraint_key))
+
+
+def split_prefix(string):
+ """
+ Splits the string into a prefix and a postfix on the first non-escaped ">"; returns
+ (None, None) if there is no separator.
+ """
+
+ split = IMPLEMENTATION_PREFIX_REGEX.split(string, 1)
+ if len(split) < 2:
+ return None, None
+ return split[0].strip(), split[1].strip()
+
+
+def set_nested(the_dict, keys, value):
+ """
+ If the ``keys`` list has just one item, puts the value in the dict. If there are more items,
+ puts the value in a sub-dict, creating sub-dicts as necessary for each key.
+
+ For example, if ``the_dict`` is an empty dict, keys is ``['first', 'second', 'third']`` and
+ value is ``'value'``, then the_dict will be: ``{'first':{'second':{'third':'value'}}}``.
+
+ :param the_dict: Dict to change
+ :type the_dict: {}
+ :param keys: Keys
+ :type keys: [basestring]
+ :param value: Value
+ """
+ key = keys.pop(0)
+ if len(keys) == 0:
+ the_dict[key] = value
+ else:
+ if key not in the_dict:
+ the_dict[key] = StrictDict(key_class=basestring)
+ set_nested(the_dict[key], keys, value)
+
+
+def extract_implementation_primary(context, service_template, presentation, model, primary):
+ prefix, postfix = split_prefix(primary)
+ if prefix:
+ # Special ARIA prefix
+ model.plugin_specification = service_template.plugin_specifications.get(prefix)
+ model.function = postfix
+ if model.plugin_specification is None:
+ context.validation.report(
+ 'no policy for plugin "{0}" specified in operation implementation: {1}'
+ .format(prefix, primary),
+ locator=presentation._get_child_locator('properties', 'implementation'),
+ level=Issue.BETWEEN_TYPES)
+ else:
+ # Standard TOSCA artifact with default plugin
+ model.implementation = primary
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/artifacts.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/artifacts.py
new file mode 100644
index 0000000..b45615a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/artifacts.py
@@ -0,0 +1,44 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.collections import OrderedDict
+
+
+#
+# NodeType, NodeTemplate
+#
+
+def get_inherited_artifact_definitions(context, presentation, for_presentation=None):
+ if for_presentation is None:
+ for_presentation = presentation
+
+ if hasattr(presentation, '_get_type'):
+ # In NodeTemplate
+ parent = presentation._get_type(context)
+ else:
+ # In NodeType
+ parent = presentation._get_parent(context)
+
+ # Get artifact definitions from parent
+ artifacts = get_inherited_artifact_definitions(context, parent, for_presentation) \
+ if parent is not None else OrderedDict()
+
+ # Add/override our artifact definitions
+ our_artifacts = presentation.artifacts
+ if our_artifacts:
+ for artifact_name, artifact in our_artifacts.iteritems():
+ artifacts[artifact_name] = artifact._clone(for_presentation)
+
+ return artifacts
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/capabilities.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/capabilities.py
new file mode 100644
index 0000000..1b95bec
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/capabilities.py
@@ -0,0 +1,220 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.collections import deepcopy_with_locators, OrderedDict
+from aria.parser.validation import Issue
+
+from .parameters import (convert_parameter_definitions_to_values, merge_raw_parameter_definitions,
+ get_assigned_and_defined_parameter_values)
+
+
+#
+# CapabilityType
+#
+
+def get_inherited_valid_source_types(context, presentation):
+ """
+ If we haven't set the ``valid_source_types`` field, uses the value from our parent, if we have
+ one (recursively).
+ """
+
+ valid_source_types = presentation.valid_source_types
+
+ if valid_source_types is None:
+ parent = presentation._get_parent(context)
+ valid_source_types = get_inherited_valid_source_types(context, parent) \
+ if parent is not None else None
+
+ return valid_source_types
+
+
+#
+# NodeType
+#
+
+def get_inherited_capability_definitions(context, presentation, for_presentation=None):
+ """
+ Returns our capability definitions added on top of those of our parent, if we have
+ one (recursively).
+
+ Allows overriding all aspects of parent capability properties except data type.
+ """
+
+ if for_presentation is None:
+ for_presentation = presentation
+
+ # Get capability definitions from parent
+ parent = presentation._get_parent(context)
+ capability_definitions = get_inherited_capability_definitions(
+ context, parent, for_presentation) if parent is not None else OrderedDict()
+
+ # Add/merge our capability definitions
+ our_capability_definitions = presentation.capabilities
+ if our_capability_definitions:
+ for capability_name, our_capability_definition in our_capability_definitions.iteritems():
+ if capability_name in capability_definitions:
+ capability_definition = capability_definitions[capability_name]
+
+ # Check if we changed the type
+ type1 = capability_definition._get_type(context)
+ type2 = our_capability_definition._get_type(context)
+
+ if not type1._is_descendant(context, type2):
+ context.validation.report(
+ 'capability definition type "{0}" is not a descendant of overridden '
+ 'capability definition type "{1}"' \
+ .format(type1._name, type2._name),
+ locator=our_capability_definition._locator, level=Issue.BETWEEN_TYPES)
+
+ merge_capability_definition(context, presentation, capability_definition,
+ our_capability_definition)
+ else:
+ capability_definition = our_capability_definition._clone(for_presentation)
+ if isinstance(capability_definition._raw, basestring):
+ # Make sure we have a dict
+ the_type = capability_definition._raw
+ capability_definition._raw = OrderedDict()
+ capability_definition._raw['type'] = the_type
+ capability_definitions[capability_name] = capability_definition
+
+ merge_capability_definition_from_type(context, presentation, capability_definition)
+
+ for capability_definition in capability_definitions.itervalues():
+ capability_definition._reset_method_cache()
+
+ return capability_definitions
+
+
+#
+# NodeTemplate
+#
+
+def get_template_capabilities(context, presentation):
+ """
+ Returns the node type's capabilities with our assignments to properties and attributes merged
+ in.
+
+ Capability properties' default values, if available, will be used if we did not assign them.
+
+ Makes sure that required properties indeed end up with a value.
+ """
+
+ capability_assignments = OrderedDict()
+
+ the_type = presentation._get_type(context) # NodeType
+ capability_definitions = the_type._get_capabilities(context) if the_type is not None else None
+
+ # Copy over capability definitions from the type (will initialize properties with default
+ # values)
+ if capability_definitions:
+ for capability_name, capability_definition in capability_definitions.iteritems():
+ capability_assignments[capability_name] = \
+ convert_capability_from_definition_to_assignment(context, capability_definition,
+ presentation)
+
+ # Fill in our capability assignments
+ our_capability_assignments = presentation.capabilities
+ if our_capability_assignments:
+ for capability_name, our_capability_assignment in our_capability_assignments.iteritems():
+ if capability_name in capability_assignments:
+ capability_assignment = capability_assignments[capability_name]
+
+ # Assign properties
+ values = get_assigned_and_defined_parameter_values(context,
+ our_capability_assignment,
+ 'property')
+
+ if values:
+ capability_assignment._raw['properties'] = values
+ capability_assignment._reset_method_cache()
+ else:
+ context.validation.report(
+ 'capability "{0}" not declared at node type "{1}" in "{2}"'
+ .format(capability_name, presentation.type, presentation._fullname),
+ locator=our_capability_assignment._locator, level=Issue.BETWEEN_TYPES)
+
+ return capability_assignments
+
+
+#
+# Utils
+#
+
+def convert_capability_from_definition_to_assignment(context, presentation, container):
+ from ..assignments import CapabilityAssignment
+
+ raw = OrderedDict()
+
+ properties = presentation.properties
+ if properties is not None:
+ raw['properties'] = convert_parameter_definitions_to_values(context, properties)
+
+ # TODO attributes
+
+ return CapabilityAssignment(name=presentation._name, raw=raw, container=container)
+
+
+def merge_capability_definition(context, presentation, capability_definition,
+ from_capability_definition):
+ raw_properties = OrderedDict()
+
+ capability_definition._raw['type'] = from_capability_definition.type
+
+ # Merge properties from type
+ from_property_definitions = from_capability_definition.properties
+ merge_raw_parameter_definitions(context, presentation, raw_properties,
+ from_property_definitions, 'properties')
+
+ # Merge our properties
+ merge_raw_parameter_definitions(context, presentation, raw_properties,
+ capability_definition.properties, 'properties')
+
+ if raw_properties:
+ capability_definition._raw['properties'] = raw_properties
+ capability_definition._reset_method_cache()
+
+ # Merge occurrences
+ occurrences = from_capability_definition._raw.get('occurrences')
+ if (occurrences is not None) and (capability_definition._raw.get('occurrences') is None):
+ capability_definition._raw['occurrences'] = \
+ deepcopy_with_locators(occurrences)
+
+
+def merge_capability_definition_from_type(context, presentation, capability_definition):
+ """
+ Merge ``properties`` and ``valid_source_types`` from the node type's capability definition
+ over those taken from the parent node type.
+ """
+ raw_properties = OrderedDict()
+
+ # Merge properties from parent
+ the_type = capability_definition._get_type(context)
+ type_property_definitions = the_type._get_properties(context)
+ merge_raw_parameter_definitions(context, presentation, raw_properties,
+ type_property_definitions, 'properties')
+
+ # Merge our properties (might override definitions in parent)
+ merge_raw_parameter_definitions(context, presentation, raw_properties,
+ capability_definition.properties, 'properties')
+
+ if raw_properties:
+ capability_definition._raw['properties'] = raw_properties
+
+ # Override valid_source_types
+ if capability_definition._raw.get('valid_source_types') is None:
+ valid_source_types = the_type._get_valid_source_types(context)
+ if valid_source_types is not None:
+ capability_definition._raw['valid_source_types'] = \
+ deepcopy_with_locators(valid_source_types)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/constraints.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/constraints.py
new file mode 100644
index 0000000..9a30cc1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/constraints.py
@@ -0,0 +1,144 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+
+from aria.modeling.constraints import NodeTemplateConstraint
+from aria.modeling.utils import NodeTemplateContainerHolder
+from aria.modeling.functions import evaluate
+from aria.parser import implements_specification
+
+
+@implements_specification('3.5.2-2', 'tosca-simple-1.0')
+class EvaluatingNodeTemplateConstraint(NodeTemplateConstraint):
+ """
+ A version of :class:`NodeTemplateConstraint` with boilerplate initialization for TOSCA
+ constraints.
+ """
+
+ def __init__(self, property_name, capability_name, constraint, as_list=False):
+ self.property_name = property_name
+ self.capability_name = capability_name
+ self.constraint = constraint
+ self.as_list = as_list
+
+ def matches(self, source_node_template, target_node_template):
+ # TOSCA node template constraints can refer to either capability properties or node
+ # template properties
+ if self.capability_name is not None:
+ # Capability property
+ capability = target_node_template.capability_templates.get(self.capability_name)
+ value = capability.properties.get(self.property_name) \
+ if capability is not None else None # Parameter
+ else:
+ # Node template property
+ value = target_node_template.properties.get(self.property_name) # Parameter
+
+ value = value.value if value is not None else None
+
+ container_holder = NodeTemplateContainerHolder(source_node_template)
+
+ if self.as_list:
+ constraints = []
+ for constraint in self.constraint:
+ evaluation = evaluate(constraint, container_holder)
+ if evaluation is not None:
+ constraints.append(evaluation.value)
+ else:
+ constraints.append(constraint)
+ constraint = constraints
+ else:
+ evaluation = evaluate(self.constraint, container_holder)
+ if evaluation is not None:
+ constraint = evaluation.value
+ else:
+ constraint = self.constraint
+
+ return self.matches_evaluated(value, constraint)
+
+ def matches_evaluated(self, value, constraint):
+ raise NotImplementedError
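+
+# Usage sketch (illustrative): a node_filter entry such as
+#
+#     node_filter:
+#       properties:
+#         - port: {equal: 8080}
+#
+# would be modeled as Equal('port', None, 8080); matches() then resolves the
+# target node template's "port" property, evaluates any intrinsic functions in
+# the constraint against the source node template, and delegates the final
+# comparison to matches_evaluated().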
+
+
+class Equal(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ return value == constraint
+
+
+class GreaterThan(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ return value > constraint
+
+
+class GreaterOrEqual(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ return value >= constraint
+
+
+class LessThan(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ return value < constraint
+
+
+class LessOrEqual(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ return value <= constraint
+
+
+class InRange(EvaluatingNodeTemplateConstraint):
+ def __init__(self, property_name, capability_name, constraint):
+ super(InRange, self).__init__(property_name, capability_name, constraint, as_list=True)
+
+ def matches_evaluated(self, value, constraints):
+ lower, upper = constraints
+ if value < lower:
+ return False
+ if (upper != 'UNBOUNDED') and (value > upper):
+ return False
+ return True
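+
+# For example, InRange('port', None, [1, 'UNBOUNDED']) accepts any value >= 1:
+# the upper-bound check is skipped when the upper bound is the string
+# 'UNBOUNDED'.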
+
+
+class ValidValues(EvaluatingNodeTemplateConstraint):
+ def __init__(self, property_name, capability_name, constraint):
+ super(ValidValues, self).__init__(property_name, capability_name, constraint, as_list=True)
+
+ def matches_evaluated(self, value, constraints):
+ return value in constraints
+
+
+class Length(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ return len(value) == constraint
+
+
+class MinLength(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ return len(value) >= constraint
+
+
+class MaxLength(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ return len(value) <= constraint
+
+
+class Pattern(EvaluatingNodeTemplateConstraint):
+ def matches_evaluated(self, value, constraint):
+ # From TOSCA 1.0 3.5.2.1:
+ #
+ # "Note: Future drafts of this specification will detail the use of regular expressions and
+ # reference an appropriate standardized grammar."
+ #
+ # So we will just use Python's.
+ return re.match(constraint, unicode(value)) is not None
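+
+# Note that re.match() only anchors at the start of the string, so a constraint
+# such as Pattern('name', None, 'ab') would also accept u'abc'; a pattern that
+# must cover the whole value needs an explicit trailing anchor, e.g. 'ab$'.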
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/copy.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/copy.py
new file mode 100644
index 0000000..bd9037f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/copy.py
@@ -0,0 +1,32 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# NodeTemplate, RelationshipTemplate
+#
+
+def get_default_raw_from_copy(presentation, field_name):
+ """
+ Used for the ``_get_default_raw`` field hook.
+ """
+
+ copy = presentation._raw.get('copy')
+ if copy is not None:
+ templates = getattr(presentation._container, field_name)
+ if templates is not None:
+ template = templates.get(copy)
+ if template is not None:
+ return template._raw
+ return None
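+
+# Illustrative sketch (hypothetical template names): with
+#
+#     node_templates:
+#       base:
+#         type: my.nodes.Server
+#       clone:
+#         copy: base
+#
+# calling get_default_raw_from_copy() on the "clone" presentation with field
+# name 'node_templates' returns the raw dict of "base", which then serves as
+# the default raw content of "clone".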
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/data_types.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/data_types.py
new file mode 100644
index 0000000..13ce9a3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/data_types.py
@@ -0,0 +1,514 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+
+from aria.utils.collections import OrderedDict
+from aria.utils.formatting import safe_repr
+from aria.utils.type import full_type_name
+from aria.utils.imports import import_fullname
+from aria.parser import implements_specification
+from aria.parser.presentation import (get_locator, validate_primitive)
+from aria.parser.validation import Issue
+
+from .functions import get_function
+from ..presentation.types import get_type_by_name
+
+
+#
+# DataType
+#
+
+def get_inherited_constraints(context, presentation):
+ """
+ If we do not have constraints of our own, returns our parent's constraints (if we have a
+ parent), recursively.
+
+ Implication: if we define even one constraint, the parent's constraints will not be inherited.
+ """
+
+ constraints = presentation.constraints
+
+ if constraints is None:
+ # If we don't have any, use our parent's
+ parent = presentation._get_parent(context)
+ parent_constraints = get_inherited_constraints(context, parent) \
+ if parent is not None else None
+ if parent_constraints is not None:
+ constraints = parent_constraints
+
+ return constraints
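+
+# Illustrative sketch (hypothetical type names): with
+#
+#     data_types:
+#       Port:
+#         derived_from: integer
+#         constraints:
+#           - in_range: [1, 65535]
+#       AppPort:
+#         derived_from: Port
+#
+# AppPort inherits Port's in_range constraint, but declaring even a single
+# constraint on AppPort would replace the inherited list rather than extend it.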
+
+
+def coerce_data_type_value(context, presentation, data_type, entry_schema, constraints, value, # pylint: disable=unused-argument
+ aspect):
+ """
+ Handles the ``_coerce_data()`` hook for complex data types.
+
+ There are two kinds of handling:
+
+ 1. If we have a primitive type as an ancestor, then we do primitive type coercion and just
+ check the constraints.
+
+ 2. Otherwise, for normal complex data types we return the assigned property values while making
+ sure they are defined in our type. The property definition's default value, if available,
+ will be used if we did not assign it. We also make sure that required definitions indeed end
+ up with a value.
+ """
+
+ primitive_type = data_type._get_primitive_ancestor(context)
+ if primitive_type is not None:
+ # Must be coercible to primitive ancestor
+ value = coerce_to_primitive(context, presentation, primitive_type, constraints, value,
+ aspect)
+ else:
+ definitions = data_type._get_properties(context)
+ if isinstance(value, dict):
+ temp = OrderedDict()
+
+ # Fill in our values, but make sure they are defined
+ for name, v in value.iteritems():
+ if name in definitions:
+ definition = definitions[name]
+ definition_type = definition._get_type(context)
+ definition_entry_schema = definition.entry_schema
+ definition_constraints = definition._get_constraints(context)
+ temp[name] = coerce_value(context, presentation, definition_type,
+ definition_entry_schema, definition_constraints, v,
+ aspect)
+ else:
+ context.validation.report(
+ 'assignment to undefined property "%s" in type "%s" in "%s"'
+ % (name, data_type._fullname, presentation._fullname),
+ locator=get_locator(v, value, presentation), level=Issue.BETWEEN_TYPES)
+
+ # Fill in defaults from the definitions, and check if required definitions have not been
+ # assigned
+ for name, definition in definitions.iteritems():
+ if (temp.get(name) is None) and hasattr(definition, 'default') \
+ and (definition.default is not None):
+ definition_type = definition._get_type(context)
+ definition_entry_schema = definition.entry_schema
+ definition_constraints = definition._get_constraints(context)
+ temp[name] = coerce_value(context, presentation, definition_type,
+ definition_entry_schema, definition_constraints,
+ definition.default, 'default')
+
+ if getattr(definition, 'required', False) and (temp.get(name) is None):
+ context.validation.report(
+ 'required property "%s" in type "%s" is not assigned a value in "%s"'
+ % (name, data_type._fullname, presentation._fullname),
+ locator=presentation._get_child_locator('definitions'),
+ level=Issue.BETWEEN_TYPES)
+
+ value = temp
+ elif value is not None:
+ context.validation.report('value of type "%s" is not a dict in "%s"'
+ % (data_type._fullname, presentation._fullname),
+ locator=get_locator(value, presentation),
+ level=Issue.BETWEEN_TYPES)
+ value = None
+
+ return value
+
+
+def validate_data_type_name(context, presentation):
+ """
+ Makes sure the complex data type's name is not that of a built-in type.
+ """
+
+ name = presentation._name
+ if get_primitive_data_type(name) is not None:
+ context.validation.report('data type name is that of a built-in type: %s'
+ % safe_repr(name),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+
+
+#
+# PropertyDefinition, AttributeDefinition, EntrySchema, DataType
+#
+
+def get_data_type(context, presentation, field_name, allow_none=False):
+ """
+ Returns the type, whether it's a complex data type (a DataType instance) or a primitive (a
+ Python primitive type class).
+
+ If the type is not specified, defaults to :class:`str`, per note in section 3.2.1.1 of the
+ `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #_Toc379455072>`__
+ """
+
+ type_name = getattr(presentation, field_name)
+
+ if type_name is None:
+ if allow_none:
+ return None
+ else:
+ return str
+
+ # Avoid circular definitions
+ container_data_type = get_container_data_type(presentation)
+ if (container_data_type is not None) and (container_data_type._name == type_name):
+ return None
+
+ # Try complex data type
+ data_type = get_type_by_name(context, type_name, 'data_types')
+ if data_type is not None:
+ return data_type
+
+ # Try primitive data type
+ return get_primitive_data_type(type_name)
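+
+# For example, a property definition with no "type" field yields str (the TOSCA
+# default noted above), "type: string" yields the primitive unicode class, and
+# "type: MyDataType" yields the corresponding DataType presentation (assuming a
+# "MyDataType" is declared under data_types).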
+
+
+#
+# PropertyDefinition, EntrySchema
+#
+
+def get_property_constraints(context, presentation):
+ """
+ If we do not have constraints of our own, returns our type's constraints (if it has any),
+ recursively.
+
+ Implication: if we define even one constraint, the type's constraints will not be inherited.
+ """
+
+ constraints = presentation.constraints
+
+ if constraints is None:
+ # If we don't have any, use our type's
+ the_type = presentation._get_type(context)
+ type_constraints = the_type._get_constraints(context) \
+ if hasattr(the_type, '_get_constraints') else None
+ if type_constraints is not None:
+ constraints = type_constraints
+
+ return constraints
+
+
+#
+# ConstraintClause
+#
+
+def apply_constraint_to_value(context, presentation, constraint_clause, value): # pylint: disable=too-many-statements,too-many-return-statements,too-many-branches
+ """
+ Returns ``False`` if the value does not conform to the constraint.
+ """
+
+ constraint_key = constraint_clause._raw.keys()[0]
+ the_type = constraint_clause._get_type(context)
+ # PropertyAssignment does not have this:
+ entry_schema = getattr(presentation, 'entry_schema', None)
+
+ def coerce_constraint(constraint):
+ return coerce_value(context, presentation, the_type, entry_schema, None, constraint,
+ constraint_key)
+
+ def report(message, constraint):
+ context.validation.report('value %s %s per constraint in "%s": %s'
+ % (message, safe_repr(constraint),
+ presentation._name or presentation._container._name,
+ safe_repr(value)),
+ locator=presentation._locator, level=Issue.BETWEEN_FIELDS)
+
+ if constraint_key == 'equal':
+ constraint = coerce_constraint(constraint_clause.equal)
+ if value != constraint:
+ report('is not equal to', constraint)
+ return False
+
+ elif constraint_key == 'greater_than':
+ constraint = coerce_constraint(constraint_clause.greater_than)
+ if value <= constraint:
+ report('is not greater than', constraint)
+ return False
+
+ elif constraint_key == 'greater_or_equal':
+ constraint = coerce_constraint(constraint_clause.greater_or_equal)
+ if value < constraint:
+ report('is not greater than or equal to', constraint)
+ return False
+
+ elif constraint_key == 'less_than':
+ constraint = coerce_constraint(constraint_clause.less_than)
+ if value >= constraint:
+ report('is not less than', constraint)
+ return False
+
+ elif constraint_key == 'less_or_equal':
+ constraint = coerce_constraint(constraint_clause.less_or_equal)
+ if value > constraint:
+ report('is not less than or equal to', constraint)
+ return False
+
+ elif constraint_key == 'in_range':
+ lower, upper = constraint_clause.in_range
+ lower, upper = coerce_constraint(lower), coerce_constraint(upper)
+ if value < lower:
+ report('is not greater than or equal to lower bound', lower)
+ return False
+ if (upper != 'UNBOUNDED') and (value > upper):
+ report('is not less than or equal to upper bound', upper)
+ return False
+
+ elif constraint_key == 'valid_values':
+ constraint = tuple(coerce_constraint(v) for v in constraint_clause.valid_values)
+ if value not in constraint:
+ report('is not one of', constraint)
+ return False
+
+ elif constraint_key == 'length':
+ constraint = constraint_clause.length
+ try:
+ if len(value) != constraint:
+ report('is not of length', constraint)
+ return False
+ except TypeError:
+ pass # should be validated elsewhere
+
+ elif constraint_key == 'min_length':
+ constraint = constraint_clause.min_length
+ try:
+ if len(value) < constraint:
+ report('has a length less than', constraint)
+ return False
+ except TypeError:
+ pass # should be validated elsewhere
+
+ elif constraint_key == 'max_length':
+ constraint = constraint_clause.max_length
+ try:
+ if len(value) > constraint:
+ report('has a length greater than', constraint)
+ return False
+ except TypeError:
+ pass # should be validated elsewhere
+
+ elif constraint_key == 'pattern':
+ constraint = constraint_clause.pattern
+ try:
+ # From TOSCA 1.0 3.5.2.1:
+ #
+ # "Note: Future drafts of this specification will detail the use of regular expressions
+ # and reference an appropriate standardized grammar."
+ #
+ # So we will just use Python's.
+ if re.match(constraint, str(value)) is None:
+ report('does not match regular expression', constraint)
+ return False
+ except re.error:
+ pass # should be validated elsewhere
+
+ return True
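+
+# Illustrative sketch: for a property defined with
+#
+#     constraints:
+#       - in_range: [1, 65535]
+#
+# both bounds are first coerced to the property's type; an assigned value of 0
+# or 70000 is reported and returns False, while any value within the range
+# returns True.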
+
+
+#
+# Repository
+#
+
+def get_data_type_value(context, presentation, field_name, type_name):
+ the_type = get_type_by_name(context, type_name, 'data_types')
+ if the_type is not None:
+ value = getattr(presentation, field_name)
+ if value is not None:
+ return coerce_data_type_value(context, presentation, the_type, None, None, value, None)
+ else:
+ context.validation.report('field "%s" in "%s" refers to unknown data type "%s"'
+ % (field_name, presentation._fullname, type_name),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ return None
+
+
+#
+# Utils
+#
+
+PRIMITIVE_DATA_TYPES = {
+ # YAML 1.2:
+ 'tag:yaml.org,2002:str': unicode,
+ 'tag:yaml.org,2002:integer': int,
+ 'tag:yaml.org,2002:float': float,
+ 'tag:yaml.org,2002:bool': bool,
+ 'tag:yaml.org,2002:null': None.__class__,
+
+ # TOSCA aliases:
+ 'string': unicode,
+ 'integer': int,
+ 'float': float,
+ 'boolean': bool,
+ 'null': None.__class__}
+
+
+@implements_specification('3.2.1-3', 'tosca-simple-1.0')
+def get_primitive_data_type(type_name):
+ """
+ Many of the types we use in this profile are built-in types from the YAML 1.2 specification
+ (i.e., those identified by the "tag:yaml.org,2002" version tag) [YAML-1.2].
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #_Toc373867862>`__
+ """
+
+ return PRIMITIVE_DATA_TYPES.get(type_name)
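+
+# Doctest-style sketch (Python 2 reprs):
+#
+#     >>> get_primitive_data_type('string')
+#     <type 'unicode'>
+#     >>> get_primitive_data_type('tag:yaml.org,2002:bool')
+#     <type 'bool'>
+#     >>> get_primitive_data_type('map') is None
+#     True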
+
+
+def get_data_type_name(the_type):
+ """
+ Returns the name of the type, whether it's a DataType, a primitive type, or another class.
+ """
+
+ return the_type._name if hasattr(the_type, '_name') else full_type_name(the_type)
+
+
+def coerce_value(context, presentation, the_type, entry_schema, constraints, value, aspect=None): # pylint: disable=too-many-return-statements
+ """
+ Returns the value after it's coerced to its type, reporting validation errors if it cannot be
+ coerced.
+
+ Supports both complex data types and primitives.
+
+ Data types can use the ``coerce_value`` extension to hook their own specialized function.
+ If the extension is present, we will delegate to that hook.
+ """
+
+ # TODO: should support models as well as presentations
+
+ is_function, func = get_function(context, presentation, value)
+ if is_function:
+ return func
+
+ if the_type is None:
+ return value
+
+ if the_type == None.__class__:
+ if value is not None:
+ context.validation.report('field "%s" is of type "null" but has a non-null value: %s'
+ % (presentation._name, safe_repr(value)),
+ locator=presentation._locator, level=Issue.BETWEEN_FIELDS)
+ return None
+
+ # Delegate to 'coerce_value' extension
+ if hasattr(the_type, '_get_extension'):
+ coerce_value_fn_name = the_type._get_extension('coerce_value')
+ if coerce_value_fn_name is not None:
+ if value is None:
+ return None
+ coerce_value_fn = import_fullname(coerce_value_fn_name)
+ return coerce_value_fn(context, presentation, the_type, entry_schema, constraints,
+ value, aspect)
+
+ if hasattr(the_type, '_coerce_value'):
+ # Delegate to '_coerce_value' (likely a DataType instance)
+ return the_type._coerce_value(context, presentation, entry_schema, constraints, value,
+ aspect)
+
+ # Coerce to primitive type
+ return coerce_to_primitive(context, presentation, the_type, constraints, value, aspect)
+
+
+def coerce_to_primitive(context, presentation, primitive_type, constraints, value, aspect=None):
+ """
+ Returns the value after it's coerced to a primitive type, translating exceptions to validation
+ errors if it cannot be coerced.
+ """
+
+ if value is None:
+ return None
+
+ try:
+ # Coerce
+ value = validate_primitive(value, primitive_type,
+ context.validation.allow_primitive_coersion)
+
+ # Check constraints
+ apply_constraints_to_value(context, presentation, constraints, value)
+ except (ValueError, TypeError) as e:
+ report_issue_for_bad_format(context, presentation, primitive_type, value, aspect, e)
+ value = None
+
+ return value
+
+
+def coerce_to_data_type_class(context, presentation, cls, entry_schema, constraints, value,
+ aspect=None):
+ """
+ Returns the value after it's coerced to a data type class, reporting validation errors if it
+ cannot be coerced. Constraints will be applied after coercion.
+
+ Will either call a ``_create`` static function in the class, or instantiate it using a
+ constructor if ``_create`` is not available.
+
+ This will usually be called by a ``coerce_value`` extension hook in a :class:`DataType`.
+ """
+
+ try:
+ if hasattr(cls, '_create'):
+ # Instantiate using creator function
+ value = cls._create(context, presentation, entry_schema, constraints, value, aspect)
+ else:
+ # Normal instantiation
+ value = cls(entry_schema, constraints, value, aspect)
+ except ValueError as e:
+ report_issue_for_bad_format(context, presentation, cls, value, aspect, e)
+ value = None
+
+ # Check constraints
+ value = apply_constraints_to_value(context, presentation, constraints, value)
+
+ return value
+
+
+def apply_constraints_to_value(context, presentation, constraints, value):
+ """
+ Applies all constraints to the value. If the value conforms, returns the value. If it does not
+ conform, returns None.
+ """
+
+ if (value is not None) and (constraints is not None):
+ valid = True
+ for constraint in constraints:
+ if not constraint._apply_to_value(context, presentation, value):
+ valid = False
+ if not valid:
+ value = None
+ return value
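+
+# Doctest-style sketch (with a stub standing in for a real constraint clause):
+#
+#     >>> class Accept(object):
+#     ...     def _apply_to_value(self, context, presentation, value):
+#     ...         return True
+#     >>> apply_constraints_to_value(None, None, [Accept()], 42)
+#     42
+#
+# If any constraint returns False, None is returned instead; note that all
+# constraints are still applied, so each can report its own issue.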
+
+
+def get_container_data_type(presentation):
+ if presentation is None:
+ return None
+ if type(presentation).__name__ == 'DataType':
+ return presentation
+ return get_container_data_type(presentation._container)
+
+
+def report_issue_for_bad_format(context, presentation, the_type, value, aspect, e):
+ if aspect == 'default':
+ aspect = '"default" value'
+ elif aspect is not None:
+ aspect = '"%s" aspect' % aspect
+
+ if aspect is not None:
+ context.validation.report('%s for field "%s" is not a valid "%s": %s'
+ % (aspect, presentation._name or presentation._container._name,
+ get_data_type_name(the_type), safe_repr(value)),
+ locator=presentation._locator, level=Issue.BETWEEN_FIELDS,
+ exception=e)
+ else:
+ context.validation.report('field "%s" is not a valid "%s": %s'
+ % (presentation._name or presentation._container._name,
+ get_data_type_name(the_type), safe_repr(value)),
+ locator=presentation._locator, level=Issue.BETWEEN_FIELDS,
+ exception=e)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/functions.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/functions.py
new file mode 100644
index 0000000..ecbfde9
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/functions.py
@@ -0,0 +1,681 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from StringIO import StringIO # Note: cStringIO does not support Unicode
+import re
+
+from aria.utils.collections import FrozenList
+from aria.utils.formatting import (as_raw, safe_repr)
+from aria.utils.type import full_type_name
+from aria.parser import implements_specification
+from aria.parser.exceptions import InvalidValueError
+from aria.parser.validation import Issue
+from aria.modeling.exceptions import CannotEvaluateFunctionException
+from aria.modeling.models import (Node, NodeTemplate, Relationship, RelationshipTemplate)
+from aria.modeling.functions import (Function, Evaluation)
+
+
+#
+# Intrinsic
+#
+
+@implements_specification('4.3.1', 'tosca-simple-1.0')
+class Concat(Function):
+ """
+ The ``concat`` function is used to concatenate two or more string values within a TOSCA
+ service template.
+ """
+
+ def __init__(self, context, presentation, argument):
+ self.locator = presentation._locator
+
+ if not isinstance(argument, list):
+ raise InvalidValueError(
+ 'function "concat" argument must be a list of string expressions: {0}'
+ .format(safe_repr(argument)),
+ locator=self.locator)
+
+ string_expressions = []
+ for index, an_argument in enumerate(argument):
+ string_expressions.append(parse_string_expression(context, presentation, 'concat',
+ index, None, an_argument))
+ self.string_expressions = FrozenList(string_expressions)
+
+ @property
+ def as_raw(self):
+ string_expressions = []
+ for string_expression in self.string_expressions:
+ if hasattr(string_expression, 'as_raw'):
+ string_expression = as_raw(string_expression)
+ string_expressions.append(string_expression)
+ return {'concat': string_expressions}
+
+ def __evaluate__(self, container_holder):
+ final = True
+ value = StringIO()
+ for e in self.string_expressions:
+ e, final = evaluate(e, final, container_holder)
+ if e is not None:
+ value.write(unicode(e))
+ value = value.getvalue() or u''
+ return Evaluation(value, final)
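+
+# Illustrative sketch: a template value such as
+#
+#     { concat: [ 'http://', { get_attribute: [ SELF, ip_address ] }, ':8080' ] }
+#
+# evaluates each member in order and joins the results; the evaluation is
+# marked non-final if any member (here, the get_attribute call) is non-final.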
+
+
+@implements_specification('4.3.2', 'tosca-simple-1.0')
+class Token(Function):
+ """
+ The ``token`` function is used within a TOSCA service template on a string to parse out
+ (tokenize) substrings separated by one or more token characters within a larger string.
+ """
+
+ def __init__(self, context, presentation, argument):
+ self.locator = presentation._locator
+
+ if (not isinstance(argument, list)) or (len(argument) != 3):
+ raise InvalidValueError('function "token" argument must be a list of 3 parameters: {0}'
+ .format(safe_repr(argument)),
+ locator=self.locator)
+
+ self.string_with_tokens = parse_string_expression(context, presentation, 'token', 0,
+ 'the string to tokenize', argument[0])
+ self.string_of_token_chars = parse_string_expression(context, presentation, 'token', 1,
+ 'the token separator characters',
+ argument[1])
+ self.substring_index = parse_int(context, presentation, 'token', 2,
+ 'the 0-based index of the token to return', argument[2])
+
+ @property
+ def as_raw(self):
+ string_with_tokens = self.string_with_tokens
+ if hasattr(string_with_tokens, 'as_raw'):
+ string_with_tokens = as_raw(string_with_tokens)
+ string_of_token_chars = self.string_of_token_chars
+ if hasattr(string_of_token_chars, 'as_raw'):
+ string_of_token_chars = as_raw(string_of_token_chars)
+ return {'token': [string_with_tokens, string_of_token_chars, self.substring_index]}
+
+ def __evaluate__(self, container_holder):
+ final = True
+ string_with_tokens, final = evaluate(self.string_with_tokens, final, container_holder)
+ string_of_token_chars, final = evaluate(self.string_of_token_chars, final, container_holder)
+
+ if string_of_token_chars:
+ regex = '[' + ''.join(re.escape(c) for c in string_of_token_chars) + ']'
+ split = re.split(regex, string_with_tokens)
+ if self.substring_index < len(split):
+ return Evaluation(split[self.substring_index], final)
+
+ raise CannotEvaluateFunctionException()
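+
+# Doctest-style sketch of the tokenizing core:
+#
+#     >>> import re
+#     >>> re.split('[' + re.escape(':') + ']', 'one:two:three')[1]
+#     'two'
+#
+# so { token: [ 'one:two:three', ':', 1 ] } would evaluate to 'two'.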
+
+
+#
+# Property
+#
+
+@implements_specification('4.4.1', 'tosca-simple-1.0')
+class GetInput(Function):
+ """
+ The ``get_input`` function is used to retrieve the values of properties declared within the
+ inputs section of a TOSCA Service Template.
+ """
+
+ def __init__(self, context, presentation, argument):
+ self.locator = presentation._locator
+
+ self.input_property_name = parse_string_expression(context, presentation, 'get_input',
+ None, 'the input property name',
+ argument)
+
+ if isinstance(self.input_property_name, basestring):
+ the_input = context.presentation.get_from_dict('service_template', 'topology_template',
+ 'inputs', self.input_property_name)
+ if the_input is None:
+ raise InvalidValueError(
+ 'function "get_input" argument is not a valid input name: {0}'
+ .format(safe_repr(argument)),
+ locator=self.locator)
+
+ @property
+ def as_raw(self):
+ return {'get_input': as_raw(self.input_property_name)}
+
+ def __evaluate__(self, container_holder):
+ service = container_holder.service
+ if service is None:
+ raise CannotEvaluateFunctionException()
+
+ value = service.inputs.get(self.input_property_name)
+ if value is not None:
+ value = value.value
+ return Evaluation(value, False) # We never return final evaluations!
+
+ raise InvalidValueError(
+ 'function "get_input" argument is not a valid input name: {0}'
+ .format(safe_repr(self.input_property_name)),
+ locator=self.locator)
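+
+# Illustrative sketch (hypothetical input name): with
+#
+#     topology_template:
+#       inputs:
+#         port:
+#           type: integer
+#
+# a property assignment of { get_input: port } evaluates to the value supplied
+# for the "port" input; the result is deliberately never marked final, so it is
+# always re-evaluated against the service's current inputs.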
+
+
+@implements_specification('4.4.2', 'tosca-simple-1.0')
+class GetProperty(Function):
+ """
+ The ``get_property`` function is used to retrieve property values between modelable entities
+ defined in the same service template.
+ """
+
+ def __init__(self, context, presentation, argument):
+ self.locator = presentation._locator
+
+ if (not isinstance(argument, list)) or (len(argument) < 2):
+ raise InvalidValueError(
+ 'function "get_property" argument must be a list of at least 2 string expressions: '
+ '{0}'.format(safe_repr(argument)),
+ locator=self.locator)
+
+ self.modelable_entity_name = parse_modelable_entity_name(context, presentation,
+ 'get_property', 0, argument[0])
+ # The first of these will be tried as a req-or-cap name:
+ self.nested_property_name_or_index = argument[1:]
+
+ @property
+ def as_raw(self):
+ return {'get_property': [self.modelable_entity_name] + self.nested_property_name_or_index}
+
+ def __evaluate__(self, container_holder):
+ modelable_entities = get_modelable_entities(container_holder, 'get_property', self.locator,
+ self.modelable_entity_name)
+ req_or_cap_name = self.nested_property_name_or_index[0]
+
+ for modelable_entity in modelable_entities:
+ properties = None
+
+ # First argument refers to a requirement template?
+ if hasattr(modelable_entity, 'requirement_templates') \
+ and modelable_entity.requirement_templates \
+ and (req_or_cap_name in [v.name for v in modelable_entity.requirement_templates]):
+ for requirement in modelable_entity.requirement_templates:
+ if requirement.name == req_or_cap_name:
+ # TODO
+ raise CannotEvaluateFunctionException()
+ # First argument refers to a capability?
+ elif hasattr(modelable_entity, 'capabilities') \
+ and modelable_entity.capabilities \
+ and (req_or_cap_name in modelable_entity.capabilities):
+ properties = modelable_entity.capabilities[req_or_cap_name].properties
+ nested_property_name_or_index = self.nested_property_name_or_index[1:]
+ # First argument refers to a capability template?
+ elif hasattr(modelable_entity, 'capability_templates') \
+ and modelable_entity.capability_templates \
+ and (req_or_cap_name in modelable_entity.capability_templates):
+ properties = modelable_entity.capability_templates[req_or_cap_name].properties
+ nested_property_name_or_index = self.nested_property_name_or_index[1:]
+ else:
+ properties = modelable_entity.properties
+ nested_property_name_or_index = self.nested_property_name_or_index
+
+ evaluation = get_modelable_entity_parameter(modelable_entity, properties,
+ nested_property_name_or_index)
+ if evaluation is not None:
+ return evaluation
+
+ raise InvalidValueError(
+ 'function "get_property" could not find "{0}" in modelable entity "{1}"'
+ .format('.'.join(self.nested_property_name_or_index), self.modelable_entity_name),
+ locator=self.locator)
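+
+# Illustrative sketch (hypothetical names): { get_property: [ SELF, port ] }
+# reads the "port" property of the containing node, while
+# { get_property: [ SELF, endpoint, port ] } first tries "endpoint" as a
+# requirement or capability name and, if it names a capability, reads that
+# capability's "port" property instead.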
+
+
+#
+# Attribute
+#
+
+@implements_specification('4.5.1', 'tosca-simple-1.0')
+class GetAttribute(Function):
+ """
+ The ``get_attribute`` function is used to retrieve the values of named attributes declared
+ by the referenced node or relationship template name.
+ """
+
+ def __init__(self, context, presentation, argument):
+ self.locator = presentation._locator
+
+ if (not isinstance(argument, list)) or (len(argument) < 2):
+ raise InvalidValueError(
+ 'function "get_attribute" argument must be a list of at least 2 string expressions:'
+ ' {0}'.format(safe_repr(argument)),
+ locator=self.locator)
+
+ self.modelable_entity_name = parse_modelable_entity_name(context, presentation,
+ 'get_attribute', 0, argument[0])
+ # The first of these will be tried as a req-or-cap name:
+ self.nested_attribute_name_or_index = argument[1:]
+
+ @property
+ def as_raw(self):
+ return {'get_attribute': [self.modelable_entity_name] + self.nested_attribute_name_or_index}
+
+ def __evaluate__(self, container_holder):
+ modelable_entities = get_modelable_entities(container_holder, 'get_attribute', self.locator,
+ self.modelable_entity_name)
+ for modelable_entity in modelable_entities:
+ attributes = modelable_entity.attributes
+ nested_attribute_name_or_index = self.nested_attribute_name_or_index
+ evaluation = get_modelable_entity_parameter(modelable_entity, attributes,
+ nested_attribute_name_or_index)
+ if evaluation is not None:
+ evaluation.final = False # We never return final evaluations!
+ return evaluation
+
+ raise InvalidValueError(
+ 'function "get_attribute" could not find "{0}" in modelable entity "{1}"'
+ .format('.'.join(self.nested_attribute_name_or_index), self.modelable_entity_name),
+ locator=self.locator)
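+
+# Illustrative sketch: { get_attribute: [ SELF, ip_address ] } reads the
+# "ip_address" attribute of the containing node; unlike get_property, the
+# evaluation is always marked non-final, since attributes may change at
+# runtime.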
+
+
+#
+# Operation
+#
+
+@implements_specification('4.6.1', 'tosca-simple-1.0') # pylint: disable=abstract-method
+class GetOperationOutput(Function):
+ """
+ The ``get_operation_output`` function is used to retrieve the values of variables exposed /
+ exported from an interface operation.
+ """
+
+ def __init__(self, context, presentation, argument):
+ self.locator = presentation._locator
+
+ if (not isinstance(argument, list)) or (len(argument) != 4):
+ raise InvalidValueError(
+ 'function "get_operation_output" argument must be a list of 4 parameters: {0}'
+ .format(safe_repr(argument)),
+ locator=self.locator)
+
+ self.modelable_entity_name = parse_string_expression(context, presentation,
+ 'get_operation_output', 0,
+ 'modelable entity name', argument[0])
+ self.interface_name = parse_string_expression(context, presentation, 'get_operation_output',
+ 1, 'the interface name', argument[1])
+ self.operation_name = parse_string_expression(context, presentation, 'get_operation_output',
+ 2, 'the operation name', argument[2])
+ self.output_variable_name = parse_string_expression(context, presentation,
+ 'get_operation_output', 3,
+ 'the output name', argument[3])
+
+ @property
+ def as_raw(self):
+ interface_name = self.interface_name
+ if hasattr(interface_name, 'as_raw'):
+ interface_name = as_raw(interface_name)
+ operation_name = self.operation_name
+ if hasattr(operation_name, 'as_raw'):
+ operation_name = as_raw(operation_name)
+ output_variable_name = self.output_variable_name
+ if hasattr(output_variable_name, 'as_raw'):
+ output_variable_name = as_raw(output_variable_name)
+ return {'get_operation_output': [self.modelable_entity_name, interface_name, operation_name,
+ output_variable_name]}
+
+
+#
+# Navigation
+#
+
+@implements_specification('4.7.1', 'tosca-simple-1.0')
+class GetNodesOfType(Function):
+ """
+ The ``get_nodes_of_type`` function can be used to retrieve a list of all known instances of
+ nodes of the declared Node Type.
+ """
+
+ def __init__(self, context, presentation, argument):
+ self.locator = presentation._locator
+
+ self.node_type_name = parse_string_expression(context, presentation, 'get_nodes_of_type',
+ None, 'the node type name', argument)
+
+ if isinstance(self.node_type_name, basestring):
+ node_types = context.presentation.get('service_template', 'node_types')
+ if (node_types is None) or (self.node_type_name not in node_types):
+ raise InvalidValueError(
+ 'function "get_nodes_of_type" argument is not a valid node type name: {0}'
+ .format(safe_repr(argument)),
+ locator=self.locator)
+
+ @property
+ def as_raw(self):
+ node_type_name = self.node_type_name
+ if hasattr(node_type_name, 'as_raw'):
+ node_type_name = as_raw(node_type_name)
+ return {'get_nodes_of_type': node_type_name}
+
+ def __evaluate__(self, container):
+ pass
+
+
+#
+# Artifact
+#
+
+@implements_specification('4.8.1', 'tosca-simple-1.0') # pylint: disable=abstract-method
+class GetArtifact(Function):
+ """
+ The ``get_artifact`` function is used to retrieve artifact location between modelable
+ entities defined in the same service template.
+ """
+
+ def __init__(self, context, presentation, argument):
+ self.locator = presentation._locator
+
+ if (not isinstance(argument, list)) or (len(argument) < 2) or (len(argument) > 4):
+ raise InvalidValueError(
+ 'function "get_artifact" argument must be a list of 2 to 4 parameters: {0}'
+ .format(safe_repr(argument)),
+ locator=self.locator)
+
+ self.modelable_entity_name = parse_string_expression(context, presentation, 'get_artifact',
+ 0, 'modelable entity name',
+ argument[0])
+ self.artifact_name = parse_string_expression(context, presentation, 'get_artifact', 1,
+ 'the artifact name', argument[1])
+ # The third and fourth parameters are optional (the argument list may have only 2 members)
+ self.location = parse_string_expression(context, presentation, 'get_artifact', 2,
+ 'the location or "LOCAL_FILE"', argument[2]) \
+ if len(argument) > 2 else None
+ self.remove = parse_bool(context, presentation, 'get_artifact', 3, 'the removal flag',
+ argument[3]) if len(argument) > 3 else None
+
+ @property
+ def as_raw(self):
+ artifact_name = self.artifact_name
+ if hasattr(artifact_name, 'as_raw'):
+ artifact_name = as_raw(artifact_name)
+ location = self.location
+ if hasattr(location, 'as_raw'):
+ location = as_raw(location)
+ return {'get_artifact': [self.modelable_entity_name, artifact_name, location, self.remove]}
+
+
+#
+# Utils
+#
+
+def get_function(context, presentation, value):
+ functions = context.presentation.presenter.functions
+ if isinstance(value, dict) and (len(value) == 1):
+ key = value.keys()[0]
+ if key in functions:
+ try:
+ return True, functions[key](context, presentation, value[key])
+ except InvalidValueError as e:
+ context.validation.report(issue=e.issue)
+ return True, None
+ return False, None
+
+
+def parse_string_expression(context, presentation, name, index, explanation, value): # pylint: disable=unused-argument
+ is_function, func = get_function(context, presentation, value)
+ if is_function:
+ return func
+ else:
+ value = str(value)
+ return value
+
+
+def parse_int(context, presentation, name, index, explanation, value): # pylint: disable=unused-argument
+ if not isinstance(value, int):
+ try:
+ value = int(value)
+ except ValueError:
+ raise invalid_value(name, index, 'an integer', explanation, value,
+ presentation._locator)
+ return value
+
+
+def parse_bool(context, presentation, name, index, explanation, value): # pylint: disable=unused-argument
+ if not isinstance(value, bool):
+ raise invalid_value(name, index, 'a boolean', explanation, value, presentation._locator)
+ return value
+
+
+def parse_modelable_entity_name(context, presentation, name, index, value):
+ value = parse_string_expression(context, presentation, name, index, 'the modelable entity name',
+ value)
+ if value == 'SELF':
+ the_self, _ = parse_self(presentation)
+ if the_self is None:
+ raise invalid_modelable_entity_name(name, index, value, presentation._locator,
+ 'a node template or a relationship template')
+ elif value == 'HOST':
+ _, self_variant = parse_self(presentation)
+ if self_variant != 'node_template':
+ raise invalid_modelable_entity_name(name, index, value, presentation._locator,
+ 'a node template')
+ elif (value == 'SOURCE') or (value == 'TARGET'):
+ _, self_variant = parse_self(presentation)
+ if self_variant != 'relationship_template':
+ raise invalid_modelable_entity_name(name, index, value, presentation._locator,
+ 'a relationship template')
+ elif isinstance(value, basestring):
+ node_templates = \
+ context.presentation.get('service_template', 'topology_template', 'node_templates') \
+ or {}
+ relationship_templates = \
+ context.presentation.get('service_template', 'topology_template',
+ 'relationship_templates') \
+ or {}
+ if (value not in node_templates) and (value not in relationship_templates):
+ raise InvalidValueError(
+ 'function "{0}" parameter {1:d} is not a valid modelable entity name: {2}'
+ .format(name, index + 1, safe_repr(value)),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ return value
+
+
+def parse_self(presentation):
+ from ..types import (NodeType, RelationshipType)
+ from ..templates import (
+ NodeTemplate as NodeTemplatePresentation,
+ RelationshipTemplate as RelationshipTemplatePresentation
+ )
+
+ if presentation is None:
+ return None, None
+ elif isinstance(presentation, NodeTemplatePresentation) or isinstance(presentation, NodeType):
+ return presentation, 'node_template'
+ elif isinstance(presentation, RelationshipTemplatePresentation) \
+ or isinstance(presentation, RelationshipType):
+ return presentation, 'relationship_template'
+ else:
+ return parse_self(presentation._container)
+
+
+def evaluate(value, final, container_holder):
+ """
+ Calls ``__evaluate__`` and passes on ``final`` state.
+ """
+
+ if hasattr(value, '__evaluate__'):
+ value = value.__evaluate__(container_holder)
+ if not value.final:
+ final = False
+ return value.value, final
+ else:
+ return value, final
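+
+# Doctest-style sketch: plain values pass through with the incoming "final"
+# flag, while Function instances are evaluated and can only lower it:
+#
+#     >>> evaluate('plain', True, None)
+#     ('plain', True)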
+
+
+@implements_specification('4.1', 'tosca-simple-1.0')
+def get_modelable_entities(container_holder, name, locator, modelable_entity_name):
+ """
+ The following keywords MAY be used in some TOSCA function in place of a TOSCA Node or
+ Relationship Template name.
+ """
+
+ if modelable_entity_name == 'SELF':
+ return get_self(container_holder, name, locator)
+ elif modelable_entity_name == 'HOST':
+ return get_hosts(container_holder, name, locator)
+ elif modelable_entity_name == 'SOURCE':
+ return get_source(container_holder, name, locator)
+ elif modelable_entity_name == 'TARGET':
+ return get_target(container_holder, name, locator)
+ elif isinstance(modelable_entity_name, basestring):
+ modelable_entities = []
+
+ service = container_holder.service
+ if service is not None:
+ for node in service.nodes.itervalues():
+ if node.node_template.name == modelable_entity_name:
+ modelable_entities.append(node)
+ else:
+ service_template = container_holder.service_template
+ if service_template is not None:
+ for node_template in service_template.node_templates.itervalues():
+ if node_template.name == modelable_entity_name:
+ modelable_entities.append(node_template)
+
+ if not modelable_entities:
+ raise CannotEvaluateFunctionException()
+
+ return modelable_entities
+
+ raise InvalidValueError('function "{0}" could not find modelable entity "{1}"'
+ .format(name, modelable_entity_name),
+ locator=locator)
+
+
+def get_self(container_holder, name, locator):
+ """
+ A TOSCA orchestrator will interpret this keyword as the Node or Relationship Template instance
+ that contains the function at the time the function is evaluated.
+ """
+
+ container = container_holder.container
+ if (not isinstance(container, Node)) and \
+ (not isinstance(container, NodeTemplate)) and \
+ (not isinstance(container, Relationship)) and \
+ (not isinstance(container, RelationshipTemplate)):
+ raise InvalidValueError('function "{0}" refers to "SELF" but it is not contained in '
+ 'a node or a relationship: {1}'.format(name,
+ full_type_name(container)),
+ locator=locator)
+
+ return [container]
+
+
+def get_hosts(container_holder, name, locator):
+ """
+ A TOSCA orchestrator will interpret this keyword to refer to all of the nodes that "host" the
+ node using this reference (i.e., as identified by its HostedOn relationship).
+
+ Specifically, TOSCA orchestrators that encounter this keyword when evaluating the ``get_attribute``
+ or ``get_property`` functions SHALL search each node along the "HostedOn" relationship chain
+ starting at the immediate node that hosts the node where the function was evaluated (and then
+ that node's host node, and so forth) until a match is found or the "HostedOn" relationship chain
+ ends.
+ """
+
+ container = container_holder.container
+ if (not isinstance(container, Node)) and (not isinstance(container, NodeTemplate)):
+ raise InvalidValueError('function "{0}" refers to "HOST" but it is not contained in '
+ 'a node: {1}'.format(name, full_type_name(container)),
+ locator=locator)
+
+ if not isinstance(container, Node):
+ # NodeTemplate does not have "host"; we'll wait until instantiation
+ raise CannotEvaluateFunctionException()
+
+ host = container.host
+ if host is None:
+ # We might have a host later
+ raise CannotEvaluateFunctionException()
+
+ return [host]
+
+
+def get_source(container_holder, name, locator):
+ """
+ A TOSCA orchestrator will interpret this keyword as the Node Template instance that is at the
+ source end of the relationship that contains the referencing function.
+ """
+
+ container = container_holder.container
+ if (not isinstance(container, Relationship)) and \
+ (not isinstance(container, RelationshipTemplate)):
+ raise InvalidValueError('function "{0}" refers to "SOURCE" but it is not contained in '
+ 'a relationship: {1}'.format(name, full_type_name(container)),
+ locator=locator)
+
+ if not isinstance(container, RelationshipTemplate):
+ # RelationshipTemplate does not have "source_node"; we'll wait until instantiation
+ raise CannotEvaluateFunctionException()
+
+ return [container.source_node]
+
+
+def get_target(container_holder, name, locator):
+ """
+ A TOSCA orchestrator will interpret this keyword as the Node Template instance that is at the
+ target end of the relationship that contains the referencing function.
+ """
+
+ container = container_holder.container
+ if (not isinstance(container, Relationship)) and \
+ (not isinstance(container, RelationshipTemplate)):
+ raise InvalidValueError('function "{0}" refers to "TARGET" but it is not contained in '
+ 'a relationship: {1}'.format(name, full_type_name(container)),
+ locator=locator)
+
+ if not isinstance(container, RelationshipTemplate):
+ # RelationshipTemplate does not have "target_node"; we'll wait until instantiation
+ raise CannotEvaluateFunctionException()
+
+ return [container.target_node]
+
+
+def get_modelable_entity_parameter(modelable_entity, parameters, nested_parameter_name_or_index):
+ if not parameters:
+ return Evaluation(None, True)
+
+ found = True
+ final = True
+ value = parameters
+
+ for name_or_index in nested_parameter_name_or_index:
+ if (isinstance(value, dict) and (name_or_index in value)) \
+ or ((isinstance(value, list) and (name_or_index < len(value)))):
+ value = value[name_or_index] # Parameter
+ # We are not using Parameter.value, but rather Parameter._value, because we want to make
+ # sure to get "final" (it is swallowed by Parameter.value)
+ value, final = evaluate(value._value, final, value)
+ else:
+ found = False
+ break
+
+ return Evaluation(value, final) if found else None
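+
+# Illustrative sketch: for parameters such as {'port': <Parameter _value=8080>}
+# the path ['port'] yields Evaluation(8080, True), while a path naming a
+# missing parameter yields None so that the caller can try the next modelable
+# entity.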
+
+
+def invalid_modelable_entity_name(name, index, value, locator, contexts):
+ return InvalidValueError('function "{0}" parameter {1:d} can be "{2}" only in {3}'
+ .format(name, index + 1, value, contexts),
+ locator=locator, level=Issue.FIELD)
+
+
+def invalid_value(name, index, the_type, explanation, value, locator):
+ return InvalidValueError(
+ 'function "{0}" {1} is not {2}{3}: {4}'
+ .format(name,
+ 'parameter {0:d}'.format(index + 1) if index is not None else 'argument',
+ the_type,
+ ', {0}'.format(explanation) if explanation is not None else '',
+ safe_repr(value)),
+ locator=locator, level=Issue.FIELD)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/interfaces.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/interfaces.py
new file mode 100644
index 0000000..23a03b7
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/interfaces.py
@@ -0,0 +1,530 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.collections import (merge, deepcopy_with_locators, OrderedDict)
+from aria.parser.presentation import get_locator
+from aria.parser.validation import Issue
+
+from .parameters import (coerce_parameter_value, convert_parameter_definitions_to_values)
+
+
+#
+# InterfaceType
+#
+
+def get_inherited_operations(context, presentation):
+ """
+ Returns our operation definitions added on top of those of our parent, if we have one
+ (recursively).
+
+ Allows overriding all aspects of parent operations except input data types.
+ """
+
+ # Get operations from parent
+ parent = presentation._get_parent(context)
+ operations = get_inherited_operations(context, parent) if parent is not None else OrderedDict()
+
+ # Add/merge our operations
+ our_operations = presentation.operations # OperationDefinition
+ merge_operation_definitions(context, operations, our_operations, presentation._name,
+ presentation, 'type')
+
+ for operation in operations.itervalues():
+ operation._reset_method_cache()
+
+ return operations
+
+
+#
+# InterfaceDefinition
+#
+
+def get_and_override_input_definitions_from_type(context, presentation):
+ """
+ Returns our input definitions added on top of those of the interface type, if specified.
+
+ Allows overriding all aspects of parent interface type inputs except data types.
+ """
+
+ inputs = OrderedDict()
+
+ # Get inputs from type
+ the_type = presentation._get_type(context) # InterfaceType
+ type_inputs = the_type._get_inputs(context) if the_type is not None else None
+ if type_inputs:
+ for input_name, type_input in type_inputs.iteritems():
+ inputs[input_name] = type_input._clone(presentation)
+
+ # Add/merge our inputs
+ our_inputs = presentation.inputs # PropertyDefinition
+ if our_inputs:
+ merge_input_definitions(context, inputs, our_inputs, presentation._name, None, presentation,
+ 'definition')
+
+ return inputs
+
+
+def get_and_override_operation_definitions_from_type(context, presentation):
+ """
+ Returns our operation definitions added on top of those of the interface type, if specified.
+
+ Allows overriding all aspects of the parent interface type's operations except input data types.
+ """
+
+ operations = OrderedDict()
+
+ # Get operations from type
+ the_type = presentation._get_type(context) # InterfaceType
+ type_operations = the_type._get_operations(context) if the_type is not None else None
+ if type_operations:
+ for operations_name, type_operation in type_operations.iteritems():
+ operations[operations_name] = type_operation._clone(presentation)
+
+ # Add/merge our operations
+ our_operations = presentation.operations # OperationDefinition
+ merge_operation_definitions(context, operations, our_operations, presentation._name,
+ presentation, 'definition')
+
+ return operations
+
+
+#
+# NodeType, RelationshipType, GroupType
+#
+
+def get_inherited_interface_definitions(context, presentation, type_name, for_presentation=None):
+ """
+ Returns our interface definitions added on top of those of our parent, if we have one
+ (recursively).
+
+ Allows overriding all aspects of parent interfaces except interface and operation input data
+ types.
+ """
+
+ if for_presentation is None:
+ for_presentation = presentation
+
+ # Get interfaces from parent
+ parent = presentation._get_parent(context)
+ interfaces = get_inherited_interface_definitions(context, parent, type_name, for_presentation) \
+ if parent is not None else OrderedDict()
+
+ # Add/merge interfaces from their types
+ merge_interface_definitions_from_their_types(context, interfaces, presentation)
+
+ # Add/merge our interfaces
+ our_interfaces = presentation.interfaces
+ merge_interface_definitions(context, interfaces, our_interfaces, presentation, for_presentation)
+
+ return interfaces
+
+
+#
+# NodeTemplate, RelationshipTemplate, GroupTemplate
+#
+
+def get_template_interfaces(context, presentation, type_name):
+ """
+ Returns the assigned interface_template values while making sure they are defined in the type.
+ This includes the interfaces themselves, their operations, and inputs for interfaces and
+ operations.
+
+ Interface and operation inputs' default values, if available, will be used if we did not assign
+ them.
+
+ Makes sure that required inputs indeed end up with a value.
+
+ This code is especially complex due to the many levels of nesting involved.
+ """
+
+ template_interfaces = OrderedDict()
+
+ the_type = presentation._get_type(context) # NodeType, RelationshipType, GroupType
+ # InterfaceDefinition (or InterfaceAssignment in the case of RelationshipTemplate):
+ interface_definitions = the_type._get_interfaces(context) if the_type is not None else None
+
+ # Copy over interfaces from the type (will initialize inputs with default values)
+ if interface_definitions is not None:
+ for interface_name, interface_definition in interface_definitions.iteritems():
+ # Note that in the case of a RelationshipTemplate, we will already have the values as
+ # InterfaceAssignment. It will not be converted, just cloned.
+ template_interfaces[interface_name] = \
+ convert_interface_definition_from_type_to_template(context, interface_definition,
+ presentation)
+
+ # Fill in our interfaces
+ our_interface_assignments = presentation.interfaces
+ if our_interface_assignments:
+ # InterfaceAssignment:
+ for interface_name, our_interface_assignment in our_interface_assignments.iteritems():
+ if interface_name in template_interfaces:
+ interface_assignment = template_interfaces[interface_name] # InterfaceAssignment
+ # InterfaceDefinition (or InterfaceAssignment in the case of RelationshipTemplate):
+ interface_definition = interface_definitions[interface_name]
+ merge_interface(context, presentation, interface_assignment,
+ our_interface_assignment, interface_definition, interface_name)
+ else:
+ context.validation.report(
+ 'interface definition "%s" not declared at %s "%s" in "%s"'
+ % (interface_name, type_name, presentation.type, presentation._fullname),
+ locator=our_interface_assignment._locator, level=Issue.BETWEEN_TYPES)
+
+ # Check that there are no required inputs that we haven't assigned
+ for interface_name, interface_template in template_interfaces.iteritems():
+ if interface_name in interface_definitions:
+ # InterfaceDefinition (or InterfaceAssignment in the case of RelationshipTemplate):
+ interface_definition = interface_definitions[interface_name]
+ our_interface_assignment = our_interface_assignments.get(interface_name) \
+ if our_interface_assignments is not None else None
+ validate_required_inputs(context, presentation, interface_template,
+ interface_definition, our_interface_assignment, interface_name)
+
+ return template_interfaces
+
+
+#
+# Utils
+#
+
+def convert_interface_definition_from_type_to_template(context, presentation, container):
+ from ..assignments import InterfaceAssignment
+
+ if isinstance(presentation, InterfaceAssignment):
+ # Nothing to convert, so just clone
+ return presentation._clone(container)
+
+ raw = convert_interface_definition_from_type_to_raw_template(context, presentation)
+ return InterfaceAssignment(name=presentation._name, raw=raw, container=container)
+
+
+def convert_interface_definition_from_type_to_raw_template(context, presentation): # pylint: disable=invalid-name
+ raw = OrderedDict()
+
+ # Copy default values for inputs
+ interface_inputs = presentation._get_inputs(context)
+ if interface_inputs is not None:
+ raw['inputs'] = convert_parameter_definitions_to_values(context, interface_inputs)
+
+ # Copy operations
+ operations = presentation._get_operations(context)
+ if operations:
+ for operation_name, operation in operations.iteritems():
+ raw[operation_name] = OrderedDict()
+ description = operation.description
+ if description is not None:
+ raw[operation_name]['description'] = deepcopy_with_locators(description._raw)
+ implementation = operation.implementation
+ if implementation is not None:
+ raw[operation_name]['implementation'] = deepcopy_with_locators(implementation._raw)
+ inputs = operation.inputs
+ if inputs is not None:
+ raw[operation_name]['inputs'] = convert_parameter_definitions_to_values(context,
+ inputs)
+
+ return raw
+
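+# The raw dict built above mirrors the long-form YAML notation, e.g.
+# (hypothetical values):
+#
+#   {'inputs': {...},
+#    'create': {'implementation': 'scripts/create.sh', 'inputs': {...}},
+#    'delete': {'implementation': 'scripts/delete.sh'}}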
+
+def convert_requirement_interface_definitions_from_type_to_raw_template(context, raw_requirement, # pylint: disable=invalid-name
+ interface_definitions):
+ if not interface_definitions:
+ return
+ if 'interfaces' not in raw_requirement:
+ raw_requirement['interfaces'] = OrderedDict()
+ for interface_name, interface_definition in interface_definitions.iteritems():
+ raw_interface = convert_interface_definition_from_type_to_raw_template(context,
+ interface_definition)
+ if interface_name in raw_requirement['interfaces']:
+ merge(raw_requirement['interfaces'][interface_name], raw_interface)
+ else:
+ raw_requirement['interfaces'][interface_name] = raw_interface
+
+
+def merge_interface(context, presentation, interface_assignment, our_interface_assignment,
+ interface_definition, interface_name):
+ # Assign/merge interface inputs
+ assign_raw_inputs(context, interface_assignment._raw, our_interface_assignment.inputs,
+ interface_definition._get_inputs(context), interface_name, None, presentation)
+
+ # Assign operation implementations and inputs
+ our_operation_templates = our_interface_assignment.operations # OperationAssignment
+ # OperationDefinition or OperationAssignment:
+ operation_definitions = interface_definition._get_operations(context) \
+ if hasattr(interface_definition, '_get_operations') else interface_definition.operations
+ if our_operation_templates:
+ # OperationAssignment:
+ for operation_name, our_operation_template in our_operation_templates.iteritems():
+ operation_definition = operation_definitions.get(operation_name) # OperationDefinition
+
+ our_input_assignments = our_operation_template.inputs
+ our_implementation = our_operation_template.implementation
+
+ if operation_definition is None:
+ context.validation.report(
+ 'interface definition "%s" refers to an unknown operation "%s" in "%s"'
+ % (interface_name, operation_name, presentation._fullname),
+ locator=our_operation_template._locator, level=Issue.BETWEEN_TYPES)
+
+ if (our_input_assignments is not None) or (our_implementation is not None):
+ # Make sure we have the dict
+ if (operation_name not in interface_assignment._raw) \
+ or (interface_assignment._raw[operation_name] is None):
+ interface_assignment._raw[operation_name] = OrderedDict()
+
+ if our_implementation is not None:
+ interface_assignment._raw[operation_name]['implementation'] = \
+ deepcopy_with_locators(our_implementation._raw)
+
+ # Assign/merge operation inputs
+ input_definitions = operation_definition.inputs \
+ if operation_definition is not None else None
+ assign_raw_inputs(context, interface_assignment._raw[operation_name],
+ our_input_assignments, input_definitions, interface_name,
+ operation_name, presentation)
+
+
+def merge_raw_input_definition(context, the_raw_input, our_input, interface_name, operation_name,
+ presentation, type_name):
+ # Check if we changed the type
+ # TODO: allow a sub-type?
+ input_type1 = the_raw_input.get('type')
+ input_type2 = our_input.type
+ if input_type1 != input_type2:
+ if operation_name is not None:
+ context.validation.report(
+ 'interface %s "%s" changes operation input "%s.%s" type from "%s" to "%s" in "%s"'
+ % (type_name, interface_name, operation_name, our_input._name, input_type1,
+ input_type2, presentation._fullname),
+ locator=input_type2._locator, level=Issue.BETWEEN_TYPES)
+ else:
+ context.validation.report(
+ 'interface %s "%s" changes input "%s" type from "%s" to "%s" in "%s"'
+ % (type_name, interface_name, our_input._name, input_type1, input_type2,
+ presentation._fullname),
+ locator=input_type2._locator, level=Issue.BETWEEN_TYPES)
+
+ # Merge
+ merge(the_raw_input, our_input._raw)
+
+
+def merge_input_definitions(context, inputs, our_inputs, interface_name, operation_name,
+ presentation, type_name):
+ for input_name, our_input in our_inputs.iteritems():
+ if input_name in inputs:
+ merge_raw_input_definition(context, inputs[input_name]._raw, our_input, interface_name,
+ operation_name, presentation, type_name)
+ else:
+ inputs[input_name] = our_input._clone(presentation)
+
+
+def merge_raw_input_definitions(context, raw_inputs, our_inputs, interface_name, operation_name,
+ presentation, type_name):
+ for input_name, our_input in our_inputs.iteritems():
+ if input_name in raw_inputs:
+ merge_raw_input_definition(context, raw_inputs[input_name], our_input, interface_name,
+ operation_name, presentation, type_name)
+ else:
+ raw_inputs[input_name] = deepcopy_with_locators(our_input._raw)
+
+
+def merge_raw_operation_definition(context, raw_operation, our_operation, interface_name,
+ presentation, type_name):
+ if not isinstance(our_operation._raw, dict):
+ # Convert short form to long form
+ raw_operation['implementation'] = deepcopy_with_locators(our_operation._raw)
+ return
+
+ # Add/merge inputs
+ our_operation_inputs = our_operation.inputs
+ if our_operation_inputs:
+ # Make sure we have the dict
+ if ('inputs' not in raw_operation) or (raw_operation.get('inputs') is None):
+ raw_operation['inputs'] = OrderedDict()
+
+ merge_raw_input_definitions(context, raw_operation['inputs'], our_operation_inputs,
+ interface_name, our_operation._name, presentation, type_name)
+
+ # Override the description
+ if our_operation._raw.get('description') is not None:
+ raw_operation['description'] = deepcopy_with_locators(our_operation._raw['description'])
+
+ # Add/merge implementation
+ if our_operation._raw.get('implementation') is not None:
+ if raw_operation.get('implementation') is not None:
+ merge(raw_operation['implementation'],
+ deepcopy_with_locators(our_operation._raw['implementation']))
+ else:
+ raw_operation['implementation'] = \
+ deepcopy_with_locators(our_operation._raw['implementation'])
+
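+# The short form handled above and its long-form equivalent (both are valid
+# TOSCA):
+#
+#   create: scripts/create.sh
+#
+#   create:
+#     implementation: scripts/create.sh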
+
+def merge_operation_definitions(context, operations, our_operations, interface_name, presentation,
+ type_name):
+ if not our_operations:
+ return
+ for operation_name, our_operation in our_operations.iteritems():
+ if operation_name in operations:
+ merge_raw_operation_definition(context, operations[operation_name]._raw, our_operation,
+ interface_name, presentation, type_name)
+ else:
+ operations[operation_name] = our_operation._clone(presentation)
+
+
+def merge_raw_operation_definitions(context, raw_operations, our_operations, interface_name,
+ presentation, type_name):
+ for operation_name, our_operation in our_operations.iteritems():
+ if operation_name in raw_operations:
+ raw_operation = raw_operations[operation_name]
+ if isinstance(raw_operation, basestring):
+ # Convert short form to long form
+ raw_operations[operation_name] = OrderedDict((('implementation', raw_operation),))
+ raw_operation = raw_operations[operation_name]
+ merge_raw_operation_definition(context, raw_operation, our_operation, interface_name,
+ presentation, type_name)
+ else:
+ raw_operations[operation_name] = deepcopy_with_locators(our_operation._raw)
+
+
+# From either an InterfaceType or an InterfaceDefinition:
+def merge_interface_definition(context, interface, our_source, presentation, type_name):
+ if hasattr(our_source, 'type'):
+ # Check if we changed the interface type
+ type1 = interface._get_type(context)
+ type2 = our_source._get_type(context)
+
+ if (type2 is not None) and not type1._is_descendant(context, type2):
+ context.validation.report(
+ 'interface definition type "{0}" is not a descendant of overridden '
+ 'interface definition type "{1}"' \
+ .format(type2._name, type1._name),
+ locator=our_source._locator, level=Issue.BETWEEN_TYPES)
+
+ # Add/merge inputs
+ our_interface_inputs = our_source._get_inputs(context) \
+ if hasattr(our_source, '_get_inputs') else our_source.inputs
+ if our_interface_inputs:
+ # Make sure we have the dict
+ if ('inputs' not in interface._raw) or (interface._raw.get('inputs') is None):
+ interface._raw['inputs'] = OrderedDict()
+
+ merge_raw_input_definitions(context, interface._raw['inputs'], our_interface_inputs,
+ our_source._name, None, presentation, type_name)
+
+ # Add/merge operations
+ our_operations = our_source._get_operations(context) \
+ if hasattr(our_source, '_get_operations') else our_source.operations
+ if our_operations is not None:
+ merge_raw_operation_definitions(context, interface._raw, our_operations, our_source._name,
+ presentation, type_name)
+
+
+def merge_interface_definitions(context, interfaces, our_interfaces, presentation,
+ for_presentation=None):
+ if not our_interfaces:
+ return
+ for name, our_interface in our_interfaces.iteritems():
+ if name in interfaces:
+ merge_interface_definition(context, interfaces[name], our_interface, presentation,
+ 'definition')
+ else:
+ interfaces[name] = our_interface._clone(for_presentation)
+
+
+def merge_interface_definitions_from_their_types(context, interfaces, presentation):
+ for interface in interfaces.itervalues():
+ the_type = interface._get_type(context) # InterfaceType
+ if the_type is not None:
+ merge_interface_definition(context, interface, the_type, presentation, 'type')
+
+
+def assign_raw_inputs(context, values, assignments, definitions, interface_name, operation_name,
+ presentation):
+ if not assignments:
+ return
+
+ # Make sure we have the dict
+ if ('inputs' not in values) or (values['inputs'] is None):
+ values['inputs'] = OrderedDict()
+
+ # Assign inputs
+ for input_name, assignment in assignments.iteritems():
+ if (definitions is not None) and (input_name not in definitions):
+ if operation_name is not None:
+ context.validation.report(
+ 'interface definition "%s" assigns a value to an unknown operation input'
+ ' "%s.%s" in "%s"'
+ % (interface_name, operation_name, input_name, presentation._fullname),
+ locator=assignment._locator, level=Issue.BETWEEN_TYPES)
+ else:
+ context.validation.report(
+ 'interface definition "%s" assigns a value to an unknown input "%s" in "%s"'
+ % (interface_name, input_name, presentation._fullname),
+ locator=assignment._locator, level=Issue.BETWEEN_TYPES)
+
+ definition = definitions.get(input_name) if definitions is not None else None
+
+ # Note: default value has already been assigned
+
+ # Coerce value
+ values['inputs'][input_name] = coerce_parameter_value(context, assignment, definition,
+ assignment.value)
+
+
+def validate_required_inputs(context, presentation, assignment, definition, original_assignment,
+ interface_name, operation_name=None):
+ # The validation of the `required` field of inputs that belong to operations and interfaces
+ # (as opposed to topology template and workflow inputs) is done only in the parsing stage.
+ # This reasoning follows the TOSCA spirit, where anything that is declared as required in the
+ # type must be assigned a value in the corresponding template.
+ input_definitions = definition.inputs
+ if input_definitions:
+ for input_name, input_definition in input_definitions.iteritems():
+ if input_definition.required:
+ prop = assignment.inputs.get(input_name) \
+ if ((assignment is not None) and (assignment.inputs is not None)) else None
+ value = prop.value if prop is not None else None
+ value = value.value if value is not None else None
+ if value is None:
+ if operation_name is not None:
+ context.validation.report(
+ 'interface definition "%s" does not assign a value to a required'
+ ' operation input "%s.%s" in "%s"'
+ % (interface_name, operation_name, input_name, presentation._fullname),
+ locator=get_locator(original_assignment, presentation._locator),
+ level=Issue.BETWEEN_TYPES)
+ else:
+ context.validation.report(
+ 'interface definition "%s" does not assign a value to a required input'
+ ' "%s" in "%s"'
+ % (interface_name, input_name, presentation._fullname),
+ locator=get_locator(original_assignment, presentation._locator),
+ level=Issue.BETWEEN_TYPES)
+
+ if operation_name is not None:
+ return
+
+ assignment_operations = assignment.operations
+ operation_definitions = definition._get_operations(context)
+ if operation_definitions:
+ for operation_name, operation_definition in operation_definitions.iteritems():
+ assignment_operation = assignment_operations.get(operation_name) \
+ if assignment_operations is not None else None
+ original_operation = \
+ original_assignment.operations.get(operation_name, original_assignment) \
+ if (original_assignment is not None) \
+ and (original_assignment.operations is not None) \
+ else original_assignment
+ validate_required_inputs(context, presentation, assignment_operation,
+ operation_definition, original_operation, interface_name,
+ operation_name)
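+
+# For example (hypothetical), if the type declares a required operation input
+# without a default:
+#
+#   interfaces:
+#     Standard:
+#       create:
+#         inputs:
+#           image_id: {type: string, required: true}
+#
+# then a template that assigns no value to "image_id" is reported by the
+# recursion above as a BETWEEN_TYPES issue.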
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/parameters.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/parameters.py
new file mode 100644
index 0000000..9bafeec
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/parameters.py
@@ -0,0 +1,230 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.collections import (merge, deepcopy_with_locators, OrderedDict)
+from aria.utils.formatting import pluralize
+from aria.parser.presentation import Value
+from aria.parser.validation import Issue
+
+from .data_types import (coerce_value, get_primitive_data_type)
+from ..presentation.types import get_type_by_name
+
+
+#
+# ArtifactType, DataType, CapabilityType, RelationshipType, NodeType, GroupType, PolicyType
+#
+
+def get_inherited_parameter_definitions(context, presentation, field_name, for_presentation=None):
+ """
+ Returns our parameter definitions added on top of those of our parent, if we have one
+ (recursively).
+
+ Allows overriding all aspects of parent properties except data type.
+ """
+
+ if for_presentation is None:
+ for_presentation = presentation
+
+ # Get definitions from parent
+ # If we inherit from a primitive, it does not have a parent:
+ parent = presentation._get_parent(context) if hasattr(presentation, '_get_parent') else None
+ definitions = get_inherited_parameter_definitions(context, parent, field_name,
+ for_presentation) \
+ if parent is not None else OrderedDict()
+
+ # Add/merge our definitions
+ # If we inherit from a primitive, it does not have our field
+ our_definitions = getattr(presentation, field_name, None)
+ if our_definitions:
+ our_definitions_clone = OrderedDict()
+ for name, our_definition in our_definitions.iteritems():
+ our_definitions_clone[name] = our_definition._clone(for_presentation)
+ our_definitions = our_definitions_clone
+ merge_parameter_definitions(context, presentation, definitions, our_definitions, field_name)
+
+ for definition in definitions.itervalues():
+ definition._reset_method_cache()
+
+ return definitions
+
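+# Illustrative sketch (hypothetical types): given
+#
+#   data_types:
+#     base.Type:
+#       properties:
+#         a: {type: string}
+#     derived.Type:
+#       derived_from: base.Type
+#       properties:
+#         b: {type: integer}
+#
+# calling this for derived.Type with field_name='properties' yields an
+# OrderedDict containing both "a" (cloned from the parent) and "b".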
+
+#
+# NodeTemplate, RelationshipTemplate, GroupTemplate, PolicyTemplate
+#
+
+def get_assigned_and_defined_parameter_values(context, presentation, field_name):
+ """
+ Returns the assigned parameter values while making sure they are defined in our type.
+
+ The parameter definition's default value, if available, will be used if we did not assign it.
+
+ Makes sure that required parameters indeed end up with a value.
+ """
+
+ values = OrderedDict()
+
+ the_type = presentation._get_type(context)
+ field_name_plural = pluralize(field_name)
+ assignments = getattr(presentation, field_name_plural)
+ get_fn_name = '_get_{0}'.format(field_name_plural)
+ definitions = getattr(the_type, get_fn_name)(context) if the_type is not None else None
+
+ # Fill in our assignments, but make sure they are defined
+ if assignments:
+ for name, value in assignments.iteritems():
+ if (definitions is not None) and (name in definitions):
+ definition = definitions[name]
+ values[name] = coerce_parameter_value(context, value, definition, value.value)
+ else:
+ context.validation.report('assignment to undefined {0} "{1}" in "{2}"'
+ .format(field_name, name, presentation._fullname),
+ locator=value._locator, level=Issue.BETWEEN_TYPES)
+
+ # Fill in defaults from the definitions
+ if definitions:
+ for name, definition in definitions.iteritems():
+ # Note: attributes will always have a default value, even if it's None
+ if (name not in values) and \
+ (('default' in definition._raw) or (field_name == 'attribute')):
+ values[name] = coerce_parameter_value(context, presentation, definition,
+ definition.default)
+
+ validate_required_values(context, presentation, values, definitions)
+
+ # Fill in nulls for missing values that are *not* required
+ if definitions:
+ for name, definition in definitions.iteritems():
+ if (name not in values) and not getattr(definition, 'required', False):
+ values[name] = coerce_parameter_value(context, presentation, definition, None)
+
+ return values
+
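+# Per parameter, the resolution order implemented above is:
+# 1. an explicit assignment in the template (coerced against the definition);
+# 2. the definition's default (attributes always get one, even if it is None);
+# 3. None, but only if the definition is not marked as required.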
+
+#
+# TopologyTemplate
+#
+
+def get_parameter_values(context, presentation, field_name):
+ values = OrderedDict()
+
+ parameters = getattr(presentation, field_name)
+
+ # Fill in defaults and values
+ if parameters:
+ for name, parameter in parameters.iteritems():
+ if values.get(name) is None:
+ if hasattr(parameter, 'value') and (parameter.value is not None):
+ # For parameters only:
+ values[name] = coerce_parameter_value(context, presentation, parameter,
+ parameter.value)
+ else:
+ default = parameter.default if hasattr(parameter, 'default') else None
+ values[name] = coerce_parameter_value(context, presentation, parameter, default)
+
+ return values
+
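+# For topology template inputs and outputs, an assigned value wins over the
+# declared default, e.g. (hypothetical):
+#
+#   inputs:
+#     port: {type: integer, default: 8080}   # used only if no value is given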
+
+#
+# Utils
+#
+
+def validate_required_values(context, presentation, values, definitions):
+ """
+ Makes sure that all required properties have been assigned a value, reporting an issue for
+ each one that has not.
+ """
+
+ if not definitions:
+ return
+ for name, definition in definitions.iteritems():
+ if getattr(definition, 'required', False) and \
+ ((values is None) or (values.get(name) is None)):
+ context.validation.report('required property "%s" is not assigned a value in "%s"'
+ % (name, presentation._fullname),
+ locator=presentation._get_child_locator('properties'),
+ level=Issue.BETWEEN_TYPES)
+
+
+def merge_raw_parameter_definition(context, presentation, raw_property_definition,
+ our_property_definition, field_name, property_name):
+ # Check if we changed the parameter type
+ type1_name = raw_property_definition.get('type')
+ type1 = get_type_by_name(context, type1_name, 'data_types')
+ if type1 is None:
+ type1 = get_primitive_data_type(type1_name)
+ our_property_definition._reset_method_cache()
+ type2 = our_property_definition._get_type(context)
+
+ if type1 != type2:
+ if not hasattr(type1, '_is_descendant') or not type1._is_descendant(context, type2):
+ context.validation.report(
+ 'property definition type "{0}" is not a descendant of overridden '
+ 'property definition type "{1}"' \
+ .format(type2._name, type1_name),
+ locator=presentation._get_child_locator(field_name, property_name),
+ level=Issue.BETWEEN_TYPES)
+
+ merge(raw_property_definition, our_property_definition._raw)
+
+
+def merge_raw_parameter_definitions(context, presentation, raw_property_definitions,
+ our_property_definitions, field_name):
+ if not our_property_definitions:
+ return
+ for property_name, our_property_definition in our_property_definitions.iteritems():
+ if property_name in raw_property_definitions:
+ raw_property_definition = raw_property_definitions[property_name]
+ merge_raw_parameter_definition(context, presentation, raw_property_definition,
+ our_property_definition, field_name, property_name)
+ else:
+ raw_property_definitions[property_name] = \
+ deepcopy_with_locators(our_property_definition._raw)
+
+
+def merge_parameter_definitions(context, presentation, property_definitions,
+ our_property_definitions, field_name):
+ if not our_property_definitions:
+ return
+ for property_name, our_property_definition in our_property_definitions.iteritems():
+ if property_name in property_definitions:
+ property_definition = property_definitions[property_name]
+ merge_raw_parameter_definition(context, presentation, property_definition._raw,
+ our_property_definition, field_name, property_name)
+ else:
+ property_definitions[property_name] = our_property_definition
+
+
+# Works on properties, inputs, and parameters
+def coerce_parameter_value(context, presentation, definition, value, aspect=None):
+ the_type = definition._get_type(context) if definition is not None else None
+ entry_schema = definition.entry_schema if definition is not None else None
+ constraints = definition._get_constraints(context) \
+ if ((definition is not None) and hasattr(definition, '_get_constraints')) else None
+ value = coerce_value(context, presentation, the_type, entry_schema, constraints, value, aspect)
+ if (the_type is not None) and hasattr(the_type, '_name'):
+ type_name = the_type._name
+ else:
+ type_name = getattr(definition, 'type', None)
+ description = getattr(definition, 'description', None)
+ description = description.value if description is not None else None
+ required = getattr(definition, 'required', None)
+ return Value(type_name, value, description, required)
+
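+# Note: the returned Value wrapper carries the resolved type name, the coerced
+# value, the definition's description (if any), and the "required" flag, so
+# later stages can work without the original definition presentation.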
+
+def convert_parameter_definitions_to_values(context, definitions):
+ values = OrderedDict()
+ for name, definition in definitions.iteritems():
+ default = definition.default
+ values[name] = coerce_parameter_value(context, definition, definition, default)
+ return values
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/policies.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/policies.py
new file mode 100644
index 0000000..0376798
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/policies.py
@@ -0,0 +1,79 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ..presentation.types import convert_name_to_full_type_name
+
+
+#
+# PolicyType
+#
+
+def get_inherited_targets(context, presentation):
+ """
+ Returns our target node types and group types if we have them, or those of our parent if we
+ have one (recursively).
+ """
+
+ parent = presentation._get_parent(context)
+
+ node_types, group_types = get_inherited_targets(context, parent) \
+ if parent is not None else ([], [])
+
+ our_targets = presentation.targets
+ if our_targets:
+ all_node_types = context.presentation.get('service_template', 'node_types') or {}
+ all_group_types = context.presentation.get('service_template', 'group_types') or {}
+ node_types = []
+ group_types = []
+
+ for our_target in our_targets:
+ if our_target in all_node_types:
+ our_target = convert_name_to_full_type_name(context, our_target, all_node_types)
+ node_types.append(all_node_types[our_target])
+ elif our_target in all_group_types:
+ our_target = convert_name_to_full_type_name(context, our_target, all_group_types)
+ group_types.append(all_group_types[our_target])
+
+ return node_types, group_types
+
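+# Illustrative sketch (hypothetical): a derived policy type's targets replace
+# its parent's targets entirely, as implemented above:
+#
+#   policy_types:
+#     base.Policy:
+#       targets: [base.Node]
+#     derived.Policy:
+#       derived_from: base.Policy
+#       targets: [other.Node, some.Group]   # replaces, does not append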
+
+#
+# PolicyTemplate
+#
+
+def get_policy_targets(context, presentation):
+ """
+ Returns our target node templates and groups if we have them.
+ """
+
+ node_templates = []
+ groups = []
+
+ our_targets = presentation.targets
+ if our_targets:
+ all_node_templates = \
+ context.presentation.get('service_template', 'topology_template', 'node_templates') \
+ or {}
+ all_groups = \
+ context.presentation.get('service_template', 'topology_template', 'groups') \
+ or {}
+
+ for our_target in our_targets:
+ if our_target in all_node_templates:
+ node_templates.append(all_node_templates[our_target])
+ elif our_target in all_groups:
+ groups.append(all_groups[our_target])
+
+ return node_templates, groups
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/requirements.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/requirements.py
new file mode 100644
index 0000000..6bdb5b1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/requirements.py
@@ -0,0 +1,364 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.parser.validation import Issue
+from aria.utils.collections import (deepcopy_with_locators, OrderedDict)
+
+from .parameters import (convert_parameter_definitions_to_values, validate_required_values,
+ coerce_parameter_value)
+from .interfaces import (convert_requirement_interface_definitions_from_type_to_raw_template,
+ merge_interface_definitions, merge_interface, validate_required_inputs)
+
+
+#
+# NodeType
+#
+
+def get_inherited_requirement_definitions(context, presentation):
+ """
+ Returns our requirement definitions added on top of those of our parent, if we have one
+ (recursively).
+
+ Allows overriding requirement definitions if they have the same name.
+ """
+
+ parent = presentation._get_parent(context)
+ requirement_definitions = get_inherited_requirement_definitions(context, parent) \
+ if parent is not None else []
+
+ our_requirement_definitions = presentation.requirements
+ if our_requirement_definitions:
+ for requirement_name, our_requirement_definition in our_requirement_definitions:
+ # Remove any existing requirement definitions of this name (iterate over a copy,
+ # since we mutate the list while scanning it)
+ for name, requirement_definition in list(requirement_definitions):
+ if name == requirement_name:
+ requirement_definitions.remove((name, requirement_definition))
+
+ requirement_definitions.append((requirement_name, our_requirement_definition))
+
+ return requirement_definitions
+
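+# Note: unlike parameter definitions, requirement definitions are kept as a
+# list of (name, definition) pairs rather than a dict, because TOSCA allows
+# several requirements with the same name; overriding first removes all pairs
+# of that name and then appends ours.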
+
+#
+# NodeTemplate
+#
+
+def get_template_requirements(context, presentation):
+ """
+ Returns our requirement assignments added on top of those declared at the node type, if any.
+
+ If the requirement has a relationship, the relationship properties and interfaces are assigned.
+
+ Returns the assigned property, interface input, and interface operation input values while
+ making sure they are defined in our type. Default values, if available, will be used if we did
+ not assign them. Also makes sure that required properties and inputs indeed end up with a value.
+ """
+
+ requirement_assignments = []
+
+ the_type = presentation._get_type(context) # NodeType
+ requirement_definitions = the_type._get_requirements(context) if the_type is not None else None
+
+ # Add our requirement assignments
+ our_requirement_assignments = presentation.requirements
+ if our_requirement_assignments:
+ add_requirement_assignments(context, presentation, requirement_assignments,
+ requirement_definitions, our_requirement_assignments)
+
+ # Validate occurrences
+ if requirement_definitions:
+ for requirement_name, requirement_definition in requirement_definitions:
+ # Allowed occurrences
+ allowed_occurrences = requirement_definition.occurrences
+
+ # Count actual occurrences
+ actual_occurrences = 0
+ for name, _ in requirement_assignments:
+ if name == requirement_name:
+ actual_occurrences += 1
+
+ if allowed_occurrences is None:
+ # If not specified, we interpret this to mean that exactly 1 occurrence is required
+ if actual_occurrences == 0:
+ # If it's not there, we will automatically add it (this behavior is not in the
+ # TOSCA spec, but seems implied)
+ requirement_assignment, \
+ relationship_property_definitions, \
+ relationship_interface_definitions = \
+ convert_requirement_from_definition_to_assignment(context,
+ requirement_definition,
+ None, presentation)
+ validate_requirement_assignment(context, presentation, requirement_assignment,
+ relationship_property_definitions,
+ relationship_interface_definitions)
+ requirement_assignments.append((requirement_name, requirement_assignment))
+ elif actual_occurrences > 1:
+ context.validation.report(
+ 'requirement "%s" is allowed only one occurrence in "%s": %d'
+ % (requirement_name, presentation._fullname, actual_occurrences),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ else:
+ if not allowed_occurrences.is_in(actual_occurrences):
+ if allowed_occurrences.value[1] == 'UNBOUNDED':
+ context.validation.report(
+ 'requirement "%s" does not have at least %d occurrences in "%s": has %d'
+ % (requirement_name, allowed_occurrences.value[0],
+ presentation._fullname, actual_occurrences),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ else:
+ context.validation.report(
+ 'requirement "%s" is allowed between %d and %d occurrences in "%s":'
+ ' has %d'
+ % (requirement_name, allowed_occurrences.value[0],
+ allowed_occurrences.value[1], presentation._fullname,
+ actual_occurrences),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+
+ return requirement_assignments
+
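+# Occurrences, as validated above (hypothetical values): with
+# "occurrences: [0, UNBOUNDED]" any count is accepted; without an
+# "occurrences" key exactly one assignment is expected, and one is synthesized
+# from the definition if the template omits it.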
+
+#
+# Utils
+#
+
+def convert_requirement_from_definition_to_assignment(context, requirement_definition, # pylint: disable=too-many-branches
+ our_requirement_assignment, container):
+ from ..assignments import RequirementAssignment
+
+ raw = OrderedDict()
+
+ # Capability type name:
+ raw['capability'] = deepcopy_with_locators(requirement_definition.capability)
+
+ node_type = requirement_definition._get_node_type(context)
+ if node_type is not None:
+ raw['node'] = deepcopy_with_locators(node_type._name)
+
+ relationship_type = None
+ relationship_template = None
+ relationship_property_definitions = None
+ relationship_interface_definitions = None
+
+ # First try to find the relationship if we declared it
+ # RelationshipAssignment:
+ our_relationship = our_requirement_assignment.relationship \
+ if our_requirement_assignment is not None else None
+ if our_relationship is not None:
+ relationship_type, relationship_type_variant = our_relationship._get_type(context)
+ if relationship_type_variant == 'relationship_template':
+ relationship_template = relationship_type
+ relationship_type = relationship_template._get_type(context)
+
+ definition_relationship_type = None
+ relationship_definition = requirement_definition.relationship # RelationshipDefinition
+ if relationship_definition is not None:
+ definition_relationship_type = relationship_definition._get_type(context)
+
+ # If not exists, try at the node type
+ if relationship_type is None:
+ relationship_type = definition_relationship_type
+ else:
+ # Make sure the type is derived
+ if not definition_relationship_type._is_descendant(context, relationship_type):
+ context.validation.report(
+ 'assigned relationship type "%s" is not a descendant of declared relationship type'
+ ' "%s"' \
+ % (relationship_type._name, definition_relationship_type._name),
+ locator=container._locator, level=Issue.BETWEEN_TYPES)
+
+ if relationship_type is not None:
+ raw['relationship'] = OrderedDict()
+
+ type_name = our_relationship.type if our_relationship is not None else None
+ if type_name is None:
+ type_name = relationship_type._name
+
+ raw['relationship']['type'] = deepcopy_with_locators(type_name)
+
+ # These are our property definitions
+ relationship_property_definitions = relationship_type._get_properties(context)
+
+ if relationship_template is not None:
+ # Property values from template
+ raw['relationship']['properties'] = relationship_template._get_property_values(context)
+ else:
+ if relationship_property_definitions:
+ # Convert property definitions to values
+ raw['relationship']['properties'] = \
+ convert_parameter_definitions_to_values(context,
+ relationship_property_definitions)
+
+ # These are our interface definitions
+ # InterfaceDefinition:
+ relationship_interface_definitions = OrderedDict(relationship_type._get_interfaces(context))
+
+ # Convert interface definitions to templates
+ convert_requirement_interface_definitions_from_type_to_raw_template(
+ context,
+ raw['relationship'],
+ relationship_interface_definitions)
+
+ if relationship_definition:
+ # Merge extra interface definitions
+ # InterfaceDefinition:
+ definition_interface_definitions = relationship_definition.interfaces
+ merge_interface_definitions(context, relationship_interface_definitions,
+ definition_interface_definitions, requirement_definition,
+ container)
+
+ if relationship_template is not None:
+ # Interfaces from template
+ interfaces = relationship_template._get_interfaces(context)
+ if interfaces:
+ raw['relationship']['interfaces'] = OrderedDict()
+ for interface_name, interface in interfaces.iteritems():
+ raw['relationship']['interfaces'][interface_name] = interface._raw
+
+ return \
+ RequirementAssignment(name=requirement_definition._name, raw=raw, container=container), \
+ relationship_property_definitions, \
+ relationship_interface_definitions
+
+
+def add_requirement_assignments(context, presentation, requirement_assignments,
+ requirement_definitions, our_requirement_assignments):
+ for requirement_name, our_requirement_assignment in our_requirement_assignments:
+ requirement_definition = get_first_requirement(requirement_definitions, requirement_name)
+ if requirement_definition is not None:
+ requirement_assignment, \
+ relationship_property_definitions, \
+ relationship_interface_definitions = \
+ convert_requirement_from_definition_to_assignment(context, requirement_definition,
+ our_requirement_assignment,
+ presentation)
+ merge_requirement_assignment(context,
+ relationship_property_definitions,
+ relationship_interface_definitions,
+ requirement_assignment, our_requirement_assignment)
+ validate_requirement_assignment(context,
+ our_requirement_assignment.relationship \
+ or our_requirement_assignment,
+ requirement_assignment,
+ relationship_property_definitions,
+ relationship_interface_definitions)
+ requirement_assignments.append((requirement_name, requirement_assignment))
+ else:
+ context.validation.report('requirement "%s" not declared at node type "%s" in "%s"'
+ % (requirement_name, presentation.type,
+ presentation._fullname),
+ locator=our_requirement_assignment._locator,
+ level=Issue.BETWEEN_TYPES)
+
+
+def merge_requirement_assignment(context, relationship_property_definitions,
+ relationship_interface_definitions, requirement, our_requirement):
+ our_capability = our_requirement.capability
+ if our_capability is not None:
+ requirement._raw['capability'] = deepcopy_with_locators(our_capability)
+
+ our_node = our_requirement.node
+ if our_node is not None:
+ requirement._raw['node'] = deepcopy_with_locators(our_node)
+
+ our_node_filter = our_requirement.node_filter
+ if our_node_filter is not None:
+ requirement._raw['node_filter'] = deepcopy_with_locators(our_node_filter._raw)
+
+ our_relationship = our_requirement.relationship # RelationshipAssignment
+ if (our_relationship is not None) and (our_relationship.type is None):
+ # Make sure we have a dict
+ if 'relationship' not in requirement._raw:
+ requirement._raw['relationship'] = OrderedDict()
+
+ merge_requirement_assignment_relationship(context, our_relationship,
+ relationship_property_definitions,
+ relationship_interface_definitions,
+ requirement, our_relationship)
+
+
+def merge_requirement_assignment_relationship(context, presentation, property_definitions,
+ interface_definitions, requirement, our_relationship):
+ our_relationship_properties = our_relationship._raw.get('properties')
+ if our_relationship_properties:
+ # Make sure we have a dict
+ if 'properties' not in requirement._raw['relationship']:
+ requirement._raw['relationship']['properties'] = OrderedDict()
+
+ # Merge our properties
+ for property_name, prop in our_relationship_properties.iteritems():
+ if property_name in property_definitions:
+ definition = property_definitions[property_name]
+ requirement._raw['relationship']['properties'][property_name] = \
+ coerce_parameter_value(context, presentation, definition, prop)
+ else:
+ context.validation.report(
+ 'relationship property "%s" not declared at definition of requirement "%s"'
+ ' in "%s"'
+ % (property_name, requirement._fullname,
+ presentation._container._container._fullname),
+ locator=our_relationship._get_child_locator('properties', property_name),
+ level=Issue.BETWEEN_TYPES)
+
+ our_interfaces = our_relationship.interfaces
+ if our_interfaces:
+ # Make sure we have a dict
+ if 'interfaces' not in requirement._raw['relationship']:
+ requirement._raw['relationship']['interfaces'] = OrderedDict()
+
+ # Merge interfaces
+ for interface_name, our_interface in our_interfaces.iteritems():
+ if interface_name not in requirement._raw['relationship']['interfaces']:
+ requirement._raw['relationship']['interfaces'][interface_name] = OrderedDict()
+
+ if (interface_definitions is not None) and (interface_name in interface_definitions):
+ interface_definition = interface_definitions[interface_name]
+ interface_assignment = requirement.relationship.interfaces[interface_name]
+ merge_interface(context, presentation, interface_assignment, our_interface,
+ interface_definition, interface_name)
+ else:
+ context.validation.report(
+ 'relationship interface "%s" not declared at definition of requirement "%s"'
+ ' in "%s"'
+ % (interface_name, requirement._fullname,
+ presentation._container._container._fullname),
+ locator=our_relationship._locator, level=Issue.BETWEEN_TYPES)
+
+
+def validate_requirement_assignment(context, presentation, requirement_assignment,
+ relationship_property_definitions,
+ relationship_interface_definitions):
+ relationship = requirement_assignment.relationship
+ if relationship is None:
+ return
+
+ validate_required_values(context, presentation, relationship.properties,
+ relationship_property_definitions)
+
+ if relationship_interface_definitions:
+ for interface_name, relationship_interface_definition \
+ in relationship_interface_definitions.iteritems():
+ interface_assignment = relationship.interfaces.get(interface_name) \
+ if relationship.interfaces is not None else None
+ validate_required_inputs(context, presentation, interface_assignment,
+ relationship_interface_definition, None, interface_name)
+
+
+def get_first_requirement(requirement_definitions, name):
+ if requirement_definitions is not None:
+ for requirement_name, requirement_definition in requirement_definitions:
+ if requirement_name == name:
+ return requirement_definition
+ return None
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/substitution_mappings.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/substitution_mappings.py
new file mode 100644
index 0000000..e2af4b8
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/modeling/substitution_mappings.py
@@ -0,0 +1,167 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.formatting import safe_repr
+from aria.parser.validation import Issue
+
+
+def validate_substitution_mappings_requirement(context, presentation):
+
+ # validate that the requirement in substitution_mapping is defined in the substitution node type
+ substitution_node_type = presentation._container._get_type(context)
+ if substitution_node_type is None:
+ return
+ for req_name, req in substitution_node_type._get_requirements(context):
+ if req_name == presentation._name:
+ substitution_type_requirement = req
+ break
+ else:
+ context.validation.report(
+ 'substitution mapping requirement "{0}" is not declared in node type "{1}"'.format(
+ presentation._name, substitution_node_type._name),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ return
+
+ if not _validate_mapping_format(presentation):
+ _report_invalid_mapping_format(context, presentation, field='requirement')
+ return
+
+ # validate that the mapped requirement is defined in the corresponding node template
+ node_template = _get_node_template(context, presentation)
+ if node_template is None:
+ _report_missing_node_template(context, presentation, field='requirement')
+ return
+ mapped_requirement_name = presentation._raw[1]
+ for req_name, req in node_template._get_requirements(context):
+ if req_name == mapped_requirement_name:
+ node_template_requirement = req
+ break
+ else:
+ context.validation.report(
+ 'substitution mapping requirement "{0}" refers to an unknown requirement of node '
+ 'template "{1}": {mapped_requirement_name}'.format(
+ presentation._name, node_template._name,
+ mapped_requirement_name=safe_repr(mapped_requirement_name)),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ return
+
+ # validate that the requirement's capability type in substitution_mapping is derived from the
+ # requirement's capability type in the corresponding node template
+ substitution_type_requirement_capability_type = \
+ substitution_type_requirement._get_capability_type(context)
+ node_template_requirement_capability_type = \
+ node_template_requirement._get_capability(context)[0]
+ if not node_template_requirement_capability_type._is_descendant(
+ context, substitution_type_requirement_capability_type):
+ context.validation.report(
+ 'substitution mapping requirement "{0}" of capability type "{1}" is not a descendant '
+ 'of the mapped node template capability type "{2}"'.format(
+ presentation._name,
+ substitution_type_requirement_capability_type._name,
+ node_template_requirement_capability_type._name),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+
+
+def validate_substitution_mappings_capability(context, presentation):
+
+ # validate that the capability in substitution_mapping is defined in the substitution node type
+ substitution_node_type = presentation._container._get_type(context)
+ if substitution_node_type is None:
+ return
+ substitution_type_capabilities = substitution_node_type._get_capabilities(context)
+ substitution_type_capability = substitution_type_capabilities.get(presentation._name)
+ if substitution_type_capability is None:
+ context.validation.report(
+ 'substitution mapping capability "{0}" '
+ 'is not declared in node type "{substitution_type}"'.format(
+ presentation._name, substitution_type=substitution_node_type._name),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ return
+
+ if not _validate_mapping_format(presentation):
+ _report_invalid_mapping_format(context, presentation, field='capability')
+ return
+
+ # validate that the capability in substitution_mapping is declared in the corresponding
+ # node template
+ node_template = _get_node_template(context, presentation)
+ if node_template is None:
+ _report_missing_node_template(context, presentation, field='capability')
+ return
+ mapped_capability_name = presentation._raw[1]
+ node_template_capability = node_template._get_capabilities(context).get(mapped_capability_name)
+
+ if node_template_capability is None:
+ context.validation.report(
+ 'substitution mapping capability "{0}" refers to an unknown '
+ 'capability of node template "{1}": {mapped_capability_name}'.format(
+ presentation._name, node_template._name,
+ mapped_capability_name=safe_repr(mapped_capability_name)),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ return
+
+ # validate that the capability type in substitution_mapping is derived from the capability type
+ # in the corresponding node template
+ substitution_type_capability_type = substitution_type_capability._get_type(context)
+ node_template_capability_type = node_template_capability._get_type(context)
+
+ if not substitution_type_capability_type._is_descendant(context, node_template_capability_type):
+ context.validation.report(
+ 'node template capability type "{0}" is not a descendant of substitution mapping '
+ 'capability "{1}" of type "{2}"'.format(
+ node_template_capability_type._name,
+ presentation._name,
+ substitution_type_capability_type._name),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+
+
+#
+# Utils
+#
+
+def _validate_mapping_format(presentation):
+ """Validate that the mapping is a list of 2 strings."""
+ return (isinstance(presentation._raw, list) and
+ (len(presentation._raw) == 2) and
+ isinstance(presentation._raw[0], basestring) and
+ isinstance(presentation._raw[1], basestring))
+
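+# A mapping that passes this check is a [node template name, mapped name] pair,
+# e.g. (hypothetical names):
+#
+#   substitution_mappings:
+#     node_type: my.Type
+#     capabilities:
+#       endpoint: [my_node_template, my_capability]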
+
+def _get_node_template(context, presentation):
+ node_template_name = presentation._raw[0]
+ node_template = context.presentation.get_from_dict('service_template', 'topology_template',
+ 'node_templates', node_template_name)
+ return node_template
+
+
+def _report_missing_node_template(context, presentation, field):
+ context.validation.report(
+ 'substitution mappings {field} "{node_template_mapping}" '
+ 'refers to an unknown node template: {node_template_name}'.format(
+ field=field,
+ node_template_mapping=presentation._name,
+ node_template_name=safe_repr(presentation._raw[0])),
+ locator=presentation._locator, level=Issue.FIELD)
+
+
+def _report_invalid_mapping_format(context, presentation, field):
+ context.validation.report(
+ 'substitution mapping {field} "{field_name}" is not a list of 2 strings: {value}'.format(
+ field=field,
+ field_name=presentation._name,
+ value=safe_repr(presentation._raw)),
+ locator=presentation._locator, level=Issue.FIELD)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/__init__.py
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/extensible.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/extensible.py
new file mode 100644
index 0000000..0e3c94d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/extensible.py
@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.caching import cachedmethod
+from aria.parser.presentation import (Presentation, has_fields, primitive_dict_field)
+
+
+@has_fields
+class ExtensiblePresentation(Presentation):
+ """
+ A presentation that supports an optional ``_extensions`` dict field.
+ """
+
+ @primitive_dict_field()
+ def _extensions(self):
+ pass
+
+ @cachedmethod
+ def _get_extension(self, name, default=None):
+ extensions = self._extensions
+ return extensions.get(name, default) if extensions is not None else None # pylint: disable=no-member
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/field_getters.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/field_getters.py
new file mode 100644
index 0000000..f14164a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/field_getters.py
@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.formatting import safe_repr
+from aria.parser.exceptions import InvalidValueError
+
+
+def data_type_class_getter(cls):
+ """
+ Wraps the field value in a specialized data type class.
+
+ Can be used with the :func:`field_getter` decorator.
+ """
+
+ def getter(field, presentation, context=None):
+ raw = field.default_get(presentation, context)
+ if raw is not None:
+ try:
+ return cls(None, None, raw, None)
+ except ValueError as e:
+ raise InvalidValueError(
+ '%s is not a valid "%s" in "%s": %s'
+ % (field.full_name, field.full_cls_name, presentation._name, safe_repr(raw)),
+ cause=e, locator=field.get_locator(raw))
+ return getter
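+
+# Hypothetical usage sketch (assuming a Version data type class and the
+# field_getter decorator):
+#
+#   @field_getter(data_type_class_getter(Version))
+#   @primitive_field(str)
+#   def template_version(self):
+#       pass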
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/field_validators.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/field_validators.py
new file mode 100644
index 0000000..e5853d8
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/field_validators.py
@@ -0,0 +1,588 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+
+from aria.utils.formatting import safe_repr
+from aria.parser import implements_specification
+from aria.parser.presentation import (report_issue_for_unknown_type, derived_from_validator)
+from aria.parser.validation import Issue
+
+from ..modeling.data_types import (get_primitive_data_type, get_data_type_name, coerce_value,
+ get_container_data_type)
+from .types import (get_type_by_name, convert_name_to_full_type_name)
+
+
+#
+# NodeTemplate, RelationshipTemplate
+#
+
+@implements_specification('3.7.3.3', 'tosca-simple-1.0')
+def copy_validator(template_type_name, templates_dict_name):
+ """
+ Makes sure that the field refers to an existing template defined in the root presenter.
+
+ Use with the :func:`field_validator` decorator for the ``copy`` field in
+ :class:`NodeTemplate` and :class:`RelationshipTemplate`.
+ """
+
+ def validator_fn(field, presentation, context):
+ field.default_validate(presentation, context)
+
+ # Make sure type exists
+ value = getattr(presentation, field.name)
+ if value is not None:
+ copy = context.presentation.get_from_dict('service_template', 'topology_template',
+ templates_dict_name, value)
+ if copy is None:
+ report_issue_for_unknown_type(context, presentation, template_type_name, field.name)
+ else:
+ if copy.copy is not None:
+ context.validation.report(
+ '"copy" field refers to a %s that itself is a copy in "%s": %s'
+ % (template_type_name, presentation._fullname, safe_repr(value)),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+
+ return validator_fn
+
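+# Hypothetical usage sketch on a template's "copy" field:
+#
+#   @field_validator(copy_validator('node template', 'node_templates'))
+#   @primitive_field(str)
+#   def copy(self):
+#       pass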
+
+#
+# PropertyDefinition, AttributeDefinition, ParameterDefinition, EntrySchema
+#
+
+def data_type_validator(type_name='data type'):
+ """
+ Makes sure that the field refers to a valid data type, whether complex or primitive.
+
+ Used with the :func:`field_validator` decorator for the ``type`` fields in
+ :class:`PropertyDefinition`, :class:`AttributeDefinition`, :class:`ParameterDefinition`,
+ and :class:`EntrySchema`.
+
+ Extra behavior beyond validation: the generated function returns ``True`` if the field is a
+ complex data type.
+ """
+
+ def validator(field, presentation, context):
+ field.default_validate(presentation, context)
+
+ value = getattr(presentation, field.name)
+ if value is not None:
+ # Test for circular definitions
+ container_data_type = get_container_data_type(presentation)
+ if (container_data_type is not None) and (container_data_type._name == value):
+ context.validation.report(
+ 'type of property "%s" creates a circular value hierarchy: %s'
+ % (presentation._fullname, safe_repr(value)),
+ locator=presentation._get_child_locator('type'), level=Issue.BETWEEN_TYPES)
+
+ # Can be a complex data type
+ if get_type_by_name(context, value, 'data_types') is not None:
+ return True
+
+ # Can be a primitive data type
+ if get_primitive_data_type(value) is None:
+ report_issue_for_unknown_type(context, presentation, type_name, field.name)
+
+ return False
+
+ return validator
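+
+# For example (hypothetical data type), a property whose type is its own containing
+# data type creates a circular value hierarchy and is reported:
+#
+#   data_types:
+#     my.datatypes.Recursive:
+#       properties:
+#         again:
+#           type: my.datatypes.Recursive    # reported by the validator above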
+
+
+#
+# PropertyDefinition, AttributeDefinition
+#
+
+def entry_schema_validator(field, presentation, context):
+ """
+ According to whether the data type supports ``entry_schema`` (e.g., it is or inherits from
+    list or map), make sure that we either have or don't have an ``entry_schema`` value.
+
+ Used with the :func:`field_validator` decorator for the ``entry_schema`` field in
+ :class:`PropertyDefinition` and :class:`AttributeDefinition`.
+ """
+
+ field.default_validate(presentation, context)
+
+ def type_uses_entry_schema(the_type):
+ use_entry_schema = the_type._get_extension('use_entry_schema', False) \
+ if hasattr(the_type, '_get_extension') else False
+ if use_entry_schema:
+ return True
+ parent = the_type._get_parent(context) if hasattr(the_type, '_get_parent') else None
+ if parent is None:
+ return False
+ return type_uses_entry_schema(parent)
+
+ value = getattr(presentation, field.name)
+ the_type = presentation._get_type(context)
+ if the_type is None:
+ return
+ use_entry_schema = type_uses_entry_schema(the_type)
+
+ if use_entry_schema:
+ if value is None:
+ context.validation.report(
+ '"entry_schema" does not have a value as required by data type "%s" in "%s"'
+ % (get_data_type_name(the_type), presentation._container._fullname),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ else:
+ if value is not None:
+ context.validation.report(
+ '"entry_schema" has a value but it is not used by data type "%s" in "%s"'
+ % (get_data_type_name(the_type), presentation._container._fullname),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
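+
+# A sketch (hypothetical property definitions) of both directions of the check:
+#
+#   properties:
+#     ports:
+#       type: map              # map uses entry_schema, so one is required here
+#       entry_schema:
+#         type: integer
+#     name:
+#       type: string           # an entry_schema here would be reported as unused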
+
+
+def data_value_validator(field, presentation, context):
+ """
+ Makes sure that the field contains a valid value according to data type and constraints.
+
+ Used with the :func:`field_validator` decorator for the ``default`` field in
+ :class:`PropertyDefinition` and :class:`AttributeDefinition`.
+ """
+
+ field.default_validate(presentation, context)
+
+ value = getattr(presentation, field.name)
+ if value is not None:
+ the_type = presentation._get_type(context)
+ entry_schema = presentation.entry_schema
+ # AttributeDefinition does not have this:
+ constraints = presentation._get_constraints(context) \
+ if hasattr(presentation, '_get_constraints') else None
+ coerce_value(context, presentation, the_type, entry_schema, constraints, value, field.name)
+
+
+#
+# DataType
+#
+
+_data_type_validator = data_type_validator()
+_data_type_derived_from_validator = derived_from_validator(convert_name_to_full_type_name,
+ 'data_types')
+
+
+def data_type_derived_from_validator(field, presentation, context):
+ """
+ Makes sure that the field refers to a valid parent data type (complex or primitive).
+
+ Used with the :func:`field_validator` decorator for the ``derived_from`` field in
+ :class:`DataType`.
+ """
+
+ if _data_type_validator(field, presentation, context):
+ # Validate derivation only if a complex data type (primitive types have no derivation
+ # hierarchy)
+ _data_type_derived_from_validator(field, presentation, context)
+
+
+def data_type_constraints_validator(field, presentation, context):
+ """
+ Makes sure that we do not have constraints if we are a complex type (with no primitive
+ ancestor).
+ """
+
+ field.default_validate(presentation, context)
+
+ value = getattr(presentation, field.name)
+ if value is not None:
+ if presentation._get_primitive_ancestor(context) is None:
+ context.validation.report(
+ 'data type "%s" defines constraints but does not have a primitive ancestor'
+ % presentation._fullname,
+ locator=presentation._get_child_locator(field.name), level=Issue.BETWEEN_TYPES)
+
+
+def data_type_properties_validator(field, presentation, context):
+ """
+ Makes sure that we do not have properties if we have a primitive ancestor.
+
+ Used with the :func:`field_validator` decorator for the ``properties`` field in
+ :class:`DataType`.
+ """
+
+ field.default_validate(presentation, context)
+
+ values = getattr(presentation, field.name)
+ if values is not None:
+ if presentation._get_primitive_ancestor(context) is not None:
+ context.validation.report(
+ 'data type "%s" defines properties even though it has a primitive ancestor'
+ % presentation._fullname,
+ locator=presentation._get_child_locator(field.name), level=Issue.BETWEEN_TYPES)
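+
+# The two validators above are mirror images of each other. A hypothetical example:
+#
+#   data_types:
+#     my.datatypes.Port:
+#       derived_from: integer         # primitive ancestor: constraints are allowed,
+#       constraints:                  # but properties would be reported
+#         - in_range: [ 1, 65535 ]
+#     my.datatypes.Endpoint:          # complex type, no primitive ancestor:
+#       properties:                   # properties are allowed, but constraints
+#         address:                    # would be reported
+#           type: string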
+
+
+#
+# ConstraintClause
+#
+
+def constraint_clause_field_validator(field, presentation, context):
+ """
+ Makes sure that field contains a valid value for the container type.
+
+    Used with the :func:`field_validator` decorator for various fields in
+    :class:`ConstraintClause`.
+ """
+
+ field.default_validate(presentation, context)
+
+ value = getattr(presentation, field.name)
+ if value is not None:
+ the_type = presentation._get_type(context)
+ constraints = the_type._get_constraints(context) \
+ if hasattr(the_type, '_get_constraints') else None
+ coerce_value(context, presentation, the_type, None, constraints, value, field.name)
+
+
+def constraint_clause_in_range_validator(field, presentation, context):
+ """
+    Makes sure that the value is a list with exactly two elements, that the lower bound is a valid
+    value for the container type, and that the upper bound is either "UNBOUNDED" or a valid value
+    for the container type.
+
+ Used with the :func:`field_validator` decorator for the ``in_range`` field in
+ :class:`ConstraintClause`.
+ """
+
+ field.default_validate(presentation, context)
+
+ values = getattr(presentation, field.name)
+ if isinstance(values, list):
+ # Make sure list has exactly two elements
+ if len(values) == 2:
+ lower, upper = values
+ the_type = presentation._get_type(context)
+
+ # Lower bound must be coercible
+ lower = coerce_value(context, presentation, the_type, None, None, lower, field.name)
+
+ if upper != 'UNBOUNDED':
+                # Upper bound must also be coercible
+ upper = coerce_value(context, presentation, the_type, None, None, upper, field.name)
+
+ # Second "in_range" value must be greater than first
+ if (lower is not None) and (upper is not None) and (lower >= upper):
+ context.validation.report(
+ 'upper bound of "in_range" constraint is not greater than the lower bound'
+ ' in "%s": %s <= %s'
+ % (presentation._container._fullname, safe_repr(lower), safe_repr(upper)),
+ locator=presentation._locator, level=Issue.FIELD)
+ else:
+ context.validation.report(
+ 'constraint "%s" is not a list of exactly 2 elements in "%s"'
+ % (field.name, presentation._fullname),
+ locator=presentation._get_child_locator(field.name), level=Issue.FIELD)
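+
+# Hypothetical constraint clauses as this validator sees them:
+#
+#   constraints:
+#     - in_range: [ 1, 65535 ]        # valid
+#     - in_range: [ 1, UNBOUNDED ]    # valid: upper bound may be UNBOUNDED
+#     - in_range: [ 10, 2 ]           # reported: upper bound not greater than lower
+#     - in_range: [ 1, 2, 3 ]         # reported: not a list of exactly 2 elements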
+
+
+def constraint_clause_valid_values_validator(field, presentation, context):
+ """
+ Makes sure that the value is a list of valid values for the container type.
+
+ Used with the :func:`field_validator` decorator for the ``valid_values`` field in
+ :class:`ConstraintClause`.
+ """
+
+ field.default_validate(presentation, context)
+
+ values = getattr(presentation, field.name)
+ if isinstance(values, list):
+ the_type = presentation._get_type(context)
+ for value in values:
+ coerce_value(context, presentation, the_type, None, None, value, field.name)
+
+
+def constraint_clause_pattern_validator(field, presentation, context):
+ """
+ Makes sure that the value is a valid regular expression.
+
+ Used with the :func:`field_validator` decorator for the ``pattern`` field in
+ :class:`ConstraintClause`.
+ """
+
+ field.default_validate(presentation, context)
+
+ value = getattr(presentation, field.name)
+ if value is not None:
+ try:
+ # From TOSCA 1.0 3.5.2.1:
+ #
+ # "Note: Future drafts of this specification will detail the use of regular expressions
+ # and reference an appropriate standardized grammar."
+ #
+ # So we will just use Python's.
+ re.compile(value)
+ except re.error as e:
+ context.validation.report(
+ 'constraint "%s" is not a valid regular expression in "%s"'
+ % (field.name, presentation._fullname),
+ locator=presentation._get_child_locator(field.name), level=Issue.FIELD, exception=e)
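+
+# Since Python's re module is used, a hypothetical example looks like this:
+#
+#   constraints:
+#     - pattern: '^[a-z][a-z0-9_]*$'    # valid: compiles under re.compile()
+#     - pattern: '([unclosed'           # reported: raises re.error on compilation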
+
+
+#
+# RequirementAssignment
+#
+
+def node_template_or_type_validator(field, presentation, context):
+ """
+ Makes sure that the field refers to either a node template or a node type.
+
+ Used with the :func:`field_validator` decorator for the ``node`` field in
+ :class:`RequirementAssignment`.
+ """
+
+ field.default_validate(presentation, context)
+
+ value = getattr(presentation, field.name)
+ if value is not None:
+ node_templates = \
+ context.presentation.get('service_template', 'topology_template', 'node_templates') \
+ or {}
+ if (value not in node_templates) and \
+ (get_type_by_name(context, value, 'node_types') is None):
+ report_issue_for_unknown_type(context, presentation, 'node template or node type',
+ field.name)
+
+
+def capability_definition_or_type_validator(field, presentation, context):
+ """
+    Makes sure that the field refers to either a capability assignment name in the node template
+    referred to by the ``node`` field or a general capability type.
+
+ If the value refers to a capability type, make sure the ``node`` field was not assigned.
+
+ Used with the :func:`field_validator` decorator for the ``capability`` field in
+ :class:`RequirementAssignment`.
+ """
+
+ field.default_validate(presentation, context)
+
+ value = getattr(presentation, field.name)
+ if value is not None:
+ node, node_variant = presentation._get_node(context)
+ if node_variant == 'node_template':
+ capabilities = node._get_capabilities(context)
+ if value in capabilities:
+ return
+
+ if get_type_by_name(context, value, 'capability_types') is not None:
+ if node is not None:
+ context.validation.report(
+ '"%s" refers to a capability type even though "node" has a value in "%s"'
+ % (presentation._name, presentation._container._fullname),
+ locator=presentation._get_child_locator(field.name), level=Issue.BETWEEN_FIELDS)
+ return
+
+ if node_variant == 'node_template':
+ context.validation.report(
+ 'requirement "%s" refers to an unknown capability definition name or capability'
+ ' type in "%s": %s'
+ % (presentation._name, presentation._container._fullname, safe_repr(value)),
+ locator=presentation._get_child_locator(field.name), level=Issue.BETWEEN_TYPES)
+ else:
+ context.validation.report(
+ 'requirement "%s" refers to an unknown capability type in "%s": %s'
+ % (presentation._name, presentation._container._fullname, safe_repr(value)),
+ locator=presentation._get_child_locator(field.name), level=Issue.BETWEEN_TYPES)
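+
+# A hypothetical requirement assignment combining the two validators above:
+#
+#   requirements:
+#     - host:
+#         node: my_server       # a node template name, or a node type name
+#         capability: host      # a capability name defined on "my_server"; a
+#                               # capability type is only valid if "node" is unset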
+
+
+def node_filter_validator(field, presentation, context):
+ """
+ Makes sure that the field has a value only if "node" refers to a node type.
+
+ Used with the :func:`field_validator` decorator for the ``node_filter`` field in
+ :class:`RequirementAssignment`.
+ """
+
+ field.default_validate(presentation, context)
+
+ value = getattr(presentation, field.name)
+ if value is not None:
+ _, node_type_variant = presentation._get_node(context)
+ if node_type_variant != 'node_type':
+ context.validation.report(
+ 'requirement "%s" has a node filter even though "node" does not refer to a node'
+ ' type in "%s"'
+ % (presentation._fullname, presentation._container._fullname),
+ locator=presentation._locator, level=Issue.BETWEEN_FIELDS)
+
+
+#
+# RelationshipAssignment
+#
+
+def relationship_template_or_type_validator(field, presentation, context):
+ """
+ Makes sure that the field refers to either a relationship template or a relationship type.
+
+ Used with the :func:`field_validator` decorator for the ``type`` field in
+ :class:`RelationshipAssignment`.
+ """
+
+ field.default_validate(presentation, context)
+
+ value = getattr(presentation, field.name)
+ if value is not None:
+ relationship_templates = \
+ context.presentation.get('service_template', 'topology_template',
+ 'relationship_templates') \
+ or {}
+ if (value not in relationship_templates) and \
+ (get_type_by_name(context, value, 'relationship_types') is None):
+ report_issue_for_unknown_type(context, presentation,
+ 'relationship template or relationship type', field.name)
+
+
+#
+# PolicyType
+#
+
+def list_node_type_or_group_type_validator(field, presentation, context):
+ """
+    Makes sure that the field's elements refer to either node types or group types.
+
+ Used with the :func:`field_validator` decorator for the ``targets`` field in
+ :class:`PolicyType`.
+ """
+
+ field.default_validate(presentation, context)
+
+ values = getattr(presentation, field.name)
+ if values is not None:
+ for value in values:
+ if (get_type_by_name(context, value, 'node_types') is None) and \
+ (get_type_by_name(context, value, 'group_types') is None):
+ report_issue_for_unknown_type(context, presentation, 'node type or group type',
+ field.name, value)
+
+
+#
+# PolicyTemplate
+#
+
+def policy_targets_validator(field, presentation, context):
+ """
+ Makes sure that the field's elements refer to either node templates or groups, and that
+ they match the node types and group types declared in the policy type.
+
+ Used with the :func:`field_validator` decorator for the ``targets`` field in
+ :class:`PolicyTemplate`.
+ """
+
+ field.default_validate(presentation, context)
+
+ values = getattr(presentation, field.name)
+ if values is not None:
+ for value in values:
+ node_templates = \
+ context.presentation.get('service_template', 'topology_template',
+ 'node_templates') \
+ or {}
+ groups = context.presentation.get('service_template', 'topology_template', 'groups') \
+ or {}
+ if (value not in node_templates) and (value not in groups):
+ report_issue_for_unknown_type(context, presentation, 'node template or group',
+ field.name, value)
+
+ policy_type = presentation._get_type(context)
+ if policy_type is None:
+ break
+
+ node_types, group_types = policy_type._get_targets(context)
+
+ is_valid = False
+
+ if value in node_templates:
+ our_node_type = node_templates[value]._get_type(context)
+ for node_type in node_types:
+ if node_type._is_descendant(context, our_node_type):
+ is_valid = True
+ break
+
+ elif value in groups:
+ our_group_type = groups[value]._get_type(context)
+ for group_type in group_types:
+ if group_type._is_descendant(context, our_group_type):
+ is_valid = True
+ break
+
+ if not is_valid:
+ context.validation.report(
+ 'policy definition target does not match either a node type or a group type'
+ ' declared in the policy type in "%s": %s'
+ % (presentation._name, safe_repr(value)),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
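+
+# A sketch (hypothetical type and template names) of a matching policy target. Note
+# that in this grammar ``policies`` is a dict of policy definitions:
+#
+#   policy_types:
+#     my.policies.Scaling:
+#       targets: [ tosca.nodes.Compute ]
+#   topology_template:
+#     node_templates:
+#       db_server:
+#         type: tosca.nodes.Compute
+#     policies:
+#       scaling:
+#         type: my.policies.Scaling
+#         targets: [ db_server ]    # valid: db_server's type is declared above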
+
+
+#
+# NodeFilter
+#
+
+def node_filter_properties_validator(field, presentation, context):
+ """
+ Makes sure that the field's elements refer to defined properties in the target node type.
+
+ Used with the :func:`field_validator` decorator for the ``properties`` field in
+ :class:`NodeFilter`.
+ """
+
+ field.default_validate(presentation, context)
+
+ values = getattr(presentation, field.name)
+ if values is not None:
+ node_type = presentation._get_node_type(context)
+ if node_type is not None:
+ properties = node_type._get_properties(context)
+ for name, _ in values:
+ if name not in properties:
+ context.validation.report(
+ 'node filter refers to an unknown property definition in "%s": %s'
+ % (node_type._name, name),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+
+
+def node_filter_capabilities_validator(field, presentation, context):
+ """
+ Makes sure that the field's elements refer to defined capabilities and properties in the target
+ node type.
+
+ Used with the :func:`field_validator` decorator for the ``capabilities`` field in
+ :class:`NodeFilter`.
+ """
+
+ field.default_validate(presentation, context)
+
+ values = getattr(presentation, field.name)
+ if values is not None: # pylint: disable=too-many-nested-blocks
+ node_type = presentation._get_node_type(context)
+ if node_type is not None:
+ capabilities = node_type._get_capabilities(context)
+ for name, value in values:
+ capability = capabilities.get(name)
+ if capability is not None:
+ properties = value.properties
+ capability_properties = capability.properties
+ if (properties is not None) and (capability_properties is not None):
+ for property_name, _ in properties:
+ if property_name not in capability_properties:
+ context.validation.report(
+ 'node filter refers to an unknown capability definition'
+ ' property in "%s": %s'
+ % (node_type._name, property_name),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
+ else:
+ context.validation.report(
+ 'node filter refers to an unknown capability definition in "%s": %s'
+ % (node_type._name, name),
+ locator=presentation._locator, level=Issue.BETWEEN_TYPES)
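+
+# A hypothetical node filter that this validator checks against the target node
+# type's declared capabilities and their properties:
+#
+#   node_filter:
+#     capabilities:
+#       - host:                   # must be a capability of the node type
+#           properties:
+#             - num_cpus:         # must be a property of that capability
+#                 in_range: [ 2, UNBOUNDED ]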
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/types.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/types.py
new file mode 100644
index 0000000..5f9750e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presentation/types.py
@@ -0,0 +1,63 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def convert_name_to_full_type_name(context, name, types_dict): # pylint: disable=unused-argument
+ """
+ Converts a type name to its full type name, or else returns it unchanged.
+
+ Works by checking for ``shorthand_name`` and ``type_qualified_name`` in the types'
+ ``_extensions`` field. See also
+ :class:`aria_extension_tosca.v1_0.presentation.extensible.ExtensiblePresentation`.
+
+ Can be used as the conversion function argument in ``type_validator`` and
+ ``derived_from_validator``.
+ """
+
+ if (name is not None) and types_dict and (name not in types_dict):
+ for full_name, the_type in types_dict.iteritems():
+ if hasattr(the_type, '_extensions') and the_type._extensions \
+ and ((the_type._extensions.get('shorthand_name') == name) \
+ or (the_type._extensions.get('type_qualified_name') == name)):
+ return full_name
+ return name
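+
+# For example (assuming the standard profile defines these extensions), both the
+# shorthand name "Compute" and the type-qualified name "tosca:Compute" convert to
+# the full name "tosca.nodes.Compute"; unknown names are returned unchanged.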
+
+
+def get_type_by_name(context, name, *types_dict_names):
+ """
+ Gets a type either by its full name or its shorthand name or type-qualified name.
+
+ Works by checking for ``shorthand_name`` and ``type_qualified_name`` in the types'
+ ``_extensions`` field. See also
+ :class:`~aria_extension_tosca.v1_0.presentation.extensible.ExtensiblePresentation`.
+
+ The arguments from the third onwards are used to locate a nested field under
+ ``service_template`` under the root presenter.
+ """
+
+ if name is not None:
+ types_dict = context.presentation.get('service_template', *types_dict_names)
+ if types_dict:
+ the_type = types_dict.get(name)
+ if the_type is not None:
+ # Full name
+ return the_type
+ for the_type in types_dict.itervalues():
+ if hasattr(the_type, '_extensions') and the_type._extensions \
+ and ((the_type._extensions.get('shorthand_name') == name) \
+ or (the_type._extensions.get('type_qualified_name') == name)):
+                    # Shorthand or type-qualified name
+ return the_type
+ return None
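+
+# A usage sketch (assuming the standard profile is imported):
+#
+#   node_type = get_type_by_name(context, 'Compute', 'node_types')
+#
+# This resolves the shorthand to the presentation of "tosca.nodes.Compute", and
+# returns None when no full, shorthand or type-qualified name matches.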
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presenter.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presenter.py
new file mode 100644
index 0000000..8e1809f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/presenter.py
@@ -0,0 +1,83 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.collections import (FrozenList, EMPTY_READ_ONLY_LIST)
+from aria.utils.caching import cachedmethod
+from aria.parser.presentation import Presenter
+
+from .modeling import create_service_template_model
+from .modeling.functions import (Concat, Token, GetInput, GetProperty, GetAttribute,
+ GetOperationOutput, GetNodesOfType, GetArtifact)
+from .templates import ServiceTemplate
+
+
+class ToscaSimplePresenter1_0(Presenter): # pylint: disable=invalid-name,abstract-method
+ """
+ ARIA presenter for the `TOSCA Simple Profile v1.0 cos01 <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html>`__.
+
+ Supported ``tosca_definitions_version`` values:
+
+ * ``tosca_simple_yaml_1_0``
+ """
+
+ DSL_VERSIONS = ('tosca_simple_yaml_1_0',)
+ ALLOWED_IMPORTED_DSL_VERSIONS = ('tosca_simple_yaml_1_0',)
+ SIMPLE_PROFILE_LOCATION = 'tosca-simple-1.0/tosca-simple-1.0.yaml'
+ SPECIAL_IMPORTS = {
+ 'aria-1.0': 'aria-1.0/aria-1.0.yaml',
+        'azure-plugin': 'azure-plugin/azureplugin.yaml'}
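+    # For example, an import whose file is "aria-1.0" resolves to the bundled
+    # 'aria-1.0/aria-1.0.yaml'; any other import path is used as-is (see
+    # _get_import_locations below).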
+
+ @property
+ @cachedmethod
+ def service_template(self):
+ return ServiceTemplate(raw=self._raw)
+
+ @property
+ @cachedmethod
+ def functions(self):
+ return {
+ 'concat': Concat,
+ 'token': Token,
+ 'get_input': GetInput,
+ 'get_property': GetProperty,
+ 'get_attribute': GetAttribute,
+ 'get_operation_output': GetOperationOutput,
+ 'get_nodes_of_type': GetNodesOfType,
+ 'get_artifact': GetArtifact}
+
+ # Presentation
+
+ def _dump(self, context):
+ self.service_template._dump(context)
+
+ def _validate(self, context):
+ self.service_template._validate(context)
+
+ # Presenter
+
+ @cachedmethod
+ def _get_import_locations(self, context):
+ import_locations = []
+ if context.presentation.import_profile:
+ import_locations.append(self.SIMPLE_PROFILE_LOCATION)
+ imports = self._get('service_template', 'imports')
+ if imports:
+ import_locations += [self.SPECIAL_IMPORTS.get(i.file, i.file) for i in imports]
+ return FrozenList(import_locations) if import_locations else EMPTY_READ_ONLY_LIST
+
+ @cachedmethod
+ def _get_model(self, context): # pylint: disable=no-self-use
+ return create_service_template_model(context)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/templates.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/templates.py
new file mode 100644
index 0000000..3c36bb8
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/templates.py
@@ -0,0 +1,736 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.collections import (FrozenDict, FrozenList)
+from aria.utils.caching import cachedmethod
+from aria.parser import implements_specification
+from aria.parser.presentation import (has_fields, primitive_field, primitive_list_field,
+ object_field, object_list_field, object_dict_field,
+ object_sequenced_list_field, field_validator,
+ type_validator, list_type_validator)
+
+from .assignments import (PropertyAssignment, AttributeAssignment, RequirementAssignment,
+ CapabilityAssignment, InterfaceAssignment, ArtifactAssignment)
+from .definitions import ParameterDefinition
+from .filters import NodeFilter
+from .misc import (Description, MetaData, Repository, Import, SubstitutionMappings)
+from .modeling.parameters import (get_assigned_and_defined_parameter_values, get_parameter_values)
+from .modeling.interfaces import get_template_interfaces
+from .modeling.requirements import get_template_requirements
+from .modeling.capabilities import get_template_capabilities
+from .modeling.artifacts import get_inherited_artifact_definitions
+from .modeling.policies import get_policy_targets
+from .modeling.copy import get_default_raw_from_copy
+from .presentation.extensible import ExtensiblePresentation
+from .presentation.field_validators import copy_validator, policy_targets_validator
+from .presentation.types import (convert_name_to_full_type_name, get_type_by_name)
+from .types import (ArtifactType, DataType, CapabilityType, InterfaceType, RelationshipType,
+ NodeType, GroupType, PolicyType)
+
+
+@has_fields
+@implements_specification('3.7.3', 'tosca-simple-1.0')
+class NodeTemplate(ExtensiblePresentation):
+ """
+ A Node Template specifies the occurrence of a manageable software component as part of an
+ application's topology model which is defined in a TOSCA Service Template. A Node template is an
+ instance of a specified Node Type and can provide customized properties, constraints or
+ operations which override the defaults provided by its Node Type and its implementations.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ENTITY_NODE_TEMPLATE>`__
+ """
+
+ @field_validator(type_validator('node type', convert_name_to_full_type_name, 'node_types'))
+ @primitive_field(str, required=True)
+ def type(self):
+ """
+ The required name of the Node Type the Node Template is based upon.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ An optional description for the Node Template.
+
+ :type: :class:`Description`
+ """
+
+ @primitive_list_field(str)
+ def directives(self):
+ """
+ An optional list of directive values to provide processing instructions to orchestrators and
+ tooling.
+
+ :type: [:obj:`basestring`]
+ """
+
+ @object_dict_field(PropertyAssignment)
+ def properties(self):
+ """
+ An optional list of property value assignments for the Node Template.
+
+ :type: {:obj:`basestring`: :class:`PropertyAssignment`}
+ """
+
+ @object_dict_field(AttributeAssignment)
+ def attributes(self):
+ """
+ An optional list of attribute value assignments for the Node Template.
+
+ :type: {:obj:`basestring`: :class:`AttributeAssignment`}
+ """
+
+ @object_sequenced_list_field(RequirementAssignment)
+ def requirements(self):
+ """
+ An optional sequenced list of requirement assignments for the Node Template.
+
+ :type: list of (str, :class:`RequirementAssignment`)
+ """
+
+ @object_dict_field(CapabilityAssignment)
+ def capabilities(self):
+ """
+ An optional list of capability assignments for the Node Template.
+
+ :type: {:obj:`basestring`: :class:`CapabilityAssignment`}
+ """
+
+ @object_dict_field(InterfaceAssignment)
+ def interfaces(self):
+ """
+ An optional list of named interface definitions for the Node Template.
+
+ :type: {:obj:`basestring`: :class:`InterfaceAssignment`}
+ """
+
+ @object_dict_field(ArtifactAssignment)
+ def artifacts(self):
+ """
+ An optional list of named artifact definitions for the Node Template.
+
+ :type: {:obj:`basestring`: :class:`ArtifactAssignment`}
+ """
+
+ @object_field(NodeFilter)
+ def node_filter(self):
+ """
+ The optional filter definition that TOSCA orchestrators would use to select the correct
+ target node. This keyname is only valid if the directive has the value of "selectable" set.
+
+ :type: :class:`NodeFilter`
+ """
+
+ @field_validator(copy_validator('node template', 'node_templates'))
+ @primitive_field(str)
+ def copy(self):
+ """
+ The optional (symbolic) name of another node template to copy into (all keynames and values)
+ and use as a basis for this node template.
+
+ :type: :obj:`basestring`
+ """
+
+ @cachedmethod
+ def _get_default_raw(self):
+ return get_default_raw_from_copy(self, 'node_templates')
+
+ @cachedmethod
+ def _get_type(self, context):
+ return get_type_by_name(context, self.type, 'node_types')
+
+ @cachedmethod
+ def _get_property_values(self, context):
+ return FrozenDict(get_assigned_and_defined_parameter_values(context, self, 'property'))
+
+ @cachedmethod
+ def _get_attribute_default_values(self, context):
+ return FrozenDict(get_assigned_and_defined_parameter_values(context, self, 'attribute'))
+
+ @cachedmethod
+ def _get_requirements(self, context):
+ return FrozenList(get_template_requirements(context, self))
+
+ @cachedmethod
+ def _get_capabilities(self, context):
+ return FrozenDict(get_template_capabilities(context, self))
+
+ @cachedmethod
+ def _get_interfaces(self, context):
+ return FrozenDict(get_template_interfaces(context, self, 'node template'))
+
+ @cachedmethod
+ def _get_artifacts(self, context):
+ return FrozenDict(get_inherited_artifact_definitions(context, self))
+
+ def _validate(self, context):
+ super(NodeTemplate, self)._validate(context)
+ self._get_property_values(context)
+ self._get_requirements(context)
+ self._get_capabilities(context)
+ self._get_interfaces(context)
+ self._get_artifacts(context)
+
+ def _dump(self, context):
+ self._dump_content(context, (
+ 'description',
+ 'type',
+ 'directives',
+ 'properties',
+ 'attributes',
+ 'requirements',
+ 'capabilities',
+ 'interfaces',
+ 'artifacts',
+ 'node_filter',
+ 'copy'))
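+
+# A minimal node template sketch (hypothetical type, template and script names)
+# touching the fields declared above:
+#
+#   node_templates:
+#     web_server:
+#       type: tosca.nodes.WebServer
+#       requirements:
+#         - host:
+#             node: db_server
+#       interfaces:
+#         Standard:
+#           create: scripts/create.sh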
+
+
+@has_fields
+@implements_specification('3.7.4', 'tosca-simple-1.0')
+class RelationshipTemplate(ExtensiblePresentation):
+ """
+ A Relationship Template specifies the occurrence of a manageable relationship between node
+ templates as part of an application's topology model that is defined in a TOSCA Service
+ Template. A Relationship template is an instance of a specified Relationship Type and can
+ provide customized properties, constraints or operations which override the defaults provided by
+ its Relationship Type and its implementations.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ENTITY_RELATIONSHIP_TEMPLATE>`__
+ """
+
+ @field_validator(type_validator('relationship type', convert_name_to_full_type_name,
+ 'relationship_types'))
+ @primitive_field(str, required=True)
+ def type(self):
+ """
+ The required name of the Relationship Type the Relationship Template is based upon.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ An optional description for the Relationship Template.
+
+ :type: :class:`Description`
+ """
+
+ @object_dict_field(PropertyAssignment)
+ def properties(self):
+ """
+ An optional list of property assignments for the Relationship Template.
+
+ :type: {:obj:`basestring`: :class:`PropertyAssignment`}
+ """
+
+ @object_dict_field(AttributeAssignment)
+ def attributes(self):
+ """
+ An optional list of attribute assignments for the Relationship Template.
+
+ :type: {:obj:`basestring`: :class:`AttributeAssignment`}
+ """
+
+ @object_dict_field(InterfaceAssignment)
+ def interfaces(self):
+ """
+ An optional list of named interface definitions for the Node Template.
+
+ ARIA NOTE: Spec is wrong here, should be Relationship Template.
+
+ :type: {:obj:`basestring`: :class:`InterfaceAssignment`}
+ """
+
+ @field_validator(copy_validator('relationship template', 'relationship_templates'))
+ @primitive_field(str)
+ def copy(self):
+ """
+ The optional (symbolic) name of another relationship template to copy into (all keynames and
+ values) and use as a basis for this relationship template.
+
+ :type: :obj:`basestring`
+ """
+
+ @cachedmethod
+ def _get_default_raw(self):
+ return get_default_raw_from_copy(self, 'relationship_templates')
+
+ @cachedmethod
+ def _get_type(self, context):
+ return get_type_by_name(context, self.type, 'relationship_types')
+
+ @cachedmethod
+ def _get_property_values(self, context):
+ return FrozenDict(get_assigned_and_defined_parameter_values(context, self, 'property'))
+
+ @cachedmethod
+ def _get_interfaces(self, context):
+ return FrozenDict(get_template_interfaces(context, self, 'relationship template'))
+
+ def _validate(self, context):
+ super(RelationshipTemplate, self)._validate(context)
+ self._get_property_values(context)
+ self._get_interfaces(context)
+
+ def _dump(self, context):
+ self._dump_content(context, (
+ 'description',
+ 'type',
+ 'properties',
+ 'attributes',
+ 'interfaces',
+ 'copy'))
+
+
+@has_fields
+@implements_specification('3.7.5', 'tosca-simple-1.0')
+class GroupTemplate(ExtensiblePresentation):
+ """
+ A group definition defines a logical grouping of node templates, typically for management
+ purposes, but is separate from the application's topology template.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_GROUP_DEF>`__
+ """
+
+ @field_validator(type_validator('group type', convert_name_to_full_type_name,
+ 'group_types'))
+ @primitive_field(str, required=True)
+ def type(self):
+ """
+ The required name of the group type the group definition is based upon.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ The optional description for the group definition.
+
+ :type: :class:`Description`
+ """
+
+ @object_dict_field(PropertyAssignment)
+ def properties(self):
+ """
+ An optional list of property value assignments for the group definition.
+
+ :type: {:obj:`basestring`: :class:`PropertyAssignment`}
+ """
+
+ @field_validator(list_type_validator('node template', 'topology_template', 'node_templates'))
+ @primitive_list_field(str)
+ def members(self):
+ """
+ The optional list of one or more node template names that are members of this group
+ definition.
+
+ :type: [:obj:`basestring`]
+ """
+
+ @object_dict_field(InterfaceAssignment)
+ def interfaces(self):
+ """
+ An optional list of named interface definitions for the group definition.
+
+        :type: {:obj:`basestring`: :class:`InterfaceAssignment`}
+ """
+
+ @cachedmethod
+ def _get_type(self, context):
+ return get_type_by_name(context, self.type, 'group_types')
+
+ @cachedmethod
+ def _get_property_values(self, context):
+ return FrozenDict(get_assigned_and_defined_parameter_values(context, self, 'property'))
+
+ @cachedmethod
+ def _get_interfaces(self, context):
+ return FrozenDict(get_template_interfaces(context, self, 'group definition'))
+
+ def _validate(self, context):
+ super(GroupTemplate, self)._validate(context)
+ self._get_property_values(context)
+ self._get_interfaces(context)
+
+
+@has_fields
+@implements_specification('3.7.6', 'tosca-simple-1.0')
+class PolicyTemplate(ExtensiblePresentation):
+ """
+ A policy definition defines a policy that can be associated with a TOSCA topology or top-level
+ entity definition (e.g., group definition, node template, etc.).
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_POLICY_DEF>`__
+ """
+
+ @field_validator(type_validator('policy type', convert_name_to_full_type_name, 'policy_types'))
+ @primitive_field(str, required=True)
+ def type(self):
+ """
+ The required name of the policy type the policy definition is based upon.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ The optional description for the policy definition.
+
+ :type: :class:`Description`
+ """
+
+ @object_dict_field(PropertyAssignment)
+ def properties(self):
+ """
+ An optional list of property value assignments for the policy definition.
+
+        :type: {:obj:`basestring`: :class:`PropertyAssignment`}
+ """
+
+ @field_validator(policy_targets_validator)
+ @primitive_list_field(str)
+ def targets(self):
+ """
+ An optional list of valid Node Templates or Groups the Policy can be applied to.
+
+ :type: [:obj:`basestring`]
+ """
+
+ @cachedmethod
+ def _get_type(self, context):
+ return get_type_by_name(context, self.type, 'policy_types')
+
+ @cachedmethod
+ def _get_property_values(self, context):
+ return FrozenDict(get_assigned_and_defined_parameter_values(context, self, 'property'))
+
+ @cachedmethod
+ def _get_targets(self, context):
+ node_templates, groups = get_policy_targets(context, self)
+ return FrozenList(node_templates), FrozenList(groups)
+
+ def _validate(self, context):
+ super(PolicyTemplate, self)._validate(context)
+ self._get_property_values(context)
+
+
+@has_fields
+@implements_specification('3.8', 'tosca-simple-1.0')
+class TopologyTemplate(ExtensiblePresentation):
+ """
+ This section defines the topology template of a cloud application. The main ingredients of the
+ topology template are node templates representing components of the application and relationship
+ templates representing links between the components. These elements are defined in the nested
+    ``node_templates`` section and the nested ``relationship_templates`` section, respectively.
+ Furthermore, a topology template allows for defining input parameters, output parameters as well
+ as grouping of node templates.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ENTITY_TOPOLOGY_TEMPLATE>`__
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ The optional description for the Topology Template.
+
+ :type: :class:`Description`
+ """
+
+ @object_dict_field(ParameterDefinition)
+ def inputs(self):
+ """
+ An optional list of input parameters (i.e., as parameter definitions) for the Topology
+ Template.
+
+ :type: {:obj:`basestring`: :class:`ParameterDefinition`}
+ """
+
+ @object_dict_field(NodeTemplate)
+ def node_templates(self):
+ """
+ An optional list of node template definitions for the Topology Template.
+
+ :type: {:obj:`basestring`: :class:`NodeTemplate`}
+ """
+
+ @object_dict_field(RelationshipTemplate)
+ def relationship_templates(self):
+ """
+ An optional list of relationship templates for the Topology Template.
+
+ :type: {:obj:`basestring`: :class:`RelationshipTemplate`}
+ """
+
+ @object_dict_field(GroupTemplate)
+ def groups(self):
+ """
+ An optional list of Group definitions whose members are node templates defined within this
+ same Topology Template.
+
+        :type: {:obj:`basestring`: :class:`GroupTemplate`}
+ """
+
+ @object_dict_field(PolicyTemplate)
+ def policies(self):
+ """
+ An optional list of Policy definitions for the Topology Template.
+
+ :type: {:obj:`basestring`: :class:`PolicyTemplate`}
+ """
+
+ @object_dict_field(ParameterDefinition)
+ def outputs(self):
+ """
+ An optional list of output parameters (i.e., as parameter definitions) for the Topology
+ Template.
+
+ :type: {:obj:`basestring`: :class:`ParameterDefinition`}
+ """
+
+ @object_field(SubstitutionMappings)
+ def substitution_mappings(self):
+ """
+ An optional declaration that exports the topology template as an implementation of a Node
+ type.
+
+    This also includes the mappings between the external Node Type's named capabilities and
+    requirements and existing implementations of those capabilities and requirements on Node
+    templates declared within the topology template.
+
+    :type: :class:`SubstitutionMappings`
+ """
+
+ @cachedmethod
+ def _get_input_values(self, context):
+ return FrozenDict(get_parameter_values(context, self, 'inputs'))
+
+ @cachedmethod
+ def _get_output_values(self, context):
+ return FrozenDict(get_parameter_values(context, self, 'outputs'))
+
+ def _validate(self, context):
+ super(TopologyTemplate, self)._validate(context)
+ self._get_input_values(context)
+ self._get_output_values(context)
+
+ def _dump(self, context):
+ self._dump_content(context, (
+ 'description',
+ 'inputs',
+ 'node_templates',
+ 'relationship_templates',
+ 'groups',
+ 'policies',
+ 'outputs',
+ 'substitution_mappings'))
+
+
+@has_fields
+@implements_specification('3.9', 'tosca-simple-1.0')
+class ServiceTemplate(ExtensiblePresentation):
+ """
+    Service template.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ELEMENT_SERVICE_TEMPLATE>`__.
+ """
+
+ @primitive_field(str)
+ @implements_specification('3.9.3.1', 'tosca-simple-1.0')
+ def tosca_definitions_version(self):
+ """
+ Defines the version of the TOSCA Simple Profile specification the template (grammar)
+ complies with.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #_Toc379455047>`__
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(MetaData)
+ def metadata(self):
+ """
+ Defines a section used to declare additional metadata information. Domain-specific TOSCA
+ profile specifications may define keynames that are required for their implementations.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #_Toc379455048>`__
+
+ :type: :class:`MetaData`
+ """
+
+ @object_field(Description)
+ @implements_specification('3.9.3.6', 'tosca-simple-1.0')
+ def description(self):
+ """
+ Declares a description for this Service Template and its contents.
+
+ :type: :class:`Description`
+ """
+
+ @primitive_field()
+ @implements_specification('3.9.3.7', 'tosca-simple-1.0')
+ def dsl_definitions(self):
+ """
+ Declares optional DSL-specific definitions and conventions. For example, in YAML, this
+ allows defining reusable YAML macros (i.e., YAML alias anchors) for use throughout the TOSCA
+ Service Template.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #_Toc397688790>`__
+ """
+
+ @object_dict_field(Repository)
+ @implements_specification('3.9.3.8', 'tosca-simple-1.0')
+ def repositories(self):
+ """
+ Declares the list of external repositories which contain artifacts that are referenced in
+ the service template along with their addresses and necessary credential information used to
+ connect to them in order to retrieve the artifacts.
+
+ :type: {:obj:`basestring`: :class:`Repository`}
+ """
+
+ @object_list_field(Import)
+ @implements_specification('3.9.3.9', 'tosca-simple-1.0')
+ def imports(self):
+ """
+    Declares import statements to external TOSCA Definitions documents. For example, these may be
+    file locations or URIs relative to the service template file within the same TOSCA CSAR file.
+
+ :type: list of :class:`Import`
+ """
+
+ @object_dict_field(ArtifactType)
+ @implements_specification('3.9.3.10', 'tosca-simple-1.0')
+ def artifact_types(self):
+ """
+ This section contains an optional list of artifact type definitions for use in the service
+ template.
+
+ :type: {:obj:`basestring`: :class:`ArtifactType`}
+ """
+
+ @object_dict_field(DataType)
+ @implements_specification('3.9.3.11', 'tosca-simple-1.0')
+ def data_types(self):
+ """
+ Declares a list of optional TOSCA Data Type definitions.
+
+ :type: {:obj:`basestring`: :class:`DataType`}
+ """
+
+ @object_dict_field(CapabilityType)
+ @implements_specification('3.9.3.12', 'tosca-simple-1.0')
+ def capability_types(self):
+ """
+ This section contains an optional list of capability type definitions for use in the service
+ template.
+
+ :type: {:obj:`basestring`: :class:`CapabilityType`}
+ """
+
+ @object_dict_field(InterfaceType)
+ @implements_specification('3.9.3.13', 'tosca-simple-1.0')
+ def interface_types(self):
+ """
+ This section contains an optional list of interface type definitions for use in the service
+ template.
+
+ :type: {:obj:`basestring`: :class:`InterfaceType`}
+ """
+
+ @object_dict_field(RelationshipType)
+ @implements_specification('3.9.3.14', 'tosca-simple-1.0')
+ def relationship_types(self):
+ """
+ This section contains a set of relationship type definitions for use in the service
+ template.
+
+ :type: {:obj:`basestring`: :class:`RelationshipType`}
+ """
+
+ @object_dict_field(NodeType)
+ @implements_specification('3.9.3.15', 'tosca-simple-1.0')
+ def node_types(self):
+ """
+ This section contains a set of node type definitions for use in the service template.
+
+ :type: {:obj:`basestring`: :class:`NodeType`}
+ """
+
+ @object_dict_field(GroupType)
+ @implements_specification('3.9.3.16', 'tosca-simple-1.0')
+ def group_types(self):
+ """
+ This section contains a list of group type definitions for use in the service template.
+
+ :type: {:obj:`basestring`: :class:`GroupType`}
+ """
+
+ @object_dict_field(PolicyType)
+ @implements_specification('3.9.3.17', 'tosca-simple-1.0')
+ def policy_types(self):
+ """
+ This section contains a list of policy type definitions for use in the service template.
+
+ :type: {:obj:`basestring`: :class:`PolicyType`}
+ """
+
+ @object_field(TopologyTemplate)
+ def topology_template(self):
+ """
+ Defines the topology template of an application or service, consisting of node templates
+ that represent the application's or service's components, as well as relationship templates
+ representing relations between the components.
+
+ :type: :class:`TopologyTemplate`
+ """
+
+ def _dump(self, context):
+ self._dump_content(context, (
+ 'description',
+ 'tosca_definitions_version',
+ 'metadata',
+ 'repositories',
+ 'imports',
+ 'artifact_types',
+ 'data_types',
+ 'capability_types',
+ 'interface_types',
+ 'relationship_types',
+ 'node_types',
+ 'group_types',
+ 'policy_types',
+ 'topology_template'))
diff --git a/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/types.py b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/types.py
new file mode 100644
index 0000000..43af44b
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/extensions/aria_extension_tosca/simple_v1_0/types.py
@@ -0,0 +1,892 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.collections import (FrozenDict, FrozenList)
+from aria.utils.caching import cachedmethod
+from aria.parser import implements_specification
+from aria.parser.presentation import (has_fields, allow_unknown_fields, primitive_field,
+ primitive_list_field, object_field, object_dict_field,
+ object_list_field, object_sequenced_list_field,
+ object_dict_unknown_fields, field_getter, field_validator,
+ list_type_validator, derived_from_validator,
+ get_parent_presentation)
+
+from .assignments import ArtifactAssignmentForType
+from .data_types import Version
+from .definitions import (PropertyDefinition, AttributeDefinition, InterfaceDefinition,
+ RequirementDefinition, CapabilityDefinition, OperationDefinition)
+from .misc import (Description, ConstraintClause)
+from .modeling.artifacts import get_inherited_artifact_definitions
+from .modeling.capabilities import (get_inherited_valid_source_types,
+ get_inherited_capability_definitions)
+from .modeling.data_types import (get_data_type, get_inherited_constraints, coerce_data_type_value,
+ validate_data_type_name)
+from .modeling.interfaces import (get_inherited_interface_definitions, get_inherited_operations)
+from .modeling.policies import get_inherited_targets
+from .modeling.parameters import get_inherited_parameter_definitions
+from .modeling.requirements import get_inherited_requirement_definitions
+from .presentation.extensible import ExtensiblePresentation
+from .presentation.field_getters import data_type_class_getter
+from .presentation.field_validators import (data_type_derived_from_validator,
+ data_type_constraints_validator,
+ data_type_properties_validator,
+ list_node_type_or_group_type_validator)
+from .presentation.types import convert_name_to_full_type_name
+
+
+@has_fields
+@implements_specification('3.6.3', 'tosca-simple-1.0')
+class ArtifactType(ExtensiblePresentation):
+ """
+ An Artifact Type is a reusable entity that defines the type of one or more files that are used
+ to define implementation or deployment artifacts that are referenced by nodes or relationships
+ on their operations.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ENTITY_ARTIFACT_TYPE>`__
+ """
+
+ @field_validator(derived_from_validator(convert_name_to_full_type_name, 'artifact_types'))
+ @primitive_field(str)
+ def derived_from(self):
+ """
+ An optional parent Artifact Type name the Artifact Type derives from.
+
+ :type: :obj:`basestring`
+ """
+
+ @field_getter(data_type_class_getter(Version))
+ @primitive_field()
+ def version(self):
+ """
+ An optional version for the Artifact Type definition.
+
+ :type: :class:`Version`
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ An optional description for the Artifact Type.
+
+ :type: :class:`Description`
+ """
+
+ @primitive_field(str)
+ def mime_type(self):
+ """
+ The required mime type property for the Artifact Type.
+
+ :type: :obj:`basestring`
+ """
+
+ @primitive_list_field(str)
+ def file_ext(self):
+ """
+ The required file extension property for the Artifact Type.
+
+ :type: [:obj:`basestring`]
+ """
+
+ @object_dict_field(PropertyDefinition)
+ def properties(self):
+ """
+ An optional list of property definitions for the Artifact Type.
+
+ :type: {:obj:`basestring`: :class:`PropertyDefinition`}
+ """
+
+ @cachedmethod
+ def _get_parent(self, context):
+ return get_parent_presentation(context, self, convert_name_to_full_type_name,
+ 'artifact_types')
+
+ @cachedmethod
+ def _get_properties(self, context):
+ return FrozenDict(get_inherited_parameter_definitions(context, self, 'properties'))
+
+ def _validate(self, context):
+ super(ArtifactType, self)._validate(context)
+ self._get_properties(context)
+
+ def _dump(self, context):
+ self._dump_content(context, (
+ 'description',
+ 'version',
+ 'derived_from',
+ 'mime_type',
+ 'file_ext',
+ 'properties'))
+
+
+@has_fields
+@implements_specification('3.6.5', 'tosca-simple-1.0')
+class DataType(ExtensiblePresentation):
+ """
+ A Data Type definition defines the schema for new named datatypes in TOSCA.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ENTITY_DATA_TYPE>`__
+ """
+
+ @field_validator(data_type_derived_from_validator)
+ @primitive_field(str)
+ def derived_from(self):
+ """
+ The optional key used when a datatype is derived from an existing TOSCA Data Type.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(Version)
+ def version(self):
+ """
+ An optional version for the Data Type definition.
+
+ :type: :class:`Version`
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ The optional description for the Data Type.
+
+ :type: :class:`Description`
+ """
+
+ @field_validator(data_type_constraints_validator)
+ @object_list_field(ConstraintClause)
+ def constraints(self):
+ """
+ The optional list of sequenced constraint clauses for the Data Type.
+
+ :type: list of (str, :class:`ConstraintClause`)
+ """
+
+ @field_validator(data_type_properties_validator)
+ @object_dict_field(PropertyDefinition)
+ def properties(self):
+ """
+        The optional list of property definitions that comprise the schema for a complex Data Type in
+ TOSCA.
+
+ :type: {:obj:`basestring`: :class:`PropertyDefinition`}
+ """
+
+ @cachedmethod
+ def _get_parent(self, context):
+ return get_data_type(context, self, 'derived_from', allow_none=True)
+
+ @cachedmethod
+ def _is_descendant(self, context, the_type):
+ if the_type is None:
+ return False
+ if not hasattr(the_type, '_name'):
+ # Must be a primitive type
+ return self._get_primitive_ancestor(context) == the_type
+ if the_type._name == self._name:
+ return True
+ return self._is_descendant(context, the_type._get_parent(context))
+
+ @cachedmethod
+ def _get_primitive_ancestor(self, context):
+ parent = self._get_parent(context)
+ if parent is not None:
+ if not isinstance(parent, DataType):
+ return parent
+ else:
+ return parent._get_primitive_ancestor(context) # pylint: disable=no-member
+ return None
+
+ @cachedmethod
+ def _get_properties(self, context):
+ return FrozenDict(get_inherited_parameter_definitions(context, self, 'properties'))
+
+ @cachedmethod
+ def _get_constraints(self, context):
+ return get_inherited_constraints(context, self)
+
+ def _validate(self, context):
+ super(DataType, self)._validate(context)
+ validate_data_type_name(context, self)
+ self._get_properties(context)
+
+ def _coerce_value(self, context, presentation, entry_schema, constraints, value, aspect):
+ return coerce_data_type_value(context, presentation, self, entry_schema, constraints, value,
+ aspect)
+
+ def _dump(self, context):
+ self._dump_content(context, (
+ 'description',
+ 'version',
+ 'derived_from',
+ 'constraints',
+ 'properties'))
+
+
+@has_fields
+@implements_specification('3.6.6', 'tosca-simple-1.0')
+class CapabilityType(ExtensiblePresentation):
+ """
+ A Capability Type is a reusable entity that describes a kind of capability that a Node Type can
+ declare to expose. Requirements (implicit or explicit) that are declared as part of one node can
+ be matched to (i.e., fulfilled by) the Capabilities declared by another node.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ENTITY_CAPABILITY_TYPE>`__
+ """
+
+ @field_validator(derived_from_validator(convert_name_to_full_type_name, 'capability_types'))
+ @primitive_field(str)
+ def derived_from(self):
+ """
+ An optional parent capability type name this new Capability Type derives from.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(Version)
+ def version(self):
+ """
+ An optional version for the Capability Type definition.
+
+ :type: :class:`Version`
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ An optional description for the Capability Type.
+
+ :type: :class:`Description`
+ """
+
+ @object_dict_field(PropertyDefinition)
+ def properties(self):
+ """
+ An optional list of property definitions for the Capability Type.
+
+        ARIA NOTE: The spec says 'list', but the examples are all dicts.
+
+ :type: {:obj:`basestring`: :class:`PropertyDefinition`}
+ """
+
+ @object_dict_field(AttributeDefinition)
+ def attributes(self):
+ """
+ An optional list of attribute definitions for the Capability Type.
+
+ :type: {:obj:`basestring`: :class:`AttributeDefinition`}
+ """
+
+ @field_validator(list_type_validator('node type', convert_name_to_full_type_name, 'node_types'))
+ @primitive_list_field(str)
+ def valid_source_types(self):
+ """
+ An optional list of one or more valid names of Node Types that are supported as valid
+ sources of any relationship established to the declared Capability Type.
+
+ :type: [:obj:`basestring`]
+ """
+
+ @cachedmethod
+ def _get_parent(self, context):
+ return get_parent_presentation(context, self, convert_name_to_full_type_name,
+ 'capability_types')
+
+ @cachedmethod
+ def _is_descendant(self, context, other_type):
+        """
+        Returns True iff `other_type` is a descendant of the represented capability type.
+        """
+ if other_type is None:
+ return False
+ elif other_type._name == self._name:
+ return True
+ return self._is_descendant(context, other_type._get_parent(context))
+
+ @cachedmethod
+ def _get_properties(self, context):
+ return FrozenDict(get_inherited_parameter_definitions(context, self, 'properties'))
+
+ @cachedmethod
+ def _get_valid_source_types(self, context):
+ return get_inherited_valid_source_types(context, self)
+
+ def _validate(self, context):
+ super(CapabilityType, self)._validate(context)
+ self._get_properties(context)
+
+ def _dump(self, context):
+ self._dump_content(context, (
+ 'description',
+ 'version',
+ 'derived_from',
+ 'valid_source_types',
+ 'properties',
+ 'attributes'))
+
+
+@allow_unknown_fields
+@has_fields
+@implements_specification('3.6.4', 'tosca-simple-1.0')
+class InterfaceType(ExtensiblePresentation):
+ """
+ An Interface Type is a reusable entity that describes a set of operations that can be used to
+ interact with or manage a node or relationship in a TOSCA topology.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ENTITY_INTERFACE_TYPE>`__
+ """
+
+ @field_validator(derived_from_validator(convert_name_to_full_type_name, 'interface_types'))
+ @primitive_field(str)
+ def derived_from(self):
+ """
+ An optional parent Interface Type name this new Interface Type derives from.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(Version)
+ def version(self):
+ """
+ An optional version for the Interface Type definition.
+
+ :type: :class:`Version`
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ An optional description for the Interface Type.
+
+ :type: :class:`Description`
+ """
+
+ @object_dict_field(PropertyDefinition)
+ def inputs(self):
+ """
+ The optional list of input parameter definitions.
+
+ :type: {:obj:`basestring`: :class:`PropertyDefinition`}
+ """
+
+ @object_dict_unknown_fields(OperationDefinition)
+ def operations(self):
+        """
+        The operation definitions for the Interface Type.
+
+        ARIA NOTE: the spec reserves no keyname for operations, so all unknown fields are parsed
+        as operation definitions.
+
+        :type: {:obj:`basestring`: :class:`OperationDefinition`}
+        """
+
+ @cachedmethod
+ def _get_parent(self, context):
+ return get_parent_presentation(context, self, convert_name_to_full_type_name,
+ 'interface_types')
+
+ @cachedmethod
+ def _is_descendant(self, context, the_type):
+ if the_type is None:
+ return False
+ elif the_type._name == self._name:
+ return True
+ return self._is_descendant(context, the_type._get_parent(context))
+
+ @cachedmethod
+ def _get_inputs(self, context):
+ return FrozenDict(get_inherited_parameter_definitions(context, self, 'inputs'))
+
+ @cachedmethod
+ def _get_operations(self, context):
+ return FrozenDict(get_inherited_operations(context, self))
+
+ def _validate(self, context):
+ super(InterfaceType, self)._validate(context)
+ self._get_inputs(context)
+ for operation in self.operations.itervalues(): # pylint: disable=no-member
+ operation._validate(context)
+
+ def _dump(self, context):
+ self._dump_content(context, (
+ 'description',
+ 'version',
+ 'derived_from',
+ 'inputs',
+ 'operations'))
+
+
+@has_fields
+@implements_specification('3.6.9', 'tosca-simple-1.0')
+class RelationshipType(ExtensiblePresentation):
+ """
+ A Relationship Type is a reusable entity that defines the type of one or more relationships
+ between Node Types or Node Templates.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ENTITY_RELATIONSHIP_TYPE>`__
+ """
+
+ @field_validator(derived_from_validator(convert_name_to_full_type_name, 'relationship_types'))
+ @primitive_field(str)
+ def derived_from(self):
+ """
+ An optional parent Relationship Type name the Relationship Type derives from.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(Version)
+ def version(self):
+ """
+ An optional version for the Relationship Type definition.
+
+ :type: :class:`Version`
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ An optional description for the Relationship Type.
+
+ :type: :class:`Description`
+ """
+
+ @object_dict_field(PropertyDefinition)
+ def properties(self):
+ """
+ An optional list of property definitions for the Relationship Type.
+
+ :type: {:obj:`basestring`: :class:`PropertyDefinition`}
+ """
+
+ @object_dict_field(AttributeDefinition)
+ def attributes(self):
+ """
+ An optional list of attribute definitions for the Relationship Type.
+
+ :type: {:obj:`basestring`: :class:`AttributeDefinition`}
+ """
+
+ @object_dict_field(InterfaceDefinition)
+ def interfaces(self):
+ """
+        An optional list of interface definitions supported by the Relationship Type.
+
+ :type: {:obj:`basestring`: :class:`InterfaceDefinition`}
+ """
+
+ @field_validator(list_type_validator('capability type', convert_name_to_full_type_name,
+ 'capability_types'))
+ @primitive_list_field(str)
+ def valid_target_types(self):
+ """
+ An optional list of one or more names of Capability Types that are valid targets for this
+ relationship.
+
+ :type: [:obj:`basestring`]
+ """
+
+ @cachedmethod
+ def _get_parent(self, context):
+ return get_parent_presentation(context, self, convert_name_to_full_type_name,
+ 'relationship_types')
+
+ @cachedmethod
+ def _is_descendant(self, context, the_type):
+ if the_type is None:
+ return False
+ elif the_type._name == self._name:
+ return True
+ return self._is_descendant(context, the_type._get_parent(context))
+
+ @cachedmethod
+ def _get_properties(self, context):
+ return FrozenDict(get_inherited_parameter_definitions(context, self, 'properties'))
+
+ @cachedmethod
+ def _get_attributes(self, context):
+ return FrozenDict(get_inherited_parameter_definitions(context, self, 'attributes'))
+
+ @cachedmethod
+ def _get_interfaces(self, context):
+ return FrozenDict(get_inherited_interface_definitions(context, self, 'relationship type'))
+
+ def _validate(self, context):
+ super(RelationshipType, self)._validate(context)
+ self._get_properties(context)
+ self._get_attributes(context)
+ self._get_interfaces(context)
+
+ def _dump(self, context):
+ self._dump_content(context, (
+ 'description',
+ 'version',
+ 'derived_from',
+ 'valid_target_types',
+ 'properties',
+ 'attributes',
+ 'interfaces'))
+
+
+@has_fields
+@implements_specification('3.6.8', 'tosca-simple-1.0')
+class NodeType(ExtensiblePresentation):
+ """
+ A Node Type is a reusable entity that defines the type of one or more Node Templates. As such, a
+ Node Type defines the structure of observable properties via a Properties Definition, the
+ Requirements and Capabilities of the node as well as its supported interfaces.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ENTITY_NODE_TYPE>`__
+ """
+
+ @field_validator(derived_from_validator(convert_name_to_full_type_name, 'node_types'))
+ @primitive_field(str)
+ def derived_from(self):
+ """
+ An optional parent Node Type name this new Node Type derives from.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(Version)
+ def version(self):
+ """
+ An optional version for the Node Type definition.
+
+ :type: :class:`Version`
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ An optional description for the Node Type.
+
+ :type: :class:`Description`
+ """
+
+ @object_dict_field(PropertyDefinition)
+ def properties(self):
+ """
+ An optional list of property definitions for the Node Type.
+
+ :type: {:obj:`basestring`: :class:`PropertyDefinition`}
+ """
+
+ @object_dict_field(AttributeDefinition)
+ def attributes(self):
+ """
+ An optional list of attribute definitions for the Node Type.
+
+ :type: {:obj:`basestring`: :class:`AttributeDefinition`}
+ """
+
+ @object_sequenced_list_field(RequirementDefinition)
+ def requirements(self):
+ """
+ An optional sequenced list of requirement definitions for the Node Type.
+
+ ARIA NOTE: The spec seems wrong to make this a sequenced list. It seems that when you have
+ more than one requirement of the same name, behavior is undefined. The idea is to use the
+ "occurrences" field if you need to limit the number of requirement assignments.
+
+ :type: list of (str, :class:`RequirementDefinition`)
+ """
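+        # ARIA NOTE (illustration): instead of declaring two requirement definitions that share
+        # one name, declare the requirement once and bound its assignments with the "occurrences"
+        # keyname, e.g. "occurrences: [ 0, 2 ]" to allow between zero and two assignments.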
+
+ @object_dict_field(CapabilityDefinition)
+ def capabilities(self):
+ """
+ An optional list of capability definitions for the Node Type.
+
+        :type: {:obj:`basestring`: :class:`CapabilityDefinition`}
+ """
+
+ @object_dict_field(InterfaceDefinition)
+ def interfaces(self):
+ """
+ An optional list of interface definitions supported by the Node Type.
+
+ :type: {:obj:`basestring`: :class:`InterfaceDefinition`}
+ """
+
+ @object_dict_field(ArtifactAssignmentForType)
+ def artifacts(self):
+ """
+ An optional list of named artifact definitions for the Node Type.
+
+ :type: {:obj:`basestring`: :class:`ArtifactAssignmentForType`}
+ """
+
+ @cachedmethod
+ def _get_parent(self, context):
+ return get_parent_presentation(context, self, convert_name_to_full_type_name, 'node_types')
+
+ @cachedmethod
+ def _is_descendant(self, context, the_type):
+ if the_type is None:
+ return False
+ elif the_type._name == self._name:
+ return True
+ return self._is_descendant(context, the_type._get_parent(context))
+
+ @cachedmethod
+ def _get_properties(self, context):
+ return FrozenDict(get_inherited_parameter_definitions(context, self, 'properties'))
+
+ @cachedmethod
+ def _get_attributes(self, context):
+ return FrozenDict(get_inherited_parameter_definitions(context, self, 'attributes'))
+
+ @cachedmethod
+ def _get_requirements(self, context):
+ return FrozenList(get_inherited_requirement_definitions(context, self))
+
+ @cachedmethod
+ def _get_capabilities(self, context):
+ return FrozenDict(get_inherited_capability_definitions(context, self))
+
+ @cachedmethod
+ def _get_interfaces(self, context):
+ return FrozenDict(get_inherited_interface_definitions(context, self, 'node type'))
+
+ @cachedmethod
+ def _get_artifacts(self, context):
+ return FrozenDict(get_inherited_artifact_definitions(context, self))
+
+ def _validate(self, context):
+ super(NodeType, self)._validate(context)
+ self._get_properties(context)
+ self._get_attributes(context)
+ self._get_requirements(context)
+ self._get_capabilities(context)
+ self._get_interfaces(context)
+ self._get_artifacts(context)
+
+ def _dump(self, context):
+ self._dump_content(context, (
+ 'description',
+ 'version',
+ 'derived_from',
+ 'properties',
+ 'attributes',
+ 'interfaces',
+ 'artifacts',
+ 'requirements',
+ 'capabilities'))
+
+
+@has_fields
+@implements_specification('3.6.10', 'tosca-simple-1.0')
+class GroupType(ExtensiblePresentation):
+ """
+ A Group Type defines logical grouping types for nodes, typically for different management
+ purposes. Groups can effectively be viewed as logical nodes that are not part of the physical
+ deployment topology of an application, yet can have capabilities and the ability to attach
+ policies and interfaces that can be applied (depending on the group type) to its member nodes.
+
+ Conceptually, group definitions allow the creation of logical "membership" relationships to
+ nodes in a service template that are not a part of the application's explicit requirement
+ dependencies in the topology template (i.e. those required to actually get the application
+ deployed and running). Instead, such logical membership allows for the introduction of things
+ such as group management and uniform application of policies (i.e., requirements that are also
+ not bound to the application itself) to the group's members.
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ENTITY_GROUP_TYPE>`__
+ """
+
+ @field_validator(derived_from_validator(convert_name_to_full_type_name, 'group_types'))
+ @primitive_field(str)
+ def derived_from(self):
+ """
+ An optional parent Group Type name the Group Type derives from.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(Version)
+ def version(self):
+ """
+ An optional version for the Group Type definition.
+
+ :type: :class:`Version`
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ The optional description for the Group Type.
+
+ :type: :class:`Description`
+ """
+
+ @object_dict_field(PropertyDefinition)
+ def properties(self):
+ """
+ An optional list of property definitions for the Group Type.
+
+ :type: {:obj:`basestring`: :class:`PropertyDefinition`}
+ """
+
+ @field_validator(list_type_validator('node type', convert_name_to_full_type_name, 'node_types'))
+ @primitive_list_field(str)
+ def members(self):
+ """
+ An optional list of one or more names of Node Types that are valid (allowed) as members of
+ the Group Type.
+
+ Note: This can be viewed by TOSCA Orchestrators as an implied relationship from the listed
+        member nodes to the group, but one that does not have operational lifecycle considerations.
+ For example, if we were to name this as an explicit Relationship Type we might call this
+ "MemberOf" (group).
+
+ :type: [:obj:`basestring`]
+ """
+
+ @object_dict_field(InterfaceDefinition)
+ def interfaces(self):
+ """
+ An optional list of interface definitions supported by the Group Type.
+
+ :type: {:obj:`basestring`: :class:`InterfaceDefinition`}
+ """
+
+ @cachedmethod
+ def _get_parent(self, context):
+ return get_parent_presentation(context, self, convert_name_to_full_type_name,
+ 'group_types')
+
+ @cachedmethod
+ def _is_descendant(self, context, the_type):
+ if the_type is None:
+ return False
+ elif the_type._name == self._name:
+ return True
+ return self._is_descendant(context, the_type._get_parent(context))
+
+ @cachedmethod
+ def _get_properties(self, context):
+ return FrozenDict(get_inherited_parameter_definitions(context, self, 'properties'))
+
+ @cachedmethod
+ def _get_interfaces(self, context):
+ return FrozenDict(get_inherited_interface_definitions(context, self, 'group type'))
+
+ def _validate(self, context):
+ super(GroupType, self)._validate(context)
+ self._get_properties(context)
+ self._get_interfaces(context)
+
+ def _dump(self, context):
+ self._dump_content(context, (
+ 'description',
+ 'version',
+ 'derived_from',
+ 'members',
+ 'properties',
+ 'interfaces'))
+
+
+@has_fields
+@implements_specification('3.6.11', 'tosca-simple-1.0')
+class PolicyType(ExtensiblePresentation):
+ """
+ A Policy Type defines a type of requirement that affects or governs an application or service's
+ topology at some stage of its lifecycle, but is not explicitly part of the topology itself
+ (i.e., it does not prevent the application or service from being deployed or run if it did not
+ exist).
+
+ See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
+ /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
+ #DEFN_ENTITY_POLICY_TYPE>`__
+ """
+
+ @field_validator(derived_from_validator(convert_name_to_full_type_name, 'policy_types'))
+ @primitive_field(str)
+ def derived_from(self):
+ """
+ An optional parent Policy Type name the Policy Type derives from.
+
+ :type: :obj:`basestring`
+ """
+
+ @object_field(Version)
+ def version(self):
+ """
+ An optional version for the Policy Type definition.
+
+ :type: :class:`Version`
+ """
+
+ @object_field(Description)
+ def description(self):
+ """
+ The optional description for the Policy Type.
+
+ :type: :class:`Description`
+ """
+
+ @object_dict_field(PropertyDefinition)
+ def properties(self):
+ """
+ An optional list of property definitions for the Policy Type.
+
+        :type: {:obj:`basestring`: :class:`PropertyDefinition`}
+ """
+
+ @field_validator(list_node_type_or_group_type_validator)
+ @primitive_list_field(str)
+ def targets(self):
+ """
+ An optional list of valid Node Types or Group Types the Policy Type can be applied to.
+
+ Note: This can be viewed by TOSCA Orchestrators as an implied relationship to the target
+ nodes, but one that does not have operational lifecycle considerations. For example, if we
+ were to name this as an explicit Relationship Type we might call this "AppliesTo" (node or
+ group).
+
+ :type: [:obj:`basestring`]
+ """
+
+ @cachedmethod
+ def _get_parent(self, context):
+ return get_parent_presentation(context, self, convert_name_to_full_type_name,
+ 'policy_types')
+
+ @cachedmethod
+ def _get_properties(self, context):
+ return FrozenDict(get_inherited_parameter_definitions(context, self, 'properties'))
+
+ @cachedmethod
+ def _get_targets(self, context):
+ node_types, group_types = get_inherited_targets(context, self)
+ return FrozenList(node_types), FrozenList(group_types)
+
+ def _validate(self, context):
+ super(PolicyType, self)._validate(context)
+ self._get_properties(context)
+
+ def _dump(self, context):
+ self._dump_content(context, (
+ 'description',
+ 'version',
+ 'derived_from',
+ 'targets',
+ 'properties'))
diff --git a/azure/aria/aria-extension-cloudify/src/aria/release/asf-release.sh b/azure/aria/aria-extension-cloudify/src/aria/release/asf-release.sh
new file mode 100644
index 0000000..18f5b38
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/release/asf-release.sh
@@ -0,0 +1,283 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This script is meant to help with the creation of Apache-compliant
+# release candidates, as well as finalizing releases by using said candidates.
+#
+# Creation of a release candidate includes:
+# 1) Creating a source package (a snapshot of the repository)
+# 2) Creating a Pythonic sdist (generated docs, examples, etc., but no tests etc.)
+# 3) Creating a Pythonic bdist (Wheel; binary distribution)
+# 4) Publishing these packages on to https://dist.apache.org/repos/dist/dev/incubator/ariatosca/
+# 5) Publishing the sdist and bdist packages on test-PyPI (https://test.pypi.org/)
+#
+# Finalization of a release includes:
+# 1) Copying of the source, sdist and bdist packages from /dist/dev to /dist/release
+# (i.e. from https://dist.apache.org/repos/dist/dev/incubator/ariatosca/
+# to https://dist.apache.org/repos/dist/release/incubator/ariatosca/)
+# 2) Publishing the sdist and bdist packages on PyPI (https://pypi.org)
+# 3) Tagging the git repository for the release version
+#
+# Read more about Apache release rules and regulations at:
+# 1) https://www.apache.org/dev/#releases
+# 2) https://www.apache.org/legal/release-policy.html
+# 3) https://www.apache.org/dev/release-distribution.html
+# 4) https://www.apache.org/dev/release-publishing.html
+# 5) https://www.apache.org/dev/release-signing.html
+# 6) http://incubator.apache.org/incubation/Incubation_Policy.html#Releases
+# 7) http://incubator.apache.org/guides/releasemanagement.html
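+#
+# Usage (as enforced by the argument handling at the bottom of this script):
+#   asf-release.sh candidate <git-branch>   # create and publish a release candidate
+#   asf-release.sh package <git-branch>     # finalize the release for that branch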
+
+
+set -e
+
+
+function create_apache_release_candidate {
+ if [ "$#" -lt 1 ]; then
+ echo "Must provide git branch for release candidate" >&2
+ return 1
+ fi
+
+ local GIT_BRANCH=$1
+ local OPTIONAL_ARIATOSCA_DIST_DEV_PATH=$2
+
+ ARIA_DIR=$(_get_aria_dir)
+ pushd ${ARIA_DIR}
+
+ git checkout ${GIT_BRANCH}
+ local VERSION=$(cat VERSION)
+
+ echo "Creating Apache release candidate for version ${VERSION}..."
+
+ make clean
+ _create_source_package ${GIT_BRANCH} ${VERSION}
+ _create_sdist_and_bdist_packages
+ _publish_to_apache_dev ${VERSION} ${OPTIONAL_ARIATOSCA_DIST_DEV_PATH}
+ _publish_to_test_pypi
+ git checkout -
+ popd
+}
+
+
+function finalize_apache_release {
+ if [ "$#" -ne 1 ]; then
+ echo "Must provide git branch for release tagging" >&2
+ return 1
+ fi
+
+ local GIT_BRANCH=$1
+
+ ARIA_DIR=$(_get_aria_dir)
+ pushd ${ARIA_DIR}
+
+ git checkout ${GIT_BRANCH}
+ local VERSION=$(cat VERSION)
+
+ read -p "Enter 'Yes' to confirm release finalization for version ${VERSION}: " yn
+ case $yn in
+ Yes ) echo "Finalizing Apache release...";;
+ * ) git checkout -; return;;
+ esac
+
+ _publish_to_apache_release ${VERSION}
+ _publish_to_real_pypi
+ _create_git_tag ${VERSION}
+ git checkout -
+ popd
+}
+
+
+function _get_aria_dir {
+ SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+ ARIA_DIR="$(dirname "${SCRIPT_DIR}")"
+ echo ${ARIA_DIR}
+}
+
+
+function _create_source_package {
+ local GIT_BRANCH=$1
+ local VERSION=$2
+ local INCUBATING_ARCHIVE_CONTENT_DIR=apache-ariatosca-${VERSION}-incubating # e.g. apache-ariatosca-0.1.0-incubating
+ local INCUBATING_ARCHIVE=${INCUBATING_ARCHIVE_CONTENT_DIR}.tar.gz # e.g. apache-ariatosca-0.1.0-incubating.tar.gz
+ local SOURCE_PACKAGE_DIR="source"
+
+ echo "Creating source package..."
+ mkdir -p dist/${SOURCE_PACKAGE_DIR}
+ pushd dist/${SOURCE_PACKAGE_DIR}
+    # download a fresh snapshot of the repository, so the source package is clean and not
+    # environment-dependent
+ wget https://github.com/apache/incubator-ariatosca/archive/${GIT_BRANCH}.zip
+ unzip ${GIT_BRANCH}.zip > /dev/null
+ mv incubator-ariatosca-${GIT_BRANCH} ${INCUBATING_ARCHIVE_CONTENT_DIR}
+ tar -czvf ${INCUBATING_ARCHIVE} ${INCUBATING_ARCHIVE_CONTENT_DIR} > /dev/null
+ rm -rf ${INCUBATING_ARCHIVE_CONTENT_DIR}
+ rm ${GIT_BRANCH}.zip
+
+ _sign_package ${INCUBATING_ARCHIVE}
+ popd
+}
+
+function _sign_package {
+ local ARCHIVE_NAME=$1
+
+ echo "Signing archive ${ARCHIVE_NAME}..."
+ gpg --armor --output ${ARCHIVE_NAME}.asc --detach-sig ${ARCHIVE_NAME}
+ gpg --print-md MD5 ${ARCHIVE_NAME} > ${ARCHIVE_NAME}.md5
+ gpg --print-md SHA512 ${ARCHIVE_NAME} > ${ARCHIVE_NAME}.sha
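+    # the detached signature can later be verified with:
+    #   gpg --verify ${ARCHIVE_NAME}.asc ${ARCHIVE_NAME}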
+}
+
+
+function _create_sdist_and_bdist_packages {
+ local SDIST_PACKAGE_DIR="sdist"
+ local BDIST_PACKAGE_DIR="bdist"
+
+ echo "Creating sdist and bdist packages..."
+ make docs
+ python setup.py sdist -d dist/${SDIST_PACKAGE_DIR} bdist_wheel -d dist/${BDIST_PACKAGE_DIR}
+
+ # pushing LICENSE and additional files into the binary distribution archive
+ find dist/${BDIST_PACKAGE_DIR} -type f -name '*.whl' -exec zip -u {} LICENSE NOTICE DISCLAIMER \;
+
+ pushd dist/${SDIST_PACKAGE_DIR}
+ local SDIST_ARCHIVE=$(find . -type f -name "*.tar.gz" -printf '%P\n')
+ _sign_package ${SDIST_ARCHIVE}
+ popd
+
+ pushd dist/${BDIST_PACKAGE_DIR}
+ local BDIST_ARCHIVE=$(find . -type f -name "*.whl" -printf '%P\n')
+ _sign_package ${BDIST_ARCHIVE}
+ popd
+}
+
+
+function _publish_to_test_pypi {
+ echo "Publishing to test PyPI..."
+ _publish_to_pypi https://test.pypi.org/legacy/
+}
+
+
+function _publish_to_apache_dev {
+ local VERSION=$1
+ local ARIATOSCA_DIST_DEV_PATH=$2
+
+ local DIST_DIR=$(pwd)/dist
+ local RELEASE_DIR=${VERSION}-incubating # e.g. 0.1.0-incubating
+
+ echo "Publishing to Apache dist dev..."
+ if [ -z "${ARIATOSCA_DIST_DEV_PATH}" ]; then
+ local TMP_DIR=$(mktemp -d)
+ echo "Checking out ARIA dist dev to ${TMP_DIR}"
+ pushd ${TMP_DIR}
+ svn co https://dist.apache.org/repos/dist/dev/incubator/ariatosca/
+ popd
+ pushd ${TMP_DIR}/ariatosca
+ else
+ pushd ${ARIATOSCA_DIST_DEV_PATH}
+ fi
+
+ svn up
+ cp -r ${DIST_DIR} .
+ mv dist/ ${RELEASE_DIR}/
+ svn add ${RELEASE_DIR}
+ svn commit -m "ARIA ${VERSION} release candidate"
+ popd
+}
+
+
+function _publish_to_real_pypi {
+ echo "Publishing to PyPI..."
+ _publish_to_pypi https://upload.pypi.org/legacy/
+}
+
+
+function _publish_to_pypi {
+ local REPOSITORY_URL=$1
+
+ pushd dist
+
+ pushd sdist
+ local SDIST_ARCHIVE=$(find . -type f -name "*.tar.gz" -printf '%P\n')
+ twine upload --repository-url ${REPOSITORY_URL} ${SDIST_ARCHIVE} ${SDIST_ARCHIVE}.asc
+ popd
+
+ pushd bdist
+ local BDIST_ARCHIVE=$(find . -type f -name "*.whl" -printf '%P\n')
+ twine upload --repository-url ${REPOSITORY_URL} ${BDIST_ARCHIVE} ${BDIST_ARCHIVE}.asc
+ popd
+
+ popd
+}
+
+
+function _publish_to_apache_release {
+ local VERSION=$1
+ local RELEASE_DIR=${VERSION}-incubating # e.g. 0.1.0-incubating
+
+ echo "Publishing to Apache dist..."
+
+ local TMP_DIR=$(mktemp -d)
+    echo "Checking out ARIA dist dev and dist release to ${TMP_DIR}"
+ pushd ${TMP_DIR}
+
+ svn co https://dist.apache.org/repos/dist/dev/incubator/ariatosca/ ariatosca-dev
+ svn co https://dist.apache.org/repos/dist/release/incubator/ariatosca/ ariatosca-release
+ cp -r ariatosca-dev/${RELEASE_DIR} ariatosca-release
+
+ pushd ariatosca-release
+ svn add ${RELEASE_DIR}
+ # TODO: remove older releases?
+ svn commit -m "ARIA ${VERSION} release"
+ popd
+ popd
+}
+
+
+function _create_git_tag {
+ local VERSION=$1
+
+ echo "Creating git tag ${VERSION}"
+ git tag -a ${VERSION} -m "ARIA ${VERSION}"
+ git push --tags origin
+}
+
+
+function pushd {
+ command pushd "$@" > /dev/null
+}
+
+
+function popd {
+ command popd "$@" > /dev/null
+}
+
+
+if [ "$#" -ne 2 ]; then
+ echo "Usage: $0 {candidate,package} <git-branch>" >&2
+ exit 1
+fi
+
+OPERATION=$1
+GIT_BRANCH=$2
+
+if [ "${OPERATION}" == "candidate" ]; then
+ create_apache_release_candidate ${GIT_BRANCH}
+elif [ "${OPERATION}" == "package" ]; then
+ finalize_apache_release ${GIT_BRANCH}
+else
+ echo "First parameter must be either 'candidate' or 'package'" >&2
+ exit 1
+fi
diff --git a/azure/aria/aria-extension-cloudify/src/aria/requirements.in b/azure/aria/aria-extension-cloudify/src/aria/requirements.in
new file mode 100644
index 0000000..a451c38
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/requirements.in
@@ -0,0 +1,41 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# In order to create the requirements.txt file, execute
+# pip-compile --output-file requirements.txt requirements.in (pip-tools package is needed).
+
+requests>=2.3.0, <=2.16.0
+networkx>=1.9, <1.10 # version 1.10 dropped support of python 2.6
+retrying>=1.3.0, <1.4.0
+blinker>1.3, <1.5
+jsonpickle>0.9.0, <=0.9.4
+ruamel.yaml>=0.11.12, <0.12.0 # version 0.12.0 dropped support of python 2.6
+Jinja2>=2.8, <2.9
+shortuuid>=0.5, <0.6
+CacheControl[filecache]>=0.11.0, <0.13
+SQLAlchemy>=1.1.0, <1.2 # version 1.2 dropped support of python 2.6
+wagon==0.6.0
+bottle>=0.12.0, <0.13
+setuptools>=35.0.0, <36.0.0
+click>=6.0, < 7.0
+colorama>=0.3.7, <=0.3.9
+PrettyTable>=0.7,<0.8
+click_didyoumean==0.0.3
+backports.shutil_get_terminal_size==1.0.0
+logutils==0.3.4.1
+psutil>=5.2.2, < 6.0.0
+importlib ; python_version < '2.7'
+ordereddict ; python_version < '2.7'
+total-ordering ; python_version < '2.7' # only one version on pypi
+wheel<=0.29.0 ; python_version < '2.7'
+Fabric==1.13.0
+pycrypto==2.6.1
diff --git a/azure/aria/aria-extension-cloudify/src/aria/requirements.txt b/azure/aria/aria-extension-cloudify/src/aria/requirements.txt
new file mode 100644
index 0000000..936662f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/requirements.txt
@@ -0,0 +1,43 @@
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+# pip-compile --output-file requirements.txt requirements.in
+#
+appdirs==1.4.3 # via setuptools
+backports.shutil_get_terminal_size==1.0.0
+blinker==1.4
+bottle==0.12.13
+cachecontrol[filecache]==0.12.1
+click==6.7
+click_didyoumean==0.0.3
+colorama==0.3.9
+decorator==4.0.11 # via networkx
+Fabric==1.13.0
+importlib==1.0.4 ; python_version < "2.7"
+jinja2==2.8.1
+jsonpickle==0.9.4
+lockfile==0.12.2 # via cachecontrol
+logutils==0.3.4.1
+markupsafe==1.0 # via jinja2
+msgpack-python==0.4.8 # via cachecontrol
+networkx==1.9.1
+ordereddict==1.1 ; python_version < "2.7"
+packaging==16.8 # via setuptools
+prettytable==0.7.2
+psutil==5.2.2
+pycrypto==2.6.1
+pyparsing==2.2.0 # via packaging
+requests==2.13.0
+retrying==1.3.3
+ruamel.ordereddict==0.4.9 # via ruamel.yaml
+ruamel.yaml==0.11.15
+shortuuid==0.5.0
+six==1.10.0 # via packaging, retrying, setuptools
+sqlalchemy==1.1.6
+total-ordering==0.1.0 ; python_version < "2.7"
+wagon==0.6.0
+wheel==0.29.0 ; python_version < "2.7"
+
+# The following packages are considered to be unsafe in a requirements file:
+setuptools==35.0.2
diff --git a/azure/aria/aria-extension-cloudify/src/aria/setup.py b/azure/aria/aria-extension-cloudify/src/aria/setup.py
new file mode 100644
index 0000000..04795a5
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/setup.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env python
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+from setuptools import setup, find_packages
+from setuptools.command.install import install
+from setuptools.command.develop import develop
+
+
+_PACKAGE_NAME = 'apache-ariatosca'
+_PYTHON_SUPPORTED_VERSIONS = [(2, 6), (2, 7)]
+_EXTENSION_DIR = 'extensions'
+_EXTENSION_NAMES = [
+ 'aria_extension_tosca'
+]
+
+if (sys.version_info[0], sys.version_info[1]) not in _PYTHON_SUPPORTED_VERSIONS:
+ raise NotImplementedError(
+        '{0} package supports Python versions 2.6 and 2.7 only'.format(
+ _PACKAGE_NAME))
+
+root_dir = os.path.dirname(__file__)
+
+with open(os.path.join(root_dir, 'VERSION')) as version_file:
+ __version__ = version_file.read().strip()
+ incubating_version = '{0}-incubating'.format(__version__)
+
+with open(os.path.join(root_dir, 'README.rst')) as readme:
+ long_description = readme.read()
+
+install_requires = []
+
+ssh_requires = [
+ 'Fabric>=1.13.0, <1.14',
+]
+win_ssh_requires = [
+    # Fabric depends on pypiwin32 on Windows, but does not install it
+ 'pypiwin32==219'
+]
+
+extras_require = {
+ 'ssh': ssh_requires,
+ 'ssh:sys_platform=="win32"': win_ssh_requires
+}
+
+with open(os.path.join(root_dir, 'requirements.in')) as requirements:
+ for requirement in requirements.readlines():
+        requirement = requirement.split('#')[0].strip()  # strip whole-line and trailing comments
+ if not requirement:
+ continue # skip empty and comment lines
+
+ # dependencies which use environment markers have to go in as conditional dependencies
+ # under "extra_require" rather than "install_requires", or otherwise the environment
+ # markers get ignored when installing from wheel. See more here:
+ # https://wheel.readthedocs.io/en/latest/index.html#defining-conditional-dependencies
+ # https://hynek.me/articles/conditional-python-dependencies/
+ if ';' in requirement:
+ package, condition = requirement.split(';')
+ cond_name = ':{0}'.format(condition.strip())
+ extras_require.setdefault(cond_name, [])
+ extras_require[cond_name].append(package.strip())
+ else:
+ install_requires.append(requirement)
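+# For example, the requirements.in line "ordereddict ; python_version < '2.7'" is translated
+# above into extras_require[":python_version < '2.7'"] == ['ordereddict'], while unconditional
+# lines go straight into install_requires.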
+
+
+console_scripts = ['aria = aria.cli.main:main']
+
+
+def _generate_user_options(command):
+ return command.user_options + [
+        ('skip-ctx', None, 'Skip installation of the ctx console script (defaults to False)')
+ ]
+
+
+def _generate_boolean_options(command):
+ return command.boolean_options + ['skip-ctx']
+
+
+def _initialize_options(custom_cmd):
+ custom_cmd.command.initialize_options(custom_cmd)
+ custom_cmd.skip_ctx = False
+
+
+def _run(custom_cmd):
+ if custom_cmd.skip_ctx is False:
+ console_scripts.append('ctx = aria.orchestrator.execution_plugin.ctx_proxy.client:main')
+ custom_cmd.command.run(custom_cmd)
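+
+# NOTE: through the custom commands below, "python setup.py install --skip-ctx" (and the
+# equivalent develop command) installs ARIA without the ctx console script, while a plain
+# install registers both the aria and ctx entry points.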
+
+
+class InstallCommand(install):
+ command = install
+
+ user_options = _generate_user_options(install)
+ boolean_options = _generate_boolean_options(install)
+ initialize_options = _initialize_options
+ run = _run
+
+
+class DevelopCommand(develop):
+ command = develop
+
+ user_options = _generate_user_options(develop)
+ boolean_options = _generate_boolean_options(develop)
+ initialize_options = _initialize_options
+ run = _run
+
+setup(
+ name=_PACKAGE_NAME,
+ version=__version__,
+ description='ARIA',
+ long_description=long_description,
+ license='Apache License 2.0',
+ author='ARIA',
+ author_email='dev@ariatosca.incubator.apache.org',
+ url='http://ariatosca.incubator.apache.org/',
+ download_url=(
+ 'https://dist.apache.org/repos/dist/release/incubator/ariatosca/' + incubating_version),
+ classifiers=[
+ 'Development Status :: 4 - Beta',
+ 'Environment :: Console',
+ 'Environment :: Web Environment',
+ 'Intended Audience :: Developers',
+ 'Intended Audience :: System Administrators',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
+ 'Topic :: System :: Networking',
+ 'Topic :: System :: Systems Administration'],
+ packages=find_packages(include=['aria*']) +
+ find_packages(where=_EXTENSION_DIR,
+ include=['{0}*'.format(name) for name in _EXTENSION_NAMES]),
+ package_dir=dict((name, '{0}/{1}'.format(_EXTENSION_DIR, name)) for name in _EXTENSION_NAMES),
+ package_data={
+ 'aria': [
+ 'cli/config/config_template.yaml'
+ ],
+ 'aria_extension_tosca': [
+ 'profiles/tosca-simple-1.0/**',
+ 'profiles/tosca-simple-nfv-1.0/**',
+ 'profiles/aria-1.0/**',
+ 'profiles/azure-plugin/**'
+ ]
+ },
+ platforms=['any'],
+ zip_safe=False,
+ install_requires=install_requires,
+ extras_require=extras_require,
+ entry_points={
+ 'console_scripts': console_scripts
+ },
+ cmdclass={
+ 'install': InstallCommand, # used in pip install ...
+ 'develop': DevelopCommand # used in pip install -e ...
+ }
+)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/test_ssh.py b/azure/aria/aria-extension-cloudify/src/aria/test_ssh.py
new file mode 100644
index 0000000..5256cf8
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/test_ssh.py
@@ -0,0 +1,528 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import contextlib
+import json
+import logging
+import os
+
+import pytest
+
+import fabric.api
+from fabric.contrib import files
+from fabric import context_managers
+
+from aria.modeling import models
+from aria.orchestrator import events
+from aria.orchestrator import workflow
+from aria.orchestrator.workflows import api
+from aria.orchestrator.workflows.executor import process
+from aria.orchestrator.workflows.core import engine, graph_compiler
+from aria.orchestrator.workflows.exceptions import ExecutorException
+from aria.orchestrator.exceptions import TaskAbortException, TaskRetryException
+from aria.orchestrator.execution_plugin import operations
+from aria.orchestrator.execution_plugin import constants
+from aria.orchestrator.execution_plugin.exceptions import ProcessException, TaskException
+from aria.orchestrator.execution_plugin.ssh import operations as ssh_operations
+
+from tests import mock, storage, resources
+from tests.orchestrator.workflows.helpers import events_collector
+
+import tests
+
+_CUSTOM_BASE_DIR = '/tmp/new-aria-ctx'
+
+KEY_FILENAME = os.path.join(tests.ROOT_DIR, 'tests/resources/keys/test')
+
+_FABRIC_ENV = {
+ 'disable_known_hosts': True,
+ 'user': 'test',
+ 'key_filename': KEY_FILENAME
+}
+
+
+import mockssh
+
+
+@pytest.fixture(scope='session')
+def server():
+    # mockssh (the mock-ssh-server package) runs a throwaway local SSH server that
+    # authenticates the 'test' user with the test key, so the real SSH code path is
+    # exercised without a remote host
+    with mockssh.Server({'test': KEY_FILENAME}) as s:
+        yield s
+
+
+#@pytest.mark.skipif(not os.environ.get('TRAVIS'), reason='actual ssh server required')
+class TestWithActualSSHServer(object):
+
+ def test_run_script_basic(self):
+ expected_attribute_value = 'some_value'
+ props = self._execute(env={'test_value': expected_attribute_value})
+ assert props['test_value'].value == expected_attribute_value
+
+ @pytest.mark.skip(reason='sudo privileges are required')
+ def test_run_script_as_sudo(self):
+ self._execute(use_sudo=True)
+ with self._ssh_env():
+ assert files.exists('/opt/test_dir')
+ fabric.api.sudo('rm -rf /opt/test_dir')
+
+ def test_run_script_default_base_dir(self):
+ props = self._execute()
+ assert props['work_dir'].value == '{0}/work'.format(constants.DEFAULT_BASE_DIR)
+
+ @pytest.mark.skip(reason='Re-enable once output from process executor can be captured')
+ @pytest.mark.parametrize('hide_groups', [[], ['everything']])
+ def test_run_script_with_hide(self, hide_groups):
+ self._execute(hide_output=hide_groups)
+ output = 'TODO'
+ expected_log_message = ('[localhost] run: source {0}/scripts/'
+ .format(constants.DEFAULT_BASE_DIR))
+ if hide_groups:
+ assert expected_log_message not in output
+ else:
+ assert expected_log_message in output
+
+ def test_run_script_process_config(self):
+ expected_env_value = 'test_value_env'
+ expected_arg1_value = 'test_value_arg1'
+ expected_arg2_value = 'test_value_arg2'
+ expected_cwd = '/tmp'
+ expected_base_dir = _CUSTOM_BASE_DIR
+ props = self._execute(
+ env={'test_value_env': expected_env_value},
+ process={
+ 'args': [expected_arg1_value, expected_arg2_value],
+ 'cwd': expected_cwd,
+ 'base_dir': expected_base_dir
+ })
+ assert props['env_value'].value == expected_env_value
+ assert len(props['bash_version'].value) > 0
+ assert props['arg1_value'].value == expected_arg1_value
+ assert props['arg2_value'].value == expected_arg2_value
+ assert props['cwd'].value == expected_cwd
+ assert props['ctx_path'].value == '{0}/ctx'.format(expected_base_dir)
+
+ def test_run_script_command_prefix(self):
+ props = self._execute(process={'command_prefix': 'bash -i'})
+ assert 'i' in props['dollar_dash'].value
+
+ def test_run_script_reuse_existing_ctx(self):
+ expected_test_value_1 = 'test_value_1'
+ expected_test_value_2 = 'test_value_2'
+ props = self._execute(
+ test_operations=['{0}_1'.format(self.test_name),
+ '{0}_2'.format(self.test_name)],
+ env={'test_value1': expected_test_value_1,
+ 'test_value2': expected_test_value_2})
+ assert props['test_value1'].value == expected_test_value_1
+ assert props['test_value2'].value == expected_test_value_2
+
+ def test_run_script_download_resource_plain(self, tmpdir):
+ resource = tmpdir.join('resource')
+ resource.write('content')
+ self._upload(str(resource), 'test_resource')
+ props = self._execute()
+ assert props['test_value'].value == 'content'
+
+ def test_run_script_download_resource_and_render(self, tmpdir):
+ resource = tmpdir.join('resource')
+ resource.write('{{ctx.service.name}}')
+ self._upload(str(resource), 'test_resource')
+ props = self._execute()
+ assert props['test_value'].value == self._workflow_context.service.name
+
+ @pytest.mark.parametrize('value', ['string-value', [1, 2, 3], {'key': 'value'}])
+ def test_run_script_inputs_as_env_variables_no_override(self, value):
+ props = self._execute(custom_input=value)
+ return_value = props['test_value'].value
+ expected = return_value if isinstance(value, basestring) else json.loads(return_value)
+ assert value == expected
+
+ @pytest.mark.parametrize('value', ['string-value', [1, 2, 3], {'key': 'value'}])
+ def test_run_script_inputs_as_env_variables_process_env_override(self, value):
+ props = self._execute(custom_input='custom-input-value',
+ env={'custom_env_var': value})
+ return_value = props['test_value'].value
+ expected = return_value if isinstance(value, basestring) else json.loads(return_value)
+ assert value == expected
+
+ def test_run_script_error_in_script(self):
+ exception = self._execute_and_get_task_exception()
+ assert isinstance(exception, TaskException)
+
+ def test_run_script_abort_immediate(self):
+ exception = self._execute_and_get_task_exception()
+ assert isinstance(exception, TaskAbortException)
+ assert exception.message == 'abort-message'
+
+ def test_run_script_retry(self):
+ exception = self._execute_and_get_task_exception()
+ assert isinstance(exception, TaskRetryException)
+ assert exception.message == 'retry-message'
+
+ def test_run_script_abort_error_ignored_by_script(self):
+ exception = self._execute_and_get_task_exception()
+ assert isinstance(exception, TaskAbortException)
+ assert exception.message == 'abort-message'
+
+ def test_run_commands(self):
+ temp_file_path = '/tmp/very_temporary_file'
+ with self._ssh_env():
+ if files.exists(temp_file_path):
+ fabric.api.run('rm {0}'.format(temp_file_path))
+ self._execute(commands=['touch {0}'.format(temp_file_path)])
+ with self._ssh_env():
+ assert files.exists(temp_file_path)
+ fabric.api.run('rm {0}'.format(temp_file_path))
+
+ @pytest.fixture(autouse=True)
+ def _setup(self, request, workflow_context, executor, capfd, server):
+        self._workflow_context = workflow_context
+        self._executor = executor
+        self._capfd = capfd
+        self._server = server
+        self.test_name = request.node.originalname or request.node.name
+        with self._ssh_env():
+ for directory in [constants.DEFAULT_BASE_DIR, _CUSTOM_BASE_DIR]:
+ if files.exists(directory):
+ fabric.api.run('rm -rf {0}'.format(directory))
+
+ @contextlib.contextmanager
+    def _ssh_env(self):
+        with self._capfd.disabled():
+            with context_managers.settings(fabric.api.hide('everything'),
+                                           host_string='localhost:{0}'.format(self._server.port),
+ **_FABRIC_ENV):
+ yield
+
+ def _execute(self,
+ env=None,
+ use_sudo=False,
+ hide_output=None,
+ process=None,
+ custom_input='',
+ test_operations=None,
+ commands=None):
+ process = process or {}
+ if env:
+ process.setdefault('env', {}).update(env)
+
+ test_operations = test_operations or [self.test_name]
+
+ local_script_path = os.path.join(resources.DIR, 'scripts', 'test_ssh.sh')
+ script_path = os.path.basename(local_script_path)
+ self._upload(local_script_path, script_path)
+
+ if commands:
+ operation = operations.run_commands_with_ssh
+ else:
+ operation = operations.run_script_with_ssh
+
+ node = self._workflow_context.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ arguments = {
+ 'script_path': script_path,
+ 'fabric_env': _FABRIC_ENV,
+ 'process': process,
+ 'use_sudo': use_sudo,
+ 'custom_env_var': custom_input,
+ 'test_operation': '',
+ }
+ if hide_output:
+ arguments['hide_output'] = hide_output
+ if commands:
+ arguments['commands'] = commands
+ interface = mock.models.create_interface(
+ node.service,
+ 'test',
+ 'op',
+ operation_kwargs=dict(
+ function='{0}.{1}'.format(
+ operations.__name__,
+ operation.__name__),
+ arguments=arguments)
+ )
+ node.interfaces[interface.name] = interface
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ ops = []
+ for test_operation in test_operations:
+ op_arguments = arguments.copy()
+ op_arguments['test_operation'] = test_operation
+ ops.append(api.task.OperationTask(
+ node,
+ interface_name='test',
+ operation_name='op',
+ arguments=op_arguments))
+
+ graph.sequence(*ops)
+ return graph
+ tasks_graph = mock_workflow(ctx=self._workflow_context) # pylint: disable=no-value-for-parameter
+ graph_compiler.GraphCompiler(
+ self._workflow_context, self._executor.__class__).compile(tasks_graph)
+ eng = engine.Engine({self._executor.__class__: self._executor})
+ eng.execute(self._workflow_context)
+ return self._workflow_context.model.node.get_by_name(
+ mock.models.DEPENDENCY_NODE_NAME).attributes
+
+ def _execute_and_get_task_exception(self, *args, **kwargs):
+ signal = events.on_failure_task_signal
+ with events_collector(signal) as collected:
+ with pytest.raises(ExecutorException):
+ self._execute(*args, **kwargs)
+ return collected[signal][0]['kwargs']['exception']
+
+ def _upload(self, source, path):
+ self._workflow_context.resource.service.upload(
+ entry_id=str(self._workflow_context.service.id),
+ source=source,
+ path=path)
+
+ @pytest.fixture
+ def executor(self):
+ result = process.ProcessExecutor()
+ try:
+ yield result
+ finally:
+ result.close()
+
+ @pytest.fixture
+ def workflow_context(self, tmpdir):
+ workflow_context = mock.context.simple(str(tmpdir))
+ workflow_context.states = []
+ workflow_context.exception = None
+ yield workflow_context
+ storage.release_sqlite_storage(workflow_context.model)
+
+
+class TestFabricEnvHideGroupsAndRunCommands(object):
+
+ def test_fabric_env_default_override(self):
+ # first sanity for no override
+ self._run()
+ assert self.mock.settings_merged['timeout'] == constants.FABRIC_ENV_DEFAULTS['timeout']
+ # now override
+ invocation_fabric_env = self.default_fabric_env.copy()
+ timeout = 1000000
+ invocation_fabric_env['timeout'] = timeout
+ self._run(fabric_env=invocation_fabric_env)
+ assert self.mock.settings_merged['timeout'] == timeout
+
+ def test_implicit_host_string(self, mocker):
+ expected_host_address = '1.1.1.1'
+ mocker.patch.object(self._Ctx.task.actor, 'host')
+ mocker.patch.object(self._Ctx.task.actor.host, 'host_address', expected_host_address)
+ fabric_env = self.default_fabric_env.copy()
+ del fabric_env['host_string']
+ self._run(fabric_env=fabric_env)
+ assert self.mock.settings_merged['host_string'] == expected_host_address
+
+ def test_explicit_host_string(self):
+ fabric_env = self.default_fabric_env.copy()
+ host_string = 'explicit_host_string'
+ fabric_env['host_string'] = host_string
+ self._run(fabric_env=fabric_env)
+ assert self.mock.settings_merged['host_string'] == host_string
+
+ def test_override_warn_only(self):
+ fabric_env = self.default_fabric_env.copy()
+ self._run(fabric_env=fabric_env)
+ assert self.mock.settings_merged['warn_only'] is True
+ fabric_env = self.default_fabric_env.copy()
+ fabric_env['warn_only'] = False
+ self._run(fabric_env=fabric_env)
+ assert self.mock.settings_merged['warn_only'] is False
+
+ def test_missing_host_string(self):
+ with pytest.raises(TaskAbortException) as exc_ctx:
+ fabric_env = self.default_fabric_env.copy()
+ del fabric_env['host_string']
+ self._run(fabric_env=fabric_env)
+ assert '`host_string` not supplied' in str(exc_ctx.value)
+
+ def test_missing_user(self):
+ with pytest.raises(TaskAbortException) as exc_ctx:
+ fabric_env = self.default_fabric_env.copy()
+ del fabric_env['user']
+ self._run(fabric_env=fabric_env)
+ assert '`user` not supplied' in str(exc_ctx.value)
+
+ def test_missing_key_or_password(self):
+ with pytest.raises(TaskAbortException) as exc_ctx:
+ fabric_env = self.default_fabric_env.copy()
+ del fabric_env['key_filename']
+ self._run(fabric_env=fabric_env)
+ assert 'Access credentials not supplied' in str(exc_ctx.value)
+
+ def test_hide_in_settings_and_non_viable_groups(self):
+ groups = ('running', 'stdout')
+ self._run(hide_output=groups)
+ assert set(self.mock.settings_merged['hide_output']) == set(groups)
+ with pytest.raises(TaskAbortException) as exc_ctx:
+ self._run(hide_output=('running', 'bla'))
+ assert '`hide_output` must be a subset of' in str(exc_ctx.value)
+
+ def test_run_commands(self):
+ def test(use_sudo):
+ commands = ['command1', 'command2']
+ self._run(
+ commands=commands,
+ use_sudo=use_sudo)
+ assert all(item in self.mock.settings_merged.items() for
+ item in self.default_fabric_env.items())
+ assert self.mock.settings_merged['warn_only'] is True
+ assert self.mock.settings_merged['use_sudo'] == use_sudo
+ assert self.mock.commands == commands
+ self.mock.settings_merged = {}
+ self.mock.commands = []
+ test(use_sudo=False)
+ test(use_sudo=True)
+
+ def test_failed_command(self):
+ with pytest.raises(ProcessException) as exc_ctx:
+ self._run(commands=['fail'])
+ exception = exc_ctx.value
+ assert exception.stdout == self.MockCommandResult.stdout
+ assert exception.stderr == self.MockCommandResult.stderr
+ assert exception.command == self.MockCommandResult.command
+ assert exception.exit_code == self.MockCommandResult.return_code
+
+ class MockCommandResult(object):
+ stdout = 'mock_stdout'
+ stderr = 'mock_stderr'
+ command = 'mock_command'
+ return_code = 1
+
+ def __init__(self, failed):
+ self.failed = failed
+
+ class MockFabricApi(object):
+
+ def __init__(self):
+ self.commands = []
+ self.settings_merged = {}
+
+ @contextlib.contextmanager
+ def settings(self, *args, **kwargs):
+ self.settings_merged.update(kwargs)
+ if args:
+ groups = args[0]
+ self.settings_merged.update({'hide_output': groups})
+ yield
+
+ def run(self, command):
+ self.commands.append(command)
+ self.settings_merged['use_sudo'] = False
+ return TestFabricEnvHideGroupsAndRunCommands.MockCommandResult(command == 'fail')
+
+ def sudo(self, command):
+ self.commands.append(command)
+ self.settings_merged['use_sudo'] = True
+ return TestFabricEnvHideGroupsAndRunCommands.MockCommandResult(command == 'fail')
+
+ def hide(self, *groups):
+ return groups
+
+ def exists(self, *args, **kwargs):
+ raise RuntimeError
+
+ class _Ctx(object):
+ INSTRUMENTATION_FIELDS = ()
+
+ class Task(object):
+ @staticmethod
+ def abort(message=None):
+ models.Task.abort(message)
+ actor = None
+
+ class Actor(object):
+ host = None
+
+ class Model(object):
+ @contextlib.contextmanager
+ def instrument(self, *args, **kwargs):
+ yield
+ task = Task
+ task.actor = Actor
+ model = Model()
+ logger = logging.getLogger()
+
+ @staticmethod
+ @contextlib.contextmanager
+ def _mock_self_logging(*args, **kwargs):
+ yield
+ _Ctx.logging_handlers = _mock_self_logging
+
+ @pytest.fixture(autouse=True)
+ def _setup(self, mocker):
+ self.default_fabric_env = {
+ 'host_string': 'test',
+ 'user': 'test',
+ 'key_filename': 'test',
+ }
+ self.mock = self.MockFabricApi()
+ mocker.patch('fabric.api', self.mock)
+
+ def _run(self,
+ commands=(),
+ fabric_env=None,
+ process=None,
+ use_sudo=False,
+ hide_output=None):
+ operations.run_commands_with_ssh(
+ ctx=self._Ctx,
+ commands=commands,
+ process=process,
+ fabric_env=fabric_env or self.default_fabric_env,
+ use_sudo=use_sudo,
+ hide_output=hide_output)
+
+
+class TestUtilityFunctions(object):
+
+ def test_paths(self):
+ base_dir = '/path'
+ local_script_path = '/local/script/path.py'
+ paths = ssh_operations._Paths(base_dir=base_dir,
+ local_script_path=local_script_path)
+ assert paths.local_script_path == local_script_path
+ assert paths.remote_ctx_dir == base_dir
+ assert paths.base_script_path == 'path.py'
+ assert paths.remote_ctx_path == '/path/ctx'
+ assert paths.remote_scripts_dir == '/path/scripts'
+ assert paths.remote_work_dir == '/path/work'
+ assert paths.remote_env_script_path.startswith('/path/scripts/env-path.py-')
+ assert paths.remote_script_path.startswith('/path/scripts/path.py-')
+
+ def test_write_environment_script_file(self):
+ base_dir = '/path'
+ local_script_path = '/local/script/path.py'
+ paths = ssh_operations._Paths(base_dir=base_dir,
+ local_script_path=local_script_path)
+ env = {'one': "'1'"}
+ local_socket_url = 'local_socket_url'
+ remote_socket_url = 'remote_socket_url'
+ env_script_lines = set([l for l in ssh_operations._write_environment_script_file(
+ process={'env': env},
+ paths=paths,
+ local_socket_url=local_socket_url,
+ remote_socket_url=remote_socket_url
+ ).getvalue().split('\n') if l])
+ expected_env_script_lines = set([
+ 'export PATH=/path:$PATH',
+ 'export PYTHONPATH=/path:$PYTHONPATH',
+ 'chmod +x /path/ctx',
+ 'chmod +x {0}'.format(paths.remote_script_path),
+ 'export CTX_SOCKET_URL={0}'.format(remote_socket_url),
+ 'export LOCAL_CTX_SOCKET_URL={0}'.format(local_socket_url),
+ 'export one=\'1\''
+ ])
+ assert env_script_lines == expected_env_script_lines
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/.pylintrc b/azure/aria/aria-extension-cloudify/src/aria/tests/.pylintrc
new file mode 100644
index 0000000..0352dd3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/.pylintrc
@@ -0,0 +1,422 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[MASTER]
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=.git
+
+# Add files or directories matching the regex patterns to the blacklist. The
+# regex matches against base names, not paths.
+ignore-patterns=
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Use multiple processes to speed up Pylint.
+jobs=4
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=
+
+# Allow optimization of some AST trees. This will activate a peephole AST
+# optimizer, which will apply various small optimizations. For instance, it can
+# be used to obtain the result of joining multiple strings with the addition
+# operator. Joining a lot of strings can lead to a maximum recursion error in
+# Pylint and this flag can prevent that. It has one side effect, the resulting
+# AST will be different than the one from reality. This option is deprecated
+# and it will be removed in Pylint 2.0.
+optimize-ast=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifiers separated by comma (,) or put this option
+# multiple times (only on the command line, not in the configuration file where
+# it should appear only once). See also the "--disable" option for examples.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once). You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use "--disable=all --enable=classes
+# --disable=W"
+disable=import-star-module-level,old-octal-literal,oct-method,print-statement,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating,redefined-builtin,no-self-use,missing-docstring,attribute-defined-outside-init,redefined-outer-name,import-error,redefined-variable-type,broad-except,protected-access,global-statement,too-many-locals,abstract-method,no-member,unused-argument
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html. You can also give a reporter class, e.g.
+# mypackage.mymodule.MyReporterClass.
+output-format=colorized
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file named "pylint_global.[txt|html]". This option is deprecated
+# and it will be removed in Pylint 2.0.
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=yes
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables error, warning, refactor, convention
+# and statement, which respectively hold the number of messages of each
+# category and the total number of statements analyzed. This is used by the
+# global evaluation report (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=(_+[a-zA-Z0-9]*?$)|dummy|args|kwargs
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+# List of qualified module names which can have objects that can redefine
+# builtins.
+redefining-builtins-modules=six.moves,future.builtins
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains the private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words in the private dictionary indicated by
+# the --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take into consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[BASIC]
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,v,f,ex,e,_,id
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# List of decorators that produce properties, such as abc.abstractproperty. Add
+# to this list to register other decorators that produce valid properties.
+property-classes=abc.abstractproperty
+
+# Regular expression matching correct function names
+function-rgx=[a-z_][a-z0-9_]*$
+
+# Naming hint for function names
+function-name-hint=[a-z_][a-z0-9_]*$
+
+# Regular expression matching correct variable names
+variable-rgx=[a-z_][a-z0-9_]*$
+
+# Naming hint for variable names
+variable-name-hint=[a-z_][a-z0-9_]*$
+
+# Regular expression matching correct constant names
+const-rgx=(([A-Za-z_][A-Za-z0-9_]*)|(__.*__))$
+
+# Naming hint for constant names
+const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct attribute names
+attr-rgx=[a-z_][a-z0-9_]*$
+
+# Naming hint for attribute names
+attr-name-hint=[a-z_][a-z0-9_]*$
+
+# Regular expression matching correct argument names
+argument-rgx=[a-z_][a-z0-9_]*$
+
+# Naming hint for argument names
+argument-name-hint=[a-z_][a-z0-9_]*$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]*|(__.*__))$
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]*|(__.*__))$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression matching correct method names
+method-rgx=[a-z_][a-z0-9_]*$
+
+# Naming hint for method names
+method-name-hint=[a-z_][a-z0-9_]*$
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=^_
+
+# Minimum line length for functions/classes that require docstrings; shorter
+# ones are exempt.
+docstring-min-length=-1
+
+
+[ELIF]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
+
+
+[SIMILARITIES]
+
+# Minimum number of lines for a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in a mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis).
+# It supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes=optparse.Values,thread._local,_thread._local
+
+# List of members which are set dynamically and missed by the pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+generated-members=
+
+# List of decorators that produce context managers, such as
+# contextlib.contextmanager. Add to this list to register other decorators that
+# produce valid context managers.
+contextmanager-decorators=contextlib.contextmanager
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
+no-space-check=trailing-comma,dict-separator
+
+# Maximum number of lines in a module
+max-module-lines=1500
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=15
+
+# Argument names that match this expression will be ignored. Defaults to names
+# with a leading underscore.
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=30
+
+# Maximum number of return / yield for function / method body
+max-returns=10
+
+# Maximum number of branches for function / method body
+max-branches=15
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=25
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=0
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=50
+
+# Maximum number of boolean expressions in an if statement
+max-bool-expr=5
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,TERMIOS,Bastion,rexec
+
+# Create a graph of every (i.e. internal and external) dependency in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+# Force import order to recognize a module as part of the standard
+# compatibility libraries.
+known-standard-library=
+
+# Force import order to recognize a module as part of a third party library.
+known-third-party=enchant
+
+# Analyse import fallback blocks. This can be used to support both Python 2 and
+# 3 compatible code, which means that the block might have code that exists
+# only in one or another interpreter, leading to false positives when analysed.
+analyse-fallback-blocks=no
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
+
+
+[pre-commit-hook]
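+# Minimal pylint score required by the commit hook (presumably
+# git-pylint-commit-hook); commits scoring below this limit are blocked.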
+limit=9.5
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/__init__.py
new file mode 100644
index 0000000..ace30c8
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/__init__.py
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from . import storage, mock
+
+ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/cli/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/cli/__init__.py
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/cli/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/cli/base_test.py b/azure/aria/aria-extension-cloudify/src/aria/tests/cli/base_test.py
new file mode 100644
index 0000000..da9d72c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/cli/base_test.py
@@ -0,0 +1,77 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from StringIO import StringIO
+
+import pytest
+
+from . import runner
+from . import utils
+
+
+@pytest.fixture
+def mock_storage():
+ return utils.MockStorage()
+
+
+@pytest.mark.usefixtures("redirect_logger")
+class TestCliBase(object):
+
+ @staticmethod
+ @pytest.fixture(scope="class")
+ def redirect_logger():
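+ # Redirect the 'aria.cli.main' logger into the class-level StringIO for
+ # the duration of the test class; the original handlers and level are
+ # restored afterwards from _default_logger_config.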
+
+ utils.setup_logger(logger_name='aria.cli.main',
+ handlers=[logging.StreamHandler(TestCliBase._logger_output)],
+ logger_format='%(message)s')
+ yield
+ utils.setup_logger(logger_name='aria.cli.main',
+ handlers=_default_logger_config['handlers'],
+ level=_default_logger_config['level'])
+
+ _logger_output = StringIO()
+
+ def invoke(self, command):
+ self._logger_output.truncate(0)
+ return runner.invoke(command)
+
+ @property
+ def logger_output_string(self):
+ return self._logger_output.getvalue()
+
+
+def assert_exception_raised(outcome, expected_exception, expected_msg=''):
+ assert isinstance(outcome.exception, expected_exception)
+ assert expected_msg in str(outcome.exception)
+
+
+# This exists because I wanted to use monkeypatch to replace a function with
+# one that raises an exception. Doing that with an in-place lambda isn't
+# trivial, so this helper builds and returns the raising function instead.
+def raise_exception(exception, msg=''):
+
+ def inner(*args, **kwargs):
+ raise exception(msg)
+
+ return inner
+
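+# Example:
+#   monkeypatch.setattr(Core, 'delete_service_template',
+#                       raise_exception(storage_exceptions.StorageError))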
+
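+# Snapshot the logger's configuration at import time so that redirect_logger
+# can restore the original handlers and level after the test class finishes.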
+def get_default_logger_config():
+ logger = logging.getLogger('aria.cli.main')
+ return {'handlers': logger.handlers,
+ 'level': logger.level}
+
+_default_logger_config = get_default_logger_config()
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/cli/runner.py b/azure/aria/aria-extension-cloudify/src/aria/tests/cli/runner.py
new file mode 100644
index 0000000..7e4243b
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/cli/runner.py
@@ -0,0 +1,27 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import click.testing
+
+import aria.cli.commands as commands
+
+
+def invoke(command_string):
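+ # Split a CLI string such as 'node_templates show 1' into the command group
+ # ('node_templates'), the sub-command ('show') and its arguments, then
+ # resolve them on aria.cli.commands and execute via click's test runner.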
+ command_list = command_string.split()
+ command, sub, args = command_list[0], command_list[1], command_list[2:]
+ runner = click.testing.CliRunner()
+ outcome = runner.invoke(getattr(
+ getattr(commands, command), sub), args)
+ return outcome
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/cli/test_node_templates.py b/azure/aria/aria-extension-cloudify/src/aria/tests/cli/test_node_templates.py
new file mode 100644
index 0000000..ff7ff28
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/cli/test_node_templates.py
@@ -0,0 +1,133 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+from mock import ANY, MagicMock
+
+from aria.cli.env import _Environment
+
+from .base_test import ( # pylint: disable=unused-import
+ TestCliBase,
+ mock_storage
+)
+from ..mock import models as mock_models
+
+
+class TestNodeTemplatesShow(TestCliBase):
+
+ def test_header_strings(self, monkeypatch, mock_storage):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('node_templates show 1')
+ assert 'Showing node template 1' in self.logger_output_string
+ assert 'Node template properties:' in self.logger_output_string
+ assert 'Nodes:' in self.logger_output_string
+
+ def test_no_properties_no_nodes(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('node_templates show 1')
+
+ assert 'No properties' in self.logger_output_string
+ assert 'prop1' not in self.logger_output_string
+ assert 'value1' not in self.logger_output_string
+ assert 'No nodes' in self.logger_output_string
+ assert mock_models.NODE_NAME not in self.logger_output_string
+
+ def test_one_property_no_nodes(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ m = MagicMock(return_value=mock_models.create_node_template_with_dependencies(
+ include_property=True))
+ monkeypatch.setattr(mock_storage.node_template, 'get', m)
+ self.invoke('node_templates show 2')
+ assert 'No properties' not in self.logger_output_string
+ assert 'prop1' in self.logger_output_string and 'value1' in self.logger_output_string
+ assert 'No nodes' in self.logger_output_string
+ assert mock_models.NODE_NAME not in self.logger_output_string
+
+ def test_no_properties_one_node(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ m = MagicMock(return_value=mock_models.create_node_template_with_dependencies(
+ include_node=True))
+ monkeypatch.setattr(mock_storage.node_template, 'get', m)
+ self.invoke('node_templates show 3')
+ assert 'No properties' in self.logger_output_string
+ assert 'prop1' not in self.logger_output_string
+ assert 'value1' not in self.logger_output_string
+ assert 'No nodes' not in self.logger_output_string
+ assert mock_models.NODE_NAME in self.logger_output_string
+
+ def test_one_property_one_node(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ m = MagicMock(return_value=mock_models.create_node_template_with_dependencies(
+ include_node=True, include_property=True))
+ monkeypatch.setattr(mock_storage.node_template, 'get', m)
+ self.invoke('node_templates show 4')
+ assert 'No properties' not in self.logger_output_string
+ assert 'prop1' in self.logger_output_string and 'value1' in self.logger_output_string
+ assert 'No nodes' not in self.logger_output_string
+ assert mock_models.NODE_NAME in self.logger_output_string
+
+
+class TestNodeTemplatesList(TestCliBase):
+
+ @pytest.mark.parametrize('sort_by, order, sort_by_in_output, order_in_output', [
+ ('', '', 'service_template_name', 'asc'),
+ ('', ' --descending', 'service_template_name', 'desc'),
+ (' --sort-by name', '', 'name', 'asc'),
+ (' --sort-by name', ' --descending', 'name', 'desc')
+ ])
+ def test_list_specified_service_template(self, monkeypatch, mock_storage, sort_by, order,
+ sort_by_in_output, order_in_output):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('node_templates list -t {service_template_name}{sort_by}{order}'
+ .format(service_template_name=mock_models.SERVICE_TEMPLATE_NAME,
+ sort_by=sort_by,
+ order=order))
+ assert 'Listing node templates for service template {name}...'\
+ .format(name=mock_models.SERVICE_TEMPLATE_NAME) in self.logger_output_string
+ assert 'Listing all node templates...' not in self.logger_output_string
+
+ node_templates_list = mock_storage.node_template.list
+ node_templates_list.assert_called_once_with(sort={sort_by_in_output: order_in_output},
+ filters={'service_template': ANY})
+ assert 'Node templates:' in self.logger_output_string
+ assert mock_models.SERVICE_TEMPLATE_NAME in self.logger_output_string
+ assert mock_models.NODE_TEMPLATE_NAME in self.logger_output_string
+
+ @pytest.mark.parametrize('sort_by, order, sort_by_in_output, order_in_output', [
+ ('', '', 'service_template_name', 'asc'),
+ ('', ' --descending', 'service_template_name', 'desc'),
+ (' --sort-by name', '', 'name', 'asc'),
+ (' --sort-by name', ' --descending', 'name', 'desc')
+ ])
+ def test_list_no_specified_service_template(self, monkeypatch, mock_storage, sort_by, order,
+ sort_by_in_output, order_in_output):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('node_templates list{sort_by}{order}'.format(sort_by=sort_by, order=order))
+ assert 'Listing all node templates...' in self.logger_output_string
+ assert 'Listing node templates for service template {name}...'\
+ .format(name=mock_models.SERVICE_TEMPLATE_NAME) not in self.logger_output_string
+
+ node_templates_list = mock_storage.node_template.list
+ node_templates_list.assert_called_once_with(sort={sort_by_in_output: order_in_output},
+ filters={})
+ assert 'Node templates:' in self.logger_output_string
+ assert mock_models.SERVICE_TEMPLATE_NAME in self.logger_output_string
+ assert mock_models.NODE_TEMPLATE_NAME in self.logger_output_string
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/cli/test_nodes.py b/azure/aria/aria-extension-cloudify/src/aria/tests/cli/test_nodes.py
new file mode 100644
index 0000000..0233989
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/cli/test_nodes.py
@@ -0,0 +1,101 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+import mock
+
+from aria.cli.env import _Environment
+
+from .base_test import ( # pylint: disable=unused-import
+ TestCliBase,
+ mock_storage
+)
+from ..mock import models as mock_models
+
+
+class TestNodesShow(TestCliBase):
+
+ def test_header_strings(self, monkeypatch, mock_storage):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('nodes show 1')
+ assert 'Showing node 1' in self.logger_output_string
+ assert 'Node:' in self.logger_output_string
+ assert 'Node attributes:' in self.logger_output_string
+
+ def test_no_attributes(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('nodes show 2')
+ assert 'No attributes' in self.logger_output_string
+ assert 'attribute1' not in self.logger_output_string
+ assert 'value1' not in self.logger_output_string
+
+ def test_one_attribute(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ m = mock.MagicMock(
+ return_value=mock_models.create_node_with_dependencies(include_attribute=True))
+ monkeypatch.setattr(mock_storage.node, 'get', m)
+ self.invoke('nodes show 3')
+ assert 'No attributes' not in self.logger_output_string
+ assert 'attribute1' in self.logger_output_string
+ assert 'value1' in self.logger_output_string
+
+
+class TestNodesList(TestCliBase):
+
+ @pytest.mark.parametrize('sort_by, order, sort_by_in_output, order_in_output', [
+ ('', '', 'service_name', 'asc'),
+ ('', ' --descending', 'service_name', 'desc'),
+ (' --sort-by name', '', 'name', 'asc'),
+ (' --sort-by name', ' --descending', 'name', 'desc')
+ ])
+ def test_list_specified_service(self, monkeypatch, mock_storage, sort_by, order,
+ sort_by_in_output, order_in_output):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('nodes list -s test_s{sort_by}{order}'.format(sort_by=sort_by,
+ order=order))
+ assert 'Listing nodes for service test_s...' in self.logger_output_string
+ assert 'Listing all nodes...' not in self.logger_output_string
+
+ nodes_list = mock_storage.node.list
+ nodes_list.assert_called_once_with(sort={sort_by_in_output: order_in_output},
+ filters={'service': mock.ANY})
+ assert 'Nodes:' in self.logger_output_string
+ assert 'test_s' in self.logger_output_string
+ assert 'test_n' in self.logger_output_string
+
+ @pytest.mark.parametrize('sort_by, order, sort_by_in_output, order_in_output', [
+ ('', '', 'service_name', 'asc'),
+ ('', ' --descending', 'service_name', 'desc'),
+ (' --sort-by name', '', 'name', 'asc'),
+ (' --sort-by name', ' --descending', 'name', 'desc')
+ ])
+ def test_list_no_specified_service(self, monkeypatch, mock_storage, sort_by, order,
+ sort_by_in_output, order_in_output):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('nodes list{sort_by}{order}'.format(sort_by=sort_by,
+ order=order))
+ assert 'Listing nodes for service test_s...' not in self.logger_output_string
+ assert 'Listing all nodes...' in self.logger_output_string
+
+ nodes_list = mock_storage.node.list
+ nodes_list.assert_called_once_with(sort={sort_by_in_output: order_in_output},
+ filters={})
+ assert 'Nodes:' in self.logger_output_string
+ assert 'test_s' in self.logger_output_string
+ assert 'test_n' in self.logger_output_string
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/cli/test_service_templates.py b/azure/aria/aria-extension-cloudify/src/aria/tests/cli/test_service_templates.py
new file mode 100644
index 0000000..cc0150e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/cli/test_service_templates.py
@@ -0,0 +1,273 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import zipfile
+
+import pytest
+import mock
+
+from aria.cli import service_template_utils, csar
+from aria.cli.env import _Environment
+from aria.core import Core
+from aria.exceptions import AriaException
+from aria.storage import exceptions as storage_exceptions
+
+from .base_test import ( # pylint: disable=unused-import
+ TestCliBase,
+ assert_exception_raised,
+ raise_exception,
+ mock_storage
+)
+from ..mock import models as mock_models
+
+
+class TestServiceTemplatesShow(TestCliBase):
+
+ def test_header_string(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('service_templates show test_st')
+ assert 'Showing service template test_st...' in self.logger_output_string
+
+ def test_no_services_no_description(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('service_templates show test_st')
+
+ assert 'Description:' not in self.logger_output_string
+ assert 'Existing services:' not in self.logger_output_string
+
+ def test_no_services_yes_description(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ st = mock_models.create_service_template(description='test_description')
+ monkeypatch.setattr(mock_storage.service_template, 'get_by_name',
+ mock.MagicMock(return_value=st))
+
+ self.invoke('service_templates show test_st')
+ assert 'Description:' in self.logger_output_string
+ assert 'test_description' in self.logger_output_string
+ assert 'Existing services:' not in self.logger_output_string
+
+ def test_one_service_no_description(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ st = mock_models.create_service_template()
+ s = mock_models.create_service(st)
+ st.services = {s.name: s}
+ monkeypatch.setattr(mock_storage.service_template, 'get_by_name',
+ mock.MagicMock(return_value=st))
+
+ self.invoke('service_templates show test_st')
+
+ assert 'Description:' not in self.logger_output_string
+ assert 'Existing services:' in self.logger_output_string
+ assert mock_models.SERVICE_NAME in self.logger_output_string
+
+ def test_one_service_yes_description(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ st = mock_models.create_service_template(description='test_description')
+ s = mock_models.create_service(st)
+ st.services = {s.name: s}
+ monkeypatch.setattr(mock_storage.service_template, 'get_by_name',
+ mock.MagicMock(return_value=st))
+
+ self.invoke('service_templates show test_st')
+
+ assert 'Description:' in self.logger_output_string
+ assert 'test_description' in self.logger_output_string
+ assert 'Existing services:' in self.logger_output_string
+ assert 'test_s' in self.logger_output_string
+
+
+class TestServiceTemplatesList(TestCliBase):
+
+ def test_header_string(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('service_templates list')
+ assert 'Listing all service templates...' in self.logger_output_string
+
+ @pytest.mark.parametrize('sort_by, order, sort_by_in_output, order_in_output', [
+ ('', '', 'created_at', 'asc'),
+ ('', ' --descending', 'created_at', 'desc'),
+ (' --sort-by name', '', 'name', 'asc'),
+ (' --sort-by name', ' --descending', 'name', 'desc')
+ ])
+ def test_all_sorting_combinations(self, monkeypatch, mock_storage, sort_by, order,
+ sort_by_in_output, order_in_output):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('service_templates list{sort_by}{order}'.format(sort_by=sort_by, order=order))
+
+ mock_storage.service_template.list.assert_called_with(
+ sort={sort_by_in_output: order_in_output})
+ assert mock_models.SERVICE_TEMPLATE_NAME in self.logger_output_string
+
+
+class TestServiceTemplatesStore(TestCliBase):
+
+ def test_header_string(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('service_templates store stubpath test_st')
+ assert 'Storing service template test_st...' in self.logger_output_string
+
+ def test_store_no_exception(self, monkeypatch, mock_object):
+
+ monkeypatch.setattr(Core, 'create_service_template', mock_object)
+ monkeypatch.setattr(service_template_utils, 'get', mock_object)
+ monkeypatch.setattr(os.path, 'dirname', mock_object)
+ self.invoke('service_templates store stubpath {name}'.format(
+ name=mock_models.SERVICE_TEMPLATE_NAME))
+ assert 'Service template {name} stored'.format(
+ name=mock_models.SERVICE_TEMPLATE_NAME) in self.logger_output_string
+
+ def test_store_relative_path_single_yaml_file(self, monkeypatch, mock_object):
+ monkeypatch.setattr(Core, 'create_service_template', mock_object)
+ monkeypatch.setattr(os.path, 'isfile', lambda x: True)
+ monkeypatch.setattr(service_template_utils, '_is_archive', lambda x: False)
+
+ self.invoke('service_templates store service_template.yaml {name}'.format(
+ name=mock_models.SERVICE_TEMPLATE_NAME))
+
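+ # a bare file name should be resolved against the current working directory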
+ mock_object.assert_called_with(os.path.join(os.getcwd(), 'service_template.yaml'),
+ mock.ANY,
+ mock.ANY)
+
+ def test_store_raises_exception_resulting_from_name_uniqueness(self, monkeypatch, mock_object):
+
+ monkeypatch.setattr(service_template_utils, 'get', mock_object)
+ monkeypatch.setattr(Core,
+ 'create_service_template',
+ raise_exception(storage_exceptions.NotFoundError,
+ msg='UNIQUE constraint failed'))
+ monkeypatch.setattr(os.path, 'dirname', mock_object)
+
+ assert_exception_raised(
+ self.invoke('service_templates store stubpath test_st'),
+ expected_exception=storage_exceptions.NotFoundError,
+ expected_msg='There already a exists a service template with the same name')
+
+ def test_store_raises_exception(self, monkeypatch, mock_object):
+
+ monkeypatch.setattr(service_template_utils, 'get', mock_object)
+ monkeypatch.setattr(Core,
+ 'create_service_template',
+ raise_exception(storage_exceptions.NotFoundError))
+ monkeypatch.setattr(os.path, 'dirname', mock_object)
+
+ assert_exception_raised(
+ self.invoke('service_templates store stubpath test_st'),
+ expected_exception=storage_exceptions.StorageError)
+
+
+class TestServiceTemplatesDelete(TestCliBase):
+
+ def test_header_string(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('service_templates delete test_st')
+ assert 'Deleting service template test_st...' in self.logger_output_string
+
+ def test_delete_no_exception(self, monkeypatch, mock_object):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_object)
+ monkeypatch.setattr(Core, 'delete_service_template', mock_object)
+ self.invoke('service_templates delete {name}'.format(
+ name=mock_models.SERVICE_TEMPLATE_NAME))
+ assert 'Service template {name} deleted'.format(
+ name=mock_models.SERVICE_TEMPLATE_NAME) in self.logger_output_string
+
+ def test_delete_raises_exception(self, monkeypatch, mock_object):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_object)
+ monkeypatch.setattr(Core,
+ 'delete_service_template',
+ raise_exception(storage_exceptions.StorageError))
+
+ assert_exception_raised(
+ self.invoke('service_templates delete test_st'),
+ expected_exception=storage_exceptions.StorageError,
+ expected_msg='')
+
+
+class TestServiceTemplatesInputs(TestCliBase):
+
+ def test_header_string(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('service_templates inputs test_st')
+ assert 'Showing inputs for service template test_st...' in self.logger_output_string
+
+ def test_inputs_existing_inputs(self, monkeypatch, mock_storage):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ input_ = mock_models.create_input(name='input1', value='value1')
+ st = mock_models.create_service_template(inputs={'input1': input_})
+ monkeypatch.setattr(mock_storage.service_template, 'get_by_name',
+ mock.MagicMock(return_value=st))
+
+ self.invoke('service_templates inputs with_inputs')
+ assert 'input1' in self.logger_output_string and 'value1' in self.logger_output_string
+
+ def test_inputs_no_inputs(self, monkeypatch, mock_storage):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('service_templates inputs without_inputs')
+ assert 'No inputs' in self.logger_output_string
+
+
+class TestServiceTemplatesValidate(TestCliBase):
+
+ def test_header_string(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('service_templates validate stubpath')
+ assert 'Validating service template: stubpath' in self.logger_output_string
+
+ def test_validate_no_exception(self, monkeypatch, mock_object):
+ monkeypatch.setattr(Core, 'validate_service_template', mock_object)
+ monkeypatch.setattr(service_template_utils, 'get', mock_object)
+ self.invoke('service_templates validate stubpath')
+ assert 'Service template validated successfully' in self.logger_output_string
+
+ def test_validate_raises_exception(self, monkeypatch, mock_object):
+ monkeypatch.setattr(Core, 'validate_service_template', raise_exception(AriaException))
+ monkeypatch.setattr(service_template_utils, 'get', mock_object)
+ assert_exception_raised(
+ self.invoke('service_templates validate stubpath'),
+ expected_exception=AriaException)
+
+
+class TestServiceTemplatesCreateArchive(TestCliBase):
+
+ def test_header_string(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('service_templates create_archive stubpath stubdest')
+ assert 'Creating a CSAR archive' in self.logger_output_string
+
+ def test_create_archive_successful(self, monkeypatch, mock_object):
+ monkeypatch.setattr(csar, 'write', mock_object)
+ self.invoke('service_templates create_archive stubpath stubdest')
+ assert 'CSAR archive created at stubdest' in self.logger_output_string
+
+ def test_create_archive_from_relative_path(self, monkeypatch, mock_object):
+
+ monkeypatch.setattr(os.path, 'isfile', mock_object)
+ monkeypatch.setattr(zipfile, 'ZipFile', mock.MagicMock)
+
+ self.invoke('service_templates create_archive archive stubdest')
+ mock_object.assert_called_with(os.path.join(os.getcwd(), 'archive'))
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/cli/test_services.py b/azure/aria/aria-extension-cloudify/src/aria/tests/cli/test_services.py
new file mode 100644
index 0000000..7dc84bc
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/cli/test_services.py
@@ -0,0 +1,227 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+import mock
+
+from aria.cli.env import _Environment
+from aria.core import Core
+from aria.exceptions import DependentActiveExecutionsError, DependentAvailableNodesError
+from aria.modeling.exceptions import ParameterException
+from aria.storage import exceptions as storage_exceptions
+
+from .base_test import ( # pylint: disable=unused-import
+ TestCliBase,
+ raise_exception,
+ assert_exception_raised,
+ mock_storage
+)
+from ..mock import models as mock_models
+
+
+class TestServicesList(TestCliBase):
+
+ @pytest.mark.parametrize('sort_by, order, sort_by_in_output, order_in_output', [
+ ('', '', 'created_at', 'asc'),
+ ('', ' --descending', 'created_at', 'desc'),
+ (' --sort-by name', '', 'name', 'asc'),
+ (' --sort-by name', ' --descending', 'name', 'desc')
+ ])
+ def test_no_specified_service_template(self, monkeypatch, mock_storage, sort_by, order,
+ sort_by_in_output, order_in_output):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('services list{sort_by}{order}'.format(sort_by=sort_by, order=order))
+ assert 'Listing all services...' in self.logger_output_string
+ assert 'Listing services for service template' not in self.logger_output_string
+
+ mock_storage.service.list.assert_called_once_with(sort={sort_by_in_output: order_in_output},
+ filters={})
+ assert 'Services:' in self.logger_output_string
+ assert mock_models.SERVICE_TEMPLATE_NAME in self.logger_output_string
+ assert mock_models.SERVICE_NAME in self.logger_output_string
+
+ @pytest.mark.parametrize('sort_by, order, sort_by_in_output, order_in_output', [
+ ('', '', 'created_at', 'asc'),
+ ('', ' --descending', 'created_at', 'desc'),
+ (' --sort-by name', '', 'name', 'asc'),
+ (' --sort-by name', ' --descending', 'name', 'desc')
+ ])
+ def test_specified_service_template(self, monkeypatch, mock_storage, sort_by, order,
+ sort_by_in_output, order_in_output):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('services list -t test_st{sort_by}{order}'.format(sort_by=sort_by, order=order))
+ assert 'Listing services for service template test_st...' in self.logger_output_string
+ assert 'Listing all services...' not in self.logger_output_string
+
+ mock_storage.service.list.assert_called_once_with(sort={sort_by_in_output: order_in_output},
+ filters={'service_template': mock.ANY})
+ assert 'Services:' in self.logger_output_string
+ assert mock_models.SERVICE_TEMPLATE_NAME in self.logger_output_string
+ assert mock_models.SERVICE_NAME in self.logger_output_string
+
+
+class TestServicesCreate(TestCliBase):
+
+ def test_header_string(self, monkeypatch, mock_storage):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('services create -t test_st test_s')
+ assert 'Creating new service from service template test_st...' in self.logger_output_string
+
+ def test_no_exception(self, monkeypatch, mock_storage):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+
+ m = mock.MagicMock(return_value=mock_models.create_service_with_dependencies())
+ monkeypatch.setattr(Core, 'create_service', m)
+ self.invoke('services create -t test_st test_s')
+ assert "Service created. The service's name is test_s" in self.logger_output_string
+
+ def test_raises_storage_error_resulting_from_name_uniqueness(self, monkeypatch,
+ mock_storage):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ monkeypatch.setattr(Core,
+ 'create_service',
+ raise_exception(storage_exceptions.NotFoundError,
+ msg='UNIQUE constraint failed'))
+ assert_exception_raised(
+ self.invoke('services create -t test_st test_s'),
+ expected_exception=storage_exceptions.NotFoundError,
+ expected_msg='There already a exists a service with the same name')
+
+ assert "Service created. The service's name is test_s" not in self.logger_output_string
+
+ def test_raises_other_storage_error(self, monkeypatch, mock_object):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_object)
+ monkeypatch.setattr(Core,
+ 'create_service',
+ raise_exception(storage_exceptions.NotFoundError))
+
+ assert_exception_raised(
+ self.invoke('services create -t test_st test_s'),
+ expected_exception=storage_exceptions.NotFoundError)
+
+ assert "Service created. The service's name is test_s" not in self.logger_output_string
+
+ def test_raises_inputs_exception(self, monkeypatch, mock_storage):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ monkeypatch.setattr(Core,
+ 'create_service',
+ raise_exception(ParameterException))
+
+ assert_exception_raised(
+ self.invoke('services create -t with_inputs test_s'),
+ expected_exception=ParameterException)
+
+ assert "Service created. The service's name is test_s" not in self.logger_output_string
+
+
+class TestServicesDelete(TestCliBase):
+
+ def test_header_string(self, monkeypatch, mock_storage):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('services delete test_s')
+ assert 'Deleting service test_s...' in self.logger_output_string
+
+ def test_delete_no_exception(self, monkeypatch, mock_storage, mock_object):
+
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ monkeypatch.setattr(Core, 'delete_service', mock_object)
+ self.invoke('services delete test_s')
+ assert 'Service test_s deleted' in self.logger_output_string
+
+ def test_delete_active_execution_error(self, monkeypatch, mock_storage):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ mock_service_with_execution = \
+ mock.MagicMock(return_value=mock_models.create_service_with_dependencies(
+ include_execution=True))
+ monkeypatch.setattr(mock_storage.service, 'get', mock_service_with_execution)
+ assert_exception_raised(
+ self.invoke('services delete test_s'),
+ expected_exception=DependentActiveExecutionsError,
+ expected_msg="Can't delete service `{name}` - there is an active execution "
+ "for this service. Active execution ID: 1".format(
+ name=mock_models.SERVICE_NAME))
+
+ def test_delete_available_nodes_error(self, monkeypatch, mock_storage):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ assert_exception_raised(
+ self.invoke('services delete test_s'),
+ expected_exception=DependentAvailableNodesError,
+ expected_msg="Can't delete service `{name}` - there are available nodes "
+ "for this service. Available node IDs: 1".format(
+ name=mock_models.SERVICE_NAME))
+
+ def test_delete_available_nodes_error_with_force(self, monkeypatch, mock_storage):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('services delete service_with_available_nodes --force')
+
+ assert mock_storage.service.delete.call_count == 1
+ assert 'Service service_with_available_nodes deleted' in self.logger_output_string
+
+
+class TestServicesOutputs(TestCliBase):
+
+ def test_header_string(self, monkeypatch, mock_storage):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('services outputs test_s')
+ assert 'Showing outputs for service test_s...' in self.logger_output_string
+
+ def test_outputs_no_outputs(self, monkeypatch, mock_storage):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('services outputs service_with_no_outputs')
+
+ assert 'No outputs' in self.logger_output_string
+ assert 'output1' not in self.logger_output_string
+ assert 'value1' not in self.logger_output_string
+
+ def test_outputs_one_output(self, monkeypatch, mock_storage):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ s = mock_models.create_service_with_dependencies(include_output=True)
+ monkeypatch.setattr(mock_storage.service, 'get_by_name', mock.MagicMock(return_value=s))
+
+ self.invoke('services outputs test_s')
+
+ assert 'output1' in self.logger_output_string
+ assert 'value1' in self.logger_output_string
+ assert 'No outputs' not in self.logger_output_string
+
+
+class TestServicesInputs(TestCliBase):
+
+ def test_header_string(self, monkeypatch, mock_storage):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('services inputs test_s')
+ assert 'Showing inputs for service test_s...' in self.logger_output_string
+
+ def test_inputs_no_inputs(self, monkeypatch, mock_storage):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ self.invoke('services inputs service_with_no_inputs')
+
+ assert 'No inputs' in self.logger_output_string
+ assert 'input1' not in self.logger_output_string
+ assert 'value1' not in self.logger_output_string
+
+ def test_inputs_one_input(self, monkeypatch, mock_storage):
+ monkeypatch.setattr(_Environment, 'model_storage', mock_storage)
+ s = mock_models.create_service_with_dependencies(include_input=True)
+ monkeypatch.setattr(mock_storage.service, 'get_by_name', mock.MagicMock(return_value=s))
+
+ self.invoke('services inputs test_s')
+
+ assert 'input1' in self.logger_output_string
+ assert 'value1' in self.logger_output_string
+ assert 'No inputs' not in self.logger_output_string
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/cli/utils.py b/azure/aria/aria-extension-cloudify/src/aria/tests/cli/utils.py
new file mode 100644
index 0000000..a1e0c9a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/cli/utils.py
@@ -0,0 +1,101 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import sys
+
+from mock import MagicMock
+
+from ..mock import models as mock_models
+
+
+def setup_logger(logger_name,
+ level=logging.INFO,
+ handlers=None,
+ remove_existing_handlers=True,
+ logger_format=None,
+ propagate=True):
+ """
+ :param logger_name: Name of the logger.
+ :param level: Level for the logger (not for specific handler).
+ :param handlers: An optional list of handlers (formatter will be
+ overridden); If None, only a StreamHandler for
+ sys.stdout will be used.
+ :param remove_existing_handlers: Determines whether to remove existing
+ handlers before adding new ones
+ :param logger_format: the format this logger will have.
+ :param propagate: propagate the message the parent logger.
+ :return: A logger instance.
+ :rtype: logging.Logger
+ """
+
+ logger = logging.getLogger(logger_name)
+
+ if remove_existing_handlers:
+ # iterate over a copy, since removeHandler() mutates logger.handlers
+ for handler in list(logger.handlers):
+ logger.removeHandler(handler)
+
+ if handlers is None:
+ # default promised by the docstring above
+ handlers = [logging.StreamHandler(sys.stdout)]
+
+ for handler in handlers:
+ if logger_format:
+ formatter = logging.Formatter(fmt=logger_format)
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+
+ logger.setLevel(level)
+ if not propagate:
+ logger.propagate = False
+
+ return logger
+
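+# Example usage (mirrors TestCliBase.redirect_logger in base_test.py;
+# output_buffer stands for any writable stream, e.g. a StringIO instance):
+#   setup_logger(logger_name='aria.cli.main',
+#                handlers=[logging.StreamHandler(output_buffer)],
+#                logger_format='%(message)s')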
+
+class MockStorage(object):
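+ # Aggregates the per-model mock storages below; tests patch
+ # _Environment.model_storage with an instance of this class.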
+
+ def __init__(self):
+ self.service_template = MockServiceTemplateStorage()
+ self.service = MockServiceStorage()
+ self.node_template = MockNodeTemplateStorage()
+ self.node = MockNodeStorage()
+
+
+class MockServiceTemplateStorage(object):
+
+ def __init__(self):
+ self.list = MagicMock(return_value=[mock_models.create_service_template()])
+ self.get_by_name = MagicMock(return_value=mock_models.create_service_template())
+
+
+class MockServiceStorage(object):
+
+ def __init__(self):
+
+ self.s = mock_models.create_service_with_dependencies()
+
+ self.list = MagicMock(return_value=[self.s])
+ self.create = MagicMock(return_value=self.s)
+ self.get = MagicMock(
+ return_value=mock_models.create_service_with_dependencies(include_node=True))
+ self.get_by_name = MagicMock(return_value=self.s)
+ self.delete = MagicMock()
+
+
+class MockNodeTemplateStorage(object):
+ def __init__(self):
+ self.get = MagicMock(return_value=mock_models.create_node_template_with_dependencies())
+ self.list = MagicMock(return_value=[mock_models.create_node_template_with_dependencies()])
+
+
+class MockNodeStorage(object):
+ def __init__(self):
+ self.get = MagicMock(return_value=mock_models.create_node_with_dependencies())
+ self.list = MagicMock(return_value=[mock_models.create_node_with_dependencies()])
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/conftest.py b/azure/aria/aria-extension-cloudify/src/aria/tests/conftest.py
new file mode 100644
index 0000000..8f2c273
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/conftest.py
@@ -0,0 +1,47 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+import pytest
+
+import aria
+from aria import logger
+
+
+@pytest.fixture(scope='session', autouse=True)
+def install_aria_extensions():
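+ # Load ARIA extensions once for the whole test session so that every test
+ # runs with the same extended environment.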
+ aria.install_aria_extensions()
+
+
+@pytest.fixture(autouse=True)
+def logging_handler_cleanup(request):
+ """
+ Each time a test runs, the loggers do not clear. we need to manually clear them or we'd have
+ logging overload.
+
+ Since every type of logger (node/relationship/workflow) share the same name, we should
+ clear the logger each test. This should not happen in real world use.
+ :param request:
+ :return:
+ """
+ def clear_logging_handlers():
+ logging.getLogger(logger.TASK_LOGGER_NAME).handlers = []
+ request.addfinalizer(clear_logging_handlers)
+
+
+@pytest.fixture
+def mock_object(mocker):
+ return mocker.MagicMock()
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/end2end/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/end2end/__init__.py
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/end2end/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/end2end/test_hello_world.py b/azure/aria/aria-extension-cloudify/src/aria/tests/end2end/test_hello_world.py
new file mode 100644
index 0000000..094ffc3
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/end2end/test_hello_world.py
@@ -0,0 +1,61 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import requests
+
+from .testenv import testenv # pylint: disable=unused-import
+from .. import helpers
+
+
+def test_hello_world(testenv):
+ hello_world_template_uri = helpers.get_example_uri('hello-world', 'hello-world.yaml')
+ service_name = testenv.install_service(hello_world_template_uri)
+
+ try:
+ _verify_deployed_service_in_storage(service_name, testenv.model_storage)
+ _verify_webserver_up('http://localhost:9090')
+ finally:
+ # Even if some assertions failed, attempt to execute uninstall so the
+ # webserver process doesn't stay up once the test is finished
+ testenv.uninstall_service()
+
+ _verify_webserver_down('http://localhost:9090')
+ testenv.verify_clean_storage()
+
+
+def _verify_webserver_up(http_endpoint):
+ server_response = requests.get(http_endpoint, timeout=10)
+ assert server_response.status_code == 200
+
+
+def _verify_webserver_down(http_endpoint):
+ try:
+ requests.get(http_endpoint, timeout=10)
+ assert False, 'webserver at {0} is unexpectedly still up'.format(http_endpoint)
+ except requests.exceptions.ConnectionError:
+ pass
+
+
+def _verify_deployed_service_in_storage(service_name, model_storage):
+ service_templates = model_storage.service_template.list()
+ assert len(service_templates) == 1
+ assert len(service_templates[0].services) == 1
+ service = service_templates[0].services[service_name]
+ assert service.name == service_name
+ assert len(service.executions) == 1
+ assert len(service.nodes) == 2
+ assert service.outputs['port'].value == 9090
+ assert all(node.state == node.STARTED for node in service.nodes.itervalues())
+ assert len(service.executions[0].logs) > 0
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/end2end/test_nodecellar.py b/azure/aria/aria-extension-cloudify/src/aria/tests/end2end/test_nodecellar.py
new file mode 100644
index 0000000..e8cfa84
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/end2end/test_nodecellar.py
@@ -0,0 +1,42 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .testenv import testenv # pylint: disable=unused-import
+from .. import helpers
+
+
+def test_nodecellar(testenv):
+ nodecellar_template_uri = helpers.get_service_template_uri(
+ 'tosca-simple-1.0', 'node-cellar', 'node-cellar.yaml')
+
+ service_name = testenv.install_service(nodecellar_template_uri, dry=True)
+ _verify_deployed_service_in_storage(service_name, testenv.model_storage)
+
+ # testing dry execution of custom workflows
+ testenv.execute_workflow(service_name, 'maintenance_on', dry=True)
+ testenv.execute_workflow(service_name, 'maintenance_off', dry=True)
+
+ testenv.uninstall_service(dry=True)
+ testenv.verify_clean_storage()
+
+
+def _verify_deployed_service_in_storage(service_name, model_storage):
+ service_templates = model_storage.service_template.list()
+ assert len(service_templates) == 1
+ assert len(service_templates[0].services) == 1
+ service = service_templates[0].services[service_name]
+ assert service.name == service_name
+ assert len(service.executions) == 0 # dry executions leave no traces
+ assert len(service.nodes) == 15
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/end2end/testenv.py b/azure/aria/aria-extension-cloudify/src/aria/tests/end2end/testenv.py
new file mode 100644
index 0000000..43ec274
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/end2end/testenv.py
@@ -0,0 +1,102 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+import pytest
+import sh
+
+
+@pytest.fixture
+def testenv(tmpdir, request, monkeypatch):
+ test_name = request.node.name
+ workdir = str(tmpdir)
+
+ # Setting the workdir environment variable for the CLI
+ monkeypatch.setenv('ARIA_WORKDIR', workdir)
+ return TestEnvironment(workdir, test_name)
+
+
+class TestEnvironment(object):
+
+ def __init__(self, workdir, test_name):
+ self.workdir = workdir
+ self.test_name = test_name
+
+ self.cli = self._get_cli()
+ env = self._get_aria_env()
+ self.model_storage = env.model_storage
+ self.resource_storage = env.resource_storage
+ self.plugin_manager = env.plugin_manager
+
+ def install_service(self, service_template_path, dry=False, service_template_name=None,
+ service_name=None):
+ service_template_name = service_template_name or self.test_name
+ service_name = service_name or self.test_name
+
+ self.cli.service_templates.store(service_template_path, service_template_name)
+ self.cli.services.create(service_name, service_template_name=service_template_name)
+ self.execute_workflow(service_name, 'install', dry=dry)
+ return service_name
+
+ def uninstall_service(self, service_name=None, service_template_name=None, dry=False,
+ force_service_delete=False):
+ service_name = service_name or self.test_name
+ self.execute_workflow(service_name, 'uninstall', dry=dry)
+ self.cli.services.delete(service_name, force=force_service_delete)
+ self.cli.service_templates.delete(service_template_name or self.test_name)
+
+ def execute_workflow(self, service_name, workflow_name, dry=False):
+ self.cli.executions.start(workflow_name, service_name=service_name, dry=dry)
+
+ def verify_clean_storage(self):
+ assert len(self.model_storage.service_template.list()) == 0
+ assert len(self.model_storage.service.list()) == 0
+ assert len(self.model_storage.execution.list()) == 0
+ assert len(self.model_storage.node_template.list()) == 0
+ assert len(self.model_storage.node.list()) == 0
+ assert len(self.model_storage.log.list()) == 0
+
+ def _get_cli(self):
+ cli = sh.aria.bake('-vvv', _out=sys.stdout, _err=sys.stderr)
+
+ class PatchedCli(object):
+ """
+ The ``sh`` library supports underscore-dash auto-replacement for commands and option
+ flags yet not for subcommands (e.g. ``aria service-templates``). This class fixes this.
+ """
+ def __getattr__(self, attr):
+ if '_' in attr:
+ return cli.bake(attr.replace('_', '-'))
+ return getattr(cli, attr)
+
+ def __call__(self, *args, **kwargs):
+ """
+ This is to support the ``aria`` command itself (e.g. ``aria --version`` calls).
+ """
+ return cli(*args, **kwargs)
+
+ return PatchedCli()
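+
+ # For illustration: with the patch, ``self.cli.service_templates.store(p, n)``
+ # resolves to the dashed subcommand and ultimately runs
+ # ``aria -vvv service-templates store <p> <n>``.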
+
+ def _get_aria_env(self):
+ """
+ A somewhat hacky but most simple way of acquiring environment context such as the model
+ storage, resource storage, etc. Note that the ``ARIA_WORKDIR`` environment variable must be
+ exported before the import below is used, as the import itself will initialize the ``.aria``
+ directory.
+ """
+ from aria.cli import env as cli_env
+ reload(cli_env) # reloading the module in-between tests
+ return cli_env.env
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/fixtures.py b/azure/aria/aria-extension-cloudify/src/aria/tests/fixtures.py
new file mode 100644
index 0000000..3b1b9b5
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/fixtures.py
@@ -0,0 +1,70 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import shutil
+
+import pytest
+
+from aria import (
+ application_model_storage,
+ application_resource_storage
+)
+from aria.orchestrator import plugin
+from aria.storage import (
+ sql_mapi,
+ filesystem_rapi
+)
+
+from . import storage
+
+
+@pytest.fixture
+def inmemory_model():
+ model = application_model_storage(sql_mapi.SQLAlchemyModelAPI,
+ initiator=storage.init_inmemory_model_storage)
+ yield model
+ storage.release_sqlite_storage(model)
+
+
+@pytest.fixture
+def fs_model(tmpdir):
+ result = application_model_storage(sql_mapi.SQLAlchemyModelAPI,
+ initiator_kwargs=dict(base_dir=str(tmpdir)),
+ initiator=sql_mapi.init_storage)
+ yield result
+ storage.release_sqlite_storage(result)
+
+
+@pytest.fixture
+def resource_storage(tmpdir):
+ result = tmpdir.join('resources')
+ result.mkdir()
+ resource_storage = application_resource_storage(
+ filesystem_rapi.FileSystemResourceAPI,
+ api_kwargs=dict(directory=str(result)))
+ yield resource_storage
+ shutil.rmtree(str(result))
+
+
+@pytest.fixture
+def plugins_dir(tmpdir):
+ result = tmpdir.join('plugins')
+ result.mkdir()
+ return str(result)
+
+
+@pytest.fixture
+def plugin_manager(model, plugins_dir):
+ return plugin.PluginManager(model=model, plugins_dir=plugins_dir)
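+
+
+# Illustrative (assumed) test using one of these fixtures:
+#
+#     def test_round_trip(inmemory_model):
+#         template = mock.models.create_service_template()
+#         inmemory_model.service_template.put(template)
+#         assert inmemory_model.service_template.get(template.id) == template
+#
+# (assuming ``from tests import mock`` is in scope)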
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/helpers.py b/azure/aria/aria-extension-cloudify/src/aria/tests/helpers.py
new file mode 100644
index 0000000..4c3194b
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/helpers.py
@@ -0,0 +1,82 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import json
+
+from . import ROOT_DIR
+from .resources import DIR as RESOURCES_DIR
+
+
+def get_example_uri(*args):
+ return os.path.join(ROOT_DIR, 'examples', *args)
+
+
+def get_resource_uri(*args):
+ return os.path.join(RESOURCES_DIR, *args)
+
+
+def get_service_template_uri(*args):
+ return os.path.join(RESOURCES_DIR, 'service-templates', *args)
+
+
+class FilesystemDataHolder(object):
+
+ def __init__(self, path, reset=False):
+ self._path = path
+ # an empty file has size 0; checking via os.path avoids leaking a file handle
+ if reset or not os.path.exists(self._path) or os.path.getsize(self._path) == 0:
+ self._dump({})
+
+ def _load(self):
+ with open(self._path) as f:
+ return json.load(f)
+
+ def _dump(self, value):
+ with open(self._path, 'w') as f:
+ return json.dump(value, f)
+
+ def __contains__(self, item):
+ return item in self._load()
+
+ def __setitem__(self, key, value):
+ dict_ = self._load()
+ dict_[key] = value
+ self._dump(dict_)
+
+ def __getitem__(self, item):
+ return self._load()[item]
+
+ def __iter__(self):
+ return iter(self._load())
+
+ def get(self, item, default=None):
+ return self._load().get(item, default)
+
+ def setdefault(self, key, value):
+ dict_ = self._load()
+ return_value = dict_.setdefault(key, value)
+ self._dump(dict_)
+ return return_value
+
+ def update(self, dict_=None, **kwargs):
+ current_dict = self._load()
+ if dict_:
+ current_dict.update(dict_)
+ current_dict.update(**kwargs)
+ self._dump(current_dict)
+
+ @property
+ def path(self):
+ return self._path
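+
+
+# Illustrative usage (hypothetical path) -- every mutation is persisted to disk
+# immediately, so state survives across processes:
+#
+#     holder = FilesystemDataHolder('/tmp/data.json', reset=True)
+#     holder['key'] = 'value'              # dumped straight to the JSON file
+#     assert holder.get('key') == 'value'
+#     holder.update(other='x')             # dict.update-like merge, then dump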
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/instantiation/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/instantiation/__init__.py
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/instantiation/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/instantiation/test_configuration.py b/azure/aria/aria-extension-cloudify/src/aria/tests/instantiation/test_configuration.py
new file mode 100644
index 0000000..6ac0c9c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/instantiation/test_configuration.py
@@ -0,0 +1,172 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from tests.parser.service_templates import consume_literal
+from aria.modeling.utils import parameters_as_values
+
+
+TEMPLATE = """
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+interface_types:
+ MyInterface:
+ derived_from: tosca.interfaces.Root
+ inputs:
+ interface_string:
+ type: string
+ default: value1
+ interface_integer:
+ type: integer
+ default: 1
+ operation:
+ implementation: operation.sh
+ inputs:
+ operation_string:
+ type: string
+ default: value2
+ operation_integer:
+ type: integer
+ default: 2
+ interface_integer: # will override interface input
+ type: integer
+ default: 3
+
+node_types:
+ LocalNode:
+ derived_from: tosca.nodes.Root
+ interfaces:
+ MyInterface:
+ type: MyInterface
+
+ RemoteNode:
+ derived_from: tosca.nodes.Compute
+ interfaces:
+ MyInterface:
+ type: MyInterface
+
+topology_template:
+ node_templates:
+ local_node:
+ type: LocalNode
+
+ remote_node:
+ type: RemoteNode
+"""
+
+
+BROKEN_TEMPLATE = """
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+interface_types:
+ MyInterface:
+ derived_from: tosca.interfaces.Root
+ inputs:
+ ctx: # reserved name
+ type: string
+ default: value1
+ interface_integer:
+ type: integer
+ default: 1
+ operation:
+ implementation: operation.sh
+ inputs:
+ operation_string:
+ type: string
+ default: value2
+ toolbelt: # reserved name
+ type: integer
+ default: 2
+
+node_types:
+ LocalNode:
+ derived_from: tosca.nodes.Root
+ interfaces:
+ MyInterface:
+ type: MyInterface
+
+topology_template:
+ node_templates:
+ local_node:
+ type: LocalNode
+"""
+
+
+@pytest.fixture
+def service():
+ context, _ = consume_literal(TEMPLATE)
+ yield context.modeling.instance
+
+
+@pytest.fixture
+def broken_service_issues():
+ context, _ = consume_literal(BROKEN_TEMPLATE, no_issues=False)
+ yield context.validation.issues
+
+
+def test_local(service):
+ interface = service.nodes['local_node_1'].interfaces['MyInterface']
+ operation = interface.operations['operation']
+ assert parameters_as_values(interface.inputs) == {
+ 'interface_string': 'value1',
+ 'interface_integer': 1
+ }
+ assert parameters_as_values(operation.inputs) == {
+ 'operation_string': 'value2',
+ 'operation_integer': 2,
+ 'interface_integer': 3
+ }
+ assert parameters_as_values(operation.arguments) == {
+ 'process': {},
+ 'script_path': 'operation.sh',
+ 'interface_string': 'value1',
+ 'interface_integer': 3,
+ 'operation_string': 'value2',
+ 'operation_integer': 2
+ }
+
+
+def test_remote(service):
+ interface = service.nodes['remote_node_1'].interfaces['MyInterface']
+ operation = interface.operations['operation']
+ assert parameters_as_values(interface.inputs) == {
+ 'interface_string': 'value1',
+ 'interface_integer': 1
+ }
+ assert parameters_as_values(operation.inputs) == {
+ 'operation_string': 'value2',
+ 'operation_integer': 2,
+ 'interface_integer': 3
+ }
+ assert parameters_as_values(operation.arguments) == {
+ 'process': {},
+ 'use_sudo': False,
+ 'fabric_env': {'user': '', 'password': '', 'key': None, 'key_filename': None},
+ 'script_path': 'operation.sh',
+ 'hide_output': [],
+ 'interface_string': 'value1',
+ 'interface_integer': 3,
+ 'operation_string': 'value2',
+ 'operation_integer': 2
+ }
+
+
+def test_reserved_arguments(broken_service_issues):
+ assert len(broken_service_issues) == 1
+ message = broken_service_issues[0].message
+ assert message.startswith('using reserved arguments in operation "operation":')
+ assert '"ctx"' in message
+ assert '"toolbelt"' in message
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/mock/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/mock/__init__.py
new file mode 100644
index 0000000..9183b77
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/mock/__init__.py
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import models, context, topology, operations, workflow
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/mock/context.py b/azure/aria/aria-extension-cloudify/src/aria/tests/mock/context.py
new file mode 100644
index 0000000..ac0a8a7
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/mock/context.py
@@ -0,0 +1,57 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import aria
+from aria.orchestrator import context
+from aria.storage import (
+ sql_mapi,
+ filesystem_rapi,
+)
+
+from . import models
+from ..storage import init_inmemory_model_storage
+from .topology import create_simple_topology_two_nodes
+
+
+def simple(tmpdir, inmemory=False, context_kwargs=None, topology=None):
+ initiator = init_inmemory_model_storage if inmemory else None
+ initiator_kwargs = {} if inmemory else dict(base_dir=tmpdir)
+ topology = topology or create_simple_topology_two_nodes
+
+ model_storage = aria.application_model_storage(
+ sql_mapi.SQLAlchemyModelAPI, initiator=initiator, initiator_kwargs=initiator_kwargs)
+ resource_storage = aria.application_resource_storage(
+ filesystem_rapi.FileSystemResourceAPI,
+ api_kwargs=dict(directory=os.path.join(tmpdir, 'resources'))
+ )
+
+ service_id = topology(model_storage)
+ execution = models.create_execution(model_storage.service.get(service_id))
+ model_storage.execution.put(execution)
+
+ final_kwargs = dict(
+ name='simple_context',
+ model_storage=model_storage,
+ resource_storage=resource_storage,
+ service_id=service_id,
+ workflow_name=models.WORKFLOW_NAME,
+ execution_id=execution.id,
+ task_max_attempts=models.TASK_MAX_ATTEMPTS,
+ task_retry_interval=models.TASK_RETRY_INTERVAL
+ )
+ final_kwargs.update(context_kwargs or {})
+ return context.workflow.WorkflowContext(**final_kwargs)
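+
+
+# Illustrative calls (assuming a pytest ``tmpdir``):
+#
+#     ctx = simple(str(tmpdir))                   # SQLite on disk, two-node topology
+#     ctx = simple(str(tmpdir), inmemory=True)    # in-memory SQLite instead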
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/mock/models.py b/azure/aria/aria-extension-cloudify/src/aria/tests/mock/models.py
new file mode 100644
index 0000000..8a3b87e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/mock/models.py
@@ -0,0 +1,358 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from datetime import datetime
+
+from aria.modeling import models
+from aria.orchestrator import decorators
+from aria.orchestrator.workflows.builtin.workflows import (
+ NORMATIVE_STANDARD_INTERFACE,
+ NORMATIVE_CREATE,
+ NORMATIVE_START,
+ NORMATIVE_STOP,
+ NORMATIVE_DELETE,
+ NORMATIVE_CONFIGURE,
+
+ NORMATIVE_CONFIGURE_INTERFACE,
+ NORMATIVE_PRE_CONFIGURE_SOURCE,
+ NORMATIVE_PRE_CONFIGURE_TARGET,
+ NORMATIVE_POST_CONFIGURE_SOURCE,
+ NORMATIVE_POST_CONFIGURE_TARGET,
+
+ NORMATIVE_ADD_SOURCE,
+ NORMATIVE_ADD_TARGET,
+ NORMATIVE_REMOVE_TARGET,
+ NORMATIVE_REMOVE_SOURCE
+)
+
+SERVICE_TEMPLATE_NAME = 'test_service_template'
+SERVICE_NAME = 'test_service1'
+NODE_TEMPLATE_NAME = 'test_node_template'
+NODE_NAME = 'test_node1'
+WORKFLOW_NAME = 'test_workflow'
+TASK_RETRY_INTERVAL = 1
+TASK_MAX_ATTEMPTS = 1
+
+DEPENDENCY_NODE_TEMPLATE_NAME = 'dependency_node_template'
+DEPENDENCY_NODE_NAME = 'dependency_node'
+DEPENDENT_NODE_TEMPLATE_NAME = 'dependent_node_template'
+DEPENDENT_NODE_NAME = 'dependent_node'
+
+
+def create_service_template(name=SERVICE_TEMPLATE_NAME, description=None, inputs=None):
+ now = datetime.now()
+ inputs = inputs or {}
+ return models.ServiceTemplate(
+ name=name,
+ description=description,
+ inputs=inputs,
+ created_at=now,
+ updated_at=now,
+ main_file_name='main_file_name',
+ node_types=models.Type(variant='node', name='test_node_type'),
+ group_types=models.Type(variant='group', name='test_group_type'),
+ policy_types=models.Type(variant='policy', name='test_policy_type'),
+ relationship_types=models.Type(variant='relationship', name='test_relationship_type'),
+ capability_types=models.Type(variant='capability', name='test_capability_type'),
+ artifact_types=models.Type(variant='artifact', name='test_artifact_type'),
+ interface_types=models.Type(variant='interface', name='test_interface_type')
+ )
+
+
+def create_service(service_template, name=SERVICE_NAME, inputs=None):
+ now = datetime.utcnow()
+ inputs = inputs or {}
+ return models.Service(
+ name=name,
+ inputs=inputs,
+ service_template=service_template,
+ description='',
+ created_at=now,
+ updated_at=now,
+ )
+
+
+def create_service_with_dependencies(include_execution=False,
+ include_input=False,
+ include_output=False,
+ include_node=False):
+ service_template = create_service_template()
+ service = create_service(service_template=service_template)
+ if include_execution:
+ execution = create_execution(service=service, status=models.Execution.STARTED)
+ service.executions = [execution]
+ execution.id = '1'
+ if include_input:
+ service_input = create_input(name='input1', value='value1') # avoid shadowing the ``input`` builtin
+ service.inputs = {'input1': service_input}
+ if include_output:
+ output = create_output(name='output1', value='value1')
+ service.outputs = {'output1': output}
+ if include_node:
+ node_template = create_node_template(service_template=service_template)
+ node = create_node(node_template, service, state=models.Node.STARTED)
+ node.id = '1'
+ return service
+
+
+def create_node_template_with_dependencies(include_node=False, include_property=False):
+ service_template = create_service_template()
+ node_template = create_node_template(service_template=service_template)
+ if include_node:
+ service = create_service(service_template=service_template)
+ create_node(dependency_node_template=node_template, service=service)
+ if include_property:
+ node_template.properties = {'prop1': create_property(name='prop1', value='value1')}
+ return node_template
+
+
+def create_node_with_dependencies(include_attribute=False):
+
+ node_template = create_node_template_with_dependencies()
+ node_template.service_template.services[0] = create_service(node_template.service_template)
+ node = create_node(node_template, node_template.service_template.services[0])
+ if include_attribute:
+ node.attributes['attribute1'] = models.Attribute.wrap('attribute1', 'value1') # pylint: disable=unsubscriptable-object
+ return node
+
+
+def create_node_template(service_template,
+ name=NODE_TEMPLATE_NAME,
+ type=models.Type(variant='node', name='test_node_type'),
+ capability_templates=None,
+ requirement_templates=None,
+ interface_templates=None):
+ capability_templates = capability_templates or {}
+ requirement_templates = requirement_templates or []
+ interface_templates = interface_templates or {}
+ node_template = models.NodeTemplate(
+ name=name,
+ type=type,
+ capability_templates=capability_templates,
+ requirement_templates=requirement_templates,
+ interface_templates=interface_templates,
+ service_template=service_template)
+
+ service_template.node_templates[node_template.name] = node_template
+ return node_template
+
+
+def create_dependency_node_template(service_template, name=DEPENDENCY_NODE_TEMPLATE_NAME):
+ node_type = service_template.node_types.get_descendant('test_node_type')
+ capability_type = service_template.capability_types.get_descendant('test_capability_type')
+
+ capability_template = models.CapabilityTemplate(
+ name='capability',
+ type=capability_type
+ )
+ return create_node_template(
+ service_template=service_template,
+ name=name,
+ type=node_type,
+ capability_templates=_dictify(capability_template)
+ )
+
+
+def create_dependent_node_template(
+ service_template, dependency_node_template, name=DEPENDENT_NODE_TEMPLATE_NAME):
+ the_type = service_template.node_types.get_descendant('test_node_type')
+
+ requirement_template = models.RequirementTemplate(
+ name='requirement',
+ target_node_template=dependency_node_template
+ )
+ return create_node_template(
+ service_template=service_template,
+ name=name,
+ type=the_type,
+ interface_templates=_dictify(get_standard_interface_template(service_template)),
+ requirement_templates=[requirement_template],
+ )
+
+
+def create_node(dependency_node_template, service, name=NODE_NAME, state=models.Node.INITIAL):
+ node = models.Node(
+ name=name,
+ type=dependency_node_template.type,
+ version=None,
+ node_template=dependency_node_template,
+ state=state,
+ service=service,
+ interfaces=get_standard_interface(service),
+ )
+ service.nodes[node.name] = node
+ return node
+
+
+def create_relationship(source, target):
+ return models.Relationship(
+ source_node=source,
+ target_node=target,
+ interfaces=get_configure_interfaces(service=source.service),
+ )
+
+
+def create_interface_template(service_template, interface_name, operation_name,
+ operation_kwargs=None, interface_kwargs=None):
+ the_type = service_template.interface_types.get_descendant('test_interface_type')
+ operation_template = models.OperationTemplate(
+ name=operation_name,
+ **(operation_kwargs or {})
+ )
+ return models.InterfaceTemplate(
+ type=the_type,
+ operation_templates=_dictify(operation_template),
+ name=interface_name,
+ **(interface_kwargs or {})
+ )
+
+
+def create_operation(operation_name, operation_kwargs=None):
+ if operation_kwargs and operation_kwargs.get('arguments'):
+ operation_kwargs['arguments'] = dict(
+ (argument_name, models.Argument.wrap(argument_name, argument_value))
+ for argument_name, argument_value in operation_kwargs['arguments'].iteritems()
+ if argument_value is not None)
+
+ return models.Operation(
+ name=operation_name,
+ **(operation_kwargs or {})
+ )
+
+
+def create_interface(service, interface_name, operation_name, operation_kwargs=None,
+ interface_kwargs=None):
+ the_type = service.service_template.interface_types.get_descendant('test_interface_type')
+ operation = create_operation(operation_name, operation_kwargs)
+
+ return models.Interface(
+ type=the_type,
+ operations=_dictify(operation),
+ name=interface_name,
+ **(interface_kwargs or {})
+ )
+
+
+def create_execution(service, status=models.Execution.PENDING):
+ return models.Execution(
+ service=service,
+ status=status,
+ workflow_name=WORKFLOW_NAME,
+ created_at=datetime.utcnow(),
+ started_at=datetime.utcnow(),
+ inputs={}
+ )
+
+
+def create_plugin(name='test_plugin', package_version='0.1'):
+ return models.Plugin(
+ name=name,
+ archive_name='archive_name',
+ distribution='distribution',
+ distribution_release='dist_release',
+ distribution_version='dist_version',
+ package_name='package',
+ package_source='source',
+ package_version=package_version,
+ supported_platform='any',
+ supported_py_versions=['python27'],
+ uploaded_at=datetime.now(),
+ wheels=[],
+ )
+
+
+def create_plugin_specification(name='test_plugin', version='0.1'):
+ return models.PluginSpecification(
+ name=name,
+ version=version
+ )
+
+
+def _create_parameter(name, value, model_cls):
+ return model_cls.wrap(name, value)
+
+
+def create_property(name, value):
+ return _create_parameter(name, value, model_cls=models.Property)
+
+
+def create_input(name, value):
+ return _create_parameter(name, value, model_cls=models.Input)
+
+
+def create_output(name, value):
+ return _create_parameter(name, value, model_cls=models.Output)
+
+
+def _dictify(item):
+ return dict(((item.name, item),))
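+
+# e.g. _dictify(op) -> {op.name: op}: builds the one-item, name-keyed mappings
+# that the model collections expect.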
+
+
+def get_standard_interface_template(service_template):
+ the_type = service_template.interface_types.get_descendant('test_interface_type')
+
+ op_templates = dict(
+ (op_name, models.OperationTemplate(
+ name=op_name, implementation='{0}.{1}'.format(__file__, mock_operation.__name__)))
+ for op_name in (NORMATIVE_CREATE, NORMATIVE_CONFIGURE, NORMATIVE_START,
+ NORMATIVE_STOP, NORMATIVE_DELETE)
+ )
+ return models.InterfaceTemplate(name=NORMATIVE_STANDARD_INTERFACE,
+ operation_templates=op_templates,
+ type=the_type)
+
+
+def get_standard_interface(service):
+ the_type = service.service_template.interface_types.get_descendant('test_interface_type')
+
+ ops = dict(
+ (op_name, models.Operation(
+ name=op_name, implementation='{0}.{1}'.format(__file__, mock_operation.__name__)))
+ for op_name in (NORMATIVE_CREATE, NORMATIVE_CONFIGURE, NORMATIVE_START,
+ NORMATIVE_STOP, NORMATIVE_DELETE)
+ )
+ return {
+ NORMATIVE_STANDARD_INTERFACE:
+ models.Interface(name=NORMATIVE_STANDARD_INTERFACE, operations=ops, type=the_type)
+ }
+
+
+def get_configure_interfaces(service):
+ the_type = service.service_template.interface_types.get_descendant('test_interface_type')
+
+ operations = dict(
+ (op_name, models.Operation(
+ name=op_name, implementation='{0}.{1}'.format(__file__, mock_operation.__name__)))
+ for op_name in (NORMATIVE_PRE_CONFIGURE_SOURCE,
+ NORMATIVE_POST_CONFIGURE_SOURCE,
+ NORMATIVE_ADD_SOURCE,
+ NORMATIVE_REMOVE_SOURCE,
+
+ NORMATIVE_PRE_CONFIGURE_TARGET,
+ NORMATIVE_POST_CONFIGURE_TARGET,
+ NORMATIVE_ADD_TARGET,
+ NORMATIVE_REMOVE_TARGET)
+ )
+ interface = {
+ NORMATIVE_CONFIGURE_INTERFACE: models.Interface(
+ name=NORMATIVE_CONFIGURE_INTERFACE, operations=operations, type=the_type)
+ }
+
+ return interface
+
+
+@decorators.operation
+def mock_operation(*args, **kwargs):
+ pass
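+
+
+# Illustrative (assumed) composition of the factories above:
+#
+#     service = create_service_with_dependencies(include_node=True)
+#     node = service.nodes.values()[0]        # Python 2: .values() returns a list
+#     assert node.state == models.Node.STARTED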
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/mock/operations.py b/azure/aria/aria-extension-cloudify/src/aria/tests/mock/operations.py
new file mode 100644
index 0000000..c752a8e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/mock/operations.py
@@ -0,0 +1,59 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+NODE_OPERATIONS_INSTALL = [
+ ('Standard', 'create'),
+ ('Standard', 'configure'),
+ ('Standard', 'start')
+]
+
+NODE_OPERATIONS_UNINSTALL = [
+ ('Standard', 'stop'),
+ ('Standard', 'delete')
+]
+
+NODE_OPERATIONS = NODE_OPERATIONS_INSTALL + NODE_OPERATIONS_UNINSTALL
+
+RELATIONSHIP_OPERATIONS_INSTALL = [
+ ('Configure', 'pre_configure_source'),
+ ('Configure', 'pre_configure_target'),
+ ('Configure', 'add_source'),
+ ('Configure', 'add_target')
+]
+
+RELATIONSHIP_OPERATIONS_UNINSTALL = [
+ ('Configure', 'remove_target'),
+ ('Configure', 'target_changed')
+]
+
+RELATIONSHIP_OPERATIONS = RELATIONSHIP_OPERATIONS_INSTALL + RELATIONSHIP_OPERATIONS_UNINSTALL
+
+OPERATIONS_INSTALL = [
+ ('Standard', 'create'),
+ ('Configure', 'pre_configure_source'),
+ ('Configure', 'pre_configure_target'),
+ ('Standard', 'configure'),
+ ('Standard', 'start'),
+ ('Configure', 'add_source'),
+ ('Configure', 'add_target'),
+ ('Configure', 'target_changed')
+]
+
+OPERATIONS_UNINSTALL = [
+ ('Configure', 'remove_target'),
+ ('Configure', 'target_changed'),
+ ('Standard', 'stop'),
+ ('Standard', 'delete')
+]
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/mock/topology.py b/azure/aria/aria-extension-cloudify/src/aria/tests/mock/topology.py
new file mode 100644
index 0000000..9f0521f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/mock/topology.py
@@ -0,0 +1,96 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.modeling import models as aria_models
+
+from . import models
+
+
+def create_simple_topology_single_node(model_storage, create_operation):
+ service_template = models.create_service_template()
+ service = models.create_service(service_template)
+
+ node_template = models.create_dependency_node_template(service_template)
+ interface_template = models.create_interface_template(
+ service_template,
+ 'Standard', 'create',
+ operation_kwargs=dict(
+ function=create_operation,
+ arguments={'key': aria_models.Argument.wrap('key', 'create'),
+ 'value': aria_models.Argument.wrap('value', True)})
+ )
+ node_template.interface_templates[interface_template.name] = interface_template # pylint: disable=unsubscriptable-object
+
+ node = models.create_node(node_template, service, name=models.DEPENDENCY_NODE_NAME)
+ interface = models.create_interface(
+ service,
+ 'Standard', 'create',
+ operation_kwargs=dict(
+ function=create_operation,
+ arguments={'key': aria_models.Argument.wrap('key', 'create'),
+ 'value': aria_models.Argument.wrap('value', True)})
+ )
+ node.interfaces[interface.name] = interface # pylint: disable=unsubscriptable-object
+
+ model_storage.service_template.put(service_template)
+ model_storage.service.put(service)
+
+
+def create_simple_topology_two_nodes(model_storage):
+ service_template = models.create_service_template()
+ service = models.create_service(service_template)
+
+ # Create a simple service whose graph is: dependent node -> dependency node
+
+ dependency_node_template = models.create_dependency_node_template(service_template)
+ dependent_node_template = models.create_dependent_node_template(service_template,
+ dependency_node_template)
+
+ dependency_node = models.create_node(
+ dependency_node_template, service, models.DEPENDENCY_NODE_NAME)
+ dependent_node = models.create_node(
+ dependent_node_template, service, models.DEPENDENT_NODE_NAME)
+
+ dependent_node.outbound_relationships.append(models.create_relationship( # pylint: disable=no-member
+ source=dependent_node,
+ target=dependency_node
+ ))
+
+ model_storage.service_template.put(service_template)
+ model_storage.service.put(service)
+
+ return service.id
+
+
+def create_simple_topology_three_nodes(model_storage):
+ #################################################################################
+ # Creating a simple deployment with the following topology:
+ # node1 <----|
+ # | <- node0
+ # node2 <----|
+ # i.e. node0 has two outbound relationships: one to node1 and one to node2.
+
+ service_id = create_simple_topology_two_nodes(model_storage)
+ service = model_storage.service.get(service_id)
+ third_node_template = models.create_dependency_node_template(
+ service.service_template, name='another_dependency_node_template')
+ third_node = models.create_node(third_node_template, service, 'another_dependency_node')
+ new_relationship = models.create_relationship(
+ source=model_storage.node.get_by_name(models.DEPENDENT_NODE_NAME),
+ target=third_node,
+ )
+ model_storage.relationship.put(new_relationship)
+
+ return service_id
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/mock/workflow.py b/azure/aria/aria-extension-cloudify/src/aria/tests/mock/workflow.py
new file mode 100644
index 0000000..b12b9fa
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/mock/workflow.py
@@ -0,0 +1,26 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+from aria.orchestrator.decorators import workflow
+
+
+@workflow
+def mock_workflow(graph, ctx, output_path=None, **kwargs): # pylint: disable=unused-argument
+ if output_path:
+ # writes call arguments to the specified output file
+ with open(output_path, 'w') as f:
+ json.dump(kwargs, f)
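+
+
+# Hedged sketch of how a test might consume this: pass an ``output_path`` under
+# tmpdir as a workflow input, then read the JSON back to see which arguments
+# actually reached the workflow function:
+#
+#     output_path = str(tmpdir.join('output.json'))
+#     # ... after an execution with inputs={'output_path': output_path} ...
+#     with open(output_path) as f:
+#         recorded = json.load(f)    # the **kwargs the workflow received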
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/modeling/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/modeling/__init__.py
new file mode 100644
index 0000000..072ef54
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/modeling/__init__.py
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from sqlalchemy import (
+ Column,
+ Text,
+ Integer,
+)
+
+from aria.modeling import (
+ models,
+ types as modeling_types,
+ mixins
+)
+
+
+class MockModel(models.aria_declarative_base, mixins.ModelMixin): #pylint: disable=abstract-method
+ __tablename__ = 'mock_model'
+ model_dict = Column(modeling_types.Dict)
+ model_list = Column(modeling_types.List)
+ value = Column(Integer)
+ name = Column(Text)
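+
+
+# MockModel gives the mixin tests a throwaway table with Dict, List and scalar
+# columns, so column mutation tracking can be exercised without touching the
+# real ARIA models (see test_mixins.py).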
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/modeling/test_mixins.py b/azure/aria/aria-extension-cloudify/src/aria/tests/modeling/test_mixins.py
new file mode 100644
index 0000000..2d94d7c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/modeling/test_mixins.py
@@ -0,0 +1,215 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+import sqlalchemy
+
+from aria.storage import (
+ ModelStorage,
+ sql_mapi
+)
+from aria import modeling
+from aria.modeling.exceptions import ValueFormatException
+
+from ..storage import (
+ release_sqlite_storage,
+ init_inmemory_model_storage
+)
+from . import MockModel
+from ..mock import (
+ models,
+ context as mock_context
+)
+
+
+@pytest.fixture
+def storage():
+ base_storage = ModelStorage(sql_mapi.SQLAlchemyModelAPI,
+ initiator=init_inmemory_model_storage)
+ base_storage.register(MockModel)
+ yield base_storage
+ release_sqlite_storage(base_storage)
+
+
+@pytest.fixture(scope='module', autouse=True)
+def module_cleanup():
+ yield # run the removal at module teardown, as the fixture name implies
+ modeling.models.aria_declarative_base.metadata.remove(MockModel.__table__) # pylint: disable=no-member
+
+
+@pytest.fixture
+def context(tmpdir):
+ ctx = mock_context.simple(str(tmpdir))
+ yield ctx
+ release_sqlite_storage(ctx.model)
+
+
+def test_inner_dict_update(storage):
+ inner_dict = {'inner_value': 1}
+
+ mock_model = MockModel(model_dict={'inner_dict': inner_dict, 'value': 0})
+ storage.mock_model.put(mock_model)
+
+ storage_mm = storage.mock_model.get(mock_model.id)
+ assert storage_mm == mock_model
+
+ storage_mm.model_dict['inner_dict']['inner_value'] = 2
+ storage_mm.model_dict['value'] = -1
+ storage.mock_model.update(storage_mm)
+ storage_mm = storage.mock_model.get(storage_mm.id)
+
+ assert storage_mm.model_dict['inner_dict']['inner_value'] == 2
+ assert storage_mm.model_dict['value'] == -1
+
+
+def test_inner_list_update(storage):
+ mock_model = MockModel(model_list=[0, [1]])
+ storage.mock_model.put(mock_model)
+
+ storage_mm = storage.mock_model.get(mock_model.id)
+ assert storage_mm == mock_model
+
+ storage_mm.model_list[1][0] = 'new_inner_value'
+ storage_mm.model_list[0] = 'new_value'
+ storage.mock_model.update(storage_mm)
+ storage_mm = storage.mock_model.get(storage_mm.id)
+
+ assert storage_mm.model_list[1][0] == 'new_inner_value'
+ assert storage_mm.model_list[0] == 'new_value'
+
+
+def test_model_to_dict(context):
+ service = context.service
+ service = service.to_dict()
+
+ expected_keys = [
+ 'description',
+ 'created_at',
+ 'updated_at'
+ ]
+
+ for expected_key in expected_keys:
+ assert expected_key in service
+
+
+def test_relationship_model_ordering(context):
+ service = context.model.service.get_by_name(models.SERVICE_NAME)
+ source_node = context.model.node.get_by_name(models.DEPENDENT_NODE_NAME)
+ target_node = context.model.node.get_by_name(models.DEPENDENCY_NODE_NAME)
+
+ new_node_template = modeling.models.NodeTemplate(
+ name='new_node_template',
+ type=source_node.type,
+ service_template=service.service_template
+ )
+
+ new_node = modeling.models.Node(
+ name='new_node',
+ type=source_node.type,
+ service=service,
+ version=None,
+ node_template=new_node_template,
+ state=modeling.models.Node.INITIAL,
+ )
+
+ source_node.outbound_relationships.append(modeling.models.Relationship(
+ source_node=source_node,
+ target_node=new_node,
+ ))
+
+ new_node.outbound_relationships.append(modeling.models.Relationship( # pylint: disable=no-member
+ source_node=new_node,
+ target_node=target_node,
+ ))
+
+ context.model.node_template.put(new_node_template)
+ context.model.node.put(new_node)
+ context.model.node.refresh(source_node)
+ context.model.node.refresh(target_node)
+
+ def flip_and_assert(node, direction):
+ """
+ Reversed the order of relationships and assert effects took place.
+ :param node: the node instance to operate on
+ :param direction: the type of relationships to flip (inbound/outbound)
+ :return:
+ """
+ assert direction in ('inbound', 'outbound')
+
+ def get_relationships():
+ return getattr(node, direction + '_relationships')
+
+ relationships = get_relationships()
+ assert len(relationships) == 2
+
+ reversed_relationship = list(reversed(relationships))
+ assert relationships != reversed_relationship
+
+ relationships[:] = reversed_relationship
+ context.model.node.update(node)
+ assert get_relationships() == reversed_relationship
+
+ flip_and_assert(source_node, 'outbound')
+ flip_and_assert(target_node, 'inbound')
+
+
+class StrictClass(modeling.models.aria_declarative_base, modeling.mixins.ModelMixin):
+ __tablename__ = 'strict_class'
+
+ strict_dict = sqlalchemy.Column(modeling.types.StrictDict(basestring, basestring))
+ strict_list = sqlalchemy.Column(modeling.types.StrictList(basestring))
+
+
+def test_strict_dict():
+
+ strict_class = StrictClass()
+
+ def assert_strict(sc):
+ with pytest.raises(ValueFormatException):
+ sc.strict_dict = {'key': 1}
+
+ with pytest.raises(ValueFormatException):
+ sc.strict_dict = {1: 'value'}
+
+ with pytest.raises(ValueFormatException):
+ sc.strict_dict = {1: 1}
+
+ assert_strict(strict_class)
+ strict_class.strict_dict = {'key': 'value'}
+ assert strict_class.strict_dict == {'key': 'value'}
+
+ assert_strict(strict_class)
+ with pytest.raises(ValueFormatException):
+ strict_class.strict_dict['key'] = 1
+ with pytest.raises(ValueFormatException):
+ strict_class.strict_dict[1] = 'value'
+ with pytest.raises(ValueFormatException):
+ strict_class.strict_dict[1] = 1
+
+
+def test_strict_list():
+ strict_class = StrictClass()
+
+ def assert_strict(sc):
+ with pytest.raises(ValueFormatException):
+ sc.strict_list = [1]
+
+ assert_strict(strict_class)
+ strict_class.strict_list = ['item']
+ assert strict_class.strict_list == ['item']
+
+ assert_strict(strict_class)
+ with pytest.raises(ValueFormatException):
+ strict_class.strict_list[0] = 1
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/modeling/test_models.py b/azure/aria/aria-extension-cloudify/src/aria/tests/modeling/test_models.py
new file mode 100644
index 0000000..25b4080
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/modeling/test_models.py
@@ -0,0 +1,872 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from datetime import datetime
+from contextlib import contextmanager
+
+import pytest
+
+from aria import application_model_storage
+from aria.storage import (
+ sql_mapi,
+)
+from aria.storage.exceptions import StorageError
+from aria.modeling.exceptions import ValueFormatException
+from aria.modeling.models import (
+ ServiceTemplate,
+ Service,
+ ServiceUpdate,
+ ServiceUpdateStep,
+ ServiceModification,
+ Execution,
+ Task,
+ Plugin,
+ Relationship,
+ NodeTemplate,
+ Node,
+ Input,
+ Output,
+ Property,
+ Attribute,
+ Configuration,
+ Argument,
+ Type
+)
+
+from tests import mock
+from tests.storage import release_sqlite_storage, init_inmemory_model_storage
+
+
+@contextmanager
+def sql_storage(storage_func):
+ storage = None
+ try:
+ storage = storage_func()
+ yield storage
+ finally:
+ if storage:
+ release_sqlite_storage(storage)
+
+
+def _empty_storage():
+ return application_model_storage(sql_mapi.SQLAlchemyModelAPI,
+ initiator=init_inmemory_model_storage)
+
+
+def _service_template_storage():
+ storage = _empty_storage()
+ service_template = mock.models.create_service_template()
+ storage.service_template.put(service_template)
+ return storage
+
+
+def _service_storage():
+ storage = _service_template_storage()
+ service = mock.models.create_service(
+ storage.service_template.get_by_name(mock.models.SERVICE_TEMPLATE_NAME))
+ storage.service.put(service)
+ return storage
+
+
+def _service_update_storage():
+ storage = _service_storage()
+ service_update = ServiceUpdate(
+ service=storage.service.list()[0],
+ created_at=now,
+ service_plan={},
+ )
+ storage.service_update.put(service_update)
+ return storage
+
+
+def _node_template_storage():
+ storage = _service_storage()
+ service_template = storage.service_template.list()[0]
+ dependency_node_template = mock.models.create_dependency_node_template(service_template)
+ mock.models.create_dependent_node_template(service_template, dependency_node_template)
+ storage.service_template.update(service_template)
+ return storage
+
+
+def _nodes_storage():
+ storage = _node_template_storage()
+ service = storage.service.get_by_name(mock.models.SERVICE_NAME)
+ dependency_node_template = storage.node_template.get_by_name(
+ mock.models.DEPENDENCY_NODE_TEMPLATE_NAME)
+ mock.models.create_node(dependency_node_template, service,
+ name=mock.models.DEPENDENCY_NODE_NAME)
+
+ dependent_node_template = mock.models.create_dependent_node_template(service.service_template,
+ dependency_node_template)
+
+ mock.models.create_node(dependent_node_template, service, name=mock.models.DEPENDENT_NODE_NAME)
+ storage.service.update(service)
+ return storage
+
+
+def _execution_storage():
+ storage = _service_storage()
+ execution = mock.models.create_execution(storage.service.list()[0])
+ plugin = mock.models.create_plugin()
+ storage.execution.put(execution)
+ storage.plugin.put(plugin)
+ return storage
+
+
+@pytest.fixture
+def empty_storage():
+ with sql_storage(_empty_storage) as storage:
+ yield storage
+
+
+@pytest.fixture
+def service_template_storage():
+ with sql_storage(_service_template_storage) as storage:
+ yield storage
+
+
+@pytest.fixture
+def service_storage():
+ with sql_storage(_service_storage) as storage:
+ yield storage
+
+
+@pytest.fixture
+def service_update_storage():
+ with sql_storage(_service_update_storage) as storage:
+ yield storage
+
+
+@pytest.fixture
+def node_template_storage():
+ with sql_storage(_node_template_storage) as storage:
+ yield storage
+
+
+@pytest.fixture
+def nodes_storage():
+ with sql_storage(_nodes_storage) as storage:
+ yield storage
+
+
+@pytest.fixture
+def execution_storage():
+ with sql_storage(_execution_storage) as storage:
+ yield storage
+
+
+m_cls = type('MockClass', (object,), {}) # sentinel class used as an obviously invalid value
+now = datetime.utcnow()
+
+
+def _test_model(is_valid, storage, model_cls, model_kwargs):
+ if is_valid:
+ model = model_cls(**model_kwargs)
+ getattr(storage, model_cls.__modelname__).put(model)
+ return model
+ else:
+ with pytest.raises((ValueFormatException, StorageError, TypeError)):
+ getattr(storage, model_cls.__modelname__).put(model_cls(**model_kwargs))
+
+
+class TestServiceTemplate(object):
+
+ @pytest.mark.parametrize(
+ 'is_valid, description, created_at, updated_at, main_file_name',
+ [
+ (False, [], now, now, '/path'),
+ (False, 'description', 'error', now, '/path'),
+ (False, 'description', now, 'error', '/path'),
+ (False, 'description', now, now, {}),
+
+ (True, 'description', now, now, '/path'),
+ ]
+ )
+ def test_service_template_model_creation(self, empty_storage, is_valid, description, created_at,
+ updated_at, main_file_name):
+ _test_model(is_valid=is_valid,
+ storage=empty_storage,
+ model_cls=ServiceTemplate,
+ model_kwargs=dict(
+ description=description,
+ created_at=created_at,
+ updated_at=updated_at,
+ main_file_name=main_file_name)
+ )
+
+
+class TestService(object):
+
+ @pytest.mark.parametrize(
+ 'is_valid, name, created_at, description, inputs, '
+ 'outputs, updated_at',
+ [
+ (False, m_cls, now, 'desc', {}, {}, now),
+ (False, 'name', m_cls, 'desc', {}, {}, now),
+ (False, 'name', now, m_cls, {}, {}, now),
+ (False, 'name', now, 'desc', m_cls, {}, now),
+ (False, 'name', now, 'desc', {}, m_cls, now),
+ (False, 'name', now, 'desc', {}, {}, m_cls),
+
+ (True, 'name', now, 'desc', {}, {}, now),
+ (True, None, now, 'desc', {}, {}, now),
+ (True, 'name', now, None, {}, {}, now),
+ (True, 'name', now, 'desc', {}, {}, None),
+ (True, 'name', now, 'desc', {}, {}, now),
+ ]
+ )
+ def test_service_model_creation(self, service_storage, is_valid, name, created_at, description,
+ inputs, outputs, updated_at):
+ service = _test_model(
+ is_valid=is_valid,
+ storage=service_storage,
+ model_cls=Service,
+ model_kwargs=dict(
+ name=name,
+ service_template=service_storage.service_template.list()[0],
+ created_at=created_at,
+ description=description,
+ inputs=inputs,
+ outputs=outputs,
+ updated_at=updated_at
+ ))
+ if is_valid:
+ assert service.service_template == \
+ service_storage.service_template.list()[0]
+
+
+class TestExecution(object):
+
+ @pytest.mark.parametrize(
+ 'is_valid, created_at, started_at, ended_at, error, inputs, '
+ 'status, workflow_name',
+ [
+ (False, m_cls, now, now, 'error', {}, Execution.STARTED, 'wf_name'),
+ (False, now, m_cls, now, 'error', {}, Execution.STARTED, 'wf_name'),
+ (False, now, now, m_cls, 'error', {}, Execution.STARTED, 'wf_name'),
+ (False, now, now, now, m_cls, {}, Execution.STARTED, 'wf_name'),
+ (False, now, now, now, 'error', m_cls, Execution.STARTED, 'wf_name'),
+ (False, now, now, now, 'error', {}, m_cls, 'wf_name'),
+ (False, now, now, now, 'error', {}, Execution.STARTED, m_cls),
+
+ (True, now, now, now, 'error', {}, Execution.STARTED, 'wf_name'),
+ (True, now, None, now, 'error', {}, Execution.STARTED, 'wf_name'),
+ (True, now, now, None, 'error', {}, Execution.STARTED, 'wf_name'),
+ (True, now, now, now, None, {}, Execution.STARTED, 'wf_name'),
+ ]
+ )
+ def test_execution_model_creation(self, service_storage, is_valid, created_at, started_at,
+ ended_at, error, inputs, status, workflow_name):
+ execution = _test_model(
+ is_valid=is_valid,
+ storage=service_storage,
+ model_cls=Execution,
+ model_kwargs=dict(
+ service=service_storage.service.list()[0],
+ created_at=created_at,
+ started_at=started_at,
+ ended_at=ended_at,
+ error=error,
+ inputs=inputs,
+ status=status,
+ workflow_name=workflow_name,
+ ))
+ if is_valid:
+ assert execution.service == service_storage.service.list()[0]
+ assert execution.service_template == service_storage.service_template.list()[0]
+
+ def test_execution_status_transition(self):
+ def create_execution(status):
+ execution = Execution(
+ id='e_id',
+ workflow_name='w_name',
+ status=status,
+ inputs={},
+ created_at=now,
+ )
+ return execution
+
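+ # Maps each execution status to the statuses it may legally move to;
+ # assigning any status outside this set must raise a ValueError (see below).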
+ valid_transitions = {
+ Execution.PENDING: [Execution.STARTED,
+ Execution.CANCELLED,
+ Execution.PENDING],
+ Execution.STARTED: [Execution.FAILED,
+ Execution.SUCCEEDED,
+ Execution.CANCELLED,
+ Execution.CANCELLING,
+ Execution.STARTED],
+ Execution.CANCELLING: [Execution.FAILED,
+ Execution.SUCCEEDED,
+ Execution.CANCELLED,
+ Execution.CANCELLING],
+ Execution.FAILED: [Execution.FAILED],
+ Execution.SUCCEEDED: [Execution.SUCCEEDED],
+ Execution.CANCELLED: [Execution.CANCELLED, Execution.PENDING]
+ }
+
+ invalid_transitions = {
+ Execution.PENDING: [Execution.FAILED,
+ Execution.SUCCEEDED,
+ Execution.CANCELLING],
+ Execution.STARTED: [Execution.PENDING],
+ Execution.CANCELLING: [Execution.PENDING,
+ Execution.STARTED],
+ Execution.FAILED: [Execution.STARTED,
+ Execution.SUCCEEDED,
+ Execution.CANCELLED,
+ Execution.CANCELLING],
+ Execution.SUCCEEDED: [Execution.PENDING,
+ Execution.STARTED,
+ Execution.FAILED,
+ Execution.CANCELLED,
+ Execution.CANCELLING],
+ Execution.CANCELLED: [Execution.STARTED,
+ Execution.FAILED,
+ Execution.SUCCEEDED,
+ Execution.CANCELLING],
+ }
+
+ for current_status, valid_transitioned_statuses in valid_transitions.items():
+ for transitioned_status in valid_transitioned_statuses:
+ execution = create_execution(current_status)
+ execution.status = transitioned_status
+
+ for current_status, invalid_transitioned_statuses in invalid_transitions.items():
+ for transitioned_status in invalid_transitioned_statuses:
+ execution = create_execution(current_status)
+ with pytest.raises(ValueError):
+ execution.status = transitioned_status
+
+
+class TestServiceUpdate(object):
+ @pytest.mark.parametrize(
+ 'is_valid, created_at, service_plan, service_update_nodes, '
+ 'service_update_service, service_update_node_templates, '
+ 'modified_entity_ids, state',
+ [
+ (False, m_cls, {}, {}, {}, [], {}, 'state'),
+ (False, now, m_cls, {}, {}, [], {}, 'state'),
+ (False, now, {}, m_cls, {}, [], {}, 'state'),
+ (False, now, {}, {}, m_cls, [], {}, 'state'),
+ (False, now, {}, {}, {}, m_cls, {}, 'state'),
+ (False, now, {}, {}, {}, [], m_cls, 'state'),
+ (False, now, {}, {}, {}, [], {}, m_cls),
+
+ (True, now, {}, {}, {}, [], {}, 'state'),
+ (True, now, {}, None, {}, [], {}, 'state'),
+ (True, now, {}, {}, None, [], {}, 'state'),
+ (True, now, {}, {}, {}, None, {}, 'state'),
+ (True, now, {}, {}, {}, [], None, 'state'),
+ (True, now, {}, {}, {}, [], {}, None),
+ ]
+ )
+ def test_service_update_model_creation(self, service_storage, is_valid, created_at,
+ service_plan, service_update_nodes,
+ service_update_service, service_update_node_templates,
+ modified_entity_ids, state):
+ service_update = _test_model(
+ is_valid=is_valid,
+ storage=service_storage,
+ model_cls=ServiceUpdate,
+ model_kwargs=dict(
+ service=service_storage.service.list()[0],
+ created_at=created_at,
+ service_plan=service_plan,
+ service_update_nodes=service_update_nodes,
+ service_update_service=service_update_service,
+ service_update_node_templates=service_update_node_templates,
+ modified_entity_ids=modified_entity_ids,
+ state=state
+ ))
+ if is_valid:
+ assert service_update.service == \
+ service_storage.service.list()[0]
+
+
+class TestServiceUpdateStep(object):
+
+ @pytest.mark.parametrize(
+ 'is_valid, action, entity_id, entity_type',
+ [
+ (False, m_cls, 'id', ServiceUpdateStep.ENTITY_TYPES.NODE),
+ (False, ServiceUpdateStep.ACTION_TYPES.ADD, m_cls,
+ ServiceUpdateStep.ENTITY_TYPES.NODE),
+ (False, ServiceUpdateStep.ACTION_TYPES.ADD, 'id', m_cls),
+
+ (True, ServiceUpdateStep.ACTION_TYPES.ADD, 'id',
+ ServiceUpdateStep.ENTITY_TYPES.NODE)
+ ]
+ )
+ def test_service_update_step_model_creation(self, service_update_storage, is_valid, action,
+ entity_id, entity_type):
+ service_update_step = _test_model(
+ is_valid=is_valid,
+ storage=service_update_storage,
+ model_cls=ServiceUpdateStep,
+ model_kwargs=dict(
+ service_update=service_update_storage.service_update.list()[0],
+ action=action,
+ entity_id=entity_id,
+ entity_type=entity_type
+ ))
+ if is_valid:
+ assert service_update_step.service_update == \
+ service_update_storage.service_update.list()[0]
+
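+ # Step ordering: relationship removals sort first and relationship additions
+ # last, with node removals < modifications < additions in between.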
+ def test_service_update_step_order(self):
+ add_node = ServiceUpdateStep(
+ id='add_step',
+ action='add',
+ entity_type='node',
+ entity_id='node_id')
+
+ modify_node = ServiceUpdateStep(
+ id='modify_step',
+ action='modify',
+ entity_type='node',
+ entity_id='node_id')
+
+ remove_node = ServiceUpdateStep(
+ id='remove_step',
+ action='remove',
+ entity_type='node',
+ entity_id='node_id')
+
+ for step in (add_node, modify_node, remove_node):
+ assert hash((step.id, step.entity_id)) == hash(step)
+
+ assert remove_node < modify_node < add_node
+ assert not remove_node > modify_node > add_node
+
+ add_rel = ServiceUpdateStep(
+ id='add_step',
+ action='add',
+ entity_type='relationship',
+ entity_id='relationship_id')
+
+ remove_rel = ServiceUpdateStep(
+ id='remove_step',
+ action='remove',
+ entity_type='relationship',
+ entity_id='relationship_id')
+
+ assert remove_rel < remove_node < add_node < add_rel
+ assert not add_node < None
+
+
+class TestServiceModification(object):
+ @pytest.mark.parametrize(
+ 'is_valid, context, created_at, ended_at, modified_node_templates, nodes, status',
+ [
+ (False, m_cls, now, now, {}, {}, ServiceModification.STARTED),
+ (False, {}, m_cls, now, {}, {}, ServiceModification.STARTED),
+ (False, {}, now, m_cls, {}, {}, ServiceModification.STARTED),
+ (False, {}, now, now, m_cls, {}, ServiceModification.STARTED),
+ (False, {}, now, now, {}, m_cls, ServiceModification.STARTED),
+ (False, {}, now, now, {}, {}, m_cls),
+
+ (True, {}, now, now, {}, {}, ServiceModification.STARTED),
+ (True, {}, now, None, {}, {}, ServiceModification.STARTED),
+ (True, {}, now, now, None, {}, ServiceModification.STARTED),
+ (True, {}, now, now, {}, None, ServiceModification.STARTED),
+ ]
+ )
+ def test_service_modification_model_creation(self, service_storage, is_valid, context,
+ created_at, ended_at, modified_node_templates,
+ nodes, status):
+ service_modification = _test_model(
+ is_valid=is_valid,
+ storage=service_storage,
+ model_cls=ServiceModification,
+ model_kwargs=dict(
+ service=service_storage.service.list()[0],
+ context=context,
+ created_at=created_at,
+ ended_at=ended_at,
+ modified_node_templates=modified_node_templates,
+ nodes=nodes,
+ status=status,
+ ))
+ if is_valid:
+ assert service_modification.service == \
+ service_storage.service.list()[0]
+
+
+class TestNodeTemplate(object):
+ @pytest.mark.parametrize(
+ 'is_valid, name, properties',
+ [
+ (False, m_cls, {}),
+ (False, 'name', m_cls),
+
+ (True, 'name', {}),
+ ]
+ )
+ def test_node_template_model_creation(self, service_storage, is_valid, name, properties):
+ node_template = _test_model(
+ is_valid=is_valid,
+ storage=service_storage,
+ model_cls=NodeTemplate,
+ model_kwargs=dict(
+ name=name,
+ type=service_storage.type.list()[0],
+ properties=properties,
+ service_template=service_storage.service_template.list()[0]
+ ))
+ if is_valid:
+ assert node_template.service_template == \
+ service_storage.service_template.list()[0]
+
+
+class TestNode(object):
+ @pytest.mark.parametrize(
+ 'is_valid, name, state, version',
+ [
+ (False, m_cls, 'state', 1),
+ (False, 'name', 'state', 1),
+ (False, 'name', m_cls, 1),
+ (False, m_cls, 'state', m_cls),
+
+ (True, 'name', 'initial', 1),
+ (True, None, 'initial', 1),
+ (True, 'name', 'initial', 1),
+ (True, 'name', 'initial', None),
+ ]
+ )
+ def test_node_model_creation(self, node_template_storage, is_valid, name, state, version):
+ node = _test_model(
+ is_valid=is_valid,
+ storage=node_template_storage,
+ model_cls=Node,
+ model_kwargs=dict(
+ node_template=node_template_storage.node_template.list()[0],
+ type=node_template_storage.type.list()[0],
+ name=name,
+ state=state,
+ version=version,
+ service=node_template_storage.service.list()[0]
+ ))
+ if is_valid:
+ assert node.node_template == node_template_storage.node_template.list()[0]
+ assert node.service == \
+ node_template_storage.service.list()[0]
+
+
+class TestNodeHostAddress(object):
+
+ host_address = '1.1.1.1'
+
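+ # host_address resolution: a host node takes its address from its 'ip'
+ # runtime attribute, falling back to the template's 'host_address' property;
+ # a hosted node inherits the address of its host.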
+ def test_host_address_on_none_hosted_node(self, service_storage):
+ node_template = self._node_template(service_storage, host_address='not considered')
+ node = self._node(service_storage,
+ node_template,
+ is_host=False,
+ host_address='not considered')
+ assert node.host_address is None
+
+ def test_property_host_address_on_host_node(self, service_storage):
+ node_template = self._node_template(service_storage, host_address=self.host_address)
+ node = self._node(service_storage, node_template, is_host=True, host_address=None)
+ assert node.host_address == self.host_address
+
+ def test_runtime_property_host_address_on_host_node(self, service_storage):
+ node_template = self._node_template(service_storage, host_address='not considered')
+ node = self._node(service_storage, node_template, is_host=True,
+ host_address=self.host_address)
+ assert node.host_address == self.host_address
+
+ def test_no_host_address_configured_on_host_node(self, service_storage):
+ node_template = self._node_template(service_storage, host_address=None)
+ node = self._node(service_storage, node_template, is_host=True, host_address=None)
+ assert node.host_address is None
+
+ def test_runtime_property_on_hosted_node(self, service_storage):
+ host_node_template = self._node_template(service_storage, host_address=None)
+ host_node = self._node(service_storage,
+ host_node_template,
+ is_host=True,
+ host_address=self.host_address)
+ node_template = self._node_template(service_storage, host_address=None)
+ node = self._node(service_storage,
+ node_template,
+ is_host=False,
+ host_address=None,
+ host_fk=host_node.id)
+ assert node.host_address == self.host_address
+
+ def _node_template(self, storage, host_address):
+ kwargs = dict(
+ name='node_template',
+ type=storage.type.list()[0],
+ service_template=storage.service_template.list()[0]
+ )
+ if host_address:
+ kwargs['properties'] = {'host_address': Property.wrap('host_address', host_address)}
+ node = NodeTemplate(**kwargs)
+ storage.node_template.put(node)
+ return node
+
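+ # Builds a node; for host nodes a missing runtime address falls back to the
+ # template's 'host_address' property and is stored as the 'ip' attribute.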
+ def _node(self, storage, node_template, is_host, host_address, host_fk=None):
+ kwargs = dict(
+ name='node',
+ node_template=node_template,
+ type=storage.type.list()[0],
+ state='initial',
+ service=storage.service.list()[0]
+ )
+ if is_host and (host_address is None):
+ host_address = node_template.properties.get('host_address')
+ if host_address is not None:
+ host_address = host_address.value
+ if host_address:
+ kwargs.setdefault('attributes', {})['ip'] = Attribute.wrap('ip', host_address)
+ if is_host:
+ kwargs['host_fk'] = 1
+ elif host_fk:
+ kwargs['host_fk'] = host_fk
+ node = Node(**kwargs)
+ storage.node.put(node)
+ return node
+
+
+class TestRelationship(object):
+ @pytest.mark.parametrize(
+ 'is_valid, source_position, target_position',
+ [
+ (False, m_cls, 0),
+ (False, 0, m_cls),
+
+ (True, 0, 0),
+ (True, None, 0),
+ (True, 0, None),
+ ]
+ )
+ def test_relationship_model_creation(self, nodes_storage, is_valid, source_position,
+ target_position):
+ nodes = nodes_storage.node
+ source_node = nodes.get_by_name(mock.models.DEPENDENT_NODE_NAME)
+ target_node = nodes.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ _test_model(is_valid=is_valid,
+ storage=nodes_storage,
+ model_cls=Relationship,
+ model_kwargs=dict(
+ source_node=source_node,
+ target_node=target_node,
+ source_position=source_position,
+ target_position=target_position
+ ))
+
+
+class TestPlugin(object):
+ @pytest.mark.parametrize(
+ 'is_valid, archive_name, distribution, distribution_release, '
+ 'distribution_version, package_name, package_source, '
+ 'package_version, supported_platform, supported_py_versions, uploaded_at, wheels',
+ [
+ (False, m_cls, 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src', 'pak_ver',
+ 'sup_plat', [], now, []),
+ (False, 'arc_name', m_cls, 'dis_rel', 'dis_ver', 'pak_name', 'pak_src', 'pak_ver',
+ 'sup_plat', [], now, []),
+ (False, 'arc_name', 'dis_name', m_cls, 'dis_ver', 'pak_name', 'pak_src', 'pak_ver',
+ 'sup_plat', [], now, []),
+ (False, 'arc_name', 'dis_name', 'dis_rel', m_cls, 'pak_name', 'pak_src', 'pak_ver',
+ 'sup_plat', [], now, []),
+ (False, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', m_cls, 'pak_src', 'pak_ver',
+ 'sup_plat', [], now, []),
+ (False, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', m_cls, 'pak_ver',
+ 'sup_plat', [], now, []),
+ (False, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src', m_cls,
+ 'sup_plat', [], now, []),
+ (False, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src',
+ 'pak_ver', m_cls, [], now, []),
+ (False, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src',
+ 'pak_ver', 'sup_plat', m_cls, now, []),
+ (False, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src',
+ 'pak_ver', 'sup_plat', [], m_cls, []),
+ (False, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src',
+ 'pak_ver', 'sup_plat', [], now, m_cls),
+
+ (True, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src', 'pak_ver',
+ 'sup_plat', [], now, []),
+ (True, 'arc_name', None, 'dis_rel', 'dis_ver', 'pak_name', 'pak_src', 'pak_ver',
+ 'sup_plat', [], now, []),
+ (True, 'arc_name', 'dis_name', None, 'dis_ver', 'pak_name', 'pak_src', 'pak_ver',
+ 'sup_plat', [], now, []),
+ (True, 'arc_name', 'dis_name', 'dis_rel', None, 'pak_name', 'pak_src', 'pak_ver',
+ 'sup_plat', [], now, []),
+ (True, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src',
+ 'pak_ver', 'sup_plat', [], now, []),
+ (True, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', None, 'pak_ver',
+ 'sup_plat', [], now, []),
+ (True, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src', None,
+ 'sup_plat', [], now, []),
+ (True, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src',
+ 'pak_ver', None, [], now, []),
+ (True, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src',
+ 'pak_ver', 'sup_plat', None, now, []),
+ (True, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src',
+ 'pak_ver', 'sup_plat', [], now, []),
+ ]
+ )
+ def test_plugin_model_creation(self, empty_storage, is_valid, archive_name, distribution,
+ distribution_release, distribution_version, package_name,
+ package_source, package_version, supported_platform,
+ supported_py_versions, uploaded_at, wheels):
+ _test_model(is_valid=is_valid,
+ storage=empty_storage,
+ model_cls=Plugin,
+ model_kwargs=dict(
+ archive_name=archive_name,
+ distribution=distribution,
+ distribution_release=distribution_release,
+ distribution_version=distribution_version,
+ package_name=package_name,
+ package_source=package_source,
+ package_version=package_version,
+ supported_platform=supported_platform,
+ supported_py_versions=supported_py_versions,
+ uploaded_at=uploaded_at,
+ wheels=wheels,
+ ))
+
+
+class TestTask(object):
+
+ @pytest.mark.parametrize(
+ 'is_valid, status, due_at, started_at, ended_at, max_attempts, attempts_count, '
+ 'retry_interval, ignore_failure, name, operation_mapping, arguments, plugin_id',
+ [
+ (False, m_cls, now, now, now, 1, 1, 1, True, 'name', 'map', {}, '1'),
+ (False, Task.STARTED, m_cls, now, now, 1, 1, 1, True, 'name', 'map', {}, '1'),
+ (False, Task.STARTED, now, m_cls, now, 1, 1, 1, True, 'name', 'map', {}, '1'),
+ (False, Task.STARTED, now, now, m_cls, 1, 1, 1, True, 'name', 'map', {}, '1'),
+ (False, Task.STARTED, now, now, now, m_cls, 1, 1, True, 'name', 'map', {}, '1'),
+ (False, Task.STARTED, now, now, now, 1, m_cls, 1, True, 'name', 'map', {}, '1'),
+ (False, Task.STARTED, now, now, now, 1, 1, m_cls, True, 'name', 'map', {}, '1'),
+ (False, Task.STARTED, now, now, now, 1, 1, 1, True, m_cls, 'map', {}, '1'),
+ (False, Task.STARTED, now, now, now, 1, 1, 1, True, 'name', m_cls, {}, '1'),
+ (False, Task.STARTED, now, now, now, 1, 1, 1, True, 'name', 'map', m_cls, '1'),
+ (False, Task.STARTED, now, now, now, 1, 1, 1, True, 'name', 'map', {}, m_cls),
+ (False, Task.STARTED, now, now, now, 1, 1, 1, True, 'name', 'map', None, '1'),
+
+ (True, Task.STARTED, now, now, now, 1, 1, 1, True, 'name', 'map', {}, '1'),
+ (True, Task.STARTED, None, now, now, 1, 1, 1, True, 'name', 'map', {}, '1'),
+ (True, Task.STARTED, now, None, now, 1, 1, 1, True, 'name', 'map', {}, '1'),
+ (True, Task.STARTED, now, now, None, 1, 1, 1, True, 'name', 'map', {}, '1'),
+ (True, Task.STARTED, now, now, now, 1, None, 1, True, 'name', 'map', {}, '1'),
+ (True, Task.STARTED, now, now, now, 1, 1, None, True, 'name', 'map', {}, '1'),
+ (True, Task.STARTED, now, now, now, 1, 1, 1, None, 'name', 'map', {}, '1'),
+ (True, Task.STARTED, now, now, now, 1, 1, 1, True, None, 'map', {}, '1'),
+ (True, Task.STARTED, now, now, now, 1, 1, 1, True, 'name', None, {}, '1'),
+ (True, Task.STARTED, now, now, now, 1, 1, 1, True, 'name', 'map', {}, None),
+ ]
+ )
+ def test_task_model_creation(self, execution_storage, is_valid, status, due_at, started_at,
+ ended_at, max_attempts, attempts_count, retry_interval,
+ ignore_failure, name, operation_mapping, arguments, plugin_id):
+ task = _test_model(
+ is_valid=is_valid,
+ storage=execution_storage,
+ model_cls=Task,
+ model_kwargs=dict(
+ status=status,
+ execution=execution_storage.execution.list()[0],
+ due_at=due_at,
+ started_at=started_at,
+ ended_at=ended_at,
+ max_attempts=max_attempts,
+ attempts_count=attempts_count,
+ retry_interval=retry_interval,
+ ignore_failure=ignore_failure,
+ name=name,
+ function=operation_mapping,
+ arguments=arguments,
+ plugin_fk=plugin_id,
+ ))
+ if is_valid:
+ assert task.execution == execution_storage.execution.list()[0]
+ if task.plugin:
+ assert task.plugin == execution_storage.plugin.list()[0]
+
+ def test_task_max_attempts_validation(self):
+ def create_task(max_attempts):
+ Task(execution_fk='eid',
+ name='name',
+ function='',
+ arguments={},
+ max_attempts=max_attempts)
+ create_task(max_attempts=1)
+ create_task(max_attempts=2)
+ create_task(max_attempts=Task.INFINITE_RETRIES)
+ with pytest.raises(ValueError):
+ create_task(max_attempts=0)
+ with pytest.raises(ValueError):
+ create_task(max_attempts=-2)
+
+
+class TestType(object):
+ def test_type_hierarchy(self):
+ super_type = Type(variant='variant', name='super')
+ sub_type = Type(variant='variant', parent=super_type, name='sub')
+ additional_type = Type(variant='variant', name='non_related')
+
+ assert super_type.hierarchy == [super_type]
+ assert sub_type.hierarchy == [sub_type, super_type]
+ assert additional_type.hierarchy == [additional_type]
+
+ super_type.parent = additional_type
+
+ assert super_type.hierarchy == [super_type, additional_type]
+ assert sub_type.hierarchy == [sub_type, super_type, additional_type]
+
+
+class TestParameter(object):
+
+ MODELS_DERIVED_FROM_PARAMETER = (Input, Output, Property, Attribute, Configuration, Argument)
+
+ @pytest.mark.parametrize(
+ 'is_valid, name, type_name, description',
+ [
+ (False, 'name', 'int', []),
+ (False, 'name', [], 'desc'),
+ (False, [], 'type_name', 'desc'),
+ (True, 'name', 'type_name', 'desc'),
+ ]
+ )
+ def test_derived_from_parameter_model_creation(self, empty_storage, is_valid, name, type_name,
+ description):
+
+ for model_cls in self.MODELS_DERIVED_FROM_PARAMETER:
+ _test_model(is_valid=is_valid,
+ storage=empty_storage,
+ model_cls=model_cls,
+ model_kwargs=dict(
+ name=name,
+ type_name=type_name,
+ description=description,
+ _value={})
+ )
+
+ def test_as_argument(self):
+
+ for model_cls in self.MODELS_DERIVED_FROM_PARAMETER:
+ model = model_cls(name='name',
+ type_name='type_name',
+ description='description',
+ _value={})
+ argument = model.as_argument()
+ assert isinstance(argument, Argument)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/__init__.py
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/__init__.py
new file mode 100644
index 0000000..780db07
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/__init__.py
@@ -0,0 +1,36 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+from aria.orchestrator.workflows.core import engine, graph_compiler
+
+
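+# Builds the dotted import path of an operation function, defaulting to this
+# module when no module path is given.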
+def op_path(func, module_path=None):
+ module_path = module_path or sys.modules[__name__].__name__
+ return '{0}.{1}'.format(module_path, func.__name__)
+
+
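+# Compiles the API task graph returned by the workflow function and runs it on
+# the given executor through the core engine.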
+def execute(workflow_func, workflow_context, executor):
+ graph = workflow_func(ctx=workflow_context)
+
+ graph_compiler.GraphCompiler(workflow_context, executor.__class__).compile(graph)
+ eng = engine.Engine(executors={executor.__class__: executor})
+
+ eng.execute(workflow_context)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_context_instrumentation.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_context_instrumentation.py
new file mode 100644
index 0000000..6cc8096
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_context_instrumentation.py
@@ -0,0 +1,110 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from aria.modeling import models
+from aria.storage import collection_instrumentation
+from aria.orchestrator.context import operation
+
+from tests import (
+ mock,
+ storage
+)
+
+
+class TestContextInstrumentation(object):
+
+ @pytest.fixture
+ def workflow_ctx(self, tmpdir):
+ context = mock.context.simple(str(tmpdir), inmemory=True)
+ yield context
+ storage.release_sqlite_storage(context.model)
+
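+ # Only the instrumented column (models.Node.attributes) is wrapped for change
+ # tracking, and only while the instrument() context is active.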
+ def test_workflow_context_instrumentation(self, workflow_ctx):
+ with workflow_ctx.model.instrument(models.Node.attributes):
+ self._run_common_assertions(workflow_ctx, True)
+ self._run_common_assertions(workflow_ctx, False)
+
+ def test_operation_context_instrumentation(self, workflow_ctx):
+ node = workflow_ctx.model.node.list()[0]
+ task = models.Task(node=node)
+ workflow_ctx.model.task.put(task)
+
+ ctx = operation.NodeOperationContext(
+ task.id, node.id, name='', service_id=workflow_ctx.model.service.list()[0].id,
+ model_storage=workflow_ctx.model, resource_storage=workflow_ctx.resource,
+ execution_id=1)
+
+ with ctx.model.instrument(models.Node.attributes):
+ self._run_op_assertions(ctx, True)
+ self._run_common_assertions(ctx, True)
+
+ self._run_op_assertions(ctx, False)
+ self._run_common_assertions(ctx, False)
+
+ @staticmethod
+ def ctx_assert(expr, is_under_ctx):
+ if is_under_ctx:
+ assert expr
+ else:
+ assert not expr
+
+ def _run_op_assertions(self, ctx, is_under_ctx):
+ self.ctx_assert(isinstance(ctx.node.attributes,
+ collection_instrumentation._InstrumentedDict), is_under_ctx)
+ assert not isinstance(ctx.node.properties,
+ collection_instrumentation._InstrumentedCollection)
+
+ for rel in ctx.node.inbound_relationships:
+ self.ctx_assert(
+ isinstance(rel, collection_instrumentation._WrappedModel), is_under_ctx)
+ self.ctx_assert(
+ isinstance(rel.source_node.attributes,
+ collection_instrumentation._InstrumentedDict),
+ is_under_ctx)
+ self.ctx_assert(
+ isinstance(rel.target_node.attributes,
+ collection_instrumentation._InstrumentedDict),
+ is_under_ctx)
+
+ def _run_common_assertions(self, ctx, is_under_ctx):
+
+ for node in ctx.model.node:
+ self.ctx_assert(
+ isinstance(node.attributes, collection_instrumentation._InstrumentedDict),
+ is_under_ctx)
+ assert not isinstance(node.properties,
+ collection_instrumentation._InstrumentedCollection)
+
+ for rel in ctx.model.relationship:
+ self.ctx_assert(
+ isinstance(rel, collection_instrumentation._WrappedModel), is_under_ctx)
+
+ self.ctx_assert(
+ isinstance(rel.source_node.attributes,
+ collection_instrumentation._InstrumentedDict),
+ is_under_ctx)
+ self.ctx_assert(
+ isinstance(rel.target_node.attributes,
+ collection_instrumentation._InstrumentedDict),
+ is_under_ctx)
+
+ assert not isinstance(rel.source_node.properties,
+ collection_instrumentation._InstrumentedCollection)
+ assert not isinstance(rel.target_node.properties,
+ collection_instrumentation._InstrumentedCollection)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_operation.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_operation.py
new file mode 100644
index 0000000..111e121
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_operation.py
@@ -0,0 +1,502 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import time
+
+import pytest
+
+from aria.orchestrator.workflows.executor import process, thread
+
+from aria import (
+ workflow,
+ operation,
+)
+from aria.orchestrator import context
+from aria.orchestrator.workflows import api
+
+import tests
+from tests import (
+ mock,
+ storage,
+ helpers
+)
+from . import (
+ op_path,
+ execute,
+)
+
+
+@pytest.fixture
+def ctx(tmpdir):
+ context = mock.context.simple(
+ str(tmpdir),
+ context_kwargs=dict(workdir=str(tmpdir.join('workdir')))
+ )
+ yield context
+ storage.release_sqlite_storage(context.model)
+
+
+@pytest.fixture
+def thread_executor():
+ result = thread.ThreadExecutor()
+ try:
+ yield result
+ finally:
+ result.close()
+
+
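+# Operations may run in other threads or processes, so they report back to the
+# test through a small file-backed data holder.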
+@pytest.fixture
+def dataholder(tmpdir):
+ dataholder_path = str(tmpdir.join('dataholder'))
+ holder = helpers.FilesystemDataHolder(dataholder_path)
+ return holder
+
+
+def test_node_operation_task_execution(ctx, thread_executor, dataholder):
+ interface_name = 'Standard'
+ operation_name = 'create'
+
+ arguments = {'putput': True, 'holder_path': dataholder.path}
+ node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ interface = mock.models.create_interface(
+ node.service,
+ interface_name,
+ operation_name,
+ operation_kwargs=dict(function=op_path(basic_node_operation, module_path=__name__),
+ arguments=arguments)
+ )
+ node.interfaces[interface.name] = interface
+ ctx.model.node.update(node)
+
+ @workflow
+ def basic_workflow(graph, **_):
+ graph.add_tasks(
+ api.task.OperationTask(
+ node,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ arguments=arguments
+ )
+ )
+
+ execute(workflow_func=basic_workflow, workflow_context=ctx, executor=thread_executor)
+
+ assert dataholder['ctx_name'] == context.operation.NodeOperationContext.__name__
+
+ # Task-based assertions
+ assert dataholder['actor_name'] == node.name
+ assert dataholder['task_name'] == api.task.OperationTask.NAME_FORMAT.format(
+ type='node',
+ name=node.name,
+ interface=interface_name,
+ operation=operation_name
+ )
+ operations = interface.operations
+ assert len(operations) == 1
+ assert dataholder['function'] == operations.values()[0].function # pylint: disable=no-member
+ assert dataholder['arguments']['putput'] is True
+
+ # Context-based attributes (sugaring)
+ assert dataholder['template_name'] == node.node_template.name
+ assert dataholder['node_name'] == node.name
+
+
+def test_relationship_operation_task_execution(ctx, thread_executor, dataholder):
+ interface_name = 'Configure'
+ operation_name = 'post_configure'
+
+ arguments = {'putput': True, 'holder_path': dataholder.path}
+ relationship = ctx.model.relationship.list()[0]
+ interface = mock.models.create_interface(
+ relationship.source_node.service,
+ interface_name,
+ operation_name,
+ operation_kwargs=dict(function=op_path(basic_relationship_operation, module_path=__name__),
+ arguments=arguments),
+ )
+
+ relationship.interfaces[interface.name] = interface
+ ctx.model.relationship.update(relationship)
+
+ @workflow
+ def basic_workflow(graph, **_):
+ graph.add_tasks(
+ api.task.OperationTask(
+ relationship,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ arguments=arguments
+ )
+ )
+
+ execute(workflow_func=basic_workflow, workflow_context=ctx, executor=thread_executor)
+
+ assert dataholder['ctx_name'] == context.operation.RelationshipOperationContext.__name__
+
+ # Task-based assertions
+ assert dataholder['actor_name'] == relationship.name
+ assert interface_name in dataholder['task_name']
+ operations = interface.operations
+ assert dataholder['function'] == operations.values()[0].function # pylint: disable=no-member
+ assert dataholder['arguments']['putput'] is True
+
+ # Context-based attributes (sugaring)
+ dependency_node_template = ctx.model.node_template.get_by_name(
+ mock.models.DEPENDENCY_NODE_TEMPLATE_NAME)
+ dependency_node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ dependent_node_template = ctx.model.node_template.get_by_name(
+ mock.models.DEPENDENT_NODE_TEMPLATE_NAME)
+ dependent_node = ctx.model.node.get_by_name(mock.models.DEPENDENT_NODE_NAME)
+
+ assert dataholder['target_node_template_name'] == dependency_node_template.name
+ assert dataholder['target_node_name'] == dependency_node.name
+ assert dataholder['relationship_name'] == relationship.name
+ assert dataholder['source_node_template_name'] == dependent_node_template.name
+ assert dataholder['source_node_name'] == dependent_node.name
+
+
+def test_invalid_task_operation_id(ctx, thread_executor, dataholder):
+ """
+ Checks that the right id is used. The task created with id == 1, thus running the task on
+ node with id == 2. will check that indeed the node uses the correct id.
+ :param ctx:
+ :param thread_executor:
+ :return:
+ """
+ interface_name = 'Standard'
+ operation_name = 'create'
+
+ other_node, node = ctx.model.node.list()
+ assert other_node.id == 1
+ assert node.id == 2
+
+ interface = mock.models.create_interface(
+ node.service,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ operation_kwargs=dict(function=op_path(get_node_id, module_path=__name__),
+ arguments={'holder_path': dataholder.path})
+ )
+ node.interfaces[interface.name] = interface
+ ctx.model.node.update(node)
+
+ @workflow
+ def basic_workflow(graph, **_):
+ graph.add_tasks(
+ api.task.OperationTask(
+ node,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ )
+ )
+
+ execute(workflow_func=basic_workflow, workflow_context=ctx, executor=thread_executor)
+
+ op_node_id = dataholder[api.task.OperationTask.NAME_FORMAT.format(
+ type='node',
+ name=node.name,
+ interface=interface_name,
+ operation=operation_name
+ )]
+ assert op_node_id == node.id
+ assert op_node_id != other_node.id
+
+
+def test_plugin_workdir(ctx, thread_executor, tmpdir):
+ interface_name = 'Standard'
+ operation_name = 'create'
+
+ plugin = mock.models.create_plugin()
+ ctx.model.plugin.put(plugin)
+ node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ filename = 'test_file'
+ content = 'file content'
+ arguments = {'filename': filename, 'content': content}
+ interface = mock.models.create_interface(
+ node.service,
+ interface_name,
+ operation_name,
+ operation_kwargs=dict(
+ function='{0}.{1}'.format(__name__, _test_plugin_workdir.__name__),
+ plugin=plugin,
+ arguments=arguments)
+ )
+ node.interfaces[interface.name] = interface
+ ctx.model.node.update(node)
+
+ @workflow
+ def basic_workflow(graph, **_):
+ graph.add_tasks(api.task.OperationTask(
+ node,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ arguments=arguments))
+
+ execute(workflow_func=basic_workflow, workflow_context=ctx, executor=thread_executor)
+ expected_file = tmpdir.join('workdir', 'plugins', str(ctx.service.id),
+ plugin.name,
+ filename)
+ assert expected_file.read() == content
+
+
+@pytest.fixture(params=[
+ (thread.ThreadExecutor, {}),
+ (process.ProcessExecutor, {'python_path': [tests.ROOT_DIR]}),
+])
+def executor(request):
+ ex_cls, kwargs = request.param
+ ex = ex_cls(**kwargs)
+ try:
+ yield ex
+ finally:
+ ex.close()
+
+
+def test_node_operation_logging(ctx, executor):
+ interface_name, operation_name = mock.operations.NODE_OPERATIONS_INSTALL[0]
+
+ node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+
+ arguments = {
+ 'op_start': 'op_start',
+ 'op_end': 'op_end',
+ }
+ interface = mock.models.create_interface(
+ node.service,
+ interface_name,
+ operation_name,
+ operation_kwargs=dict(
+ function=op_path(logged_operation, module_path=__name__),
+ arguments=arguments)
+ )
+ node.interfaces[interface.name] = interface
+ ctx.model.node.update(node)
+
+ @workflow
+ def basic_workflow(graph, **_):
+ graph.add_tasks(
+ api.task.OperationTask(
+ node,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ arguments=arguments
+ )
+ )
+ execute(workflow_func=basic_workflow, workflow_context=ctx, executor=executor)
+ _assert_logging(ctx, arguments)
+
+
+def test_relationship_operation_logging(ctx, executor):
+ interface_name, operation_name = mock.operations.RELATIONSHIP_OPERATIONS_INSTALL[0]
+
+ relationship = ctx.model.relationship.list()[0]
+ arguments = {
+ 'op_start': 'op_start',
+ 'op_end': 'op_end',
+ }
+ interface = mock.models.create_interface(
+ relationship.source_node.service,
+ interface_name,
+ operation_name,
+ operation_kwargs=dict(function=op_path(logged_operation, module_path=__name__),
+ arguments=arguments)
+ )
+ relationship.interfaces[interface.name] = interface
+ ctx.model.relationship.update(relationship)
+
+ @workflow
+ def basic_workflow(graph, **_):
+ graph.add_tasks(
+ api.task.OperationTask(
+ relationship,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ arguments=arguments
+ )
+ )
+
+ execute(workflow_func=basic_workflow, workflow_context=ctx, executor=executor)
+ _assert_logging(ctx, arguments)
+
+
+def test_attribute_consumption(ctx, executor, dataholder):
+ # region Updating node operation
+ node_int_name, node_op_name = mock.operations.NODE_OPERATIONS_INSTALL[0]
+
+ source_node = ctx.model.node.get_by_name(mock.models.DEPENDENT_NODE_NAME)
+
+ arguments = {'dict_': {'key': 'value'},
+ 'set_test_dict': {'key2': 'value2'}}
+ interface = mock.models.create_interface(
+ source_node.service,
+ node_int_name,
+ node_op_name,
+ operation_kwargs=dict(
+ function=op_path(attribute_altering_operation, module_path=__name__),
+ arguments=arguments)
+ )
+ source_node.interfaces[interface.name] = interface
+ ctx.model.node.update(source_node)
+ # endregion
+
+ # region updating relationship operation
+ rel_int_name, rel_op_name = mock.operations.RELATIONSHIP_OPERATIONS_INSTALL[2]
+
+ relationship = ctx.model.relationship.list()[0]
+ interface = mock.models.create_interface(
+ relationship.source_node.service,
+ rel_int_name,
+ rel_op_name,
+ operation_kwargs=dict(
+ function=op_path(attribute_consuming_operation, module_path=__name__),
+ arguments={'holder_path': dataholder.path}
+ )
+ )
+ relationship.interfaces[interface.name] = interface
+ ctx.model.relationship.update(relationship)
+ # endregion
+
+ @workflow
+ def basic_workflow(graph, **_):
+ graph.sequence(
+ api.task.OperationTask(
+ source_node,
+ interface_name=node_int_name,
+ operation_name=node_op_name,
+ arguments=arguments
+ ),
+ api.task.OperationTask(
+ relationship,
+ interface_name=rel_int_name,
+ operation_name=rel_op_name,
+ )
+ )
+
+ execute(workflow_func=basic_workflow, workflow_context=ctx, executor=executor)
+ target_node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+
+ assert len(source_node.attributes) == len(target_node.attributes) == 2
+ assert source_node.attributes['key'] != target_node.attributes['key']
+ assert source_node.attributes['key'].value == \
+ target_node.attributes['key'].value == \
+ dataholder['key'] == 'value'
+
+ assert source_node.attributes['key2'] != target_node.attributes['key2']
+ assert source_node.attributes['key2'].value == \
+ target_node.attributes['key2'].value == \
+ dataholder['key2'] == 'value2'
+
+
+def _assert_logging(ctx, arguments):
+ # The logs should contain the following: Workflow Start, Operation Start, custom operation
+ # log string (op_start), custom operation log string (op_end), Operation End, Workflow End.
+
+ executions = ctx.model.execution.list()
+ assert len(executions) == 1
+ execution = executions[0]
+
+ tasks = ctx.model.task.list(filters={'_stub_type': None})
+ assert len(tasks) == 1
+ task = tasks[0]
+ assert len(task.logs) == 4
+
+ logs = ctx.model.log.list()
+ assert len(logs) == len(execution.logs) == 6
+ assert set(logs) == set(execution.logs)
+
+ assert all(l.execution == execution for l in logs)
+ assert all(l in logs and l.task == task for l in task.logs)
+
+ op_start_log = [l for l in logs if arguments['op_start'] in l.msg and l.level.lower() == 'info']
+ assert len(op_start_log) == 1
+ op_start_log = op_start_log[0]
+
+ op_end_log = [l for l in logs if arguments['op_end'] in l.msg and l.level.lower() == 'debug']
+ assert len(op_end_log) == 1
+ op_end_log = op_end_log[0]
+
+ assert op_start_log.created_at < op_end_log.created_at
+
+
+@operation
+def logged_operation(ctx, **_):
+ ctx.logger.info(ctx.task.arguments['op_start'].value)
+ # sleep so the two log entries get measurably different created_at timestamps
+ time.sleep(1)
+ ctx.logger.debug(ctx.task.arguments['op_end'].value)
+
+
+@operation
+def basic_node_operation(ctx, holder_path, **_):
+ holder = helpers.FilesystemDataHolder(holder_path)
+
+ operation_common(ctx, holder)
+ holder['template_name'] = ctx.node_template.name
+ holder['node_name'] = ctx.node.name
+
+
+@operation
+def basic_relationship_operation(ctx, holder_path, **_):
+ holder = helpers.FilesystemDataHolder(holder_path)
+
+ operation_common(ctx, holder)
+ holder['target_node_template_name'] = ctx.target_node_template.name
+ holder['target_node_name'] = ctx.target_node.name
+ holder['relationship_name'] = ctx.relationship.name
+ holder['source_node_template_name'] = ctx.source_node_template.name
+ holder['source_node_name'] = ctx.source_node.name
+
+
+def operation_common(ctx, holder):
+ holder['ctx_name'] = ctx.__class__.__name__
+
+ holder['actor_name'] = ctx.task.actor.name
+ holder['task_name'] = ctx.task.name
+ holder['function'] = ctx.task.function
+ holder['arguments'] = dict(i.unwrapped for i in ctx.task.arguments.itervalues())
+
+
+@operation
+def get_node_id(ctx, holder_path, **_):
+ helpers.FilesystemDataHolder(holder_path)[ctx.name] = ctx.node.id
+
+
+@operation
+def _test_plugin_workdir(ctx, filename, content):
+ with open(os.path.join(ctx.plugin_workdir, filename), 'w') as f:
+ f.write(content)
+
+
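+# Mutates node attributes both via update() and via item assignment, so both
+# write paths are exercised by test_attribute_consumption.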
+@operation
+def attribute_altering_operation(ctx, dict_, set_test_dict, **_):
+ ctx.node.attributes.update(dict_)
+
+ for key, value in set_test_dict.items():
+ ctx.node.attributes[key] = value
+
+
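+# Copies the source node's attributes onto the target node and records them in
+# the holder for the test to verify.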
+@operation
+def attribute_consuming_operation(ctx, holder_path, **_):
+ holder = helpers.FilesystemDataHolder(holder_path)
+ ctx.target_node.attributes.update(ctx.source_node.attributes)
+ holder.update(**ctx.target_node.attributes)
+
+ ctx.target_node.attributes['key2'] = ctx.source_node.attributes['key2']
+ holder['key2'] = ctx.target_node.attributes['key2']
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_resource_render.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_resource_render.py
new file mode 100644
index 0000000..8249086
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_resource_render.py
@@ -0,0 +1,74 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from tests import mock, storage
+
+_IMPLICIT_CTX_TEMPLATE = '{{ctx.service.name}}'
+_IMPLICIT_CTX_TEMPLATE_PATH = 'implicit-ctx.template'
+_VARIABLES_TEMPLATE = '{{variable}}'
+_VARIABLES_TEMPLATE_PATH = 'variables.template'
+
+
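+# The autouse resources fixture below uploads both templates into the service
+# resource storage before each test.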
+def test_get_resource_and_render_implicit_ctx_no_variables(ctx):
+ content = ctx.get_resource_and_render(_IMPLICIT_CTX_TEMPLATE_PATH)
+ assert content == mock.models.SERVICE_NAME
+
+
+def test_get_resource_and_render_provided_variables(ctx):
+ variable = 'VARIABLE'
+ content = ctx.get_resource_and_render(_VARIABLES_TEMPLATE_PATH,
+ variables={'variable': variable})
+ assert content == variable
+
+
+def test_download_resource_and_render_implicit_ctx_no_variables(tmpdir, ctx):
+ destination = tmpdir.join('destination')
+ ctx.download_resource_and_render(destination=str(destination),
+ path=_IMPLICIT_CTX_TEMPLATE_PATH)
+ assert destination.read() == mock.models.SERVICE_NAME
+
+
+def test_download_resource_and_render_provided_variables(tmpdir, ctx):
+ destination = tmpdir.join('destination')
+ variable = 'VARIABLE'
+ ctx.download_resource_and_render(destination=str(destination),
+ path=_VARIABLES_TEMPLATE_PATH,
+ variables={'variable': variable})
+ assert destination.read() == variable
+
+
+@pytest.fixture
+def ctx(tmpdir):
+ context = mock.context.simple(str(tmpdir))
+ yield context
+ storage.release_sqlite_storage(context.model)
+
+
+@pytest.fixture(autouse=True)
+def resources(tmpdir, ctx):
+ implicit_ctx_template_path = tmpdir.join(_IMPLICIT_CTX_TEMPLATE_PATH)
+ implicit_ctx_template_path.write(_IMPLICIT_CTX_TEMPLATE)
+ variables_template_path = tmpdir.join(_VARIABLES_TEMPLATE_PATH)
+ variables_template_path.write(_VARIABLES_TEMPLATE)
+ ctx.resource.service.upload(entry_id='1',
+ source=str(implicit_ctx_template_path),
+ path=_IMPLICIT_CTX_TEMPLATE_PATH)
+ ctx.resource.service.upload(entry_id='1',
+ source=str(variables_template_path),
+ path=_VARIABLES_TEMPLATE_PATH)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_serialize.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_serialize.py
new file mode 100644
index 0000000..091e23c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_serialize.py
@@ -0,0 +1,106 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from aria.orchestrator.workflows import api
+from aria.orchestrator.workflows.core import engine, graph_compiler
+from aria.orchestrator.workflows.executor import process
+from aria.orchestrator import workflow, operation
+import tests
+from tests import mock
+from tests import storage
+
+TEST_FILE_CONTENT = 'CONTENT'
+TEST_FILE_ENTRY_ID = 'entry'
+TEST_FILE_NAME = 'test_file'
+
+
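+# Running under ProcessExecutor forces the operation context to be serialized
+# into a child process; the actual assertions live in _mock_operation below.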
+def test_serialize_operation_context(context, executor, tmpdir):
+ test_file = tmpdir.join(TEST_FILE_NAME)
+ test_file.write(TEST_FILE_CONTENT)
+ resource = context.resource
+ resource.service_template.upload(TEST_FILE_ENTRY_ID, str(test_file))
+
+ node = context.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ plugin = mock.models.create_plugin()
+ context.model.plugin.put(plugin)
+ interface = mock.models.create_interface(
+ node.service,
+ 'test',
+ 'op',
+ operation_kwargs=dict(function=_operation_mapping(),
+ plugin=plugin)
+ )
+ node.interfaces[interface.name] = interface
+ context.model.node.update(node)
+
+ graph = _mock_workflow(ctx=context) # pylint: disable=no-value-for-parameter
+ graph_compiler.GraphCompiler(context, executor.__class__).compile(graph)
+ eng = engine.Engine({executor.__class__: executor})
+ eng.execute(context)
+
+
+@workflow
+def _mock_workflow(ctx, graph):
+ node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ graph.add_tasks(api.task.OperationTask(node, interface_name='test', operation_name='op'))
+ return graph
+
+
+@operation
+def _mock_operation(ctx):
+ # We test several things in this operation
+ # ctx.task, ctx.node, etc... tell us that the model storage was properly re-created
+ # a correct ctx.task.function tells us we kept the correct task_id
+ assert ctx.task.function == _operation_mapping()
+ # a correct ctx.node.name tells us we kept the correct actor_id
+ assert ctx.node.name == mock.models.DEPENDENCY_NODE_NAME
+ # a correct ctx.name tells us we kept the correct name
+ assert ctx.name is not None
+ assert ctx.name == ctx.task.name
+ # a correct ctx.service.name tells us we kept the correct service_id
+ assert ctx.service.name == mock.models.SERVICE_NAME
+ # Here we test that the resource storage was properly re-created
+ test_file_content = ctx.resource.service_template.read(TEST_FILE_ENTRY_ID, TEST_FILE_NAME)
+ assert test_file_content == TEST_FILE_CONTENT
+ # a non-empty plugin workdir tells us that we kept the correct base_workdir
+ assert ctx.plugin_workdir is not None
+
+
+def _operation_mapping():
+ return '{name}.{func.__name__}'.format(name=__name__, func=_mock_operation)
+
+
+@pytest.fixture
+def executor():
+ result = process.ProcessExecutor(python_path=[tests.ROOT_DIR])
+ try:
+ yield result
+ finally:
+ result.close()
+
+
+@pytest.fixture
+def context(tmpdir):
+ result = mock.context.simple(
+ str(tmpdir),
+ context_kwargs=dict(workdir=str(tmpdir.join('workdir')))
+ )
+
+ yield result
+ storage.release_sqlite_storage(result.model)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_toolbelt.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_toolbelt.py
new file mode 100644
index 0000000..4de9e55
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_toolbelt.py
@@ -0,0 +1,166 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from aria import workflow, operation
+from aria.modeling import models
+from aria.orchestrator import context
+from aria.orchestrator.workflows import api
+from aria.orchestrator.workflows.executor import thread
+
+from tests import (
+ mock,
+ storage,
+ helpers
+)
+from . import (
+ op_path,
+ execute,
+)
+
+
+@pytest.fixture
+def workflow_context(tmpdir):
+ context = mock.context.simple(str(tmpdir))
+ yield context
+ storage.release_sqlite_storage(context.model)
+
+
+@pytest.fixture
+def executor():
+ result = thread.ThreadExecutor()
+ try:
+ yield result
+ finally:
+ result.close()
+
+
+@pytest.fixture
+def dataholder(tmpdir):
+ dataholder_path = str(tmpdir.join('dataholder'))
+ holder = helpers.FilesystemDataHolder(dataholder_path)
+ return holder
+
+
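+# Wires the mock topology so the nodes act as their own hosts, which is what
+# the toolbelt's host_ip lookup traverses.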
+def _get_elements(workflow_context):
+ dependency_node_template = workflow_context.model.node_template.get_by_name(
+ mock.models.DEPENDENCY_NODE_TEMPLATE_NAME)
+ dependency_node_template.host = dependency_node_template
+ workflow_context.model.node_template.update(dependency_node_template)
+
+ dependency_node = workflow_context.model.node.get_by_name(
+ mock.models.DEPENDENCY_NODE_NAME)
+ dependency_node.host_fk = dependency_node.id
+ workflow_context.model.node.update(dependency_node)
+
+ dependent_node_template = workflow_context.model.node_template.get_by_name(
+ mock.models.DEPENDENT_NODE_TEMPLATE_NAME)
+ dependent_node_template.host = dependency_node_template
+ workflow_context.model.node_template.update(dependent_node_template)
+
+ dependent_node = workflow_context.model.node.get_by_name(
+ mock.models.DEPENDENT_NODE_NAME)
+ dependent_node.host = dependent_node
+ workflow_context.model.node.update(dependent_node)
+
+ relationship = workflow_context.model.relationship.list()[0]
+ return dependency_node_template, dependency_node, dependent_node_template, dependent_node, \
+ relationship
+
+
+def test_host_ip(workflow_context, executor, dataholder):
+
+ interface_name = 'Standard'
+ operation_name = 'create'
+ _, dependency_node, _, _, _ = _get_elements(workflow_context)
+ arguments = {'putput': True, 'holder_path': dataholder.path}
+ interface = mock.models.create_interface(
+ dependency_node.service,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ operation_kwargs=dict(function=op_path(host_ip, module_path=__name__), arguments=arguments)
+ )
+ dependency_node.interfaces[interface.name] = interface
+ dependency_node.attributes['ip'] = models.Attribute.wrap('ip', '1.1.1.1')
+
+ workflow_context.model.node.update(dependency_node)
+
+ @workflow
+ def basic_workflow(graph, **_):
+ graph.add_tasks(
+ api.task.OperationTask(
+ dependency_node,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ arguments=arguments
+ )
+ )
+
+ execute(workflow_func=basic_workflow, workflow_context=workflow_context, executor=executor)
+
+ assert dataholder.get('host_ip') == dependency_node.attributes.get('ip').value
+
+
+def test_relationship_tool_belt(workflow_context, executor, dataholder):
+ interface_name = 'Configure'
+ operation_name = 'post_configure'
+ _, _, _, _, relationship = _get_elements(workflow_context)
+ arguments = {'putput': True, 'holder_path': dataholder.path}
+ interface = mock.models.create_interface(
+ relationship.source_node.service,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ operation_kwargs=dict(function=op_path(relationship_operation, module_path=__name__),
+ arguments=arguments)
+ )
+ relationship.interfaces[interface.name] = interface
+ workflow_context.model.relationship.update(relationship)
+
+ @workflow
+ def basic_workflow(graph, **_):
+ graph.add_tasks(
+ api.task.OperationTask(
+ relationship,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ arguments=arguments
+ )
+ )
+
+ execute(workflow_func=basic_workflow, workflow_context=workflow_context, executor=executor)
+
+ assert dataholder.get(api.task.OperationTask.NAME_FORMAT.format(
+ type='relationship',
+ name=relationship.name,
+ interface=interface_name,
+ operation=operation_name)) == relationship.source_node.name
+
+
+def test_wrong_model_toolbelt():
+ with pytest.raises(RuntimeError):
+ context.toolbelt(None)
+
+
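+# Operations under test: toolbelt=True asks the orchestrator to inject a
+# 'toolbelt' argument alongside the regular operation arguments.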
+@operation(toolbelt=True)
+def host_ip(toolbelt, holder_path, **_):
+ helpers.FilesystemDataHolder(holder_path)['host_ip'] = toolbelt.host_ip
+
+
+@operation(toolbelt=True)
+def relationship_operation(ctx, toolbelt, holder_path, **_):
+ helpers.FilesystemDataHolder(holder_path)[ctx.name] = toolbelt._op_context.source_node.name
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_workflow.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_workflow.py
new file mode 100644
index 0000000..6d53c2a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/context/test_workflow.py
@@ -0,0 +1,126 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from datetime import datetime
+
+import pytest
+
+from aria import application_model_storage, workflow
+from aria.orchestrator import context
+from aria.storage import sql_mapi
+from aria.orchestrator.workflows.executor import thread, process
+
+from tests import storage as test_storage, ROOT_DIR
+from ... import mock
+from . import execute
+
+
+class TestWorkflowContext(object):
+
+ def test_execution_creation_on_workflow_context_creation(self, storage):
+ ctx = self._create_ctx(storage)
+ execution = storage.execution.get(ctx.execution.id) # pylint: disable=no-member
+ assert execution.service == storage.service.get_by_name(
+ mock.models.SERVICE_NAME)
+ assert execution.workflow_name == mock.models.WORKFLOW_NAME
+ assert execution.service_template == storage.service_template.get_by_name(
+ mock.models.SERVICE_TEMPLATE_NAME)
+ assert execution.status == storage.execution.model_cls.PENDING
+ assert execution.inputs == {}
+ assert execution.created_at <= datetime.utcnow()
+
+    def test_subsequent_workflow_context_creation_does_not_fail(self, storage):
+ self._create_ctx(storage)
+ self._create_ctx(storage)
+
+ @staticmethod
+ def _create_ctx(storage):
+ """
+
+ :param storage:
+ :return WorkflowContext:
+ """
+ service = storage.service.get_by_name(mock.models.SERVICE_NAME)
+ return context.workflow.WorkflowContext(
+ name='simple_context',
+ model_storage=storage,
+ resource_storage=None,
+            service_id=service.id,
+ execution_id=storage.execution.list(filters=dict(service=service))[0].id,
+ workflow_name=mock.models.WORKFLOW_NAME,
+ task_max_attempts=mock.models.TASK_MAX_ATTEMPTS,
+ task_retry_interval=mock.models.TASK_RETRY_INTERVAL
+ )
+
+ @pytest.fixture
+ def storage(self):
+ workflow_storage = application_model_storage(
+ sql_mapi.SQLAlchemyModelAPI, initiator=test_storage.init_inmemory_model_storage)
+ workflow_storage.service_template.put(mock.models.create_service_template())
+ service_template = workflow_storage.service_template.get_by_name(
+ mock.models.SERVICE_TEMPLATE_NAME)
+ service = mock.models.create_service(service_template)
+ workflow_storage.service.put(service)
+ workflow_storage.execution.put(mock.models.create_execution(service))
+ yield workflow_storage
+ test_storage.release_sqlite_storage(workflow_storage)
+
+
+@pytest.fixture
+def ctx(tmpdir):
+    workflow_ctx = mock.context.simple(
+        str(tmpdir),
+        context_kwargs=dict(workdir=str(tmpdir.join('workdir')))
+    )
+    yield workflow_ctx
+    test_storage.release_sqlite_storage(workflow_ctx.model)
+
+
+@pytest.fixture(params=[
+ (thread.ThreadExecutor, {}),
+ (process.ProcessExecutor, {'python_path': [ROOT_DIR]}),
+])
+def executor(request):
+ executor_cls, executor_kwargs = request.param
+ result = executor_cls(**executor_kwargs)
+ try:
+ yield result
+ finally:
+ result.close()
+
+
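+# Attribute reads and writes made inside a workflow function go through the
+# same model storage as operation tasks, so they must be visible once the
+# workflow has executed.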
+def test_attribute_consumption(ctx, executor):
+
+ node = ctx.model.node.get_by_name(mock.models.DEPENDENT_NODE_NAME)
+ node.attributes['key'] = ctx.model.attribute.model_cls.wrap('key', 'value')
+ node.attributes['key2'] = ctx.model.attribute.model_cls.wrap('key2', 'value_to_change')
+ ctx.model.node.update(node)
+
+ assert node.attributes['key'].value == 'value'
+ assert node.attributes['key2'].value == 'value_to_change'
+
+ @workflow
+ def basic_workflow(ctx, **_):
+ node = ctx.model.node.get_by_name(mock.models.DEPENDENT_NODE_NAME)
+ node.attributes['new_key'] = 'new_value'
+ node.attributes['key2'] = 'changed_value'
+
+ execute(workflow_func=basic_workflow, workflow_context=ctx, executor=executor)
+ node = ctx.model.node.get_by_name(mock.models.DEPENDENT_NODE_NAME)
+
+ assert len(node.attributes) == 3
+ assert node.attributes['key'].value == 'value'
+ assert node.attributes['new_key'].value == 'new_value'
+ assert node.attributes['key2'].value == 'changed_value'
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/__init__.py
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_common.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_common.py
new file mode 100644
index 0000000..dd1e9fb
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_common.py
@@ -0,0 +1,193 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import namedtuple
+
+import requests
+import pytest
+
+from aria.modeling import models
+from aria.orchestrator import exceptions
+from aria.orchestrator.execution_plugin import common
+
+
+class TestDownloadScript(object):
+
+ @pytest.fixture(autouse=True)
+ def patch_requests(self, mocker):
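+        # Stub requests.get so that the "downloaded" script body is simply the
+        # URL itself; the tests below assert this body round-trips to disk.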
+ def _mock_requests_get(url):
+ response = namedtuple('Response', 'text status_code')
+ return response(url, self.status_code)
+ self.status_code = 200
+ mocker.patch.object(requests, 'get', _mock_requests_get)
+
+ def _test_url(self, url):
+ class Ctx(object):
+ task = models.Task
+
+ script_path = url
+ result = common.download_script(Ctx, script_path)
+ with open(result) as f:
+ assert script_path == f.read()
+ assert result.endswith('-some_script.py')
+
+ def test_http_url(self):
+ self._test_url('http://localhost/some_script.py')
+
+ def test_https_url(self):
+ self._test_url('https://localhost/some_script.py')
+
+ def test_url_status_code_404(self):
+ self.status_code = 404
+ with pytest.raises(exceptions.TaskAbortException) as exc_ctx:
+ self.test_http_url()
+ exception = exc_ctx.value
+ assert 'status code: 404' in str(exception)
+
+ def test_blueprint_resource(self):
+ test_script_path = 'my_script.py'
+
+ class Ctx(object):
+ @staticmethod
+ def download_resource(destination, path):
+ assert path == test_script_path
+ return destination
+ result = common.download_script(Ctx, test_script_path)
+ assert result.endswith(test_script_path)
+
+
+class TestCreateProcessConfig(object):
+
+ def test_plain_command(self):
+ script_path = 'path'
+ process = common.create_process_config(
+ script_path=script_path,
+ process={},
+ operation_kwargs={})
+ assert process['command'] == script_path
+
+ def test_command_with_args(self):
+ script_path = 'path'
+ process = {'args': [1, 2, 3]}
+ process = common.create_process_config(
+ script_path=script_path,
+ process=process,
+ operation_kwargs={})
+ assert process['command'] == '{0} 1 2 3'.format(script_path)
+
+ def test_command_prefix(self):
+ script_path = 'path'
+ command_prefix = 'prefix'
+ process = {'command_prefix': command_prefix}
+ process = common.create_process_config(
+ script_path=script_path,
+ process=process,
+ operation_kwargs={})
+ assert process['command'] == '{0} {1}'.format(command_prefix, script_path)
+
+ def test_command_with_args_and_prefix(self):
+ script_path = 'path'
+ command_prefix = 'prefix'
+ process = {'command_prefix': command_prefix,
+ 'args': [1, 2, 3]}
+ process = common.create_process_config(
+ script_path=script_path,
+ process=process,
+ operation_kwargs={})
+ assert process['command'] == '{0} {1} 1 2 3'.format(command_prefix, script_path)
+
+ def test_ctx_is_removed(self):
+ process = common.create_process_config(
+ script_path='',
+ process={},
+ operation_kwargs={'ctx': 1})
+ assert 'ctx' not in process['env']
+
+ def test_env_passed_explicitly(self):
+ env = {'one': '1', 'two': '2'}
+ process = common.create_process_config(
+ script_path='',
+ process={'env': env},
+ operation_kwargs={})
+ assert process['env'] == env
+
+ def test_env_populated_from_operation_kwargs(self):
+ operation_kwargs = {'one': '1', 'two': '2'}
+ process = common.create_process_config(
+ script_path='',
+ process={},
+ operation_kwargs=operation_kwargs)
+ assert process['env'] == operation_kwargs
+
+ def test_env_merged_from_operation_kwargs_and_process(self):
+ operation_kwargs = {'one': '1', 'two': '2'}
+ env = {'three': '3', 'four': '4'}
+ process = common.create_process_config(
+ script_path='',
+ process={'env': env},
+ operation_kwargs=operation_kwargs)
+ assert process['env'] == dict(operation_kwargs.items() + env.items())
+
+ def test_process_env_gets_precedence_over_operation_kwargs(self):
+ operation_kwargs = {'one': 'from_kwargs'}
+ env = {'one': 'from_env_process'}
+ process = common.create_process_config(
+ script_path='',
+ process={'env': env},
+ operation_kwargs=operation_kwargs)
+ assert process['env'] == env
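+    # Together, the tests above pin down the env-merge contract: operation
+    # kwargs seed the environment, and an explicit process['env'] wins on
+    # key collisions.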
+
+ def test_json_env_vars(self, mocker):
+ mocker.patch.object(common, 'is_windows', lambda: False)
+ operation_kwargs = {'a_dict': {'key': 'value'},
+ 'a_list': ['a', 'b', 'c'],
+ 'a_tuple': (4, 5, 6),
+ 'a_bool': True}
+ process = common.create_process_config(
+ script_path='',
+ process={},
+ operation_kwargs=operation_kwargs)
+ assert process['env'] == {'a_dict': '{"key": "value"}',
+ 'a_list': '["a", "b", "c"]',
+ 'a_tuple': '[4, 5, 6]',
+ 'a_bool': 'true'}
+
+ def test_quote_json_env_vars(self):
+ operation_kwargs = {'one': []}
+ process = common.create_process_config(
+ script_path='',
+ process={},
+ operation_kwargs=operation_kwargs,
+ quote_json_env_vars=True)
+ assert process['env']['one'] == "'[]'"
+
+ def test_env_keys_converted_to_string_on_windows(self, mocker):
+ mocker.patch.object(common, 'is_windows', lambda: True)
+ env = {u'one': '1'}
+ process = common.create_process_config(
+ script_path='',
+ process={'env': env},
+ operation_kwargs={})
+ assert isinstance(process['env'].keys()[0], str)
+
+ def test_env_values_quotes_are_escaped_on_windows(self, mocker):
+ mocker.patch.object(common, 'is_windows', lambda: True)
+ env = {'one': '"hello"'}
+ process = common.create_process_config(
+ script_path='',
+ process={'env': env},
+ operation_kwargs={})
+ assert process['env']['one'] == '\\"hello\\"'
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_ctx_proxy_server.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_ctx_proxy_server.py
new file mode 100644
index 0000000..94b7409
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_ctx_proxy_server.py
@@ -0,0 +1,285 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import time
+import sys
+import subprocess
+import StringIO
+
+import pytest
+
+from aria.orchestrator.execution_plugin import ctx_proxy
+
+
+class TestCtxProxy(object):
+
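+    # Each test round-trips a request through the proxy server using the
+    # client wire protocol: plain arguments walk attributes and dict keys,
+    # '[' ... ']' delimits method-call arguments, and a trailing '=' assigns
+    # a value.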
+ def test_attribute_access(self, server):
+ response = self.request(server, 'stub_attr', 'some_property')
+ assert response == 'some_value'
+
+ def test_sugared_attribute_access(self, server):
+ response = self.request(server, 'stub-attr', 'some-property')
+ assert response == 'some_value'
+
+ def test_dict_prop_access_get_key(self, server):
+ response = self.request(server, 'node', 'properties', 'prop1')
+ assert response == 'value1'
+
+ def test_dict_prop_access_get_key_nested(self, server):
+ response = self.request(server, 'node', 'properties', 'prop2', 'nested_prop1')
+ assert response == 'nested_value1'
+
+ def test_dict_prop_access_get_with_list_index(self, server):
+ response = self.request(server, 'node', 'properties', 'prop3', 2, 'value')
+ assert response == 'value_2'
+
+ def test_dict_prop_access_set(self, server, ctx):
+ self.request(server, 'node', 'properties', 'prop4', 'key', '=', 'new_value')
+ self.request(server, 'node', 'properties', 'prop3', 2, 'value', '=', 'new_value_2')
+ self.request(server, 'node', 'properties', 'prop4', 'some', 'new', 'path', '=',
+ 'some_new_value')
+ assert ctx.node.properties['prop4']['key'] == 'new_value'
+ assert ctx.node.properties['prop3'][2]['value'] == 'new_value_2'
+ assert ctx.node.properties['prop4']['some']['new']['path'] == 'some_new_value'
+
+ def test_dict_prop_access_set_with_list_index(self, server, ctx):
+ self.request(server, 'node', 'properties', 'prop3', 2, '=', 'new_value')
+ assert ctx.node.properties['prop3'][2] == 'new_value'
+
+ def test_illegal_dict_access(self, server):
+ self.request(server, 'node', 'properties', 'prop4', 'key', '=', 'new_value')
+ with pytest.raises(RuntimeError):
+ self.request(server, 'node', 'properties', 'prop4', 'key', '=', 'new_value', 'what')
+
+ def test_method_invocation(self, server):
+ args = ['[', 'arg1', 'arg2', 'arg3', ']']
+ response_args = self.request(server, 'stub-method', *args)
+ assert response_args == args[1:-1]
+
+ def test_method_invocation_no_args(self, server):
+ response = self.request(server, 'stub-method', '[', ']')
+ assert response == []
+
+ def test_method_return_value(self, server, ctx):
+ response_args = self.request(server, 'node', 'get_prop', '[', 'prop2', ']', 'nested_prop1')
+ assert response_args == 'nested_value1'
+
+ def test_method_return_value_set(self, server, ctx):
+ self.request(
+ server, 'node', 'get_prop', '[', 'prop2', ']', 'nested_prop1', '=', 'new_value')
+ assert ctx.node.properties['prop2']['nested_prop1'] == 'new_value'
+
+ def test_empty_return_value(self, server):
+ response = self.request(server, 'stub_none')
+ assert response is None
+
+ def test_client_request_timeout(self, server):
+ with pytest.raises(IOError):
+ ctx_proxy.client._client_request(server.socket_url,
+ args=['stub-sleep', '[', '0.5', ']'],
+ timeout=0.1)
+
+ def test_processing_exception(self, server):
+ with pytest.raises(ctx_proxy.client._RequestError):
+ self.request(server, 'property_that_does_not_exist')
+
+ def test_not_json_serializable(self, server):
+ with pytest.raises(ctx_proxy.client._RequestError):
+ self.request(server, 'logger')
+
+ def test_no_string_arg(self, server):
+ args = ['[', 1, 2, ']']
+ response = self.request(server, 'stub_method', *args)
+ assert response == args[1:-1]
+
+ class StubAttribute(object):
+ some_property = 'some_value'
+
+ class NodeAttribute(object):
+ def __init__(self, properties):
+ self.properties = properties
+
+ def get_prop(self, name):
+ return self.properties[name]
+
+ @staticmethod
+ def stub_method(*args):
+ return args
+
+ @staticmethod
+ def stub_sleep(seconds):
+ time.sleep(float(seconds))
+
+ @staticmethod
+ def stub_args(arg1, arg2, arg3='arg3', arg4='arg4', *args, **kwargs):
+ return dict(
+ arg1=arg1,
+ arg2=arg2,
+ arg3=arg3,
+ arg4=arg4,
+ args=args,
+ kwargs=kwargs)
+
+ @pytest.fixture
+ def ctx(self, mocker):
+ class MockCtx(object):
+ INSTRUMENTATION_FIELDS = ()
+ ctx = MockCtx()
+ properties = {
+ 'prop1': 'value1',
+ 'prop2': {
+ 'nested_prop1': 'nested_value1'
+ },
+ 'prop3': [
+ {'index': 0, 'value': 'value_0'},
+ {'index': 1, 'value': 'value_1'},
+ {'index': 2, 'value': 'value_2'}
+ ],
+ 'prop4': {
+ 'key': 'value'
+ }
+ }
+ ctx.stub_none = None
+ ctx.stub_method = TestCtxProxy.stub_method
+ ctx.stub_sleep = TestCtxProxy.stub_sleep
+ ctx.stub_args = TestCtxProxy.stub_args
+ ctx.stub_attr = TestCtxProxy.StubAttribute()
+ ctx.node = TestCtxProxy.NodeAttribute(properties)
+ ctx.model = mocker.MagicMock()
+ return ctx
+
+ @pytest.fixture
+ def server(self, ctx):
+ result = ctx_proxy.server.CtxProxy(ctx)
+ result._close_session = lambda *args, **kwargs: {}
+ yield result
+ result.close()
+
+ def request(self, server, *args):
+ return ctx_proxy.client._client_request(server.socket_url, args, timeout=5)
+
+
+class TestArgumentParsing(object):
+
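+    # ctx_proxy.client.main is exercised end to end; the autouse
+    # patch_client_request fixture below swaps out the transport and asserts
+    # the parsed socket URL, args and timeout against self.expected.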
+ def test_socket_url_arg(self):
+ self.expected.update(dict(socket_url='sock_url'))
+ ctx_proxy.client.main(['--socket-url', self.expected.get('socket_url')])
+
+ def test_socket_url_env(self):
+ expected_socket_url = 'env_sock_url'
+ os.environ['CTX_SOCKET_URL'] = expected_socket_url
+ self.expected.update(dict(socket_url=expected_socket_url))
+ ctx_proxy.client.main([])
+
+ def test_socket_url_missing(self):
+ del os.environ['CTX_SOCKET_URL']
+ with pytest.raises(RuntimeError):
+ ctx_proxy.client.main([])
+
+ def test_args(self):
+ self.expected.update(dict(args=['1', '2', '3']))
+ ctx_proxy.client.main(self.expected.get('args'))
+
+ def test_timeout(self):
+ self.expected.update(dict(timeout='10'))
+ ctx_proxy.client.main(['--timeout', self.expected.get('timeout')])
+ self.expected.update(dict(timeout='15'))
+ ctx_proxy.client.main(['-t', self.expected.get('timeout')])
+
+ def test_mixed_order(self):
+ self.expected.update(dict(
+ args=['1', '2', '3'], timeout='20', socket_url='mixed_socket_url'))
+ ctx_proxy.client.main(
+ ['-t', self.expected.get('timeout')] +
+ ['--socket-url', self.expected.get('socket_url')] +
+ self.expected.get('args'))
+ ctx_proxy.client.main(
+ ['-t', self.expected.get('timeout')] +
+ self.expected.get('args') +
+ ['--socket-url', self.expected.get('socket_url')])
+ ctx_proxy.client.main(
+ self.expected.get('args') +
+ ['-t', self.expected.get('timeout')] +
+ ['--socket-url', self.expected.get('socket_url')])
+
+ def test_json_args(self):
+ args = ['@1', '@[1,2,3]', '@{"key":"value"}']
+ expected_args = [1, [1, 2, 3], {'key': 'value'}]
+ self.expected.update(dict(args=expected_args))
+ ctx_proxy.client.main(args)
+
+ def test_json_arg_prefix(self):
+ args = ['_1', '@1']
+ expected_args = [1, '@1']
+ self.expected.update(dict(args=expected_args))
+ ctx_proxy.client.main(args + ['--json-arg-prefix', '_'])
+
+ def test_json_output(self):
+ self.assert_valid_output('string', 'string', '"string"')
+ self.assert_valid_output(1, '1', '1')
+ self.assert_valid_output([1, '2'], "[1, '2']", '[1, "2"]')
+ self.assert_valid_output({'key': 1},
+ "{'key': 1}",
+ '{"key": 1}')
+ self.assert_valid_output(False, 'False', 'false')
+ self.assert_valid_output(True, 'True', 'true')
+ self.assert_valid_output([], '[]', '[]')
+ self.assert_valid_output({}, '{}', '{}')
+
+ def assert_valid_output(self, response, ex_typed_output, ex_json_output):
+ self.mock_response = response
+ current_stdout = sys.stdout
+
+ def run(args, expected):
+ output = StringIO.StringIO()
+ sys.stdout = output
+ ctx_proxy.client.main(args)
+ assert output.getvalue() == expected
+
+ try:
+ run([], ex_typed_output)
+ run(['-j'], ex_json_output)
+ run(['--json-output'], ex_json_output)
+ finally:
+ sys.stdout = current_stdout
+
+ def mock_client_request(self, socket_url, args, timeout):
+ assert socket_url == self.expected.get('socket_url')
+ assert args == self.expected.get('args')
+ assert timeout == int(self.expected.get('timeout'))
+ return self.mock_response
+
+ @pytest.fixture(autouse=True)
+ def patch_client_request(self, mocker):
+ mocker.patch.object(ctx_proxy.client,
+ ctx_proxy.client._client_request.__name__,
+ self.mock_client_request)
+ mocker.patch.dict('os.environ', {'CTX_SOCKET_URL': 'stub'})
+
+ @pytest.fixture(autouse=True)
+ def defaults(self):
+ self.expected = dict(args=[], timeout=30, socket_url='stub')
+ self.mock_response = None
+
+
+class TestCtxEntryPoint(object):
+
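+    # Sanity check that the 'ctx' console script is on the PATH and exits
+    # cleanly when invoked with --help.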
+ def test_ctx_in_path(self):
+ p = subprocess.Popen(['ctx', '--help'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ p.communicate()
+ assert not p.wait()
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_global_ctx.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_global_ctx.py
new file mode 100644
index 0000000..dad7547
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_global_ctx.py
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.orchestrator import execution_plugin
+
+
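+# python_script_scope exposes the operation ctx and inputs as module-level
+# globals while a Python script is evaluated in-process, and must reset them
+# to None afterwards.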
+def test_python_script_scope():
+ assert execution_plugin.ctx is None
+ assert execution_plugin.inputs is None
+ ctx = object()
+ inputs = object()
+ with execution_plugin.python_script_scope(operation_ctx=ctx, operation_inputs=inputs):
+ assert execution_plugin.ctx is ctx
+ assert execution_plugin.inputs is inputs
+ assert execution_plugin.ctx is None
+ assert execution_plugin.inputs is None
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_local.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_local.py
new file mode 100644
index 0000000..7f33318
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_local.py
@@ -0,0 +1,598 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+
+import pytest
+
+from aria import workflow
+from aria.orchestrator import events
+from aria.orchestrator.workflows import api
+from aria.orchestrator.workflows.exceptions import ExecutorException
+from aria.orchestrator.exceptions import TaskAbortException, TaskRetryException
+from aria.orchestrator.execution_plugin import operations
+from aria.orchestrator.execution_plugin.exceptions import ProcessException
+from aria.orchestrator.execution_plugin import local
+from aria.orchestrator.execution_plugin import constants
+from aria.orchestrator.workflows.executor import process
+from aria.orchestrator.workflows.core import engine, graph_compiler
+
+from tests import mock
+from tests import storage
+from tests.orchestrator.workflows.helpers import events_collector
+
+IS_WINDOWS = os.name == 'nt'
+
+
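+# Every test supplies both a bash and a cmd flavour of its script;
+# TestLocalRunScript._create_script writes whichever matches the platform.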
+class TestLocalRunScript(object):
+
+ def test_script_path_parameter(self, executor, workflow_context, tmpdir):
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash -e
+ ctx node attributes map key = value
+ ''',
+ windows_script='''
+ ctx node attributes map key = value
+ ''')
+ props = self._run(
+ executor, workflow_context,
+ script_path=script_path)
+ assert props['map'].value['key'] == 'value'
+
+ def test_process_env(self, executor, workflow_context, tmpdir):
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash -e
+ ctx node attributes map key1 = "$key1"
+ ctx node attributes map key2 = "$key2"
+ ''',
+ windows_script='''
+ ctx node attributes map key1 = %key1%
+ ctx node attributes map key2 = %key2%
+ ''')
+ props = self._run(
+ executor, workflow_context,
+ script_path=script_path,
+ process={
+ 'env': {
+ 'key1': 'value1',
+ 'key2': 'value2'
+ }
+ })
+ p_map = props['map'].value
+ assert p_map['key1'] == 'value1'
+ assert p_map['key2'] == 'value2'
+
+ def test_process_cwd(self, executor, workflow_context, tmpdir):
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash -e
+ ctx node attributes map cwd = "$PWD"
+ ''',
+ windows_script='''
+ ctx node attributes map cwd = %CD%
+ ''')
+ tmpdir = str(tmpdir)
+ props = self._run(
+ executor, workflow_context,
+ script_path=script_path,
+ process={
+ 'cwd': tmpdir
+ })
+ p_map = props['map'].value
+ assert p_map['cwd'] == tmpdir
+
+ def test_process_command_prefix(self, executor, workflow_context, tmpdir):
+ use_ctx = 'ctx node attributes map key = value'
+        python_script = ['import subprocess',
+                         'subprocess.Popen("{0}".split()).communicate()[0]'.format(use_ctx)]
+ python_script = '\n'.join(python_script)
+ script_path = self._create_script(
+ tmpdir,
+ linux_script=python_script,
+ windows_script=python_script,
+ windows_suffix='',
+ linux_suffix='')
+ props = self._run(
+ executor, workflow_context,
+ script_path=script_path,
+ process={
+ 'env': {'TEST_KEY': 'value'},
+ 'command_prefix': 'python'
+ })
+ p_map = props['map'].value
+ assert p_map['key'] == 'value'
+
+ def test_process_args(self, executor, workflow_context, tmpdir):
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash -e
+ ctx node attributes map arg1 = "$1"
+ ctx node attributes map arg2 = "$2"
+ ''',
+ windows_script='''
+ ctx node attributes map arg1 = %1
+ ctx node attributes map arg2 = %2
+ ''')
+ props = self._run(
+ executor, workflow_context,
+ script_path=script_path,
+ process={
+ 'args': ['"arg with spaces"', 'arg2']
+ })
+ assert props['map'].value['arg1'] == 'arg with spaces'
+ assert props['map'].value['arg2'] == 'arg2'
+
+ def test_no_script_path(self, executor, workflow_context):
+ exception = self._run_and_get_task_exception(
+ executor, workflow_context,
+ script_path=None)
+ assert isinstance(exception, TaskAbortException)
+ assert 'script_path' in exception.message
+
+ def test_script_error(self, executor, workflow_context, tmpdir):
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash -e
+ echo 123123
+ command_that_does_not_exist [ ]
+ ''',
+ windows_script='''
+ @echo off
+ echo 123123
+ command_that_does_not_exist [ ]
+ ''')
+ exception = self._run_and_get_task_exception(
+ executor, workflow_context,
+ script_path=script_path)
+ assert isinstance(exception, ProcessException)
+ assert os.path.basename(script_path) in exception.command
+        assert exception.exit_code == (1 if IS_WINDOWS else 127)
+ assert exception.stdout.strip() == '123123'
+ assert 'command_that_does_not_exist' in exception.stderr
+
+ def test_script_error_from_bad_ctx_request(self, executor, workflow_context, tmpdir):
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash -e
+ ctx property_that_does_not_exist
+ ''',
+ windows_script='''
+ ctx property_that_does_not_exist
+ ''')
+ exception = self._run_and_get_task_exception(
+ executor, workflow_context,
+ script_path=script_path)
+ assert isinstance(exception, ProcessException)
+ assert os.path.basename(script_path) in exception.command
+ assert exception.exit_code == 1
+ assert 'RequestError' in exception.stderr
+ assert 'property_that_does_not_exist' in exception.stderr
+
+ def test_python_script(self, executor, workflow_context, tmpdir):
+ script = '''
+from aria.orchestrator.execution_plugin import ctx, inputs
+if __name__ == '__main__':
+ ctx.node.attributes['key'] = inputs['key']
+'''
+ suffix = '.py'
+ script_path = self._create_script(
+ tmpdir,
+ linux_script=script,
+ windows_script=script,
+ linux_suffix=suffix,
+ windows_suffix=suffix)
+ props = self._run(
+ executor, workflow_context,
+ script_path=script_path,
+ arguments={'key': 'value'})
+ assert props['key'].value == 'value'
+
+ @pytest.mark.parametrize(
+ 'value', ['string-value', [1, 2, 3], 999, 3.14, False,
+ {'complex1': {'complex2': {'key': 'value'}, 'list': [1, 2, 3]}}])
+ def test_inputs_as_environment_variables(self, executor, workflow_context, tmpdir, value):
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash -e
+ ctx node attributes key = "${input_as_env_var}"
+ ''',
+ windows_script='''
+ ctx node attributes key = "%input_as_env_var%"
+ ''')
+ props = self._run(
+ executor, workflow_context,
+ script_path=script_path,
+ env_var=value)
+        return_value = props['key'].value
+        expected = return_value if isinstance(value, basestring) else json.loads(return_value)
+        assert expected == value
+
+ @pytest.mark.parametrize('value', ['override', {'key': 'value'}])
+ def test_explicit_env_variables_inputs_override(
+ self, executor, workflow_context, tmpdir, value):
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash -e
+ ctx node attributes key = "${input_as_env_var}"
+ ''',
+ windows_script='''
+ ctx node attributes key = "%input_as_env_var%"
+ ''')
+
+ props = self._run(
+ executor, workflow_context,
+ script_path=script_path,
+ env_var='test-value',
+ process={
+ 'env': {
+ 'input_as_env_var': value
+ }
+ })
+        return_value = props['key'].value
+        expected = return_value if isinstance(value, basestring) else json.loads(return_value)
+        assert expected == value
+
+ def test_get_nonexistent_runtime_property(self, executor, workflow_context, tmpdir):
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash -e
+ ctx node attributes nonexistent
+ ''',
+ windows_script='''
+ ctx node attributes nonexistent
+ ''')
+ exception = self._run_and_get_task_exception(
+ executor, workflow_context,
+ script_path=script_path)
+ assert isinstance(exception, ProcessException)
+ assert os.path.basename(script_path) in exception.command
+ assert 'RequestError' in exception.stderr
+ assert 'nonexistent' in exception.stderr
+
+ def test_get_nonexistent_runtime_property_json(self, executor, workflow_context, tmpdir):
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash -e
+ ctx -j node attributes nonexistent
+ ''',
+ windows_script='''
+ ctx -j node attributes nonexistent
+ ''')
+ exception = self._run_and_get_task_exception(
+ executor, workflow_context,
+ script_path=script_path)
+ assert isinstance(exception, ProcessException)
+ assert os.path.basename(script_path) in exception.command
+ assert 'RequestError' in exception.stderr
+ assert 'nonexistent' in exception.stderr
+
+ def test_abort(self, executor, workflow_context, tmpdir):
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash -e
+ ctx task abort [ abort-message ]
+ ''',
+ windows_script='''
+ ctx task abort [ abort-message ]
+ ''')
+ exception = self._run_and_get_task_exception(
+ executor, workflow_context,
+ script_path=script_path)
+ assert isinstance(exception, TaskAbortException)
+ assert exception.message == 'abort-message'
+
+ def test_retry(self, executor, workflow_context, tmpdir):
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash -e
+ ctx task retry [ retry-message ]
+ ''',
+ windows_script='''
+ ctx task retry [ retry-message ]
+ ''')
+ exception = self._run_and_get_task_exception(
+ executor, workflow_context,
+ script_path=script_path)
+ assert isinstance(exception, TaskRetryException)
+ assert exception.message == 'retry-message'
+
+ def test_retry_with_interval(self, executor, workflow_context, tmpdir):
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash -e
+ ctx task retry [ retry-message @100 ]
+ ''',
+ windows_script='''
+ ctx task retry [ retry-message @100 ]
+ ''')
+ exception = self._run_and_get_task_exception(
+ executor, workflow_context,
+ script_path=script_path)
+ assert isinstance(exception, TaskRetryException)
+ assert exception.message == 'retry-message'
+ assert exception.retry_interval == 100
+
+ def test_crash_abort_after_retry(self, executor, workflow_context, tmpdir):
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash
+ ctx task retry [ retry-message ]
+ ctx task abort [ should-raise-a-runtime-error ]
+ ''',
+ windows_script='''
+ ctx task retry [ retry-message ]
+ ctx task abort [ should-raise-a-runtime-error ]
+ ''')
+ exception = self._run_and_get_task_exception(
+ executor, workflow_context,
+ script_path=script_path)
+ assert isinstance(exception, TaskAbortException)
+ assert exception.message == constants.ILLEGAL_CTX_OPERATION_MESSAGE
+
+ def test_crash_retry_after_abort(self, executor, workflow_context, tmpdir):
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash
+ ctx task abort [ abort-message ]
+ ctx task retry [ should-raise-a-runtime-error ]
+ ''',
+ windows_script='''
+ ctx task abort [ abort-message ]
+ ctx task retry [ should-raise-a-runtime-error ]
+ ''')
+ exception = self._run_and_get_task_exception(
+ executor, workflow_context,
+ script_path=script_path)
+ assert isinstance(exception, TaskAbortException)
+ assert exception.message == constants.ILLEGAL_CTX_OPERATION_MESSAGE
+
+ def test_crash_abort_after_abort(self, executor, workflow_context, tmpdir):
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash
+ ctx task abort [ abort-message ]
+ ctx task abort [ should-raise-a-runtime-error ]
+ ''',
+ windows_script='''
+ ctx task abort [ abort-message ]
+ ctx task abort [ should-raise-a-runtime-error ]
+ ''')
+ exception = self._run_and_get_task_exception(
+ executor, workflow_context,
+ script_path=script_path)
+ assert isinstance(exception, TaskAbortException)
+ assert exception.message == constants.ILLEGAL_CTX_OPERATION_MESSAGE
+
+ def test_crash_retry_after_retry(self, executor, workflow_context, tmpdir):
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash
+ ctx task retry [ retry-message ]
+ ctx task retry [ should-raise-a-runtime-error ]
+ ''',
+ windows_script='''
+ ctx task retry [ retry-message ]
+ ctx task retry [ should-raise-a-runtime-error ]
+ ''')
+ exception = self._run_and_get_task_exception(
+ executor, workflow_context,
+ script_path=script_path)
+ assert isinstance(exception, TaskAbortException)
+ assert exception.message == constants.ILLEGAL_CTX_OPERATION_MESSAGE
+
+ def test_retry_returns_a_nonzero_exit_code(self, executor, workflow_context, tmpdir):
+ log_path = tmpdir.join('temp.log')
+ message = 'message'
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash -e
+ ctx task retry [ "{0}" ] 2> {1}
+ echo should-not-run > {1}
+ '''.format(message, log_path),
+ windows_script='''
+ ctx task retry [ "{0}" ] 2> {1}
+ if %errorlevel% neq 0 exit /b %errorlevel%
+ echo should-not-run > {1}
+ '''.format(message, log_path))
+ with pytest.raises(ExecutorException):
+ self._run(
+ executor, workflow_context,
+ script_path=script_path)
+ assert log_path.read().strip() == message
+
+ def test_abort_returns_a_nonzero_exit_code(self, executor, workflow_context, tmpdir):
+ log_path = tmpdir.join('temp.log')
+ message = 'message'
+ script_path = self._create_script(
+ tmpdir,
+ linux_script='''#! /bin/bash -e
+ ctx task abort [ "{0}" ] 2> {1}
+ echo should-not-run > {1}
+ '''.format(message, log_path),
+ windows_script='''
+ ctx task abort [ "{0}" ] 2> {1}
+ if %errorlevel% neq 0 exit /b %errorlevel%
+ echo should-not-run > {1}
+ '''.format(message, log_path))
+ with pytest.raises(ExecutorException):
+ self._run(
+ executor, workflow_context,
+ script_path=script_path)
+ assert log_path.read().strip() == message
+
+ def _create_script(self,
+ tmpdir,
+ linux_script,
+ windows_script,
+ windows_suffix='.bat',
+ linux_suffix=''):
+ suffix = windows_suffix if IS_WINDOWS else linux_suffix
+ script = windows_script if IS_WINDOWS else linux_script
+ script_path = tmpdir.join('script{0}'.format(suffix))
+ script_path.write(script)
+ return str(script_path)
+
+ def _run_and_get_task_exception(self, *args, **kwargs):
+ signal = events.on_failure_task_signal
+ with events_collector(signal) as collected:
+ with pytest.raises(ExecutorException):
+ self._run(*args, **kwargs)
+ return collected[signal][0]['kwargs']['exception']
+
+ def _run(self,
+ executor,
+ workflow_context,
+ script_path,
+ process=None,
+ env_var='value',
+ arguments=None):
+ local_script_path = script_path
+ script_path = os.path.basename(local_script_path) if local_script_path else ''
+ arguments = arguments or {}
+ process = process or {}
+ if script_path:
+ workflow_context.resource.service.upload(
+ entry_id=str(workflow_context.service.id),
+ source=local_script_path,
+ path=script_path)
+
+ arguments.update({
+ 'script_path': script_path,
+ 'process': process,
+ 'input_as_env_var': env_var
+ })
+
+ node = workflow_context.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ interface = mock.models.create_interface(
+ node.service,
+ 'test',
+ 'op',
+ operation_kwargs=dict(
+ function='{0}.{1}'.format(
+ operations.__name__,
+ operations.run_script_locally.__name__),
+ arguments=arguments)
+ )
+ node.interfaces[interface.name] = interface
+ workflow_context.model.node.update(node)
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ graph.add_tasks(api.task.OperationTask(
+ node,
+ interface_name='test',
+ operation_name='op',
+ arguments=arguments))
+ return graph
+ tasks_graph = mock_workflow(ctx=workflow_context) # pylint: disable=no-value-for-parameter
+ graph_compiler.GraphCompiler(workflow_context, executor.__class__).compile(tasks_graph)
+ eng = engine.Engine({executor.__class__: executor})
+ eng.execute(workflow_context)
+ return workflow_context.model.node.get_by_name(
+ mock.models.DEPENDENCY_NODE_NAME).attributes
+
+ @pytest.fixture
+ def executor(self):
+ result = process.ProcessExecutor()
+ try:
+ yield result
+ finally:
+ result.close()
+
+ @pytest.fixture
+ def workflow_context(self, tmpdir):
+ workflow_context = mock.context.simple(str(tmpdir), inmemory=False)
+ workflow_context.states = []
+ workflow_context.exception = None
+ yield workflow_context
+ storage.release_sqlite_storage(workflow_context.model)
+
+
+class BaseTestConfiguration(object):
+
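+    # Patches the two local-execution entry points so each test can assert
+    # which path was taken: in-process eval versus subprocess execution.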
+ @pytest.fixture(autouse=True)
+ def mock_execute(self, mocker):
+ def eval_func(**_):
+ self.called = 'eval'
+
+ def execute_func(process, **_):
+ self.process = process
+ self.called = 'execute'
+ self.process = {}
+ self.called = None
+ mocker.patch.object(local, '_execute_func', execute_func)
+ mocker.patch.object(local, '_eval_script_func', eval_func)
+
+ class Ctx(object):
+ @staticmethod
+ def download_resource(destination, *args, **kwargs):
+ return destination
+
+ def _run(self, script_path, process=None):
+ local.run_script(
+ script_path=script_path,
+ process=process,
+ ctx=self.Ctx)
+
+
+class TestPowerShellConfiguration(BaseTestConfiguration):
+
+ def test_implicit_powershell_call_with_ps1_extension(self):
+ self._run(script_path='script_path.ps1')
+ assert self.process['command_prefix'] == 'powershell'
+
+ def test_command_prefix_is_overridden_for_ps1_extension(self):
+ self._run(script_path='script_path.ps1',
+ process={'command_prefix': 'bash'})
+ assert self.process['command_prefix'] == 'bash'
+
+ def test_explicit_powershell_call(self):
+ self._run(script_path='script_path.ps1',
+ process={'command_prefix': 'powershell'})
+ assert self.process['command_prefix'] == 'powershell'
+
+
+class TestEvalPythonConfiguration(BaseTestConfiguration):
+
+ def test_explicit_eval_without_py_extension(self):
+ self._run(script_path='script_path',
+ process={'eval_python': True})
+ assert self.called == 'eval'
+
+ def test_explicit_eval_with_py_extension(self):
+ self._run(script_path='script_path.py',
+ process={'eval_python': True})
+ assert self.called == 'eval'
+
+ def test_implicit_eval(self):
+ self._run(script_path='script_path.py')
+ assert self.called == 'eval'
+
+ def test_explicit_execute_without_py_extension(self):
+ self._run(script_path='script_path',
+ process={'eval_python': False})
+ assert self.called == 'execute'
+
+ def test_explicit_execute_with_py_extension(self):
+ self._run(script_path='script_path.py',
+ process={'eval_python': False})
+ assert self.called == 'execute'
+
+ def test_implicit_execute(self):
+ self._run(script_path='script_path')
+ assert self.called == 'execute'
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_ssh.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_ssh.py
new file mode 100644
index 0000000..b5df939
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/execution_plugin/test_ssh.py
@@ -0,0 +1,523 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import contextlib
+import json
+import logging
+import os
+
+import pytest
+
+import fabric.api
+from fabric.contrib import files
+from fabric import context_managers
+
+from aria.modeling import models
+from aria.orchestrator import events
+from aria.orchestrator import workflow
+from aria.orchestrator.workflows import api
+from aria.orchestrator.workflows.executor import process
+from aria.orchestrator.workflows.core import (engine, graph_compiler)
+from aria.orchestrator.workflows.exceptions import ExecutorException
+from aria.orchestrator.exceptions import (TaskAbortException, TaskRetryException)
+from aria.orchestrator.execution_plugin import operations
+from aria.orchestrator.execution_plugin import constants
+from aria.orchestrator.execution_plugin.exceptions import (ProcessException, TaskException)
+from aria.orchestrator.execution_plugin.ssh import operations as ssh_operations
+
+from tests import mock, storage, resources
+from tests.orchestrator.workflows.helpers import events_collector
+
+
+_CUSTOM_BASE_DIR = '/tmp/new-aria-ctx'
+
+_FABRIC_ENV = {
+ 'host_string': 'localhost',
+ 'user': 'travis',
+ # 'password': 'travis',
+ 'key_filename': '/home/travis/.ssh/id_rsa'
+}
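+
+# The credentials above assume the Travis CI environment (see the skipif
+# marker on TestWithActualSSHServer); adjust them to run against another host.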
+
+
+# To help debug in case of connection failures
+logging.getLogger('paramiko.transport').addHandler(logging.StreamHandler())
+logging.getLogger('paramiko.transport').setLevel(logging.DEBUG)
+
+
+@pytest.mark.skipif(not os.environ.get('TRAVIS'), reason='actual ssh server required')
+class TestWithActualSSHServer(object):
+
+ def test_run_script_basic(self):
+ expected_attribute_value = 'some_value'
+ props = self._execute(env={'test_value': expected_attribute_value})
+ assert props['test_value'].value == expected_attribute_value
+
+ @pytest.mark.skip(reason='sudo privileges are required')
+ def test_run_script_as_sudo(self):
+ self._execute(use_sudo=True)
+ with self._ssh_env():
+ assert files.exists('/opt/test_dir')
+ fabric.api.sudo('rm -rf /opt/test_dir')
+
+ def test_run_script_default_base_dir(self):
+ props = self._execute()
+ assert props['work_dir'].value == '{0}/work'.format(constants.DEFAULT_BASE_DIR)
+
+ @pytest.mark.skip(reason='Re-enable once output from process executor can be captured')
+ @pytest.mark.parametrize('hide_groups', [[], ['everything']])
+ def test_run_script_with_hide(self, hide_groups):
+ self._execute(hide_output=hide_groups)
+ output = 'TODO'
+ expected_log_message = ('[localhost] run: source {0}/scripts/'
+ .format(constants.DEFAULT_BASE_DIR))
+ if hide_groups:
+ assert expected_log_message not in output
+ else:
+ assert expected_log_message in output
+
+ def test_run_script_process_config(self):
+ expected_env_value = 'test_value_env'
+ expected_arg1_value = 'test_value_arg1'
+ expected_arg2_value = 'test_value_arg2'
+ expected_cwd = '/tmp'
+ expected_base_dir = _CUSTOM_BASE_DIR
+ props = self._execute(
+ env={'test_value_env': expected_env_value},
+ process={
+ 'args': [expected_arg1_value, expected_arg2_value],
+ 'cwd': expected_cwd,
+ 'base_dir': expected_base_dir
+ })
+ assert props['env_value'].value == expected_env_value
+ assert len(props['bash_version'].value) > 0
+ assert props['arg1_value'].value == expected_arg1_value
+ assert props['arg2_value'].value == expected_arg2_value
+ assert props['cwd'].value == expected_cwd
+ assert props['ctx_path'].value == '{0}/ctx'.format(expected_base_dir)
+
+ def test_run_script_command_prefix(self):
+ props = self._execute(process={'command_prefix': 'bash -i'})
+ assert 'i' in props['dollar_dash'].value
+
+ def test_run_script_reuse_existing_ctx(self):
+ expected_test_value_1 = 'test_value_1'
+ expected_test_value_2 = 'test_value_2'
+ props = self._execute(
+ test_operations=['{0}_1'.format(self.test_name),
+ '{0}_2'.format(self.test_name)],
+ env={'test_value1': expected_test_value_1,
+ 'test_value2': expected_test_value_2})
+ assert props['test_value1'].value == expected_test_value_1
+ assert props['test_value2'].value == expected_test_value_2
+
+ def test_run_script_download_resource_plain(self, tmpdir):
+ resource = tmpdir.join('resource')
+ resource.write('content')
+ self._upload(str(resource), 'test_resource')
+ props = self._execute()
+ assert props['test_value'].value == 'content'
+
+ def test_run_script_download_resource_and_render(self, tmpdir):
+ resource = tmpdir.join('resource')
+ resource.write('{{ctx.service.name}}')
+ self._upload(str(resource), 'test_resource')
+ props = self._execute()
+ assert props['test_value'].value == self._workflow_context.service.name
+
+ @pytest.mark.parametrize('value', ['string-value', [1, 2, 3], {'key': 'value'}])
+ def test_run_script_inputs_as_env_variables_no_override(self, value):
+ props = self._execute(custom_input=value)
+ return_value = props['test_value'].value
+ expected = return_value if isinstance(value, basestring) else json.loads(return_value)
+ assert value == expected
+
+ @pytest.mark.parametrize('value', ['string-value', [1, 2, 3], {'key': 'value'}])
+ def test_run_script_inputs_as_env_variables_process_env_override(self, value):
+ props = self._execute(custom_input='custom-input-value',
+ env={'custom_env_var': value})
+ return_value = props['test_value'].value
+ expected = return_value if isinstance(value, basestring) else json.loads(return_value)
+ assert value == expected
+
+ def test_run_script_error_in_script(self):
+ exception = self._execute_and_get_task_exception()
+ assert isinstance(exception, TaskException)
+
+ def test_run_script_abort_immediate(self):
+ exception = self._execute_and_get_task_exception()
+ assert isinstance(exception, TaskAbortException)
+ assert exception.message == 'abort-message'
+
+ def test_run_script_retry(self):
+ exception = self._execute_and_get_task_exception()
+ assert isinstance(exception, TaskRetryException)
+ assert exception.message == 'retry-message'
+
+ def test_run_script_abort_error_ignored_by_script(self):
+ exception = self._execute_and_get_task_exception()
+ assert isinstance(exception, TaskAbortException)
+ assert exception.message == 'abort-message'
+
+ def test_run_commands(self):
+ temp_file_path = '/tmp/very_temporary_file'
+ with self._ssh_env():
+ if files.exists(temp_file_path):
+ fabric.api.run('rm {0}'.format(temp_file_path))
+ self._execute(commands=['touch {0}'.format(temp_file_path)])
+ with self._ssh_env():
+ assert files.exists(temp_file_path)
+ fabric.api.run('rm {0}'.format(temp_file_path))
+
+ @pytest.fixture(autouse=True)
+ def _setup(self, request, workflow_context, executor, capfd):
+ self._workflow_context = workflow_context
+ self._executor = executor
+ self._capfd = capfd
+ self.test_name = request.node.originalname or request.node.name
+ with self._ssh_env():
+ for directory in [constants.DEFAULT_BASE_DIR, _CUSTOM_BASE_DIR]:
+ if files.exists(directory):
+ fabric.api.run('rm -rf {0}'.format(directory))
+
+ @contextlib.contextmanager
+ def _ssh_env(self):
+ with self._capfd.disabled():
+ with context_managers.settings(fabric.api.hide('everything'),
+ **_FABRIC_ENV):
+ yield
+
+ def _execute(self,
+ env=None,
+ use_sudo=False,
+ hide_output=None,
+ process=None,
+ custom_input='',
+ test_operations=None,
+ commands=None):
+ process = process or {}
+ if env:
+ process.setdefault('env', {}).update(env)
+
+ test_operations = test_operations or [self.test_name]
+
+ local_script_path = os.path.join(resources.DIR, 'scripts', 'test_ssh.sh')
+ script_path = os.path.basename(local_script_path)
+ self._upload(local_script_path, script_path)
+
+ if commands:
+ operation = operations.run_commands_with_ssh
+ else:
+ operation = operations.run_script_with_ssh
+
+ node = self._workflow_context.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ arguments = {
+ 'script_path': script_path,
+ 'fabric_env': _FABRIC_ENV,
+ 'process': process,
+ 'use_sudo': use_sudo,
+ 'custom_env_var': custom_input,
+ 'test_operation': '',
+ }
+ if hide_output:
+ arguments['hide_output'] = hide_output
+ if commands:
+ arguments['commands'] = commands
+ interface = mock.models.create_interface(
+ node.service,
+ 'test',
+ 'op',
+ operation_kwargs=dict(
+ function='{0}.{1}'.format(
+ operations.__name__,
+ operation.__name__),
+ arguments=arguments)
+ )
+ node.interfaces[interface.name] = interface
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ ops = []
+ for test_operation in test_operations:
+ op_arguments = arguments.copy()
+ op_arguments['test_operation'] = test_operation
+ ops.append(api.task.OperationTask(
+ node,
+ interface_name='test',
+ operation_name='op',
+ arguments=op_arguments))
+
+ graph.sequence(*ops)
+ return graph
+ tasks_graph = mock_workflow(ctx=self._workflow_context) # pylint: disable=no-value-for-parameter
+ graph_compiler.GraphCompiler(
+ self._workflow_context, self._executor.__class__).compile(tasks_graph)
+ eng = engine.Engine({self._executor.__class__: self._executor})
+ eng.execute(self._workflow_context)
+ return self._workflow_context.model.node.get_by_name(
+ mock.models.DEPENDENCY_NODE_NAME).attributes
+
+ def _execute_and_get_task_exception(self, *args, **kwargs):
+ signal = events.on_failure_task_signal
+ with events_collector(signal) as collected:
+ with pytest.raises(ExecutorException):
+ self._execute(*args, **kwargs)
+ return collected[signal][0]['kwargs']['exception']
+
+ def _upload(self, source, path):
+ self._workflow_context.resource.service.upload(
+ entry_id=str(self._workflow_context.service.id),
+ source=source,
+ path=path)
+
+ @pytest.fixture
+ def executor(self):
+ result = process.ProcessExecutor()
+ try:
+ yield result
+ finally:
+ result.close()
+
+ @pytest.fixture
+ def workflow_context(self, tmpdir):
+ workflow_context = mock.context.simple(str(tmpdir))
+ workflow_context.states = []
+ workflow_context.exception = None
+ yield workflow_context
+ storage.release_sqlite_storage(workflow_context.model)
+
+
+class TestFabricEnvHideGroupsAndRunCommands(object):
+
+ def test_fabric_env_default_override(self):
+        # first, sanity-check the defaults with no override
+ self._run()
+ assert self.mock.settings_merged['timeout'] == constants.FABRIC_ENV_DEFAULTS['timeout']
+ # now override
+ invocation_fabric_env = self.default_fabric_env.copy()
+ timeout = 1000000
+ invocation_fabric_env['timeout'] = timeout
+ self._run(fabric_env=invocation_fabric_env)
+ assert self.mock.settings_merged['timeout'] == timeout
+
+ def test_implicit_host_string(self, mocker):
+ expected_host_address = '1.1.1.1'
+ mocker.patch.object(self._Ctx.task.actor, 'host')
+ mocker.patch.object(self._Ctx.task.actor.host, 'host_address', expected_host_address)
+ fabric_env = self.default_fabric_env.copy()
+ del fabric_env['host_string']
+ self._run(fabric_env=fabric_env)
+ assert self.mock.settings_merged['host_string'] == expected_host_address
+
+ def test_explicit_host_string(self):
+ fabric_env = self.default_fabric_env.copy()
+ host_string = 'explicit_host_string'
+ fabric_env['host_string'] = host_string
+ self._run(fabric_env=fabric_env)
+ assert self.mock.settings_merged['host_string'] == host_string
+
+ def test_override_warn_only(self):
+ fabric_env = self.default_fabric_env.copy()
+ self._run(fabric_env=fabric_env)
+ assert self.mock.settings_merged['warn_only'] is True
+ fabric_env = self.default_fabric_env.copy()
+ fabric_env['warn_only'] = False
+ self._run(fabric_env=fabric_env)
+ assert self.mock.settings_merged['warn_only'] is False
+
+ def test_missing_host_string(self):
+ with pytest.raises(TaskAbortException) as exc_ctx:
+ fabric_env = self.default_fabric_env.copy()
+ del fabric_env['host_string']
+ self._run(fabric_env=fabric_env)
+ assert '`host_string` not supplied' in str(exc_ctx.value)
+
+ def test_missing_user(self):
+ with pytest.raises(TaskAbortException) as exc_ctx:
+ fabric_env = self.default_fabric_env.copy()
+ del fabric_env['user']
+ self._run(fabric_env=fabric_env)
+ assert '`user` not supplied' in str(exc_ctx.value)
+
+ def test_missing_key_or_password(self):
+ with pytest.raises(TaskAbortException) as exc_ctx:
+ fabric_env = self.default_fabric_env.copy()
+ del fabric_env['key_filename']
+ self._run(fabric_env=fabric_env)
+ assert 'Access credentials not supplied' in str(exc_ctx.value)
+
+ def test_hide_in_settings_and_non_viable_groups(self):
+ groups = ('running', 'stdout')
+ self._run(hide_output=groups)
+ assert set(self.mock.settings_merged['hide_output']) == set(groups)
+ with pytest.raises(TaskAbortException) as exc_ctx:
+ self._run(hide_output=('running', 'bla'))
+ assert '`hide_output` must be a subset of' in str(exc_ctx.value)
+
+ def test_run_commands(self):
+ def test(use_sudo):
+ commands = ['command1', 'command2']
+ self._run(
+ commands=commands,
+ use_sudo=use_sudo)
+ assert all(item in self.mock.settings_merged.items() for
+ item in self.default_fabric_env.items())
+ assert self.mock.settings_merged['warn_only'] is True
+ assert self.mock.settings_merged['use_sudo'] == use_sudo
+ assert self.mock.commands == commands
+ self.mock.settings_merged = {}
+ self.mock.commands = []
+ test(use_sudo=False)
+ test(use_sudo=True)
+
+ def test_failed_command(self):
+ with pytest.raises(ProcessException) as exc_ctx:
+ self._run(commands=['fail'])
+ exception = exc_ctx.value
+ assert exception.stdout == self.MockCommandResult.stdout
+ assert exception.stderr == self.MockCommandResult.stderr
+ assert exception.command == self.MockCommandResult.command
+ assert exception.exit_code == self.MockCommandResult.return_code
+
+ class MockCommandResult(object):
+ stdout = 'mock_stdout'
+ stderr = 'mock_stderr'
+ command = 'mock_command'
+ return_code = 1
+
+ def __init__(self, failed):
+ self.failed = failed
+
+ class MockFabricApi(object):
+
+ def __init__(self):
+ self.commands = []
+ self.settings_merged = {}
+
+ @contextlib.contextmanager
+ def settings(self, *args, **kwargs):
+ self.settings_merged.update(kwargs)
+ if args:
+ groups = args[0]
+ self.settings_merged.update({'hide_output': groups})
+ yield
+
+ def run(self, command):
+ self.commands.append(command)
+ self.settings_merged['use_sudo'] = False
+ return TestFabricEnvHideGroupsAndRunCommands.MockCommandResult(command == 'fail')
+
+ def sudo(self, command):
+ self.commands.append(command)
+ self.settings_merged['use_sudo'] = True
+ return TestFabricEnvHideGroupsAndRunCommands.MockCommandResult(command == 'fail')
+
+ def hide(self, *groups):
+ return groups
+
+ def exists(self, *args, **kwargs):
+ raise RuntimeError
+
+ class _Ctx(object):
+ INSTRUMENTATION_FIELDS = ()
+
+ class Task(object):
+ @staticmethod
+ def abort(message=None):
+ models.Task.abort(message)
+ actor = None
+
+ class Actor(object):
+ host = None
+
+ class Model(object):
+ @contextlib.contextmanager
+ def instrument(self, *args, **kwargs):
+ yield
+ task = Task
+ task.actor = Actor
+ model = Model()
+ logger = logging.getLogger()
+
+ @staticmethod
+ @contextlib.contextmanager
+ def _mock_self_logging(*args, **kwargs):
+ yield
+ _Ctx.logging_handlers = _mock_self_logging
+
+ @pytest.fixture(autouse=True)
+ def _setup(self, mocker):
+ self.default_fabric_env = {
+ 'host_string': 'test',
+ 'user': 'test',
+ 'key_filename': 'test',
+ }
+ self.mock = self.MockFabricApi()
+ mocker.patch('fabric.api', self.mock)
+
+ def _run(self,
+ commands=(),
+ fabric_env=None,
+ process=None,
+ use_sudo=False,
+ hide_output=None):
+ operations.run_commands_with_ssh(
+ ctx=self._Ctx,
+ commands=commands,
+ process=process,
+ fabric_env=fabric_env or self.default_fabric_env,
+ use_sudo=use_sudo,
+ hide_output=hide_output)
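+
+ # _run drives operations.run_commands_with_ssh against the mocked
+ # fabric.api: MockFabricApi.settings records every merged keyword into
+ # settings_merged, which the tests above assert on. A minimal sketch:
+ #
+ #     self._run(fabric_env={'host_string': 'h', 'user': 'u',
+ #                           'key_filename': 'k'})
+ #     assert self.mock.settings_merged['host_string'] == 'h'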
+
+
+class TestUtilityFunctions(object):
+
+ def test_paths(self):
+ base_dir = '/path'
+ local_script_path = '/local/script/path.py'
+ paths = ssh_operations._Paths(base_dir=base_dir,
+ local_script_path=local_script_path)
+ assert paths.local_script_path == local_script_path
+ assert paths.remote_ctx_dir == base_dir
+ assert paths.base_script_path == 'path.py'
+ assert paths.remote_ctx_path == '/path/ctx'
+ assert paths.remote_scripts_dir == '/path/scripts'
+ assert paths.remote_work_dir == '/path/work'
+ assert paths.remote_env_script_path.startswith('/path/scripts/env-path.py-')
+ assert paths.remote_script_path.startswith('/path/scripts/path.py-')
+
+ def test_write_environment_script_file(self):
+ base_dir = '/path'
+ local_script_path = '/local/script/path.py'
+ paths = ssh_operations._Paths(base_dir=base_dir,
+ local_script_path=local_script_path)
+ env = {'one': "'1'"}
+ local_socket_url = 'local_socket_url'
+ remote_socket_url = 'remote_socket_url'
+ env_script_lines = set([l for l in ssh_operations._write_environment_script_file(
+ process={'env': env},
+ paths=paths,
+ local_socket_url=local_socket_url,
+ remote_socket_url=remote_socket_url
+ ).getvalue().split('\n') if l])
+ expected_env_script_lines = set([
+ 'export PATH=/path:$PATH',
+ 'export PYTHONPATH=/path:$PYTHONPATH',
+ 'chmod +x /path/ctx',
+ 'chmod +x {0}'.format(paths.remote_script_path),
+ 'export CTX_SOCKET_URL={0}'.format(remote_socket_url),
+ 'export LOCAL_CTX_SOCKET_URL={0}'.format(local_socket_url),
+ 'export one=\'1\''
+ ])
+ assert env_script_lines == expected_env_script_lines
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/test_workflow_runner.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/test_workflow_runner.py
new file mode 100644
index 0000000..011c4cc
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/test_workflow_runner.py
@@ -0,0 +1,726 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import time
+from threading import Thread, Event
+from datetime import datetime
+
+import mock
+import pytest
+
+from aria.modeling import exceptions as modeling_exceptions
+from aria.modeling import models
+from aria.orchestrator import exceptions
+from aria.orchestrator import events
+from aria.orchestrator.workflow_runner import WorkflowRunner
+from aria.orchestrator.workflows.executor.process import ProcessExecutor
+from aria.orchestrator.workflows import api
+from aria.orchestrator.workflows.core import engine, graph_compiler
+from aria.orchestrator.workflows.executor import thread
+from aria.orchestrator import (
+ workflow,
+ operation,
+)
+
+from tests import (
+ mock as tests_mock,
+ storage
+)
+
+from ..fixtures import ( # pylint: disable=unused-import
+ plugins_dir,
+ plugin_manager,
+ fs_model as model,
+ resource_storage as resource
+)
+
+custom_events = {
+ 'is_resumed': Event(),
+ 'is_active': Event(),
+ 'execution_cancelled': Event(),
+ 'execution_failed': Event(),
+}
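+
+# These events coordinate the test thread with the mocked operations: an
+# operation sets 'is_active' once it is running, the test then cancels (or
+# lets fail) the execution, and the operation spins until 'is_resumed' is
+# set. A minimal sketch of the handshake (hypothetical worker, not part of
+# this suite):
+#
+#     def worker():                            # runs inside the executor
+#         custom_events['is_active'].set()     # signal the test we started
+#         while not custom_events['is_resumed'].isSet():
+#             time.sleep(5)                    # block until resumed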
+
+
+class TimeoutError(BaseException):
+ pass
+
+
+class FailingTask(BaseException):
+ pass
+
+
+def test_undeclared_workflow(request):
+ # validating a proper error is raised when the workflow is not declared in the service
+ with pytest.raises(exceptions.UndeclaredWorkflowError):
+ _create_workflow_runner(request, 'undeclared_workflow')
+
+
+def test_missing_workflow_implementation(service, request):
+ # validating a proper error is raised when the workflow code path does not exist
+ workflow = models.Operation(
+ name='test_workflow',
+ service=service,
+ function='nonexistent.workflow.implementation')
+ service.workflows['test_workflow'] = workflow
+
+ with pytest.raises(exceptions.WorkflowImplementationNotFoundError):
+ _create_workflow_runner(request, 'test_workflow')
+
+
+def test_builtin_workflow_instantiation(request):
+ # validates the workflow runner instantiates properly when provided with a builtin workflow
+ # (expecting no errors to be raised on undeclared workflow or missing workflow implementation)
+ workflow_runner = _create_workflow_runner(request, 'install')
+ tasks = list(workflow_runner.execution.tasks)
+ assert len(tasks) == 18 # expecting 18 tasks for 2 node topology
+
+
+def test_custom_workflow_instantiation(request):
+ # validates the workflow runner instantiates properly when provided with a custom workflow
+ # (expecting no errors to be raised on undeclared workflow or missing workflow implementation)
+ mock_workflow = _setup_mock_workflow_in_service(request)
+ workflow_runner = _create_workflow_runner(request, mock_workflow)
+ tasks = list(workflow_runner.execution.tasks)
+ assert len(tasks) == 2 # the mock workflow creates only the start and end workflow tasks
+
+
+def test_existing_active_executions(request, service, model):
+ existing_active_execution = models.Execution(
+ service=service,
+ status=models.Execution.STARTED,
+ workflow_name='uninstall')
+ model.execution.put(existing_active_execution)
+ with pytest.raises(exceptions.ActiveExecutionsError):
+ _create_workflow_runner(request, 'install')
+
+
+def test_existing_executions_but_no_active_ones(request, service, model):
+ existing_terminated_execution = models.Execution(
+ service=service,
+ status=models.Execution.SUCCEEDED,
+ workflow_name='uninstall')
+ model.execution.put(existing_terminated_execution)
+ # no active executions exist, so no error should be raised
+ _create_workflow_runner(request, 'install')
+
+
+def test_default_executor(request):
+ # validates the ProcessExecutor is used by the workflow runner by default
+ mock_workflow = _setup_mock_workflow_in_service(request)
+
+ with mock.patch('aria.orchestrator.workflow_runner.engine.Engine') as mock_engine_cls:
+ _create_workflow_runner(request, mock_workflow)
+ _, engine_kwargs = mock_engine_cls.call_args
+ assert isinstance(engine_kwargs.get('executors').values()[0], ProcessExecutor)
+
+
+def test_custom_executor(request):
+ mock_workflow = _setup_mock_workflow_in_service(request)
+
+ custom_executor = mock.MagicMock()
+ with mock.patch('aria.orchestrator.workflow_runner.engine.Engine') as mock_engine_cls:
+ _create_workflow_runner(request, mock_workflow, executor=custom_executor)
+ _, engine_kwargs = mock_engine_cls.call_args
+ assert engine_kwargs.get('executors').values()[0] == custom_executor
+
+
+def test_task_configuration_parameters(request):
+ mock_workflow = _setup_mock_workflow_in_service(request)
+
+ task_max_attempts = 5
+ task_retry_interval = 7
+ with mock.patch('aria.orchestrator.workflow_runner.engine.Engine.execute') as \
+ mock_engine_execute:
+ _create_workflow_runner(request, mock_workflow, task_max_attempts=task_max_attempts,
+ task_retry_interval=task_retry_interval).execute()
+ _, engine_kwargs = mock_engine_execute.call_args
+ assert engine_kwargs['ctx']._task_max_attempts == task_max_attempts
+ assert engine_kwargs['ctx']._task_retry_interval == task_retry_interval
+
+
+def test_execute(request, service):
+ mock_workflow = _setup_mock_workflow_in_service(request)
+
+ mock_engine = mock.MagicMock()
+ with mock.patch('aria.orchestrator.workflow_runner.engine.Engine.execute',
+ return_value=mock_engine) as mock_engine_execute:
+ workflow_runner = _create_workflow_runner(request, mock_workflow)
+ workflow_runner.execute()
+
+ _, engine_kwargs = mock_engine_execute.call_args
+ assert engine_kwargs['ctx'].service.id == service.id
+ assert engine_kwargs['ctx'].execution.workflow_name == 'test_workflow'
+
+ mock_engine_execute.assert_called_once_with(ctx=workflow_runner._workflow_context,
+ resuming=False,
+ retry_failed=False)
+
+
+def test_cancel_execution(request):
+ mock_workflow = _setup_mock_workflow_in_service(request)
+
+ mock_engine = mock.MagicMock()
+ with mock.patch('aria.orchestrator.workflow_runner.engine.Engine', return_value=mock_engine):
+ workflow_runner = _create_workflow_runner(request, mock_workflow)
+ workflow_runner.cancel()
+ mock_engine.cancel_execution.assert_called_once_with(ctx=workflow_runner._workflow_context)
+
+
+def test_execution_model_creation(request, service, model):
+ mock_workflow = _setup_mock_workflow_in_service(request)
+
+ with mock.patch('aria.orchestrator.workflow_runner.engine.Engine'):
+ workflow_runner = _create_workflow_runner(request, mock_workflow)
+
+ assert model.execution.get(workflow_runner.execution.id) == workflow_runner.execution
+ assert workflow_runner.execution.service.id == service.id
+ assert workflow_runner.execution.workflow_name == mock_workflow
+ assert workflow_runner.execution.created_at <= datetime.utcnow()
+ assert workflow_runner.execution.inputs == dict()
+
+
+def test_execution_inputs_override_workflow_inputs(request):
+ wf_inputs = {'input1': 'value1', 'input2': 'value2', 'input3': 5}
+ mock_workflow = _setup_mock_workflow_in_service(
+ request,
+ inputs=dict((name, models.Input.wrap(name, val)) for name, val
+ in wf_inputs.iteritems()))
+
+ with mock.patch('aria.orchestrator.workflow_runner.engine.Engine'):
+ workflow_runner = _create_workflow_runner(
+ request, mock_workflow, inputs={'input2': 'overriding-value2', 'input3': 7})
+
+ assert len(workflow_runner.execution.inputs) == 3
+ # did not override input1 - expecting the default value from the workflow inputs
+ assert workflow_runner.execution.inputs['input1'].value == 'value1'
+ # overrode input2
+ assert workflow_runner.execution.inputs['input2'].value == 'overriding-value2'
+ # overrode input of integer type
+ assert workflow_runner.execution.inputs['input3'].value == 7
+
+
+def test_execution_inputs_undeclared_inputs(request):
+ mock_workflow = _setup_mock_workflow_in_service(request)
+
+ with pytest.raises(modeling_exceptions.UndeclaredInputsException):
+ _create_workflow_runner(request, mock_workflow, inputs={'undeclared_input': 'value'})
+
+
+def test_execution_inputs_missing_required_inputs(request):
+ mock_workflow = _setup_mock_workflow_in_service(
+ request, inputs={'required_input': models.Input.wrap('required_input', value=None)})
+
+ with pytest.raises(modeling_exceptions.MissingRequiredInputsException):
+ _create_workflow_runner(request, mock_workflow, inputs={})
+
+
+def test_execution_inputs_wrong_type_inputs(request):
+ mock_workflow = _setup_mock_workflow_in_service(
+ request, inputs={'input': models.Input.wrap('input', 'value')})
+
+ with pytest.raises(modeling_exceptions.ParametersOfWrongTypeException):
+ _create_workflow_runner(request, mock_workflow, inputs={'input': 5})
+
+
+def test_execution_inputs_builtin_workflow_with_inputs(request):
+ # built-in workflows don't have inputs
+ with pytest.raises(modeling_exceptions.UndeclaredInputsException):
+ _create_workflow_runner(request, 'install', inputs={'undeclared_input': 'value'})
+
+
+def test_workflow_function_parameters(request, tmpdir):
+ # validating that the workflow function receives the merged
+ # execution inputs, as a dict
+
+ # the workflow function parameters will be written to this file
+ output_path = str(tmpdir.join('output'))
+ wf_inputs = {'output_path': output_path, 'input1': 'value1', 'input2': 'value2', 'input3': 5}
+
+ mock_workflow = _setup_mock_workflow_in_service(
+ request, inputs=dict((name, models.Input.wrap(name, val)) for name, val
+ in wf_inputs.iteritems()))
+
+ _create_workflow_runner(request, mock_workflow,
+ inputs={'input2': 'overriding-value2', 'input3': 7})
+
+ with open(output_path) as f:
+ wf_call_kwargs = json.load(f)
+ assert len(wf_call_kwargs) == 3
+ assert wf_call_kwargs.get('input1') == 'value1'
+ assert wf_call_kwargs.get('input2') == 'overriding-value2'
+ assert wf_call_kwargs.get('input3') == 7
+
+
+@pytest.fixture
+def service(model):
+ # sets up a service in the storage
+ service_id = tests_mock.topology.create_simple_topology_two_nodes(model)
+ service = model.service.get(service_id)
+ return service
+
+
+def _setup_mock_workflow_in_service(request, inputs=None):
+ # sets up a mock workflow as part of the service, including uploading
+ # the workflow code to the service's dir on the resource storage
+ service = request.getfixturevalue('service')
+ resource = request.getfixturevalue('resource')
+
+ source = tests_mock.workflow.__file__
+ resource.service_template.upload(str(service.service_template.id), source)
+ mock_workflow_name = 'test_workflow'
+ arguments = {}
+ if inputs:
+ for input in inputs.itervalues():
+ arguments[input.name] = input.as_argument()
+ workflow = models.Operation(
+ name=mock_workflow_name,
+ service=service,
+ function='workflow.mock_workflow',
+ inputs=inputs or {},
+ arguments=arguments)
+ service.workflows[mock_workflow_name] = workflow
+ return mock_workflow_name
+
+
+def _create_workflow_runner(request, workflow_name, inputs=None, executor=None,
+ task_max_attempts=None, task_retry_interval=None):
+ # helper method for instantiating a workflow runner
+ service_id = request.getfixturevalue('service').id
+ model = request.getfixturevalue('model')
+ resource = request.getfixturevalue('resource')
+ plugin_manager = request.getfixturevalue('plugin_manager')
+
+ # task configuration parameters can't be set to None, so they are
+ # only passed if the test has set them
+ task_configuration_kwargs = dict()
+ if task_max_attempts is not None:
+ task_configuration_kwargs['task_max_attempts'] = task_max_attempts
+ if task_retry_interval is not None:
+ task_configuration_kwargs['task_retry_interval'] = task_retry_interval
+
+ return WorkflowRunner(
+ workflow_name=workflow_name,
+ service_id=service_id,
+ inputs=inputs or {},
+ executor=executor,
+ model_storage=model,
+ resource_storage=resource,
+ plugin_manager=plugin_manager,
+ **task_configuration_kwargs)
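+
+# Typical use from a test (a sketch; 'request' is the pytest request object
+# already used throughout this module):
+#
+#     runner = _create_workflow_runner(request, 'install', task_max_attempts=3)
+#     runner.execute()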
+
+
+class TestResumableWorkflows(object):
+
+ def _create_initial_workflow_runner(
+ self, workflow_context, workflow, executor, inputs=None):
+
+ service = workflow_context.service
+ service.workflows['custom_workflow'] = tests_mock.models.create_operation(
+ 'custom_workflow',
+ operation_kwargs={
+ 'function': '{0}.{1}'.format(__name__, workflow.__name__),
+ 'inputs': dict((k, models.Input.wrap(k, v)) for k, v in (inputs or {}).items())
+ }
+ )
+ workflow_context.model.service.update(service)
+
+ wf_runner = WorkflowRunner(
+ service_id=workflow_context.service.id,
+ inputs=inputs or {},
+ model_storage=workflow_context.model,
+ resource_storage=workflow_context.resource,
+ plugin_manager=None,
+ workflow_name='custom_workflow',
+ executor=executor)
+ return wf_runner
+
+ @staticmethod
+ def _wait_for_active_and_cancel(workflow_runner):
+ if custom_events['is_active'].wait(60) is False:
+ raise TimeoutError("is_active wasn't set to True")
+ workflow_runner.cancel()
+ if custom_events['execution_cancelled'].wait(60) is False:
+ raise TimeoutError("Execution did not end")
+
+ def test_resume_workflow(self, workflow_context, thread_executor):
+ node = workflow_context.model.node.get_by_name(tests_mock.models.DEPENDENCY_NODE_NAME)
+ node.attributes['invocations'] = models.Attribute.wrap('invocations', 0)
+ self._create_interface(workflow_context, node, mock_pass_first_task_only)
+
+ wf_runner = self._create_initial_workflow_runner(
+ workflow_context, mock_parallel_tasks_workflow, thread_executor,
+ inputs={'number_of_tasks': 2})
+
+ wf_thread = Thread(target=wf_runner.execute)
+ wf_thread.daemon = True
+ wf_thread.start()
+
+ # Wait for the execution to start
+ self._wait_for_active_and_cancel(wf_runner)
+ node = workflow_context.model.node.refresh(node)
+
+ tasks = workflow_context.model.task.list(filters={'_stub_type': None})
+ assert any(task.status == task.SUCCESS for task in tasks)
+ assert any(task.status == task.RETRYING for task in tasks)
+ custom_events['is_resumed'].set()
+ assert any(task.status == task.RETRYING for task in tasks)
+
+ # Create a new workflow runner with an existing execution id. This causes
+ # the old execution to resume.
+ new_wf_runner = WorkflowRunner(
+ service_id=wf_runner.service.id,
+ inputs={},
+ model_storage=workflow_context.model,
+ resource_storage=workflow_context.resource,
+ plugin_manager=None,
+ execution_id=wf_runner.execution.id,
+ executor=thread_executor)
+
+ new_wf_runner.execute()
+
+ # Wait for it to finish and assert changes.
+ node = workflow_context.model.node.refresh(node)
+ assert all(task.status == task.SUCCESS for task in tasks)
+ assert node.attributes['invocations'].value == 3
+ assert wf_runner.execution.status == wf_runner.execution.SUCCEEDED
+
+ def test_resume_started_task(self, workflow_context, thread_executor):
+ node = workflow_context.model.node.get_by_name(tests_mock.models.DEPENDENCY_NODE_NAME)
+ node.attributes['invocations'] = models.Attribute.wrap('invocations', 0)
+ self._create_interface(workflow_context, node, mock_stuck_task)
+
+ wf_runner = self._create_initial_workflow_runner(
+ workflow_context, mock_parallel_tasks_workflow, thread_executor,
+ inputs={'number_of_tasks': 1})
+
+ wf_thread = Thread(target=wf_runner.execute)
+ wf_thread.daemon = True
+ wf_thread.start()
+
+ self._wait_for_active_and_cancel(wf_runner)
+ node = workflow_context.model.node.refresh(node)
+ task = workflow_context.model.task.list(filters={'_stub_type': None})[0]
+ assert node.attributes['invocations'].value == 1
+ assert task.status == task.STARTED
+ assert wf_runner.execution.status in (wf_runner.execution.CANCELLED,
+ wf_runner.execution.CANCELLING)
+ custom_events['is_resumed'].set()
+
+ new_thread_executor = thread.ThreadExecutor()
+ try:
+ new_wf_runner = WorkflowRunner(
+ service_id=wf_runner.service.id,
+ inputs={},
+ model_storage=workflow_context.model,
+ resource_storage=workflow_context.resource,
+ plugin_manager=None,
+ execution_id=wf_runner.execution.id,
+ executor=new_thread_executor)
+
+ new_wf_runner.execute()
+ finally:
+ new_thread_executor.close()
+
+ # Wait for it to finish and assert changes.
+ node = workflow_context.model.node.refresh(node)
+ assert node.attributes['invocations'].value == 2
+ assert task.status == task.SUCCESS
+ assert wf_runner.execution.status == wf_runner.execution.SUCCEEDED
+
+ def test_resume_failed_task(self, workflow_context, thread_executor):
+ node = workflow_context.model.node.get_by_name(tests_mock.models.DEPENDENCY_NODE_NAME)
+ node.attributes['invocations'] = models.Attribute.wrap('invocations', 0)
+ self._create_interface(workflow_context, node, mock_failed_before_resuming)
+
+ wf_runner = self._create_initial_workflow_runner(workflow_context,
+ mock_parallel_tasks_workflow,
+ thread_executor)
+ wf_thread = Thread(target=wf_runner.execute)
+ wf_thread.setDaemon(True)
+ wf_thread.start()
+
+ self._wait_for_active_and_cancel(wf_runner)
+ node = workflow_context.model.node.refresh(node)
+
+ task = workflow_context.model.task.list(filters={'_stub_type': None})[0]
+ assert node.attributes['invocations'].value == 2
+ assert task.status == task.STARTED
+ assert wf_runner.execution.status in (wf_runner.execution.CANCELLED,
+ wf_runner.execution.CANCELLING)
+
+ custom_events['is_resumed'].set()
+ assert node.attributes['invocations'].value == 2
+
+ # Create a new workflow runner with an existing execution id. This causes
+ # the old execution to resume.
+ new_thread_executor = thread.ThreadExecutor()
+ try:
+ new_wf_runner = WorkflowRunner(
+ service_id=wf_runner.service.id,
+ inputs={},
+ model_storage=workflow_context.model,
+ resource_storage=workflow_context.resource,
+ plugin_manager=None,
+ execution_id=wf_runner.execution.id,
+ executor=new_thread_executor)
+
+ new_wf_runner.execute()
+ finally:
+ new_thread_executor.close()
+
+ # Wait for it to finish and assert changes.
+ node = workflow_context.model.node.refresh(node)
+ assert node.attributes['invocations'].value == task.max_attempts - 1
+ assert task.status == task.SUCCESS
+ assert wf_runner.execution.status == wf_runner.execution.SUCCEEDED
+
+ def test_resume_failed_task_and_successful_task(self, workflow_context, thread_executor):
+ node = workflow_context.model.node.get_by_name(tests_mock.models.DEPENDENCY_NODE_NAME)
+ node.attributes['invocations'] = models.Attribute.wrap('invocations', 0)
+ self._create_interface(workflow_context, node, mock_pass_first_task_only)
+
+ wf_runner = self._create_initial_workflow_runner(
+ workflow_context,
+ mock_parallel_tasks_workflow,
+ thread_executor,
+ inputs={'retry_interval': 1, 'max_attempts': 2, 'number_of_tasks': 2}
+ )
+ wf_thread = Thread(target=wf_runner.execute)
+ wf_thread.setDaemon(True)
+ wf_thread.start()
+
+ if custom_events['execution_failed'].wait(60) is False:
+ raise TimeoutError("Execution did not end")
+
+ tasks = workflow_context.model.task.list(filters={'_stub_type': None})
+ node = workflow_context.model.node.refresh(node)
+ assert node.attributes['invocations'].value == 3
+ failed_task = [t for t in tasks if t.status == t.FAILED][0]
+
+ # the second task fails and exhausts its two attempts
+ assert any(task.status == task.FAILED for task in tasks)
+ assert failed_task.attempts_count == 2
+ # the first task passes
+ assert any(task.status == task.SUCCESS for task in tasks)
+ assert wf_runner.execution.status == wf_runner.execution.FAILED
+
+ custom_events['is_resumed'].set()
+ new_thread_executor = thread.ThreadExecutor()
+ try:
+ new_wf_runner = WorkflowRunner(
+ service_id=wf_runner.service.id,
+ retry_failed_tasks=True,
+ inputs={},
+ model_storage=workflow_context.model,
+ resource_storage=workflow_context.resource,
+ plugin_manager=None,
+ execution_id=wf_runner.execution.id,
+ executor=new_thread_executor)
+
+ new_wf_runner.execute()
+ finally:
+ new_thread_executor.close()
+
+ # Wait for it to finish and assert changes.
+ node = workflow_context.model.node.refresh(node)
+ assert failed_task.attempts_count == 1
+ assert node.attributes['invocations'].value == 4
+ assert all(task.status == task.SUCCESS for task in tasks)
+ assert wf_runner.execution.status == wf_runner.execution.SUCCEEDED
+
+ def test_two_sequential_task_first_task_failed(self, workflow_context, thread_executor):
+ node = workflow_context.model.node.get_by_name(tests_mock.models.DEPENDENCY_NODE_NAME)
+ node.attributes['invocations'] = models.Attribute.wrap('invocations', 0)
+ self._create_interface(workflow_context, node, mock_fail_first_task_only)
+
+ wf_runner = self._create_initial_workflow_runner(
+ workflow_context,
+ mock_sequential_tasks_workflow,
+ thread_executor,
+ inputs={'retry_interval': 1, 'max_attempts': 1, 'number_of_tasks': 2}
+ )
+ wf_thread = Thread(target=wf_runner.execute)
+ wf_thread.setDaemon(True)
+ wf_thread.start()
+
+ if custom_events['execution_failed'].wait(60) is False:
+ raise TimeoutError("Execution did not end")
+
+ tasks = workflow_context.model.task.list(filters={'_stub_type': None})
+ node = workflow_context.model.node.refresh(node)
+ assert node.attributes['invocations'].value == 1
+ assert any(t.status == t.FAILED for t in tasks)
+ assert any(t.status == t.PENDING for t in tasks)
+
+ custom_events['is_resumed'].set()
+ new_thread_executor = thread.ThreadExecutor()
+ try:
+ new_wf_runner = WorkflowRunner(
+ service_id=wf_runner.service.id,
+ inputs={},
+ model_storage=workflow_context.model,
+ resource_storage=workflow_context.resource,
+ plugin_manager=None,
+ execution_id=wf_runner.execution.id,
+ executor=new_thread_executor)
+
+ new_wf_runner.execute()
+ finally:
+ new_thread_executor.close()
+
+ # Wait for it to finish and assert changes.
+ node = workflow_context.model.node.refresh(node)
+ assert node.attributes['invocations'].value == 2
+ assert any(t.status == t.SUCCESS for t in tasks)
+ assert any(t.status == t.FAILED for t in tasks)
+ assert wf_runner.execution.status == wf_runner.execution.SUCCEEDED
+
+ @staticmethod
+ @pytest.fixture
+ def thread_executor():
+ result = thread.ThreadExecutor()
+ try:
+ yield result
+ finally:
+ result.close()
+
+ @staticmethod
+ @pytest.fixture
+ def workflow_context(tmpdir):
+ workflow_context = tests_mock.context.simple(str(tmpdir))
+ yield workflow_context
+ storage.release_sqlite_storage(workflow_context.model)
+
+ @staticmethod
+ def _create_interface(ctx, node, func, arguments=None):
+ interface_name = 'aria.interfaces.lifecycle'
+ operation_kwargs = dict(function='{name}.{func.__name__}'.format(
+ name=__name__, func=func))
+ if arguments:
+ # the operation has to declare the arguments before those may be passed
+ operation_kwargs['arguments'] = arguments
+ operation_name = 'create'
+ interface = tests_mock.models.create_interface(node.service, interface_name, operation_name,
+ operation_kwargs=operation_kwargs)
+ node.interfaces[interface.name] = interface
+ ctx.model.node.update(node)
+
+ return node, interface_name, operation_name
+
+ @staticmethod
+ def _engine(workflow_func, workflow_context, executor):
+ graph = workflow_func(ctx=workflow_context)
+ execution = workflow_context.execution
+ graph_compiler.GraphCompiler(workflow_context, executor.__class__).compile(graph)
+ workflow_context.execution = execution
+
+ return engine.Engine(executors={executor.__class__: executor})
+
+ @pytest.fixture(autouse=True)
+ def register_to_events(self):
+ def execution_cancelled(*args, **kwargs):
+ custom_events['execution_cancelled'].set()
+
+ def execution_failed(*args, **kwargs):
+ custom_events['execution_failed'].set()
+
+ events.on_cancelled_workflow_signal.connect(execution_cancelled)
+ events.on_failure_workflow_signal.connect(execution_failed)
+ yield
+ events.on_cancelled_workflow_signal.disconnect(execution_cancelled)
+ events.on_failure_workflow_signal.disconnect(execution_failed)
+ for event in custom_events.values():
+ event.clear()
+
+
+@workflow
+def mock_sequential_tasks_workflow(ctx, graph,
+ retry_interval=1, max_attempts=10, number_of_tasks=1):
+ node = ctx.model.node.get_by_name(tests_mock.models.DEPENDENCY_NODE_NAME)
+ graph.sequence(*_create_tasks(node, retry_interval, max_attempts, number_of_tasks))
+
+
+@workflow
+def mock_parallel_tasks_workflow(ctx, graph,
+ retry_interval=1, max_attempts=10, number_of_tasks=1):
+ node = ctx.model.node.get_by_name(tests_mock.models.DEPENDENCY_NODE_NAME)
+ graph.add_tasks(*_create_tasks(node, retry_interval, max_attempts, number_of_tasks))
+
+
+def _create_tasks(node, retry_interval, max_attempts, number_of_tasks):
+ return [
+ api.task.OperationTask(node,
+ 'aria.interfaces.lifecycle',
+ 'create',
+ retry_interval=retry_interval,
+ max_attempts=max_attempts)
+ for _ in xrange(number_of_tasks)
+ ]
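+
+# mock_sequential_tasks_workflow chains the tasks so each waits for the
+# previous one, while mock_parallel_tasks_workflow adds them unordered so the
+# executor may run them concurrently. The difference on a hypothetical
+# two-task graph:
+#
+#     graph.sequence(t1, t2)     # t2 runs only after t1 finishes
+#     graph.add_tasks(t1, t2)    # no ordering; t1 and t2 may run in parallel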
+
+
+@operation
+def mock_failed_before_resuming(ctx):
+ """
+ The task fails on every attempt except the last: it passes only on attempt
+ number ctx.task.max_attempts - 1, so the total number of invocations is
+ ctx.task.max_attempts - 1.
+ """
+ ctx.node.attributes['invocations'] += 1
+
+ if ctx.node.attributes['invocations'] == 2:
+ custom_events['is_active'].set()
+ # unfreeze the thread only when all of the invocations are done
+ while ctx.node.attributes['invocations'] < ctx.task.max_attempts - 1:
+ time.sleep(5)
+
+ elif ctx.node.attributes['invocations'] == ctx.task.max_attempts - 1:
+ # pass only just before the end.
+ return
+ else:
+ # fail otherwise
+ raise FailingTask("stop this task")
+
+
+@operation
+def mock_stuck_task(ctx):
+ ctx.node.attributes['invocations'] += 1
+ while not custom_events['is_resumed'].isSet():
+ if not custom_events['is_active'].isSet():
+ custom_events['is_active'].set()
+ time.sleep(5)
+
+
+@operation
+def mock_pass_first_task_only(ctx):
+ ctx.node.attributes['invocations'] += 1
+
+ if ctx.node.attributes['invocations'] != 1:
+ custom_events['is_active'].set()
+ if not custom_events['is_resumed'].isSet():
+ # the second task should keep failing for as long as the workflow
+ # has not been resumed
+ raise FailingTask("wasn't resumed yet")
+
+
+@operation
+def mock_fail_first_task_only(ctx):
+ ctx.node.attributes['invocations'] += 1
+
+ if not custom_events['is_resumed'].isSet() and ctx.node.attributes['invocations'] == 1:
+ raise FailingTask("First task should fail")
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/__init__.py
new file mode 100644
index 0000000..7f0fd56
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/__init__.py
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import api, core
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/api/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/api/__init__.py
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/api/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/api/test_task.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/api/test_task.py
new file mode 100644
index 0000000..9d91b6b
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/api/test_task.py
@@ -0,0 +1,223 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import pytest
+
+from aria.orchestrator import context
+from aria.orchestrator.workflows import api
+
+from tests import mock, storage
+
+
+@pytest.fixture
+def ctx(tmpdir):
+ """
+ Create the following graph in storage:
+ dependency_node <------ dependent_node
+ :return:
+ """
+ simple_context = mock.context.simple(str(tmpdir), inmemory=False)
+ simple_context.model.execution.put(mock.models.create_execution(simple_context.service))
+ yield simple_context
+ storage.release_sqlite_storage(simple_context.model)
+
+
+class TestOperationTask(object):
+
+ def test_node_operation_task_creation(self, ctx):
+ interface_name = 'test_interface'
+ operation_name = 'create'
+
+ plugin = mock.models.create_plugin('test_plugin', '0.1')
+ ctx.model.plugin.update(plugin)
+
+ arguments = {'test_input': True}
+
+ interface = mock.models.create_interface(
+ ctx.service,
+ interface_name,
+ operation_name,
+ operation_kwargs=dict(plugin=plugin,
+ function='op_path',
+ arguments=arguments),)
+
+ node = ctx.model.node.get_by_name(mock.models.DEPENDENT_NODE_NAME)
+ node.interfaces[interface_name] = interface
+ ctx.model.node.update(node)
+ max_attempts = 10
+ retry_interval = 10
+ ignore_failure = True
+
+ with context.workflow.current.push(ctx):
+ api_task = api.task.OperationTask(
+ node,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ arguments=arguments,
+ max_attempts=max_attempts,
+ retry_interval=retry_interval,
+ ignore_failure=ignore_failure)
+
+ assert api_task.name == api.task.OperationTask.NAME_FORMAT.format(
+ type='node',
+ name=node.name,
+ interface=interface_name,
+ operation=operation_name
+ )
+ assert api_task.function == 'op_path'
+ assert api_task.actor == node
+ assert api_task.arguments['test_input'].value is True
+ assert api_task.retry_interval == retry_interval
+ assert api_task.max_attempts == max_attempts
+ assert api_task.ignore_failure == ignore_failure
+ assert api_task.plugin.name == 'test_plugin'
+
+ def test_source_relationship_operation_task_creation(self, ctx):
+ interface_name = 'test_interface'
+ operation_name = 'preconfigure'
+
+ plugin = mock.models.create_plugin('test_plugin', '0.1')
+ ctx.model.plugin.update(plugin)
+
+ arguments = {'test_input': True}
+
+ interface = mock.models.create_interface(
+ ctx.service,
+ interface_name,
+ operation_name,
+ operation_kwargs=dict(plugin=plugin,
+ function='op_path',
+ arguments=arguments)
+ )
+
+ relationship = ctx.model.relationship.list()[0]
+ relationship.interfaces[interface.name] = interface
+ max_attempts = 10
+ retry_interval = 10
+
+ with context.workflow.current.push(ctx):
+ api_task = api.task.OperationTask(
+ relationship,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ arguments=arguments,
+ max_attempts=max_attempts,
+ retry_interval=retry_interval)
+
+ assert api_task.name == api.task.OperationTask.NAME_FORMAT.format(
+ type='relationship',
+ name=relationship.name,
+ interface=interface_name,
+ operation=operation_name
+ )
+ assert api_task.function == 'op_path'
+ assert api_task.actor == relationship
+ assert api_task.arguments['test_input'].value is True
+ assert api_task.retry_interval == retry_interval
+ assert api_task.max_attempts == max_attempts
+ assert api_task.plugin.name == 'test_plugin'
+
+ def test_target_relationship_operation_task_creation(self, ctx):
+ interface_name = 'test_interface'
+ operation_name = 'preconfigure'
+
+ plugin = mock.models.create_plugin('test_plugin', '0.1')
+ ctx.model.plugin.update(plugin)
+
+ arguments = {'test_input': True}
+
+ interface = mock.models.create_interface(
+ ctx.service,
+ interface_name,
+ operation_name,
+ operation_kwargs=dict(plugin=plugin,
+ function='op_path',
+ arguments=arguments)
+ )
+
+ relationship = ctx.model.relationship.list()[0]
+ relationship.interfaces[interface.name] = interface
+ max_attempts = 10
+ retry_interval = 10
+
+ with context.workflow.current.push(ctx):
+ api_task = api.task.OperationTask(
+ relationship,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ arguments=arguments,
+ max_attempts=max_attempts,
+ retry_interval=retry_interval)
+
+ assert api_task.name == api.task.OperationTask.NAME_FORMAT.format(
+ type='relationship',
+ name=relationship.name,
+ interface=interface_name,
+ operation=operation_name
+ )
+ assert api_task.function == 'op_path'
+ assert api_task.actor == relationship
+ assert api_task.arguments['test_input'].value is True
+ assert api_task.retry_interval == retry_interval
+ assert api_task.max_attempts == max_attempts
+ assert api_task.plugin.name == 'test_plugin'
+
+ def test_operation_task_default_values(self, ctx):
+ interface_name = 'test_interface'
+ operation_name = 'create'
+
+ plugin = mock.models.create_plugin('package', '0.1')
+ ctx.model.plugin.update(plugin)
+
+ dependency_node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+
+ interface = mock.models.create_interface(
+ ctx.service,
+ interface_name,
+ operation_name,
+ operation_kwargs=dict(plugin=plugin,
+ function='op_path'))
+ dependency_node.interfaces[interface_name] = interface
+
+ with context.workflow.current.push(ctx):
+ task = api.task.OperationTask(
+ dependency_node,
+ interface_name=interface_name,
+ operation_name=operation_name)
+
+ assert task.arguments == {}
+ assert task.retry_interval == ctx._task_retry_interval
+ assert task.max_attempts == ctx._task_max_attempts
+ assert task.ignore_failure == ctx._task_ignore_failure
+ assert task.plugin is plugin
+
+
+class TestWorkflowTask(object):
+
+ def test_workflow_task_creation(self, ctx):
+
+ workspace = {}
+
+ mock_class = type('mock_class', (object,), {'test_attribute': True})
+
+ def sub_workflow(**kwargs):
+ workspace.update(kwargs)
+ return mock_class
+
+ with context.workflow.current.push(ctx):
+ workflow_task = api.task.WorkflowTask(sub_workflow, kwarg='workflow_kwarg')
+ assert workflow_task.graph is mock_class
+ assert workflow_task.test_attribute is True
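+
+ # WorkflowTask calls the wrapped workflow function immediately (inside the
+ # pushed workflow context) and exposes its return value as .graph; other
+ # attribute lookups, like test_attribute above, are delegated to that
+ # graph. A sketch reusing sub_workflow from this test:
+ #
+ #     wt = api.task.WorkflowTask(sub_workflow, kwarg='x')
+ #     wt.graph           # -> mock_class, sub_workflow's return value
+ #     wt.test_attribute  # -> True, proxied from the graph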
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/api/test_task_graph.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/api/test_task_graph.py
new file mode 100644
index 0000000..a569386
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/api/test_task_graph.py
@@ -0,0 +1,745 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from aria.orchestrator.workflows.api import task_graph, task
+
+
+class MockTask(task.BaseTask):
+ def __init__(self):
+ super(MockTask, self).__init__(ctx={})
+
+
+@pytest.fixture
+def graph():
+ return task_graph.TaskGraph(name='mock-graph')
+
+
+class TestTaskGraphTasks(object):
+
+ def test_add_task(self, graph):
+ task = MockTask()
+ add_result = graph.add_tasks(task)
+ assert add_result == [task]
+ tasks = [t for t in graph.tasks]
+ assert len(tasks) == 1
+ assert tasks[0] == task
+
+ def test_add_empty_group(self, graph):
+ result = graph.add_tasks([])
+ assert result == []
+
+ def test_add_group(self, graph):
+ tasks = [MockTask(), MockTask(), MockTask()]
+ added_tasks = graph.add_tasks(*tasks)
+ assert added_tasks == tasks
+
+ def test_add_partially_existing_group(self, graph):
+ task = MockTask()
+ graph.add_tasks(task)
+ tasks = [MockTask(), task, MockTask()]
+ added_tasks = graph.add_tasks(*tasks)
+ assert added_tasks == [tasks[0], tasks[2]]
+
+ def test_add_recursively_group(self, graph):
+ recursive_group = [MockTask(), MockTask()]
+ tasks = [MockTask(), recursive_group, MockTask()]
+ added_tasks = graph.add_tasks(tasks)
+ assert added_tasks == [tasks[0], recursive_group[0], recursive_group[1], tasks[2]]
+
+ def test_add_existing_task(self, graph):
+ task = MockTask()
+ graph.add_tasks(task)
+ # adding a task already in the graph - should have no effect, and return an empty list
+ add_result = graph.add_tasks(task)
+ assert add_result == []
+ tasks = [t for t in graph.tasks]
+ assert len(tasks) == 1
+ assert tasks[0] == task
+
+ def test_remove_task(self, graph):
+ task = MockTask()
+ other_task = MockTask()
+ graph.add_tasks(task)
+ graph.add_tasks(other_task)
+ graph.remove_tasks(other_task)
+ tasks = [t for t in graph.tasks]
+ assert len(tasks) == 1
+ assert tasks[0] == task
+
+ def test_remove_tasks_with_dependency(self, graph):
+ task = MockTask()
+ dependent_task = MockTask()
+ graph.add_tasks(task)
+ graph.add_tasks(dependent_task)
+ graph.add_dependency(dependent_task, task)
+ remove_result = graph.remove_tasks(dependent_task)
+ assert remove_result == [dependent_task]
+ tasks = [t for t in graph.tasks]
+ assert len(tasks) == 1
+ assert tasks[0] == task
+ # asserting no dependencies are left for the dependent task
+ assert len(list(graph.get_dependencies(task))) == 0
+
+ def test_remove_empty_group(self, graph):
+ result = graph.remove_tasks([])
+ assert result == []
+
+ def test_remove_group(self, graph):
+ tasks = [MockTask(), MockTask(), MockTask()]
+ graph.add_tasks(*tasks)
+ removed_tasks = graph.remove_tasks(*tasks)
+ assert removed_tasks == tasks
+
+ def test_remove_partially_existing_group(self, graph):
+ task = MockTask()
+ graph.add_tasks(task)
+ tasks = [MockTask(), task, MockTask()]
+ removed_tasks = graph.remove_tasks(*tasks)
+ assert removed_tasks == [task]
+
+ def test_remove_recursively_group(self, graph):
+ recursive_group = [MockTask(), MockTask()]
+ tasks = [MockTask(), recursive_group, MockTask()]
+ graph.add_tasks(tasks)
+ removed_tasks = graph.remove_tasks(tasks)
+ assert removed_tasks == [tasks[0], recursive_group[0], recursive_group[1], tasks[2]]
+
+ def test_remove_nonexistent_task(self, graph):
+ task = MockTask()
+ task_not_in_graph = MockTask()
+ graph.add_tasks(task)
+ # removing a task not in the graph - should have no effect, and return an empty list
+ remove_result = graph.remove_tasks(task_not_in_graph)
+ assert remove_result == []
+ tasks = [t for t in graph.tasks]
+ assert len(tasks) == 1
+ assert tasks[0] == task
+
+ def test_has_task(self, graph):
+ task = MockTask()
+ graph.add_tasks(task)
+ assert graph.has_tasks(task) is True
+
+ def test_has_nonexistent_task(self, graph):
+ task = MockTask()
+ task_not_in_graph = MockTask()
+ graph.add_tasks(task)
+ assert graph.has_tasks(task_not_in_graph) is False
+
+ def test_has_empty_group(self, graph):
+ # the "empty task" is in the graph
+ assert graph.has_tasks([]) is True
+
+ def test_has_group(self, graph):
+ tasks = [MockTask(), MockTask(), MockTask()]
+ graph.add_tasks(*tasks)
+ assert graph.has_tasks(*tasks) is True
+
+ def test_has_partially_existing_group(self, graph):
+ task = MockTask()
+ graph.add_tasks(task)
+ tasks = [MockTask(), task, MockTask()]
+ assert graph.has_tasks(tasks) is False
+
+ def test_has_recursively_group(self, graph):
+ recursive_group = [MockTask(), MockTask()]
+ tasks = [MockTask(), recursive_group, MockTask()]
+ graph.add_tasks(tasks)
+ assert graph.has_tasks(tasks) is True
+
+ def test_get_task(self, graph):
+ task = MockTask()
+ graph.add_tasks(task)
+ assert graph.get_task(task.id) == task
+
+ def test_get_nonexistent_task(self, graph):
+ task = MockTask()
+ task_not_in_graph = MockTask()
+ graph.add_tasks(task)
+ with pytest.raises(task_graph.TaskNotInGraphError):
+ graph.get_task(task_not_in_graph.id)
+
+
+class TestTaskGraphGraphTraversal(object):
+
+ def test_tasks_iteration(self, graph):
+ task = MockTask()
+ other_task = MockTask()
+ graph.add_tasks(task)
+ graph.add_tasks(other_task)
+ tasks = [t for t in graph.tasks]
+ assert set(tasks) == set([task, other_task])
+
+ def test_get_dependents(self, graph):
+ task = MockTask()
+ dependent_task_1 = MockTask()
+ dependent_task_2 = MockTask()
+ transitively_dependent_task = MockTask()
+
+ graph.add_tasks(task)
+ graph.add_tasks(dependent_task_1)
+ graph.add_tasks(dependent_task_2)
+ graph.add_tasks(transitively_dependent_task)
+
+ graph.add_dependency(dependent_task_1, task)
+ graph.add_dependency(dependent_task_2, task)
+ graph.add_dependency(transitively_dependent_task, dependent_task_2)
+
+ dependent_tasks = list(graph.get_dependents(task))
+ # transitively_dependent_task not expected to appear in the result
+ assert set(dependent_tasks) == set([dependent_task_1, dependent_task_2])
+
+ def test_get_task_empty_dependents(self, graph):
+ task = MockTask()
+ other_task = MockTask()
+ graph.add_tasks(task)
+ graph.add_tasks(other_task)
+ dependent_tasks = list(graph.get_dependents(task))
+ assert len(dependent_tasks) == 0
+
+ def test_get_nonexistent_task_dependents(self, graph):
+ task = MockTask()
+ task_not_in_graph = MockTask()
+ graph.add_tasks(task)
+ with pytest.raises(task_graph.TaskNotInGraphError):
+ list(graph.get_dependents(task_not_in_graph))
+
+ def test_get_dependencies(self, graph):
+ task = MockTask()
+ dependency_task_1 = MockTask()
+ dependency_task_2 = MockTask()
+ transitively_dependency_task = MockTask()
+
+ graph.add_tasks(task)
+ graph.add_tasks(dependency_task_1)
+ graph.add_tasks(dependency_task_2)
+ graph.add_tasks(transitively_dependency_task)
+
+ graph.add_dependency(task, dependency_task_1)
+ graph.add_dependency(task, dependency_task_2)
+ graph.add_dependency(dependency_task_2, transitively_dependency_task)
+
+ dependency_tasks = list(graph.get_dependencies(task))
+ # transitively_dependency_task not expected to appear in the result
+ assert set(dependency_tasks) == set([dependency_task_1, dependency_task_2])
+
+ def test_get_task_empty_dependencies(self, graph):
+ task = MockTask()
+ other_task = MockTask()
+ graph.add_tasks(task)
+ graph.add_tasks(other_task)
+ dependency_tasks = list(graph.get_dependencies(task))
+ assert len(dependency_tasks) == 0
+
+ def test_get_nonexistent_task_dependencies(self, graph):
+ task = MockTask()
+ task_not_in_graph = MockTask()
+ graph.add_tasks(task)
+ with pytest.raises(task_graph.TaskNotInGraphError):
+ list(graph.get_dependencies(task_not_in_graph))
+
+
+class TestTaskGraphDependencies(object):
+
+ def test_add_dependency(self, graph):
+ task = MockTask()
+ dependency_task = MockTask()
+ unrelated_task = MockTask()
+ graph.add_tasks(task)
+ graph.add_tasks(dependency_task)
+ graph.add_tasks(unrelated_task)
+ graph.add_dependency(task, dependency_task)
+ add_result = graph.has_dependency(task, dependency_task)
+ assert add_result is True
+ dependency_tasks = list(graph.get_dependencies(task))
+ assert len(dependency_tasks) == 1
+ assert dependency_tasks[0] == dependency_task
+
+ def test_add_existing_dependency(self, graph):
+ task = MockTask()
+ dependency_task = MockTask()
+ graph.add_tasks(task)
+ graph.add_tasks(dependency_task)
+ graph.add_dependency(task, dependency_task)
+ add_result = graph.has_dependency(task, dependency_task)
+ assert add_result is True
+ # adding the same dependency again should have no effect
+ graph.add_dependency(task, dependency_task)
+ add_result = graph.has_dependency(task, dependency_task)
+ assert add_result is True
+ dependency_tasks = list(graph.get_dependencies(task))
+ assert len(dependency_tasks) == 1
+ assert dependency_tasks[0] == dependency_task
+
+ def test_add_dependency_nonexistent_dependent(self, graph):
+ task = MockTask()
+ task_not_in_graph = MockTask()
+ graph.add_tasks(task)
+ with pytest.raises(task_graph.TaskNotInGraphError):
+ graph.add_dependency(task_not_in_graph, task)
+
+ def test_add_dependency_nonexistent_dependency(self, graph):
+ task = MockTask()
+ task_not_in_graph = MockTask()
+ graph.add_tasks(task)
+ with pytest.raises(task_graph.TaskNotInGraphError):
+ graph.add_dependency(task, task_not_in_graph)
+
+ def test_add_dependency_empty_dependent(self, graph):
+ task = MockTask()
+ graph.add_tasks(task)
+ graph.add_dependency([], task)
+ # an empty dependent group creates no dependency; the graph is unchanged
+ assert set(graph.tasks) == set((task,))
+
+ def test_add_dependency_empty_dependency(self, graph):
+ task = MockTask()
+ graph.add_tasks(task)
+ graph.add_dependency(task, [])
+ # an empty dependency group creates no dependency; the graph is unchanged
+ assert set(graph.tasks) == set((task,))
+
+ def test_add_dependency_dependent_group(self, graph):
+ task = MockTask()
+ group_tasks = [MockTask() for _ in xrange(3)]
+ graph.add_tasks(task)
+ graph.add_tasks(*group_tasks)
+ graph.add_dependency(group_tasks, task)
+ assert graph.has_dependency(group_tasks[0], task) is True
+ assert graph.has_dependency(group_tasks[1], task) is True
+ assert graph.has_dependency(group_tasks[2], task) is True
+
+ def test_add_dependency_dependency_group(self, graph):
+ task = MockTask()
+ group_tasks = [MockTask() for _ in xrange(3)]
+ graph.add_tasks(task)
+ graph.add_tasks(*group_tasks)
+ graph.add_dependency(task, group_tasks)
+ assert graph.has_dependency(task, group_tasks[0]) is True
+ assert graph.has_dependency(task, group_tasks[1]) is True
+ assert graph.has_dependency(task, group_tasks[2]) is True
+
+ def test_add_dependency_between_groups(self, graph):
+ group_1_tasks = [MockTask() for _ in xrange(3)]
+ group_2_tasks = [MockTask() for _ in xrange(3)]
+ graph.add_tasks(*group_1_tasks)
+ graph.add_tasks(*group_2_tasks)
+ graph.add_dependency(group_1_tasks, group_2_tasks)
+ for group_2_task in group_2_tasks:
+ assert graph.has_dependency(group_1_tasks[0], group_2_task) is True
+ assert graph.has_dependency(group_1_tasks[1], group_2_task) is True
+ assert graph.has_dependency(group_1_tasks[2], group_2_task) is True
+
+ def test_add_dependency_dependency_group_with_some_existing_dependencies(self, graph):
+ task = MockTask()
+ group_tasks = [MockTask() for _ in xrange(3)]
+ graph.add_tasks(task)
+ graph.add_tasks(*group_tasks)
+ # adding a dependency on a specific task manually,
+ # before adding a dependency on the whole parallel
+ graph.add_dependency(task, group_tasks[1])
+ graph.add_dependency(task, group_tasks)
+ assert graph.has_dependency(task, group_tasks[0]) is True
+ assert graph.has_dependency(task, group_tasks[1]) is True
+ assert graph.has_dependency(task, group_tasks[2]) is True
+
+ def test_add_existing_dependency_between_groups(self, graph):
+ group_1_tasks = [MockTask() for _ in xrange(3)]
+ group_2_tasks = [MockTask() for _ in xrange(3)]
+ graph.add_tasks(*group_1_tasks)
+ graph.add_tasks(*group_2_tasks)
+ graph.add_dependency(group_1_tasks, group_2_tasks)
+ add_result = graph.has_dependency(group_1_tasks, group_2_tasks)
+ assert add_result is True
+ # adding a dependency already in the graph should have no effect
+ graph.add_dependency(group_1_tasks, group_2_tasks)
+ add_result = graph.has_dependency(group_1_tasks, group_2_tasks)
+ assert add_result is True
+ for group_2_task in group_2_tasks:
+ assert graph.has_dependency(group_1_tasks[0], group_2_task) is True
+ assert graph.has_dependency(group_1_tasks[1], group_2_task) is True
+ assert graph.has_dependency(group_1_tasks[2], group_2_task) is True
+
+ def test_has_dependency(self, graph):
+ task = MockTask()
+ dependency_task = MockTask()
+ graph.add_tasks(task)
+ graph.add_tasks(dependency_task)
+ graph.add_dependency(task, dependency_task)
+ assert graph.has_dependency(task, dependency_task) is True
+
+ def test_has_nonexistent_dependency(self, graph):
+ task = MockTask()
+ other_task = MockTask()
+ graph.add_tasks(task)
+ graph.add_tasks(other_task)
+ assert graph.has_dependency(task, other_task) is False
+
+ def test_has_dependency_nonexistent_dependent(self, graph):
+ task = MockTask()
+ task_not_in_graph = MockTask()
+ graph.add_tasks(task)
+ with pytest.raises(task_graph.TaskNotInGraphError):
+ graph.has_dependency(task_not_in_graph, task)
+
+ def test_has_dependency_nonexistent_dependency(self, graph):
+ task = MockTask()
+ task_not_in_graph = MockTask()
+ graph.add_tasks(task)
+ with pytest.raises(task_graph.TaskNotInGraphError):
+ graph.has_dependency(task, task_not_in_graph)
+
+ def test_has_dependency_empty_dependent(self, graph):
+ task = MockTask()
+ graph.add_tasks(task)
+ # expecting has_dependency to return False for an empty dependent
+ assert graph.has_dependency([], task) is False
+
+ def test_has_dependency_empty_dependency(self, graph):
+ task = MockTask()
+ graph.add_tasks(task)
+ # expecting has_dependency to return False for an empty dependency
+ assert graph.has_dependency(task, []) is False
+
+ def test_has_dependency_dependent_group(self, graph):
+ task = MockTask()
+ group_tasks = [MockTask() for _ in xrange(3)]
+ graph.add_tasks(task)
+ graph.add_tasks(*group_tasks)
+ assert graph.has_dependency(group_tasks, task) is False
+ graph.add_dependency(group_tasks, task)
+ assert graph.has_dependency(group_tasks, task) is True
+
+ def test_has_dependency_dependency_parallel(self, graph):
+ task = MockTask()
+ group_tasks = [MockTask() for _ in xrange(3)]
+ graph.add_tasks(task)
+ graph.add_tasks(*group_tasks)
+ assert graph.has_dependency(task, group_tasks) is False
+ graph.add_dependency(task, group_tasks)
+ assert graph.has_dependency(task, group_tasks) is True
+
+ def test_has_dependency_between_groups(self, graph):
+ group_1_tasks = [MockTask() for _ in xrange(3)]
+ group_2_tasks = [MockTask() for _ in xrange(3)]
+ graph.add_tasks(*group_1_tasks)
+ graph.add_tasks(*group_2_tasks)
+ assert graph.has_dependency(group_2_tasks, group_1_tasks) is False
+ graph.add_dependency(group_2_tasks, group_1_tasks)
+ assert graph.has_dependency(group_2_tasks, group_1_tasks) is True
+
+ def test_has_dependency_dependency_parallel_with_some_existing_dependencies(self, graph):
+ task = MockTask()
+ parallel_tasks = [MockTask() for _ in xrange(3)]
+ graph.add_tasks(task)
+ parallel = graph.add_tasks(*parallel_tasks)
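+ # add_tasks returns the list of inserted tasks, so `parallel` can be queried as a group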
+ graph.add_dependency(task, parallel_tasks[1])
+ # only a partial dependency exists - has_dependency is expected to return False
+ assert graph.has_dependency(task, parallel) is False
+
+ def test_has_nonexistent_dependency_between_groups(self, graph):
+ group_1_tasks = [MockTask() for _ in xrange(3)]
+ group_2_tasks = [MockTask() for _ in xrange(3)]
+ graph.add_tasks(*group_1_tasks)
+ graph.add_tasks(*group_2_tasks)
+ assert graph.has_dependency(group_1_tasks, group_2_tasks) is False
+
+ def test_remove_dependency(self, graph):
+ task = MockTask()
+ dependency_task = MockTask()
+ another_dependent_task = MockTask()
+ graph.add_tasks(task)
+ graph.add_tasks(dependency_task)
+ graph.add_tasks(another_dependent_task)
+ graph.add_dependency(task, dependency_task)
+ graph.add_dependency(another_dependent_task, dependency_task)
+
+ graph.remove_dependency(task, dependency_task)
+ # only the removed dependency should be gone; the unrelated one must remain
+ assert graph.has_dependency(task, dependency_task) is False
+ assert graph.has_dependency(another_dependent_task, dependency_task) is True
+
+ def test_remove_nonexistent_dependency(self, graph):
+ task = MockTask()
+ dependency_task = MockTask()
+ graph.add_tasks(task)
+ graph.add_tasks(dependency_task)
+ # removing a dependency not in the graph should have no effect
+ graph.remove_dependency(task, dependency_task)
+ assert graph.has_dependency(task, dependency_task) is False
+ tasks = [t for t in graph.tasks]
+ assert set(tasks) == set([task, dependency_task])
+
+ def test_remove_dependency_nonexistent_dependent(self, graph):
+ task = MockTask()
+ task_not_in_graph = MockTask()
+ graph.add_tasks(task)
+ with pytest.raises(task_graph.TaskNotInGraphError):
+ graph.remove_dependency(task_not_in_graph, task)
+
+ def test_remove_dependency_nonexistent_dependency(self, graph):
+ # in this test the dependency *task* is not in the graph, not just the dependency itself
+ task = MockTask()
+ task_not_in_graph = MockTask()
+ graph.add_tasks(task)
+ with pytest.raises(task_graph.TaskNotInGraphError):
+ graph.remove_dependency(task, task_not_in_graph)
+
+ def test_remove_dependency_empty_dependent(self, graph):
+ task = MockTask()
+ graph.add_tasks(task)
+ # removing with an empty dependent should be a no-op
+ graph.remove_dependency([], task)
+ assert set(graph.tasks) == set((task,))
+
+ def test_remove_dependency_empty_dependency(self, graph):
+ task = MockTask()
+ graph.add_tasks(task)
+ # removing with an empty dependency should be a no-op
+ graph.remove_dependency(task, [])
+ assert set(graph.tasks) == set((task,))
+
+ def test_remove_dependency_dependent_group(self, graph):
+ task = MockTask()
+ group_tasks = [MockTask() for _ in xrange(3)]
+ graph.add_tasks(task)
+ graph.add_tasks(*group_tasks)
+ graph.add_dependency(group_tasks, task)
+ graph.remove_dependency(group_tasks, task)
+ assert graph.has_dependency(group_tasks, task) is False
+ assert graph.has_dependency(group_tasks[0], task) is False
+ assert graph.has_dependency(group_tasks[1], task) is False
+ assert graph.has_dependency(group_tasks[2], task) is False
+
+ def test_remove_dependency_dependency_group(self, graph):
+ task = MockTask()
+ group_tasks = [MockTask() for _ in xrange(3)]
+ graph.add_tasks(task)
+ graph.add_tasks(*group_tasks)
+ graph.add_dependency(task, group_tasks)
+ graph.remove_dependency(task, group_tasks)
+ assert graph.has_dependency(task, group_tasks) is False
+ assert graph.has_dependency(task, group_tasks[0]) is False
+ assert graph.has_dependency(task, group_tasks[1]) is False
+ assert graph.has_dependency(task, group_tasks[2]) is False
+
+ def test_remove_dependency_between_groups(self, graph):
+ group_1_tasks = [MockTask() for _ in xrange(3)]
+ group_2_tasks = [MockTask() for _ in xrange(3)]
+ graph.add_tasks(*group_1_tasks)
+ graph.add_tasks(*group_2_tasks)
+ graph.add_dependency(group_2_tasks, group_1_tasks)
+ graph.remove_dependency(group_2_tasks, group_1_tasks)
+ assert graph.has_dependency(group_2_tasks, group_1_tasks) is False
+ for group_2_task in group_2_tasks:
+ assert graph.has_dependency(group_2_task, group_1_tasks[0]) is False
+ assert graph.has_dependency(group_2_task, group_1_tasks[1]) is False
+ assert graph.has_dependency(group_2_task, group_1_tasks[2]) is False
+
+ def test_remove_dependency_dependency_group_with_some_existing_dependencies(self, graph):
+ task = MockTask()
+ group_tasks = [MockTask() for _ in xrange(3)]
+ graph.add_tasks(task)
+ graph.add_tasks(*group_tasks)
+ graph.add_dependency(task, group_tasks[1])
+ graph.remove_dependency(task, group_tasks)
+ # only a partial dependency existed, so remove_dependency should have had no
+ # effect, and the group as a whole is still not a dependency
+ assert graph.has_dependency(task, group_tasks) is False
+ # no dependencies are expected to have changed
+ assert graph.has_dependency(task, group_tasks[0]) is False
+ assert graph.has_dependency(task, group_tasks[1]) is True
+ assert graph.has_dependency(task, group_tasks[2]) is False
+
+ def test_remove_nonexistent_dependency_between_groups(self, graph):
+ group_1_tasks = [MockTask() for _ in xrange(3)]
+ group_2_tasks = [MockTask() for _ in xrange(3)]
+ graph.add_tasks(*group_1_tasks)
+ graph.add_tasks(*group_2_tasks)
+ # removing a dependency not in the graph should have no effect
+ graph.remove_dependency(group_2_tasks, group_1_tasks)
+ assert graph.has_dependency(group_2_tasks, group_1_tasks) is False
+
+ # nested tests
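+ # a nested group is simply a list passed inside add_tasks/sequence; the graph
+ # flattens it recursively, so nested members are inserted as ordinary tasks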
+
+ def test_group_with_nested_sequence(self, graph):
+ all_tasks = [MockTask() for _ in xrange(5)]
+ graph.add_tasks(all_tasks[0],
+ graph.sequence(all_tasks[1], all_tasks[2], all_tasks[3]),
+ all_tasks[4])
+ assert set(graph.tasks) == set(all_tasks)
+
+ # all_tasks[2] and all_tasks[3] should each have a single dependency; the rest should have none
+ assert len(list(graph.get_dependencies(all_tasks[0]))) == 0
+ assert len(list(graph.get_dependencies(all_tasks[1]))) == 0
+ assert set(graph.get_dependencies(all_tasks[2])) == set([all_tasks[1]])
+ assert set(graph.get_dependencies(all_tasks[3])) == set([all_tasks[2]])
+ assert len(list(graph.get_dependencies(all_tasks[4]))) == 0
+
+ def test_group_with_nested_group(self, graph):
+ tasks = [MockTask() for _ in xrange(5)]
+ graph.add_tasks(tasks[0], (tasks[1], tasks[2], tasks[3]), tasks[4])
+ graph_tasks = [t for t in graph.tasks]
+ assert set(graph_tasks) == set(tasks)
+ # none of the tasks should have any dependencies
+ for i in xrange(len(tasks)):
+ assert len(list(graph.get_dependencies(tasks[i]))) == 0
+
+ def test_group_with_recursively_nested_group(self, graph):
+ recursively_nested_tasks = [MockTask(), MockTask(), MockTask()]
+ nested_tasks = [MockTask(), MockTask(), MockTask(), recursively_nested_tasks]
+ tasks = [MockTask(), MockTask(), MockTask(), nested_tasks]
+ graph.add_tasks(*tasks)
+
+ assert set(graph.tasks) == set(tasks[:3] + nested_tasks[:3] + recursively_nested_tasks)
+ for tasks_list in [tasks, nested_tasks, recursively_nested_tasks]:
+ for i in xrange(len(tasks_list[:3])):
+ assert len(list(graph.get_dependencies(tasks_list[i]))) == 0
+
+ def test_group_with_recursively_nested_group_and_interdependencies(self, graph):
+ recursively_nested_tasks = [MockTask(), MockTask(), MockTask()]
+ nested_tasks = [MockTask(), MockTask(), MockTask(), recursively_nested_tasks]
+ tasks = [MockTask(), MockTask(), MockTask(), nested_tasks]
+ graph.add_tasks(*tasks)
+
+ graph.add_dependency(tasks[2], nested_tasks[2])
+ graph.add_dependency(nested_tasks[1], recursively_nested_tasks[0])
+ graph.add_dependency(recursively_nested_tasks[1], tasks[0])
+
+ assert set(graph.tasks) == set(tasks[:3] + nested_tasks[:3] + recursively_nested_tasks)
+ assert set(graph.get_dependencies(tasks[0])) == set()
+ assert set(graph.get_dependencies(tasks[1])) == set()
+ assert set(graph.get_dependencies(tasks[2])) == set([nested_tasks[2]])
+
+ assert set(graph.get_dependencies(nested_tasks[0])) == set()
+ assert set(graph.get_dependencies(nested_tasks[1])) == set([recursively_nested_tasks[0]])
+ assert set(graph.get_dependencies(nested_tasks[2])) == set()
+
+ assert set(graph.get_dependencies(recursively_nested_tasks[0])) == set()
+ assert set(graph.get_dependencies(recursively_nested_tasks[1])) == set([tasks[0]])
+ assert set(graph.get_dependencies(recursively_nested_tasks[2])) == set()
+
+
+class TestTaskGraphSequence(object):
+
+ def test_sequence(self, graph):
+ tasks = [MockTask(), MockTask(), MockTask()]
+ graph.sequence(*tasks)
+ graph_tasks = [t for t in graph.tasks]
+ assert set(graph_tasks) == set(tasks)
+ assert len(list(graph.get_dependencies(tasks[0]))) == 0
+ assert set(graph.get_dependencies(tasks[1])) == set([tasks[0]])
+ assert set(graph.get_dependencies(tasks[2])) == set([tasks[1]])
+
+ def test_sequence_with_some_tasks_and_dependencies_already_in_graph(self, graph):
+ # tests both that tasks which weren't previously in the graph get inserted, and
+ # that existing tasks don't get re-added to the graph
+ tasks = [MockTask(), MockTask(), MockTask()]
+ # insert some tasks and dependencies into the graph
+ graph.add_tasks(tasks[1])
+ graph.add_tasks(tasks[2])
+ graph.add_dependency(tasks[2], tasks[1])
+
+ graph.sequence(*tasks)
+ graph_tasks = [t for t in graph.tasks]
+ assert set(graph_tasks) == set(tasks)
+ assert len(list(graph.get_dependencies(tasks[0]))) == 0
+ assert set(graph.get_dependencies(tasks[1])) == set([tasks[0]])
+ assert set(graph.get_dependencies(tasks[2])) == set([tasks[1]])
+
+ def test_sequence_with_nested_sequence(self, graph):
+ tasks = [MockTask() for _ in xrange(5)]
+ graph.sequence(tasks[0], graph.sequence(tasks[1], tasks[2], tasks[3]), tasks[4])
+ graph_tasks = [t for t in graph.tasks]
+ assert set(graph_tasks) == set(tasks)
+ # first task should have no dependencies
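+ # the outer sequence also makes every nested task depend on tasks[0], so tasks[2]
+ # and tasks[3] have two dependencies each (tasks[0] plus their nested predecessor),
+ # and tasks[4] depends on all three nested tasks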
+ assert len(list(graph.get_dependencies(tasks[0]))) == 0
+ assert len(list(graph.get_dependencies(tasks[1]))) == 1
+ assert len(list(graph.get_dependencies(tasks[2]))) == 2
+ assert len(list(graph.get_dependencies(tasks[3]))) == 2
+ assert len(list(graph.get_dependencies(tasks[4]))) == 3
+
+ def test_sequence_with_nested_group(self, graph):
+ tasks = [MockTask() for _ in xrange(5)]
+ graph.sequence(tasks[0], (tasks[1], tasks[2], tasks[3]), tasks[4])
+ graph_tasks = [t for t in graph.tasks]
+ assert set(graph_tasks) == set(tasks)
+ # first task should have no dependencies
+ assert len(list(graph.get_dependencies(tasks[0]))) == 0
+ # rest of the tasks (except last) should have a single dependency - the first task
+ for i in xrange(1, 4):
+ assert set(graph.get_dependencies(tasks[i])) == set([tasks[0]])
+ # last task should have a dependency on all tasks except for the first one
+ assert set(graph.get_dependencies(tasks[4])) == set([tasks[1], tasks[2], tasks[3]])
+
+ def test_sequence_with_recursively_nested_group(self, graph):
+ recursively_nested_group = [MockTask(), MockTask()]
+ nested_group = [MockTask(), recursively_nested_group, MockTask()]
+ sequence_tasks = [MockTask(), nested_group, MockTask()]
+
+ graph.sequence(*sequence_tasks)
+ graph_tasks = [t for t in graph.tasks]
+ assert set(graph_tasks) == set([sequence_tasks[0], nested_group[0],
+ recursively_nested_group[0], recursively_nested_group[1],
+ nested_group[2], sequence_tasks[2]])
+
+ assert list(graph.get_dependencies(nested_group[0])) == [sequence_tasks[0]]
+ assert list(graph.get_dependencies(recursively_nested_group[0])) == [sequence_tasks[0]]
+ assert list(graph.get_dependencies(recursively_nested_group[1])) == [sequence_tasks[0]]
+ assert list(graph.get_dependencies(nested_group[2])) == [sequence_tasks[0]]
+
+ assert list(graph.get_dependents(nested_group[0])) == [sequence_tasks[2]]
+ assert list(graph.get_dependents(recursively_nested_group[0])) == [sequence_tasks[2]]
+ assert list(graph.get_dependents(recursively_nested_group[1])) == [sequence_tasks[2]]
+ assert list(graph.get_dependents(nested_group[2])) == [sequence_tasks[2]]
+
+ def test_sequence_with_empty_group(self, graph):
+ tasks = [MockTask(), [], MockTask()]
+ graph.sequence(*tasks)
+ graph_tasks = set([t for t in graph.tasks])
+ assert graph_tasks == set([tasks[0], tasks[2]])
+ assert list(graph.get_dependents(tasks[0])) == [tasks[2]]
+ assert list(graph.get_dependencies(tasks[2])) == [tasks[0]]
+
+ def test_sequence_with_recursively_nested_sequence_and_interdependencies(self, graph):
+ recursively_nested_tasks = list(graph.sequence(MockTask(), MockTask(), MockTask()))
+ nested_tasks = list(graph.sequence(MockTask(),
+ MockTask(),
+ MockTask(),
+ recursively_nested_tasks))
+ tasks = [MockTask(), MockTask(), MockTask(), nested_tasks]
+ graph.sequence(*tasks)
+
+ assert set(graph.tasks) == set(tasks[:3] + nested_tasks[:3] + recursively_nested_tasks)
+ assert set(graph.get_dependencies(tasks[0])) == set()
+ for i in xrange(1, len(tasks[:-1])):
+ assert set(graph.get_dependencies(tasks[i])) == set([tasks[i - 1]])
+
+ assert set(graph.get_dependencies(nested_tasks[0])) == set([tasks[2]])
+ for i in xrange(1, len(nested_tasks[:-1])):
+ assert set(graph.get_dependencies(nested_tasks[i])) == \
+ set([tasks[2], nested_tasks[i-1]])
+
+ assert set(graph.get_dependencies(recursively_nested_tasks[0])) == \
+ set([tasks[2], nested_tasks[2]])
+ for i in xrange(1, len(recursively_nested_tasks[:-1])):
+ assert set(graph.get_dependencies(recursively_nested_tasks[i])) == \
+ set([tasks[2], nested_tasks[2], recursively_nested_tasks[i-1]])
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/__init__.py
new file mode 100644
index 0000000..1809f82
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/__init__.py
@@ -0,0 +1,70 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.orchestrator.workflows.builtin import workflows
+
+
+def _assert_relationships(operations, expected_op_full_name, relationships=0):
+ """
+
+ :param operations: and iterable of operations
+ :param expected_op_full_name: Note that source/target doesn't really matter since they are
+ dropped
+ :param relationships: the number of relationships
+ :return:
+ """
+ expected_op_name = expected_op_full_name.rsplit('_', 1)[0]
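+ # e.g. an operation name ending in '_source' or '_target' is reduced to the shared prefix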
+ for _ in xrange(relationships):
+ # since the source and target operation names share the same prefix, stripping the
+ # suffix once (above) covers both operations of the pair
+ operation = next(operations)
+ relationship_id_1 = operation.actor.id
+ _assert_cfg_interface_op(operation, expected_op_name)
+
+ operation = next(operations)
+ relationship_id_2 = operation.actor.id
+ _assert_cfg_interface_op(operation, expected_op_name)
+
+ assert relationship_id_1 == relationship_id_2
+
+
+def assert_node_install_operations(operations, relationships=0):
+ operations = iter(operations)
+
+ _assert_std_interface_op(next(operations), workflows.NORMATIVE_CREATE)
+ _assert_relationships(operations, workflows.NORMATIVE_PRE_CONFIGURE_SOURCE, relationships)
+ _assert_std_interface_op(next(operations), workflows.NORMATIVE_CONFIGURE)
+ _assert_relationships(operations, workflows.NORMATIVE_POST_CONFIGURE_SOURCE, relationships)
+ _assert_std_interface_op(next(operations), workflows.NORMATIVE_START)
+ _assert_relationships(operations, workflows.NORMATIVE_ADD_SOURCE, relationships)
+
+
+def assert_node_uninstall_operations(operations, relationships=0):
+ operations = iter(operations)
+
+ _assert_std_interface_op(next(operations), workflows.NORMATIVE_STOP)
+ _assert_relationships(operations, workflows.NORMATIVE_REMOVE_SOURCE, relationships)
+ _assert_std_interface_op(next(operations), workflows.NORMATIVE_DELETE)
+
+
+def _assert_cfg_interface_op(op, operation_name):
+ # strip the source/target suffix before comparing operation names
+ assert op.operation_name.rsplit('_', 1)[0] == operation_name
+ assert op.interface_name == workflows.NORMATIVE_CONFIGURE_INTERFACE
+
+
+def _assert_std_interface_op(op, operation_name):
+ assert op.operation_name == operation_name
+ assert op.interface_name == workflows.NORMATIVE_STANDARD_INTERFACE
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/test_execute_operation.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/test_execute_operation.py
new file mode 100644
index 0000000..8713e3c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/test_execute_operation.py
@@ -0,0 +1,64 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from aria.orchestrator.workflows.api import task
+from aria.orchestrator.workflows.builtin.execute_operation import execute_operation
+
+from tests import mock, storage
+
+
+@pytest.fixture
+def ctx(tmpdir):
+ context = mock.context.simple(str(tmpdir), inmemory=False)
+ yield context
+ storage.release_sqlite_storage(context.model)
+
+
+def test_execute_operation(ctx):
+ node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ interface_name, operation_name = mock.operations.NODE_OPERATIONS_INSTALL[0]
+ interface = mock.models.create_interface(
+ ctx.service,
+ interface_name,
+ operation_name,
+ operation_kwargs=dict(function='test')
+ )
+ node.interfaces[interface.name] = interface
+ ctx.model.node.update(node)
+
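+ # wrap the execute_operation workflow as a WorkflowTask and list its inner tasks
+ # in topological order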
+ execute_tasks = list(
+ task.WorkflowTask(
+ execute_operation,
+ ctx=ctx,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ operation_kwargs={},
+ allow_kwargs_override=False,
+ run_by_dependency_order=False,
+ type_names=[],
+ node_template_ids=[],
+ node_ids=[node.id]
+ ).topological_order()
+ )
+
+ assert len(execute_tasks) == 1
+ assert getattr(execute_tasks[0].actor, '_wrapped', execute_tasks[0].actor) == node
+ assert execute_tasks[0].operation_name == operation_name
+ assert execute_tasks[0].interface_name == interface_name
+
+
+# TODO: add more scenarios
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/test_heal.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/test_heal.py
new file mode 100644
index 0000000..0a422bd
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/test_heal.py
@@ -0,0 +1,100 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from aria.orchestrator.workflows.api import task
+from aria.orchestrator.workflows.builtin.heal import heal
+
+from tests import mock, storage
+
+from . import (assert_node_install_operations, assert_node_uninstall_operations)
+
+
+@pytest.fixture
+def ctx(tmpdir):
+ context = mock.context.simple(str(tmpdir))
+ yield context
+ storage.release_sqlite_storage(context.model)
+
+
+@pytest.mark.skip(reason='heal is not implemented for now')
+def test_heal_dependent_node(ctx):
+ dependent_node = \
+ ctx.model.node.get_by_name(mock.models.DEPENDENT_NODE_NAME)
+ dependent_node.host_fk = dependent_node.id
+ ctx.model.node.update(dependent_node)
+ heal_graph = task.WorkflowTask(heal, ctx=ctx, node_id=dependent_node.id)
+
+ assert len(list(heal_graph.tasks)) == 2
+ uninstall_subgraph, install_subgraph = list(heal_graph.topological_order(reverse=True))
+
+ assert len(list(uninstall_subgraph.tasks)) == 2
+ dependent_node_subgraph_uninstall, dependency_node_subgraph_uninstall = \
+ list(uninstall_subgraph.topological_order(reverse=True))
+
+ assert len(list(install_subgraph.tasks)) == 2
+ dependency_node_subgraph_install, dependent_node_subgraph_install = \
+ list(install_subgraph.topological_order(reverse=True))
+
+ dependent_node_uninstall_tasks = \
+ list(dependent_node_subgraph_uninstall.topological_order(reverse=True))
+ assert isinstance(dependency_node_subgraph_uninstall, task.StubTask)
+ dependent_node_install_tasks = \
+ list(dependent_node_subgraph_install.topological_order(reverse=True))
+ assert isinstance(dependency_node_subgraph_install, task.StubTask)
+
+ assert_node_uninstall_operations(dependent_node_uninstall_tasks, relationships=1)
+ assert_node_install_operations(dependent_node_install_tasks, relationships=1)
+
+
+@pytest.mark.skip(reason='heal is not implemented for now')
+def test_heal_dependency_node(ctx):
+ dependency_node = \
+ ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ dependency_node.host_fk = dependency_node.id
+ ctx.model.node.update(dependency_node)
+ heal_graph = task.WorkflowTask(heal, ctx=ctx, node_id=dependency_node.id)
+ # both subgraphs should contain uninstall/install for both the dependent and the dependency
+ assert len(list(heal_graph.tasks)) == 2
+ uninstall_subgraph, install_subgraph = list(heal_graph.topological_order(reverse=True))
+
+ uninstall_tasks = list(uninstall_subgraph.topological_order(reverse=True))
+ assert len(uninstall_tasks) == 4
+ unlink_source, unlink_target = uninstall_tasks[:2]
+ dependent_node_subgraph_uninstall, dependency_node_subgraph_uninstall = uninstall_tasks[2:]
+
+ install_tasks = list(install_subgraph.topological_order(reverse=True))
+ assert len(install_tasks) == 4
+ dependency_node_subgraph_install, dependent_node_subgraph_install = install_tasks[:2]
+ establish_source, establish_target = install_tasks[2:]
+
+ assert isinstance(dependent_node_subgraph_uninstall, task.StubTask)
+ dependency_node_uninstall_tasks = \
+ list(dependency_node_subgraph_uninstall.topological_order(reverse=True))
+ assert isinstance(dependent_node_subgraph_install, task.StubTask)
+ dependency_node_install_tasks = \
+ list(dependency_node_subgraph_install.topological_order(reverse=True))
+
+ assert unlink_source.name.startswith('aria.interfaces.relationship_lifecycle.unlink')
+ assert unlink_target.name.startswith('aria.interfaces.relationship_lifecycle.unlink')
+ assert_node_uninstall_operations(dependency_node_uninstall_tasks)
+
+ assert_node_install_operations(dependency_node_install_tasks)
+ assert establish_source.name.startswith('aria.interfaces.relationship_lifecycle.establish')
+ assert establish_target.name.startswith('aria.interfaces.relationship_lifecycle.establish')
+
+
+# TODO: add tests for contained in scenario
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/test_install.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/test_install.py
new file mode 100644
index 0000000..1a4e1f9
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/test_install.py
@@ -0,0 +1,46 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import pytest
+
+from aria.orchestrator.workflows.api import task
+from aria.orchestrator.workflows.builtin.install import install
+
+from tests import mock
+from tests import storage
+
+from . import assert_node_install_operations
+
+
+@pytest.fixture
+def ctx(tmpdir):
+ context = mock.context.simple(str(tmpdir),
+ topology=mock.topology.create_simple_topology_three_nodes)
+ yield context
+ storage.release_sqlite_storage(context.model)
+
+
+def test_install(ctx):
+
+ install_tasks = list(task.WorkflowTask(install, ctx=ctx).topological_order(reverse=True))
+
+ assert len(install_tasks) == 3
+ dependency_node_subgraph1, dependency_node_subgraph2, dependent_node_subgraph = install_tasks
+ dependent_node_tasks = list(dependent_node_subgraph.topological_order(reverse=True))
+ dependency_node1_tasks = list(dependency_node_subgraph1.topological_order(reverse=True))
+ dependency_node2_tasks = list(dependency_node_subgraph2.topological_order(reverse=True))
+
+ assert_node_install_operations(dependency_node1_tasks)
+ assert_node_install_operations(dependency_node2_tasks)
+ assert_node_install_operations(dependent_node_tasks, relationships=2)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/test_uninstall.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/test_uninstall.py
new file mode 100644
index 0000000..aa04c38
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/builtin/test_uninstall.py
@@ -0,0 +1,47 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from aria.orchestrator.workflows.api import task
+from aria.orchestrator.workflows.builtin.uninstall import uninstall
+
+from tests import mock
+from tests import storage
+
+from . import assert_node_uninstall_operations
+
+
+@pytest.fixture
+def ctx(tmpdir):
+ context = mock.context.simple(str(tmpdir),
+ topology=mock.topology.create_simple_topology_three_nodes)
+ yield context
+ storage.release_sqlite_storage(context.model)
+
+
+def test_uninstall(ctx):
+
+ uninstall_tasks = list(task.WorkflowTask(uninstall, ctx=ctx).topological_order(reverse=True))
+
+ assert len(uninstall_tasks) == 3
+ dependent_node_subgraph, dependency_node_subgraph1, dependency_node_subgraph2 = uninstall_tasks
+ dependent_node_tasks = list(dependent_node_subgraph.topological_order(reverse=True))
+ dependency_node1_tasks = list(dependency_node_subgraph1.topological_order(reverse=True))
+ dependency_node2_tasks = list(dependency_node_subgraph2.topological_order(reverse=True))
+
+ assert_node_uninstall_operations(operations=dependency_node1_tasks)
+ assert_node_uninstall_operations(operations=dependency_node2_tasks)
+ assert_node_uninstall_operations(operations=dependent_node_tasks, relationships=2)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/__init__.py
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/test_engine.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/test_engine.py
new file mode 100644
index 0000000..0c704f5
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/test_engine.py
@@ -0,0 +1,564 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import time
+import threading
+from datetime import datetime
+
+import pytest
+
+from aria.orchestrator import (
+ events,
+ workflow,
+ operation,
+)
+from aria.modeling import models
+from aria.orchestrator.workflows import (
+ api,
+ exceptions,
+)
+from aria.orchestrator.workflows.core import engine, graph_compiler
+from aria.orchestrator.workflows.executor import thread
+
+from tests import mock, storage
+
+
+global_test_holder = {}
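+# mock operations and signal handlers record their invocations here; a module-level
+# dict is used since operations may execute on executor threads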
+
+
+class BaseTest(object):
+
+ @classmethod
+ def _execute(cls, workflow_func, workflow_context, executor):
+ eng = cls._engine(workflow_func=workflow_func,
+ workflow_context=workflow_context,
+ executor=executor)
+ eng.execute(ctx=workflow_context)
+ return eng
+
+ @staticmethod
+ def _engine(workflow_func, workflow_context, executor):
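+ # build the API-level task graph, compile it into the core execution graph,
+ # and return an engine bound to the given executor class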
+ graph = workflow_func(ctx=workflow_context)
+ graph_compiler.GraphCompiler(workflow_context, executor.__class__).compile(graph)
+
+ return engine.Engine(executors={executor.__class__: executor})
+
+ @staticmethod
+ def _create_interface(ctx, func, arguments=None):
+ node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ interface_name = 'aria.interfaces.lifecycle'
+ operation_kwargs = dict(function='{name}.{func.__name__}'.format(
+ name=__name__, func=func))
+ if arguments:
+ # the operation has to declare the arguments before those may be passed
+ operation_kwargs['arguments'] = arguments
+ operation_name = 'create'
+ interface = mock.models.create_interface(node.service, interface_name, operation_name,
+ operation_kwargs=operation_kwargs)
+ node.interfaces[interface.name] = interface
+ ctx.model.node.update(node)
+
+ return node, interface_name, operation_name
+
+ @staticmethod
+ def _op(node,
+ operation_name,
+ arguments=None,
+ max_attempts=None,
+ retry_interval=None,
+ ignore_failure=None):
+
+ return api.task.OperationTask(
+ node,
+ interface_name='aria.interfaces.lifecycle',
+ operation_name=operation_name,
+ arguments=arguments,
+ max_attempts=max_attempts,
+ retry_interval=retry_interval,
+ ignore_failure=ignore_failure,
+ )
+
+ @pytest.fixture(autouse=True)
+ def globals_cleanup(self):
+ try:
+ yield
+ finally:
+ global_test_holder.clear()
+
+ @pytest.fixture(autouse=True)
+ def signals_registration(self):
+ def sent_task_handler(ctx, *args, **kwargs):
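+ # count only real operation tasks; stub tasks in the compiled graph are skipped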
+ if ctx.task._stub_type is None:
+ calls = global_test_holder.setdefault('sent_task_signal_calls', 0)
+ global_test_holder['sent_task_signal_calls'] = calls + 1
+
+ def start_workflow_handler(workflow_context, *args, **kwargs):
+ workflow_context.states.append('start')
+
+ def success_workflow_handler(workflow_context, *args, **kwargs):
+ workflow_context.states.append('success')
+
+ def failure_workflow_handler(workflow_context, exception, *args, **kwargs):
+ workflow_context.states.append('failure')
+ workflow_context.exception = exception
+
+ def cancel_workflow_handler(workflow_context, *args, **kwargs):
+ workflow_context.states.append('cancel')
+
+ events.start_workflow_signal.connect(start_workflow_handler)
+ events.on_success_workflow_signal.connect(success_workflow_handler)
+ events.on_failure_workflow_signal.connect(failure_workflow_handler)
+ events.on_cancelled_workflow_signal.connect(cancel_workflow_handler)
+ events.sent_task_signal.connect(sent_task_handler)
+ try:
+ yield
+ finally:
+ events.start_workflow_signal.disconnect(start_workflow_handler)
+ events.on_success_workflow_signal.disconnect(success_workflow_handler)
+ events.on_failure_workflow_signal.disconnect(failure_workflow_handler)
+ events.on_cancelled_workflow_signal.disconnect(cancel_workflow_handler)
+ events.sent_task_signal.disconnect(sent_task_handler)
+
+ @pytest.fixture
+ def executor(self):
+ result = thread.ThreadExecutor()
+ try:
+ yield result
+ finally:
+ result.close()
+
+ @pytest.fixture
+ def workflow_context(self, tmpdir):
+ workflow_context = mock.context.simple(str(tmpdir))
+ workflow_context.states = []
+ workflow_context.exception = None
+ yield workflow_context
+ storage.release_sqlite_storage(workflow_context.model)
+
+
+class TestEngine(BaseTest):
+
+ def test_empty_graph_execution(self, workflow_context, executor):
+ @workflow
+ def mock_workflow(**_):
+ pass
+ self._execute(workflow_func=mock_workflow,
+ workflow_context=workflow_context,
+ executor=executor)
+ assert workflow_context.states == ['start', 'success']
+ assert workflow_context.exception is None
+ assert 'sent_task_signal_calls' not in global_test_holder
+ execution = workflow_context.execution
+ assert execution.started_at <= execution.ended_at <= datetime.utcnow()
+ assert execution.error is None
+ assert execution.status == models.Execution.SUCCEEDED
+
+ def test_single_task_successful_execution(self, workflow_context, executor):
+ node, _, operation_name = self._create_interface(workflow_context, mock_success_task)
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ graph.add_tasks(self._op(node, operation_name))
+ self._execute(
+ workflow_func=mock_workflow,
+ workflow_context=workflow_context,
+ executor=executor)
+ assert workflow_context.states == ['start', 'success']
+ assert workflow_context.exception is None
+ assert global_test_holder.get('sent_task_signal_calls') == 1
+
+ def test_single_task_failed_execution(self, workflow_context, executor):
+ node, _, operation_name = self._create_interface(workflow_context, mock_failed_task)
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ graph.add_tasks(self._op(node, operation_name))
+ with pytest.raises(exceptions.ExecutorException):
+ self._execute(
+ workflow_func=mock_workflow,
+ workflow_context=workflow_context,
+ executor=executor)
+ assert workflow_context.states == ['start', 'failure']
+ assert isinstance(workflow_context.exception, exceptions.ExecutorException)
+ assert global_test_holder.get('sent_task_signal_calls') == 1
+ execution = workflow_context.execution
+ assert execution.started_at <= execution.ended_at <= datetime.utcnow()
+ assert execution.error is not None
+ assert execution.status == models.Execution.FAILED
+
+ def test_two_tasks_execution_order(self, workflow_context, executor):
+ node, _, operation_name = self._create_interface(
+ workflow_context, mock_ordered_task, {'counter': 1})
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ op1 = self._op(node, operation_name, arguments={'counter': 1})
+ op2 = self._op(node, operation_name, arguments={'counter': 2})
+ graph.sequence(op1, op2)
+ self._execute(
+ workflow_func=mock_workflow,
+ workflow_context=workflow_context,
+ executor=executor)
+ assert workflow_context.states == ['start', 'success']
+ assert workflow_context.exception is None
+ assert global_test_holder.get('invocations') == [1, 2]
+ assert global_test_holder.get('sent_task_signal_calls') == 2
+
+ def test_stub_and_subworkflow_execution(self, workflow_context, executor):
+ node, _, operation_name = self._create_interface(
+ workflow_context, mock_ordered_task, {'counter': 1})
+
+ @workflow
+ def sub_workflow(ctx, graph):
+ op1 = self._op(node, operation_name, arguments={'counter': 1})
+ op2 = api.task.StubTask()
+ op3 = self._op(node, operation_name, arguments={'counter': 2})
+ graph.sequence(op1, op2, op3)
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ graph.add_tasks(api.task.WorkflowTask(sub_workflow, ctx=ctx))
+ self._execute(workflow_func=mock_workflow,
+ workflow_context=workflow_context,
+ executor=executor)
+ assert workflow_context.states == ['start', 'success']
+ assert workflow_context.exception is None
+ assert global_test_holder.get('invocations') == [1, 2]
+ assert global_test_holder.get('sent_task_signal_calls') == 2
+
+
+class TestCancel(BaseTest):
+
+ def test_cancel_started_execution(self, workflow_context, executor):
+ number_of_tasks = 100
+ node, _, operation_name = self._create_interface(
+ workflow_context, mock_sleep_task, {'seconds': 0.1})
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ operations = (
+ self._op(node, operation_name, arguments=dict(seconds=0.1))
+ for _ in range(number_of_tasks)
+ )
+ return graph.sequence(*operations)
+
+ eng = self._engine(workflow_func=mock_workflow,
+ workflow_context=workflow_context,
+ executor=executor)
+ t = threading.Thread(target=eng.execute, kwargs=dict(ctx=workflow_context))
+ t.daemon = True
+ t.start()
+ time.sleep(10)
+ eng.cancel_execution(workflow_context)
+ t.join(timeout=60) # we need to give this a *lot* of time because Travis can be *very* slow
+ assert not t.is_alive() # join() does not raise on timeout, so verify the thread is done
+ assert workflow_context.states == ['start', 'cancel']
+ assert workflow_context.exception is None
+ invocations = global_test_holder.get('invocations', [])
+ assert 0 < len(invocations) < number_of_tasks
+ execution = workflow_context.execution
+ assert execution.started_at <= execution.ended_at <= datetime.utcnow()
+ assert execution.error is None
+ assert execution.status == models.Execution.CANCELLED
+
+ def test_cancel_pending_execution(self, workflow_context, executor):
+ @workflow
+ def mock_workflow(graph, **_):
+ return graph
+ eng = self._engine(workflow_func=mock_workflow,
+ workflow_context=workflow_context,
+ executor=executor)
+ eng.cancel_execution(workflow_context)
+ execution = workflow_context.execution
+ assert execution.status == models.Execution.CANCELLED
+
+
+class TestRetries(BaseTest):
+
+ def test_two_max_attempts_and_success_on_retry(self, workflow_context, executor):
+ node, _, operation_name = self._create_interface(
+ workflow_context, mock_conditional_failure_task, {'failure_count': 1})
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ op = self._op(node, operation_name,
+ arguments={'failure_count': 1},
+ max_attempts=2)
+ graph.add_tasks(op)
+ self._execute(
+ workflow_func=mock_workflow,
+ workflow_context=workflow_context,
+ executor=executor)
+ assert workflow_context.states == ['start', 'success']
+ assert workflow_context.exception is None
+ assert len(global_test_holder.get('invocations', [])) == 2
+ assert global_test_holder.get('sent_task_signal_calls') == 2
+
+ def test_two_max_attempts_and_failure_on_retry(self, workflow_context, executor):
+ node, _, operation_name = self._create_interface(
+ workflow_context, mock_conditional_failure_task, {'failure_count': 1})
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ op = self._op(node, operation_name,
+ arguments={'failure_count': 2},
+ max_attempts=2)
+ graph.add_tasks(op)
+ with pytest.raises(exceptions.ExecutorException):
+ self._execute(
+ workflow_func=mock_workflow,
+ workflow_context=workflow_context,
+ executor=executor)
+ assert workflow_context.states == ['start', 'failure']
+ assert isinstance(workflow_context.exception, exceptions.ExecutorException)
+ assert len(global_test_holder.get('invocations', [])) == 2
+ assert global_test_holder.get('sent_task_signal_calls') == 2
+
+ def test_three_max_attempts_and_success_on_first_retry(self, workflow_context, executor):
+ node, _, operation_name = self._create_interface(
+ workflow_context, mock_conditional_failure_task, {'failure_count': 1})
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ op = self._op(node, operation_name,
+ arguments={'failure_count': 1},
+ max_attempts=3)
+ graph.add_tasks(op)
+ self._execute(
+ workflow_func=mock_workflow,
+ workflow_context=workflow_context,
+ executor=executor)
+ assert workflow_context.states == ['start', 'success']
+ assert workflow_context.exception is None
+ assert len(global_test_holder.get('invocations', [])) == 2
+ assert global_test_holder.get('sent_task_signal_calls') == 2
+
+ def test_three_max_attempts_and_success_on_second_retry(self, workflow_context, executor):
+ node, _, operation_name = self._create_interface(
+ workflow_context, mock_conditional_failure_task, {'failure_count': 1})
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ op = self._op(node, operation_name,
+ arguments={'failure_count': 2},
+ max_attempts=3)
+ graph.add_tasks(op)
+ self._execute(
+ workflow_func=mock_workflow,
+ workflow_context=workflow_context,
+ executor=executor)
+ assert workflow_context.states == ['start', 'success']
+ assert workflow_context.exception is None
+ assert len(global_test_holder.get('invocations', [])) == 3
+ assert global_test_holder.get('sent_task_signal_calls') == 3
+
+ def test_infinite_retries(self, workflow_context, executor):
+ node, _, operation_name = self._create_interface(
+ workflow_context, mock_conditional_failure_task, {'failure_count': 1})
+
+ @workflow
+ def mock_workflow(ctx, graph):
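+ # max_attempts=-1 is taken here to denote infinite retries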
+ op = self._op(node, operation_name,
+ arguments={'failure_count': 1},
+ max_attempts=-1)
+ graph.add_tasks(op)
+ self._execute(
+ workflow_func=mock_workflow,
+ workflow_context=workflow_context,
+ executor=executor)
+ assert workflow_context.states == ['start', 'success']
+ assert workflow_context.exception is None
+ assert len(global_test_holder.get('invocations', [])) == 2
+ assert global_test_holder.get('sent_task_signal_calls') == 2
+
+ def test_retry_interval_float(self, workflow_context, executor):
+ self._test_retry_interval(retry_interval=0.3,
+ workflow_context=workflow_context,
+ executor=executor)
+
+ def test_retry_interval_int(self, workflow_context, executor):
+ self._test_retry_interval(retry_interval=1,
+ workflow_context=workflow_context,
+ executor=executor)
+
+ def _test_retry_interval(self, retry_interval, workflow_context, executor):
+ node, _, operation_name = self._create_interface(
+ workflow_context, mock_conditional_failure_task, {'failure_count': 1})
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ op = self._op(node, operation_name,
+ arguments={'failure_count': 1},
+ max_attempts=2,
+ retry_interval=retry_interval)
+ graph.add_tasks(op)
+ self._execute(
+ workflow_func=mock_workflow,
+ workflow_context=workflow_context,
+ executor=executor)
+ assert workflow_context.states == ['start', 'success']
+ assert workflow_context.exception is None
+ invocations = global_test_holder.get('invocations', [])
+ assert len(invocations) == 2
+ invocation1, invocation2 = invocations
+ assert invocation2 - invocation1 >= retry_interval
+ assert global_test_holder.get('sent_task_signal_calls') == 2
+
+ def test_ignore_failure(self, workflow_context, executor):
+ node, _, operation_name = self._create_interface(
+ workflow_context, mock_conditional_failure_task, {'failure_count': 1})
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ op = self._op(node, operation_name,
+ ignore_failure=True,
+ arguments={'failure_count': 100},
+ max_attempts=100)
+ graph.add_tasks(op)
+ self._execute(
+ workflow_func=mock_workflow,
+ workflow_context=workflow_context,
+ executor=executor)
+ assert workflow_context.states == ['start', 'success']
+ assert workflow_context.exception is None
+ invocations = global_test_holder.get('invocations', [])
+ assert len(invocations) == 1
+ assert global_test_holder.get('sent_task_signal_calls') == 1
+
+
+class TestTaskRetryAndAbort(BaseTest):
+ message = 'EXPECTED_ERROR'
+
+ def test_task_retry_default_interval(self, workflow_context, executor):
+ default_retry_interval = 0.1
+ node, _, operation_name = self._create_interface(
+ workflow_context, mock_task_retry, {'message': self.message})
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ op = self._op(node, operation_name,
+ arguments={'message': self.message},
+ retry_interval=default_retry_interval,
+ max_attempts=2)
+ graph.add_tasks(op)
+ with pytest.raises(exceptions.ExecutorException):
+ self._execute(
+ workflow_func=mock_workflow,
+ workflow_context=workflow_context,
+ executor=executor)
+ assert workflow_context.states == ['start', 'failure']
+ assert isinstance(workflow_context.exception, exceptions.ExecutorException)
+ invocations = global_test_holder.get('invocations', [])
+ assert len(invocations) == 2
+ invocation1, invocation2 = invocations
+ assert invocation2 - invocation1 >= default_retry_interval
+ assert global_test_holder.get('sent_task_signal_calls') == 2
+
+ def test_task_retry_custom_interval(self, workflow_context, executor):
+ default_retry_interval = 100
+ custom_retry_interval = 0.1
+ node, _, operation_name = self._create_interface(
+ workflow_context, mock_task_retry, {'message': self.message,
+ 'retry_interval': custom_retry_interval})
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ op = self._op(node, operation_name,
+ arguments={'message': self.message,
+ 'retry_interval': custom_retry_interval},
+ retry_interval=default_retry_interval,
+ max_attempts=2)
+ graph.add_tasks(op)
+ execution_start = time.time()
+ with pytest.raises(exceptions.ExecutorException):
+ self._execute(
+ workflow_func=mock_workflow,
+ workflow_context=workflow_context,
+ executor=executor)
+ execution_end = time.time()
+ assert workflow_context.states == ['start', 'failure']
+ assert isinstance(workflow_context.exception, exceptions.ExecutorException)
+ invocations = global_test_holder.get('invocations', [])
+ assert len(invocations) == 2
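+ # finishing well under the 100-second default interval proves that the
+ # operation-supplied retry_interval took precedence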
+ assert (execution_end - execution_start) < default_retry_interval
+ assert global_test_holder.get('sent_task_signal_calls') == 2
+
+ def test_task_abort(self, workflow_context, executor):
+ node, _, operation_name = self._create_interface(
+ workflow_context, mock_task_abort, {'message': self.message})
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ op = self._op(node, operation_name,
+ arguments={'message': self.message},
+ retry_interval=100,
+ max_attempts=100)
+ graph.add_tasks(op)
+ with pytest.raises(exceptions.ExecutorException):
+ self._execute(
+ workflow_func=mock_workflow,
+ workflow_context=workflow_context,
+ executor=executor)
+ assert workflow_context.states == ['start', 'failure']
+ assert isinstance(workflow_context.exception, exceptions.ExecutorException)
+ invocations = global_test_holder.get('invocations', [])
+ assert len(invocations) == 1
+ assert global_test_holder.get('sent_task_signal_calls') == 1
+
+
+@operation
+def mock_success_task(**_):
+ pass
+
+
+@operation
+def mock_failed_task(**_):
+ raise RuntimeError
+
+
+@operation
+def mock_ordered_task(counter, **_):
+ invocations = global_test_holder.setdefault('invocations', [])
+ invocations.append(counter)
+
+
+@operation
+def mock_conditional_failure_task(failure_count, **_):
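+ # fail on each invocation until `failure_count` invocations have been recorded,
+ # then succeed; a timestamp is appended either way so retry intervals can be measured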
+ invocations = global_test_holder.setdefault('invocations', [])
+ try:
+ if len(invocations) < failure_count:
+ raise RuntimeError
+ finally:
+ invocations.append(time.time())
+
+
+@operation
+def mock_sleep_task(seconds, **_):
+ _add_invocation_timestamp()
+ time.sleep(seconds)
+
+
+@operation
+def mock_task_retry(ctx, message, retry_interval=None, **_):
+ _add_invocation_timestamp()
+ retry_kwargs = {}
+ if retry_interval is not None:
+ retry_kwargs['retry_interval'] = retry_interval
+ ctx.task.retry(message, **retry_kwargs)
+
+
+@operation
+def mock_task_abort(ctx, message, **_):
+ _add_invocation_timestamp()
+ ctx.task.abort(message)
+
+
+def _add_invocation_timestamp():
+ invocations = global_test_holder.setdefault('invocations', [])
+ invocations.append(time.time())
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/test_events.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/test_events.py
new file mode 100644
index 0000000..d804de5
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/test_events.py
@@ -0,0 +1,171 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from aria.orchestrator.decorators import operation, workflow
+from aria.orchestrator.workflows.core import engine, graph_compiler
+from aria.orchestrator.workflows.executor.thread import ThreadExecutor
+from aria.orchestrator.workflows import api
+from aria.modeling.service_instance import NodeBase
+
+from tests import mock, storage
+
+global_test_dict = {} # used to capture transitional node state changes
+
+
+@pytest.fixture
+def ctx(tmpdir):
+ context = mock.context.simple(str(tmpdir))
+ yield context
+ storage.release_sqlite_storage(context.model)
+
+# TODO: another possible approach to writing these tests:
+# don't create a ctx for every test.
+# The problem is that if for every test we create a workflow that contains just one standard
+# lifecycle operation, then by the time we try to run the second test, the workflow fails since
+# the execution tries to go from 'terminated' to 'pending'.
+# And if we write a workflow that contains all the lifecycle operations, then first we'd need to
+# change the API of `mock.models.create_interface`, which a lot of other tests use, and second
+# it's not clear how to conveniently check all the state transitions during workflow execution.
+
+TYPE_URI_NAME = 'tosca.interfaces.node.lifecycle.Standard'
+SHORTHAND_NAME = 'Standard'
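+# both the type URI and the shorthand name refer to the TOSCA standard lifecycle
+# interface; the tests below verify that either form triggers the node state machine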
+
+
+def test_node_state_changes_as_a_result_of_standard_lifecycle_create(ctx, executor):
+ node = run_operation_on_node(
+ ctx, interface_name=TYPE_URI_NAME, op_name='create', executor=executor)
+ _assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'create')
+
+
+def test_node_state_changes_as_a_result_of_standard_lifecycle_configure(ctx, executor):
+ node = run_operation_on_node(
+ ctx, interface_name=TYPE_URI_NAME, op_name='configure', executor=executor)
+ _assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'configure')
+
+
+def test_node_state_changes_as_a_result_of_standard_lifecycle_start(ctx, executor):
+ node = run_operation_on_node(
+ ctx, interface_name=TYPE_URI_NAME, op_name='start', executor=executor)
+ _assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'start')
+
+
+def test_node_state_changes_as_a_result_of_standard_lifecycle_stop(ctx, executor):
+ node = run_operation_on_node(
+ ctx, interface_name=TYPE_URI_NAME, op_name='stop', executor=executor)
+ _assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'stop')
+
+
+def test_node_state_changes_as_a_result_of_standard_lifecycle_delete(ctx, executor):
+ node = run_operation_on_node(
+ ctx, interface_name=TYPE_URI_NAME, op_name='delete', executor=executor)
+ _assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'delete')
+
+
+def test_node_state_changes_as_a_result_of_standard_lifecycle_create_shorthand_name(ctx, executor):
+ node = run_operation_on_node(
+ ctx, interface_name=SHORTHAND_NAME, op_name='create', executor=executor)
+ _assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'create')
+
+
+def test_node_state_changes_as_a_result_of_standard_lifecycle_configure_shorthand_name(
+ ctx, executor):
+ node = run_operation_on_node(
+ ctx, interface_name=SHORTHAND_NAME, op_name='configure', executor=executor)
+ _assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'configure')
+
+
+def test_node_state_changes_as_a_result_of_standard_lifecycle_start_shorthand_name(ctx, executor):
+ node = run_operation_on_node(
+ ctx, interface_name=SHORTHAND_NAME, op_name='start', executor=executor)
+ _assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'start')
+
+
+def test_node_state_changes_as_a_result_of_standard_lifecycle_stop_shorthand_name(ctx, executor):
+ node = run_operation_on_node(
+ ctx, interface_name=SHORTHAND_NAME, op_name='stop', executor=executor)
+ _assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'stop')
+
+
+def test_node_state_changes_as_a_result_of_standard_lifecycle_delete_shorthand_name(ctx, executor):
+ node = run_operation_on_node(
+ ctx, interface_name=SHORTHAND_NAME, op_name='delete', executor=executor)
+ _assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, 'delete')
+
+
+def test_node_state_doesnt_change_as_a_result_of_an_operation_that_is_not_standard_lifecycle1(
+ ctx, executor):
+ node = run_operation_on_node(
+ ctx, interface_name='interface_name', op_name='op_name', executor=executor)
+ assert node.state == node.INITIAL
+
+
+def test_node_state_doesnt_change_as_a_result_of_an_operation_that_is_not_standard_lifecycle2(
+ ctx, executor):
+ node = run_operation_on_node(
+ ctx, interface_name='interface_name', op_name='create', executor=executor)
+ assert node.state == node.INITIAL
+
+
+def run_operation_on_node(ctx, op_name, interface_name, executor):
+ node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ interface = mock.models.create_interface(
+ service=node.service,
+ interface_name=interface_name,
+ operation_name=op_name,
+ operation_kwargs=dict(function='{name}.{func.__name__}'.format(name=__name__, func=func)))
+ node.interfaces[interface.name] = interface
+ graph_compiler.GraphCompiler(ctx, ThreadExecutor).compile(
+ single_operation_workflow(ctx, node=node, interface_name=interface_name, op_name=op_name)
+ )
+
+ eng = engine.Engine(executors={executor.__class__: executor})
+ eng.execute(ctx)
+ return node
+
+
+def run_standard_lifecycle_operation_on_node(ctx, op_name, executor):
+ return run_operation_on_node(ctx,
+ interface_name='aria.interfaces.lifecycle.Standard',
+ op_name=op_name,
+ executor=executor)
+
+
+def _assert_node_state_changed_as_a_result_of_standard_lifecycle_operation(node, op_name):
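+    # e.g. for op_name='create', NodeBase._OP_TO_STATE is expected to map to
+    # something like {'transitional': 'creating', 'finished': 'created'}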
+ assert global_test_dict['transitional_state'] == NodeBase._OP_TO_STATE[op_name]['transitional']
+ assert node.state == NodeBase._OP_TO_STATE[op_name]['finished']
+
+
+@workflow
+def single_operation_workflow(graph, node, interface_name, op_name, **_):
+ graph.add_tasks(api.task.OperationTask(
+ node,
+ interface_name=interface_name,
+ operation_name=op_name))
+
+
+@operation
+def func(ctx):
+ global_test_dict['transitional_state'] = ctx.node.state
+
+
+@pytest.fixture
+def executor():
+ result = ThreadExecutor()
+ try:
+ yield result
+ finally:
+ result.close()
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/test_task.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/test_task.py
new file mode 100644
index 0000000..2b3f7d7
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/test_task.py
@@ -0,0 +1,153 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from datetime import (
+ datetime,
+ timedelta
+)
+
+import pytest
+
+from aria.orchestrator.context import workflow as workflow_context
+from aria.orchestrator.workflows import (
+ api,
+ exceptions,
+)
+from aria.modeling import models
+
+from tests import mock, storage
+
+NODE_INTERFACE_NAME = 'Standard'
+NODE_OPERATION_NAME = 'create'
+RELATIONSHIP_INTERFACE_NAME = 'Configure'
+RELATIONSHIP_OPERATION_NAME = 'pre_configure'
+
+
+@pytest.fixture
+def ctx(tmpdir):
+ context = mock.context.simple(str(tmpdir))
+
+ relationship = context.model.relationship.list()[0]
+ interface = mock.models.create_interface(
+ relationship.source_node.service,
+ RELATIONSHIP_INTERFACE_NAME,
+ RELATIONSHIP_OPERATION_NAME,
+ operation_kwargs=dict(function='test')
+ )
+ relationship.interfaces[interface.name] = interface
+ context.model.relationship.update(relationship)
+
+ node = context.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ interface = mock.models.create_interface(
+ node.service,
+ NODE_INTERFACE_NAME,
+ NODE_OPERATION_NAME,
+ operation_kwargs=dict(function='test')
+ )
+ node.interfaces[interface.name] = interface
+ context.model.node.update(node)
+
+ yield context
+ storage.release_sqlite_storage(context.model)
+
+
+class TestOperationTask(object):
+
+ def _create_node_operation_task(self, ctx, node):
+ with workflow_context.current.push(ctx):
+ api_task = api.task.OperationTask(
+ node,
+ interface_name=NODE_INTERFACE_NAME,
+ operation_name=NODE_OPERATION_NAME)
+ model_task = models.Task.from_api_task(api_task, None)
+ return api_task, model_task
+
+ def _create_relationship_operation_task(self, ctx, relationship):
+ with workflow_context.current.push(ctx):
+ api_task = api.task.OperationTask(
+ relationship,
+ interface_name=RELATIONSHIP_INTERFACE_NAME,
+ operation_name=RELATIONSHIP_OPERATION_NAME)
+ core_task = models.Task.from_api_task(api_task, None)
+ return api_task, core_task
+
+ def test_node_operation_task_creation(self, ctx):
+ storage_plugin = mock.models.create_plugin('p1', '0.1')
+ storage_plugin_other = mock.models.create_plugin('p0', '0.0')
+ ctx.model.plugin.put(storage_plugin)
+ ctx.model.plugin.put(storage_plugin_other)
+ node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ interface = mock.models.create_interface(
+ node.service,
+ NODE_INTERFACE_NAME,
+ NODE_OPERATION_NAME,
+ operation_kwargs=dict(plugin=storage_plugin, function='test')
+ )
+ node.interfaces[interface.name] = interface
+ ctx.model.node.update(node)
+ api_task, model_task = self._create_node_operation_task(ctx, node)
+ assert model_task.name == api_task.name
+ assert model_task.function == api_task.function
+ assert model_task.actor == api_task.actor == node
+ assert model_task.arguments == api_task.arguments
+ assert model_task.plugin == storage_plugin
+
+ def test_relationship_operation_task_creation(self, ctx):
+ relationship = ctx.model.relationship.list()[0]
+ ctx.model.relationship.update(relationship)
+ _, model_task = self._create_relationship_operation_task(
+ ctx, relationship)
+ assert model_task.actor == relationship
+
+ @pytest.mark.skip("Currently not supported for model tasks")
+ def test_operation_task_edit_locked_attribute(self, ctx):
+ node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+
+ _, core_task = self._create_node_operation_task(ctx, node)
+ now = datetime.utcnow()
+ with pytest.raises(exceptions.TaskException):
+ core_task.status = core_task.STARTED
+ with pytest.raises(exceptions.TaskException):
+ core_task.started_at = now
+ with pytest.raises(exceptions.TaskException):
+ core_task.ended_at = now
+ with pytest.raises(exceptions.TaskException):
+ core_task.attempts_count = 2
+ with pytest.raises(exceptions.TaskException):
+ core_task.due_at = now
+
+ @pytest.mark.skip("Currently not supported for model tasks")
+ def test_operation_task_edit_attributes(self, ctx):
+ node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+
+ _, core_task = self._create_node_operation_task(ctx, node)
+ future_time = datetime.utcnow() + timedelta(seconds=3)
+
+ with core_task._update():
+ core_task.status = core_task.STARTED
+ core_task.started_at = future_time
+ core_task.ended_at = future_time
+ core_task.attempts_count = 2
+ core_task.due_at = future_time
+ assert core_task.status != core_task.STARTED
+ assert core_task.started_at != future_time
+ assert core_task.ended_at != future_time
+ assert core_task.attempts_count != 2
+ assert core_task.due_at != future_time
+
+ assert core_task.status == core_task.STARTED
+ assert core_task.started_at == future_time
+ assert core_task.ended_at == future_time
+ assert core_task.attempts_count == 2
+ assert core_task.due_at == future_time
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/test_task_graph_into_execution_graph.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/test_task_graph_into_execution_graph.py
new file mode 100644
index 0000000..e24c901
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/core/test_task_graph_into_execution_graph.py
@@ -0,0 +1,172 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from networkx import topological_sort, DiGraph
+
+from aria.modeling import models
+from aria.orchestrator import context
+from aria.orchestrator.workflows import api
+from aria.orchestrator.workflows.core import graph_compiler
+from aria.orchestrator.workflows.executor import base
+from tests import mock
+from tests import storage
+
+
+def test_task_graph_into_execution_graph(tmpdir):
+ interface_name = 'Standard'
+ op1_name, op2_name, op3_name = 'create', 'configure', 'start'
+ workflow_context = mock.context.simple(str(tmpdir))
+ node = workflow_context.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ interface = mock.models.create_interface(
+ node.service,
+ interface_name,
+ op1_name,
+ operation_kwargs=dict(function='test')
+ )
+ interface.operations[op2_name] = mock.models.create_operation(op2_name) # pylint: disable=unsubscriptable-object
+ interface.operations[op3_name] = mock.models.create_operation(op3_name) # pylint: disable=unsubscriptable-object
+ node.interfaces[interface.name] = interface
+ workflow_context.model.node.update(node)
+
+ def sub_workflow(name, **_):
+ return api.task_graph.TaskGraph(name)
+
+ with context.workflow.current.push(workflow_context):
+ test_task_graph = api.task.WorkflowTask(sub_workflow, name='test_task_graph')
+ simple_before_task = api.task.OperationTask(
+ node,
+ interface_name=interface_name,
+ operation_name=op1_name)
+ simple_after_task = api.task.OperationTask(
+ node,
+ interface_name=interface_name,
+ operation_name=op1_name)
+
+ inner_task_graph = api.task.WorkflowTask(sub_workflow, name='test_inner_task_graph')
+ inner_task_1 = api.task.OperationTask(
+ node,
+ interface_name=interface_name,
+ operation_name=op1_name)
+ inner_task_2 = api.task.OperationTask(
+ node,
+ interface_name=interface_name,
+ operation_name=op2_name)
+ inner_task_3 = api.task.OperationTask(
+ node,
+ interface_name=interface_name,
+ operation_name=op3_name)
+ inner_task_graph.add_tasks(inner_task_1)
+ inner_task_graph.add_tasks(inner_task_2)
+ inner_task_graph.add_tasks(inner_task_3)
+ inner_task_graph.add_dependency(inner_task_2, inner_task_1)
+ inner_task_graph.add_dependency(inner_task_3, inner_task_1)
+ inner_task_graph.add_dependency(inner_task_3, inner_task_2)
+
+ test_task_graph.add_tasks(simple_before_task)
+ test_task_graph.add_tasks(simple_after_task)
+ test_task_graph.add_tasks(inner_task_graph)
+ test_task_graph.add_dependency(inner_task_graph, simple_before_task)
+ test_task_graph.add_dependency(simple_after_task, inner_task_graph)
+
+ compiler = graph_compiler.GraphCompiler(workflow_context, base.StubTaskExecutor)
+ compiler.compile(test_task_graph)
+
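+    # expected execution graph after compilation -- a single linear chain (sketch):
+    #   Start(test) -> before -> Start(inner) -> inner1 -> inner2 -> inner3
+    #     -> End(inner) -> after -> End(test)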
+ execution_tasks = topological_sort(_graph(workflow_context.execution.tasks))
+
+ assert len(execution_tasks) == 9
+
+ expected_tasks_names = [
+ '{0}-Start'.format(test_task_graph.id),
+ simple_before_task.id,
+ '{0}-Start'.format(inner_task_graph.id),
+ inner_task_1.id,
+ inner_task_2.id,
+ inner_task_3.id,
+ '{0}-End'.format(inner_task_graph.id),
+ simple_after_task.id,
+ '{0}-End'.format(test_task_graph.id)
+ ]
+
+ assert expected_tasks_names == [compiler._model_to_api_id[t.id] for t in execution_tasks]
+ assert all(isinstance(task, models.Task) for task in execution_tasks)
+ execution_tasks = iter(execution_tasks)
+
+ _assert_tasks(
+        execution_tasks,
+        iter([simple_before_task, inner_task_1, inner_task_2, inner_task_3, simple_after_task])
+ )
+ storage.release_sqlite_storage(workflow_context.model)
+
+
+def _assert_tasks(execution_tasks, api_tasks):
+ start_workflow_exec_task = next(execution_tasks)
+ assert start_workflow_exec_task._stub_type == models.Task.START_WORKFLOW
+
+ before_exec_task = next(execution_tasks)
+ simple_before_task = next(api_tasks)
+ _assert_execution_is_api_task(before_exec_task, simple_before_task)
+ assert before_exec_task.dependencies == [start_workflow_exec_task]
+
+ start_subworkflow_exec_task = next(execution_tasks)
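+    # (sic: the START_SUBWROFKLOW constant name is misspelled in the model itself)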
+ assert start_subworkflow_exec_task._stub_type == models.Task.START_SUBWROFKLOW
+ assert start_subworkflow_exec_task.dependencies == [before_exec_task]
+
+ inner_exec_task_1 = next(execution_tasks)
+ inner_task_1 = next(api_tasks)
+ _assert_execution_is_api_task(inner_exec_task_1, inner_task_1)
+ assert inner_exec_task_1.dependencies == [start_subworkflow_exec_task]
+
+ inner_exec_task_2 = next(execution_tasks)
+ inner_task_2 = next(api_tasks)
+ _assert_execution_is_api_task(inner_exec_task_2, inner_task_2)
+ assert inner_exec_task_2.dependencies == [inner_exec_task_1]
+
+ inner_exec_task_3 = next(execution_tasks)
+ inner_task_3 = next(api_tasks)
+ _assert_execution_is_api_task(inner_exec_task_3, inner_task_3)
+ assert sorted(inner_exec_task_3.dependencies) == sorted([inner_exec_task_1, inner_exec_task_2])
+
+ end_subworkflow_exec_task = next(execution_tasks)
+ assert end_subworkflow_exec_task._stub_type == models.Task.END_SUBWORKFLOW
+ assert end_subworkflow_exec_task.dependencies == [inner_exec_task_3]
+
+ after_exec_task = next(execution_tasks)
+ simple_after_task = next(api_tasks)
+ _assert_execution_is_api_task(after_exec_task, simple_after_task)
+ assert after_exec_task.dependencies == [end_subworkflow_exec_task]
+
+ end_workflow_exec_task = next(execution_tasks)
+ assert end_workflow_exec_task._stub_type == models.Task.END_WORKFLOW
+ assert end_workflow_exec_task.dependencies == [after_exec_task]
+
+
+def _assert_execution_is_api_task(execution_task, api_task):
+ assert execution_task.name == api_task.name
+ assert execution_task.function == api_task.function
+ assert execution_task.actor == api_task.actor
+ assert execution_task.arguments == api_task.arguments
+
+
+def _get_task_by_name(task_name, graph):
+ return graph.node[task_name]['task']
+
+
+def _graph(tasks):
+ graph = DiGraph()
+ for task in tasks:
+ for dependency in task.dependencies:
+ graph.add_edge(dependency, task)
+
+ return graph
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/__init__.py
new file mode 100644
index 0000000..99d0b39
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/__init__.py
@@ -0,0 +1,98 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+import uuid
+from contextlib import contextmanager
+
+import aria
+from aria.modeling import models
+from aria.orchestrator.context.common import BaseContext
+
+
+class MockContext(object):
+
+ INSTRUMENTATION_FIELDS = BaseContext.INSTRUMENTATION_FIELDS
+
+ def __init__(self, storage, task_kwargs=None):
+ self.logger = logging.getLogger('mock_logger')
+ self._task_kwargs = task_kwargs or {}
+ self._storage = storage
+ self.task = MockTask(storage, **task_kwargs)
+ self.states = []
+ self.exception = None
+
+ @property
+ def serialization_dict(self):
+ return {
+ 'context_cls': self.__class__,
+ 'context': {
+ 'storage_kwargs': self._storage.serialization_dict,
+ 'task_kwargs': self._task_kwargs
+ }
+ }
+
+ def __getattr__(self, item):
+ return None
+
+ def close(self):
+ pass
+
+ @property
+ def model(self):
+ return self._storage
+
+ @classmethod
+ def instantiate_from_dict(cls, storage_kwargs=None, task_kwargs=None):
+ return cls(storage=aria.application_model_storage(**(storage_kwargs or {})),
+ task_kwargs=(task_kwargs or {}))
+
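+    # stand-in for BaseContext.persist_changes: a no-op context manager, since these
+    # mocks keep nothing that needs persisting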
+ @property
+ @contextmanager
+ def persist_changes(self):
+ yield
+
+
+class MockActor(object):
+ def __init__(self):
+ self.name = 'actor_name'
+
+
+class MockTask(object):
+
+ INFINITE_RETRIES = models.Task.INFINITE_RETRIES
+
+ def __init__(self, model, function, arguments=None, plugin_fk=None):
+ self.function = self.name = function
+ self.plugin_fk = plugin_fk
+ self.arguments = arguments or {}
+ self.states = []
+ self.exception = None
+ self.id = str(uuid.uuid4())
+ self.logger = logging.getLogger()
+ self.attempts_count = 1
+ self.max_attempts = 1
+ self.ignore_failure = False
+ self.interface_name = 'interface_name'
+ self.operation_name = 'operation_name'
+ self.actor = MockActor()
+ self.node = self.actor
+ self.model = model
+
+ for state in models.Task.STATES:
+ setattr(self, state.upper(), state)
+
+ @property
+ def plugin(self):
+ return self.model.plugin.get(self.plugin_fk) if self.plugin_fk else None
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_executor.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_executor.py
new file mode 100644
index 0000000..32a68e0
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_executor.py
@@ -0,0 +1,149 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import pytest
+import retrying
+
+try:
+ import celery as _celery
+ app = _celery.Celery()
+ app.conf.update(CELERY_RESULT_BACKEND='amqp://')
+except ImportError:
+ _celery = None
+ app = None
+
+import aria
+from aria.modeling import models
+from aria.orchestrator import events
+from aria.orchestrator.workflows.executor import (
+ thread,
+ process,
+ # celery
+)
+
+import tests
+from . import MockContext
+
+
+def _get_function(func):
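+    # e.g. _get_function(mock_successful_task) -> '<this module>.mock_successful_task'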
+ return '{module}.{func.__name__}'.format(module=__name__, func=func)
+
+
+def execute_and_assert(executor, storage=None):
+ expected_value = 'value'
+ successful_task = MockContext(
+ storage, task_kwargs=dict(function=_get_function(mock_successful_task))
+ )
+ failing_task = MockContext(
+ storage, task_kwargs=dict(function=_get_function(mock_failing_task))
+ )
+ task_with_inputs = MockContext(
+ storage,
+ task_kwargs=dict(function=_get_function(mock_task_with_input),
+ arguments={'input': models.Argument.wrap('input', 'value')})
+ )
+
+ for task in [successful_task, failing_task, task_with_inputs]:
+ executor.execute(task)
+
+ @retrying.retry(stop_max_delay=10000, wait_fixed=100)
+ def assertion():
+ assert successful_task.states == ['start', 'success']
+ assert failing_task.states == ['start', 'failure']
+ assert task_with_inputs.states == ['start', 'failure']
+ assert isinstance(failing_task.exception, MockException)
+ assert isinstance(task_with_inputs.exception, MockException)
+ assert task_with_inputs.exception.message == expected_value
+ assertion()
+
+
+def test_thread_execute(thread_executor):
+ execute_and_assert(thread_executor)
+
+
+def test_process_execute(process_executor, storage):
+ execute_and_assert(process_executor, storage)
+
+
+def mock_successful_task(**_):
+ pass
+
+
+def mock_failing_task(**_):
+ raise MockException
+
+
+def mock_task_with_input(input, **_):
+ raise MockException(input)
+
+if app:
+ mock_successful_task = app.task(mock_successful_task)
+ mock_failing_task = app.task(mock_failing_task)
+ mock_task_with_input = app.task(mock_task_with_input)
+
+
+class MockException(Exception):
+ pass
+
+
+@pytest.fixture
+def storage(tmpdir):
+ _storage = aria.application_model_storage(aria.storage.sql_mapi.SQLAlchemyModelAPI,
+ initiator_kwargs=dict(base_dir=str(tmpdir)))
+ yield _storage
+ tests.storage.release_sqlite_storage(_storage)
+
+
+@pytest.fixture(params=[
+ (thread.ThreadExecutor, {'pool_size': 1}),
+ (thread.ThreadExecutor, {'pool_size': 2}),
+ # subprocess needs to load a tests module so we explicitly add the root directory as if
+ # the project has been installed in editable mode
+ # (celery.CeleryExecutor, {'app': app})
+])
+def thread_executor(request):
+ executor_cls, executor_kwargs = request.param
+ result = executor_cls(**executor_kwargs)
+ yield result
+ result.close()
+
+
+@pytest.fixture
+def process_executor():
+ result = process.ProcessExecutor(python_path=tests.ROOT_DIR)
+ yield result
+ result.close()
+
+
+@pytest.fixture(autouse=True)
+def register_signals():
+ def start_handler(task, *args, **kwargs):
+ task.states.append('start')
+
+ def success_handler(task, *args, **kwargs):
+ task.states.append('success')
+
+ def failure_handler(task, exception, *args, **kwargs):
+ task.states.append('failure')
+ task.exception = exception
+
+ events.start_task_signal.connect(start_handler)
+ events.on_success_task_signal.connect(success_handler)
+ events.on_failure_task_signal.connect(failure_handler)
+ yield
+ events.start_task_signal.disconnect(start_handler)
+ events.on_success_task_signal.disconnect(success_handler)
+ events.on_failure_task_signal.disconnect(failure_handler)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_process_executor.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_process_executor.py
new file mode 100644
index 0000000..e050d18
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_process_executor.py
@@ -0,0 +1,172 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import time
+import Queue
+import subprocess
+
+import pytest
+import psutil
+import retrying
+
+import aria
+
+from aria import operation
+from aria.modeling import models
+from aria.orchestrator import events
+from aria.utils.plugin import create as create_plugin
+from aria.orchestrator.workflows.executor import process
+
+import tests.storage
+import tests.resources
+from tests.helpers import FilesystemDataHolder
+from tests.fixtures import ( # pylint: disable=unused-import
+ plugins_dir,
+ plugin_manager,
+)
+from . import MockContext
+
+
+class TestProcessExecutor(object):
+
+ def test_plugin_execution(self, executor, mock_plugin, model, queue):
+ ctx = MockContext(
+ model,
+ task_kwargs=dict(function='mock_plugin1.operation', plugin_fk=mock_plugin.id)
+ )
+
+ executor.execute(ctx)
+ error = queue.get(timeout=60)
+        # tests/resources/plugins/mock-plugin1 is the plugin installed
+        # during this test's setup. The module mock_plugin1 contains a single
+        # operation named "operation", which calls an entry point defined in the plugin's
+        # setup.py. This entry point simply prints 'mock-plugin-output' to stdout.
+        # The "operation" operation that called the subprocess then raises a RuntimeError
+        # with that subprocess output as the error message.
+        # This is what we assert here. This test checks that both the PYTHONPATH (operation)
+        # and PATH (entry point) are properly updated in the subprocess in which the task is
+        # running.
+ assert isinstance(error, RuntimeError)
+ assert error.message == 'mock-plugin-output'
+
+ def test_closed(self, executor, model):
+ executor.close()
+ with pytest.raises(RuntimeError) as exc_info:
+ executor.execute(MockContext(model, task_kwargs=dict(function='some.function')))
+ assert 'closed' in exc_info.value.message
+
+ def test_process_termination(self, executor, model, fs_test_holder, tmpdir):
+ freeze_script_path = str(tmpdir.join('freeze_script'))
+ with open(freeze_script_path, 'w+b') as f:
+ f.write(
+ '''import time
+while True:
+ time.sleep(5)
+ '''
+ )
+ holder_path_argument = models.Argument.wrap('holder_path', fs_test_holder._path)
+        script_path_argument = models.Argument.wrap('freezing_script_path',
+                                                    freeze_script_path)
+
+ model.argument.put(holder_path_argument)
+ model.argument.put(script_path_argument)
+ ctx = MockContext(
+ model,
+ task_kwargs=dict(
+ function='{0}.{1}'.format(__name__, freezing_task.__name__),
+ arguments=dict(holder_path=holder_path_argument,
+ freezing_script_path=script_path_argument)),
+ )
+
+ executor.execute(ctx)
+
+ @retrying.retry(retry_on_result=lambda r: r is False, stop_max_delay=60000, wait_fixed=500)
+ def wait_for_extra_process_id():
+ return fs_test_holder.get('subproc', False)
+
+ task_pid = executor._tasks[ctx.task.id].proc.pid
+ extra_process_pid = wait_for_extra_process_id()
+
+ assert set([task_pid, extra_process_pid]).issubset(set(psutil.pids()))
+ executor.terminate(ctx.task.id)
+
+ # Give a chance to the processes to terminate
+ time.sleep(2)
+
+ # all processes should be either zombies or non existent
+ pids = [task_pid, extra_process_pid]
+ for pid in pids:
+ if pid in psutil.pids():
+ assert psutil.Process(pid).status() == psutil.STATUS_ZOMBIE
+ else:
+ # making the test more readable
+ assert pid not in psutil.pids()
+
+
+@pytest.fixture
+def queue():
+ _queue = Queue.Queue()
+
+ def handler(_, exception=None, **kwargs):
+ _queue.put(exception)
+
+ events.on_success_task_signal.connect(handler)
+ events.on_failure_task_signal.connect(handler)
+ try:
+ yield _queue
+ finally:
+ events.on_success_task_signal.disconnect(handler)
+ events.on_failure_task_signal.disconnect(handler)
+
+
+@pytest.fixture
+def fs_test_holder(tmpdir):
+ dataholder_path = str(tmpdir.join('dataholder'))
+ holder = FilesystemDataHolder(dataholder_path)
+ return holder
+
+
+@pytest.fixture
+def executor(plugin_manager):
+ result = process.ProcessExecutor(plugin_manager=plugin_manager, python_path=[tests.ROOT_DIR])
+ try:
+ yield result
+ finally:
+ result.close()
+
+
+@pytest.fixture
+def mock_plugin(plugin_manager, tmpdir):
+ source = os.path.join(tests.resources.DIR, 'plugins', 'mock-plugin1')
+ plugin_path = create_plugin(source=source, destination_dir=str(tmpdir))
+ return plugin_manager.install(source=plugin_path)
+
+
+@pytest.fixture
+def model(tmpdir):
+ _storage = aria.application_model_storage(aria.storage.sql_mapi.SQLAlchemyModelAPI,
+ initiator_kwargs=dict(base_dir=str(tmpdir)))
+ yield _storage
+ tests.storage.release_sqlite_storage(_storage)
+
+
+@operation
+def freezing_task(holder_path, freezing_script_path, **_):
+ holder = FilesystemDataHolder(holder_path)
+    # note: no shell=True -- with an argument list, shell=True would hand the script
+    # path to the shell rather than to the Python interpreter
+    holder['subproc'] = subprocess.Popen([sys.executable, freezing_script_path]).pid
+ while True:
+ time.sleep(5)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_process_executor_concurrent_modifications.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_process_executor_concurrent_modifications.py
new file mode 100644
index 0000000..86a2edf
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_process_executor_concurrent_modifications.py
@@ -0,0 +1,167 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import time
+
+import fasteners
+import pytest
+
+from aria.orchestrator import events
+from aria.orchestrator.workflows.exceptions import ExecutorException
+from aria.orchestrator.workflows import api
+from aria.orchestrator.workflows.executor import process
+from aria.orchestrator import workflow, operation
+
+import tests
+from tests.orchestrator.context import execute as execute_workflow
+from tests.orchestrator.workflows.helpers import events_collector
+from tests import mock
+from tests import storage
+from tests import helpers
+
+
+@pytest.fixture
+def dataholder(tmpdir):
+ dataholder_path = str(tmpdir.join('dataholder'))
+ holder = helpers.FilesystemDataHolder(dataholder_path)
+ return holder
+
+
+def test_concurrent_modification_on_task_succeeded(context, executor, lock_files, dataholder):
+ _test(context, executor, lock_files, _test_task_succeeded, dataholder, expected_failure=False)
+
+
+@operation
+def _test_task_succeeded(ctx, lock_files, key, first_value, second_value, holder_path):
+ _concurrent_update(lock_files, ctx.node, key, first_value, second_value, holder_path)
+
+
+def test_concurrent_modification_on_task_failed(context, executor, lock_files, dataholder):
+ _test(context, executor, lock_files, _test_task_failed, dataholder, expected_failure=True)
+
+
+@operation
+def _test_task_failed(ctx, lock_files, key, first_value, second_value, holder_path):
+ first = _concurrent_update(lock_files, ctx.node, key, first_value, second_value, holder_path)
+ if not first:
+ raise RuntimeError('MESSAGE')
+
+
+def _test(context, executor, lock_files, func, dataholder, expected_failure):
+ def _node(ctx):
+ return ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+
+ interface_name, operation_name = mock.operations.NODE_OPERATIONS_INSTALL[0]
+
+ key = 'key'
+ first_value = 'value1'
+ second_value = 'value2'
+ arguments = {
+ 'lock_files': lock_files,
+ 'key': key,
+ 'first_value': first_value,
+ 'second_value': second_value,
+ 'holder_path': dataholder.path
+ }
+
+ node = _node(context)
+ interface = mock.models.create_interface(
+ node.service,
+ interface_name,
+ operation_name,
+ operation_kwargs=dict(function='{0}.{1}'.format(__name__, func.__name__),
+ arguments=arguments)
+ )
+ node.interfaces[interface.name] = interface
+ context.model.node.update(node)
+
+ @workflow
+ def mock_workflow(graph, **_):
+ graph.add_tasks(
+ api.task.OperationTask(
+ node,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ arguments=arguments),
+ api.task.OperationTask(
+ node,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ arguments=arguments)
+ )
+
+ signal = events.on_failure_task_signal
+ with events_collector(signal) as collected:
+ try:
+ execute_workflow(mock_workflow, context, executor)
+ except ExecutorException:
+ pass
+
+ props = _node(context).attributes
+ assert dataholder['invocations'] == 2
+ assert props[key].value == dataholder[key]
+
+ exceptions = [event['kwargs']['exception'] for event in collected.get(signal, [])]
+ if expected_failure:
+ assert exceptions
+
+
+@pytest.fixture
+def executor():
+ result = process.ProcessExecutor(python_path=[tests.ROOT_DIR])
+ try:
+ yield result
+ finally:
+ result.close()
+
+
+@pytest.fixture
+def context(tmpdir):
+ result = mock.context.simple(str(tmpdir))
+ yield result
+ storage.release_sqlite_storage(result.model)
+
+
+@pytest.fixture
+def lock_files(tmpdir):
+ return str(tmpdir.join('first_lock_file')), str(tmpdir.join('second_lock_file'))
+
+
+def _concurrent_update(lock_files, node, key, first_value, second_value, holder_path):
+ holder = helpers.FilesystemDataHolder(holder_path)
+ locker1 = fasteners.InterProcessLock(lock_files[0])
+ locker2 = fasteners.InterProcessLock(lock_files[1])
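+    # handshake (sketch): the first invocation holds lock 1 and spins until the second
+    # invocation grabs lock 2, so both processes are guaranteed to update the node
+    # attributes concurrently before either one finishes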
+
+ first = locker1.acquire(blocking=False)
+
+ if first:
+ # Give chance for both processes to acquire locks
+ while locker2.acquire(blocking=False):
+ locker2.release()
+ time.sleep(0.1)
+ else:
+ locker2.acquire()
+
+ node.attributes[key] = first_value if first else second_value
+ holder['key'] = first_value if first else second_value
+ holder.setdefault('invocations', 0)
+ holder['invocations'] += 1
+
+ if first:
+ locker1.release()
+ else:
+ with locker1:
+ locker2.release()
+
+ return first
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_process_executor_extension.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_process_executor_extension.py
new file mode 100644
index 0000000..b26fa43
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_process_executor_extension.py
@@ -0,0 +1,99 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from aria import extension
+from aria.orchestrator.workflows import api
+from aria.orchestrator.workflows.core import engine, graph_compiler
+from aria.orchestrator.workflows.executor import process
+from aria.orchestrator import workflow, operation
+
+import tests
+from tests import mock
+from tests import storage
+
+
+def test_decorate_extension(context, executor):
+ arguments = {'arg1': 1, 'arg2': 2}
+
+ def get_node(ctx):
+ return ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+
+ node = get_node(context)
+ interface_name = 'test_interface'
+ operation_name = 'operation'
+ interface = mock.models.create_interface(
+ context.service,
+ interface_name,
+ operation_name,
+ operation_kwargs=dict(function='{0}.{1}'.format(__name__, _mock_operation.__name__),
+ arguments=arguments)
+ )
+ node.interfaces[interface.name] = interface
+ context.model.node.update(node)
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ node = get_node(ctx)
+ task = api.task.OperationTask(
+ node,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ arguments=arguments)
+ graph.add_tasks(task)
+ return graph
+ graph = mock_workflow(ctx=context) # pylint: disable=no-value-for-parameter
+ graph_compiler.GraphCompiler(context, executor.__class__).compile(graph)
+ eng = engine.Engine({executor.__class__: executor})
+ eng.execute(context)
+ out = get_node(context).attributes.get('out').value
+ assert out['wrapper_arguments'] == arguments
+ assert out['function_arguments'] == arguments
+
+
+@extension.process_executor
+class MockProcessExecutorExtension(object):
+
+ def decorate(self):
+ def decorator(function):
+ def wrapper(ctx, **operation_arguments):
+ with ctx.model.instrument(ctx.model.node.model_cls.attributes):
+ ctx.node.attributes['out'] = {'wrapper_arguments': operation_arguments}
+ function(ctx=ctx, **operation_arguments)
+ return wrapper
+ return decorator
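+
+# The extension above wraps every function the process executor runs: the wrapper
+# first records the arguments it receives under the node's 'out' attribute, then
+# delegates to the wrapped function, which lets the test compare the wrapper-seen
+# and function-seen arguments.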
+
+
+@operation
+def _mock_operation(ctx, **operation_arguments):
+ ctx.node.attributes['out']['function_arguments'] = operation_arguments
+
+
+@pytest.fixture
+def executor():
+ result = process.ProcessExecutor(python_path=[tests.ROOT_DIR])
+ try:
+ yield result
+ finally:
+ result.close()
+
+
+@pytest.fixture
+def context(tmpdir):
+ result = mock.context.simple(str(tmpdir))
+ yield result
+ storage.release_sqlite_storage(result.model)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_process_executor_tracked_changes.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_process_executor_tracked_changes.py
new file mode 100644
index 0000000..47ee2f7
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/executor/test_process_executor_tracked_changes.py
@@ -0,0 +1,168 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+
+import pytest
+
+from aria.orchestrator.workflows import api
+from aria.orchestrator.workflows.core import engine, graph_compiler
+from aria.orchestrator.workflows.executor import process
+from aria.orchestrator import workflow, operation
+from aria.orchestrator.workflows import exceptions
+
+import tests
+from tests import mock
+from tests import storage
+
+
+_TEST_ATTRIBUTES = {
+ 'some': 'values', 'that': 'are', 'most': 'likely', 'only': 'set', 'here': 'yo'
+}
+
+
+def test_track_changes_of_successful_operation(context, executor):
+ _run_workflow(context=context, executor=executor, op_func=_mock_success_operation)
+ _assert_tracked_changes_are_applied(context)
+
+
+def test_track_changes_of_failed_operation(context, executor):
+ with pytest.raises(exceptions.ExecutorException):
+ _run_workflow(context=context, executor=executor, op_func=_mock_fail_operation)
+ _assert_tracked_changes_are_applied(context)
+
+
+def _assert_tracked_changes_are_applied(context):
+ instance = context.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ assert all(instance.attributes[key].value == value
+ for key, value in _TEST_ATTRIBUTES.items())
+
+
+def _update_attributes(context):
+ context.node.attributes.clear()
+ context.node.attributes.update(_TEST_ATTRIBUTES)
+
+
+def test_refresh_state_of_tracked_attributes(context, executor):
+ out = _run_workflow(context=context, executor=executor, op_func=_mock_refreshing_operation)
+ assert out['after_refresh'] == out['after_change']
+ assert out['initial'] != out['after_change']
+
+
+def test_apply_tracked_changes_during_an_operation(context, executor):
+ arguments = {
+ 'committed': {'some': 'new', 'properties': 'right here'},
+ 'changed_but_refreshed': {'some': 'newer', 'properties': 'right there'}
+ }
+
+ expected_initial = context.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME).attributes
+ out = _run_workflow(
+ context=context, executor=executor, op_func=_mock_updating_operation, arguments=arguments)
+
+ expected_after_update = expected_initial.copy()
+ expected_after_update.update(arguments['committed']) # pylint: disable=no-member
+ expected_after_change = expected_after_update.copy()
+ expected_after_change.update(arguments['changed_but_refreshed']) # pylint: disable=no-member
+
+ assert out['initial'] == expected_initial
+ assert out['after_update'] == expected_after_update
+ assert out['after_change'] == expected_after_change
+ assert out['after_refresh'] == expected_after_change
+
+
+def _run_workflow(context, executor, op_func, arguments=None):
+ node = context.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+ interface_name = 'test_interface'
+ operation_name = 'operation'
+ wf_arguments = arguments or {}
+ interface = mock.models.create_interface(
+ context.service,
+ interface_name,
+ operation_name,
+ operation_kwargs=dict(function=_operation_mapping(op_func),
+ arguments=wf_arguments)
+ )
+ node.interfaces[interface.name] = interface
+ context.model.node.update(node)
+
+ @workflow
+ def mock_workflow(ctx, graph):
+ task = api.task.OperationTask(
+ node,
+ interface_name=interface_name,
+ operation_name=operation_name,
+ arguments=wf_arguments)
+ graph.add_tasks(task)
+ return graph
+ graph = mock_workflow(ctx=context) # pylint: disable=no-value-for-parameter
+ graph_compiler.GraphCompiler(context, executor.__class__).compile(graph)
+ eng = engine.Engine({executor.__class__: executor})
+ eng.execute(context)
+ out = context.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME).attributes.get('out')
+ return out.value if out else None
+
+
+@operation
+def _mock_success_operation(ctx):
+ _update_attributes(ctx)
+
+
+@operation
+def _mock_fail_operation(ctx):
+ _update_attributes(ctx)
+ raise RuntimeError
+
+
+@operation
+def _mock_refreshing_operation(ctx):
+ out = {'initial': copy.deepcopy(ctx.node.attributes)}
+ ctx.node.attributes.update({'some': 'new', 'properties': 'right here'})
+ out['after_change'] = copy.deepcopy(ctx.node.attributes)
+ ctx.model.node.refresh(ctx.node)
+ out['after_refresh'] = copy.deepcopy(ctx.node.attributes)
+ ctx.node.attributes['out'] = out
+
+
+@operation
+def _mock_updating_operation(ctx, committed, changed_but_refreshed):
+ out = {'initial': copy.deepcopy(ctx.node.attributes)}
+ ctx.node.attributes.update(committed)
+ ctx.model.node.update(ctx.node)
+ out['after_update'] = copy.deepcopy(ctx.node.attributes)
+ ctx.node.attributes.update(changed_but_refreshed)
+ out['after_change'] = copy.deepcopy(ctx.node.attributes)
+ ctx.model.node.refresh(ctx.node)
+ out['after_refresh'] = copy.deepcopy(ctx.node.attributes)
+ ctx.node.attributes['out'] = out
+
+
+def _operation_mapping(func):
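+    # e.g. _operation_mapping(_mock_success_operation) -> '<this module>._mock_success_operation'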
+ return '{name}.{func.__name__}'.format(name=__name__, func=func)
+
+
+@pytest.fixture
+def executor():
+ result = process.ProcessExecutor(python_path=[tests.ROOT_DIR])
+ try:
+ yield result
+ finally:
+ result.close()
+
+
+@pytest.fixture
+def context(tmpdir):
+ result = mock.context.simple(str(tmpdir))
+ yield result
+ storage.release_sqlite_storage(result.model)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/helpers.py b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/helpers.py
new file mode 100644
index 0000000..8e3f9b1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/orchestrator/workflows/helpers.py
@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from contextlib import contextmanager
+
+
+@contextmanager
+def events_collector(*signals):
+ handlers = {}
+ collected = {}
+
+ def handler_factory(key):
+ def handler(*args, **kwargs):
+ signal_events = collected.setdefault(key, [])
+ signal_events.append({'args': args, 'kwargs': kwargs})
+        handlers[key] = handler
+ return handler
+
+ for signal in signals:
+ signal.connect(handler_factory(signal))
+ try:
+ yield collected
+ finally:
+ for signal in signals:
+ signal.disconnect(handlers[signal])
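+
+# usage sketch (signal names illustrative):
+#     with events_collector(events.on_failure_task_signal) as collected:
+#         ...  # run something that may emit the signal
+#     failures = collected.get(events.on_failure_task_signal, [])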
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/parser/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/parser/__init__.py
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/parser/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/parser/service_templates.py b/azure/aria/aria-extension-cloudify/src/aria/tests/parser/service_templates.py
new file mode 100644
index 0000000..9e8fcae
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/parser/service_templates.py
@@ -0,0 +1,86 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from aria.utils.caching import cachedmethod
+from aria.parser.loading import LiteralLocation
+
+from .utils import (create_context, create_consumer)
+from ..helpers import (get_example_uri, get_service_template_uri)
+
+
+def consume_literal(literal, consumer_class_name='instance', cache=True, no_issues=True):
+ cachedmethod.ENABLED = cache
+ context = create_context(LiteralLocation(literal))
+ consumer, dumper = create_consumer(context, consumer_class_name)
+ consumer.consume()
+ if no_issues:
+ context.validation.dump_issues()
+ assert not context.validation.has_issues
+ return context, dumper
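+
+# minimal usage sketch (assuming a valid TOSCA snippet):
+#     context, dumper = consume_literal('tosca_definitions_version: tosca_simple_yaml_1_0')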
+
+
+def consume_use_case(use_case_name, consumer_class_name='instance', cache=True):
+ cachedmethod.ENABLED = cache
+ uri = get_example_uri('tosca-simple-1.0', 'use-cases', use_case_name,
+ '{0}.yaml'.format(use_case_name))
+ context = create_context(uri)
+ inputs_file = get_example_uri('tosca-simple-1.0', 'use-cases', use_case_name, 'inputs.yaml')
+ if os.path.isfile(inputs_file):
+ context.args.append('--inputs={0}'.format(inputs_file))
+ consumer, dumper = create_consumer(context, consumer_class_name)
+ consumer.consume()
+ context.validation.dump_issues()
+ assert not context.validation.has_issues
+ return context, dumper
+
+
+def consume_types_use_case(use_case_name, consumer_class_name='instance', cache=True):
+ cachedmethod.ENABLED = cache
+ uri = get_service_template_uri('tosca-simple-1.0', 'types', use_case_name,
+ '{0}.yaml'.format(use_case_name))
+ context = create_context(uri)
+    inputs_file = get_service_template_uri('tosca-simple-1.0', 'types', use_case_name,
+                                           'inputs.yaml')
+ if os.path.isfile(inputs_file):
+ context.args.append('--inputs={0}'.format(inputs_file))
+ consumer, dumper = create_consumer(context, consumer_class_name)
+ consumer.consume()
+ context.validation.dump_issues()
+ assert not context.validation.has_issues
+ return context, dumper
+
+
+def consume_node_cellar(consumer_class_name='instance', cache=True):
+ consume_test_case(
+ get_service_template_uri('tosca-simple-1.0', 'node-cellar', 'node-cellar.yaml'),
+ consumer_class_name=consumer_class_name,
+ inputs_uri=get_service_template_uri('tosca-simple-1.0', 'node-cellar', 'inputs.yaml'),
+ cache=cache
+ )
+
+
+def consume_test_case(uri, inputs_uri=None, consumer_class_name='instance', cache=True):
+ cachedmethod.ENABLED = cache
+ uri = get_service_template_uri(uri)
+ context = create_context(uri)
+ if inputs_uri:
+ context.args.append('--inputs=' + get_service_template_uri(inputs_uri))
+ consumer, dumper = create_consumer(context, consumer_class_name)
+ consumer.consume()
+ context.validation.dump_issues()
+ assert not context.validation.has_issues
+ return context, dumper
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_reqs_caps.py b/azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_reqs_caps.py
new file mode 100644
index 0000000..e92aec4
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_reqs_caps.py
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .service_templates import consume_test_case
+from ..helpers import get_service_template_uri
+
+
+def test_satisfy_capability_type():
+ consume_reqs_caps_template1('instance')
+
+
+def consume_reqs_caps_template1(consumer_class_name, cache=True):
+ consume_test_case(
+ get_service_template_uri('tosca-simple-1.0', 'reqs_caps', 'reqs_caps1.yaml'),
+ consumer_class_name=consumer_class_name,
+ cache=cache
+ )
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/__init__.py
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/presentation/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/presentation/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/presentation/__init__.py
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/presentation/test_types.py b/azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/presentation/test_types.py
new file mode 100644
index 0000000..cfd4d3c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/presentation/test_types.py
@@ -0,0 +1,23 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tests.parser.service_templates import consume_types_use_case
+
+
+def test_use_case_shorthand_1_name():
+ consume_types_use_case('shorthand-1', 'types')
+
+
+def test_use_case_typequalified_1_name():
+ consume_types_use_case('typequalified-1', 'types')
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/test_end2end.py b/azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/test_end2end.py
new file mode 100644
index 0000000..474d90e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/parser/test_tosca_simple_v1_0/test_end2end.py
@@ -0,0 +1,112 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ..service_templates import (consume_use_case, consume_node_cellar)
+
+
+# Use Cases
+
+def test_use_case_compute_1():
+ consume_use_case('compute-1', 'instance')
+
+
+def test_use_case_software_component_1():
+ consume_use_case('software-component-1', 'instance')
+
+
+def test_use_case_block_storage_1():
+ consume_use_case('block-storage-1', 'instance')
+
+
+def test_use_case_block_storage_2():
+ consume_use_case('block-storage-2', 'instance')
+
+
+def test_use_case_block_storage_3():
+ consume_use_case('block-storage-3', 'instance')
+
+
+def test_use_case_block_storage_4():
+ consume_use_case('block-storage-4', 'instance')
+
+
+def test_use_case_block_storage_5():
+ consume_use_case('block-storage-5', 'instance')
+
+
+def test_use_case_block_storage_6():
+ consume_use_case('block-storage-6', 'instance')
+
+
+def test_use_case_object_storage_1():
+ consume_use_case('object-storage-1', 'instance')
+
+
+def test_use_case_network_1():
+ consume_use_case('network-1', 'instance')
+
+
+def test_use_case_network_2():
+ consume_use_case('network-2', 'instance')
+
+
+def test_use_case_network_3():
+ consume_use_case('network-3', 'instance')
+
+
+def test_use_case_network_4():
+ consume_use_case('network-4', 'instance')
+
+
+def test_use_case_webserver_dbms_1():
+ consume_use_case('webserver-dbms-1', 'template')
+
+
+def test_use_case_webserver_dbms_2():
+ consume_use_case('webserver-dbms-2', 'instance')
+
+
+def test_use_case_multi_tier_1():
+ consume_use_case('multi-tier-1', 'instance')
+
+
+def test_use_case_container_1():
+ consume_use_case('container-1', 'template')
+
+
+# NodeCellar
+
+def test_node_cellar_validation():
+ consume_node_cellar('validate')
+
+
+def test_node_cellar_validation_no_cache():
+ consume_node_cellar('validate', False)
+
+
+def test_node_cellar_presentation():
+ consume_node_cellar('presentation')
+
+
+def test_node_cellar_model():
+ consume_node_cellar('template')
+
+
+def test_node_cellar_types():
+ consume_node_cellar('types')
+
+
+def test_node_cellar_instance():
+ consume_node_cellar('instance')
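consume_node_cellar's second argument evidently toggles template caching (True by default, mirroring consume_reqs_caps_template1 at the top of this patch). Its body lives outside this excerpt; a plausible shape, under that assumption:

    # Assumed helper shape (illustrative only; the real body is in
    # tests/parser/service_templates.py, not shown in this excerpt):
    def consume_node_cellar(consumer_class_name, cache=True):
        consume_test_case(
            get_service_template_uri('tosca-simple-1.0', 'node-cellar', 'node-cellar.yaml'),
            consumer_class_name=consumer_class_name,
            cache=cache
        )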
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/parser/utils.py b/azure/aria/aria-extension-cloudify/src/aria/tests/parser/utils.py
new file mode 100644
index 0000000..f0e890f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/parser/utils.py
@@ -0,0 +1,67 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.parser.loading import UriLocation
+from aria.parser.consumption import (
+ ConsumptionContext,
+ ConsumerChain,
+ Read,
+ Validate,
+ ServiceTemplate,
+ Types,
+ Inputs,
+ ServiceInstance
+)
+from aria.utils.imports import import_fullname
+
+
+def create_context(uri,
+ loader_source='aria.parser.loading.DefaultLoaderSource',
+ reader_source='aria.parser.reading.DefaultReaderSource',
+ presenter_source='aria.parser.presentation.DefaultPresenterSource',
+ presenter=None,
+ debug=False):
+ context = ConsumptionContext()
+ context.loading.loader_source = import_fullname(loader_source)()
+ context.reading.reader_source = import_fullname(reader_source)()
+ context.presentation.location = UriLocation(uri) if isinstance(uri, basestring) else uri
+ context.presentation.presenter_source = import_fullname(presenter_source)()
+ context.presentation.presenter_class = import_fullname(presenter)
+ context.presentation.print_exceptions = debug
+ return context
+
+
+def create_consumer(context, consumer_class_name):
+ consumer = ConsumerChain(context, (Read, Validate))
+ dumper = None
+ if consumer_class_name == 'validate':
+ dumper = None
+ elif consumer_class_name == 'presentation':
+ dumper = consumer.consumers[0]
+ elif consumer_class_name == 'template':
+ consumer.append(ServiceTemplate)
+ elif consumer_class_name == 'types':
+ consumer.append(ServiceTemplate, Types)
+ elif consumer_class_name == 'instance':
+ consumer.append(ServiceTemplate, Inputs, ServiceInstance)
+ else:
+ consumer.append(ServiceTemplate, Inputs, ServiceInstance)
+ consumer.append(import_fullname(consumer_class_name))
+
+ if dumper is None:
+ # Default to last consumer
+ dumper = consumer.consumers[-1]
+
+ return consumer, dumper
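Putting the two helpers together, a consumer test presumably runs the chain and dumps the result roughly as follows (a sketch; consume(), dump_issues() and dump() are the standard ARIA consumption entry points, and the URI handling is illustrative):

    # Illustrative glue: parse, validate and (optionally) instantiate a
    # service template, then report issues and dump the final output.
    from tests.parser.utils import create_context, create_consumer

    def consume(uri, consumer_class_name='instance'):
        context = create_context(uri)
        consumer, dumper = create_consumer(context, consumer_class_name)
        consumer.consume()                # run Read -> Validate -> ...
        context.validation.dump_issues()  # print any validation issues
        dumper.dump()                     # dump the chosen consumer's output
        return context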
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/requirements.txt b/azure/aria/aria-extension-cloudify/src/aria/tests/requirements.txt
new file mode 100644
index 0000000..56a7bf5
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/requirements.txt
@@ -0,0 +1,22 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+testtools==2.3.0
+fasteners==0.14.1
+sh==1.12.14
+psutil==5.2.2
+mock==2.0.0
+pylint==1.6.5
+pytest==3.2.0
+pytest-cov==2.5.1
+pytest-mock==1.6.2
+pytest-xdist==1.18.2
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/resources/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/__init__.py
new file mode 100644
index 0000000..3ed601f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/__init__.py
@@ -0,0 +1,19 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+
+DIR = os.path.dirname(__file__)
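DIR gives the test suite an on-disk anchor for fixtures; helpers such as get_service_template_uri (used earlier in this patch) presumably build on it along these lines (a hypothetical helper, matching the service-templates layout added below):

    # Hypothetical path helper (the real one lives outside this excerpt):
    import os
    from tests.resources import DIR

    def get_service_template_uri(*path):
        return os.path.join(DIR, 'service-templates', *path)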
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/resources/plugins/mock-plugin1/mock_plugin1.py b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/plugins/mock-plugin1/mock_plugin1.py
new file mode 100644
index 0000000..25a00d1
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/plugins/mock-plugin1/mock_plugin1.py
@@ -0,0 +1,27 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+
+
+def operation(**_):
+ process = subprocess.Popen(['mock-plugin1'], stdout=subprocess.PIPE)
+ output, _ = process.communicate()
+ assert not process.poll()
+ raise RuntimeError(output.strip())
+
+
+def console_script_entry_point():
+ print 'mock-plugin-output'
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/resources/plugins/mock-plugin1/setup.py b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/plugins/mock-plugin1/setup.py
new file mode 100644
index 0000000..88d354d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/plugins/mock-plugin1/setup.py
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from setuptools import setup
+
+
+setup(
+ name='mock-plugin1',
+ version='0.1',
+ py_modules=['mock_plugin1'],
+ entry_points={
+ 'console_scripts': [
+ 'mock-plugin1 = mock_plugin1:console_script_entry_point'
+ ]
+ }
+)
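Installing this package wires the mock-plugin1 console script to console_script_entry_point(), which is exactly what operation() in mock_plugin1.py shells out to; after a pip install of the package, the round trip can be checked roughly like so (illustrative):

    # Illustrative round trip (assumes the package was pip-installed):
    import subprocess

    output = subprocess.check_output(['mock-plugin1'])
    assert output.strip() == 'mock-plugin-output'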
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/resources/scripts/test_ssh.sh b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/scripts/test_ssh.sh
new file mode 100644
index 0000000..1c35370
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/scripts/test_ssh.sh
@@ -0,0 +1,96 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -u
+set -e
+
+test_run_script_basic() {
+ ctx node attributes test_value = "$test_value"
+}
+
+test_run_script_as_sudo() {
+ mkdir -p /opt/test_dir
+}
+
+test_run_script_default_base_dir() {
+ ctx node attributes work_dir = "$PWD"
+}
+
+test_run_script_with_hide() {
+ true
+}
+
+test_run_script_process_config() {
+ ctx node attributes env_value = "$test_value_env"
+ ctx node attributes bash_version = "$BASH_VERSION"
+ ctx node attributes arg1_value = "$1"
+ ctx node attributes arg2_value = "$2"
+ ctx node attributes cwd = "$PWD"
+ ctx node attributes ctx_path = "$(which ctx)"
+}
+
+test_run_script_command_prefix() {
+ ctx node attributes dollar_dash = $-
+}
+
+test_run_script_reuse_existing_ctx_1() {
+ ctx node attributes test_value1 = "$test_value1"
+}
+
+test_run_script_reuse_existing_ctx_2() {
+ ctx node attributes test_value2 = "$test_value2"
+}
+
+test_run_script_download_resource_plain() {
+ local DESTINATION=$(mktemp)
+ ctx download-resource [ "$DESTINATION" test_resource ]
+ ctx node attributes test_value = "$(cat "$DESTINATION")"
+}
+
+test_run_script_download_resource_and_render() {
+ local DESTINATION=$(mktemp)
+ ctx download-resource-and-render [ "$DESTINATION" test_resource ]
+ ctx node attributes test_value = "$(cat "$DESTINATION")"
+}
+
+test_run_script_inputs_as_env_variables_no_override() {
+ ctx node attributes test_value = "$custom_env_var"
+}
+
+test_run_script_inputs_as_env_variables_process_env_override() {
+ ctx node attributes test_value = "$custom_env_var"
+}
+
+test_run_script_error_in_script() {
+ ctx property-that-does-not-exist
+}
+
+test_run_script_abort_immediate() {
+ ctx task abort [ abort-message ]
+}
+
+test_run_script_retry() {
+ ctx task retry [ retry-message ]
+}
+
+test_run_script_abort_error_ignored_by_script() {
+ set +e
+ ctx task abort [ abort-message ]
+}
+
+# Injected by test
+"$test_operation" "$@"
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/inputs.yaml b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/inputs.yaml
new file mode 100644
index 0000000..37ab9ea
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/inputs.yaml
@@ -0,0 +1,3 @@
+openstack_credential:
+ user: username
+ token: password
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/node-cellar.yaml b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/node-cellar.yaml
new file mode 100644
index 0000000..260f0bf
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/node-cellar.yaml
@@ -0,0 +1,357 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# NFV is not used here; we use its profile only to exercise validation of the imports
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0
+#tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+ Node Cellar TOSCA blueprint.
+ Here is some Unicode: 中國.
+
+metadata:
+ template_name: node-cellar
+ template_author: ARIA
+ template_version: '1.0.0'
+ aria_version: '0.0'
+
+imports:
+ - types/openstack.yaml
+ - types/nodejs.yaml
+ - types/mongodb.yaml
+ - types/nginx.yaml
+ - aria-1.0
+
+dsl_definitions:
+
+ default_openstack_credential: &DEFAULT_OPENSTACK_CREDENTIAL
+ user: openstackadmin
+ token: { concat: [ openstack, 123 ] }
+
+repositories:
+
+ node_cellar:
+ description: >-
+ The repository for the Node Cellar application and its dependencies.
+ url: https://github.com/ccoenraets/nodecellar/archive/
+ credential:
+ user: guest
+ token: ''
+
+interface_types:
+
+ Maintenance:
+ derived_from: tosca.interfaces.Root
+ inputs:
+ mode:
+ type: string
+ default: immediate
+ constraints:
+ - valid_values: [ immediate, eventual ]
+ description: >-
+ The mode in which maintenance mode is enabled/disabled.
+ enable:
+ description: >-
+ Enable maintenance mode.
+ disable:
+ description: >-
+ Disable maintenance mode.
+
+node_types:
+
+ NodeMongoApplication:
+ derived_from: nodejs.Application
+ interfaces:
+ Maintenance:
+ type: Maintenance
+ requirements:
+ - database:
+ capability: tosca.capabilities.Endpoint.Database
+ node: mongodb.Database
+
+topology_template:
+
+ inputs:
+ openstack_credential:
+ type: openstack.Credential
+ value: *DEFAULT_OPENSTACK_CREDENTIAL
+
+ node_templates:
+
+ # Application
+
+ node_cellar:
+ description: >-
+ Node Cellar Node.js web application.
+ type: NodeMongoApplication
+ artifacts:
+ node_cellar:
+ description: >-
+ The Node Cellar application package.
+ type: os.Archive
+ file: master.zip
+ repository: node_cellar
+ deploy_path: /opt/nodejs/applications/node-cellar
+ properties:
+ unpack_credential:
+ user: gigaspaces
+ token: { get_attribute: [ SELF, tosca_id ] }
+ #token: { get_property: [ SELF, app_endpoint, protocol ] }
+ #token: { get_property: [ HOST, flavor_name ] }
+ #token: { token: [ { get_property: [ HOST, flavor_name ] }, '.', 1 ] }
+ #token: { token: [ 'zero.one|two-three', '.|-', 3 ] }
+ interfaces:
+ Maintenance:
+ inputs:
+ mode: eventual
+ enable: maintenance_node_cellar.sh
+ disable: maintenance_node_cellar.sh
+ Standard:
+ create:
+ implementation:
+ primary: create_node_cellar.sh
+ dependencies:
+ - "process.args.1 > { get_attribute: [ SELF, tosca_id ] }"
+ - "process.args.2 > { get_property: [ HOST, flavor_name ] }"
+ - ssh.user > admin
+ - ssh.password > '1234'
+ - ssh.use_sudo > true
+ requirements:
+ - database: node_cellar_database
+ capabilities:
+ app_endpoint:
+ properties:
+ protocol: udp
+ url_path: /nodecellar
+
+ node_cellar_database:
+ description: >-
+ Node Cellar MongoDB database.
+ type: mongodb.Database
+ properties:
+ name: node_cellar
+ artifacts:
+ initial:
+ description: >-
+ The Node Cellar initial database.
+ type: mongodb.DatabaseDump
+ file: node-cellar.json
+ repository: node_cellar
+
+ # Server software
+
+ nodejs:
+ description: >-
+ Node.js instance.
+ type: nodejs.Server
+ requirements:
+ - host: application_host
+ capabilities:
+ data_endpoint:
+ properties:
+ url_path: /app
+ node_filter: # cannot be validated
+ properties:
+ #- flavor_name: { valid_values: [ {concat:[m1,.,small]} ] } # won't work because not validated :/
+ - flavor_name: { valid_values: [ m1.small ] }
+ capabilities:
+ - scalable:
+ properties:
+ - max_instances: { greater_or_equal: 5 }
+
+ mongodb:
+ description: >-
+ MongoDB instance.
+ type: mongodb.Server
+ requirements:
+ - host:
+ node: openstack.Instance
+ node_filter:
+ properties:
+ - flavor_name: { valid_values: [ m1.medium, { concat: [ { concat: [ m1, . ] }, large ] } ] }
+ #- flavor_name: { valid_values: [ m1.medium, m1.large ] }
+ capabilities:
+ - scalable:
+ properties:
+ - max_instances: { greater_or_equal: 5 }
+ relationship:
+ interfaces:
+ Configure:
+ target_changed: changed.sh
+
+ nginx:
+ type: nginx.Nginx
+ requirements:
+ - host: loadbalancer_host
+ - feature: loadbalancer
+
+ # Features
+
+ loadbalancer:
+ type: nginx.LoadBalancer
+ properties:
+ algorithm: round-robin
+
+ # Hosts
+
+ loadbalancer_host:
+ description: >-
+ Host for the loadbalancer.
+ type: openstack.Instance
+ properties:
+ flavor_name: m1.small
+ os_users: # map of os.UserInfo
+ root:
+ password: admin123
+ interfaces:
+ Standard:
+ inputs:
+ openstack_credential: { get_input: openstack_credential }
+ configure:
+ implementation:
+ primary: juju > run_charm
+ dependencies:
+ - charm > loadbalancer
+
+ application_host:
+ copy: loadbalancer_host
+ description: >-
+ Host for applications.
+ properties:
+ flavor_name: m1.small
+ os_users: # map of os.UserInfo
+ nodejs:
+ password: nodejs123
+ groups:
+ - www-data
+ capabilities:
+ scalable:
+ properties:
+ max_instances: 5 # overrides the policy
+
+ data_host:
+ copy: loadbalancer_host
+ description: >-
+ Host for data.
+ properties:
+ flavor_name: m1.large
+ flavor_id: 5d62e82c-924e-4fa9-b1e4-c133867596f7
+ os_users: # map of os.UserInfo
+ mongodb:
+ password: mongo123
+ requirements:
+ - local_storage:
+ node: data_volume
+ relationship:
+ properties:
+ location: /mnt/volume
+ capabilities:
+ scalable:
+ properties:
+ max_instances: 6 # overrides the policy
+
+ data_volume:
+ type: openstack.Volume
+ properties:
+ size: 10 GB
+ interfaces:
+ Standard:
+ inputs:
+ openstack_credential: { get_input: openstack_credential }
+ create: create_data_volume.sh
+
+ groups:
+
+ node_cellar_group:
+ type: openstack.Secured
+ members:
+ - loadbalancer
+ - application_host
+ - data_host
+ interfaces:
+ Standard:
+ inputs:
+ openstack_credential: { get_input: openstack_credential }
+
+ policies:
+
+ app_scaling:
+ type: aria.Scaling
+ properties:
+ max_instances: 10
+ default_instances: 2
+ targets:
+ - node_cellar
+ - nodejs
+
+ host_scaling:
+ type: openstack.Scaling
+ properties:
+ bandwidth_threshold: 2 GB
+ max_instances: 10
+ default_instances: 2
+ targets: # node templates or groups
+ - node_cellar_group
+
+ juju:
+ description: >-
+ Juju plugin executes charms.
+ type: aria.Plugin
+ properties:
+ version: 1.0
+ enabled: false
+
+ maintenance_on:
+ type: MaintenanceWorkflow
+ properties:
+ enabled: true
+
+ maintenance_off:
+ type: MaintenanceWorkflow
+ properties:
+ enabled: false
+
+ substitution_mappings:
+
+ node_type: tosca.nodes.WebApplication
+ requirements:
+ host: [ node_cellar, host ] # doesn't really make sense; just for testing
+ capabilities:
+ app_endpoint: [ loadbalancer, client ]
+
+ outputs:
+
+ endpoint:
+ description: >-
+ The application endpoint.
+ type: string
+ value: { get_property: [ nodejs, data_endpoint, url_path ] }
+
+policy_types:
+
+ MaintenanceWorkflow:
+ description: >-
+ Workflow to put all nodes in/out of maintenance mode. For web servers, this will show a "this
+ site is under maintenance and we'll be back soon" web page. Database nodes will then close all
+ client connections cleanly and shut down services.
+ derived_from: aria.Workflow
+ properties:
+ implementation:
+ type: string
+ default: workflows.maintenance
+ enabled:
+ description: >-
+ Whether to turn maintenance mode on or off.
+ type: boolean
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/mongodb.yaml b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/mongodb.yaml
new file mode 100644
index 0000000..7031252
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/mongodb.yaml
@@ -0,0 +1,72 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+imports:
+ - os.yaml
+
+node_types:
+
+ mongodb.Server:
+ description: >-
+ MongoDB server application.
+ derived_from: tosca.nodes.DBMS
+ properties:
+ root_password: # @override
+ type: string
+ default: admin
+ port: # @override
+ type: integer
+ default: 27017
+ artifacts:
+ mongodb:
+ description: >-
+ MongoDB application package.
+ type: os.Archive
+ file: https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu1604-3.2.8.tgz
+ deploy_path: /opt/mongodb
+ capabilities:
+ host: # @override
+ type: tosca.capabilities.Container
+ valid_source_types: [ mongodb.Database ]
+
+ mongodb.Database:
+ description: >-
+ MongoDB database.
+
+ Supports importing database data if a mongodb.DatabaseDump is provided.
+ derived_from: tosca.nodes.Database
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
+ create:
+ implementation:
+ primary: mongodb/create_and_import_database.sh
+ dependencies:
+ - mongodb/utils/api.sh
+ - utils/os.sh
+ requirements:
+ - host: # @override
+ capability: tosca.capabilities.Container
+ node: mongodb.Server
+ relationship: tosca.relationships.HostedOn
+
+artifact_types:
+
+ mongodb.DatabaseDump:
+ description: >-
+ Dump of a MongoDB database.
+ derived_from: tosca.artifacts.Root
+ file_ext:
+ - json
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/nginx.yaml b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/nginx.yaml
new file mode 100644
index 0000000..3621360
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/nginx.yaml
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+node_types:
+
+ nginx.Nginx:
+ description: >-
+ Nginx instance.
+ derived_from: tosca.nodes.SoftwareComponent
+ requirements:
+ - feature:
+ capability: tosca.capabilities.Node
+
+ nginx.LoadBalancer:
+ description: >-
+ Nginx loadbalancer feature.
+ derived_from: tosca.nodes.LoadBalancer
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/nodejs.yaml b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/nodejs.yaml
new file mode 100644
index 0000000..2b4d451
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/nodejs.yaml
@@ -0,0 +1,69 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+imports:
+ - os.yaml
+
+node_types:
+
+ nodejs.Server:
+ description: >-
+ Node.js server application.
+ derived_from: tosca.nodes.WebServer
+ artifacts:
+ nodejs:
+ description: >-
+ Node.js application package.
+ type: os.Archive
+ file: https://nodejs.org/dist/v4.4.7/node-v4.4.7-linux-x64.tar.xz
+ deploy_path: /opt/nodejs
+ capabilities:
+ data_endpoint: # @override
+ type: tosca.capabilities.Endpoint
+ properties:
+ port:
+ type: tosca.datatypes.network.PortDef
+ default: 8080
+ url_path:
+ type: string
+ default: /
+ admin_endpoint: # @override
+ type: tosca.capabilities.Endpoint.Admin
+ properties:
+ port:
+ type: tosca.datatypes.network.PortDef
+ default: 8080
+ url_path:
+ type: string
+ default: /admin
+ host: # @override
+ type: tosca.capabilities.Container
+ valid_source_types: [ nodejs.Application ]
+ occurrences: [ 0, 1 ]
+
+ nodejs.Application:
+ derived_from: tosca.nodes.WebApplication
+ capabilities:
+ app_endpoint: # @override
+ type: tosca.capabilities.Endpoint
+ properties:
+ port:
+ type: tosca.datatypes.network.PortDef
+ default: 8080
+ requirements:
+ - host: # @override
+ capability: tosca.capabilities.Container
+ node: nodejs.Server
+ relationship: tosca.relationships.HostedOn
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/openstack.yaml b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/openstack.yaml
new file mode 100644
index 0000000..6941c1a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/openstack.yaml
@@ -0,0 +1,201 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+imports:
+ - os.yaml
+ - aria-1.0
+
+dsl_definitions:
+
+ openstack:
+ uuid_constraints: &OPENSTACK_UUID_CONSTRAINTS
+ - pattern: '^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{12}$'
+
+node_types:
+
+ openstack.Instance:
+ description: >-
+ OpenStack instance.
+
+ You may assign an image_id or attach an openstack.Image artifact (the artifact
+ will take precedence).
+
+ You may assign either flavor_id or flavor_name (flavor_id will take precedence).
+ If neither is assigned, flavor_name has a default value.
+ derived_from: tosca.nodes.Compute
+ properties:
+ image_id:
+ description: >-
+ See: https://s3itwiki.uzh.ch/display/clouddoc/Supported+Images
+ type: openstack.UUID
+ default: 5d62e82c-924e-4fa9-b1e4-c133867596f7
+ flavor_id:
+ type: openstack.UUID
+ required: false
+ flavor_name:
+ type: string
+ default: m1.medium
+ required: false
+ availability_zone:
+ description: >-
+ OpenStack availability zone.
+ type: string
+ required: false
+ os_users:
+ type: map
+ entry_schema: os.UserInfo
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
+ inputs:
+ openstack_credential:
+ description: The OpenStack API credential for all operations.
+ type: openstack.Credential
+ create:
+ implementation:
+ primary: openstack/create_instance.sh
+ dependencies:
+ - openstack/utils/api.sh
+ - utils/os.sh
+ requirements:
+ - local_storage: # @override
+ capability: tosca.capabilities.Attachment
+ node: openstack.Volume
+ relationship: tosca.relationships.AttachesTo
+# relationship:
+# type: tosca.relationships.AttachesTo
+# interfaces:
+# Standard:
+# inputs:
+# xxx:
+# type: string
+# default: { concat: [ a, b ] }
+ occurrences: [ 0, UNBOUNDED ]
+
+ openstack.Volume:
+ description: >-
+ OpenStack volume.
+
+ See: http://developer.openstack.org/api-ref-blockstorage-v2.html
+ derived_from: tosca.nodes.BlockStorage
+ properties:
+ tenant_id:
+ type: openstack.UUID
+ required: false
+ availability_zone:
+ type: string
+ required: false
+ source_volid:
+ type: openstack.UUID
+ required: false
+ description:
+ type: string
+ required: false
+ multiattach:
+ type: boolean
+ default: false
+ #snapshot_id: # @override
+ # type: openstack.UUID
+ # required: false
+ name:
+ type: string
+ required: false
+ volume_type:
+ type: string
+ required: false
+ metadata:
+ type: map
+ entry_schema: string
+ required: false
+ source_replica:
+ type: openstack.UUID
+ required: false
+ consistencygroup_id:
+ type: openstack.UUID
+ required: false
+ scheduler_hints:
+ type: map
+ entry_schema: string
+ required: false
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
+ inputs:
+ openstack_credential:
+ description: The OpenStack API credential for all operations.
+ type: openstack.Credential
+ create:
+ implementation:
+ primary: openstack/create_volume.sh
+ dependencies:
+ - openstack/utils/api.sh
+ - utils/os.sh
+
+group_types:
+
+ openstack.Secured:
+ description: >-
+ OpenStack secured group.
+ derived_from: tosca.groups.Root
+ members:
+ - openstack.Instance
+ interfaces:
+ Standard:
+ type: tosca.interfaces.node.lifecycle.Standard
+ inputs:
+ openstack_credential:
+ description: The OpenStack API credential for all operations.
+ type: openstack.Credential
+ create:
+ implementation:
+ primary: openstack/create_secured_group.sh
+ dependencies:
+ - openstack/utils/api.sh
+ - utils/os.sh
+
+policy_types:
+
+ openstack.Scaling:
+ description: >-
+ OpenStack scaling policy.
+ derived_from: aria.Scaling
+ properties:
+ bandwidth_threshold:
+ type: scalar-unit.size
+ default: 1 GB
+ targets:
+ - openstack.Instance
+ - openstack.Secured
+
+data_types:
+
+ openstack.Credential:
+ derived_from: tosca.datatypes.Credential
+
+ openstack.UUID:
+ description: >-
+ OpenStack UUID (in GUID format).
+ derived_from: string
+ constraints: *OPENSTACK_UUID_CONSTRAINTS
+
+artifact_types:
+
+ openstack.Image:
+ description: >-
+ OpenStack image artifact.
+ derived_from: tosca.artifacts.Deployment.Image.VM
+ file_ext:
+ - img
+ - iso
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/os.yaml b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/os.yaml
new file mode 100644
index 0000000..adc6363
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/types/os.yaml
@@ -0,0 +1,74 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+dsl_definitions:
+
+ os:
+ user_and_group_name_constraints: &OS_USER_AND_GROUP_NAME_CONSTRAINTS
+ - pattern: '^[a-z0-9_-]{3,16}$'
+ password_constraints: &OS_PASSWORD_CONSTRAINTS
+ - pattern: '^[a-z0-9_-]{6,18}$'
+
+artifact_types:
+
+ os.Package:
+ description: >-
+ Generic application package.
+ derived_from: tosca.artifacts.Root
+
+ os.Archive:
+ description: >-
+ Application package in an archive.
+ derived_from: os.Package
+ file_ext:
+ - zip
+ - tar
+ - tar.gz
+ - tar.xz
+ properties:
+ unpack_credential:
+ type: tosca.datatypes.Credential
+ required: false
+
+ os.Deb:
+ description: >-
+ Debian application package.
+ derived_from: os.Package
+ file_ext:
+ - deb
+
+ os.RPM:
+ description: >-
+ RPM application package.
+ derived_from: os.Package
+ file_ext:
+ - rpm
+
+data_types:
+
+ os.UserInfo:
+ description: >-
+ Information about an operating system user.
+ derived_from: tosca.datatypes.Root
+ properties:
+ password:
+ type: string
+ constraints: *OS_PASSWORD_CONSTRAINTS
+ groups:
+ type: list
+ entry_schema:
+ type: string
+ constraints: *OS_USER_AND_GROUP_NAME_CONSTRAINTS
+ required: false
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/workflows.py b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/workflows.py
new file mode 100644
index 0000000..fdca65d
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/node-cellar/workflows.py
@@ -0,0 +1,40 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from aria import workflow
+from aria.orchestrator.workflows.api import task
+from aria.orchestrator.workflows.exceptions import TaskException
+
+
+INTERFACE_NAME = 'Maintenance'
+ENABLE_OPERATION_NAME = 'enable'
+DISABLE_OPERATION_NAME = 'disable'
+
+
+@workflow
+def maintenance(ctx, graph, enabled):
+ """
+ Custom workflow to call the operations on the Maintenance interface.
+ """
+
+ for node in ctx.model.node.iter():
+ try:
+ graph.add_tasks(task.OperationTask(node,
+ interface_name=INTERFACE_NAME,
+ operation_name=ENABLE_OPERATION_NAME if enabled
+ else DISABLE_OPERATION_NAME))
+ except TaskException:
+ pass
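This maintenance function is what the MaintenanceWorkflow policy type in node-cellar.yaml points at through its default implementation string, workflows.maintenance; an orchestrator would presumably resolve and call it along these lines (illustrative):

    # Illustrative resolution of the policy's 'implementation' string:
    from aria.utils.imports import import_fullname

    maintenance = import_fullname('workflows.maintenance')
    # later invoked with a workflow context and task graph, e.g.:
    # maintenance(ctx=workflow_ctx, graph=task_graph, enabled=True)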
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/reqs_caps/reqs_caps1.yaml b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/reqs_caps/reqs_caps1.yaml
new file mode 100644
index 0000000..466a78e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/reqs_caps/reqs_caps1.yaml
@@ -0,0 +1,40 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+capability_types:
+ Socket:
+ derived_from: tosca.capabilities.Root
+
+node_types:
+ Socket:
+ derived_from: tosca.nodes.Root
+ capabilities:
+ socket: Socket
+
+ Plug:
+ derived_from: tosca.nodes.Root
+ requirements:
+ - plug:
+ capability: Socket
+
+topology_template:
+ node_templates:
+ socket:
+ type: Socket
+
+ plug:
+ type: Plug
\ No newline at end of file
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/types/shorthand-1/shorthand-1.yaml b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/types/shorthand-1/shorthand-1.yaml
new file mode 100644
index 0000000..bb5a84e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/types/shorthand-1/shorthand-1.yaml
@@ -0,0 +1,23 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+ TOSCA simple profile that defines a compute instance and a block storage with the "shorthand type"
+
+topology_template:
+
+ node_templates:
+
+ my_server:
+ type: Compute
+ requirements:
+ - local_storage:
+ node: my_block_storage
+ relationship:
+ type: AttachesTo
+ properties:
+ location: /path1/path2
+
+ my_block_storage:
+ type: BlockStorage
+ properties:
+ size: 10 GB
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/types/typequalified-1/typequalified-1.yaml b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/types/typequalified-1/typequalified-1.yaml
new file mode 100644
index 0000000..b54604f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/resources/service-templates/tosca-simple-1.0/types/typequalified-1/typequalified-1.yaml
@@ -0,0 +1,23 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >-
+ TOSCA simple profile that defines a compute instance and a block storage with the "typequalified type"
+
+topology_template:
+
+ node_templates:
+
+ my_server:
+ type: tosca:Compute
+ requirements:
+ - local_storage:
+ node: my_block_storage
+ relationship:
+ type: AttachesTo
+ properties:
+ location: /path1/path2
+
+ my_block_storage:
+ type: tosca:BlockStorage
+ properties:
+ size: 10 GB
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/storage/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/storage/__init__.py
new file mode 100644
index 0000000..8a4d613
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/storage/__init__.py
@@ -0,0 +1,53 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from shutil import rmtree
+from tempfile import mkdtemp
+
+from sqlalchemy import (
+ create_engine,
+ orm,
+ pool,
+ MetaData
+)
+
+
+class TestFileSystem(object):
+
+ def setup_method(self):
+ self.path = mkdtemp('{0}'.format(self.__class__.__name__))
+
+ def teardown_method(self):
+ rmtree(self.path, ignore_errors=True)
+
+
+def release_sqlite_storage(storage):
+ """
+ Closes the session and drops all tables of the given storage.
+ :param storage: model storage whose SQLite backend should be released
+ :return: None
+ """
+ storage._all_api_kwargs['session'].close()
+ MetaData(bind=storage._all_api_kwargs['engine']).drop_all()
+
+
+def init_inmemory_model_storage():
+ uri = 'sqlite:///:memory:'
+ engine_kwargs = dict(connect_args={'check_same_thread': False}, poolclass=pool.StaticPool)
+
+ engine = create_engine(uri, **engine_kwargs)
+ session_factory = orm.sessionmaker(bind=engine)
+
+ return dict(engine=engine, session=session_factory())
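These two helpers are the standard fixture pair in the storage tests that follow: init_inmemory_model_storage stands up a shared in-memory SQLite engine, and release_sqlite_storage tears it down. A minimal sketch mirroring the fixtures in test_model_storage.py below:

    # Minimal sketch of the fixture pattern used by the storage tests:
    from aria import application_model_storage
    from aria.storage import sql_mapi
    from tests.storage import (init_inmemory_model_storage,
                               release_sqlite_storage)

    storage = application_model_storage(sql_mapi.SQLAlchemyModelAPI,
                                        initiator=init_inmemory_model_storage)
    # ... exercise storage.node, storage.service_template, etc. ...
    release_sqlite_storage(storage)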
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/storage/test_collection_instrumentation.py b/azure/aria/aria-extension-cloudify/src/aria/tests/storage/test_collection_instrumentation.py
new file mode 100644
index 0000000..e915421
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/storage/test_collection_instrumentation.py
@@ -0,0 +1,257 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from aria.modeling import models
+from aria.storage import collection_instrumentation
+
+
+class MockActor(object):
+ def __init__(self):
+ self.dict_ = {}
+ self.list_ = []
+
+
+class MockMAPI(object):
+
+ def __init__(self):
+ pass
+
+ def put(self, *args, **kwargs):
+ pass
+
+ def update(self, *args, **kwargs):
+ pass
+
+
+class CollectionInstrumentation(object):
+
+ @pytest.fixture
+ def actor(self):
+ return MockActor()
+
+ @pytest.fixture
+ def model(self):
+ return MockMAPI()
+
+ @pytest.fixture
+ def dict_(self, actor, model):
+ return collection_instrumentation._InstrumentedDict(model, actor, 'dict_', models.Attribute)
+
+ @pytest.fixture
+ def list_(self, actor, model):
+ return collection_instrumentation._InstrumentedList(model, actor, 'list_', models.Attribute)
+
+
+class TestDict(CollectionInstrumentation):
+
+ def test_keys(self, actor, dict_):
+ dict_.update(
+ {
+ 'key1': models.Attribute.wrap('key1', 'value1'),
+ 'key2': models.Attribute.wrap('key2', 'value2')
+ }
+ )
+ assert sorted(dict_.keys()) == sorted(['key1', 'key2']) == sorted(actor.dict_.keys())
+
+ def test_values(self, actor, dict_):
+ dict_.update({
+ 'key1': models.Attribute.wrap('key1', 'value1'),
+ 'key2': models.Attribute.wrap('key1', 'value2')
+ })
+ assert (sorted(dict_.values()) ==
+ sorted(['value1', 'value2']) ==
+ sorted(v.value for v in actor.dict_.values()))
+
+ def test_items(self, dict_):
+ dict_.update({
+ 'key1': models.Attribute.wrap('key1', 'value1'),
+ 'key2': models.Attribute.wrap('key1', 'value2')
+ })
+ assert sorted(dict_.items()) == sorted([('key1', 'value1'), ('key2', 'value2')])
+
+ def test_iter(self, actor, dict_):
+ dict_.update({
+ 'key1': models.Attribute.wrap('key1', 'value1'),
+ 'key2': models.Attribute.wrap('key1', 'value2')
+ })
+ assert sorted(list(dict_)) == sorted(['key1', 'key2']) == sorted(actor.dict_.keys())
+
+ def test_bool(self, dict_):
+ assert not dict_
+ dict_.update({
+ 'key1': models.Attribute.wrap('key1', 'value1'),
+ 'key2': models.Attribute.wrap('key1', 'value2')
+ })
+ assert dict_
+
+ def test_set_item(self, actor, dict_):
+ dict_['key1'] = models.Attribute.wrap('key1', 'value1')
+ assert dict_['key1'] == 'value1' == actor.dict_['key1'].value
+ assert isinstance(actor.dict_['key1'], models.Attribute)
+
+ def test_nested(self, actor, dict_):
+ dict_['key'] = {}
+ assert isinstance(actor.dict_['key'], models.Attribute)
+ assert dict_['key'] == actor.dict_['key'].value == {}
+
+ dict_['key']['inner_key'] = 'value'
+
+ assert len(dict_) == 1
+ assert 'inner_key' in dict_['key']
+ assert dict_['key']['inner_key'] == 'value'
+ assert dict_['key'].keys() == ['inner_key']
+ assert dict_['key'].values() == ['value']
+ assert dict_['key'].items() == [('inner_key', 'value')]
+ assert isinstance(actor.dict_['key'], models.Attribute)
+ assert isinstance(dict_['key'], collection_instrumentation._InstrumentedDict)
+
+ dict_['key'].update({'updated_key': 'updated_value'})
+ assert len(dict_) == 1
+ assert 'updated_key' in dict_['key']
+ assert dict_['key']['updated_key'] == 'updated_value'
+ assert sorted(dict_['key'].keys()) == sorted(['inner_key', 'updated_key'])
+ assert sorted(dict_['key'].values()) == sorted(['value', 'updated_value'])
+ assert sorted(dict_['key'].items()) == sorted([('inner_key', 'value'),
+ ('updated_key', 'updated_value')])
+ assert isinstance(actor.dict_['key'], models.Attribute)
+ assert isinstance(dict_['key'], collection_instrumentation._InstrumentedDict)
+
+ dict_.update({'key': 'override_value'})
+ assert len(dict_) == 1
+ assert 'key' in dict_
+ assert dict_['key'] == 'override_value'
+ assert len(actor.dict_) == 1
+ assert isinstance(actor.dict_['key'], models.Attribute)
+ assert actor.dict_['key'].value == 'override_value'
+
+ def test_get_item(self, actor, dict_):
+ dict_['key1'] = models.Attribute.wrap('key1', 'value1')
+ assert isinstance(actor.dict_['key1'], models.Attribute)
+
+ def test_update(self, actor, dict_):
+ dict_['key1'] = 'value1'
+
+ new_dict = {'key2': 'value2'}
+ dict_.update(new_dict)
+ assert len(dict_) == 2
+ assert dict_['key2'] == 'value2'
+ assert isinstance(actor.dict_['key2'], models.Attribute)
+
+ new_dict = {}
+ new_dict.update(dict_)
+ assert new_dict['key1'] == dict_['key1']
+
+ def test_copy(self, dict_):
+ dict_['key1'] = 'value1'
+
+ new_dict = dict_.copy()
+ assert new_dict is not dict_
+ assert new_dict == dict_
+
+ dict_['key1'] = 'value2'
+ assert new_dict['key1'] == 'value1'
+ assert dict_['key1'] == 'value2'
+
+ def test_clear(self, dict_):
+ dict_['key1'] = 'value1'
+ dict_.clear()
+
+ assert len(dict_) == 0
+
+
+class TestList(CollectionInstrumentation):
+
+ def test_append(self, actor, list_):
+ list_.append(models.Attribute.wrap('name', 'value1'))
+ list_.append('value2')
+ assert len(actor.list_) == 2
+ assert len(list_) == 2
+ assert isinstance(actor.list_[0], models.Attribute)
+ assert list_[0] == 'value1'
+
+ assert isinstance(actor.list_[1], models.Attribute)
+ assert list_[1] == 'value2'
+
+ list_[0] = 'new_value1'
+ list_[1] = 'new_value2'
+ assert isinstance(actor.list_[0], models.Attribute)
+ assert isinstance(actor.list_[1], models.Attribute)
+ assert list_[0] == 'new_value1'
+ assert list_[1] == 'new_value2'
+
+ def test_iter(self, list_):
+ list_.append('value1')
+ list_.append('value2')
+ assert sorted(list_) == sorted(['value1', 'value2'])
+
+ def test_insert(self, actor, list_):
+ list_.append('value1')
+ list_.insert(0, 'value2')
+ list_.insert(2, 'value3')
+ list_.insert(10, 'value4')
+ assert sorted(list_) == sorted(['value1', 'value2', 'value3', 'value4'])
+ assert len(actor.list_) == 4
+
+ def test_set(self, list_):
+ list_.append('value1')
+ list_.append('value2')
+
+ list_[1] = 'value3'
+ assert len(list_) == 2
+ assert sorted(list_) == sorted(['value1', 'value3'])
+
+ def test_insert_into_nested(self, actor, list_):
+ list_.append([])
+
+ list_[0].append('inner_item')
+ assert isinstance(actor.list_[0], models.Attribute)
+ assert len(list_) == 1
+ assert list_[0][0] == 'inner_item'
+
+ list_[0].append('new_item')
+ assert isinstance(actor.list_[0], models.Attribute)
+ assert len(list_) == 1
+ assert list_[0][1] == 'new_item'
+
+ assert list_[0] == ['inner_item', 'new_item']
+ assert ['inner_item', 'new_item'] == list_[0]
+
+
+class TestDictList(CollectionInstrumentation):
+ def test_dict_in_list(self, actor, list_):
+ list_.append({})
+ assert len(list_) == 1
+ assert isinstance(actor.list_[0], models.Attribute)
+ assert actor.list_[0].value == {}
+
+ list_[0]['key'] = 'value'
+ assert list_[0]['key'] == 'value'
+ assert len(actor.list_) == 1
+ assert isinstance(actor.list_[0], models.Attribute)
+ assert actor.list_[0].value['key'] == 'value'
+
+ def test_list_in_dict(self, actor, dict_):
+ dict_['key'] = []
+ assert len(dict_) == 1
+ assert isinstance(actor.dict_['key'], models.Attribute)
+ assert actor.dict_['key'].value == []
+
+ dict_['key'].append('value')
+ assert dict_['key'][0] == 'value'
+ assert len(actor.dict_) == 1
+ assert isinstance(actor.dict_['key'], models.Attribute)
+ assert actor.dict_['key'].value[0] == 'value'
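The pattern under test throughout this module: writes through the instrumented views are transparently wrapped in models.Attribute on the actor, while reads come back unwrapped. In miniature (names taken from the fixtures above):

    # Miniature of the wrap/unwrap round trip exercised by these tests:
    actor = MockActor()
    d = collection_instrumentation._InstrumentedDict(
        MockMAPI(), actor, 'dict_', models.Attribute)
    d['key'] = 'value'                         # stored wrapped on the actor
    assert isinstance(actor.dict_['key'], models.Attribute)
    assert d['key'] == 'value'                 # read back unwrapped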
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/storage/test_model_storage.py b/azure/aria/aria-extension-cloudify/src/aria/tests/storage/test_model_storage.py
new file mode 100644
index 0000000..518d624
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/storage/test_model_storage.py
@@ -0,0 +1,213 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from sqlalchemy import (
+ Column,
+ Integer,
+ Text
+)
+
+from aria import (
+ application_model_storage,
+ modeling
+)
+from aria.storage import (
+ ModelStorage,
+ exceptions,
+ sql_mapi,
+)
+
+from tests import (
+ mock,
+ storage as tests_storage,
+ modeling as tests_modeling
+)
+
+
+@pytest.fixture
+def storage():
+ base_storage = ModelStorage(sql_mapi.SQLAlchemyModelAPI,
+ initiator=tests_storage.init_inmemory_model_storage)
+ base_storage.register(tests_modeling.MockModel)
+ yield base_storage
+ tests_storage.release_sqlite_storage(base_storage)
+
+
+@pytest.fixture(scope='module', autouse=True)
+def module_cleanup():
+ modeling.models.aria_declarative_base.metadata.remove(tests_modeling.MockModel.__table__) #pylint: disable=no-member
+
+
+def test_storage_base(storage):
+ with pytest.raises(AttributeError):
+ storage.non_existent_attribute()
+
+
+def test_model_storage(storage):
+ mock_model = tests_modeling.MockModel(value=0, name='model_name')
+ storage.mock_model.put(mock_model)
+
+ assert storage.mock_model.get_by_name('model_name') == mock_model
+
+ assert [mm_from_storage for mm_from_storage in storage.mock_model.iter()] == [mock_model]
+ assert [mm_from_storage for mm_from_storage in storage.mock_model] == [mock_model]
+
+ storage.mock_model.delete(mock_model)
+ with pytest.raises(exceptions.StorageError):
+ storage.mock_model.get(mock_model.id)
+
+
+def test_application_storage_factory():
+ storage = application_model_storage(sql_mapi.SQLAlchemyModelAPI,
+ initiator=tests_storage.init_inmemory_model_storage)
+
+ assert storage.service_template
+ assert storage.node_template
+ assert storage.group_template
+ assert storage.policy_template
+ assert storage.substitution_template
+ assert storage.substitution_template_mapping
+ assert storage.requirement_template
+ assert storage.relationship_template
+ assert storage.capability_template
+ assert storage.interface_template
+ assert storage.operation_template
+ assert storage.artifact_template
+
+ assert storage.service
+ assert storage.node
+ assert storage.group
+ assert storage.policy
+ assert storage.substitution
+ assert storage.substitution_mapping
+ assert storage.relationship
+ assert storage.capability
+ assert storage.interface
+ assert storage.operation
+ assert storage.artifact
+
+ assert storage.execution
+ assert storage.service_update
+ assert storage.service_update_step
+ assert storage.service_modification
+ assert storage.plugin
+ assert storage.task
+
+ assert storage.input
+ assert storage.output
+ assert storage.property
+ assert storage.attribute
+
+ assert storage.type
+ assert storage.metadata
+
+ tests_storage.release_sqlite_storage(storage)
+
+
+def test_cascade_deletion(context):
+ service = context.model.service.list()[0]
+
+ assert len(context.model.service_template.list()) == 1
+ assert len(service.nodes) == len(context.model.node.list()) == 2
+
+ context.model.service.delete(service)
+
+ assert len(context.model.service_template.list()) == 1
+ assert len(context.model.service.list()) == 0
+ assert len(context.model.node.list()) == 0
+
+
+@pytest.fixture
+def context(tmpdir):
+ result = mock.context.simple(str(tmpdir))
+ yield result
+ tests_storage.release_sqlite_storage(result.model)
+
+
+def test_mapi_include(context):
+ service1 = context.model.service.list()[0]
+ service1.name = 'service1'
+ service1.service_template.name = 'service_template1'
+ context.model.service.update(service1)
+
+ service_template2 = mock.models.create_service_template('service_template2')
+ service2 = mock.models.create_service(service_template2, 'service2')
+ context.model.service.put(service2)
+
+ assert service1 != service2
+ assert service1.service_template != service2.service_template
+
+ def assert_include(service):
+ st_name = context.model.service.get(service.id, include=('service_template_name',))
+ st_name_list = context.model.service.list(filters={'id': service.id},
+ include=('service_template_name', ))
+ assert len(st_name) == len(st_name_list) == 1
+ assert st_name[0] == st_name_list[0][0] == service.service_template.name
+
+ assert_include(service1)
+ assert_include(service2)
+
+
+class MockModel(modeling.models.aria_declarative_base, modeling.mixins.ModelMixin): #pylint: disable=abstract-method
+ __tablename__ = 'op_mock_model'
+
+ name = Column(Text)
+ value = Column(Integer)
+
+
+class TestFilterOperands(object):
+
+ @pytest.fixture()
+ def storage(self):
+ model_storage = application_model_storage(
+ sql_mapi.SQLAlchemyModelAPI, initiator=tests_storage.init_inmemory_model_storage)
+ model_storage.register(MockModel)
+ for value in (1, 2, 3, 4):
+ model_storage.op_mock_model.put(MockModel(value=value))
+ yield model_storage
+ tests_storage.release_sqlite_storage(model_storage)
+
+ def test_gt(self, storage):
+ assert len(storage.op_mock_model.list(filters=dict(value=dict(gt=3)))) == 1
+ assert len(storage.op_mock_model.list(filters=dict(value=dict(gt=4)))) == 0
+
+ def test_ge(self, storage):
+ assert len(storage.op_mock_model.list(filters=dict(value=dict(ge=3)))) == 2
+ assert len(storage.op_mock_model.list(filters=dict(value=dict(ge=5)))) == 0
+
+ def test_lt(self, storage):
+ assert len(storage.op_mock_model.list(filters=dict(value=dict(lt=2)))) == 1
+ assert len(storage.op_mock_model.list(filters=dict(value=dict(lt=1)))) == 0
+
+ def test_le(self, storage):
+ assert len(storage.op_mock_model.list(filters=dict(value=dict(le=2)))) == 2
+ assert len(storage.op_mock_model.list(filters=dict(value=dict(le=0)))) == 0
+
+ def test_eq(self, storage):
+ assert len(storage.op_mock_model.list(filters=dict(value=dict(eq=2)))) == 1
+ assert len(storage.op_mock_model.list(filters=dict(value=dict(eq=0)))) == 0
+
+ def test_neq(self, storage):
+ assert len(storage.op_mock_model.list(filters=dict(value=dict(ne=2)))) == 3
+
+ def test_gt_and_lt(self, storage):
+ assert len(storage.op_mock_model.list(filters=dict(value=dict(gt=1, lt=3)))) == 1
+ assert len(storage.op_mock_model.list(filters=dict(value=dict(gt=2, lt=2)))) == 0
+
+ def test_eq_and_ne(self, storage):
+ assert len(storage.op_mock_model.list(filters=dict(value=dict(eq=1, ne=3)))) == 1
+ assert len(storage.op_mock_model.list(filters=dict(value=dict(eq=1, ne=1)))) == 0
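The operand tests above pin down the semantics of the comparison-style filter dicts. A minimal standalone sketch of those semantics in plain Python (no ARIA required; `matches` is a hypothetical helper written here for illustration, not part of the API):

import operator

# The same operand keys the tests exercise, mapped to their comparisons.
_OPS = {'gt': operator.gt, 'ge': operator.ge, 'lt': operator.lt,
        'le': operator.le, 'eq': operator.eq, 'ne': operator.ne}

def matches(value, spec):
    # A value passes only if every operand in the filter dict holds,
    # which is why filters=dict(value=dict(gt=2, lt=2)) matches nothing.
    return all(_OPS[name](value, bound) for name, bound in spec.items())

values = (1, 2, 3, 4)
assert [v for v in values if matches(v, dict(gt=1, lt=3))] == [2]
assert [v for v in values if matches(v, dict(ne=2))] == [1, 3, 4]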
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/storage/test_resource_storage.py b/azure/aria/aria-extension-cloudify/src/aria/tests/storage/test_resource_storage.py
new file mode 100644
index 0000000..efacb2e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/storage/test_resource_storage.py
@@ -0,0 +1,280 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import tempfile
+
+import pytest
+
+from aria.storage.filesystem_rapi import FileSystemResourceAPI
+from aria.storage import (
+ exceptions,
+ ResourceStorage
+)
+from . import TestFileSystem
+
+
+class TestResourceStorage(TestFileSystem):
+ def _create(self, storage):
+ storage.register('service_template')
+
+ def _upload(self, storage, tmp_path, id):
+ with open(tmp_path, 'w') as f:
+ f.write('fake context')
+
+ storage.service_template.upload(entry_id=id, source=tmp_path)
+
+ def _upload_dir(self, storage, tmp_dir, tmp_file_name, id):
+ file_source = os.path.join(tmp_dir, tmp_file_name)
+ with open(file_source, 'w') as f:
+ f.write('fake context')
+
+ storage.service_template.upload(entry_id=id, source=tmp_dir)
+
+ def _create_storage(self):
+ return ResourceStorage(FileSystemResourceAPI,
+ api_kwargs=dict(directory=self.path))
+
+ def test_name(self):
+ api = FileSystemResourceAPI
+ storage = ResourceStorage(FileSystemResourceAPI,
+ items=['service_template'],
+ api_kwargs=dict(directory=self.path))
+ assert repr(storage) == 'ResourceStorage(api={api})'.format(api=api)
+ assert 'directory={resource_dir}'.format(resource_dir=self.path) in \
+ repr(storage.registered['service_template'])
+
+ def test_create(self):
+ storage = self._create_storage()
+ self._create(storage)
+ assert os.path.exists(os.path.join(self.path, 'service_template'))
+
+ def test_upload_file(self):
+ storage = ResourceStorage(FileSystemResourceAPI, api_kwargs=dict(directory=self.path))
+ self._create(storage)
+ tmpfile_path = tempfile.mkstemp(suffix=self.__class__.__name__, dir=self.path)[1]
+ self._upload(storage, tmpfile_path, id='service_template_id')
+
+ storage_path = os.path.join(
+ self.path,
+ 'service_template',
+ 'service_template_id',
+ os.path.basename(tmpfile_path))
+ assert os.path.exists(storage_path)
+
+ with open(storage_path, 'rb') as f:
+ assert f.read() == 'fake context'
+
+ def test_download_file(self):
+ storage = self._create_storage()
+ self._create(storage)
+ tmpfile_path = tempfile.mkstemp(suffix=self.__class__.__name__, dir=self.path)[1]
+ tmpfile_name = os.path.basename(tmpfile_path)
+ self._upload(storage, tmpfile_path, 'service_template_id')
+
+ temp_dir = tempfile.mkdtemp(dir=self.path)
+ storage.service_template.download(
+ entry_id='service_template_id',
+ destination=temp_dir,
+ path=tmpfile_name)
+
+ with open(os.path.join(temp_dir, tmpfile_name)) as f:
+ assert f.read() == 'fake context'
+
+ def test_download_non_existing_file(self):
+ storage = self._create_storage()
+ self._create(storage)
+ with pytest.raises(exceptions.StorageError):
+ storage.service_template.download(entry_id='service_template_id', destination='',
+ path='fake_path')
+
+ def test_data_non_existing_file(self):
+ storage = self._create_storage()
+ self._create(storage)
+ with pytest.raises(exceptions.StorageError):
+ storage.service_template.read(entry_id='service_template_id', path='fake_path')
+
+ def test_data_file(self):
+ storage = self._create_storage()
+ self._create(storage)
+ tmpfile_path = tempfile.mkstemp(suffix=self.__class__.__name__, dir=self.path)[1]
+ self._upload(storage, tmpfile_path, 'service_template_id')
+
+ assert storage.service_template.read(entry_id='service_template_id',
+ path=os.path.basename(tmpfile_path)) == 'fake context'
+
+ def test_upload_dir(self):
+ storage = self._create_storage()
+ self._create(storage)
+ tmp_dir = tempfile.mkdtemp(suffix=self.__class__.__name__, dir=self.path)
+ second_level_tmp_dir = tempfile.mkdtemp(dir=tmp_dir)
+ tmp_filename = tempfile.mkstemp(dir=second_level_tmp_dir)[1]
+ self._upload_dir(storage, tmp_dir, tmp_filename, id='service_template_id')
+
+ destination = os.path.join(
+ self.path,
+ 'service_template',
+ 'service_template_id',
+ os.path.basename(second_level_tmp_dir),
+ os.path.basename(tmp_filename))
+
+ assert os.path.isfile(destination)
+
+ def test_upload_path_in_dir(self):
+ storage = self._create_storage()
+ self._create(storage)
+ tmp_dir = tempfile.mkdtemp(suffix=self.__class__.__name__, dir=self.path)
+ second_level_tmp_dir = tempfile.mkdtemp(dir=tmp_dir)
+ tmp_filename = tempfile.mkstemp(dir=second_level_tmp_dir)[1]
+ self._upload_dir(storage, tmp_dir, tmp_filename, id='service_template_id')
+
+ second_update_file = tempfile.mkstemp(dir=self.path)[1]
+ with open(second_update_file, 'w') as f:
+ f.write('fake context2')
+
+ storage.service_template.upload(
+ entry_id='service_template_id',
+ source=second_update_file,
+ path=os.path.basename(second_level_tmp_dir))
+
+ assert os.path.isfile(os.path.join(
+ self.path,
+ 'service_template',
+ 'service_template_id',
+ os.path.basename(second_level_tmp_dir),
+ os.path.basename(second_update_file)))
+
+ def test_download_dir(self):
+ storage = self._create_storage()
+ self._create(storage)
+ tmp_dir = tempfile.mkdtemp(suffix=self.__class__.__name__, dir=self.path)
+ second_level_tmp_dir = tempfile.mkdtemp(dir=tmp_dir)
+ tmp_filename = tempfile.mkstemp(dir=second_level_tmp_dir)[1]
+ self._upload_dir(storage, tmp_dir, tmp_filename, id='service_template_id')
+
+ temp_destination_dir = tempfile.mkdtemp(dir=self.path)
+ storage.service_template.download(
+ entry_id='service_template_id',
+ destination=temp_destination_dir)
+
+ destination_file_path = os.path.join(
+ temp_destination_dir,
+ os.path.basename(second_level_tmp_dir),
+ os.path.basename(tmp_filename))
+
+ assert os.path.isfile(destination_file_path)
+
+ with open(destination_file_path) as f:
+ assert f.read() == 'fake context'
+
+ def test_data_dir(self):
+ storage = self._create_storage()
+ self._create(storage)
+
+ tmp_dir = tempfile.mkdtemp(suffix=self.__class__.__name__, dir=self.path)
+ tempfile.mkstemp(dir=tmp_dir)
+ tempfile.mkstemp(dir=tmp_dir)
+
+ storage.service_template.upload(entry_id='service_template_id', source=tmp_dir)
+
+ with pytest.raises(exceptions.StorageError):
+ storage.service_template.read(entry_id='service_template_id', path='')
+
+ def test_delete_resource(self):
+ storage = self._create_storage()
+ self._create(storage)
+ tmpfile_path = tempfile.mkstemp(suffix=self.__class__.__name__, dir=self.path)[1]
+ self._upload(storage, tmpfile_path, 'service_template_id')
+ tmpfile2_path = tempfile.mkstemp(suffix=self.__class__.__name__, dir=self.path)[1]
+ self._upload(storage, tmpfile2_path, 'service_template_id')
+
+ # deleting the first resource and expecting an error on read
+ storage.service_template.delete(entry_id='service_template_id',
+ path=os.path.basename(tmpfile_path))
+ with pytest.raises(exceptions.StorageError):
+ storage.service_template.read(entry_id='service_template_id',
+ path=os.path.basename(tmpfile_path))
+ # the second resource should still be available for reading
+ assert storage.service_template.read(
+ entry_id='service_template_id',
+ path=os.path.basename(tmpfile2_path)) == 'fake context'
+
+ def test_delete_directory(self):
+ storage = self._create_storage()
+ self._create(storage)
+ temp_destination_dir = tempfile.mkdtemp(dir=self.path)
+
+ tmp_dir = tempfile.mkdtemp(suffix=self.__class__.__name__, dir=self.path)
+ second_level_tmp_dir = tempfile.mkdtemp(dir=tmp_dir)
+ tmp_filename = tempfile.mkstemp(dir=second_level_tmp_dir)[1]
+ self._upload_dir(storage, tmp_dir, tmp_filename, id='service_template_id')
+ file_path_in_dir = os.path.join(
+ os.path.basename(second_level_tmp_dir),
+ os.path.basename(tmp_filename))
+
+ # should be able to read the file and download the directory
+ assert storage.service_template.read(
+ entry_id='service_template_id',
+ path=file_path_in_dir) == 'fake context'
+ storage.service_template.download(
+ entry_id='service_template_id',
+ path=os.path.basename(second_level_tmp_dir),
+ destination=temp_destination_dir)
+
+ # after deletion, the file and directory should both be gone
+ storage.service_template.delete(
+ entry_id='service_template_id',
+ path=os.path.basename(second_level_tmp_dir))
+ with pytest.raises(exceptions.StorageError):
+ assert storage.service_template.read(
+ entry_id='service_template_id',
+ path=file_path_in_dir) == 'fake context'
+ with pytest.raises(exceptions.StorageError):
+ storage.service_template.download(
+ entry_id='service_template_id',
+ path=os.path.basename(second_level_tmp_dir),
+ destination=temp_destination_dir)
+
+ def test_delete_all_resources(self):
+ storage = self._create_storage()
+ self._create(storage)
+ temp_destination_dir = tempfile.mkdtemp(dir=self.path)
+
+ tmp_dir = tempfile.mkdtemp(suffix=self.__class__.__name__, dir=self.path)
+ second_level_tmp_dir = tempfile.mkdtemp(dir=tmp_dir)
+ tmp_filename = tempfile.mkstemp(dir=second_level_tmp_dir)[1]
+ self._upload_dir(storage, tmp_dir, tmp_filename, id='service_template_id')
+ file_path_in_dir = os.path.join(
+ os.path.basename(second_level_tmp_dir),
+ os.path.basename(tmp_filename))
+
+ # deleting without specifying a path - delete all resources of this entry
+ storage.service_template.delete(entry_id='service_template_id')
+ with pytest.raises(exceptions.StorageError):
+ assert storage.service_template.read(
+ entry_id='service_template_id',
+ path=file_path_in_dir) == 'fake context'
+ with pytest.raises(exceptions.StorageError):
+ storage.service_template.download(
+ entry_id='service_template_id',
+ path=os.path.basename(second_level_tmp_dir),
+ destination=temp_destination_dir)
+
+ def test_delete_nonexisting_resource(self):
+ storage = self._create_storage()
+ self._create(storage)
+ # deleting a nonexistent resource is expected to have no effect
+ assert storage.service_template.delete(entry_id='service_template_id',
+ path='fake-file') is False
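Condensed, the tests above walk a single resource through its life cycle. A minimal sketch of that cycle using only the calls that appear in the tests (the directory and file names are illustrative):

import os
import tempfile

from aria.storage import ResourceStorage
from aria.storage.filesystem_rapi import FileSystemResourceAPI

base_dir = tempfile.mkdtemp()
storage = ResourceStorage(FileSystemResourceAPI,
                          api_kwargs=dict(directory=base_dir))
storage.register('service_template')  # creates <base_dir>/service_template

source = os.path.join(base_dir, 'blueprint.yaml')  # illustrative file name
with open(source, 'w') as f:
    f.write('fake context')

# upload -> read -> delete, mirroring the happy paths tested above
storage.service_template.upload(entry_id='st-1', source=source)
assert storage.service_template.read(entry_id='st-1',
                                     path='blueprint.yaml') == 'fake context'
storage.service_template.delete(entry_id='st-1', path='blueprint.yaml')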
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/test_extension.py b/azure/aria/aria-extension-cloudify/src/aria/tests/test_extension.py
new file mode 100644
index 0000000..f0378fd
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/test_extension.py
@@ -0,0 +1,156 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from aria import extension
+
+# pylint: disable=no-member,no-method-argument,unused-variable
+
+
+class TestRegistrar(object):
+
+ def test_list_based_registrar_with_single_element_registration(self):
+ class ExtensionRegistration(extension._ExtensionRegistration):
+ @extension._registrar
+ def list_based_registrar(*_):
+ return []
+ extension_registration = ExtensionRegistration()
+
+ @extension_registration
+ class Extension(object):
+ def list_based_registrar(self):
+ return True
+
+ assert extension_registration.list_based_registrar() == []
+ extension_registration.init()
+ assert extension_registration.list_based_registrar() == [True]
+
+ def test_list_based_registrar_with_sequence_element_registration(self):
+ class ExtensionRegistration(extension._ExtensionRegistration):
+ @extension._registrar
+ def list_based_registrar1(*_):
+ return []
+
+ @extension._registrar
+ def list_based_registrar2(*_):
+ return []
+
+ @extension._registrar
+ def list_based_registrar3(*_):
+ return []
+ extension_registration = ExtensionRegistration()
+
+ @extension_registration
+ class Extension(object):
+ def list_based_registrar1(*_):
+ return [True, True]
+
+ def list_based_registrar2(*_):
+ return True, True
+
+ def list_based_registrar3(*_):
+ return set([True])
+
+ extension_registration.init()
+ assert extension_registration.list_based_registrar1() == [True, True]
+ assert extension_registration.list_based_registrar2() == [True, True]
+ assert extension_registration.list_based_registrar3() == [True]
+
+ def test_dict_based_registrar(self):
+ class ExtensionRegistration(extension._ExtensionRegistration):
+ @extension._registrar
+ def dict_based_registrar(*_):
+ return {}
+ extension_registration = ExtensionRegistration()
+
+ @extension_registration
+ class Extension1(object):
+ def dict_based_registrar(self):
+ return {
+ 'a': 'a',
+ 'b': 'b'
+ }
+
+ @extension_registration
+ class Extension2(object):
+ def dict_based_registrar(self):
+ return {
+ 'c': 'c',
+ 'd': 'd'
+ }
+
+ assert extension_registration.dict_based_registrar() == {}
+ extension_registration.init()
+ assert extension_registration.dict_based_registrar() == {
+ 'a': 'a',
+ 'b': 'b',
+ 'c': 'c',
+ 'd': 'd'
+ }
+
+ def test_invalid_duplicate_key_dict_based_registrar(self):
+ class ExtensionRegistration(extension._ExtensionRegistration):
+ @extension._registrar
+ def dict_based_registrar(*_):
+ return {}
+ extension_registration = ExtensionRegistration()
+
+ @extension_registration
+ class Extension1(object):
+ def dict_based_registrar(self):
+ return {
+ 'a': 'val1',
+ }
+
+ @extension_registration
+ class Extension2(object):
+ def dict_based_registrar(self):
+ return {
+ 'a': 'val2',
+ }
+
+ with pytest.raises(RuntimeError):
+ extension_registration.init()
+
+ def test_unsupported_registrar(self):
+ with pytest.raises(RuntimeError):
+ class ExtensionRegistration(extension._ExtensionRegistration):
+ @extension._registrar
+ def unsupported_registrar(*_):
+ return set()
+ extension_registration = ExtensionRegistration()
+
+ @extension_registration
+ class Extension(object):
+ def unsupported_registrar(self):
+ return True
+
+ extension_registration.init()
+
+ def test_unimplemented_registration(self):
+ class ExtensionRegistration(extension._ExtensionRegistration):
+ @extension._registrar
+ def list_based_registrar(*_):
+ return []
+ extension_registration = ExtensionRegistration()
+
+ @extension_registration
+ class Extension(object):
+ pass
+
+ assert extension_registration.list_based_registrar() == []
+ extension_registration.init()
+ assert extension_registration.list_based_registrar() == []
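For orientation, the registration machinery these tests poke at works roughly as follows: a `_registrar`-decorated method on an `_ExtensionRegistration` subclass declares the aggregate's empty value, decorated extension classes contribute entries, and nothing is collected until `init()` runs. A minimal sketch using the same private hooks the tests import:

from aria import extension

class MyRegistration(extension._ExtensionRegistration):
    @extension._registrar
    def things(*_):
        return []  # list-based registrar: contributions get appended

my_registration = MyRegistration()

@my_registration
class MyExtension(object):
    def things(self):
        return ['a', 'b']  # sequence contributions are flattened into the list

assert my_registration.things() == []  # nothing collected before init()
my_registration.init()
assert my_registration.things() == ['a', 'b']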
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/test_logger.py b/azure/aria/aria-extension-cloudify/src/aria/tests/test_logger.py
new file mode 100644
index 0000000..d6999fd
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/test_logger.py
@@ -0,0 +1,129 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from aria.logger import (create_logger,
+ create_console_log_handler,
+ create_file_log_handler,
+ _default_file_formatter,
+ LoggerMixin,
+ _DefaultConsoleFormat)
+
+
+def test_create_logger():
+
+ logger = create_logger()
+ assert logger.name == 'aria'
+ assert len(logger.handlers) == 0
+ assert logger.level == logging.DEBUG
+
+ custom_logger = logging.getLogger('custom_logger')
+ handlers = [logging.FileHandler, logging.StreamHandler]
+ logger = create_logger(logger=custom_logger, handlers=handlers, level=logging.INFO)
+ assert custom_logger.name == 'custom_logger'
+ assert logger.handlers == handlers
+ assert logger.level == logging.INFO
+
+
+def test_create_console_log_handler(capsys):
+
+ debug_test_string = 'debug_create_console_test_string'
+ info_test_string = 'info_create_console_test_string'
+
+ # Default handler
+ handler = create_console_log_handler()
+ assert isinstance(handler, logging.StreamHandler)
+ assert isinstance(handler.formatter, _DefaultConsoleFormat)
+ assert handler.level == logging.DEBUG
+
+ logger = create_logger(handlers=[handler])
+
+ logger.info(info_test_string)
+ logger.debug(debug_test_string)
+ _, err = capsys.readouterr()
+
+ assert '[DEBUG]> {test_string}'.format(test_string=debug_test_string) in err
+ assert err.count(info_test_string) == 1
+
+ # Custom handler
+ custom_handler = create_console_log_handler(level=logging.INFO, formatter=logging.Formatter())
+ assert isinstance(custom_handler.formatter, logging.Formatter)
+ assert custom_handler.level == logging.INFO
+
+ logger = create_logger(handlers=[custom_handler])
+
+ logger.info(info_test_string)
+ _, err = capsys.readouterr()
+
+ assert err.count(info_test_string) == 1
+
+
+def test_create_file_log_handler(tmpdir):
+
+ test_string = 'create_file_log_test_string'
+
+ debug_log = tmpdir.join('debug.log')
+ handler = create_file_log_handler(file_path=str(debug_log))
+ assert handler.baseFilename == str(debug_log)
+ assert handler.maxBytes == 5 * 1000 * 1024
+ assert handler.backupCount == 10
+ assert handler.stream is None
+ assert handler.level == logging.DEBUG
+ assert handler.formatter == _default_file_formatter
+
+ logger = create_logger(handlers=[handler])
+ logger.debug(test_string)
+ assert test_string in debug_log.read()
+
+ info_log = tmpdir.join('info.log')
+ handler = create_file_log_handler(
+ file_path=str(info_log),
+ level=logging.INFO,
+ max_bytes=1000,
+ backup_count=2,
+ formatter=logging.Formatter()
+ )
+ assert handler.baseFilename == str(info_log)
+ assert handler.level == logging.INFO
+ assert handler.maxBytes == 1000
+ assert handler.backupCount == 2
+ assert isinstance(handler.formatter, logging.Formatter)
+
+ logger = create_logger(handlers=[handler])
+ logger.info(test_string)
+ assert test_string in info_log.read()
+
+
+def test_loggermixin(capsys):
+
+ test_string = 'loggermixin_test_string'
+
+ logger = create_logger(handlers=[create_console_log_handler()])
+
+ custom_class = type('CustomClass', (LoggerMixin,), {}).with_logger()
+ custom_class.logger.debug(test_string)
+
+ _, err = capsys.readouterr()
+ assert test_string in err
+
+ for handler in logger.handlers:
+ logger.removeHandler(handler)
+
+ # TODO: figure out what's up with pickle
+ # class_pickled = pickle.dumps(custom_class)
+ # class_unpickled = pickle.loads(class_pickled)
+ #
+ # assert vars(class_unpickled) == vars(custom_class)
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/utils/__init__.py b/azure/aria/aria-extension-cloudify/src/aria/tests/utils/__init__.py
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/utils/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_exceptions.py b/azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_exceptions.py
new file mode 100644
index 0000000..5d030e2
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_exceptions.py
@@ -0,0 +1,73 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import jsonpickle
+
+from aria.utils import exceptions
+
+_ARG1 = 'arg-1'
+_ARG2 = 'arg-2'
+
+
+class TestWrapIfNeeded(object):
+
+ def test_no_wrapping_required1(self):
+ e = JsonPickleableException1(_ARG1, _ARG2)
+ assert exceptions.wrap_if_needed(e) is e
+
+ def test_no_wrapping_required2(self):
+ e = JsonPickleableException1(arg1=_ARG1, arg2=_ARG2)
+ assert exceptions.wrap_if_needed(e) is e
+
+ def test_no_wrapping_required3(self):
+ e = JsonPickleableException2(arg1=_ARG1, arg2=_ARG2)
+ assert exceptions.wrap_if_needed(e) is e
+
+ def test_wrapping_required1(self):
+ e = NonJsonPickleableException(_ARG1, _ARG2)
+ wrapped_e = exceptions.wrap_if_needed(e)
+ wrapped_e = jsonpickle.loads(jsonpickle.dumps(wrapped_e))
+ assert isinstance(wrapped_e, exceptions._WrappedException)
+ assert wrapped_e.exception_type == type(e).__name__
+ assert wrapped_e.exception_str == str(e)
+
+ def test_wrapping_required2(self):
+ e = NonJsonPickleableException(arg1=_ARG1, arg2=_ARG2)
+ wrapped_e = exceptions.wrap_if_needed(e)
+ wrapped_e = jsonpickle.loads(jsonpickle.dumps(wrapped_e))
+ assert isinstance(wrapped_e, exceptions._WrappedException)
+ assert wrapped_e.exception_type == type(e).__name__
+ assert wrapped_e.exception_str == str(e)
+
+
+class JsonPickleableException1(Exception):
+ def __init__(self, arg1, arg2):
+ super(JsonPickleableException1, self).__init__(arg1, arg2)
+ self.arg1 = arg1
+ self.arg2 = arg2
+
+
+class JsonPickleableException2(Exception):
+ def __init__(self, arg1=None, arg2=None):
+ super(JsonPickleableException2, self).__init__()
+ self.arg1 = arg1
+ self.arg2 = arg2
+
+
+class NonJsonPickleableException(Exception):
+ def __init__(self, arg1, arg2):
+ super(NonJsonPickleableException, self).__init__()
+ self.arg1 = arg1
+ self.arg2 = arg2
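The dividing line in these tests is whether jsonpickle can round-trip the exception; when it cannot, `wrap_if_needed` substitutes a `_WrappedException` that carries only the type name and message. A condensed sketch of that contract, assuming the same behavior the tests assert (`Unpicklable` is a hypothetical example class):

import jsonpickle

from aria.utils import exceptions

class Unpicklable(Exception):  # hypothetical, mirrors NonJsonPickleableException
    def __init__(self, detail):
        # Keeping state off Exception.__init__ defeats jsonpickle's
        # default round trip, just like in the test class above.
        super(Unpicklable, self).__init__()
        self.detail = detail

wrapped = exceptions.wrap_if_needed(Unpicklable('boom'))
wrapped = jsonpickle.loads(jsonpickle.dumps(wrapped))
assert isinstance(wrapped, exceptions._WrappedException)
assert wrapped.exception_type == 'Unpicklable'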
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_plugin.py b/azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_plugin.py
new file mode 100644
index 0000000..c91d0c9
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_plugin.py
@@ -0,0 +1,58 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import pytest
+
+from aria.orchestrator import exceptions
+from aria.utils.plugin import create as create_plugin
+
+from ..fixtures import ( # pylint: disable=unused-import
+ plugins_dir,
+ plugin_manager,
+ inmemory_model as model
+)
+
+
+PACKAGE_NAME = 'mock-plugin'
+PACKAGE_VERSION = '100'
+
+
+class TestPluginManager(object):
+
+ def test_install(self, plugin_manager, mock_plugin, model, plugins_dir):
+ plugin = plugin_manager.install(mock_plugin)
+ assert plugin.package_name == PACKAGE_NAME
+ assert plugin.package_version == PACKAGE_VERSION
+ assert plugin == model.plugin.get(plugin.id)
+ plugin_dir = os.path.join(plugins_dir, '{0}-{1}'.format(PACKAGE_NAME, PACKAGE_VERSION))
+ assert os.path.isdir(plugin_dir)
+ assert plugin_dir == plugin_manager.get_plugin_dir(plugin)
+
+ def test_install_already_exists(self, plugin_manager, mock_plugin):
+ plugin_manager.install(mock_plugin)
+ with pytest.raises(exceptions.PluginAlreadyExistsError):
+ plugin_manager.install(mock_plugin)
+
+
+@pytest.fixture
+def mock_plugin(tmpdir):
+ source_dir = tmpdir.join('mock_plugin')
+ source_dir.mkdir()
+ setup_py = source_dir.join('setup.py')
+ setup_py.write('from setuptools import setup; setup(name="{0}", version="{1}")'
+ .format(PACKAGE_NAME, PACKAGE_VERSION))
+ return create_plugin(source=str(source_dir), destination_dir=str(tmpdir))
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_threading.py b/azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_threading.py
new file mode 100644
index 0000000..d24661f
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_threading.py
@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import pytest
+
+from aria.utils import threading
+
+
+def test_exception_raised_from_thread():
+
+ def error_raising_func():
+ raise ValueError('This is an error')
+
+ thread = threading.ExceptionThread(target=error_raising_func)
+ thread.start()
+ thread.join()
+
+ assert thread.is_error()
+ with pytest.raises(ValueError):
+ thread.raise_error_if_exists()
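What `ExceptionThread` adds is visibility: a plain `threading.Thread` lets a failure in its target die with the thread. A standard-library-only sketch of the behavior it guards against:

import threading

def boom():
    raise ValueError('lost in a plain thread')

thread = threading.Thread(target=boom)
thread.start()
thread.join()  # returns normally; the traceback only went to stderr
# No is_error()/raise_error_if_exists() here -- the caller has no
# programmatic way to learn that boom() failed.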
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_validation.py b/azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_validation.py
new file mode 100644
index 0000000..8e35f22
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_validation.py
@@ -0,0 +1,35 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from aria.utils import validation
+
+
+def test_function_kwargs_validation():
+
+ def mock_function(arg1, arg2=1, arg3=1):
+ pass
+
+ with pytest.raises(ValueError):
+ validation.validate_function_arguments(mock_function, dict(arg2=1))
+ with pytest.raises(ValueError):
+ validation.validate_function_arguments(mock_function, dict(arg3=3))
+ with pytest.raises(ValueError):
+ validation.validate_function_arguments(mock_function, dict(arg2=2, arg3=3))
+
+ validation.validate_function_arguments(mock_function, dict(arg1=1, arg3=3))
+ validation.validate_function_arguments(mock_function, dict(arg1=1, arg2=2))
+ validation.validate_function_arguments(mock_function, dict(arg1=1, arg2=2, arg3=3))
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_versions.py b/azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_versions.py
new file mode 100644
index 0000000..222949c
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tests/utils/test_versions.py
@@ -0,0 +1,85 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.utils.versions import (VersionString, parse_version_string)
+
+
+def test_version_string():
+ # No qualifiers
+ assert VersionString('20') == VersionString('20')
+ assert VersionString('20') == VersionString('20.0')
+ assert VersionString('20') == VersionString('20.0.0')
+ assert VersionString('20') < VersionString('20.0.1')
+
+ # With numeric qualifiers
+ assert VersionString('20.0.1-1') < VersionString('20.0.1-2')
+ assert VersionString('20.0.1-0') < VersionString('20.0.1')
+ assert VersionString('20.0.1-1') < VersionString('20.0.1')
+
+ # With prefixed qualifiers
+ assert VersionString('20.0.1-beta1') < VersionString('20.0.1-beta2')
+ assert VersionString('20.0.1-beta1') < VersionString('20.0.1-1')
+ assert VersionString('20.0.1-beta1') < VersionString('20.0.1')
+ assert VersionString('20.0.1-beta2') < VersionString('20.0.1-rc2')
+ assert VersionString('20.0.1-alpha2') < VersionString('20.0.1-beta1')
+ assert VersionString('20.0.1-dev2') < VersionString('20.0.1-alpha1')
+ assert VersionString('20.0.1-DEV2') < VersionString('20.0.1-ALPHA1')
+
+ # Coercive comparisons
+ assert VersionString('20.0.0') == VersionString(10 * 2)
+ assert VersionString('20.0.0') == VersionString(20.0)
+
+ # Non-VersionString comparisons
+ assert VersionString('20.0.0') == 20
+ assert VersionString('20.0.0') < '20.0.1'
+
+ # Nulls
+ assert VersionString() == VersionString()
+ assert VersionString() == VersionString.NULL
+ assert VersionString(None) == VersionString.NULL
+ assert VersionString.NULL == None # pylint: disable=singleton-comparison
+ assert VersionString.NULL == 0
+
+ # Invalid version strings
+ assert VersionString('maxim is maxim') == VersionString.NULL
+ assert VersionString('20.maxim.0') == VersionString.NULL
+ assert VersionString('20.0.0-maxim1') == VersionString.NULL
+ assert VersionString('20.0.1-1.1') == VersionString.NULL
+
+ # Sorts
+ v1 = VersionString('20.0.0')
+ v2 = VersionString('20.0.1-beta1')
+ v3 = VersionString('20.0.1')
+ v4 = VersionString('20.0.2')
+ assert [v1, v2, v3, v4] == sorted([v4, v3, v2, v1], key=lambda v: v.key)
+
+ # Sets
+ v1 = VersionString('20.0.0')
+ v2 = VersionString('20.0')
+ v3 = VersionString('20')
+ assert set([v1]) == set([v1, v2, v3])
+
+ # Dicts
+ the_dict = {v1: 'test'}
+ assert the_dict.get(v2) == 'test'
+
+def test_parse_version_string():
+ # One test of each type from the groups above should be enough
+ assert parse_version_string('20') < parse_version_string('20.0.1')
+ assert parse_version_string('20.0.1-1') < parse_version_string('20.0.1-2')
+ assert parse_version_string('20.0.1-beta1') < parse_version_string('20.0.1-beta2')
+ assert parse_version_string('20.0.0') == parse_version_string(10 * 2)
+ assert parse_version_string(None) == parse_version_string(0)
+ assert parse_version_string(None) == parse_version_string('maxim is maxim')
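As the sort assertions above rely on, a `VersionString` exposes a `key` attribute suitable for ordering. A small sketch of sorting arbitrary version strings with it:

from aria.utils.versions import VersionString

releases = ['20.0.1-beta1', '20.0.0', '20.0.2', '20.0.1']
ordered = sorted((VersionString(v) for v in releases), key=lambda v: v.key)
assert [str(v) for v in ordered] == ['20.0.0', '20.0.1-beta1',
                                     '20.0.1', '20.0.2']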
diff --git a/azure/aria/aria-extension-cloudify/src/aria/tox.ini.bkp b/azure/aria/aria-extension-cloudify/src/aria/tox.ini.bkp
new file mode 100644
index 0000000..765435a
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/src/aria/tox.ini.bkp
@@ -0,0 +1,116 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[tox]
+envlist=py27,py26,py27e2e,py26e2e,pywin,py27ssh,pylint_code,pylint_tests,docs
+processes={env:PYTEST_PROCESSES:auto}
+py26={env:PY26:python2.6}
+
+[testenv]
+whitelist_externals=
+ rm
+passenv=
+ TRAVIS
+ PYTHON
+ PYTHON_VERSION
+ PYTHON_ARCH
+deps=
+ --requirement
+ requirements.txt
+ --requirement
+ tests/requirements.txt
+basepython=
+ py26: {[tox]py26}
+ py27: python2.7
+ py26e2e: {[tox]py26}
+ py27e2e: python2.7
+ py26ssh: {[tox]py26}
+ py27ssh: python2.7
+ pywin: {env:PYTHON:}\python.exe
+ pylint_code: python2.7
+ pylint_tests: python2.7
+ docs: python2.7
+
+[testenv:py27]
+commands=
+ pytest tests \
+ --numprocesses={[tox]processes} \
+ --ignore=tests/end2end \
+ --ignore=tests/orchestrator/execution_plugin/test_ssh.py \
+ --cov-report term-missing \
+ --cov aria
+
+[testenv:py26]
+commands=
+ pytest tests \
+ --numprocesses={[tox]processes} \
+ --ignore=tests/end2end \
+ --ignore=tests/orchestrator/execution_plugin/test_ssh.py \
+ --cov-report term-missing \
+ --cov aria
+
+[testenv:py27e2e]
+commands=
+ pytest tests/end2end \
+ --numprocesses={[tox]processes} \
+ --cov-report term-missing \
+ --cov aria
+
+[testenv:py26e2e]
+commands=
+ pytest tests/end2end \
+ --numprocesses={[tox]processes} \
+ --cov-report term-missing \
+ --cov aria
+
+[testenv:pywin]
+commands=
+ pytest tests \
+ --numprocesses={[tox]processes} \
+ --ignore=tests/end2end \
+ --ignore=tests/orchestrator/execution_plugin/test_ssh.py \
+ --cov-report term-missing \
+ --cov aria
+
+[testenv:py27ssh]
+install_command=
+ pip install {opts} {packages} .[ssh]
+commands=
+ pytest tests/orchestrator/execution_plugin/test_ssh.py \
+ --numprocesses={[tox]processes}
+
+[testenv:py26ssh]
+install_command=
+ pip install {opts} {packages} .[ssh]
+commands=
+ pytest tests/orchestrator/execution_plugin/test_ssh.py \
+ --numprocesses={[tox]processes}
+
+[testenv:pylint_code]
+commands=
+ pylint aria extensions/aria_extension_tosca/ \
+ --rcfile=aria/.pylintrc \
+ --disable=fixme,missing-docstring
+
+[testenv:pylint_tests]
+commands=
+ pylint tests \
+ --rcfile=tests/.pylintrc \
+ --disable=fixme,missing-docstring
+
+[testenv:docs]
+install_command=
+ pip install {opts} {packages} \
+ --requirement docs/requirements.txt
+commands=
+ rm --recursive --force docs/html
+ sphinx-build -W -T -b html docs docs/html
diff --git a/azure/aria/aria-extension-cloudify/tox.ini.bkp b/azure/aria/aria-extension-cloudify/tox.ini.bkp
new file mode 100644
index 0000000..271ddca
--- /dev/null
+++ b/azure/aria/aria-extension-cloudify/tox.ini.bkp
@@ -0,0 +1,56 @@
+# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+[tox]
+envlist=py27,py26,pywin,flake8code,flake8tests
+processes={env:PYTEST_PROCESSES:auto}
+
+[testenv]
+deps =
+ --requirement
+ requirements.txt
+ --requirement
+ aria_extension_tests/requirements.txt
+basepython =
+ py26: python2.6
+ py27: python2.7
+ flake8: python2.7
+ pywin: {env:PYTHON:}\python.exe
+
+[testenv:py27]
+commands=
+ pytest aria_extension_tests \
+ --numprocesses={[tox]processes} \
+ --cov-report term-missing \
+ --cov adapters
+
+[testenv:py26]
+commands=
+ pytest aria_extension_tests \
+ --numprocesses={[tox]processes} \
+ --cov-report term-missing \
+ --cov adapters
+
+[testenv:pywin]
+commands=
+ pytest aria_extension_tests \
+ --numprocesses={[tox]processes} \
+ --cov-report term-missing \
+ --cov adapters
+
+[testenv:flake8code]
+commands=flake8 adapters
+
+[testenv:flake8tests]
+commands=flake8 aria_extension_tests
diff --git a/azure/assembly.xml b/azure/assembly.xml
index 48255fd..6833caa 100644
--- a/azure/assembly.xml
+++ b/azure/assembly.xml
@@ -46,6 +46,7 @@
<includes>
<include>*.sh</include>
<include>Dockerfile</include>
+ <include>*.wgn</include>
</includes>
</fileSet>
<fileSet>
diff --git a/azure/docker/Dockerfile b/azure/docker/Dockerfile
index 8099aed..70924e3 100644
--- a/azure/docker/Dockerfile
+++ b/azure/docker/Dockerfile
@@ -16,6 +16,22 @@ RUN apt-get update && \
apt-get install -y unzip && \
apt-get install -y curl && \
apt-get install -y wget
+RUN apt-get install -y python-dev gcc libffi-dev libssl-dev make
+
+# ARIA orchestrator addition
+RUN pip install -U pip setuptools
+COPY /aria/ /tmp/
+WORKDIR /tmp/aria-extension-cloudify/src/aria
+RUN pip install .
+
+WORKDIR /tmp/aria-extension-cloudify
+RUN pip install .
+
+RUN pip install --force-reinstall pip==9.0.1
+WORKDIR /tmp
+ADD docker/cloudify_azure_plugin-1.4.2-py27-none-linux_x86_64.wgn /tmp/
+RUN aria plugins install cloudify_azure_plugin-1.4.2-py27-none-linux_x86_64.wgn
+RUN rm cloudify_azure_plugin-1.4.2-py27-none-linux_x86_64.wgn
RUN cd /opt/ && \
@@ -26,4 +42,5 @@ RUN cd /opt/ && \
WORKDIR /opt
+RUN chmod +x multicloud_azure/docker/*.sh && chmod +x multicloud_azure/*.sh
ENTRYPOINT multicloud_azure/docker/docker-entrypoint.sh
diff --git a/azure/docker/build_image.sh b/azure/docker/build_image.sh
index 24ba356..2865c60 100644
--- a/azure/docker/build_image.sh
+++ b/azure/docker/build_image.sh
@@ -14,7 +14,7 @@
# limitations under the License.
DIRNAME=`dirname $0`
-DOCKER_BUILD_DIR=`cd $DIRNAME/; pwd`
+DOCKER_BUILD_DIR=`cd $DIRNAME/../; pwd`
echo "DOCKER_BUILD_DIR=${DOCKER_BUILD_DIR}"
cd ${DOCKER_BUILD_DIR}
@@ -36,7 +36,7 @@ fi
function build_image {
echo "Start build docker image: ${IMAGE_NAME}"
- docker build ${BUILD_ARGS} -t ${IMAGE_NAME}:${VERSION} -t ${IMAGE_NAME}:latest -t ${IMAGE_NAME}:${STAGING} .
+ docker build ${BUILD_ARGS} -f docker/Dockerfile -t ${IMAGE_NAME}:${VERSION} -t ${IMAGE_NAME}:latest -t ${IMAGE_NAME}:${STAGING} .
}
function push_image {
diff --git a/azure/docker/cloudify_azure_plugin-1.4.2-py27-none-linux_x86_64.wgn b/azure/docker/cloudify_azure_plugin-1.4.2-py27-none-linux_x86_64.wgn
new file mode 100644
index 0000000..aef4269
--- /dev/null
+++ b/azure/docker/cloudify_azure_plugin-1.4.2-py27-none-linux_x86_64.wgn
Binary files differ
diff --git a/azure/multicloud_azure/pub/aria/__init__.py b/azure/multicloud_azure/pub/aria/__init__.py
new file mode 100644
index 0000000..a952e9e
--- /dev/null
+++ b/azure/multicloud_azure/pub/aria/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
diff --git a/azure/multicloud_azure/pub/aria/service.py b/azure/multicloud_azure/pub/aria/service.py
new file mode 100644
index 0000000..637858a
--- /dev/null
+++ b/azure/multicloud_azure/pub/aria/service.py
@@ -0,0 +1,159 @@
+# Copyright (c) 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+
+import logging
+import json
+import tempfile
+import time
+import os
+
+from multicloud_azure.pub.aria import util
+from aria.cli.core import aria
+from aria.cli import utils
+from aria.core import Core
+from aria.cli import service_template_utils
+from aria.storage import exceptions as storage_exceptions
+from aria.utils import threading
+from aria.orchestrator.workflow_runner import WorkflowRunner as Runner
+
+LOG = logging.getLogger(__name__)
+
+execution_state = util.SafeDict()
+
+
+class AriaServiceImpl(object):
+
+ def deploy_service(self, template_name, template_body, inputs, logger):
+
+ service_template_name = template_name + "-template" + \
+ time.strftime('%Y%m%d%H%M%S')
+ status = self.install_template_private(service_template_name,
+ template_body)
+ if (status[1] != 200):
+ logger.error("Error while installing the service-template")
+ return status[0], status[1]
+ else:
+ logger.info("service template {0} valiadated and stored".format(
+ service_template_name))
+ status = self.create_service(
+ status[2], template_name + time.strftime('%Y%m%d%H%M%S'), inputs)
+ if (status[1] != 200):
+ return status[0], status[1]
+ execution_id = time.strftime('%Y%m%d%H%M%S')
+ thread = threading.ExceptionThread(target=self.start_execution,
+ args=(status[2].id, execution_id,
+ inputs, 'install'))
+ thread.start()
+ return execution_id, 200
+
+ @aria.pass_model_storage
+ @aria.pass_resource_storage
+ @aria.pass_plugin_manager
+ @aria.pass_logger
+ def install_template_private(self, service_template_name, template_body,
+ model_storage,
+ resource_storage,
+ plugin_manager,
+ logger):
+ service_template_filename = "MainServiceTemplate.yaml"
+ fileSp = template_body
+ f = tempfile.NamedTemporaryFile(suffix='.csar',
+ delete=False)
+ f.write(fileSp.read())
+ f.seek(fileSp.tell(), 0)
+ service_template_path = f.name
+ fileSp.close()
+ file_path = service_template_utils.get(
+ service_template_path, service_template_filename)
+
+ core = Core(model_storage, resource_storage, plugin_manager)
+ logger.info("service-template file {}".format(file_path))
+
+ try:
+ service_template_id = core.create_service_template(
+ file_path,
+ os.path.dirname(file_path),
+ service_template_name)
+ except storage_exceptions.StorageError as e:
+ logger.error("storage exception")
+ utils.check_overriding_storage_exceptions(
+ e, 'service template', service_template_name)
+ return e.message, 500
+ except Exception as e:
+ logger.error("catchall exception")
+ return e.message, 500
+ return "service template installed", 200, service_template_id
+
+ @aria.pass_model_storage
+ @aria.pass_resource_storage
+ @aria.pass_plugin_manager
+ @aria.pass_logger
+ def create_service(self, template_id, service_name, input,
+ model_storage,
+ resource_storage,
+ plugin_manager,
+ logger):
+ """
+ Creates a service from the specified service template
+ """
+ input = input['sdnc_directives'] if 'sdnc_directives' in input else None
+ core = Core(model_storage, resource_storage, plugin_manager)
+ service = core.create_service(template_id, input, service_name)
+ logger.info("service {} created".format(service.name))
+ return "service {} created".format(service.name), 200, service
+
+ @aria.pass_model_storage
+ @aria.pass_resource_storage
+ @aria.pass_plugin_manager
+ @aria.pass_logger
+ def start_execution(self, service_id, execution_id, input, workflow_name,
+ model_storage,
+ resource_storage,
+ plugin_manager,
+ logger):
+ """
+ Start an execution for the specified service
+ """
+ input = input['sdnc_directives'] if 'sdnc_directives' in input else None
+ runner = Runner(model_storage, resource_storage, plugin_manager,
+ execution_id=execution_id,
+ service_id=service_id,
+ workflow_name=workflow_name,
+ inputs=input)
+
+ service = model_storage.service.get(service_id)
+ tname = '{}_{}_{}'.format(service.name, workflow_name,
+ runner.execution_id)
+ thread = threading.ExceptionThread(target=runner.execute,
+ name=tname)
+ thread.start()
+ execution_state[str(runner.execution_id)] = [runner, thread]
+ logger.info("execution {} started".format(runner.execution_id))
+ return json.dumps({"id": runner.execution_id}), 202
+
+ @aria.pass_model_storage
+ @aria.pass_logger
+ def show_execution(self, execution_id, model_storage, logger):
+ """
+ Return details of specified execution/Stack
+ """
+ try:
+ execution = model_storage.execution.get(execution_id)
+ except BaseException:
+ return "Execution {} not found".format(execution_id), 404
+ logger.info("showing details of execution id {}".format(execution_id))
+ return json.dumps({"execution_id": execution_id,
+ "service_name": execution.service_name,
+ "service_template_name":
+ execution.service_template_name,
+ "workflow_name": execution.workflow_name,
+ "status": execution.status}), 200
diff --git a/azure/multicloud_azure/pub/aria/util.py b/azure/multicloud_azure/pub/aria/util.py
new file mode 100644
index 0000000..7dc415e
--- /dev/null
+++ b/azure/multicloud_azure/pub/aria/util.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+
+import threading
+
+
+def make_template_name(user, template_name):
+ return "{}.{}".format(user, template_name)
+
+
+class SafeDict(dict):
+ def __init__(self, *args):
+ self._lockobj = threading.Lock()
+ dict.__init__(self, *args)
+
+ def __getitem__(self, key):
+ try:
+ self._lockobj.acquire()
+ return dict.__getitem__(self, key)
+ except Exception as ex:
+ raise ex
+ finally:
+ self._lockobj.release()
+
+ def __setitem__(self, key, value):
+ try:
+ self._lockobj.acquire()
+ dict.__setitem__(self, key, value)
+ except Exception as ex:
+ raise ex
+ finally:
+ self._lockobj.release()
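With the missing `dict.__getitem__` call restored above, `SafeDict` behaves as a dict whose reads and writes each hold the lock, which is how `service.py` uses `execution_state`. A quick usage sketch (the key value is illustrative):

from multicloud_azure.pub.aria.util import SafeDict

execution_state = SafeDict()
execution_state['20180101000000'] = ['runner', 'thread']  # lock held for the write
assert execution_state['20180101000000'] == ['runner', 'thread']  # and for the read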
diff --git a/azure/multicloud_azure/settings.py b/azure/multicloud_azure/settings.py
index 5078754..4db77bc 100644
--- a/azure/multicloud_azure/settings.py
+++ b/azure/multicloud_azure/settings.py
@@ -14,6 +14,7 @@ import os
import sys
from logging import config
from onaplogging import monkey
+from aria import install_aria_extensions
monkey.patch_all()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
@@ -96,3 +97,5 @@ if 'test' in sys.argv:
TEST_OUTPUT_VERBOSE = True
TEST_OUTPUT_DESCRIPTIONS = True
TEST_OUTPUT_DIR = 'test-reports'
+
+install_aria_extensions()
diff --git a/azure/multicloud_azure/swagger/urls.py b/azure/multicloud_azure/swagger/urls.py
index a3de04a..dde553a 100644
--- a/azure/multicloud_azure/swagger/urls.py
+++ b/azure/multicloud_azure/swagger/urls.py
@@ -1,5 +1,4 @@
# Copyright (c) 2018 Amdocs
-# Copyright (c) 2018 Amdocs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,13 +15,15 @@ from rest_framework.urlpatterns import format_suffix_patterns
from multicloud_azure.swagger.views.swagger_json import SwaggerJsonView
-
# Registry
from multicloud_azure.swagger.views.registry.views import Registry
from multicloud_azure.swagger.views.registry.views import UnRegistry
from multicloud_azure.swagger.views.registry.views import APIv1Registry
from multicloud_azure.swagger.views.registry.views import APIv1UnRegistry
+from multicloud_azure.swagger.views.infra_workload.views import InfraWorkload
+from multicloud_azure.swagger.views.infra_workload.views import GetStackView
+
urlpatterns = [
# swagger
url(r'^api/multicloud-azure/v0/swagger.json$', SwaggerJsonView.as_view()),
@@ -42,6 +43,15 @@ urlpatterns = [
r'/(?P<cloud_region_id>[0-9a-zA-Z_-]+)$',
APIv1UnRegistry.as_view()),
+ url(r'^api/multicloud-azure/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)'
+ r'/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload$',
+ InfraWorkload.as_view()),
+
+ url(r'^api/multicloud-azure/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/'
+ r'(?P<workload_id>[0-9a-zA-Z\-\_]+)$',
+ GetStackView.as_view()),
+
]
urlpatterns = format_suffix_patterns(urlpatterns)
diff --git a/azure/multicloud_azure/swagger/views/infra_workload/__init__.py b/azure/multicloud_azure/swagger/views/infra_workload/__init__.py
new file mode 100644
index 0000000..a952e9e
--- /dev/null
+++ b/azure/multicloud_azure/swagger/views/infra_workload/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
diff --git a/azure/multicloud_azure/swagger/views/infra_workload/views.py b/azure/multicloud_azure/swagger/views/infra_workload/views.py
new file mode 100644
index 0000000..c44eba2
--- /dev/null
+++ b/azure/multicloud_azure/swagger/views/infra_workload/views.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+
+import logging
+import json
+
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from multicloud_azure.pub.aria.service import AriaServiceImpl
+
+logger = logging.getLogger(__name__)
+
+
+class InfraWorkload(APIView):
+
+ def post(self, request, cloud_owner, cloud_region_id):
+ data = request.data
+ template_data = data["infra-template"]
+ payload = data["infra-payload"]
+ inputs = json.loads(payload)
+ template_name = inputs['template_data']['stack_name']
+ service_op = AriaServiceImpl()
+ try:
+ stack = service_op.deploy_service(template_name, template_data,
+ inputs, logger)
+ if stack[1] != 200:
+ return Response(data=stack[0], status=stack[1])
+ except Exception as e:
+
+ if hasattr(e, "http_status"):
+ return Response(data={'error': str(e)}, status=e.http_status)
+ else:
+ return Response(data={'error': str(e)},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+ rsp = {
+ "template_type": "heat",
+ "workload_id": stack[0]
+ }
+ return Response(data=rsp, status=status.HTTP_202_ACCEPTED)
+
+
+class GetStackView(APIView):
+
+    def get(self, request, cloud_owner, cloud_region_id, workload_id):
+        service_op = AriaServiceImpl()
+        try:
+            stack = service_op.show_execution(workload_id)
+            if stack[1] != 200:
+                return Response(data=stack[0], status=stack[1])
+            body = json.loads(stack[0])
+            stack_status = body["status"]
+            # map the ARIA execution state onto a heat-style stack status
+            response = "unknown"
+            if stack_status == "pending" or stack_status == "started":
+                response = "CREATE_IN_PROGRESS"
+            elif stack_status == "succeeded":
+                response = "CREATE_COMPLETE"
+            elif stack_status == "failed" or stack_status == "cancelled":
+                response = "CREATE_FAILED"
+            rsp = {
+                "template_type": "heat",
+                "workload_id": workload_id,
+                "workload_status": response
+            }
+            return Response(data=rsp, status=stack[1])
+        except Exception as e:
+            if hasattr(e, "http_status"):
+                return Response(data={'error': str(e)}, status=e.http_status)
+            else:
+                return Response(data={'error': str(e)},
+                                status=status.HTTP_500_INTERNAL_SERVER_ERROR)
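The if/elif chain in GetStackView.get translates ARIA execution states into the heat-style statuses that callers of the infra_workload API expect. An equivalent table-driven sketch of that mapping (same behavior, not part of this patch):

    # ARIA execution state -> heat-style workload status
    STATUS_MAP = {
        "pending": "CREATE_IN_PROGRESS",
        "started": "CREATE_IN_PROGRESS",
        "succeeded": "CREATE_COMPLETE",
        "failed": "CREATE_FAILED",
        "cancelled": "CREATE_FAILED",
    }

    def to_heat_status(aria_status):
        # anything unmapped is reported as "unknown"
        return STATUS_MAP.get(aria_status, "unknown")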
diff --git a/azure/multicloud_azure/tests/test_aria_view.py b/azure/multicloud_azure/tests/test_aria_view.py
new file mode 100644
index 0000000..69c18e7
--- /dev/null
+++ b/azure/multicloud_azure/tests/test_aria_view.py
@@ -0,0 +1,171 @@
+# Copyright (c) 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+
+import unittest
+import mock
+import json
+from rest_framework import status
+from aria.cli.core import aria
+
+from multicloud_azure.swagger.views.infra_workload.views import InfraWorkload
+from multicloud_azure.swagger.views.infra_workload.views import GetStackView
+from multicloud_azure.pub.aria.service import AriaServiceImpl
+
+
+class InfraViewTest(unittest.TestCase):
+
+    def setUp(self):
+        self.fsv = InfraWorkload()
+
+    def tearDown(self):
+        pass
+
+    def test_service_get_fail(self):
+        req = mock.Mock()
+        req_data = {'infra-template': 'aria', 'infra-payload': json.dumps(
+            {'name': 'abc', 'template_data': {'stack_name': 'stack'}})}
+        req.data = req_data
+        resp = self.fsv.post(req, "abc", "def")
+        self.assertEqual(status.HTTP_500_INTERNAL_SERVER_ERROR,
+                         resp.status_code)
+
+
+class StackViewTest(unittest.TestCase):
+
+    def setUp(self):
+        self.fsv = GetStackView()
+
+    def tearDown(self):
+        pass
+
+    def test_service_get_fail(self):
+
+        class Request:
+            def __init__(self, query_params):
+                self.query_params = query_params
+        req = Request({'k': 'v'})
+        self.assertNotEqual(status.HTTP_500_INTERNAL_SERVER_ERROR,
+                            self.fsv.get(req, "abc", "def", 123))
+
+
+class WorkloadViewTest(unittest.TestCase):
+
+    def setUp(self):
+        self.fsv = AriaServiceImpl()
+
+    def tearDown(self):
+        pass
+
+    @mock.patch.object(AriaServiceImpl, 'deploy_service')
+    def test_deploy_service(self, mock_service_info):
+
+        class Service:
+            def __init__(self, name, body, input, logger):
+                self.name = name
+                self.body = body
+                self.input = input
+                self.logger = logger
+        s = Service("abc", "def", "ghi", "OK")
+        mock_service_info.return_value = s
+        service_op = AriaServiceImpl()
+        self.assertNotEqual(200, service_op.deploy_service("a1", "b1", "c1",
+                                                           "OK"))
+
+    @mock.patch.object(AriaServiceImpl, 'install_template_private')
+    @aria.pass_model_storage
+    @aria.pass_resource_storage
+    @aria.pass_plugin_manager
+    @aria.pass_logger
+    def test_install_template(self, mock_template_info, model_storage,
+                              resource_storage, plugin_manager, logger):
+
+        class Workload:
+            def __init__(self, name, body):
+                self.name = name
+                self.body = body
+        service = Workload("a", "w1")
+        mock_template_info.return_value = service
+
+        class Request:
+            def __init__(self, query_params):
+                self.query_params = query_params
+        req = Request({'k': 'v'})
+        self.assertNotEqual(200,
+                            self.fsv.install_template_private(req, "a1", "b1",
+                                                              model_storage,
+                                                              resource_storage,
+                                                              plugin_manager,
+                                                              logger))
+
+    @mock.patch.object(AriaServiceImpl, 'create_service')
+    @aria.pass_model_storage
+    @aria.pass_resource_storage
+    @aria.pass_plugin_manager
+    @aria.pass_logger
+    def test_create_service(self, mock_template_info, model_storage,
+                            resource_storage, plugin_manager, logger):
+        class Workload:
+            def __init__(self, id, name, input):
+                self.id = id
+                self.name = name
+                self.input = input
+
+        f1 = Workload(1, "a", "w1")
+        f2 = Workload(2, "b", "w2")
+        service = [f1, f2]
+        mock_template_info.return_value = service
+
+        class Request:
+            def __init__(self, query_params):
+                self.query_params = query_params
+
+        req = Request({'k': 'v'})
+        self.assertNotEqual(200,
+                            self.fsv.create_service(req, 123, "a1", "b1",
+                                                    model_storage,
+                                                    resource_storage,
+                                                    plugin_manager,
+                                                    logger))
+
+    @mock.patch.object(AriaServiceImpl, 'start_execution')
+    @aria.pass_model_storage
+    @aria.pass_resource_storage
+    @aria.pass_plugin_manager
+    @aria.pass_logger
+    def test_start_execution(self, mock_template_info, model_storage,
+                             resource_storage, plugin_manager, logger):
+        class Workload:
+            def __init__(self, status_id, execution_id, name, input):
+                self.status_id = status_id
+                self.execution_id = execution_id
+                self.input = input
+                self.name = name
+
+        service = Workload(1, 2, "a", "w")
+        mock_template_info.return_value = service
+
+        class Request:
+            def __init__(self, query_params):
+                self.query_params = query_params
+
+        req = Request({'k': 'v'})
+        self.assertNotEqual(200,
+                            self.fsv.start_execution(req, 123, 456, "a1", "b1",
+                                                     model_storage,
+                                                     resource_storage,
+                                                     plugin_manager,
+                                                     logger))
+
+    def test_show_execution(self):
+        service_op = AriaServiceImpl()
+        self.assertNotEqual(200,
+                            service_op.show_execution(123))
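To exercise only these tests outside the full tox run, a sketch like the following should work, assuming DJANGO_SETTINGS_MODULE points at the multicloud_azure settings the same way `python manage.py test` (see the tox.ini change below) configures them:

    import unittest

    # load and run only the new view tests
    suite = unittest.defaultTestLoader.loadTestsFromName(
        "multicloud_azure.tests.test_aria_view")
    unittest.TextTestRunner(verbosity=2).run(suite)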
diff --git a/azure/requirements.txt b/azure/requirements.txt
index f7225e9..3f3fa5d 100644
--- a/azure/requirements.txt
+++ b/azure/requirements.txt
@@ -27,18 +27,19 @@ unittest_xml_reporting==1.12.0
onappylog>=1.0.6
# for event
-oslo_messaging
+#oslo_messaging
# for pecan framework
uwsgi
pecan>=1.2.1
-oslo.concurrency>=3.21.0
-oslo.config>=4.11.0
-oslo.service>=1.25.0
+#oslo.concurrency>=3.21.0
+#oslo.config>=4.11.0
+#oslo.service>=1.25.0
eventlet>=0.20.0
PyYAML>=3.1.0
#azure
+requests==2.16.0
azure-mgmt-resource==2.0.0
azure-mgmt-compute==4.0.1
azure-mgmt-authorization==0.50.0
diff --git a/azure/tox.ini b/azure/tox.ini
index 4c2c67e..58fd7a6 100644
--- a/azure/tox.ini
+++ b/azure/tox.ini
@@ -6,14 +6,17 @@ skipsdist = true
downloadcache = ~/cache/pip
[testenv]
-deps = -r{toxinidir}/requirements.txt
+deps =
+ -r{toxinidir}/requirements.txt
commands =
/usr/bin/find . -type f -name "*.py[c|o]" -delete
+ pip install {toxinidir}/aria/aria-extension-cloudify/src/aria
+ pip install {toxinidir}/aria/aria-extension-cloudify
python manage.py test multicloud_azure
[testenv:pep8]
deps=flake8
-commands=flake8
+commands=flake8 --exclude='./aria/**,./*tox*/**'
[testenv:py27]
commands =
diff --git a/pom.xml b/pom.xml
index 744a192..82f704d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -39,7 +39,7 @@
<sonar.language>py</sonar.language>
<sonar.pluginName>Python</sonar.pluginName>
<sonar.inclusions>**/*.py</sonar.inclusions>
- <sonar.exclusions>**/tests/*,setup.py</sonar.exclusions>
+ <sonar.exclusions>**/tests/*,setup.py,**/aria/*</sonar.exclusions>
</properties>
<build>
<pluginManagement>