authorJack Lucas <jflucas@research.att.com>2018-03-22 17:43:01 +0000
committerJack Lucas <jflucas@research.att.com>2018-03-23 12:46:10 +0000
commit1ed29c111f50e33ac89915ba86b530820da3a689 (patch)
tree314ad8dc5dd2cad4ba89bac5662015f8a9c43ced
parent19a075b395d687753de0f0266842cb9af8f24682 (diff)
Add DCAE Kubernetes plugin
Change-Id: Ic329f77bb09dcccfd49a34a8f84d387e8da929c6
Issue-ID: DCAEGEN2-410
Signed-off-by: Jack Lucas <jflucas@research.att.com>
-rw-r--r--k8s/.gitignore71
-rw-r--r--k8s/ChangeLog.md62
-rw-r--r--k8s/DesignNotes.md13
-rw-r--r--k8s/LICENSE.txt32
-rw-r--r--k8s/README.md244
-rw-r--r--k8s/configure/__init__.py19
-rw-r--r--k8s/configure/configure.py83
-rw-r--r--k8s/k8s-node-type.yaml353
-rw-r--r--k8s/k8sclient/__init__.py19
-rw-r--r--k8s/k8sclient/k8sclient.py324
-rw-r--r--k8s/k8splugin/__init__.py30
-rw-r--r--k8s/k8splugin/decorators.py102
-rw-r--r--k8s/k8splugin/discovery.py269
-rw-r--r--k8s/k8splugin/exceptions.py29
-rw-r--r--k8s/k8splugin/tasks.py682
-rw-r--r--k8s/k8splugin/utils.py43
-rw-r--r--k8s/msb/__init__.py19
-rw-r--r--k8s/msb/msb.py64
-rw-r--r--k8s/pom.xml165
-rw-r--r--k8s/requirements.txt6
-rw-r--r--k8s/setup.py39
-rw-r--r--k8s/tests/conftest.py34
-rw-r--r--k8s/tests/test_decorators.py34
-rw-r--r--k8s/tests/test_discovery.py71
-rw-r--r--k8s/tests/test_tasks.py293
-rw-r--r--k8s/tests/test_utils.py33
-rw-r--r--k8s/tox.ini15
-rwxr-xr-xmvn-phase-script.sh4
-rw-r--r--pom.xml1
29 files changed, 3151 insertions, 2 deletions
diff --git a/k8s/.gitignore b/k8s/.gitignore
new file mode 100644
index 0000000..be63b67
--- /dev/null
+++ b/k8s/.gitignore
@@ -0,0 +1,71 @@
+cfyhelper
+.cloudify
+*.swp
+*.swn
+*.swo
+.DS_Store
+.project
+.pydevproject
+venv
+
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coveragerc
+.coverage.*
+.cache
+.pytest_cache/
+xunit-results.xml
+nosetests.xml
+coverage.xml
+*,cover
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
diff --git a/k8s/ChangeLog.md b/k8s/ChangeLog.md
new file mode 100644
index 0000000..0d0eafc
--- /dev/null
+++ b/k8s/ChangeLog.md
@@ -0,0 +1,62 @@
+# Change Log
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/)
+and this project adheres to [Semantic Versioning](http://semver.org/).
+
+## [2.4.0]
+
+* Change *components* to be policy reconfigurable:
+ - Add policy execution operation
+ - Add policy decorators to task so that application configuration will be merged with policy
+* Fetch Docker logins from Consul
+
+## [2.3.0+t.0.3]
+
+* Enhance `SelectedDockerHost` node type with `name_search` and add default to `docker_host_override`
+* Implement the functionality in the `select_docker_host` task to query Consul given location id and name search
+* Deprecate `location_id` on the `DockerContainerForComponents*` node types
+* Change `service_id` to be optional for `DockerContainerForComponents*` node types
+* Add deployment id as a tag for registration on the component
+
+## [2.3.0]
+
+* Rip out dockering and use common python-dockering library
+ - Uses 1.2.0 of python-dockering, which supports Docker exec based health checks
+* Support mapping ports and volumes when provided in docker config
+
+## [2.2.0]
+
+* Add `dcae.nodes.DockerContainerForComponentsUsingDmaap` node type and parse streams_publishes and streams_subscribes to be used by the DMaaP plugin.
+ - Handle message router wiring in the create operation for components
+ - Handle data router wiring in the create and in the start operation for components
+* Refactor the create operations and the start operations for components. Refactored to be functional to enable better unit test coverage.
+* Add decorators for common cross cutting functionality
+* Add example blueprints for different dmaap cases
+
+## [2.1.0]
+
+* Add the node type `DockerContainerForPlatforms` which is intended for platform services that are to have well-known names and ports
+* Add backdoor for `DockerContainerForComponents` to statically map ports
+* Add hack fix to allow this plugin access to the research nexus
+* Add support for dns through the local Consul agent
+* Free this plugin from the CentOS bondage
+
+## [2.0.0]
+
+* Remove the magic env.ini code. It's no longer needed because we are now running local agents of Consul.
+* Save and use the docker container id
+* `DockerContainer` is now a different node type that is much simpler than `DockerContainerforComponents`. It is targeted for the use case of registrator. This involved overhauling the create and start container functionality.
+* Classify connection and docker host not found error as recoverable
+* Apply CONSUL_HOST to point to the local Consul agent
+
+## [1.0.0]
+
+* Implement health checks - expose health checks on the node and register Docker containers with it. Note that health checks are currently optional.
+* Add option to remove images in the stop operation
+* Verify that the container is running and healthy before finishing the start operation
+* Image names passed in are now required to be the fully tagged names including registry
+* Remove references to rework in the code namespaces
+* Application configuration is now a YAML map to accommodate future blueprint generation
+* Update blueprints and cfyhelper.sh
diff --git a/k8s/DesignNotes.md b/k8s/DesignNotes.md
new file mode 100644
index 0000000..556e0f0
--- /dev/null
+++ b/k8s/DesignNotes.md
@@ -0,0 +1,13 @@
+## Design Notes: DCAE on Kubernetes
+### Background: Previous Support for DCAE on Docker
+### Key Design Elements for DCAE on Kubernetes
+#### Use of Kubernetes Abstractions
+#### Networking
+#### Service Discovery
+#### Component Configuration
+#### Health Checking and Resiliency
+#### Scaling
+#### Software Updates
+### Changes to Cloudify Type Definitions
+### Impact to Other DCAE Components
+### Status of Work
diff --git a/k8s/LICENSE.txt b/k8s/LICENSE.txt
new file mode 100644
index 0000000..e266d0a
--- /dev/null
+++ b/k8s/LICENSE.txt
@@ -0,0 +1,32 @@
+============LICENSE_START=======================================================
+org.onap.dcae
+================================================================================
+Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
+
+ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+
+Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+===================================================================
+Licensed under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+you may not use this documentation except in compliance with the License.
+You may obtain a copy of the License at
+ https://creativecommons.org/licenses/by/4.0/
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/k8s/README.md b/k8s/README.md
new file mode 100644
index 0000000..79ce303
--- /dev/null
+++ b/k8s/README.md
@@ -0,0 +1,244 @@
+# ONAP DCAE Kubernetes Plugin for Cloudify
+
+This directory contains a Cloudify plugin used to orchestrate the deployment of containerized DCAE platform and service components into a Kubernetes ("k8s")
+environment. This work is based on the [ONAP DCAE Docker plugin](../docker).
+
+This plugin is *not* a generic Kubernetes plugin that exposes the full set of Kubernetes features.
+In fact, the plugin largely hides the use of Kubernetes from both component developers and blueprint authors.
+The Cloudify node type definitions are very similar to the Cloudify type definitions used in the ONAP DCAE Docker plugin.
+
+For the node types `ContainerizedPlatformComponent`, `ContainerizedServiceComponent`, and `ContainerizedServiceComponentUsingDmaap`, this plugin
+creates the following Kubernetes entities:
+
+- A Kubernetes `Deployment` containing information about what containers to run and what volumes to mount.
+ - The `Deployment` always includes a container that runs the component's Docker image
+ - The `Deployment` includes any volumes specified in the blueprint
+ - If the blueprint specifies a logging directory via the `log_info` property, the `Deployment` includes a second container,
+ running the `filebeat` logging sidecar that ships logging information to the ONAP ELK stack. The `Deployment` will include
+ some additional volumes needed by filebeat.
+- If the blueprint indicates that the component exposes any ports, the plugin will create a Kubernetes `Service` that allocates an address
+ in the Kubernetes network address space that will route traffic to a container that's running the component. This `Service` provides a
+ fixed "virtual IP" for the component.
+- If the blueprint indicates that the component exposes a port externally, the plugin will create an additional Kubernetes `Service` that opens up a
+ port on the external interface of each node in the Kubernetes cluster.
+
+Through the `replicas` property, a blueprint can request deployment of multiple instances of the component. The plugin will still create a single `Deployment` (and,
+if needed, one or two `Services`), but the `Deployment` will cause multiple instances of the container to run. (Specifically, the `Deployment` will create
+a Kubernetes `Pod` for each requested instance.) Other entities connect to a component via the IP address of a `Service`, and Kubernetes takes care of routing
+traffic to an appropriate container instance.
+
+## Pre-requisites
+### Configuration
+#### Configuration file
+The plugin expects a configuration file in the Python "ini" format to be stored at `/opt/onap/config.txt`. This file contains the address of the Consul cluster.
+Here is an example:
+```
+[consul]
+address=10.12.5.115:30270
+```
+#### Configuration entry in Consul
+Additional configuration information is stored in the Consul KV store under the key `k8s-plugin`.
+The configuration is provided as a JSON object with the following properties:
+
+ - namespace: k8s namespace to use for DCAE
+ - consul_dns_name: k8s internal DNS name for Consul (passed to containers)
+ - image_pull_secrets: list of names of k8s secrets for accessing Docker registries
+ - filebeat: object containing configuration for setting up the filebeat container, with the following properties:
+ - log_path: mount point for log volume in filebeat container
+ - data_path: mount point for data volume in filebeat container
+ - config_path: mount point for config volume in filebeat container
+ - config_subpath: subpath for config data in filebeat container
+ - config_map: name of a ConfigMap holding the filebeat configuration file
+ - image: Docker image to use for filebeat
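+
+A hypothetical example of the JSON object stored under the `k8s-plugin` key (the values shown
+are the defaults from `configure.py`; the secret name is purely illustrative):
+
+```json
+{
+  "namespace": "dcae",
+  "consul_dns_name": "consul",
+  "image_pull_secrets": ["onap-docker-registry-key"],
+  "filebeat": {
+    "log_path": "/var/log/onap",
+    "data_path": "/usr/share/filebeat/data",
+    "config_path": "/usr/share/filebeat/filebeat.yml",
+    "config_subpath": "filebeat.yml",
+    "config_map": "filebeat-conf",
+    "image": "docker.elastic.co/beats/filebeat:5.5.0"
+  }
+}
+```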
+
+#### Kubernetes access information
+The plugin accesses a Kubernetes cluster. The information and credentials for accessing a cluster are stored in a "kubeconfig"
+file. The plugin expects to find such a file at `/etc/cloudify/.kube/config`.
+
+#### Additional Kubernetes configuration elements
+The plugin expects certain elements to be provided in the DCAE/ONAP environment, namely:
+ - Kubernetes secret(s) containing the credentials needed to pull images from Docker registries, if needed
+ - A Kubernetes ConfigMap containing the filebeat.yml file used by the filebeat logging container
+ - An ExternalName service
+
+
+## Input parameters
+
+### start
+
+These input parameters are for the `start` operation of `cloudify.interfaces.lifecycle` and are inputs into the variant task operations `create_and_start_container*`.
+
+#### `envs`
+
+A map of environment variables to be forwarded to the container. Example:
+
+```yaml
+envs:
+ EXTERNAL_IP: '10.100.1.99'
+```
+
+These environment variables will be forwarded in addition to the *platform-related* environment variables like `CONSUL_HOST`.
+
+#### `volumes`
+
+List of maps used for setting up Kubernetes volume mounts. Example:
+
+```yaml
+volumes:
+ - host:
+ path: '/var/run/docker.sock'
+ container:
+ bind: '/tmp/docker.sock'
+ mode: 'ro'
+```
+
+The table below describes the fields.
+
+key | description
+--- | -----------
+path | Full path to the file or directory on the host machine to be mounted
+bind | Full path to the file or directory in the container where the volume should be mounted to
+mode | Access mode: `ro` (read-only) or `rw` (read-write)
+
+#### `ports`
+
+List of strings - Used to bind container ports to host ports. Each item is of the format: `<container port>:<host port>`.
+
+Note that `ContainerizedPlatformComponent` has the property pair `host_port` and `container_port`. This pair will be merged with the `ports` input parameter.
+
+```yaml
+ports:
+ - '8000:31000'
+```
+
+Default is `None`.
+
+In the Kubernetes environment, most components will communicate over the Kubernetes network using private addresses. For those cases,
+setting the `<host port>` to 0 will expose the `<container port>` to other components on the Kubernetes network, but will not expose any
+ports on the Kubernetes host's external interface. Setting `<host port>` to a non-zero value will expose that port on the external interfaces
+of every Kubernetes host in the cluster. (This uses the Kubernetes `NodePort` service type.)
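+
+For example, a component that exposes port 8080 only on the Kubernetes network and port 9001
+on every node's external interface might specify (port values are illustrative):
+
+```yaml
+ports:
+  - '8080:0'      # ClusterIP only; reachable inside the k8s network
+  - '9001:30901'  # also exposed as NodePort 30901 on each cluster node
+```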
+
+#### `max_wait`
+
+Integer - seconds to wait for the container to come up healthy before throwing a `NonRecoverableError`.
+
+```yaml
+max_wait:
+ 60
+```
+
+Default is 300 seconds.
+
+
+## Using DMaaP
+
+The node type `dcae.nodes.ContainerizedServiceComponentUsingDmaap` is intended to be used by components that use DMaaP and is expected to be connected with the DMaaP node types found in the DMaaP plugin.
+
+### Node properties
+
+The properties `streams_publishes` and `streams_subscribes` are both lists of objects that are intended to be passed into the DMaaP plugin and used to create additional parameters for it.
+
+#### Message router
+
+For message router publishers and subscribers, the objects look like:
+
+```yaml
+name: topic00
+location: mtc5
+client_role: XXXX
+type: message_router
+```
+
+Where `name` is the node name of `dcae.nodes.Topic` or `dcae.nodes.ExistingTopic` that the Docker node is connecting with via the relationships `dcae.relationships.publish_events` for publishing and `dcae.relationships.subscribe_to_events` for subscribing.
+
+#### Data router
+
+For data router publishers, the object looks like:
+
+```yaml
+name: feed00
+location: mtc5
+type: data_router
+```
+
+Where `name` is the node name of `dcae.nodes.Feed` or `dcae.nodes.ExistingFeed` that the Docker node is connecting with via the relationships `dcae.relationships.publish_files`.
+
+For data router subscribers, the object looks like:
+
+```yaml
+name: feed00
+location: mtc5
+type: data_router
+username: king
+password: "123456"
+route: some-path
+scheme: https
+```
+
+Where the relationship to use is `dcae.relationships.subscribe_to_files`.
+
+If `username` and `password` are not provided, then the plugin will generate a username and password pair.
+
+`route` and `scheme` are parameters used in the dynamic construction of the delivery URL, which will be passed to the DMaaP plugin and used in setting up the subscriber to the feed.
+
+`route` is the http path endpoint of the subscriber that will handle files from the associated feed.
+
+`scheme` is either `http` or `https`. If not specified, then the plugin will default to `http`.
+
+### Component configuration
+
+The DMaaP plugin is responsible for provisioning the feed/topic and storing the resulting DMaaP connection details in Consul. Here is an example:
+
+```json
+{
+ "topic00": {
+ "client_role": "XXXX",
+ "client_id": "XXXX",
+ "location": "XXXX",
+ "topic_url": "https://some-topic-url.com/events/abc"
+ }
+}
+```
+
+This is to be merged with the templatized application configuration:
+
+```json
+{
+ "some-param": "Lorem ipsum dolor sit amet",
+ "streams_subscribes": {
+ "topic-alpha": {
+ "type": "message_router",
+ "aaf_username": "user-foo",
+ "aaf_password": "password-bar",
+ "dmaap_info": "<< topic00 >>"
+ }
+ },
+ "streams_publishes": {},
+ "services_calls": {}
+}
+```
+
+To form the application configuration:
+
+```json
+{
+ "some-param": "Lorem ipsum dolor sit amet",
+ "streams_subscribes": {
+ "topic-alpha": {
+ "type": "message_router",
+ "aaf_username": "user-foo",
+ "aaf_password": "password-bar",
+ "dmaap_info": {
+ "client_role": "XXXX",
+ "client_id": "XXXX",
+ "location": "XXXX",
+ "topic_url": "https://some-topic-url.com/events/abc"
+ }
+ }
+ },
+ "streams_publishes": {},
+ "services_calls": {}
+}
+```
+
+This also applies to data router feeds.
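+
+For example, a data router subscriber's `dmaap_info` placeholder (such as `<< feed00 >>`)
+would be replaced with whatever connection object the DMaaP plugin stored in Consul for that
+feed; the field names below are purely illustrative:
+
+```json
+{
+  "feed00": {
+    "username": "king",
+    "password": "123456",
+    "delivery_url": "https://subscriber.example.com/some-path"
+  }
+}
+```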
diff --git a/k8s/configure/__init__.py b/k8s/configure/__init__.py
new file mode 100644
index 0000000..b986659
--- /dev/null
+++ b/k8s/configure/__init__.py
@@ -0,0 +1,19 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property. \ No newline at end of file
diff --git a/k8s/configure/configure.py b/k8s/configure/configure.py
new file mode 100644
index 0000000..fcf4044
--- /dev/null
+++ b/k8s/configure/configure.py
@@ -0,0 +1,83 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+_CONFIG_PATH = "/opt/onap/config.txt" # Path to config file on the Cloudify Manager host
+_CONSUL_KEY = "k8s-plugin" # Key under which CM configuration is stored in Consul
+
+# Default configuration values
+DCAE_NAMESPACE = "dcae"
+CONSUL_DNS_NAME = "consul"
+
+FB_LOG_PATH = "/var/log/onap"
+FB_DATA_PATH = "/usr/share/filebeat/data"
+FB_CONFIG_PATH = "/usr/share/filebeat/filebeat.yml"
+FB_CONFIG_SUBPATH = "filebeat.yml"
+FB_CONFIG_MAP = "filebeat-conf"
+FB_IMAGE = "docker.elastic.co/beats/filebeat:5.5.0"
+
+def _set_defaults():
+ """ Set default configuration parameters """
+ return {
+ "namespace" : DCAE_NAMESPACE, # k8s namespace to use for DCAE
+ "consul_dns_name" : CONSUL_DNS_NAME, # k8s internal DNS name for Consul
+ "image_pull_secrets" : [], # list of k8s secrets for accessing Docker registries
+ "filebeat": { # Configuration for setting up filebeat container
+ "log_path" : FB_LOG_PATH, # mount point for log volume in filebeat container
+ "data_path" : FB_DATA_PATH, # mount point for data volume in filebeat container
+ "config_path" : FB_CONFIG_PATH, # mount point for config volume in filebeat container
+ "config_subpath" : FB_CONFIG_SUBPATH, # subpath for config data in filebeat container
+ "config_map" : FB_CONFIG_MAP, # ConfigMap holding the filebeat configuration
+ "image": FB_IMAGE # Docker image to use for filebeat
+ }
+ }
+
+def configure(config_path=_CONFIG_PATH, key = _CONSUL_KEY):
+ """
+ Get configuration information from local file and Consul.
+ Note that the Cloudify context ("ctx") isn't available at
+ module load time.
+ """
+
+ from cloudify.exceptions import NonRecoverableError
+ import ConfigParser
+ from k8splugin import discovery
+ config = _set_defaults()
+
+ try:
+ # Get Consul address from a config file
+ c = ConfigParser.ConfigParser()
+ c.read(config_path)
+ config["consul_host"] = c.get('consul','address')
+
+ # Get the rest of the config from Consul
+ conn = discovery.create_kv_conn(config["consul_host"])
+ val = discovery.get_kv_value(conn, key)
+
+ # Merge Consul results into the config
+ config.update(val)
+
+ except discovery.DiscoveryKVEntryNotFoundError as e:
+ # Don't reraise error, assume defaults are wanted.
+ pass
+
+ except Exception as e:
+ raise NonRecoverableError(e)
+
+ return config
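+
+# Illustrative usage from plugin code (hypothetical, not part of this module):
+#
+#   from configure import configure
+#   k8sconfig = configure.configure()
+#   namespace = k8sconfig["namespace"]          # e.g. "dcae"
+#   secrets = k8sconfig["image_pull_secrets"]   # names of k8s registry secrets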
diff --git a/k8s/k8s-node-type.yaml b/k8s/k8s-node-type.yaml
new file mode 100644
index 0000000..61b6f70
--- /dev/null
+++ b/k8s/k8s-node-type.yaml
@@ -0,0 +1,353 @@
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+tosca_definitions_version: cloudify_dsl_1_3
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+
+plugins:
+ k8s:
+ executor: 'central_deployment_agent'
+ package_name: k8splugin
+ package_version: 1.0.0
+
+data_types:
+
+ dcae.types.MSBRegistration:
+ description: >
+ Information for registering an HTTP service into MSB. It's optional to do so,
+ but if MSB registration is desired at least the port property must be provided.
+ If 'port' property is not provided, the plugin will not do the registration.
+ (The properties all have to be declared as not required, otherwise the
+ 'msb_registration' property on the node would also be required.)
+ properties:
+ port:
+ description: The container port at which the service is exposed
+ type: string
+ required: false
+ version:
+ description: The version identifier for the service
+ type: string
+ required: false
+ url_path:
+ description: The URL path (e.g., "/api", not the full URL) to the service endpoint
+ type: string
+ required: false
+ uses_ssl:
+ description: Set to true if service endpoint uses SSL (TLS)
+ type: boolean
+ required: false
+
+ dcae.types.LoggingInfo:
+ description: >
+ Information for setting up centralized logging via ELK using a "sidecar" container.
+ If 'log_directory' is not provided, the plugin will not set up ELK logging.
+ (The properties all have to be declared as not required, otherwise the
+ 'log_info' property on the node would also be required.)
+ properties:
+ log_directory:
+ description: >
+ The path in the container where the component writes its logs.
+ If the component is following the EELF requirements, this would be
+ the directory where the four EELF files are being written.
+ (Other logs can be placed in the directory--if their names end in '.log',
+ they'll also be sent into ELK.)
+ type: string
+ required: false
+ alternate_fb_path:
+ description: >
+ Hope not to use this. By default, the plugin will mount the log volume
+ at /var/log/onap/<component_type> in the sidecar container's file system.
+ 'alternate_fb_path' allows overriding the default. Will affect how the log
+ data can be found in the ELK system.
+ type: string
+ required: false
+
+node_types:
+ # The ContainerizedServiceComponent node type is to be used for DCAE service components that
+ # are to be run in a Docker container. This node type goes beyond that of an ordinary Docker
+ # plugin in that it has DCAE platform-specific functionality:
+ #
+ # * Generation of the service component name
+ # * Managing of service component configuration information
+ #
+ # The plugin deploys the container into a Kubernetes cluster with a very specific choice
+ # of Kubernetes elements that are deliberately not under the control of the blueprint author.
+ # The idea is to deploy all service components in a consistent way, with the details abstracted
+ # away from the blueprint author.
+ dcae.nodes.ContainerizedServiceComponent:
+ derived_from: cloudify.nodes.Root
+ properties:
+ service_component_type:
+ type: string
+ description: Service component type of the application being run in the container
+
+ service_id:
+ type: string
+ description: >
+ Unique id of the DCAE service instance this component belongs to. This value
+ will be applied as a tag in the registration of this component with Consul.
+ default: Null
+
+ location_id:
+ type: string
+ description: >
+ Location id of where to run the container. Not used by the plugin. Here for backward compatibility.
+ default: Null
+ required: False
+
+ service_component_name_override:
+ type: string
+ description: >
+ Manually override and set the name for this Docker container node. If this
+ is set, then the name will not be auto-generated. Platform services are the
+ specific use case for this parameter because they have static
+ names, for example the CDAP broker.
+ default: Null
+
+ application_config:
+ default: {}
+ description: >
+ Application configuration for this Docker component. The data structure is
+ expected to be a complex map (native YAML) and to be constructed and filled
+ by the creator of the blueprint.
+
+ docker_config:
+ default: {}
+ description: >
+ This is the auxiliary portion of the component spec that contains things
+ like healthcheck definitions for the Docker component. Health checks are
+ optional.
+
+ image:
+ type: string
+ description: Full uri of the Docker image
+
+ log_info:
+ type: dcae.types.LoggingInfo
+ description: >
+ Information for setting up centralized logging via ELK.
+ required: false
+
+ replicas:
+ type: integer
+ description: >
+ The number of instances of the component that should be launched initially
+ default: 1
+
+ always_pull_image:
+ type: boolean
+ description: >
+ Set to true if the orchestrator should always pull a new copy of the image
+ before deploying. By default the orchestrator pulls only if the image is
+ not already present on the Docker host where the container is being launched.
+ default: false
+
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ # Generate service component name and populate config into Consul
+ implementation: k8s.k8splugin.create_for_components
+ start:
+ # Create Docker container and start
+ implementation: k8s.k8splugin.create_and_start_container_for_components
+ stop:
+ # Stop and remove Docker container
+ implementation: k8s.k8splugin.stop_and_remove_container
+ delete:
+ # Delete configuration from Consul
+ implementation: k8s.k8splugin.cleanup_discovery
+ dcae.interfaces.policy:
+ # This is to be invoked by the policy handler upon policy updates
+ policy_update:
+ implementation: k8s.k8splugin.policy_update
+ dcae.interfaces.scale:
+ scale:
+ implementation: k8s.k8splugin.scale
+
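+ # A minimal illustrative blueprint node using this type (all names and
+ # values are hypothetical):
+ #
+ # my_component:
+ #   type: dcae.nodes.ContainerizedServiceComponent
+ #   properties:
+ #     service_component_type: 'my-collector'
+ #     image: 'nexus.example.com:10001/onap/my-collector:1.0.0'
+ #     application_config:
+ #       some_param: 'some_value'
+ #     replicas: 2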
+
+ # This node type is intended for DCAE service components that use DMaaP and must use the
+ # DMaaP plugin.
+ dcae.nodes.ContainerizedServiceComponentUsingDmaap:
+ derived_from: dcae.nodes.ContainerizedServiceComponent
+ properties:
+ streams_publishes:
+ description: >
+ List of DMaaP streams used for publishing.
+
+ Message router items look like:
+
+ name: topic00
+ location: mtc5
+ client_role: XXXX
+ type: message_router
+
+ Data router items look like:
+
+ name: feed00
+ location: mtc5
+ type: data_router
+
+ This information is forwarded to the dmaap plugin to provision
+ default: []
+ streams_subscribes:
+ description: >
+ List of DMaaP streams used for subscribing.
+
+ Message router items look like:
+
+ name: topic00
+ location: mtc5
+ client_role: XXXX
+ type: message_router
+
+ Data router items look like:
+
+ name: feed00
+ location: mtc5
+ type: data_router
+ username: king
+ password: 123456
+ route: some-path
+ scheme: https
+
+ Note that username and password are optional. If not provided or null, then the
+ plugin will generate them.
+
+ default: []
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ # Generate service component name and populate config into Consul
+ implementation: k8s.k8splugin.create_for_components_with_streams
+ start:
+ # Create Docker container and start
+ implementation: k8s.k8splugin.create_and_start_container_for_components_with_streams
+
+
+ # ContainerizedPlatformComponent is intended for DCAE platform services. Unlike the components,
+ # platform services have well-known names and well-known ports.
+ dcae.nodes.ContainerizedPlatformComponent:
+ derived_from: cloudify.nodes.Root
+ properties:
+ name:
+ description: >
+ Container name used to register with Consul
+ dns_name:
+ required: false
+ description: >
+ Name to be registered in the DNS for the service provided by the container.
+ If not provided, the 'name' field is used.
+ This is a work-around for the Kubernetes restriction on having '_' in a DNS name.
+ Having this field allows a component to look up its configuration using a name that
+ includes a '_' while providing a legal Kubernetes DNS name.
+
+ application_config:
+ default: {}
+ description: >
+ Application configuration for this Docker component. The data structure is
+ expected to be a complex map (native YAML) and to be constructed and filled
+ by the creator of the blueprint.
+
+ docker_config:
+ default: {}
+ description: >
+ This is the auxiliary portion of the component spec that contains things
+ like healthcheck definitions for the Docker component. Health checks are
+ optional.
+
+ image:
+ type: string
+ description: Full uri of the Docker image
+
+ host_port:
+ type: integer
+ description: >
+ Network port that the platform service is expecting to expose on the host
+ default: 0
+
+ container_port:
+ type: integer
+ description: >
+ Network port that the platform service exposes in the container
+ default: 0
+
+ msb_registration:
+ type: dcae.types.MSBRegistration
+ description: >
+ Information for registering with MSB
+ required: false
+
+ log_info:
+ type: dcae.types.LoggingInfo
+ description: >
+ Information for setting up centralized logging via ELK.
+ required: false
+
+ replicas:
+ type: integer
+ description: >
+ The number of instances of the component that should be launched initially
+ default: 1
+
+ always_pull_image:
+ type: boolean
+ description: >
+ Set to true if the orchestrator should always pull a new copy of the image
+ before deploying. By default the orchestrator pulls only if the image is
+ not already present on the Docker host where the container is being launched.
+ default: false
+
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ # Populate config into Consul
+ implementation: k8s.k8splugin.create_for_platforms
+ start:
+ # Create Docker container and start
+ implementation: k8s.k8splugin.create_and_start_container_for_platforms
+ stop:
+ # Stop and remove Docker container
+ implementation: k8s.k8splugin.stop_and_remove_container
+ delete:
+ # Delete configuration from Consul
+ implementation: k8s.k8splugin.cleanup_discovery
+ dcae.interfaces.scale:
+ scale:
+ implementation: k8s.k8splugin.tasks.scale
+
+ # ContainerizedApplication is intended to be more of an all-purpose Docker container node
+ # for non-componentized applications.
+ dcae.nodes.ContainerizedApplication:
+ derived_from: cloudify.nodes.Root
+ properties:
+ name:
+ type: string
+ description: Name of the Docker container to be given
+ image:
+ type: string
+ description: Full uri of the Docker image
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ start:
+ # Create Docker container and start
+ implementation: k8s.k8splugin.create_and_start_container
+ stop:
+ # Stop and remove Docker container
+ implementation: k8s.k8splugin.stop_and_remove_container
diff --git a/k8s/k8sclient/__init__.py b/k8s/k8sclient/__init__.py
new file mode 100644
index 0000000..b986659
--- /dev/null
+++ b/k8s/k8sclient/__init__.py
@@ -0,0 +1,19 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property. \ No newline at end of file
diff --git a/k8s/k8sclient/k8sclient.py b/k8s/k8sclient/k8sclient.py
new file mode 100644
index 0000000..a61fafa
--- /dev/null
+++ b/k8s/k8sclient/k8sclient.py
@@ -0,0 +1,324 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+import os
+import uuid
+from msb import msb
+from kubernetes import config, client
+
+def _create_deployment_name(component_name):
+ return "dep-{0}".format(component_name)
+
+def _create_service_name(component_name):
+ return "{0}".format(component_name)
+
+def _create_exposed_service_name(component_name):
+ return ("x{0}".format(component_name))[:63]
+
+def _configure_api():
+ #TODO: real configuration
+ config.load_kube_config(os.path.join(os.environ["HOME"], '.kube/config'))
+
+def _create_container_object(name, image, always_pull, env={}, container_ports=[], volume_mounts = []):
+ # Set up environment variables
+ # Copy any passed in environment variables
+ env_vars = [client.V1EnvVar(name=k, value=env[k]) for k in env.keys()]
+ # Add POD_IP with the IP address of the pod running the container
+ pod_ip = client.V1EnvVarSource(field_ref = client.V1ObjectFieldSelector(field_path="status.podIP"))
+ env_vars.append(client.V1EnvVar(name="POD_IP",value_from=pod_ip))
+
+ # Define container for pod
+ return client.V1Container(
+ name=name,
+ image=image,
+ image_pull_policy='Always' if always_pull else 'IfNotPresent',
+ env=env_vars,
+ ports=[client.V1ContainerPort(container_port=p) for p in container_ports],
+ volume_mounts = volume_mounts
+ )
+
+def _create_deployment_object(component_name,
+ containers,
+ replicas,
+ volumes,
+ labels,
+ pull_secrets=[]):
+
+ # pull_secrets is a list of the names of the k8s secrets containing docker registry credentials
+ # See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+ ips = []
+ for secret in pull_secrets:
+ ips.append(client.V1LocalObjectReference(name=secret))
+
+ # Define pod template
+ template = client.V1PodTemplateSpec(
+ metadata=client.V1ObjectMeta(labels=labels),
+ spec=client.V1PodSpec(hostname=component_name,
+ containers=containers,
+ volumes=volumes,
+ image_pull_secrets=ips)
+ )
+
+ # Define deployment spec
+ spec = client.ExtensionsV1beta1DeploymentSpec(
+ replicas=replicas,
+ template=template
+ )
+
+ # Create deployment object
+ deployment = client.ExtensionsV1beta1Deployment(
+ kind="Deployment",
+ metadata=client.V1ObjectMeta(name=_create_deployment_name(component_name)),
+ spec=spec
+ )
+
+ return deployment
+
+def _create_service_object(service_name, component_name, service_ports, annotations, labels, service_type):
+ service_spec = client.V1ServiceSpec(
+ ports=service_ports,
+ selector={"app" : component_name},
+ type=service_type
+ )
+ if annotations:
+ metadata = client.V1ObjectMeta(name=_create_service_name(service_name), labels=labels, annotations=annotations)
+ else:
+ metadata = client.V1ObjectMeta(name=_create_service_name(service_name), labels=labels)
+
+ service = client.V1Service(
+ kind="Service",
+ api_version="v1",
+ metadata=metadata,
+ spec=service_spec
+ )
+ return service
+
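+# Illustrative example of the parsing below (not part of the original module):
+#   _parse_ports(["8080:31000", "9090:0"]) returns
+#   ([8080, 9090], {"8080": 31000, "9090": 0})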
+def _parse_ports(port_list):
+ container_ports = []
+ port_map = {}
+ for p in port_list:
+ try:
+ [container, host] = (p.strip()).split(":",2)
+ cport = int(container)
+ container_ports.append(cport)
+ hport = int(host)
+ port_map[container] = hport
+ except:
+ pass # if something doesn't parse, we just ignore it
+
+ return container_ports, port_map
+
+def _parse_volumes(volume_list):
+ volumes = []
+ volume_mounts = []
+ for v in volume_list:
+ vname = str(uuid.uuid4())
+ vhost = v['host']['path']
+ vcontainer = v['container']['bind']
+ vro = (v['container']['mode'] == 'ro')
+ volumes.append(client.V1Volume(name=vname, host_path=client.V1HostPathVolumeSource(path=vhost)))
+ volume_mounts.append(client.V1VolumeMount(name=vname, mount_path=vcontainer, read_only=vro))
+
+ return volumes, volume_mounts
+
+def _service_exists(namespace, component_name):
+ exists = False
+ try:
+ _configure_api()
+ client.CoreV1Api().read_namespaced_service(_create_service_name(component_name), namespace)
+ exists = True
+ except client.rest.ApiException:
+ pass
+
+ return exists
+
+def deploy(namespace, component_name, image, replicas, always_pull, k8sconfig, **kwargs):
+ '''
+ This will create a k8s Deployment and, if needed, one or two k8s Services.
+ (We are being opinionated in our use of k8s... this code decides what k8s abstractions and features to use.
+ We're not exposing k8s to the component developer and the blueprint author.
+ This is a conscious choice. We want to use k8s in a controlled, consistent way, and we want to hide
+ the details from the component developer and the blueprint author.)
+
+ namespace: the Kubernetes namespace into which the component is deployed
+ component_name: the component name, used to derive names of Kubernetes entities
+ image: the docker image for the component being deployed
+ replicas: the number of instances of the component to be deployed
+ always_pull: boolean flag, indicating that Kubernetes should always pull a new copy of
+ the Docker image for the component, even if it is already present on the Kubernetes node.
+ k8sconfig contains:
+ - image_pull_secrets: a list of names of image pull secrets that can be used for retrieving images.
+ (DON'T PANIC: these are just the names of secrets held in the Kubernetes secret store.)
+ - filebeat: a dictionary of filebeat sidecar parameters:
+ "log_path" : mount point for log volume in filebeat container
+ "data_path" : mount point for data volume in filebeat container
+ "config_path" : mount point for config volume in filebeat container
+ "config_subpath" : subpath for config data in filebeat container
+ "config_map" : ConfigMap holding the filebeat configuration
+ "image": Docker image to use for filebeat
+ kwargs may have:
+ - volumes: array of volume objects, where a volume object is:
+ {"host":{"path": "/path/on/host"}, "container":{"bind":"/path/on/container","mode":"rw_or_ro"}
+ - ports: array of strings in the form "container_port:host_port"
+ - env: map of name-value pairs ( {name0: value0, name1: value1, ...} )
+ - msb_list: array of msb objects, where an msb object is as described in msb/msb.py.
+ - log_info: an object with info for setting up ELK logging, with the form:
+ {"log_directory": "/path/to/container/log/directory", "alternate_fb_path" : "/alternate/sidecar/log/path"}
+ - labels: dict with label-name/label-value pairs, e.g. {"cfydeployment" : "lsdfkladflksdfsjkl", "cfynode":"mycomponent"}
+ These labels will be set on all the pods deployed as a result of this deploy() invocation.
+
+ '''
+
+ deployment_ok = False
+ cip_service_created = False
+ deployment_description = {
+ "namespace": namespace,
+ "deployment": '',
+ "services": []
+ }
+
+ try:
+ _configure_api()
+
+ # Get API handles
+ core = client.CoreV1Api()
+ ext = client.ExtensionsV1beta1Api()
+
+ # Parse the port mapping into [container_port,...] and [{"host_port" : "container_port"},...]
+ container_ports, port_map = _parse_ports(kwargs.get("ports", []))
+
+ # Parse the volumes list into volumes and volume_mounts for the deployment
+ volumes, volume_mounts = _parse_volumes(kwargs.get("volumes",[]))
+
+ # Initialize the list of containers that will be part of the pod
+ containers = []
+
+ # Set up the ELK logging sidecar container, if needed
+ log_info = kwargs.get("log_info")
+ if log_info and "log_directory" in log_info:
+ log_dir = log_info["log_directory"]
+ fb = k8sconfig["filebeat"]
+ sidecar_volume_mounts = []
+
+ # Create the volume for component log files and volume mounts for the component and sidecar containers
+ volumes.append(client.V1Volume(name="component-log", empty_dir=client.V1EmptyDirVolumeSource()))
+ volume_mounts.append(client.V1VolumeMount(name="component-log", mount_path=log_dir))
+ sc_path = log_info["alternate_fb_path"] if "alternate_fb_path" in log_info \
+ else "{0}/{1}".format(fb["log_path"], component_name)
+ sidecar_volume_mounts.append(client.V1VolumeMount(name="component-log", mount_path=sc_path))
+
+ # Create the volume for sidecar data and the volume mount for it
+ volumes.append(client.V1Volume(name="filebeat-data", empty_dir=client.V1EmptyDirVolumeSource()))
+ sidecar_volume_mounts.append(client.V1VolumeMount(name="filebeat-data", mount_path=fb["data_path"]))
+
+ # Create the container for the sidecar
+ containers.append(_create_container_object("filebeat", fb["image"], False, {}, [], sidecar_volume_mounts))
+
+ # Create the volume for the sidecar configuration data and the volume mount for it
+ # The configuration data is in a k8s ConfigMap that should be created when DCAE is installed.
+ volumes.append(
+ client.V1Volume(name="filebeat-conf", config_map=client.V1ConfigMapVolumeSource(name=fb["config_map"])))
+ sidecar_volume_mounts.append(
+ client.V1VolumeMount(name="filebeat-conf", mount_path=fb["config_path"], sub_path=fb["config_subpath"]))
+
+ # Create the container for the component
+ # Make it the first container in the pod
+ containers.insert(0, _create_container_object(component_name, image, always_pull, kwargs.get("env", {}), container_ports, volume_mounts))
+
+ # Build the k8s Deployment object
+ labels = kwargs.get("labels", {})
+ labels.update({"app": component_name})
+ dep = _create_deployment_object(component_name, containers, replicas, volumes, labels, pull_secrets=k8sconfig["image_pull_secrets"])
+
+ # Have k8s deploy it
+ ext.create_namespaced_deployment(namespace, dep)
+ deployment_ok = True
+ deployment_description["deployment"] = _create_deployment_name(component_name)
+
+ # Create service(s), if a port mapping is specified
+ if port_map:
+ service_ports = [] # Ports exposed internally on the k8s network
+ exposed_ports = [] # Ports to be mapped to ports on the k8s nodes via NodePort
+ for cport, hport in port_map.iteritems():
+ service_ports.append(client.V1ServicePort(port=int(cport),name="port-{}".format(cport)))
+ if int(hport) != 0:
+ exposed_ports.append(client.V1ServicePort(port=int(cport), node_port=int(hport),name="xport-{}".format(cport)))
+
+ # If there are ports to be exposed via MSB, set up the annotation for the service
+ msb_list = kwargs.get("msb_list")
+ annotations = msb.create_msb_annotation(msb_list) if msb_list else ''
+
+ # Create a ClusterIP service for access via the k8s network
+ service = _create_service_object(_create_service_name(component_name), component_name, service_ports, annotations, labels, "ClusterIP")
+ core.create_namespaced_service(namespace, service)
+ cip_service_created = True
+ deployment_description["services"].append(_create_service_name(component_name))
+
+ # If there are ports to be exposed on the k8s nodes, create a "NodePort" service
+ if len(exposed_ports) > 0:
+ exposed_service = \
+ _create_service_object(_create_exposed_service_name(component_name), component_name, exposed_ports, '', labels, "NodePort")
+ core.create_namespaced_service(namespace, exposed_service)
+ deployment_description["services"].append(_create_exposed_service_name(component_name))
+
+ except Exception as e:
+ # If the ClusterIP service was created, delete the service:
+ if cip_service_created:
+ core.delete_namespaced_service(_create_service_name(component_name), namespace)
+ # If the deployment was created but not the service, delete the deployment
+ if deployment_ok:
+ client.ExtensionsV1beta1Api().delete_namespaced_deployment(_create_deployment_name(component_name), namespace, client.V1DeleteOptions())
+ raise e
+
+ return dep, deployment_description
+
+def undeploy(deployment_description):
+ # TODO: do real configuration
+ _configure_api()
+
+ namespace = deployment_description["namespace"]
+
+ # remove any services associated with the component
+ for service in deployment_description["services"]:
+ client.CoreV1Api().delete_namespaced_service(service, namespace)
+
+ # Have k8s delete the underlying pods and replicaset when deleting the deployment.
+ options = client.V1DeleteOptions(propagation_policy="Foreground")
+ client.ExtensionsV1beta1Api().delete_namespaced_deployment(deployment_description["deployment"], namespace, options)
+
+def is_available(namespace, component_name):
+ _configure_api()
+ dep_status = client.AppsV1beta1Api().read_namespaced_deployment_status(_create_deployment_name(component_name), namespace)
+ # Check whether the number of available replicas is at least the number requested
+ return dep_status.status.available_replicas >= dep_status.spec.replicas
+
+def scale(deployment_description, replicas):
+ # TODO: do real configuration
+ _configure_api()
+
+ namespace = deployment_description["namespace"]
+ name = deployment_description["deployment"]
+
+ # Get deployment spec
+ spec = client.ExtensionsV1beta1Api().read_namespaced_deployment(name, namespace)
+
+ # Update the replica count in the spec
+ spec.spec.replicas = replicas
+ client.ExtensionsV1beta1Api().patch_namespaced_deployment(name, namespace, spec)
+
diff --git a/k8s/k8splugin/__init__.py b/k8s/k8splugin/__init__.py
new file mode 100644
index 0000000..28306ee
--- /dev/null
+++ b/k8s/k8splugin/__init__.py
@@ -0,0 +1,30 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# REVIEW: Tried to source the version from here but you run into import issues
+# because "tasks" module is loaded. This method seems to be the PEP 396
+# recommended way and is listed #3 here https://packaging.python.org/single_source_version/
+# __version__ = '0.1.0'
+
+from .tasks import create_for_components, create_for_components_with_streams, \
+ create_and_start_container_for_components_with_streams, \
+ create_for_platforms, create_and_start_container, \
+ create_and_start_container_for_components, create_and_start_container_for_platforms, \
+ stop_and_remove_container, cleanup_discovery, policy_update, scale \ No newline at end of file
diff --git a/k8s/k8splugin/decorators.py b/k8s/k8splugin/decorators.py
new file mode 100644
index 0000000..186b212
--- /dev/null
+++ b/k8s/k8splugin/decorators.py
@@ -0,0 +1,102 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import copy
+from cloudify import ctx
+from cloudify.exceptions import NonRecoverableError, RecoverableError
+from dockering import utils as doc
+from k8splugin import discovery as dis
+from k8splugin.exceptions import DockerPluginDeploymentError, \
+ DockerPluginDependencyNotReadyError
+from k8splugin import utils
+
+
+def monkeypatch_loggers(task_func):
+ """Sets up the dependent loggers"""
+
+ def wrapper(**kwargs):
+ # Ouch! Monkeypatch loggers
+ doc.logger = ctx.logger
+ dis.logger = ctx.logger
+
+ return task_func(**kwargs)
+
+ return wrapper
+
+
+def wrap_error_handling_start(task_start_func):
+ """Wrap error handling for the start operations"""
+
+ def wrapper(**kwargs):
+ try:
+ return task_start_func(**kwargs)
+ except DockerPluginDependencyNotReadyError as e:
+ # You are here because something we need, like a working Docker host, is not
+ # available yet, so let Cloudify try again later.
+ raise RecoverableError(e)
+ except DockerPluginDeploymentError as e:
+ # Container failed to come up in the allotted time. This is deemed
+ # non-recoverable.
+ raise NonRecoverableError(e)
+ except Exception as e:
+ ctx.logger.error("Unexpected error while starting container: {0}"
+ .format(str(e)))
+ raise NonRecoverableError(e)
+
+ return wrapper
+
+
+def _wrapper_merge_inputs(task_func, properties, **kwargs):
+ """Merge Cloudify properties with input kwargs before calling task func"""
+ inputs = copy.deepcopy(properties)
+ # Recursively update
+ utils.update_dict(inputs, kwargs)
+
+ # Apparently kwargs contains "ctx" which is cloudify.context.CloudifyContext
+ # This has to be removed and not copied into runtime_properties else you get
+ # JSON serialization errors.
+ if "ctx" in inputs:
+ del inputs["ctx"]
+
+ return task_func(**inputs)
+
+def merge_inputs_for_create(task_create_func):
+ """Merge all inputs for start operation into one dict"""
+
+ # Needed to wrap the wrapper because I was seeing issues with
+ # "RuntimeError: No context set in current execution thread"
+ def wrapper(**kwargs):
+ # NOTE: ctx.node.properties is an ImmutableProperties instance which is
+ # why it is passed into a mutable dict so that it can be deep copied
+ return _wrapper_merge_inputs(task_create_func,
+ dict(ctx.node.properties), **kwargs)
+
+ return wrapper
+
+def merge_inputs_for_start(task_start_func):
+ """Merge all inputs for start operation into one dict"""
+
+ # Needed to wrap the wrapper because I was seeing issues with
+ # "RuntimeError: No context set in current execution thread"
+ def wrapper(**kwargs):
+ return _wrapper_merge_inputs(task_start_func,
+ ctx.instance.runtime_properties, **kwargs)
+
+ return wrapper
diff --git a/k8s/k8splugin/discovery.py b/k8s/k8splugin/discovery.py
new file mode 100644
index 0000000..f3b87b6
--- /dev/null
+++ b/k8s/k8splugin/discovery.py
@@ -0,0 +1,269 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+from functools import partial
+import json
+import logging
+import uuid
+import requests
+import consul
+import re
+
+
+logger = logging.getLogger("discovery")
+
+
+class DiscoveryError(RuntimeError):
+ pass
+
+class DiscoveryConnectionError(RuntimeError):
+ pass
+
+class DiscoveryServiceNotFoundError(RuntimeError):
+ pass
+
+class DiscoveryKVEntryNotFoundError(RuntimeError):
+ pass
+
+
+def _wrap_consul_call(consul_func, *args, **kwargs):
+ """Wrap Consul call to map errors"""
+ try:
+ return consul_func(*args, **kwargs)
+ except requests.exceptions.ConnectionError as e:
+ raise DiscoveryConnectionError(e)
+
+
+def generate_service_component_name(service_component_type):
+ """Generate service component id used to pass into the service component
+ instance and used as the key to the service component configuration.
+
+ Updated for use with Kubernetes. The service component name sometimes gets
+ used in Kubernetes in contexts (such as naming a Kubernetes Service) that
+ require the name to conform to the RFC1035 DNS "label" syntax:
+ -- starts with an alphabetic character
+ -- contains only alphanumerics and "-"
+ -- <= 63 characters long
+
+ Format:
+ s<service component id>-<service component type>,
+ truncated to 63 characters, "_" replaced with "-" in service_component_type,
+ other non-conforming characters removed from service_component_type
+ """
+ # Random UUID-based prefix; approach copied from the cdap plugin
+ sct = re.sub('[^A-Za-z0-9-]','',(service_component_type.replace('_','-')))
+ return ("s{0}-{1}".format(str(uuid.uuid4()).replace("-",""),sct))[:63]
+
+
+def create_kv_conn(host):
+ """Create connection to key-value store
+
+ Returns a Consul client to the specified Consul host"""
+ try:
+ [hostname, port] = host.split(":")
+ return consul.Consul(host=hostname, port=int(port))
+ except ValueError:
+ # No ":port" in the host string, so use the default Consul port
+ return consul.Consul(host=host)
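+
+# Illustrative calls (hypothetical hosts):
+#   create_kv_conn("consul.example.com:8500")  # explicit port
+#   create_kv_conn("consul")                   # default Consul port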
+
+def push_service_component_config(kv_conn, service_component_name, config):
+ config_string = config if isinstance(config, str) else json.dumps(config)
+ kv_put_func = partial(_wrap_consul_call, kv_conn.kv.put)
+
+ if kv_put_func(service_component_name, config_string):
+ logger.info("Added config for {0}".format(service_component_name))
+ else:
+ raise DiscoveryError("Failed to push configuration")
+
+def remove_service_component_config(kv_conn, service_component_name):
+ kv_delete_func = partial(_wrap_consul_call, kv_conn.kv.delete)
+ kv_delete_func(service_component_name)
+
+
+def get_kv_value(kv_conn, key):
+ """Get a key-value entry's value from Consul
+
+ Raises DiscoveryKVEntryNotFoundError if entry not found
+ """
+ kv_get_func = partial(_wrap_consul_call, kv_conn.kv.get)
+ (index, val) = kv_get_func(key)
+
+ if val:
+ return json.loads(val['Value']) # will raise ValueError if not JSON, let it propagate
+ else:
+ raise DiscoveryKVEntryNotFoundError("{0} kv entry not found".format(key))
+
+
+def _create_rel_key(service_component_name):
+ return "{0}:rel".format(service_component_name)
+
+def store_relationship(kv_conn, source_name, target_name):
+ # TODO: Rel entry may already exist in a one-to-many situation. Need to
+ # support that.
+ rel_key = _create_rel_key(source_name)
+ rel_value = [target_name] if target_name else []
+
+ kv_put_func = partial(_wrap_consul_call, kv_conn.kv.put)
+ kv_put_func(rel_key, json.dumps(rel_value))
+ logger.info("Added relationship for {0}".format(rel_key))
+
+def delete_relationship(kv_conn, service_component_name):
+ rel_key = _create_rel_key(service_component_name)
+ kv_get_func = partial(_wrap_consul_call, kv_conn.kv.get)
+ index, rels = kv_get_func(rel_key)
+
+ if rels:
+ rels = json.loads(rels["Value"].decode("utf-8"))
+ kv_delete_func = partial(_wrap_consul_call, kv_conn.kv.delete)
+ kv_delete_func(rel_key)
+ return rels
+ else:
+ return []
+
+def lookup_service(kv_conn, service_component_name):
+ catalog_get_func = partial(_wrap_consul_call, kv_conn.catalog.service)
+ index, results = catalog_get_func(service_component_name)
+
+ if results:
+ return results
+ else:
+ raise DiscoveryServiceNotFoundError("Failed to find: {0}".format(service_component_name))
+
+
+# TODO: Note these functions have been (for the most part) shamelessly lifted from
+# dcae-cli and should really be shared.
+
+def _is_healthy_pure(get_health_func, instance):
+ """Checks to see if a component instance is running healthy
+
+ Pure function edition
+
+ Args
+ ----
+ get_health_func: func(string) -> complex object
+ Look at unittests in test_discovery to see examples
+ instance: (string) fully qualified name of component instance
+
+ Returns
+ -------
+ True if instance has been found and is healthy else False
+ """
+ index, resp = get_health_func(instance)
+
+ if resp:
+ def is_passing(instance):
+ return all([check["Status"] == "passing" for check in instance["Checks"]])
+
+ return any([is_passing(instance) for instance in resp])
+ else:
+ return False
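+
+# For reference, a minimal "healthy" response shape, as exercised in
+# tests/test_discovery.py:
+#   get_health_func("some-component")
+#   => (0, [{"Checks": [{"Status": "passing"}]}])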
+
+def is_healthy(consul_host, instance):
+ """Checks to see if a component instance is running healthy
+
+ Impure function edition
+
+ Args
+ ----
+ consul_host: (string) host string of Consul
+ instance: (string) fully qualified name of component instance
+
+ Returns
+ -------
+ True if instance has been found and is healthy else False
+ """
+ cons = create_kv_conn(consul_host)
+
+ get_health_func = partial(_wrap_consul_call, cons.health.service)
+ return _is_healthy_pure(get_health_func, instance)
+
+
+def add_to_entry(conn, key, add_name, add_value):
+ """
+ Find 'key' in consul.
+ Treat its value as a JSON string representing a dict.
+ Extend the dict by adding an entry with key 'add_name' and value 'add_value'.
+ Turn the resulting extended dict into a JSON string.
+ Store the string back into Consul under 'key'.
+ Watch out for conflicting concurrent updates.
+
+ Example:
+ Key 'xyz:dmaap' has the value '{"feed00": {"feed_url" : "http://example.com/feeds/999"}}'
+ add_to_entry('xyz:dmaap', 'topic00', {'topic_url' : 'http://example.com/topics/1229'})
+ should result in the value for key 'xyz:dmaap' in consul being updated to
+ '{"feed00": {"feed_url" : "http://example.com/feeds/999"}, "topic00" : {"topic_url" : "http://example.com/topics/1229"}}'
+ """
+ while True: # do until update succeeds
+ (index, val) = conn.kv.get(key) # index gives version of key retrieved
+
+ if val is None: # no key yet
+ vstring = '{}'
+ mod_index = 0 # Use 0 as the cas index for initial insertion of the key
+ else:
+ vstring = val['Value']
+ mod_index = val['ModifyIndex']
+
+ # Build the updated dict
+ # Exceptions just propagate
+ v = json.loads(vstring)
+ v[add_name] = add_value
+ new_vstring = json.dumps(v)
+
+ updated = conn.kv.put(key, new_vstring, cas=mod_index) # if the key has changed since retrieval, this will return false
+ if updated:
+ return v
+
+
+def _find_matching_services(services, name_search, tags):
+ """Find matching services given search criteria"""
+ def is_match(service):
+ srv_name, srv_tags = service
+ return name_search in srv_name and \
+ all(map(lambda tag: tag in srv_tags, tags))
+
+ return [ srv[0] for srv in services.items() if is_match(srv) ]
+
+def search_services(conn, name_search, tags):
+ """Search for services that match criteria
+
+ Args:
+ -----
+ name_search: (string) Name to search for as a substring
+ tags: (list) List of strings that are tags. A service must match **all** the
+ tags in the list.
+
+ Returns:
+ --------
+ List of names of services that matched
+ """
+ # srvs is dict where key is service name and value is list of tags
+ catalog_get_services_func = partial(_wrap_consul_call, conn.catalog.services)
+ index, srvs = catalog_get_services_func()
+
+ if srvs:
+ matches = _find_matching_services(srvs, name_search, tags)
+
+ if matches:
+ return matches
+
+ raise DiscoveryServiceNotFoundError(
+ "No matches found: {0}, {1}".format(name_search, tags))
+ else:
+ raise DiscoveryServiceNotFoundError("No services found")
diff --git a/k8s/k8splugin/exceptions.py b/k8s/k8splugin/exceptions.py
new file mode 100644
index 0000000..0d8a341
--- /dev/null
+++ b/k8s/k8splugin/exceptions.py
@@ -0,0 +1,29 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+class DockerPluginDeploymentError(RuntimeError):
+ pass
+
+
+class DockerPluginDependencyNotReadyError(RuntimeError):
+ """Error to use when something that this plugin depends upon e.g. docker api,
+ consul is not ready"""
+ pass
+
diff --git a/k8s/k8splugin/tasks.py b/k8s/k8splugin/tasks.py
new file mode 100644
index 0000000..1718274
--- /dev/null
+++ b/k8s/k8splugin/tasks.py
@@ -0,0 +1,682 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# Lifecycle interface calls for containerized components
+
+# Needed by Cloudify Manager to load google.auth for the Kubernetes python client
+import cloudify_importer
+
+import time, copy
+from cloudify import ctx
+from cloudify.decorators import operation
+from cloudify.exceptions import NonRecoverableError, RecoverableError
+import dockering as doc
+from onap_dcae_dcaepolicy_lib import Policies
+from k8splugin import discovery as dis
+from k8splugin.decorators import monkeypatch_loggers, wrap_error_handling_start, \
+ merge_inputs_for_start, merge_inputs_for_create
+from k8splugin.exceptions import DockerPluginDeploymentError
+from k8splugin import utils
+from configure import configure
+from k8sclient import k8sclient
+
+# Get configuration
+plugin_conf = configure.configure()
+CONSUL_HOST = plugin_conf.get("consul_host")
+CONSUL_INTERNAL_NAME = plugin_conf.get("consul_dns_name")
+DCAE_NAMESPACE = plugin_conf.get("namespace")
+
+# Used to construct delivery urls for data router subscribers. Data router in FTL
+# requires https, but ONAP is expected to default to http.
+DEFAULT_SCHEME = "http"
+
+# Property keys
+SERVICE_COMPONENT_NAME = "service_component_name"
+CONTAINER_ID = "container_id"
+APPLICATION_CONFIG = "application_config"
+
+
+
+# Utility methods
+
+# Lifecycle interface calls for dcae.nodes.DockerContainer
+
+def _setup_for_discovery(**kwargs):
+ """Setup for config discovery"""
+ try:
+ name = kwargs['name']
+ application_config = kwargs[APPLICATION_CONFIG]
+
+ # NOTE: application_config is no longer a JSON string; it is input as a
+ # YAML map, which translates to a dict, so no preprocessing is needed.
+ conn = dis.create_kv_conn(CONSUL_HOST)
+ dis.push_service_component_config(conn, name, application_config)
+ return kwargs
+ except dis.DiscoveryConnectionError as e:
+ raise RecoverableError(e)
+ except Exception as e:
+ ctx.logger.error("Unexpected error while pushing configuration: {0}"
+ .format(str(e)))
+ raise NonRecoverableError(e)
+
+def _generate_component_name(**kwargs):
+ """Generate component name"""
+ service_component_type = kwargs['service_component_type']
+ name_override = kwargs['service_component_name_override']
+
+ kwargs['name'] = name_override if name_override \
+ else dis.generate_service_component_name(service_component_type)
+ return kwargs
+
+def _done_for_create(**kwargs):
+ """Wrap up create operation"""
+ name = kwargs['name']
+ kwargs[SERVICE_COMPONENT_NAME] = name
+ # All updates to the runtime_properties happen here. I don't see a reason
+ # why we shouldn't do this: the context is not being mutated by anything
+ # else, and it keeps the other functions pure (pure in the sense of not
+ # dealing with CloudifyContext).
+ ctx.instance.runtime_properties.update(kwargs)
+ ctx.logger.info("Done setting up: {0}".format(name))
+ return kwargs
+
+
+@merge_inputs_for_create
+@monkeypatch_loggers
+@Policies.gather_policies_to_node()
+@operation
+def create_for_components(**create_inputs):
+ """Create step for Docker containers that are components
+
+ This interface is responsible for:
+
+ 1. Generating service component name
+ 2. Populating config information into Consul
+ """
+ _done_for_create(
+ **_setup_for_discovery(
+ **_generate_component_name(
+ **create_inputs)))
+
+
+def _parse_streams(**kwargs):
+ """Parse streams and setup for DMaaP plugin"""
+ # The DMaaP plugin requires this plugin to set the runtime properties
+ # keyed by the node name.
+ def setup_publishes(s):
+ kwargs[s["name"]] = s
+
+ map(setup_publishes, kwargs["streams_publishes"])
+
+ def setup_subscribes(s):
+ if s["type"] == "data_router":
+ # If a username and password have not been provided, generate them. The
+ # DMaaP plugin doesn't generate credentials for subscribers. The generation
+ # code and the username/password lengths are lifted from the DMaaP plugin.
+
+ # Don't want to mutate the source
+ s = copy.deepcopy(s)
+ if not s.get("username", None):
+ s["username"] = utils.random_string(8)
+ if not s.get("password", None):
+ s["password"] = utils.random_string(10)
+
+ kwargs[s["name"]] = s
+
+ # NOTE: The delivery url is constructed and set up in the start operation
+ map(setup_subscribes, kwargs["streams_subscribes"])
+
+ return kwargs
+
+def _setup_for_discovery_streams(**kwargs):
+ """Setup for discovery of streams
+
+ Specifically, there's a race condition this call addresses for the data
+ router subscriber case. The component needs its feed subscriber information,
+ but the DMaaP plugin doesn't provide this until after the docker plugin's
+ start operation.
+ """
+ dr_subs = [kwargs[s["name"]] for s in kwargs["streams_subscribes"] \
+ if s["type"] == "data_router"]
+
+ if dr_subs:
+ dmaap_kv_key = "{0}:dmaap".format(kwargs["name"])
+ conn = dis.create_kv_conn(CONSUL_HOST)
+
+ def add_feed(dr_sub):
+ # delivery url and subscriber id will be filled in by the dmaap plugin later
+ v = { "location": dr_sub["location"], "delivery_url": None,
+ "username": dr_sub["username"], "password": dr_sub["password"],
+ "subscriber_id": None }
+ return dis.add_to_entry(conn, dmaap_kv_key, dr_sub["name"], v) is not None
+
+ try:
+ if all(map(add_feed, dr_subs)):
+ return kwargs
+ except Exception as e:
+ raise NonRecoverableError(e)
+
+ # You should never get here
+ raise NonRecoverableError("Failure updating feed streams in Consul")
+ else:
+ return kwargs
+
+
+@merge_inputs_for_create
+@monkeypatch_loggers
+@Policies.gather_policies_to_node()
+@operation
+def create_for_components_with_streams(**create_inputs):
+ """Create step for Docker containers that are components that use DMaaP
+
+ This interface is responsible for:
+
+ 1. Generating service component name
+ 2. Setting up runtime properties for the DMaaP plugin
+ 3. Populating application config into Consul
+ 4. Populating DMaaP config for data router subscribers in Consul
+ """
+ _done_for_create(
+ **_setup_for_discovery(
+ **_setup_for_discovery_streams(
+ **_parse_streams(
+ **_generate_component_name(
+ **create_inputs)))))
+
+
+@merge_inputs_for_create
+@monkeypatch_loggers
+@operation
+def create_for_platforms(**create_inputs):
+ """Create step for Docker containers that are platform components
+
+ This interface is responsible for:
+
+ 1. Populating config information into Consul
+ """
+ _done_for_create(
+ **_setup_for_discovery(
+ **create_inputs))
+
+
+def _lookup_service(service_component_name, consul_host=CONSUL_HOST,
+ with_port=False):
+ conn = dis.create_kv_conn(consul_host)
+ results = dis.lookup_service(conn, service_component_name)
+
+ if with_port:
+ # Just grab first
+ result = results[0]
+ return "{address}:{port}".format(address=result["ServiceAddress"],
+ port=result["ServicePort"])
+ else:
+ return results[0]["ServiceAddress"]
+
+
+def _verify_container(service_component_name, max_wait):
+ """Verify that the container is healthy
+
+ Args:
+ -----
+ max_wait (integer): limit on how many attempts to make, which translates to
+ seconds because each sleep is one second. 0 means wait forever.
+
+ Return:
+ -------
+ True if the component is healthy; otherwise a DockerPluginDeploymentError
+ exception is raised.
+ """
+ num_attempts = 1
+
+ while True:
+ if k8sclient.is_available(DCAE_NAMESPACE, service_component_name):
+ return True
+ else:
+ num_attempts += 1
+
+ if max_wait > 0 and max_wait < num_attempts:
+ raise DockerPluginDeploymentError("Container never became healthy")
+
+ time.sleep(1)
+
+
+def _create_and_start_container(container_name, image, **kwargs):
+ '''
+ This will create a k8s Deployment and, if needed, a k8s Service or two.
+ (We are being opinionated in our use of k8s... this code decides what k8s abstractions and features to use.
+ We're not exposing k8s to the component developer and the blueprint author.
+ This is a conscious choice. We want to use k8s in a controlled, consistent way, and we want to hide
+ the details from the component developer and the blueprint author.)
+
+ kwargs may have:
+ - volumes: array of volume objects, where a volume object is:
+ {"host":{"path": "/path/on/host"}, "container":{"bind":"/path/on/container","mode":"rw_or_ro"}
+ - ports: array of strings in the form "container_port:host_port"
+ - env: map of name-value pairs ( {name0: value0, name1: value1...} )
+ - always_pull: boolean. If true, sets the image pull policy to "Always",
+ so that a fresh copy of the image is always pulled. Otherwise, sets the
+ image pull policy to "IfNotPresent"
+ - msb_list: array of msb objects, where an msb object is as described in msb/msb.py.
+ - log_info: an object with info for setting up ELK logging, with the form:
+ {"log_directory": "/path/to/container/log/directory", "alternate_fb_path" : "/alternate/sidecar/log/path"}"
+ - replicas: number of replicas to be launched initially
+ '''
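+ # Illustrative kwargs (hypothetical values):
+ #   {"volumes": [{"host": {"path": "/var/log"},
+ #                 "container": {"bind": "/opt/app/logs", "mode": "rw"}}],
+ #    "ports": ["8080:30080"],
+ #    "env": {"SOME_VAR": "some-value"},
+ #    "replicas": 2}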
+ env = { "CONSUL_HOST": CONSUL_INTERNAL_NAME,
+ "CONFIG_BINDING_SERVICE": "config-binding-service" }
+ env.update(kwargs.get("env", {}))
+ ctx.logger.info("Deploying {}, image: {}, env: {}, kwargs: {}".format(container_name, image, env, kwargs))
+ ctx.logger.info("Passing k8sconfig: {}".format(plugin_conf))
+ replicas = kwargs.get("replicas", 1)
+ _,dep = k8sclient.deploy(DCAE_NAMESPACE,
+ container_name,
+ image,
+ replicas = replicas,
+ always_pull=kwargs.get("always_pull_image", False),
+ k8sconfig=plugin_conf,
+ volumes=kwargs.get("volumes",[]),
+ ports=kwargs.get("ports",[]),
+ msb_list=kwargs.get("msb_list"),
+ env = env,
+ labels = kwargs.get("labels", {}),
+ log_info=kwargs.get("log_info"))
+
+ # Capture the result of deployment for future use
+ ctx.instance.runtime_properties["k8s_deployment"] = dep
+ ctx.instance.runtime_properties["replicas"] = replicas
+ ctx.logger.info("Deployment complete: {0}".format(dep))
+
+def _parse_cloudify_context(**kwargs):
+ """Parse Cloudify context
+
+ Extract what is needed. This is an impure function because it requires ctx.
+ """
+ kwargs["deployment_id"] = ctx.deployment.id
+
+ # Set some labels for the Kubernetes pods
+ kwargs["labels"] = {
+ "cfydeployment" : ctx.deployment.id,
+ "cfynode": ctx.node.name,
+ "cfynodeinstance": ctx.instance.id
+ }
+
+ # Pick up the centralized logging info
+ if "log_info" in ctx.node.properties and "log_directory" in ctx.node.properties["log_info"]:
+ kwargs["log_info"] = ctx.node.properties["log_info"]
+
+ # Pick up replica count and always_pull_image flag
+ if "replicas" in ctx.node.properties:
+ kwargs["replicas"] = ctx.node.properties["replicas"]
+ if "always_pull_image" in ctx.node.properties:
+ kwargs["always_pull_image"] = ctx.node.properties["always_pull_image"]
+
+ return kwargs
+
+def _enhance_docker_params(**kwargs):
+ """Setup Docker envs"""
+ docker_config = kwargs.get("docker_config", {})
+
+ envs = kwargs.get("envs", {})
+ # NOTE: Healthchecks are optional until we're prepared to handle use cases
+ # that don't necessarily use http
+ envs_healthcheck = doc.create_envs_healthcheck(docker_config) \
+ if "healthcheck" in docker_config else {}
+ envs.update(envs_healthcheck)
+
+ # Set tags on this component for its Consul registration as a service
+ tags = [kwargs.get("deployment_id", None), kwargs["service_id"]]
+ tags = [ str(tag) for tag in tags if tag is not None ]
+ # Registrator will use this to register this component with tags. Must be
+ # comma delimited.
+ envs["SERVICE_TAGS"] = ",".join(tags)
+
+ kwargs["envs"] = envs
+
+ def combine_params(key, docker_config, kwargs):
+ v = docker_config.get(key, []) + kwargs.get(key, [])
+ if v:
+ kwargs[key] = v
+ return kwargs
+
+ # Add the lists of ports and volumes unintelligently - meaning just add the
+ # lists together with no deduping.
+ kwargs = combine_params("ports", docker_config, kwargs)
+ kwargs = combine_params("volumes", docker_config, kwargs)
+
+
+ return kwargs
+
+def _create_and_start_component(**kwargs):
+ """Create and start component (container)"""
+ image = kwargs["image"]
+ service_component_name = kwargs[SERVICE_COMPONENT_NAME]
+ # Need to be picky and manually select pieces, because passing kwargs
+ # wholesale (it contains everything) confuses _create_and_start_container
+ # with duplicate variables
+ sub_kwargs = {
+ "volumes": kwargs.get("volumes", []),
+ "ports": kwargs.get("ports", None),
+ "envs": kwargs.get("envs", {}),
+ "log_info": kwargs.get("log_info", {}),
+ "labels": kwargs.get("labels", {})}
+ _create_and_start_container(service_component_name, image, **sub_kwargs)
+
+ # TODO: Use regular logging here
+ ctx.logger.info("Container started: {0}".format(service_component_name))
+
+ return kwargs
+
+def _verify_component(**kwargs):
+ """Verify component (container) is healthy"""
+ service_component_name = kwargs[SERVICE_COMPONENT_NAME]
+ # TODO: "Consul doesn't make its first health check immediately upon registration.
+ # Instead it waits for the health check interval to pass."
+ # Possible enhancement is to read the interval (and possibly the timeout) from
+ # docker_config and multiply that by a number to come up with a more suitable
+ # max_wait.
+
+ max_wait = kwargs.get("max_wait", 300)
+
+ # Verify that the container is healthy
+
+ if _verify_container(service_component_name, max_wait):
+
+ # TODO: Use regular logging here
+ ctx.logger.info("Container is healthy: {0}".format(service_component_name))
+
+ return kwargs
+
+def _done_for_start(**kwargs):
+ ctx.instance.runtime_properties.update(kwargs)
+ ctx.logger.info("Done starting: {0}".format(kwargs["name"]))
+ return kwargs
+
+def _setup_msb_registration(service_name, msb_reg):
+ return {
+ "serviceName" : service_name,
+ "port" : msb_reg.get("port", "80"),
+ "version" : msb_reg.get("version", "v1"),
+ "url" : msb_reg.get("url_path", "/v1"),
+ "protocol" : "REST",
+ "enable_ssl" : msb_reg.get("uses_ssl", False),
+ "visualRange" : "1"
+}
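+
+# Illustrative mapping (hypothetical values): an msb_registration property of
+#   {"port": 8080, "version": "v1", "url_path": "/myapi", "uses_ssl": False}
+# for service "my-service" becomes
+#   {"serviceName": "my-service", "port": 8080, "version": "v1",
+#    "url": "/myapi", "protocol": "REST", "enable_ssl": False, "visualRange": "1"}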
+
+@wrap_error_handling_start
+@merge_inputs_for_start
+@monkeypatch_loggers
+@operation
+def create_and_start_container_for_components(**start_inputs):
+ """Create Docker container and start for components
+
+ This operation method is to be used with the DockerContainerForComponents
+ node type. After launching the container, the plugin will verify with Consul
+ that the app is up and healthy before terminating.
+ """
+ _done_for_start(
+ **_verify_component(
+ **_create_and_start_component(
+ **_enhance_docker_params(
+ **_parse_cloudify_context(**start_inputs)))))
+
+
+def _update_delivery_url(**kwargs):
+ """Update the delivery url for data router subscribers"""
+ dr_subs = [kwargs[s["name"]] for s in kwargs["streams_subscribes"] \
+ if s["type"] == "data_router"]
+
+ if dr_subs:
+ service_component_name = kwargs[SERVICE_COMPONENT_NAME]
+ # TODO: Should NOT be setting up the delivery url with IP addresses;
+ # in the https case this will not work, because data router validates
+ # the certificate against the FQDN.
+ subscriber_host = _lookup_service(service_component_name, with_port=True)
+
+ for dr_sub in dr_subs:
+ scheme = dr_sub["scheme"] if "scheme" in dr_sub else DEFAULT_SCHEME
+ if "route" not in dr_sub:
+ raise NonRecoverableError("'route' key missing from data router subscriber")
+ path = dr_sub["route"]
+ dr_sub["delivery_url"] = "{scheme}://{host}/{path}".format(
+ scheme=scheme, host=subscriber_host, path=path)
+ kwargs[dr_sub["name"]] = dr_sub
+
+ return kwargs
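+
+# Illustrative result (hypothetical values): with subscriber host
+# "192.168.1.1:80" and a subscriber entry containing {"route": "eventlog"},
+# the computed delivery_url would be "http://192.168.1.1:80/eventlog".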
+
+@wrap_error_handling_start
+@merge_inputs_for_start
+@monkeypatch_loggers
+@operation
+def create_and_start_container_for_components_with_streams(**start_inputs):
+ """Create Docker container and start for components that have streams
+
+ This operation method is to be used with the DockerContainerForComponents
+ node type. After launching the container, the plugin will verify with Consul
+ that the app is up and healthy before terminating.
+ """
+ _done_for_start(
+ **_update_delivery_url(
+ **_verify_component(
+ **_create_and_start_component(
+ **_enhance_docker_params(
+ **_parse_cloudify_context(**start_inputs))))))
+
+
+@wrap_error_handling_start
+@monkeypatch_loggers
+@operation
+def create_and_start_container_for_platforms(**kwargs):
+ """Create Docker container and start for platform services
+
+ This operation method is to be used with the ContainerizedPlatformComponent
+ node type.
+ """
+ # Capture node properties
+ image = ctx.node.properties["image"]
+ docker_config = ctx.node.properties.get("docker_config", {})
+ if "dns_name" in ctx.node.properties:
+ service_component_name = ctx.node.properties["dns_name"]
+ else:
+ service_component_name = ctx.node.properties["name"]
+
+
+ envs = kwargs.get("envs", {})
+ # NOTE: Healthchecks are optional until we're prepared to handle use cases
+ # that don't necessarily use http
+ envs_healthcheck = doc.create_envs_healthcheck(docker_config) \
+ if "healthcheck" in docker_config else {}
+ envs.update(envs_healthcheck)
+ kwargs["envs"] = envs
+
+ # Set some labels for the Kubernetes pods
+ kwargs["labels"] = {
+ "cfydeployment" : ctx.deployment.id,
+ "cfynode": ctx.node.name,
+ "cfynodeinstance": ctx.instance.id
+ }
+
+ host_port = ctx.node.properties["host_port"]
+ container_port = ctx.node.properties["container_port"]
+
+ # Cloudify properties are all required, and Cloudify complains that None
+ # is not a valid type for integer. A value of 0 means "don't set a
+ # specific port mapping", e.g. for cases like the service change handler.
+ if container_port != 0:
+ # Doing this because other nodes might want to use this property
+ port_mapping = "{cp}:{hp}".format(cp=container_port, hp=host_port)
+ ports = kwargs.get("ports", []) + [ port_mapping ]
+ kwargs["ports"] = ports
+ if "ports" not in kwargs:
+ ctx.logger.warn("No port mappings defined. Will randomly assign port.")
+
+ # All of the new node properties could be handled more DRYly!
+ # If a registration to MSB is required, then set up the registration info
+ if "msb_registration" in ctx.node.properties and "port" in ctx.node.properties["msb_registration"]:
+ kwargs["msb_list"] = [_setup_msb_registration(service_component_name, ctx.node.properties["msb_registration"])]
+
+ # If centralized logging via ELK is desired, then set up the logging info
+ if "log_info" in ctx.node.properties and "log_directory" in ctx.node.properties["log_info"]:
+ kwargs["log_info"] = ctx.node.properties["log_info"]
+
+ # Pick up replica count and always_pull_image flag
+ if "replicas" in ctx.node.properties:
+ kwargs["replicas"] = ctx.node.properties["replicas"]
+ if "always_pull_image" in ctx.node.properties:
+ kwargs["always_pull_image"] = ctx.node.properties["always_pull_image"]
+ _create_and_start_container(service_component_name, image, **kwargs)
+
+ ctx.logger.info("Container started: {0}".format(service_component_name))
+
+ # Verify that the container is healthy
+
+ max_wait = kwargs.get("max_wait", 300)
+
+ if _verify_container(service_component_name, max_wait):
+ ctx.logger.info("Container is healthy: {0}".format(service_component_name))
+
+
+@wrap_error_handling_start
+@monkeypatch_loggers
+@operation
+def create_and_start_container(**kwargs):
+ """Create Docker container and start"""
+ service_component_name = ctx.node.properties["name"]
+ ctx.instance.runtime_properties[SERVICE_COMPONENT_NAME] = service_component_name
+
+ image = ctx.node.properties["image"]
+
+ _create_and_start_container(service_component_name, image, **kwargs)
+
+ ctx.logger.info("Component deployed: {0}".format(service_component_name))
+
+
+@monkeypatch_loggers
+@operation
+def stop_and_remove_container(**kwargs):
+ """Stop and remove Docker container"""
+ try:
+ deployment_description = ctx.instance.runtime_properties["k8s_deployment"]
+ k8sclient.undeploy(deployment_description)
+
+ except Exception as e:
+ ctx.logger.error("Unexpected error while stopping container: {0}"
+ .format(str(e)))
+
+@monkeypatch_loggers
+@operation
+def scale(replicas, **kwargs):
+ """Change number of replicas in the deployment"""
+ if replicas > 0:
+ current_replicas = ctx.instance.runtime_properties["replicas"]
+ ctx.logger.info("Scaling from {0} to {1}".format(current_replicas, replicas))
+ try:
+ deployment_description = ctx.instance.runtime_properties["k8s_deployment"]
+ k8sclient.scale(deployment_description, replicas)
+ ctx.instance.runtime_properties["replicas"] = replicas
+ except Exception as e:
+ ctx.logger.error("Unexpected error while scaling: {0}".format(str(e)))
+ else:
+ ctx.logger.info("Ignoring request to scale to zero replicas")
+
+@monkeypatch_loggers
+@Policies.cleanup_policies_on_node
+@operation
+def cleanup_discovery(**kwargs):
+ """Delete configuration from Consul"""
+ service_component_name = ctx.instance.runtime_properties[SERVICE_COMPONENT_NAME]
+
+ try:
+ conn = dis.create_kv_conn(CONSUL_HOST)
+ dis.remove_service_component_config(conn, service_component_name)
+ except dis.DiscoveryConnectionError as e:
+ raise RecoverableError(e)
+
+
+def _notify_container(**kwargs):
+ """Notify container using the policy section in the docker_config"""
+ dc = kwargs["docker_config"]
+
+ if "policy" in dc:
+ if dc["policy"]["trigger_type"] == "docker":
+ pass
+ """
+ Need replacement for this in kubernetes.
+ Need to find all the pods that have been deployed
+ and execute the script in them.
+ Kubernetes does not appear to have a way to ask for a script
+ to be executed in all of the currently running pods for a
+ Kubernetes Deployment or ReplicaSet. We will have to find
+ each of them and run the script. The problem is that set of
+ pods could be changing. We can query to get all the pods, but
+ there's no guarantee the list won't change while we're trying to
+ execute the script.
+
+ In ONAP R2, all of the policy-driven components rely on polling.
+ """
+ """
+ # REVIEW: Need to finalize on the docker config policy data structure
+ script_path = dc["policy"]["script_path"]
+ updated_policies = kwargs["updated_policies"]
+ removed_policies = kwargs["removed_policies"]
+ policies = kwargs["policies"]
+ cmd = doc.build_policy_update_cmd(script_path, use_sh=False,
+ msg_type="policies",
+ updated_policies=updated_policies,
+ removed_policies=removed_policies,
+ policies=policies
+ )
+
+ docker_host = kwargs[SELECTED_CONTAINER_DESTINATION]
+ docker_host_ip = _lookup_service(docker_host)
+ logins = _get_docker_logins()
+ client = doc.create_client(docker_host_ip, DOCKER_PORT, logins=logins)
+
+ container_id = kwargs["container_id"]
+
+ doc.notify_for_policy_update(client, container_id, cmd)
+ """
+ # else the default is no trigger
+
+ return kwargs
+
+
+@monkeypatch_loggers
+@Policies.update_policies_on_node()
+@operation
+def policy_update(updated_policies, removed_policies=None, policies=None, **kwargs):
+ """Policy update task
+
+ This method is responsible for updating the application configuration and
+ notifying the applications that the change has occurred. This is to be used
+ for the dcae.interfaces.policy.policy_update operation.
+
+ :updated_policies: contains the list of changed policy-configs when configs_only=True
+ (default). Use configs_only=False to bring the full policy objects in :updated_policies:.
+ """
+ update_inputs = copy.deepcopy(ctx.instance.runtime_properties)
+ update_inputs["updated_policies"] = updated_policies
+ update_inputs["removed_policies"] = removed_policies
+ update_inputs["policies"] = policies
+
+ _notify_container(**update_inputs)
diff --git a/k8s/k8splugin/utils.py b/k8s/k8splugin/utils.py
new file mode 100644
index 0000000..c45af68
--- /dev/null
+++ b/k8s/k8splugin/utils.py
@@ -0,0 +1,43 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import string
+import random
+import collections
+
+
+def random_string(n):
+ """Random generate an ascii string of "n" length"""
+ corpus = string.ascii_lowercase + string.ascii_uppercase + string.digits
+ return ''.join(random.choice(corpus) for x in range(n))
+
+
+def update_dict(d, u):
+ """Recursively updates dict
+
+ Update dict d with dict u
+ """
+ for k, v in u.iteritems():
+ if isinstance(v, collections.Mapping):
+ r = update_dict(d.get(k, {}), v)
+ d[k] = r
+ else:
+ d[k] = u[k]
+ return d
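+
+# Example of the recursive merge (mirrors the behavior exercised in
+# tests/test_decorators.py):
+#   update_dict({"nested": {"a": 123, "b": 456}}, {"nested": {"a": 789}})
+#   => {"nested": {"a": 789, "b": 456}}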
diff --git a/k8s/msb/__init__.py b/k8s/msb/__init__.py
new file mode 100644
index 0000000..58d8f1c
--- /dev/null
+++ b/k8s/msb/__init__.py
@@ -0,0 +1,19 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
diff --git a/k8s/msb/msb.py b/k8s/msb/msb.py
new file mode 100644
index 0000000..5883f29
--- /dev/null
+++ b/k8s/msb/msb.py
@@ -0,0 +1,64 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+
+import uuid
+import json
+
+MSB_ANNOTATION_KEY = 'msb.onap.org/service-info'
+
+def _sanitize_service_info(service_info):
+ '''
+ Sanitize a dict containing the MSB annotation parameters for registering
+ a service with MSB. (Not bullet proof, but useful.)
+ MSB registration happens asynchronously from the installation flow: an MSB process
+ watches the k8s event stream for new Service creations and looks for the MSB annotation.
+ A bad annotation will fail silently. This sanitization process should make sure that something
+ gets put into the MSB's list of services, so that the problem can be seen.
+
+ service_info is a dict containing the MSB annotation parameters.
+ -- serviceName: the name under which the service is to be registered (default: random--pretty useless!)
+ -- port: the container port on which the service can be contacted (default: "80"--nearly as useless)
+ -- version: the API version (default: "v1")
+ -- url: the path to the application's API endpoint (default: "/")
+ -- protocol: see the MSB documentation--the default is usually OK (default: "REST")
+ -- enable_ssl: a flag indicating if the service uses SSL (True) or not (False) (default: False)
+ -- visualRange: "1" means the service is exposed only in ONAP, "0" means externally (default: "1")
+ (Note this is a string value)
+ '''
+ return {
+ 'serviceName': service_info.get('serviceName', str(uuid.uuid4())),
+ 'port': str(service_info.get('port', '80')),
+ 'version': service_info.get('version','v1'),
+ 'url': service_info.get('url','/'),
+ 'protocol': service_info.get('protocol','REST'),
+ 'enable_ssl': bool(service_info.get('enable_ssl', False)),
+ 'visualRange': str(service_info.get('visualRange', '1'))
+ }
+
+def create_msb_annotation(msb_service_list):
+ '''
+ Creates an annotation that can be added to a k8s Service to trigger
+ registration with MSB.
+ msb_service_list is a list of dicts, each containing MSB registration information
+ for a service. (One k8s Service can have multiple ports, each of which can be
+ registered as an MSB service.)
+ '''
+ return {MSB_ANNOTATION_KEY : json.dumps([_sanitize_service_info(service_info) for service_info in msb_service_list])}
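+
+# Illustrative result (hypothetical values; key order in the JSON may vary):
+#   create_msb_annotation([{"serviceName": "my-svc", "port": 8080}])
+#   => {"msb.onap.org/service-info":
+#        '[{"serviceName": "my-svc", "port": "8080", "version": "v1",
+#           "url": "/", "protocol": "REST", "enable_ssl": false,
+#           "visualRange": "1"}]'}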
diff --git a/k8s/pom.xml b/k8s/pom.xml
new file mode 100644
index 0000000..4f09ef5
--- /dev/null
+++ b/k8s/pom.xml
@@ -0,0 +1,165 @@
+<?xml version="1.0"?>
+<!--
+================================================================================
+Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
+
+ECOMP is a trademark and service mark of AT&T Intellectual Property.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.onap.dcaegen2.platform</groupId>
+ <artifactId>plugins</artifactId>
+ <version>1.1.0-SNAPSHOT</version>
+ </parent>
+ <groupId>org.onap.dcaegen2.platform.plugins</groupId>
+ <artifactId>k8s</artifactId>
+ <name>k8s-plugin</name>
+ <version>1.0.0-SNAPSHOT</version>
+ <url>http://maven.apache.org</url>
+ <properties>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <sonar.sources>.</sonar.sources>
+ <sonar.junit.reportsPath>xunit-results.xml</sonar.junit.reportsPath>
+ <sonar.python.coverage.reportPath>coverage.xml</sonar.python.coverage.reportPath>
+ <sonar.language>py</sonar.language>
+ <sonar.pluginName>Python</sonar.pluginName>
+ <sonar.inclusions>**/*.py</sonar.inclusions>
+ <sonar.exclusions>tests/*,setup.py</sonar.exclusions>
+ </properties>
+ <build>
+ <finalName>${project.artifactId}-${project.version}</finalName>
+ <plugins>
+ <!-- plugin>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <version>2.4.1</version>
+ <configuration>
+ <descriptors>
+ <descriptor>assembly/dep.xml</descriptor>
+ </descriptors>
+ </configuration>
+ <executions>
+ <execution>
+ <id>make-assembly</id>
+ <phase>package</phase>
+ <goals>
+ <goal>single</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin -->
+ <!-- now we configure custom action (calling a script) at various lifecycle phases -->
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <version>1.2.1</version>
+ <executions>
+ <execution>
+ <id>clean phase script</id>
+ <phase>clean</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>clean</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>generate-sources script</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>generate-sources</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>compile script</id>
+ <phase>compile</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>compile</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>package script</id>
+ <phase>package</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>package</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>test script</id>
+ <phase>test</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>test</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>install script</id>
+ <phase>install</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>install</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>deploy script</id>
+ <phase>deploy</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>deploy</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/k8s/requirements.txt b/k8s/requirements.txt
new file mode 100644
index 0000000..d107559
--- /dev/null
+++ b/k8s/requirements.txt
@@ -0,0 +1,6 @@
+python-consul>=0.6.0,<1.0.0
+uuid==1.30
+onap-dcae-dockering==1.4.0
+onap-dcae-dcaepolicy-lib==2.1.0
+kubernetes==4.0.0
+cloudify-plugins-common==3.4 \ No newline at end of file
diff --git a/k8s/setup.py b/k8s/setup.py
new file mode 100644
index 0000000..f160777
--- /dev/null
+++ b/k8s/setup.py
@@ -0,0 +1,39 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+from setuptools import setup
+
+setup(
+ name='k8splugin',
+ description='Cloudify plugin for containerized components deployed using Kubernetes',
+ version="1.0.0",
+ author='J. F. Lucas, Michael Hwang, Tommy Carpenter',
+ packages=['k8splugin','k8sclient','msb','configure'],
+ zip_safe=False,
+ install_requires=[
+ "python-consul>=0.6.0,<1.0.0",
+ "uuid==1.30",
+ "onap-dcae-dockering>=1.0.0,<2.0.0",
+ "onap-dcae-dcaepolicy-lib>=2.1.0,<3.0.0",
+ "cloudify-plugins-common==3.4",
+ "cloudify-python-importer==0.1.0",
+ "kubernetes==4.0.0"
+ ]
+)
diff --git a/k8s/tests/conftest.py b/k8s/tests/conftest.py
new file mode 100644
index 0000000..572a510
--- /dev/null
+++ b/k8s/tests/conftest.py
@@ -0,0 +1,34 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import pytest
+
+@pytest.fixture()
+def mockconfig(monkeypatch):
+ """ Override the regular configure() routine that reads a file and calls Consul"""
+ def altconfig():
+ return {
+ "consul_host": "consul",
+ "namespace":"dcae",
+ "consul_dns_name" : "consul",
+ "image_pull_secrets" : []
+ }
+ from configure import configure
+ monkeypatch.setattr(configure, 'configure', altconfig) \ No newline at end of file
diff --git a/k8s/tests/test_decorators.py b/k8s/tests/test_decorators.py
new file mode 100644
index 0000000..552fa4b
--- /dev/null
+++ b/k8s/tests/test_decorators.py
@@ -0,0 +1,34 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+def test_wrapper_merge_inputs(mockconfig):
+ from k8splugin import decorators as dec
+
+ properties = { "app_config": {"nested": { "a": 123, "b": 456 }, "foo": "duh"},
+ "image": "some-docker-image" }
+ kwargs = { "app_config": {"nested": {"a": 789, "c": "zyx"}} }
+
+ def task_func(**inputs):
+ return inputs
+
+ expected = { "app_config": {"nested": { "a": 789, "b": 456, "c": "zyx" },
+ "foo": "duh"}, "image": "some-docker-image" }
+
+ assert expected == dec._wrapper_merge_inputs(task_func, properties, **kwargs) \ No newline at end of file
diff --git a/k8s/tests/test_discovery.py b/k8s/tests/test_discovery.py
new file mode 100644
index 0000000..24e45ee
--- /dev/null
+++ b/k8s/tests/test_discovery.py
@@ -0,0 +1,71 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import pytest
+from functools import partial
+import requests
+
+def test_wrap_consul_call(mockconfig):
+ from k8splugin import discovery as dis
+
+ def foo(a, b, c="default"):
+ return " ".join([a, b, c])
+
+ wrapped_foo = partial(dis._wrap_consul_call, foo)
+ assert wrapped_foo("hello", "world") == "hello world default"
+ assert wrapped_foo("hello", "world", c="new masters") == "hello world new masters"
+
+ def foo_connection_error(a, b, c):
+ raise requests.exceptions.ConnectionError("simulate failed connection")
+
+ wrapped_foo = partial(dis._wrap_consul_call, foo_connection_error)
+ with pytest.raises(dis.DiscoveryConnectionError):
+ wrapped_foo("a", "b", "c")
+
+def test_generate_service_component_name(mockconfig):
+ from k8splugin import discovery as dis
+
+ component_type = "some-component-type"
+ name = dis.generate_service_component_name(component_type)
+ assert name.split("-", 1)[1] == component_type
+
+def test_find_matching_services(mockconfig):
+ from k8splugin import discovery as dis
+
+ services = { "component_dockerhost_1": ["foo", "bar"],
+ "platform_dockerhost": [], "component_dockerhost_2": ["baz"] }
+ assert sorted(["component_dockerhost_1", "component_dockerhost_2"]) \
+ == sorted(dis._find_matching_services(services, "component_dockerhost", []))
+
+ assert ["component_dockerhost_1"] == dis._find_matching_services(services, \
+ "component_dockerhost", ["foo", "bar"])
+
+ assert ["component_dockerhost_1"] == dis._find_matching_services(services, \
+ "component_dockerhost", ["foo"])
+
+ assert [] == dis._find_matching_services(services, "unknown", ["foo"])
+
+def test_is_healthy_pure(mockconfig):
+ from k8splugin import discovery as dis
+
+ def fake_is_healthy(name):
+ return 0, [{ "Checks": [{"Status": "passing"}] }]
+
+ assert True == dis._is_healthy_pure(fake_is_healthy, "some-component") \ No newline at end of file
diff --git a/k8s/tests/test_tasks.py b/k8s/tests/test_tasks.py
new file mode 100644
index 0000000..4d0aa90
--- /dev/null
+++ b/k8s/tests/test_tasks.py
@@ -0,0 +1,293 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import copy
+import pytest
+from cloudify.exceptions import NonRecoverableError, RecoverableError
+
+
+def test_generate_component_name(mockconfig):
+ from k8splugin import tasks
+ kwargs = { "service_component_type": "doodle",
+ "service_component_name_override": None }
+
+ assert "doodle" in tasks._generate_component_name(**kwargs)["name"]
+
+ kwargs["service_component_name_override"] = "yankee"
+
+ assert "yankee" == tasks._generate_component_name(**kwargs)["name"]
+
+
+def test_parse_streams(monkeypatch, mockconfig):
+ import k8splugin
+ from k8splugin import tasks
+ # Good case for streams_publishes
+ test_input = { "streams_publishes": [{"name": "topic00", "type": "message_router"},
+ {"name": "feed00", "type": "data_router"}],
+ "streams_subscribes": {} }
+
+ expected = {'feed00': {'type': 'data_router', 'name': 'feed00'},
+ 'streams_publishes': [{'type': 'message_router', 'name': 'topic00'},
+ {'type': 'data_router', 'name': 'feed00'}],
+ 'streams_subscribes': {},
+ 'topic00': {'type': 'message_router', 'name': 'topic00'}
+ }
+
+ assert expected == tasks._parse_streams(**test_input)
+
+ # Good case for streams_subscribes (password provided)
+ test_input = { "streams_publishes": {},
+ "streams_subscribes": [{"name": "topic01", "type": "message_router"},
+ {"name": "feed01", "type": "data_router", "username": "hero",
+ "password": "123456"}] }
+
+ expected = {'feed01': {'type': 'data_router', 'name': 'feed01',
+ 'username': 'hero', 'password': '123456'},
+ 'streams_publishes': {},
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'},
+ {'type': 'data_router', 'name': 'feed01', 'username': 'hero',
+ 'password': '123456'}],
+ 'topic01': {'type': 'message_router', 'name': 'topic01'}}
+
+ assert expected == tasks._parse_streams(**test_input)
+
+ # Good case for streams_subscribes (password generated)
+ test_input = { "streams_publishes": {},
+ "streams_subscribes": [{"name": "topic01", "type": "message_router"},
+ {"name": "feed01", "type": "data_router", "username": None,
+ "password": None}] }
+
+ def not_so_random(n):
+ return "nosurprise"
+
+ monkeypatch.setattr(k8splugin.utils, "random_string", not_so_random)
+
+ expected = {'feed01': {'type': 'data_router', 'name': 'feed01',
+ 'username': 'nosurprise', 'password': 'nosurprise'},
+ 'streams_publishes': {},
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'},
+ {'type': 'data_router', 'name': 'feed01', 'username': None,
+ 'password': None}],
+ 'topic01': {'type': 'message_router', 'name': 'topic01'}}
+
+ assert expected == tasks._parse_streams(**test_input)
+
+
+def test_setup_for_discovery(monkeypatch, mockconfig):
+ import k8splugin
+ from k8splugin import tasks
+
+ test_input = { "name": "some-name",
+ "application_config": { "one": "a", "two": "b" } }
+
+ def fake_push_config(conn, name, application_config):
+ return
+
+ monkeypatch.setattr(k8splugin.discovery, "push_service_component_config",
+ fake_push_config)
+
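+    # A successful config push should return the inputs unchanged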
+ assert test_input == tasks._setup_for_discovery(**test_input)
+
+ def fake_push_config_connection_error(conn, name, application_config):
+ raise k8splugin.discovery.DiscoveryConnectionError("Boom")
+
+ monkeypatch.setattr(k8splugin.discovery, "push_service_component_config",
+ fake_push_config_connection_error)
+
+ with pytest.raises(RecoverableError):
+ tasks._setup_for_discovery(**test_input)
+
+
+def test_setup_for_discovery_streams(monkeypatch, mockconfig):
+ import k8splugin
+ from k8splugin import tasks
+ test_input = {'feed01': {'type': 'data_router', 'name': 'feed01',
+ 'username': 'hero', 'password': '123456', 'location': 'Bedminster'},
+ 'streams_publishes': {},
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'},
+ {'type': 'data_router', 'name': 'feed01', 'username': 'hero',
+ 'password': '123456', 'location': 'Bedminster'}],
+ 'topic01': {'type': 'message_router', 'name': 'topic01'}}
+ test_input["name"] = "some-foo-service-component"
+
+ # Good case
+ def fake_add_to_entry(conn, key, add_name, add_value):
+ """
+        This fake method checks all the pieces that are used to store
+        details in Consul
+ """
+ if key != test_input["name"] + ":dmaap":
+ return None
+ if add_name != "feed01":
+ return None
+ if add_value != {"location": "Bedminster", "delivery_url": None,
+ "username": "hero", "password": "123456", "subscriber_id": None}:
+ return None
+
+ return "SUCCESS!"
+
+ monkeypatch.setattr(k8splugin.discovery, "add_to_entry",
+ fake_add_to_entry)
+
+ assert tasks._setup_for_discovery_streams(**test_input) == test_input
+
+ # Good case - no data router subscribers
+ test_input = {"streams_publishes": [{"name": "topic00", "type": "message_router"}],
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'}]}
+ test_input["name"] = "some-foo-service-component"
+
+ assert tasks._setup_for_discovery_streams(**test_input) == test_input
+
+    # Bad case - the underlying Consul call raised an error
+ test_input = {'feed01': {'type': 'data_router', 'name': 'feed01',
+ 'username': 'hero', 'password': '123456', 'location': 'Bedminster'},
+ 'streams_publishes': {},
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'},
+ {'type': 'data_router', 'name': 'feed01', 'username': 'hero',
+ 'password': '123456', 'location': 'Bedminster'}],
+ 'topic01': {'type': 'message_router', 'name': 'topic01'}}
+ test_input["name"] = "some-foo-service-component"
+
+ def barf(conn, key, add_name, add_value):
+ raise RuntimeError("Barf")
+
+ monkeypatch.setattr(k8splugin.discovery, "add_to_entry",
+ barf)
+
+ with pytest.raises(NonRecoverableError):
+ tasks._setup_for_discovery_streams(**test_input)
+
+
+def test_lookup_service(monkeypatch, mockconfig):
+ import k8splugin
+ from k8splugin import tasks
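+    # Stub the Consul service lookup so the test needs no live Consul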
+ def fake_lookup(conn, scn):
+ return [{"ServiceAddress": "192.168.1.1", "ServicePort": "80"}]
+
+ monkeypatch.setattr(k8splugin.discovery, "lookup_service",
+ fake_lookup)
+
+ assert "192.168.1.1" == tasks._lookup_service("some-component")
+ assert "192.168.1.1:80" == tasks._lookup_service("some-component",
+ with_port=True)
+
+
+def test_verify_container(monkeypatch, mockconfig):
+ from k8sclient import k8sclient
+ from k8splugin import tasks
+ from k8splugin.exceptions import DockerPluginDeploymentError
+
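+    # _verify_container should succeed once the container reports available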
+ def fake_is_available_success(ch, scn):
+ return True
+
+ monkeypatch.setattr(k8sclient, "is_available",
+ fake_is_available_success)
+
+ assert tasks._verify_container("some-name", 3)
+
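+    # If the container never becomes available, retries are exhausted and
+    # a deployment error is raised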
+ def fake_is_available_never_good(ch, scn):
+ return False
+
+ monkeypatch.setattr(k8sclient, "is_available",
+ fake_is_available_never_good)
+
+ with pytest.raises(DockerPluginDeploymentError):
+ tasks._verify_container("some-name", 2)
+
+
+def test_update_delivery_url(monkeypatch, mockconfig):
+ import k8splugin
+ from k8splugin import tasks
+ test_input = {'feed01': {'type': 'data_router', 'name': 'feed01',
+ 'username': 'hero', 'password': '123456', 'location': 'Bedminster',
+ 'route': 'some-path'},
+ 'streams_publishes': {},
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'},
+ {'type': 'data_router', 'name': 'feed01', 'username': 'hero',
+ 'password': '123456', 'location': 'Bedminster',
+ 'route': 'some-path'}],
+ 'topic01': {'type': 'message_router', 'name': 'topic01'}}
+ test_input["service_component_name"] = "some-foo-service-component"
+
+ def fake_lookup_service(name, with_port=False):
+ if with_port:
+ return "10.100.1.100:8080"
+ else:
+ return
+
+ monkeypatch.setattr(k8splugin.tasks, "_lookup_service",
+ fake_lookup_service)
+
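+    # The feed should gain a delivery_url built from the looked-up
+    # address and the feed's configured route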
+ expected = copy.deepcopy(test_input)
+ expected["feed01"]["delivery_url"] = "http://10.100.1.100:8080/some-path"
+
+ assert tasks._update_delivery_url(**test_input) == expected
+
+
+def test_enhance_docker_params(mockconfig):
+ from k8splugin import tasks
+ # Good - Test empty docker config
+
+ test_kwargs = { "docker_config": {}, "service_id": None }
+ actual = tasks._enhance_docker_params(**test_kwargs)
+
+ assert actual == {'envs': {"SERVICE_TAGS": ""}, 'docker_config': {}, "service_id": None }
+
+ # Good - Test just docker config ports and volumes
+
+ test_kwargs = { "docker_config": { "ports": ["1:1", "2:2"],
+ "volumes": [{"container": "somewhere", "host": "somewhere else"}] },
+ "service_id": None }
+ actual = tasks._enhance_docker_params(**test_kwargs)
+
+ assert actual == {'envs': {"SERVICE_TAGS": ""}, 'docker_config': {'ports': ['1:1', '2:2'],
+ 'volumes': [{'host': 'somewhere else', 'container': 'somewhere'}]},
+ 'ports': ['1:1', '2:2'], 'volumes': [{'host': 'somewhere else',
+ 'container': 'somewhere'}], "service_id": None}
+
+    # Good - Test just docker config ports and volumes with overrides
+
+ test_kwargs = { "docker_config": { "ports": ["1:1", "2:2"],
+ "volumes": [{"container": "somewhere", "host": "somewhere else"}] },
+ "ports": ["3:3", "4:4"], "volumes": [{"container": "nowhere", "host":
+ "nowhere else"}],
+ "service_id": None }
+ actual = tasks._enhance_docker_params(**test_kwargs)
+
+ assert actual == {'envs': {"SERVICE_TAGS": ""}, 'docker_config': {'ports': ['1:1', '2:2'],
+ 'volumes': [{'host': 'somewhere else', 'container': 'somewhere'}]},
+ 'ports': ['1:1', '2:2', '3:3', '4:4'], 'volumes': [{'host': 'somewhere else',
+ 'container': 'somewhere'}, {'host': 'nowhere else', 'container':
+ 'nowhere'}], "service_id": None}
+
+    # Good - service_id and deployment_id should be merged into SERVICE_TAGS
+
+ test_kwargs = { "docker_config": {}, "service_id": "zed",
+ "deployment_id": "abc" }
+ actual = tasks._enhance_docker_params(**test_kwargs)
+
+ assert actual["envs"] == {"SERVICE_TAGS": "abc,zed"}
+
+
+def test_notify_container(mockconfig):
+ from k8splugin import tasks
+
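+    # An unrecognized trigger_type should be a no-op: inputs pass through unchanged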
+ test_input = { "docker_config": { "trigger_type": "unknown" } }
+    assert test_input == tasks._notify_container(**test_input)
\ No newline at end of file
diff --git a/k8s/tests/test_utils.py b/k8s/tests/test_utils.py
new file mode 100644
index 0000000..0b7cba4
--- /dev/null
+++ b/k8s/tests/test_utils.py
@@ -0,0 +1,33 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+def test_random_string(mockconfig):
+ from k8splugin import utils
+
+ target_length = 10
+ assert len(utils.random_string(target_length)) == target_length
+
+
+def test_update_dict(mockconfig):
+ from k8splugin import utils
+
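+    # Keys present in both dicts take the values from the update dict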
+ d = { "a": 1, "b": 2 }
+ u = { "a": 2, "b": 3 }
+ assert utils.update_dict(d, u) == u
diff --git a/k8s/tox.ini b/k8s/tox.ini
new file mode 100644
index 0000000..d58364b
--- /dev/null
+++ b/k8s/tox.ini
@@ -0,0 +1,15 @@
+# tox.ini for the k8s plugin; keep in the same directory as setup.py
+[tox]
+envlist = py27
+
+[testenv]
+deps=
+ -rrequirements.txt
+ pytest
+ coverage
+ pytest-cov
+setenv=
+ PYTHONPATH={toxinidir}
+commands=
+ pytest --junitxml xunit-results.xml --cov k8splugin --cov-report xml
+    coverage xml
\ No newline at end of file
diff --git a/mvn-phase-script.sh b/mvn-phase-script.sh
index 184c8ab..2745340 100755
--- a/mvn-phase-script.sh
+++ b/mvn-phase-script.sh
@@ -56,7 +56,7 @@ test)
package)
echo "==> package phase script"
case $MVN_PROJECT_MODULEID in
- cdap|dcae-policy|docker|relationships)
+ cdap|dcae-policy|docker|relationships|k8s)
build_archives_for_wagons
build_wagons
;;
@@ -70,7 +70,7 @@ install)
deploy)
echo "==> deploy phase script"
case $MVN_PROJECT_MODULEID in
- cdap|dcae-policy|docker|relationships)
+ cdap|dcae-policy|docker|relationships|k8s)
upload_wagons_and_type_yamls
upload_wagon_archives
;;
diff --git a/pom.xml b/pom.xml
index 558f373..0a420ae 100644
--- a/pom.xml
+++ b/pom.xml
@@ -38,6 +38,7 @@ ECOMP is a trademark and service mark of AT&T Intellectual Property.
<module>docker</module>
<module>relationships</module>
<module>dcae-policy</module>
+ <module>k8s</module>
</modules>
<properties>
<onap.nexus.url>https://nexus.onap.org</onap.nexus.url>