25 files changed, 469 insertions, 109 deletions
diff --git a/ansible/roles/application/defaults/main.yml b/ansible/roles/application/defaults/main.yml index 2ae668ac..6a7472a6 100644 --- a/ansible/roles/application/defaults/main.yml +++ b/ansible/roles/application/defaults/main.yml @@ -5,11 +5,15 @@ helm_extra_install_options: - { opt: '--timeout 1800'} # Override file generation for Helm application can be customized by any role # given by user and found by ansible from roles_path. -# By default override file is generated by 'application-override' role that is -# specific for offline installer (for onap) as it's generating server -# certificate needed to simulate internet by offline installer. +# By default override file is generated by 'application-override' role that +# allows specifying custom helm overrides as the value of "override" +# dictionary in inventory or extravars. app_skip_helm_override: false app_helm_override_role: application-override app_helm_override_file: "{{ app_data_path }}/override.yaml" -helm_overide_files: - - "{{ app_helm_override_file }}" +# List of additional helm override files, the one generated above overrides +# every file listed here. In addition, the order of override files in this list +# matters, settings in latter files override the former.. +# Paths provided here must be absolute. +helm_override_files: + - "{{ app_helm_charts_infra_directory }}/onap/resources/overrides/onap-all.yaml" diff --git a/ansible/roles/application/molecule/default/tests/test_default.py b/ansible/roles/application/molecule/default/tests/test_default.py index 5edceff3..22298e3a 100644 --- a/ansible/roles/application/molecule/default/tests/test_default.py +++ b/ansible/roles/application/molecule/default/tests/test_default.py @@ -16,7 +16,8 @@ serve repo list repo add local http://127.0.0.1:8879 install --name moleculetestapp local/moleculetestapp --namespace \ -moleculetestapp -f /opt/moleculetestapp/override.yaml \ +moleculetestapp -f /opt/moleculetestapp/helm_charts/onap/resources/\ +overrides/onap-all.yaml -f /opt/moleculetestapp/override.yaml \ --timeout 1800""" assert fc == expected_content diff --git a/ansible/roles/application/tasks/install.yml b/ansible/roles/application/tasks/install.yml index 003631d7..2ac2fd6b 100644 --- a/ansible/roles/application/tasks/install.yml +++ b/ansible/roles/application/tasks/install.yml @@ -59,6 +59,16 @@ name: "{{ app_helm_override_role }}" when: not app_skip_helm_override +# The generated override file is added to override list unless skipped. 
+- name: Add application helm override file to list of overrides unless skipped + set_fact: + helm_override_files: "{{ (helm_override_files | default([])) + [app_helm_override_file] }}" + when: not app_skip_helm_override + +- name: Print final list of override files + debug: + var: helm_override_files + - name: Check for deploy plugin presence stat: path: '{{ helm_home_dir.stdout }}/plugins/deploy/deploy.sh' @@ -71,7 +81,7 @@ {{ app_helm_release_name }} {{ helm_repository_name }}/{{ app_helm_chart_name }} --namespace {{ app_kubernetes_namespace }} - {% if not app_skip_helm_override %} {% for arg in helm_overide_files %} {{ '-f ' + arg }} {% endfor %} {% endif %} + {% for arg in helm_override_files %} {{ '-f ' + arg }} {% endfor %} {% for arg in helm_extra_install_options %} {{ arg.opt }} {% endfor %} changed_when: true # when executed its a changed type of action register: helm_install diff --git a/ansible/roles/rancher/defaults/main.yml b/ansible/roles/rancher/defaults/main.yml index e4e12d23..8a37574a 100644 --- a/ansible/roles/rancher/defaults/main.yml +++ b/ansible/roles/rancher/defaults/main.yml @@ -1,5 +1,5 @@ --- -rancher_server_url: "http://{{ hostvars[groups.infrastructure.0].ansible_host }}:8080" +rancher_server_url: "http://{{ hostvars[groups.infrastructure.0].cluster_ip }}:8080" rancher_remove_other_env: true rancher_redeploy_k8s_env: true rancher_cluster_health_state: healthy diff --git a/ansible/roles/rancher/molecule/default/playbook.yml b/ansible/roles/rancher/molecule/default/playbook.yml index e4a7151e..afdbb4af 100644 --- a/ansible/roles/rancher/molecule/default/playbook.yml +++ b/ansible/roles/rancher/molecule/default/playbook.yml @@ -6,11 +6,11 @@ - role: rancher vars: mode: server - rancher_server_url: "http://{{ cluster_ip }}:8080" - name: Converge rancher agent hosts: kubernetes roles: + - prepare-common - role: rancher vars: mode: agent diff --git a/ansible/roles/rancher/tasks/rancher_agent.yml b/ansible/roles/rancher/tasks/rancher_agent.yml index 73d9a642..e54d760e 100644 --- a/ansible/roles/rancher/tasks/rancher_agent.yml +++ b/ansible/roles/rancher/tasks/rancher_agent.yml @@ -4,6 +4,8 @@ name: rancher_agent image: "{{ server_hostvars.rancher_agent_image }}" command: "{{ server_hostvars.rancher_agent_reg_url }}" + env: + CATTLE_AGENT_IP: "{{ cluster_ip }}" volumes: - "/var/run/docker.sock:/var/run/docker.sock" - "/var/lib/rancher:/var/lib/rancher" diff --git a/ansible/test/roles/prepare-docker-dind/defaults/main.yml b/ansible/test/roles/prepare-docker-dind/defaults/main.yml index 2489014e..147a828a 100644 --- a/ansible/test/roles/prepare-docker-dind/defaults/main.yml +++ b/ansible/test/roles/prepare-docker-dind/defaults/main.yml @@ -1,3 +1,5 @@ --- # Variable specifying if we should install docker, or only prepare for it start_docker: true +#The version of docker to install +docker_version: 18.09.5 diff --git a/ansible/test/roles/prepare-docker-dind/tasks/main.yml b/ansible/test/roles/prepare-docker-dind/tasks/main.yml index 3e109e87..c0bf1543 100644 --- a/ansible/test/roles/prepare-docker-dind/tasks/main.yml +++ b/ansible/test/roles/prepare-docker-dind/tasks/main.yml @@ -20,4 +20,5 @@ package: name: "docker-ce-{{ docker_version }}" state: present + allow_downgrade: true notify: Restart docker diff --git a/ansible/test/roles/prepare-docker-dind/vars/main.yml b/ansible/test/roles/prepare-docker-dind/vars/main.yml deleted file mode 100644 index 950fb921..00000000 --- a/ansible/test/roles/prepare-docker-dind/vars/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -#The 
version of docker to install -docker_version: 18.09.5 diff --git a/build/download/__init__.py b/build/download/__init__.py index f0efbc15..b1ef8d99 100644 --- a/build/download/__init__.py +++ b/build/download/__init__.py @@ -1,5 +1,3 @@ - -#! /usr/bin/env python # -*- coding: utf-8 -*- # COPYRIGHT NOTICE STARTS HERE diff --git a/build/download/clean_docker_images.py b/build/download/clean_docker_images.py new file mode 100755 index 00000000..186bfd60 --- /dev/null +++ b/build/download/clean_docker_images.py @@ -0,0 +1,71 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- + +# COPYRIGHT NOTICE STARTS HERE + +# Copyright 2019 © Samsung Electronics Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# COPYRIGHT NOTICE ENDS HERE + + +import argparse +import docker +import logging +import sys + +from downloader import AbstractDownloader +from docker_downloader import DockerDownloader + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('image_lists', nargs='+', help='Images to keep') + parser.add_argument('--debug', '-d', action='store_true', help='Debugging messages') + args = parser.parse_args() + + if args.debug: + logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) + else: + logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='%(message)s') + + target = set() + for lst in args.image_lists: + target = target.union(AbstractDownloader.load_list(lst)) + + target = set(map(DockerDownloader.image_registry_name, target)) + + client = docker.client.DockerClient(version='auto') + + errors = 0 + for image in client.images.list(): + for tag in image.tags: + logging.debug('Checking {}'.format(tag)) + if tag not in target: + logging.debug('Image \'{}\' not in lists'.format(tag)) + logging.info('Removing: {}'.format(tag)) + try: + client.images.remove(tag) + logging.info('Removed: {}'.format(tag)) + except docker.errors.APIError as err: + errors += 1 + logging.exception(err) + else: + logging.debug('Image \'{}\' found in lists.'.format(tag)) + sys.exit(errors) + + +if __name__ == '__main__': + main() + diff --git a/build/download/command_downloader.py b/build/download/command_downloader.py index 5efc8b0f..835d218d 100755 --- a/build/download/command_downloader.py +++ b/build/download/command_downloader.py @@ -1,4 +1,3 @@ -#! /usr/bin/env python # -*- coding: utf-8 -*- # COPYRIGHT NOTICE STARTS HERE diff --git a/build/download/concurrent_downloader.py b/build/download/concurrent_downloader.py index c84dac86..a150db73 100644 --- a/build/download/concurrent_downloader.py +++ b/build/download/concurrent_downloader.py @@ -1,4 +1,3 @@ -#! /usr/bin/env python # -*- coding: utf-8 -*- # COPYRIGHT NOTICE STARTS HERE diff --git a/build/download/docker_downloader.py b/build/download/docker_downloader.py index 13323d3b..d83f682c 100755 --- a/build/download/docker_downloader.py +++ b/build/download/docker_downloader.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! 
/usr/bin/env python3 # -*- coding: utf-8 -*- # COPYRIGHT NOTICE STARTS HERE diff --git a/build/download/download.py b/build/download/download.py index 0af12989..6d76b369 100755 --- a/build/download/download.py +++ b/build/download/download.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # -*- coding: utf-8 -*- # COPYRIGHT NOTICE STARTS HERE diff --git a/build/download/downloader.py b/build/download/downloader.py index 64403300..6bbab35d 100644 --- a/build/download/downloader.py +++ b/build/download/downloader.py @@ -1,4 +1,3 @@ -#! /usr/bin/env python # -*- coding: utf-8 -*- # COPYRIGHT NOTICE STARTS HERE @@ -32,7 +31,7 @@ class AbstractDownloader(ABC): def __init__(self, list_type, *list_args): self._list_type = list_type self._data_list = {item: list_arg[1] for list_arg in list_args - for item in self._load_list(list_arg[0])} + for item in self.load_list(list_arg[0])} self._missing = self.missing() @property @@ -43,7 +42,7 @@ class AbstractDownloader(ABC): return self._list_type @staticmethod - def _load_list(path): + def load_list(path): """ Load list from file. :param path: path to file diff --git a/build/download/git_downloader.py b/build/download/git_downloader.py index 46faa8f8..ed7a3956 100755 --- a/build/download/git_downloader.py +++ b/build/download/git_downloader.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # -*- coding: utf-8 -*- # COPYRIGHT NOTICE STARTS HERE diff --git a/build/download/http_downloader.py b/build/download/http_downloader.py index ba2c0f7e..c6b7c6e9 100644 --- a/build/download/http_downloader.py +++ b/build/download/http_downloader.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # -*- coding: utf-8 -*- # COPYRIGHT NOTICE STARTS HERE diff --git a/build/download/npm_downloader.py b/build/download/npm_downloader.py index 369af72a..ed4152b8 100755 --- a/build/download/npm_downloader.py +++ b/build/download/npm_downloader.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # -*- coding: utf-8 -*- # COPYRIGHT NOTICE STARTS HERE diff --git a/build/download/pypi_downloader.py b/build/download/pypi_downloader.py index 4ab6b1f4..10ac7b9f 100755 --- a/build/download/pypi_downloader.py +++ b/build/download/pypi_downloader.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # -*- coding: utf-8 -*- # COPYRIGHT NOTICE STARTS HERE diff --git a/build/download/rpm_downloader.py b/build/download/rpm_downloader.py index 92ae6a78..415f9483 100755 --- a/build/download/rpm_downloader.py +++ b/build/download/rpm_downloader.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! /usr/bin/env python3 # -*- coding: utf-8 -*- # COPYRIGHT NOTICE STARTS HERE diff --git a/build/package.py b/build/package.py index 8a1808b3..8a1808b3 100644..100755 --- a/build/package.py +++ b/build/package.py diff --git a/docs/BuildGuide.rst b/docs/BuildGuide.rst index 798da9f9..464422ac 100644 --- a/docs/BuildGuide.rst +++ b/docs/BuildGuide.rst @@ -79,7 +79,8 @@ Then it is necessary to clone all installer and build related repositories and p git clone https://gerrit.onap.org/r/oom/offline-installer onap-offline cd onap-offline - # install required pip packages for download scripts + # install required pip packages for build and download scripts + pip3 install -r ./build/requirements.txt pip3 install -r ./build/download/requirements.txt Part 2. Download artifacts for offline installer @@ -90,8 +91,6 @@ Part 2. Download artifacts for offline installer It's possible to download all artifacts in single ./download.py execution. 
Recently we improved reliability of download scripts so one might try following command to download most of the required artifacts in single shot. -**Step1 - download wrapper script execution** - :: # following arguments are provided @@ -111,23 +110,6 @@ so one might try following command to download most of the required artifacts in Alternatively, step-by-step procedure is described in Appendix 1. -Following steps are still required and are not supported by current version of download.py script. - -**Step 2 - Building own dns image** - :: - - # We are building our own dns image within our offline infrastructure - ./build/creating_data/create_nginx_image/01create-image.sh /tmp/resources/offline_data/docker_images_infra - - -**Step 3 - Create repo** - :: - - createrepo ../resources/pkg/rhel - - This concludes SW download part required for ONAP offline platform creating. Part 3. Populate local nexus @@ -172,78 +154,43 @@ E.g. rm -rf /tmp/resources/offline_data/npm_tar rm -rf /tmp/resources/offline_data/pypi -Part 4. Application helm charts preparation and patching +Part 4. Packages preparation -------------------------------------------------------- -This is about to clone oom repository and patch it to be able to use it -offline. Use the following command: - :: - - ./build/fetch_and_patch_charts.sh <helm charts repo> <commit/tag/branch> <patchfile> <target\_dir> - -For example: - :: - - ./build/fetch_and_patch_charts.sh https://gerrit.onap.org/r/oom 0b904977dde761d189874d6dc6c527cd45928 /tmp/onap-offline/patches/onap.patch /tmp/oom-clone - -Part 5. Creating offline installation package --------------------------------------------- - -For the packagin itself it's necessary to prepare configuration. You can -use ./build/package.conf as template or -directly modify it. - -There are some parameters needs to be set in configuration file. -Example values below are setup according to steps done in this guide to package ONAP. +ONAP offline deliverable consists of 3 packages: +---------------------------------------+------------------------------------------------------------------------------+ -| Parameter | Description | +| Package | Description | +=======================================+==============================================================================+ -| HELM_CHARTS_DIR | directory with Helm charts for the application | -| | | -| | Example: /tmp/oom-clone/kubernetes | +| sw_package.tar | Contains installation software and configuration for infrastructure and ONAP | +---------------------------------------+------------------------------------------------------------------------------+ -| APP_CONFIGURATION | application install configuration (application_configuration.yml) for | -| | ansible installer and custom ansible role code directories if any. 
| | | | Example:: | | | | | APP_CONFIGURATION=( | | | /tmp/onap-offline/config/application_configuration.yml | | | /tmp/onap-offline/patches/onap-patch-role | | | ) | | | | +| resources_package.tar | Contains all input files needed to deploy infrastructure and ONAP | +---------------------------------------+------------------------------------------------------------------------------+ -| APP_BINARY_RESOURCES_DIR | directory with all (binary) resources for offline infra and application | -| | | -| | Example: /tmp/resources | -+---------------------------------------+------------------------------------------------------------------------------+ -| APP_AUX_BINARIES | additional binaries such as docker images loaded during runtime [optional] | +| aux_package.tar | Contains auxiliary input files that can be added to ONAP | +---------------------------------------+------------------------------------------------------------------------------+ -Offline installer packages are created with prepopulated data via -following command run from onap-offline directory +All packages can be created using the build/package.py script. Besides archiving the files gathered in the previous steps, the script also builds the installer software and applies a patch over the application repository to make it usable without internet access. + +From onap-offline directory run: :: - ./build/package.sh <project> <version> <packaging target directory> + ./build/package.py <helm charts repo> --application-repository_reference <commit/tag/branch> --application-patch_file <patchfile> --output-dir <target\_dir> --resources-directory <target\_dir> -E.g. +For example: :: - ./build/package.sh onap 4.0.0 /tmp/package + ./build/package.py https://gerrit.onap.org/r/oom --application-repository_reference master --application-patch_file ./patches/onap.patch --output-dir ../packages --resources-directory ../resources -So in the target directory you should find tar files with +In the target directory you should find tar files: :: - offline-<PROJECT_NAME>-<PROJECT_VERSION>-sw.tar - offline-<PROJECT_NAME>-<PROJECT_VERSION>-resources.tar - offline-<PROJECT_NAME>-<PROJECT_VERSION>-aux-resources.tar + sw_package.tar + resources_package.tar + aux_package.tar Appendix 1. Step-by-step download procedure =========================================== @@ -261,14 +208,7 @@ Appendix 1. Step-by-step download procedure --docker ./build/data_lists/onap_docker_images.list ../resources/offline_data/docker_images_for_nexus -**Step 2 - building own dns image** - :: - - # We are building our own dns image within our offline infrastructure - ./build/creating_data/create_nginx_image/01create-image.sh /tmp/resources/offline_data/docker_images_infra - -**Step 3 - git repos** +**Step 2 - git repos** :: # Following step will download all git repos ./build/download/download.py --git ./build/data_lists/onap_git_repos.list ../resources/git-repo -**Step 4 - npm packages** +**Step 3 - npm packages** :: # Following step will download all npm packages ./build/download/download.py --npm ./build/data_lists/onap_npm.list ../resources/offline_data/npm_tar -**Step 5 - binaries** +**Step 4 - binaries** :: # Following step will download rke, kubectl and helm binaries ./build/download/download.py --http ./build/data_lists/infra_bin_utils.sh ../resources/downloads -**Step 6 - rpms** +**Step 5 - rpms** :: # Following step will download all rpms and create repo ./build/download/download.py --rpm ./build/data_lists/onap_rpm.list ../resources/pkg/rhel @@ -299,7 +239,7 @@ Appendix 1. 
Step-by-step download procedure createrepo ../resources/pkg/rhel -**Step 7 - pip packages** +**Step 6 - pip packages** :: diff --git a/docs/images/vFWCL-dublin.jpg b/docs/images/vFWCL-dublin.jpg Binary files differ new file mode 100644 index 00000000..a943a5d4 --- /dev/null +++ b/docs/images/vFWCL-dublin.jpg diff --git a/docs/vFWCL-notes.rst b/docs/vFWCL-notes.rst new file mode 100644 index 00000000..17a49399 --- /dev/null +++ b/docs/vFWCL-notes.rst @@ -0,0 +1,337 @@ +************************************* +vFWCL on Dublin ONAP offline platform +************************************* + +|image0| + +This document collects the notes we have from running the vFirewall demo on the offline Dublin platform +installed by the ONAP offline installer tool. + +Overall it was much easier compared with the earlier version; however, the following steps are still needed. + +Some of the most relevant materials are available at the following links: + +* `oom_quickstart_guide.html <https://docs.onap.org/en/dublin/submodules/oom.git/docs/oom_quickstart_guide.html>`_ +* `docs_vfw.html <https://docs.onap.org/en/dublin/submodules/integration.git/docs/docs_vfw.html>`_ + + +.. contents:: Table of Contents + :depth: 2 + + + +Step 1. Preconditions - before ONAP deployment +============================================== + +Understanding of the underlying OpenStack deployment is required from anyone applying these instructions. + +In addition, installation-specific location of the helm charts on the infra node must be known. +In this document it is referred to as <helm_charts_dir> + +The snippets below describe the areas we need to configure for a successful vFWCL demo. + +Pay attention to them and configure them (ideally before deployment) accordingly. + +**1) <helm_charts_dir>/onap/values.yaml**:: + + + ################################################################# + # Global configuration overrides. + # !!! VIM specific entries are in APPC / Robot & SO parts !!! + ################################################################# + global: + # Change to an unused port prefix range to prevent port conflicts + # with other instances running within the same k8s cluster + nodePortPrefix: 302 + nodePortPrefixExt: 304 + + # ONAP Repository + # Uncomment the following to enable the use of a single docker + # repository but ONLY if your repository mirrors all ONAP + # docker images. This includes all images from dockerhub and + # any other repository that hosts images for ONAP components. + #repository: nexus3.onap.org:10001 + repositoryCred: + user: docker + password: docker + + # readiness check - temporary repo until images migrated to nexus3 + readinessRepository: oomk8s + # logging agent - temporary repo until images migrated to nexus3 + loggingRepository: docker.elastic.co + + # image pull policy + pullPolicy: Always + + # default mount path root directory referenced + # by persistent volumes and log files + persistence: + mountPath: /dockerdata-nfs + enableDefaultStorageclass: false + parameters: {} + storageclassProvisioner: kubernetes.io/no-provisioner + volumeReclaimPolicy: Retain + + # override default resource limit flavor for all charts + flavor: unlimited + + # flag to enable debugging - application support required + debugEnabled: false + + ################################################################# + # Enable/disable and configure helm charts (ie. applications) + # to customize the ONAP deployment. 
+ ################################################################# + aaf: + enabled: true + aai: + enabled: true + appc: + enabled: true + config: + openStackType: "OpenStackProvider" + openStackName: "OpenStack" + openStackKeyStoneUrl: "http://10.20.30.40:5000/v2.0" + openStackServiceTenantName: "service" + openStackDomain: "default" + openStackUserName: "onap-tieto" + openStackEncryptedPassword: "31ECA9F2BA98EF34C9EC3412D071E31185F6D9522808867894FF566E6118983AD5E6F794B8034558" + cassandra: + enabled: true + clamp: + enabled: true + cli: + enabled: true + consul: + enabled: true + contrib: + enabled: true + dcaegen2: + enabled: true + pnda: + enabled: true + dmaap: + enabled: true + esr: + enabled: true + log: + enabled: true + sniro-emulator: + enabled: true + oof: + enabled: true + mariadb-galera: + enabled: true + msb: + enabled: true + multicloud: + enabled: true + nbi: + enabled: true + config: + # openstack configuration + openStackRegion: "Yolo" + openStackVNFTenantId: "1234" + nfs-provisioner: + enabled: true + policy: + enabled: true + pomba: + enabled: true + portal: + enabled: true + robot: + enabled: true + appcUsername: "appc@appc.onap.org" + appcPassword: "demo123456!" + openStackKeyStoneUrl: "http://10.20.30.40:5000" + openStackPublicNetId: "9403ceea-0738-4908-a826-316c8541e4bb" + openStackPublicNetworkName: "rc3-offline-network" + openStackTenantId: "b1ce7742d956463999923ceaed71786e" + openStackUserName: "onap-tieto" + ubuntu14Image: "trusty" + openStackPrivateNetId: "3c7aa2bd-ba14-40ce-8070-6a0d6a617175" + openStackPrivateSubnetId: "2bcb9938-9c94-4049-b580-550a44dc63b3" + openStackPrivateNetCidr: "10.0.0.0/16" + openStackSecurityGroup: "onap_sg" + openStackOamNetworkCidrPrefix: "10.0" + dcaeCollectorIp: "10.8.8.22" # this IP is taken from k8s host + vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPwF2bYm2QuqZpjuAcZDJTcFdUkKv4Hbd/3qqbxf6g5ZgfQarCi+mYnKe9G9Px3CgFLPdgkBBnMSYaAzMjdIYOEdPKFTMQ9lIF0+i5KsrXvszWraGKwHjAflECfpTAWkPq2UJUvwkV/g7NS5lJN3fKa9LaqlXdtdQyeSBZAUJ6QeCE5vFUplk3X6QFbMXOHbZh2ziqu8mMtP+cWjHNBB47zHQ3RmNl81Rjv+QemD5zpdbK/h6AahDncOY3cfN88/HPWrENiSSxLC020sgZNYgERqfw+1YhHrclhf3jrSwCpZikjl7rqKroua2LBI/yeWEta3amTVvUnR2Y7gM8kHyh Generated-by-Nova" + demoArtifactsVersion: "1.4.0" # Dublin prefered is 1.4.0 + demoArtifactsRepoUrl: "https://nexus.onap.org/content/repositories/releases" + scriptVersion: "1.4.0" # Dublin prefered is 1.4.0 + rancherIpAddress: "10.8.8.8" # this IP is taken from infra node + config: + # instructions how to generate this value properly are in OOM quick quide mentioned above + openStackEncryptedPasswordHere: "f7920677e15e2678b0f33736189e8965" + + sdc: + enabled: true + sdnc: + enabled: true + + replicaCount: 1 + + mysql: + replicaCount: 1 + so: + enabled: true + config: + openStackUserName: "onap-tieto" + openStackRegion: "RegionOne" + openStackKeyStoneUrl: "http://10.20.30.40:5000" + openStackServiceTenantName: "services" + # instructions how to generate this value properly are in OOM quick quide mentioned above + openStackEncryptedPasswordHere: "31ECA9F2BA98EF34C9EC3412D071E31185F6D9522808867894FF566E6118983AD5E6F794B8034558" + + replicaCount: 1 + + liveness: + # necessary to disable liveness probe when setting breakpoints + # in debugger so K8s doesn't restart unresponsive container + enabled: true + + so-catalog-db-adapter: + config: + openStackUserName: "onap-tieto" + openStackKeyStoneUrl: "http://10.20.30.40:5000/v2.0" + # instructions how to generate this value properly are in OOM quick quide mentioned above + 
openStackEncryptedPasswordHere: "31ECA9F2BA98EF34C9EC3412D071E31185F6D9522808867894FF566E6118983AD5E6F794B8034558" + + uui: + enabled: true + vfc: + enabled: true + vid: + enabled: true + vnfsdk: + enabled: true + modeling: + enabled: true + + +**2) <helm_charts_dir>/robot/resources/config/eteshare/config/vm_properties.py**:: + + # following patch is required because in Dublin public network is hardcoded + # reported in TEST-166 and is implemented in El-Alto + # just add following row into file + GLOBAL_INJECTED_OPENSTACK_PUBLIC_NETWORK = '{{ .Values.openStackPublicNetworkName }}' + + + +Step 2. Preconditions - after ONAP deployment +============================================= + + +Run HealthChecks after successful deployment, all of them must pass + +Relevant robot scripts are under <helm_charts_dir>/oom/kubernetes/robot + +:: + + [root@tomas-infra robot]# ./ete-k8s.sh onap health + + 61 critical tests, 61 passed, 0 failed + 61 tests total, 61 passed, 0 failed + +very useful page describing commands for `manual checking of HC’s <https://wiki.onap.org/display/DW/Robot+Healthcheck+Tests+on+ONAP+Components#RobotHealthcheckTestsonONAPComponents-ApplicationController(APPC)Healthcheck>`_ + +Step 3. Patch public network +============================ + +This is the last part of correction for `TEST-166 <https://jira.onap.org/browse/TEST-166>`_ needed for Dublin branch. + +:: + + [root@tomas-infra helm_charts]# kubectl get pods -n onap | grep robot + onap-robot-robot-5c7c46bbf4-4zgkn 1/1 Running 0 3h15m + [root@tomas-infra helm_charts]# kubectl exec -it onap-robot-robot-5c7c46bbf4-4zgkn bash + root@onap-robot-robot-5c7c46bbf4-4zgkn:/# cd /var/opt/ONAP/ + root@onap-robot-robot-5c7c46bbf4-4zgkn:/var/opt/ONAP# sed -i 's/network_name=public/network_name=${GLOBAL_INJECTED_OPENSTACK_PUBLIC_NETWORK}/g' robot/resources/demo_preload.robot + root@onap-robot-robot-5c7c46bbf4-4zgkn:/var/opt/ONAP# sed -i 's/network_name=public/network_name=${GLOBAL_INJECTED_OPENSTACK_PUBLIC_NETWORK}/g' robot/resources/stack_validation/policy_check_vfw.robot + root@onap-robot-robot-5c7c46bbf4-4zgkn:/var/opt/ONAP# sed -i 's/network_name=public/network_name=${GLOBAL_INJECTED_OPENSTACK_PUBLIC_NETWORK}/g' robot/resources/stack_validation/validate_vfw.robot + + +Step 4. Set private key for robot when accessing VNFs +===================================================== + +This is workaround for ticket `TEST-167 <https://jira.onap.org/browse/TEST-167>`_, as of now robot is using following file as private key +*/var/opt/ONAP/robot/assets/keys/onap_dev.pvt* + +One can either set it to own private key, corresponding with public key inserted into VMs from *vnfPubKey* param +OR +set mount own private key into robot container and change GLOBAL_VM_PRIVATE_KEY in */var/opt/ONAP/robot/resources/global_properties.robot* + + +Step 5. robot init - demo services distribution +================================================ + +Run following robot script to execute both init_customer + distribute + +:: + + # demo-k8s.sh <namespace> init + + [root@tomas-infra robot]# ./demo-k8s.sh onap init + + + +Step 6. robot instantiateVFW +============================ + +Following tag is used for whole vFWCL testcase. It will deploy single heat stack with 3 VMs and set policies and APPC mount point for vFWCL to happen. + +:: + + # demo-k8s.sh <namespace> instantiateVFW + + root@tomas-infra robot]# ./demo-k8s.sh onap instantiateVFW + +Step 7. 
Fix CloseLoopName in tca microservice +============================================= + +In the Dublin scope, the tca microservice is configured with hardcoded entries from `tcaSpec.json <https://gerrit.onap.org/r/gitweb?p=dcaegen2/analytics/tca.git;a=blob;f=dpo/tcaSpec.json;h=8e69c068ea47300707b8131fbc8d71e9a47af8a2;hb=HEAD#l278>`_ + +After updating the operational policy within the instantiateVFW robot tag execution, one must change CloseLoopName in tca to match the generated +value in the policy. This is done in two parts: + +a) get the correct value + +:: + + # from drools container, i.e. drools in Dublin is not mapped to k8s host + curl -k --silent --user 'demo@people.osaaf.org:demo123456!' -X GET https://localhost:9696/policy/pdp/engine/controllers/usecases/drools/facts/usecases/controlloops --insecure + + + # alternatively same value can be obtained from telemetry console in drools container + telemetry + https://localhost:9696/policy/pdp/engine> cd controllers/usecases/drools/facts/usecases/controlloops + https://localhost:9696/policy/pdp/engine/controllers/usecases/drools/facts/usecases/controlloops> get + HTTP/1.1 200 OK + Content-Length: 62 + Content-Type: application/json + Date: Tue, 25 Jun 2019 07:18:56 GMT + Server: Jetty(9.4.14.v20181114) + [ + "ControlLoop-vFirewall-da1fd2be-2a26-4704-ab99-cd80fe1cf89c" + ] + +b) update the tca microservice + +see the Preconditions part in `docs_vfw.html <https://docs.onap.org/en/dublin/submodules/integration.git/docs/docs_vfw.html>`_ +This step will be automated in El-Alto; it's tracked in `TEST-168 <https://jira.onap.org/browse/TEST-168>`_ + +Step 8. Verify vFW +================== + +Verify vFWCL. This step just verifies the CL functionality, which can also be verified by checking the DarkStat GUI on the vSINK VM <sink_ip:667> + +:: + + # demo-k8s.sh <namespace> vfwclosedloop <pgn-ip-address> + # e.g. where 10.8.8.5 is IP from public network dedicated to vPKG VM + root@tomas-infra robot]# ./demo-k8s.sh onap vfwclosedloop 10.8.8.5 + +.. |image0| image:: images/vFWCL-dublin.jpg + :width: 387px + :height: 393px
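
A note on the newly added build/download/clean_docker_images.py: the changeset introduces the script but does not show an invocation for it. Based on its argument parser (one or more image-list files whose entries are kept, plus an optional --debug flag), a minimal usage sketch could look like the following; the list paths are examples only and should point at the data lists used for your build::

    # Keep only the image tags named in the given lists; every other locally
    # pulled image tag is removed through the Docker API.
    python3 ./build/download/clean_docker_images.py \
        ./build/data_lists/onap_docker_images.list \
        ./build/data_lists/infra_docker_images.list \
        --debug

The script exits with the number of images it failed to remove, so a non-zero exit code signals an incomplete cleanup.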