summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xbuild/build_nexus_blob.sh41
-rwxr-xr-xbuild/common-functions.sh98
-rw-r--r--build/package.py259
-rw-r--r--build/requirements.txt2
-rw-r--r--docs/images/vFWCL-dublin.jpgbin0 -> 130332 bytes
-rw-r--r--docs/vFWCL-notes.rst337
6 files changed, 623 insertions, 114 deletions
diff --git a/build/build_nexus_blob.sh b/build/build_nexus_blob.sh
index f3edb482..5f4ed0ff 100755
--- a/build/build_nexus_blob.sh
+++ b/build/build_nexus_blob.sh
@@ -53,31 +53,41 @@ NEXUS_EMAIL=admin@example.org
LOCAL_PATH="$(readlink -f $(dirname ${0}))"
#Defaults
+DOCKER_LOAD="false"
DATA_DIR="$(realpath ${LOCAL_PATH}/../../resources)"
NEXUS_DATA_DIR="${DATA_DIR}/nexus_data"
LISTS_DIR="${LOCAL_PATH}/data_lists"
usage () {
echo " Example usage: build_nexus_blob.sh --input-directory </path/to/downloaded/files/dir> --output-directory
- </path/to/output/dir> --resource-list-directory </path/to/dir/with/resource/list>
+ </path/to/output/dir> --resource-list-directory </path/to/dir/with/resource/list> [--load-docker-images]
- -i | --input-directory directory containing file needed to create nexus blob. The structure of this directory must organized as described in build guide
- -o | --output-directory
- -rl | --resource-list-directory directory with files containing docker, pypi and npm lists
+ -i | --input-directory directory containing files needed to create the nexus blob. The structure of this directory must be organized as described in the build guide
+ -ld | --load-docker-images load docker images from stored files in the input directory
+ -o | --output-directory
+ -rl | --resource-list-directory directory with files containing docker, pypi and npm lists
"
exit 1
}
-while [ "$1" != "" ]; do
- case $1 in
+load_docker_images () {
+ for ARCHIVE in $(sed $'s/\r// ; /^#/d ; s/\:/\_/g ; s/\//\_/g ; s/$/\.tar/g' ${1} | awk '{ print $1 }'); do
+ docker load -i ${NXS_SRC_DOCKER_IMG_DIR}/${ARCHIVE}
+ done
+}
+
+while [ "${1}" != "" ]; do
+ case ${1} in
-i | --input-directory ) shift
- DATA_DIR=$1
+ DATA_DIR="${1}"
+ ;;
+ -ld | --load-docker-images ) DOCKER_LOAD="true"
;;
-o | --output-directory ) shift
- NEXUS_DATA_DIR=$1
+ NEXUS_DATA_DIR="${1}"
;;
-rl | --resource-list-directory ) shift
- LISTS_DIR=$1
+ LISTS_DIR="${1}"
;;
-h | --help ) usage
;;
@@ -179,13 +189,12 @@ fi
# Docker repository preparation #
#################################
-# Load predefined Nexus image
-docker load -i ${NEXUS_IMAGE_TAR}
-
-# Load all necessary images
-for ARCHIVE in $(sed $'s/\r// ; /^#/d ; s/\:/\_/g ; s/\//\_/g ; s/$/\.tar/g' ${NXS_DOCKER_IMG_LIST} | awk '{ print $1 }'); do
- docker load -i ${NXS_SRC_DOCKER_IMG_DIR}/${ARCHIVE}
-done
+if [ "${DOCKER_LOAD}" == "true" ]; then
+ # Load predefined Nexus image
+ docker load -i ${NEXUS_IMAGE_TAR}
+ # Load all necessary images
+ load_docker_images ${NXS_DOCKER_IMG_LIST}
+fi
################################
# Nexus repository preparation #
diff --git a/build/common-functions.sh b/build/common-functions.sh
deleted file mode 100755
index 04ea2017..00000000
--- a/build/common-functions.sh
+++ /dev/null
@@ -1,98 +0,0 @@
-# COPYRIGHT NOTICE STARTS HERE
-#
-# Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# COPYRIGHT NOTICE ENDS HERE
-
-#
-# this file contains shared variables and functions for the onap installer
-#
-
-# any script which needs this file can check this variable
-# and it will know immediately if the functions and variables
-# are loaded and usable
-IS_COMMON_FUNCTIONS_SOURCED=YES
-
-PATH="${PATH}:/usr/local/bin:/usr/local/sbin"
-export PATH
-
-# just self-defense against locale
-LANG=C
-export LANG
-
-# default credentials to the repository
-NEXUS_USERNAME=admin
-NEXUS_PASSWORD=admin123
-NEXUS_EMAIL=admin@onap.org
-
-# this function is intended to unify the installer output
-message() {
- case "$1" in
- info)
- echo 'INFO:' "$@"
- ;;
- debug)
- echo 'DEBUG:' "$@" >&2
- ;;
- warning)
- echo 'WARNING [!]:' "$@" >&2
- ;;
- error)
- echo 'ERROR [!!]:' "$@" >&2
- return 1
- ;;
- *)
- echo 'UNKNOWN [?!]:' "$@" >&2
- return 2
- ;;
- esac
- return 0
-}
-export message
-
-# if the environment variable DEBUG is set to DEBUG-ONAP ->
-# -> this function will print its arguments
-# otherwise nothing is done
-debug() {
- [ "$DEBUG" = DEBUG-ONAP ] && message debug "$@"
-}
-export debug
-
-fail() {
- message error "$@"
- exit 1
-}
-
-retry() {
- local n=1
- local max=5
- while ! "$@"; do
- if [ $n -lt $max ]; then
- n=$((n + 1))
- message warning "Command ${@} failed. Attempt: $n/$max"
- message info "waiting 10s for another try..."
- sleep 10s
- else
- fail "Command ${@} failed after $n attempts. Better to abort now."
- fi
- done
-}
-
-clean_list() {
- sed -e 's/\s*#.*$//' \
- -e '/^\s*$/d' ${1} |
- tr -d '\r' |
- awk '{ print $1 }'
-}
diff --git a/build/package.py b/build/package.py
new file mode 100644
index 00000000..8a1808b3
--- /dev/null
+++ b/build/package.py
@@ -0,0 +1,259 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# COPYRIGHT NOTICE STARTS HERE
+
+# Copyright 2019 . Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# COPYRIGHT NOTICE ENDS HERE
+
+from datetime import datetime
+import subprocess
+import argparse
+import logging
+import shutil
+import glob
+import json
+import sys
+import os
+
+import tarfile
+import git
+
+log = logging.getLogger(__name__)
+script_location = os.path.dirname(os.path.realpath(__file__))
+
+
+def prepare_application_repository(directory, url, refspec, patch_path):
+ """
+ Downloads git repository according to refspec, applies patch if provided
+ :param directory: path to repository
+ :param url: url to repository
+ :param refspec: refspec to fetch
+ :param patch_path: path git patch to be applied over repository
+ :return: repository - git repository object
+ """
+
+ try:
+ shutil.rmtree(directory)
+ except FileNotFoundError:
+ pass
+
+ log.info('Cloning {} with refspec {} '.format(url, refspec))
+ repository = git.Repo.init(directory)
+ origin = repository.create_remote('origin', url)
+ origin.pull(refspec)
+ repository.git.submodule('update', '--init')
+
+ if patch_path:
+ log.info('Applying {} over {} {}'.format(patch_path,
+ url,
+ refspec))
+ repository.git.apply(patch_path)
+ else:
+ log.info('No patch file provided, skipping patching')
+
+ return repository
+
+
+def create_package_info_file(output_file, repository_list):
+ """
+ Generates text file in json format containing basic information about the build
+ :param output_file:
+ :param repository_list: list of repositories to be included in package info
+ :return:
+ """
+ log.info('Generating package.info file')
+ build_info = {
+ 'Build_info': {
+ 'build_date': datetime.now().strftime('%Y-%m-%d_%H-%M')
+ }
+ }
+ for repository in repository_list:
+ build_info['Build_info'][
+ repository.config_reader().get_value('remote "origin"', 'url')] = repository.head.commit.hexsha
+
+ with open(output_file, 'w') as outfile:
+ json.dump(build_info, outfile, indent=4)
+
+
+def create_package(tar_content, file_name):
+ """
+ Creates packages
+ :param tar_content: list of dictionaries defining src file and destination tar file
+ :param file_name: output file
+ """
+ log.info('Creating package {}'.format(file_name))
+ with tarfile.open(file_name, 'w') as output_tar_file:
+ for src, dst in tar_content.items():
+ output_tar_file.add(src, dst)
+
+
+def build_offline_deliverables(application_repository_url,
+ application_repository_reference,
+ application_patch_file,
+ output_dir,
+ resources_directory,
+ skip_sw,
+ skip_resources,
+ skip_aux,
+ overwrite):
+ """
+ Prepares offline deliverables
+ :param application_repository_url: git repository hosting application helm charts
+ :param application_repository_reference: git refspec for repository hosting application helm charts
+ :param application_patch_file: git patch file to be applied over application repository
+ :param output_dir: Destination directory for saving packages
+ :param resources_directory: Path to resource directory
+ :param skip_sw: skip sw package generation
+ :param skip_resources: skip resources package generation
+ :param skip_aux: skip aux package generation
+ :param overwrite: overwrite files in output directory
+ :return:
+ """
+
+ if os.path.exists(output_dir) and os.listdir(output_dir):
+ if not overwrite:
+ log.error('Output directory is not empty, use overwrite to force build')
+ raise FileExistsError
+
+ # Git
+ offline_repository_dir = os.path.join(script_location, '..')
+ offline_repository = git.Repo(offline_repository_dir)
+
+ application_dir = os.path.join(output_dir, 'application_repository')
+ application_repository = prepare_application_repository(application_dir,
+ application_repository_url,
+ application_repository_reference,
+ application_patch_file)
+
+ # Package info
+ info_file = os.path.join(output_dir, 'package.info')
+ create_package_info_file(info_file, [application_repository, offline_repository])
+
+ # packages layout as dictionaries. <file> : <file location under tar archive>
+ sw_content = {
+ os.path.join(offline_repository_dir, 'ansible'): 'ansible',
+ os.path.join(offline_repository_dir, 'config',
+ 'application_configuration.yml'): 'ansible/application/application_configuration.yml',
+ os.path.join(offline_repository_dir, 'patches', 'onap-patch-role'): 'ansible/application/onap-patch-role',
+ os.path.join(application_dir, 'kubernetes'): 'ansible/application/helm_charts',
+ info_file: 'package.info'
+ }
+ resources_content = {
+ resources_directory: '',
+ info_file: 'package.info'
+ }
+ aux_content = {
+ info_file: 'package.info'
+ }
+
+ if not skip_sw:
+ log.info('Building offline installer')
+ os.chdir(os.path.join(offline_repository_dir, 'ansible', 'docker'))
+ installer_build = subprocess.run(
+ os.path.join(offline_repository_dir, 'ansible', 'docker', 'build_ansible_image.sh'))
+ installer_build.check_returncode()
+ os.chdir(script_location)
+ sw_package_tar_path = os.path.join(output_dir, 'sw_package.tar')
+ create_package(sw_content, sw_package_tar_path)
+
+ if not skip_resources:
+ log.info('Building own dns image')
+ dns_build = subprocess.run([
+ os.path.join(offline_repository_dir, 'build', 'creating_data', 'create_nginx_image', '01create-image.sh'),
+ os.path.join(resources_directory, 'offline_data', 'docker_images_infra')])
+ dns_build.check_returncode()
+
+ # Workaround for downloading without "flat" option
+ log.info('Binaries - workaround')
+ download_dir_path = os.path.join(resources_directory, 'downloads')
+ os.chdir(download_dir_path)
+ for file in os.listdir():
+ if os.path.islink(file):
+ os.unlink(file)
+
+ rke_files = glob.glob(os.path.join('.', '**/rke_linux-amd64'), recursive=True)
+ os.symlink(rke_files[0], os.path.join(download_dir_path, rke_files[0].split('/')[-1]))
+
+ helm_tar_files = glob.glob(os.path.join('.', '**/helm-*-linux-amd64.tar.gz'), recursive=True)
+ os.symlink(helm_tar_files[0], os.path.join(download_dir_path, helm_tar_files[0].split('/')[-1]))
+
+ kubectl_files = glob.glob(os.path.join('.', '**/kubectl'), recursive=True)
+ os.symlink(kubectl_files[0], os.path.join(download_dir_path, kubectl_files[0].split('/')[-1]))
+
+ os.chdir(script_location)
+ # End of workaround
+
+ log.info('Create rhel repo')
+ createrepo = subprocess.run(['createrepo', os.path.join(resources_directory, 'pkg', 'rhel')])
+ createrepo.check_returncode()
+
+ resources_package_tar_path = os.path.join(output_dir, 'resources_package.tar')
+ create_package(resources_content, resources_package_tar_path)
+
+ if not skip_aux:
+ aux_package_tar_path = os.path.join(output_dir, 'aux_package.tar')
+ create_package(aux_content, aux_package_tar_path)
+
+ shutil.rmtree(application_dir)
+
+
+def run_cli():
+ """
+ Run as cli tool
+ """
+ parser = argparse.ArgumentParser(description='Create Package For Offline Installer')
+ parser.add_argument('application_repository_url', metavar='application-repository-url',
+ help='git repository hosting application helm charts')
+ parser.add_argument('--application-repository_reference', default='master',
+ help='git refspec for repository hosting application helm charts')
+ parser.add_argument('--application-patch_file',
+ help='git patch file to be applied over application repository', default='')
+ parser.add_argument('--output-dir', '-o', default=os.path.join(script_location, '..', '..'),
+ help='Destination directory for saving packages')
+ parser.add_argument('--resources-directory',
+ help='Path to resource directory')
+ parser.add_argument('--skip-sw', action='store_true', default=False,
+ help='Set to skip sw package generation')
+ parser.add_argument('--skip-resources', action='store_true', default=False,
+ help='Set to skip resources package generation')
+ parser.add_argument('--skip-aux', action='store_true', default=False,
+ help='Set to skip aux package generation')
+ parser.add_argument('--overwrite', action='store_true', default=False,
+ help='overwrite files in output directory')
+ parser.add_argument('--debug', action='store_true', default=False,
+ help='Turn on debug output')
+ args = parser.parse_args()
+
+ if args.debug:
+ logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
+ else:
+ logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(message)s')
+
+ build_offline_deliverables(args.application_repository_url,
+ args.application_repository_reference,
+ args.application_patch_file,
+ args.output_dir,
+ args.resources_directory,
+ args.skip_sw,
+ args.skip_resources,
+ args.skip_aux,
+ args.overwrite)
+
+
+if __name__ == '__main__':
+ run_cli()
+
diff --git a/build/requirements.txt b/build/requirements.txt
new file mode 100644
index 00000000..2c404aed
--- /dev/null
+++ b/build/requirements.txt
@@ -0,0 +1,2 @@
+docker==3.7.2
+gitpython==2.1.11
diff --git a/docs/images/vFWCL-dublin.jpg b/docs/images/vFWCL-dublin.jpg
new file mode 100644
index 00000000..a943a5d4
--- /dev/null
+++ b/docs/images/vFWCL-dublin.jpg
Binary files differ
diff --git a/docs/vFWCL-notes.rst b/docs/vFWCL-notes.rst
new file mode 100644
index 00000000..17a49399
--- /dev/null
+++ b/docs/vFWCL-notes.rst
@@ -0,0 +1,337 @@
+*************************************
+vFWCL on Dublin ONAP offline platform
+*************************************
+
+|image0|
+
+This document is collecting notes we have from running vFirewall demo on offline Dublin platform
+installed by ONAP offline installer tool.
+
+Overall it was much easier compared with the earlier version; however, the following steps are still needed.
+
+Some of the most relevant materials are available on following links:
+
+* `oom_quickstart_guide.html <https://docs.onap.org/en/dublin/submodules/oom.git/docs/oom_quickstart_guide.html>`_
+* `docs_vfw.html <https://docs.onap.org/en/dublin/submodules/integration.git/docs/docs_vfw.html>`_
+
+
+.. contents:: Table of Contents
+ :depth: 2
+
+
+
+Step 1. Preconditions - before ONAP deployment
+==============================================
+
+Understanding of the underlying OpenStack deployment is required from anyone applying these instructions.
+
+In addition, installation-specific location of the helm charts on the infra node must be known.
+In this document it is referred to as <helm_charts_dir>
+
+The snippets below describe the areas we need to configure for a successful vFWCL demo.
+
+Pay attention to them and configure them (ideally before deployment) accordingly.
+
+**1) <helm_charts_dir>/onap/values.yaml**::
+
+
+ #################################################################
+ # Global configuration overrides.
+ # !!! VIM specific entries are in APPC / Robot & SO parts !!!
+ #################################################################
+ global:
+ # Change to an unused port prefix range to prevent port conflicts
+ # with other instances running within the same k8s cluster
+ nodePortPrefix: 302
+ nodePortPrefixExt: 304
+
+ # ONAP Repository
+ # Uncomment the following to enable the use of a single docker
+ # repository but ONLY if your repository mirrors all ONAP
+ # docker images. This includes all images from dockerhub and
+ # any other repository that hosts images for ONAP components.
+ #repository: nexus3.onap.org:10001
+ repositoryCred:
+ user: docker
+ password: docker
+
+ # readiness check - temporary repo until images migrated to nexus3
+ readinessRepository: oomk8s
+ # logging agent - temporary repo until images migrated to nexus3
+ loggingRepository: docker.elastic.co
+
+ # image pull policy
+ pullPolicy: Always
+
+ # default mount path root directory referenced
+ # by persistent volumes and log files
+ persistence:
+ mountPath: /dockerdata-nfs
+ enableDefaultStorageclass: false
+ parameters: {}
+ storageclassProvisioner: kubernetes.io/no-provisioner
+ volumeReclaimPolicy: Retain
+
+ # override default resource limit flavor for all charts
+ flavor: unlimited
+
+ # flag to enable debugging - application support required
+ debugEnabled: false
+
+ #################################################################
+ # Enable/disable and configure helm charts (ie. applications)
+ # to customize the ONAP deployment.
+ #################################################################
+ aaf:
+ enabled: true
+ aai:
+ enabled: true
+ appc:
+ enabled: true
+ config:
+ openStackType: "OpenStackProvider"
+ openStackName: "OpenStack"
+ openStackKeyStoneUrl: "http://10.20.30.40:5000/v2.0"
+ openStackServiceTenantName: "service"
+ openStackDomain: "default"
+ openStackUserName: "onap-tieto"
+ openStackEncryptedPassword: "31ECA9F2BA98EF34C9EC3412D071E31185F6D9522808867894FF566E6118983AD5E6F794B8034558"
+ cassandra:
+ enabled: true
+ clamp:
+ enabled: true
+ cli:
+ enabled: true
+ consul:
+ enabled: true
+ contrib:
+ enabled: true
+ dcaegen2:
+ enabled: true
+ pnda:
+ enabled: true
+ dmaap:
+ enabled: true
+ esr:
+ enabled: true
+ log:
+ enabled: true
+ sniro-emulator:
+ enabled: true
+ oof:
+ enabled: true
+ mariadb-galera:
+ enabled: true
+ msb:
+ enabled: true
+ multicloud:
+ enabled: true
+ nbi:
+ enabled: true
+ config:
+ # openstack configuration
+ openStackRegion: "Yolo"
+ openStackVNFTenantId: "1234"
+ nfs-provisioner:
+ enabled: true
+ policy:
+ enabled: true
+ pomba:
+ enabled: true
+ portal:
+ enabled: true
+ robot:
+ enabled: true
+ appcUsername: "appc@appc.onap.org"
+ appcPassword: "demo123456!"
+ openStackKeyStoneUrl: "http://10.20.30.40:5000"
+ openStackPublicNetId: "9403ceea-0738-4908-a826-316c8541e4bb"
+ openStackPublicNetworkName: "rc3-offline-network"
+ openStackTenantId: "b1ce7742d956463999923ceaed71786e"
+ openStackUserName: "onap-tieto"
+ ubuntu14Image: "trusty"
+ openStackPrivateNetId: "3c7aa2bd-ba14-40ce-8070-6a0d6a617175"
+ openStackPrivateSubnetId: "2bcb9938-9c94-4049-b580-550a44dc63b3"
+ openStackPrivateNetCidr: "10.0.0.0/16"
+ openStackSecurityGroup: "onap_sg"
+ openStackOamNetworkCidrPrefix: "10.0"
+ dcaeCollectorIp: "10.8.8.22" # this IP is taken from k8s host
+ vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPwF2bYm2QuqZpjuAcZDJTcFdUkKv4Hbd/3qqbxf6g5ZgfQarCi+mYnKe9G9Px3CgFLPdgkBBnMSYaAzMjdIYOEdPKFTMQ9lIF0+i5KsrXvszWraGKwHjAflECfpTAWkPq2UJUvwkV/g7NS5lJN3fKa9LaqlXdtdQyeSBZAUJ6QeCE5vFUplk3X6QFbMXOHbZh2ziqu8mMtP+cWjHNBB47zHQ3RmNl81Rjv+QemD5zpdbK/h6AahDncOY3cfN88/HPWrENiSSxLC020sgZNYgERqfw+1YhHrclhf3jrSwCpZikjl7rqKroua2LBI/yeWEta3amTVvUnR2Y7gM8kHyh Generated-by-Nova"
+ demoArtifactsVersion: "1.4.0" # Dublin preferred is 1.4.0
+ demoArtifactsRepoUrl: "https://nexus.onap.org/content/repositories/releases"
+ scriptVersion: "1.4.0" # Dublin prefered is 1.4.0
+ rancherIpAddress: "10.8.8.8" # this IP is taken from infra node
+ config:
+ # instructions on how to generate this value properly are in the OOM quick guide mentioned above
+ openStackEncryptedPasswordHere: "f7920677e15e2678b0f33736189e8965"
+
+ sdc:
+ enabled: true
+ sdnc:
+ enabled: true
+
+ replicaCount: 1
+
+ mysql:
+ replicaCount: 1
+ so:
+ enabled: true
+ config:
+ openStackUserName: "onap-tieto"
+ openStackRegion: "RegionOne"
+ openStackKeyStoneUrl: "http://10.20.30.40:5000"
+ openStackServiceTenantName: "services"
+ # instructions on how to generate this value properly are in the OOM quick guide mentioned above
+ openStackEncryptedPasswordHere: "31ECA9F2BA98EF34C9EC3412D071E31185F6D9522808867894FF566E6118983AD5E6F794B8034558"
+
+ replicaCount: 1
+
+ liveness:
+ # necessary to disable liveness probe when setting breakpoints
+ # in debugger so K8s doesn't restart unresponsive container
+ enabled: true
+
+ so-catalog-db-adapter:
+ config:
+ openStackUserName: "onap-tieto"
+ openStackKeyStoneUrl: "http://10.20.30.40:5000/v2.0"
+ # instructions on how to generate this value properly are in the OOM quick guide mentioned above
+ openStackEncryptedPasswordHere: "31ECA9F2BA98EF34C9EC3412D071E31185F6D9522808867894FF566E6118983AD5E6F794B8034558"
+
+ uui:
+ enabled: true
+ vfc:
+ enabled: true
+ vid:
+ enabled: true
+ vnfsdk:
+ enabled: true
+ modeling:
+ enabled: true
+
+
+**2) <helm_charts_dir>/robot/resources/config/eteshare/config/vm_properties.py**::
+
+ # following patch is required because in Dublin public network is hardcoded
+ # reported in TEST-166 and is implemented in El-Alto
+ # just add following row into file
+ GLOBAL_INJECTED_OPENSTACK_PUBLIC_NETWORK = '{{ .Values.openStackPublicNetworkName }}'
+
+
+
+Step 2. Preconditions - after ONAP deployment
+=============================================
+
+
+Run HealthChecks after a successful deployment; all of them must pass.
+
+Relevant robot scripts are under <helm_charts_dir>/oom/kubernetes/robot
+
+::
+
+ [root@tomas-infra robot]# ./ete-k8s.sh onap health
+
+ 61 critical tests, 61 passed, 0 failed
+ 61 tests total, 61 passed, 0 failed
+
+A very useful page describing commands for `manual checking of HC’s <https://wiki.onap.org/display/DW/Robot+Healthcheck+Tests+on+ONAP+Components#RobotHealthcheckTestsonONAPComponents-ApplicationController(APPC)Healthcheck>`_
+
+Step 3. Patch public network
+============================
+
+This is the last part of correction for `TEST-166 <https://jira.onap.org/browse/TEST-166>`_ needed for Dublin branch.
+
+::
+
+ [root@tomas-infra helm_charts]# kubectl get pods -n onap | grep robot
+ onap-robot-robot-5c7c46bbf4-4zgkn 1/1 Running 0 3h15m
+ [root@tomas-infra helm_charts]# kubectl exec -it onap-robot-robot-5c7c46bbf4-4zgkn bash
+ root@onap-robot-robot-5c7c46bbf4-4zgkn:/# cd /var/opt/ONAP/
+ root@onap-robot-robot-5c7c46bbf4-4zgkn:/var/opt/ONAP# sed -i 's/network_name=public/network_name=${GLOBAL_INJECTED_OPENSTACK_PUBLIC_NETWORK}/g' robot/resources/demo_preload.robot
+ root@onap-robot-robot-5c7c46bbf4-4zgkn:/var/opt/ONAP# sed -i 's/network_name=public/network_name=${GLOBAL_INJECTED_OPENSTACK_PUBLIC_NETWORK}/g' robot/resources/stack_validation/policy_check_vfw.robot
+ root@onap-robot-robot-5c7c46bbf4-4zgkn:/var/opt/ONAP# sed -i 's/network_name=public/network_name=${GLOBAL_INJECTED_OPENSTACK_PUBLIC_NETWORK}/g' robot/resources/stack_validation/validate_vfw.robot
+
+
+Step 4. Set private key for robot when accessing VNFs
+=====================================================
+
+This is workaround for ticket `TEST-167 <https://jira.onap.org/browse/TEST-167>`_, as of now robot is using following file as private key
+*/var/opt/ONAP/robot/assets/keys/onap_dev.pvt*
+
+One can either set it to your own private key, corresponding to the public key inserted into VMs via the *vnfPubKey* param
+OR
+mount your own private key into the robot container and change GLOBAL_VM_PRIVATE_KEY in */var/opt/ONAP/robot/resources/global_properties.robot*
+
+
+Step 5. robot init - demo services distribution
+================================================
+
+Run following robot script to execute both init_customer + distribute
+
+::
+
+ #  demo-k8s.sh <namespace> init
+
+ [root@tomas-infra robot]# ./demo-k8s.sh onap init
+
+
+
+Step 6. robot instantiateVFW
+============================
+
+The following tag is used for the whole vFWCL testcase. It will deploy a single heat stack with 3 VMs and set policies and the APPC mount point for vFWCL to happen.
+
+::
+
+ # demo-k8s.sh <namespace> instantiateVFW
+
+ root@tomas-infra robot]# ./demo-k8s.sh onap instantiateVFW
+
+Step 7. fix CloseLoopName in tca microservice
+=============================================
+
+In Dublin scope, tca microservice is configured with hardcoded entries from `tcaSpec.json <https://gerrit.onap.org/r/gitweb?p=dcaegen2/analytics/tca.git;a=blob;f=dpo/tcaSpec.json;h=8e69c068ea47300707b8131fbc8d71e9a47af8a2;hb=HEAD#l278>`_
+
+After updating operational policy within instantiateVFW robot tag execution, one must change CloseLoopName in tca to match with generated
+value in policy. This is done in two parts:
+
+a) get correct value
+
+::
+
+ # from drools container, i.e. drools in Dublin is not mapped to k8s host
+ curl -k --silent --user 'demo@people.osaaf.org:demo123456!' -X GET https://localhost:9696/policy/pdp/engine/controllers/usecases/drools/facts/usecases/controlloops --insecure
+
+
+ # alternatively same value can be obtained from telemetry console in drools container
+ telemetry
+ https://localhost:9696/policy/pdp/engine> cd controllers/usecases/drools/facts/usecases/controlloops
+ https://localhost:9696/policy/pdp/engine/controllers/usecases/drools/facts/usecases/controlloops> get
+ HTTP/1.1 200 OK
+ Content-Length: 62
+ Content-Type: application/json
+ Date: Tue, 25 Jun 2019 07:18:56 GMT
+ Server: Jetty(9.4.14.v20181114)
+ [
+ "ControlLoop-vFirewall-da1fd2be-2a26-4704-ab99-cd80fe1cf89c"
+ ]
+
+b) update the tca microservice
+
+see Preconditions part in `docs_vfw.html <https://docs.onap.org/en/dublin/submodules/integration.git/docs/docs_vfw.html>`_
+This step will be automated in El-Alto; it's tracked in `TEST-168 <https://jira.onap.org/browse/TEST-168>`_
+
+Step 8. verify vFW
+==================
+
+Verify VFWCL. This step is just to verify CL functionality, which can be also verified by checking DarkStat GUI on vSINK VM <sink_ip:667>
+
+::
+
+ # demo-k8s.sh <namespace> vfwclosedloop <pgn-ip-address>
+ # e.g. where 10.8.8.5 is IP from public network dedicated to vPKG VM
+ root@tomas-infra robot]# ./demo-k8s.sh onap vfwclosedloop 10.8.8.5
+
+.. |image0| image:: images/vFWCL-dublin.jpg
+ :width: 387px
+ :height: 393px