-rw-r--r--   ansible/roles/application/tasks/transfer-helm-charts.yml                              |   9
-rw-r--r--   ansible/roles/nginx/defaults/main.yml                                                  |   2
-rw-r--r--   ansible/roles/package-repository/defaults/main.yml                                     |   2
-rw-r--r--   ansible/roles/package-repository/molecule/default/tests/test_infrastructure-server.py |   2
-rw-r--r--   ansible/roles/package-repository/molecule/default/tests/test_kubernetes-node-1.py     |   2
-rw-r--r--   ansible/test/roles/prepare-package-repository/tasks/main.yml                           |  10
-rw-r--r--   ansible/test/roles/prepare-package-repository/vars/main.yml                            |   2
-rwxr-xr-x   build/build_nexus_blob.sh                                                              |  15
-rwxr-xr-x   build/create_repo.sh                                                                   |   6
-rwxr-xr-x   build/creating_data/docker-images-collector.sh                                         |   4
-rw-r--r--   build/data_lists/onap_docker_images.list                                               | 263
-rwxr-xr-x   build/docker-entrypoint.sh                                                             |   4
-rwxr-xr-x   build/download/docker_downloader.py                                                    |   2
-rwxr-xr-x   build/package.py                                                                       |  27
-rw-r--r--   docs/BuildGuide.rst                                                                    |  21
-rw-r--r--   docs/InstallGuide.rst                                                                  |  25
-rwxr-xr-x   tools/helm-healer.sh                                                                   | 237
-rwxr-xr-x   tools/nexus_image_add.sh                                                               | 103
18 files changed, 475 insertions, 261 deletions
diff --git a/ansible/roles/application/tasks/transfer-helm-charts.yml b/ansible/roles/application/tasks/transfer-helm-charts.yml
index 5e4240b6..56c95cc4 100644
--- a/ansible/roles/application/tasks/transfer-helm-charts.yml
+++ b/ansible/roles/application/tasks/transfer-helm-charts.yml
@@ -3,9 +3,12 @@
- name: Distribute helm charts to infra node
block:
- name: Archive helm charts
- archive:
- path: "{{ app_helm_charts_install_directory }}/*"
- dest: "{{ app_helm_charts_install_directory }}.tgz"
+ command: tar -cvzf {{ app_helm_charts_install_directory }}.tgz -C {{ app_helm_charts_install_directory }} .
+ args:
+ warn: false
+ tags:
+ - skip_ansible_lint # Prevent '[303] tar used in place of unarchive module'
+ changed_when: false # for idempotency
delegate_to: localhost
- name: Create helm charts dir on infra
file:
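The tar command above relies on '-C' to switch into the chart directory first, so the archive holds relative paths instead of the absolute install path. A minimal sketch of what the new task executes on localhost (directory name hypothetical)::

    # assuming app_helm_charts_install_directory=/tmp/helm_charts
    tar -cvzf /tmp/helm_charts.tgz -C /tmp/helm_charts .
    tar -tzf /tmp/helm_charts.tgz | head    # entries are relative: ./, ./onap/, ...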
diff --git a/ansible/roles/nginx/defaults/main.yml b/ansible/roles/nginx/defaults/main.yml
index 8bf44197..c2f1e05c 100644
--- a/ansible/roles/nginx/defaults/main.yml
+++ b/ansible/roles/nginx/defaults/main.yml
@@ -16,7 +16,7 @@ nginx:
- "{{ app_data_path }}/certs:/etc/nginx/certs:ro"
- "{{ app_data_path }}/git-repo:/srv/git:rw"
- "{{ app_data_path }}/http:/srv/http:rw"
- - "{{ app_data_path }}/pkg/rhel:/srv/http/repo.infra-server/rhel:rw"
+ - "{{ app_data_path }}/pkg/rpm:/srv/http/repo.infra-server/rpm:rw"
- "{{ app_data_path }}/pkg/ubuntu/xenial:/srv/http/repo.infra-server/ubuntu/xenial:rw"
- /var/log/nginx:/var/log/nginx:rw
# Default rule for tarball naming translation
diff --git a/ansible/roles/package-repository/defaults/main.yml b/ansible/roles/package-repository/defaults/main.yml
index 678b6bd4..0557eda3 100644
--- a/ansible/roles/package-repository/defaults/main.yml
+++ b/ansible/roles/package-repository/defaults/main.yml
@@ -3,6 +3,6 @@ package_repositories:
- name: "{{ app_name }}"
file: "{{ app_name | lower }}"
description: "{{ app_name | upper }} offline repository"
- baseurl: "{{ 'http://repo.infra-server/rhel' if 'infrastructure' not in group_names else 'file://' + app_data_path + '/pkg/rhel' }}"
+ baseurl: "{{ 'http://repo.infra-server/rpm' if 'infrastructure' not in group_names else 'file://' + app_data_path + '/pkg/rpm' }}"
gpgcheck: false
enabled: true
diff --git a/ansible/roles/package-repository/molecule/default/tests/test_infrastructure-server.py b/ansible/roles/package-repository/molecule/default/tests/test_infrastructure-server.py
index 63588e26..9fb1a4f9 100644
--- a/ansible/roles/package-repository/molecule/default/tests/test_infrastructure-server.py
+++ b/ansible/roles/package-repository/molecule/default/tests/test_infrastructure-server.py
@@ -9,7 +9,7 @@ testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
def test_onap_repo(host):
fc = host.file('/etc/yum.repos.d/moleculetestapp.repo').content_string
expected_content = """[moleculetestapp]
-baseurl = file:///opt/moleculetestapp/pkg/rhel
+baseurl = file:///opt/moleculetestapp/pkg/rpm
enabled = 1
gpgcheck = 0
name = MOLECULETESTAPP offline repository"""
diff --git a/ansible/roles/package-repository/molecule/default/tests/test_kubernetes-node-1.py b/ansible/roles/package-repository/molecule/default/tests/test_kubernetes-node-1.py
index eab7d065..fda85e8d 100644
--- a/ansible/roles/package-repository/molecule/default/tests/test_kubernetes-node-1.py
+++ b/ansible/roles/package-repository/molecule/default/tests/test_kubernetes-node-1.py
@@ -9,7 +9,7 @@ testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
def test_onap_repo(host):
fc = host.file('/etc/yum.repos.d/moleculetestapp.repo').content_string
expected_content = """[moleculetestapp]
-baseurl = http://repo.infra-server/rhel
+baseurl = http://repo.infra-server/rpm
enabled = 1
gpgcheck = 0
name = MOLECULETESTAPP offline repository"""
diff --git a/ansible/test/roles/prepare-package-repository/tasks/main.yml b/ansible/test/roles/prepare-package-repository/tasks/main.yml
index 66f81d6d..983e9766 100644
--- a/ansible/test/roles/prepare-package-repository/tasks/main.yml
+++ b/ansible/test/roles/prepare-package-repository/tasks/main.yml
@@ -12,15 +12,15 @@
name: createrepo
state: present
-- name: "Ensure {{ rhel_pkg_dir }} exists"
+- name: "Ensure {{ pkg_dir }} exists"
file:
- path: "{{ rhel_pkg_dir }}"
+ path: "{{ pkg_dir }}"
state: directory
- name: Download rpm packages
- command: "yumdownloader --destdir={{ rhel_pkg_dir }} --resolve {{ packages | join(' ') }}"
+ command: "yumdownloader --destdir={{ pkg_dir }} --resolve {{ packages | join(' ') }}"
- name: Index repository
- command: "createrepo {{ rhel_pkg_dir }}"
+ command: "createrepo {{ pkg_dir }}"
args:
- creates: "{{ rhel_pkg_dir }}/repodata"
+ creates: "{{ pkg_dir }}/repodata"
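Outside of ansible the role boils down to three shell steps; a sketch assuming a hypothetical pkg_dir of /opt/onap/pkg/rpm::

    mkdir -p /opt/onap/pkg/rpm
    yumdownloader --destdir=/opt/onap/pkg/rpm --resolve docker-ce-18.09.5 container-selinux
    createrepo /opt/onap/pkg/rpm    # the 'creates' guard skips this once repodata/ exists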
diff --git a/ansible/test/roles/prepare-package-repository/vars/main.yml b/ansible/test/roles/prepare-package-repository/vars/main.yml
index 80944284..195ad0f1 100644
--- a/ansible/test/roles/prepare-package-repository/vars/main.yml
+++ b/ansible/test/roles/prepare-package-repository/vars/main.yml
@@ -1,5 +1,5 @@
---
-rhel_pkg_dir: "{{ app_data_path }}/pkg/rhel"
+pkg_dir: "{{ app_data_path }}/pkg/rpm"
packages:
- "docker-ce-18.09.5"
- container-selinux
diff --git a/build/build_nexus_blob.sh b/build/build_nexus_blob.sh
index 72dda540..688410ef 100755
--- a/build/build_nexus_blob.sh
+++ b/build/build_nexus_blob.sh
@@ -212,32 +212,33 @@ push_docker () {
while [ "${1}" != "" ]; do
case ${1} in
-d | --docker ) shift
- NXS_DOCKER_IMG_LISTS+=("${1}")
+ NXS_DOCKER_IMG_LISTS+=("$(realpath ${1})")
;;
-i | --input-directory ) shift
- DATA_DIR="${1}"
+ DATA_DIR="$(realpath ${1})"
;;
-ld | --load-docker-images ) DOCKER_LOAD="true"
;;
-n | --npm ) NPM_PUSH="true"
COMMANDS+=(expect npm)
shift
- NXS_NPM_LISTS+=("${1}")
+ NXS_NPM_LISTS+=("$(realpath ${1})")
;;
-o | --output-directory ) shift
- NEXUS_DATA_DIR="${1}"
+ NEXUS_DATA_DIR="$(realpath ${1})"
;;
-p | --pypi ) PYPI_PUSH="true"
COMMANDS+=(twine)
shift
- NXS_PYPI_LISTS+=("${1}")
+ NXS_PYPI_LISTS+=("$(realpath ${1})")
;;
-rl | --resource-list-directory ) shift
- LISTS_DIR="${1}"
+ LISTS_DIR="$(realpath ${1})"
;;
-h | --help ) usage
;;
*) usage
+ ;;
esac
shift
done
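Wrapping each path argument in realpath normalizes relative inputs to absolute paths up front, so they remain valid even if the script later changes its working directory. For illustration::

    $ cd /root/onap-offline
    $ realpath ./build/data_lists/onap_docker_images.list
    /root/onap-offline/build/data_lists/onap_docker_images.list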
@@ -445,4 +446,4 @@ if [ -f ~/.docker/${DOCKER_CONF_BACKUP} ]; then
fi
echo "Nexus blob is built"
-exit 0
\ No newline at end of file
+exit 0
diff --git a/build/create_repo.sh b/build/create_repo.sh
index b6db6ad2..eaf0ee30 100755
--- a/build/create_repo.sh
+++ b/build/create_repo.sh
@@ -12,7 +12,7 @@ container_repo_volume="/mnt/repo/"
# Docker image name and version
docker_image="centos:centos7.6.1810"
# Expected directory for RPM packages
-expected_dir="resources/pkg/rhel"
+expected_dir="resources/pkg/rpm"
help () {
echo "Script for run docker container with RPM repository"
@@ -56,10 +56,10 @@ do
shift;shift
done
-# Check if path contains expected path "resources/pkg/rhel"
+# Check if path contains expected path "resources/pkg/rpm"
if ! [[ "/$volume_repo_directory/" = *"/$expected_dir/"* ]]; then
# Create repo folder if it not exists
- volume_repo_directory="$volume_repo_directory"/resources/pkg/rhel
+ volume_repo_directory="$volume_repo_directory"/resources/pkg/rpm
[ ! -d "$volume_repo_directory" ] && mkdir -p $volume_repo_directory
fi
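The suffix check uses bash pattern matching against the slash-wrapped path, so both /data and /data/resources/pkg/rpm are accepted as the volume directory. A standalone sketch (mount point hypothetical)::

    expected_dir="resources/pkg/rpm"
    volume_repo_directory="/data"
    if ! [[ "/$volume_repo_directory/" = *"/$expected_dir/"* ]]; then
        volume_repo_directory="$volume_repo_directory"/resources/pkg/rpm
        mkdir -p "$volume_repo_directory"    # -> /data/resources/pkg/rpm
    fi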
diff --git a/build/creating_data/docker-images-collector.sh b/build/creating_data/docker-images-collector.sh
index 9a5baea7..c07de107 100755
--- a/build/creating_data/docker-images-collector.sh
+++ b/build/creating_data/docker-images-collector.sh
@@ -92,7 +92,7 @@ if [ -e "${LIST}" ]; then
fi
# Setup helm
-if pgrep -x "helm" > /dev/null; then
+if ps -eaf | grep -v "grep" | grep "helm" > /dev/null; then
echo "helm is already running"
else
helm init -c > /dev/null
@@ -114,7 +114,7 @@ popd
echo "Creating the list..."
if [ "${PROJECT}" == "onap" ]; then
COMMENT="OOM commit $(git --git-dir="${PROJECT_DIR}/../../.git" rev-parse HEAD)"
- for subsystem in `parse_yaml "${PROJECT_DIR}/values.yaml"`; do
+ for subsystem in `parse_yaml "${PROJECT_DIR}/resources/overrides/onap-all.yaml"`; do
create_list ${subsystem}
done | sort -u > ${LIST}
else
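The replaced pgrep check now uses the classic ps-plus-grep idiom; the 'grep -v "grep"' stage is what keeps the pipeline from matching its own grep process::

    ps -eaf | grep "helm"                    # would also match the grep command itself
    ps -eaf | grep -v "grep" | grep "helm"   # matches only a real helm process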
diff --git a/build/data_lists/onap_docker_images.list b/build/data_lists/onap_docker_images.list
index c849ac69..d203425c 100644
--- a/build/data_lists/onap_docker_images.list
+++ b/build/data_lists/onap_docker_images.list
@@ -1,13 +1,16 @@
-# generated from OOM commit 106dbc9a0224a3259f1eaf24d4bcc8571ed591ab
+# generated from OOM commit 7fd7493cdb4e5056763de07b7a5c7ee1432de79a
alpine:3.6
+ansible/awx_rabbitmq:3.7.4
+ansible/awx_task:latest
+ansible/awx_web:latest
busybox
crunchydata/crunchy-pgpool:centos7-10.4-2.0.0
crunchydata/crunchy-postgres:centos7-10.3-1.8.2
crunchydata/crunchy-postgres:centos7-10.4-2.0.0
docker.elastic.co/beats/filebeat:5.5.0
docker.elastic.co/elasticsearch/elasticsearch:5.5.0
+docker.elastic.co/elasticsearch/elasticsearch:6.1.2
docker.elastic.co/elasticsearch/elasticsearch:6.6.2
-docker.elastic.co/elasticsearch/elasticsearch-oss:6.6.2
docker.elastic.co/kibana/kibana:5.5.0
docker.elastic.co/kibana/kibana:6.6.2
docker.elastic.co/logstash/logstash:5.4.3
@@ -24,191 +27,185 @@ gcr.io/google_samples/k8szk:v3
k8s.gcr.io/etcd-amd64:3.2.24
library/cassandra:2.2.14
library/mariadb:10
-library/python:3.6-alpine
+memcached:latest
netboxcommunity/netbox:v2.5.8
nexus3.onap.org:10001/adfinissygroup/k8s-mariadb-galera-centos:v002
+nexus3.onap.org:10001/adfinissygroup/k8s-mariadb-galera-centos:v004
nexus3.onap.org:10001/busybox
nexus3.onap.org:10001/library/consul:1.0.6
nexus3.onap.org:10001/library/tomcat:8.5
nexus3.onap.org:10001/library/vault:0.10.0
nexus3.onap.org:10001/mariadb:10.1.38
-nexus3.onap.org:10001/mariadb:10.2.14
+nexus3.onap.org:10001/mariadb:10.2.25
nexus3.onap.org:10001/mariadb:10.3.12
nexus3.onap.org:10001/mariadb:10.3.14
-nexus3.onap.org:10001/onap/aaf/aaf_agent:2.1.13
-nexus3.onap.org:10001/onap/aaf/aaf_cass:2.1.13
-nexus3.onap.org:10001/onap/aaf/aaf_cm:2.1.13
-nexus3.onap.org:10001/onap/aaf/aaf_config:2.1.13
-nexus3.onap.org:10001/onap/aaf/aaf_fs:2.1.13
-nexus3.onap.org:10001/onap/aaf/aaf_gui:2.1.13
-nexus3.onap.org:10001/onap/aaf/aaf_hello:2.1.13
-nexus3.onap.org:10001/onap/aaf/aaf_locate:2.1.13
-nexus3.onap.org:10001/onap/aaf/aaf_oauth:2.1.13
-nexus3.onap.org:10001/onap/aaf/aaf_service:2.1.13
+nexus3.onap.org:10001/onap/aaf/aaf_agent:2.1.15
+nexus3.onap.org:10001/onap/aaf/aaf_cass:2.1.15
+nexus3.onap.org:10001/onap/aaf/aaf_config:2.1.15
+nexus3.onap.org:10001/onap/aaf/aaf_core:2.1.15
+nexus3.onap.org:10001/onap/aaf/aaf_hello:2.1.15
nexus3.onap.org:10001/onap/aaf/distcenter:4.0.0
nexus3.onap.org:10001/onap/aaf/sms:4.0.1
nexus3.onap.org:10001/onap/aaf/smsquorumclient:4.0.0
nexus3.onap.org:10001/onap/aaf/testcaservice:4.0.0
nexus3.onap.org:10001/onap/aai/esr-gui:1.4.0
nexus3.onap.org:10001/onap/aai/esr-server:1.4.0
-nexus3.onap.org:10001/onap/aai-graphadmin:1.1.0
-nexus3.onap.org:10001/onap/aai-resources:1.4.0
-nexus3.onap.org:10001/onap/aai-schema-service:1.0.6
-nexus3.onap.org:10001/onap/aai-traversal:1.4.1
-nexus3.onap.org:10001/onap/admportal-sdnc-image:1.5.4
-nexus3.onap.org:10001/onap/appc-cdt-image:1.5.3
-nexus3.onap.org:10001/onap/appc-image:1.5.3
-nexus3.onap.org:10001/onap/babel:1.4.2
-nexus3.onap.org:10001/onap/ccsdk-ansible-server-image:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-apps-ms-neng:0.4.4
-nexus3.onap.org:10001/onap/ccsdk-blueprintsprocessor:0.4.5
-nexus3.onap.org:10001/onap/ccsdk-cds-ui-server:0.4.5
-nexus3.onap.org:10001/onap/ccsdk-commandexecutor:0.4.5
-nexus3.onap.org:10001/onap/ccsdk-controllerblueprints:0.4.5
-nexus3.onap.org:10001/onap/ccsdk-dgbuilder-image:0.4.4
-nexus3.onap.org:10001/onap/ccsdk-sdclistener:0.4.5
-nexus3.onap.org:10001/onap/champ:1.4.0
-nexus3.onap.org:10001/onap/clamp:4.0.5
-nexus3.onap.org:10001/onap/clamp-dashboard-kibana:4.0.5
-nexus3.onap.org:10001/onap/clamp-dashboard-logstash:4.0.5
+nexus3.onap.org:10001/onap/aai-graphadmin:1.5.1
+nexus3.onap.org:10001/onap/aai-graphgraph:1.5
+nexus3.onap.org:10001/onap/aai-resources:1.5.1
+nexus3.onap.org:10001/onap/aai-schema-service:1.5.2
+nexus3.onap.org:10001/onap/aai-traversal:1.5.1
+nexus3.onap.org:10001/onap/admportal-sdnc-image:1.7.4
+nexus3.onap.org:10001/onap/appc-cdt-image:1.6.4
+nexus3.onap.org:10001/onap/appc-image:1.6.4
+nexus3.onap.org:10001/onap/babel:1.5.1
+nexus3.onap.org:10001/onap/ccsdk-ansible-server-image:0.4.4
+nexus3.onap.org:10001/onap/ccsdk-apps-ms-neng:0.6.3
+nexus3.onap.org:10001/onap/ccsdk-blueprintsprocessor:0.6.3
+nexus3.onap.org:10001/onap/ccsdk-cds-ui-server:0.6.3
+nexus3.onap.org:10001/onap/ccsdk-commandexecutor:0.6.3
+nexus3.onap.org:10001/onap/ccsdk-dgbuilder-image:0.6.3
+nexus3.onap.org:10001/onap/ccsdk-sdclistener:0.6.3
+nexus3.onap.org:10001/onap/clamp-backend:4.1.3
+nexus3.onap.org:10001/onap/clamp-dashboard-elasticsearch:4.1.1
+nexus3.onap.org:10001/onap/clamp-dashboard-kibana:4.1.3
+nexus3.onap.org:10001/onap/clamp-dashboard-logstash:4.1.3
+nexus3.onap.org:10001/onap/clamp-frontend:4.1.3
nexus3.onap.org:10001/onap/cli:3.0.0
nexus3.onap.org:10001/onap/data-router:1.3.3
-nexus3.onap.org:10001/onap/data-router:1.4.0
-nexus3.onap.org:10001/onap/dcae-be:1.3.0
-nexus3.onap.org:10001/onap/dcae-dt:1.2.0
-nexus3.onap.org:10001/onap/dcae-fe:1.3.0
-nexus3.onap.org:10001/onap/dcae-tools:1.3.0
-nexus3.onap.org:10001/onap/dcae-tosca-app:1.3.0
-nexus3.onap.org:10001/onap/dmaap/datarouter-node:2.1.0
-nexus3.onap.org:10001/onap/dmaap/datarouter-prov:2.1.0
+nexus3.onap.org:10001/onap/data-router:1.5.1
+nexus3.onap.org:10001/onap/dcae-be:1.3.2
+nexus3.onap.org:10001/onap/dcae-dt:1.3.2
+nexus3.onap.org:10001/onap/dcae-fe:1.3.2
+nexus3.onap.org:10001/onap/dcae-tools:1.3.2
+nexus3.onap.org:10001/onap/dcae-tosca-app:1.3.1
+nexus3.onap.org:10001/onap/dmaap/datarouter-node:2.1.2
+nexus3.onap.org:10001/onap/dmaap/datarouter-prov:2.1.2
nexus3.onap.org:10001/onap/dmaap/dbc-client:1.0.9
nexus3.onap.org:10001/onap/dmaap/dmaap-bc:1.1.5
-nexus3.onap.org:10001/onap/dmaap/dmaap-mr:1.1.14
+nexus3.onap.org:10001/onap/dmaap/dmaap-mr:1.1.16
nexus3.onap.org:10001/onap/dmaap/kafka111:1.0.1
nexus3.onap.org:10001/onap/dmaap/zookeeper:6.0.0
-nexus3.onap.org:10001/onap/elasticsearch-sg:1.4.3
-nexus3.onap.org:10001/onap/externalapi/nbi:4.0.0
-nexus3.onap.org:10001/onap/gizmo:1.4.0
+nexus3.onap.org:10001/onap/externalapi/nbi:5.0.1
nexus3.onap.org:10001/onap/holmes/engine-management:1.2.5
nexus3.onap.org:10001/onap/holmes/rule-management:1.2.6
-nexus3.onap.org:10001/onap/modeling/genericparser:1.0.2
-nexus3.onap.org:10001/onap/model-loader:1.4.0
-nexus3.onap.org:10001/onap/msb/msb_apigateway:1.2.4
-nexus3.onap.org:10001/onap/msb/msb_discovery:1.2.3
+nexus3.onap.org:10001/onap/modeling/etsicatalog:1.0.4
+nexus3.onap.org:10001/onap/model-loader:1.5.1
+nexus3.onap.org:10001/onap/msb/msb_apigateway:1.2.5
+nexus3.onap.org:10001/onap/msb/msb_discovery:1.2.5
nexus3.onap.org:10001/onap/multicloud/azure:1.2.4
-nexus3.onap.org:10001/onap/multicloud/framework:1.3.3
-nexus3.onap.org:10001/onap/multicloud/framework-artifactbroker:1.3.3
-nexus3.onap.org:10001/onap/multicloud/k8s:0.4.0
-nexus3.onap.org:10001/onap/multicloud/openstack-fcaps:1.3.4
+nexus3.onap.org:10001/onap/multicloud/framework:1.4.2
+nexus3.onap.org:10001/onap/multicloud-framework-artifactbroker:1.4.2
+nexus3.onap.org:10001/onap/multicloud/openstack-fcaps:1.4.1
nexus3.onap.org:10001/onap/multicloud/openstack-lenovo:1.3.4
-nexus3.onap.org:10001/onap/multicloud/openstack-ocata:1.3.4
-nexus3.onap.org:10001/onap/multicloud/openstack-pike:1.3.4
-nexus3.onap.org:10001/onap/multicloud/openstack-starlingx:1.3.4
-nexus3.onap.org:10001/onap/multicloud/openstack-windriver:1.3.4
-nexus3.onap.org:10001/onap/multicloud/vio:1.3.1
+nexus3.onap.org:10001/onap/multicloud/openstack-pike:1.4.1
+nexus3.onap.org:10001/onap/multicloud/openstack-starlingx:1.4.1
+nexus3.onap.org:10001/onap/multicloud/openstack-windriver:1.4.1
+nexus3.onap.org:10001/onap/multicloud/vio:1.4.1
nexus3.onap.org:10001/onap/music/cassandra_3_11:3.0.24
nexus3.onap.org:10001/onap/music/cassandra_job:3.0.24
nexus3.onap.org:10001/onap/music/cassandra_music:3.0.0
nexus3.onap.org:10001/onap/music/music:3.0.24
nexus3.onap.org:10001/onap/network-discovery:1.5.1
nexus3.onap.org:10001/onap/oom/kube2msb:1.1.0
-nexus3.onap.org:10001/onap/optf-cmso-dbinit:2.0.0
-nexus3.onap.org:10001/onap/optf-cmso-optimizer:2.0.0
-nexus3.onap.org:10001/onap/optf-cmso-service:2.0.0
-nexus3.onap.org:10001/onap/optf-cmso-ticketmgt:2.0.0
-nexus3.onap.org:10001/onap/optf-cmso-topology:2.0.0
-nexus3.onap.org:10001/onap/optf-has:1.3.1
-nexus3.onap.org:10001/onap/optf-osdf:1.3.2
+nexus3.onap.org:10001/onap/optf-cmso-dbinit:2.1.1
+nexus3.onap.org:10001/onap/optf-cmso-optimizer:2.1.1
+nexus3.onap.org:10001/onap/optf-cmso-robot:2.1.1
+nexus3.onap.org:10001/onap/optf-cmso-service:2.1.1
+nexus3.onap.org:10001/onap/optf-cmso-ticketmgt:2.1.1
+nexus3.onap.org:10001/onap/optf-cmso-topology:2.1.1
+nexus3.onap.org:10001/onap/optf-has:1.3.3
+nexus3.onap.org:10001/onap/optf-osdf:1.3.4
nexus3.onap.org:10001/onap/org.onap.ccsdk.dashboard.ccsdk-app-os:1.1.0
-nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main:1.1.0
+nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main:1.3.0
nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.snmptrap:1.4.0
nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.ves.vescollector:1.4.5
-nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.cm-container:1.6.2
+nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.cm-container:2.0.2
nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.consul-loader-container:1.0.0
-nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.healthcheck-container:1.2.4
-nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container:1.4.18
+nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.dcae-k8s-cleanup-container:1.0.0
+nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.healthcheck-container:1.2.5
+nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container:1.6.4
nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.multisite-init-container:1.0.0
nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.redis-cluster-container:1.0.0
nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.tca-cdap-container:1.1.2
nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.tls-init-container:1.0.3
-nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.configbinding.app-app:2.3.0
-nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.deployment-handler:4.0.1
-nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.inventory-api:3.2.0
+nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.configbinding:2.5.2
+nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.deployment-handler:4.2.0
+nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.inventory-api:3.4.0
nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.policy-handler:5.0.0
-nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.servicechange-handler:1.1.5
-nexus3.onap.org:10001/onap/org.onap.dcaegen2.services.prh.prh-app-server:1.2.4
-nexus3.onap.org:10001/onap/policy-apex-pdp:2.1.0
-nexus3.onap.org:10001/onap/policy-api:2.0.1
-nexus3.onap.org:10001/onap/policy-distribution:2.1.0
-nexus3.onap.org:10001/onap/policy-pap:2.0.1
-nexus3.onap.org:10001/onap/policy-pdpd-cl:1.4.2
-nexus3.onap.org:10001/onap/policy-pe:1.4.1
-nexus3.onap.org:10001/onap/policy-xacml-pdp:2.0.1
-nexus3.onap.org:10001/onap/pomba-aai-context-builder:1.4.0
-nexus3.onap.org:10001/onap/pomba-context-aggregator:1.4.0
-nexus3.onap.org:10001/onap/pomba-network-discovery-context-builder:1.4.0
-nexus3.onap.org:10001/onap/pomba-sdc-context-builder:1.4.0
-nexus3.onap.org:10001/onap/pomba-sdnc-context-builder:1.4.0
-nexus3.onap.org:10001/onap/portal-app:2.5.0
-nexus3.onap.org:10001/onap/portal-db:2.5.0
-nexus3.onap.org:10001/onap/portal-sdk:2.5.0
-nexus3.onap.org:10001/onap/portal-wms:2.5.0
-nexus3.onap.org:10001/onap/sdc-backend:1.4.1
-nexus3.onap.org:10001/onap/sdc-backend-init:1.4.1
-nexus3.onap.org:10001/onap/sdc-cassandra-init:1.4.1
-nexus3.onap.org:10001/onap/sdc-elasticsearch:1.4.1
-nexus3.onap.org:10001/onap/sdc-frontend:1.4.1
-nexus3.onap.org:10001/onap/sdc-init-elasticsearch:1.4.1
-nexus3.onap.org:10001/onap/sdc-kibana:1.4.1
-nexus3.onap.org:10001/onap/sdc-onboard-backend:1.4.1
-nexus3.onap.org:10001/onap/sdc-onboard-cassandra-init:1.4.1
-nexus3.onap.org:10001/onap/sdnc-ansible-server-image:1.5.4
-nexus3.onap.org:10001/onap/sdnc-dmaap-listener-image:1.5.4
-nexus3.onap.org:10001/onap/sdnc-image:1.5.4
-nexus3.onap.org:10001/onap/sdnc-ueb-listener-image:1.5.4
+nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.servicechange-handler:1.3.2
+nexus3.onap.org:10001/onap/org.onap.dcaegen2.services.prh.prh-app-server:1.3.1
+nexus3.onap.org:10001/onap/policy-apex-pdp:2.2.2
+nexus3.onap.org:10001/onap/policy-api:2.1.2
+nexus3.onap.org:10001/onap/policy-distribution:2.2.1
+nexus3.onap.org:10001/onap/policy-pap:2.1.2
+nexus3.onap.org:10001/onap/policy-pdpd-cl:1.5.3
+nexus3.onap.org:10001/onap/policy-pe:1.5.2
+nexus3.onap.org:10001/onap/policy-xacml-pdp:2.1.2
+nexus3.onap.org:10001/onap/pomba-aai-context-builder:1.5.1
+nexus3.onap.org:10001/onap/pomba-context-aggregator:1.5.1
+nexus3.onap.org:10001/onap/pomba-network-discovery-context-builder:1.5.1
+nexus3.onap.org:10001/onap/pomba-sdc-context-builder:1.5.1
+nexus3.onap.org:10001/onap/pomba-sdnc-context-builder:1.5.1
+nexus3.onap.org:10001/onap/portal-app:2.6.0
+nexus3.onap.org:10001/onap/portal-db:2.6.0
+nexus3.onap.org:10001/onap/portal-sdk:2.6.0
+nexus3.onap.org:10001/onap/portal-wms:2.6.0
+nexus3.onap.org:10001/onap/sdc-backend:1.5.2
+nexus3.onap.org:10001/onap/sdc-backend-init:1.5.2
+nexus3.onap.org:10001/onap/sdc-cassandra-init:1.5.2
+nexus3.onap.org:10001/onap/sdc-elasticsearch:1.5.2
+nexus3.onap.org:10001/onap/sdc-frontend:1.5.2
+nexus3.onap.org:10001/onap/sdc-init-elasticsearch:1.5.2
+nexus3.onap.org:10001/onap/sdc-kibana:1.5.2
+nexus3.onap.org:10001/onap/sdc-onboard-backend:1.5.2
+nexus3.onap.org:10001/onap/sdc-onboard-cassandra-init:1.5.2
+nexus3.onap.org:10001/onap/sdnc-ansible-server-image:1.7.4
+nexus3.onap.org:10001/onap/sdnc-dmaap-listener-image:1.7.4
+nexus3.onap.org:10001/onap/sdnc-image:1.7.4
+nexus3.onap.org:10001/onap/sdnc-ueb-listener-image:1.7.4
nexus3.onap.org:10001/onap/search-data-service:1.3.1
-nexus3.onap.org:10001/onap/search-data-service:1.4.3
+nexus3.onap.org:10001/onap/search-data-service:1.5.1
nexus3.onap.org:10001/onap/service-decomposition:1.5.1
nexus3.onap.org:10001/onap/sniroemulator:1.0.0
-nexus3.onap.org:10001/onap/so/api-handler-infra:1.4.3
-nexus3.onap.org:10001/onap/so/bpmn-infra:1.4.4
-nexus3.onap.org:10001/onap/so/catalog-db-adapter:1.4.4
-nexus3.onap.org:10001/onap/so/openstack-adapter:1.4.4
-nexus3.onap.org:10001/onap/so/request-db-adapter:1.4.4
-nexus3.onap.org:10001/onap/so/sdc-controller:1.4.4
-nexus3.onap.org:10001/onap/so/sdnc-adapter:1.4.4
-nexus3.onap.org:10001/onap/so/so-monitoring:1.4.4
-nexus3.onap.org:10001/onap/so/vfc-adapter:1.4.4
-nexus3.onap.org:10001/onap/so/vnfm-adapter:1.4.4
-nexus3.onap.org:10001/onap/sparky-be:1.4.0
-nexus3.onap.org:10001/onap/spike:1.4.0
-nexus3.onap.org:10001/onap/testsuite:1.4.2
-nexus3.onap.org:10001/onap/usecase-ui:1.2.2
-nexus3.onap.org:10001/onap/usecase-ui-server:1.2.1
+nexus3.onap.org:10001/onap/so/api-handler-infra:1.5.3
+nexus3.onap.org:10001/onap/so/bpmn-infra:1.5.3
+nexus3.onap.org:10001/onap/so/catalog-db-adapter:1.5.3
+nexus3.onap.org:10001/onap/so/openstack-adapter:1.5.3
+nexus3.onap.org:10001/onap/so/request-db-adapter:1.5.3
+nexus3.onap.org:10001/onap/so/sdc-controller:1.5.3
+nexus3.onap.org:10001/onap/so/sdnc-adapter:1.5.3
+nexus3.onap.org:10001/onap/so/so-monitoring:1.5.3
+nexus3.onap.org:10001/onap/so/vfc-adapter:1.5.3
+nexus3.onap.org:10001/onap/so/vnfm-adapter:1.5.3
+nexus3.onap.org:10001/onap/sparky-be:1.5.1
+nexus3.onap.org:10001/onap/testsuite:1.5.4
+nexus3.onap.org:10001/onap/usecase-ui:2.0.1
+nexus3.onap.org:10001/onap/usecase-ui-server:2.0.1
nexus3.onap.org:10001/onap/validation:1.3.1
-nexus3.onap.org:10001/onap/vfc/catalog:1.3.2
-nexus3.onap.org:10001/onap/vfc/db:1.3.0
+nexus3.onap.org:10001/onap/vfc/catalog:1.3.4
+nexus3.onap.org:10001/onap/vfc/db:1.3.1
nexus3.onap.org:10001/onap/vfc/emsdriver:1.3.0
-nexus3.onap.org:10001/onap/vfc/gvnfmdriver:1.3.3
+nexus3.onap.org:10001/onap/vfc/gvnfmdriver:1.3.5
nexus3.onap.org:10001/onap/vfc/jujudriver:1.3.1
nexus3.onap.org:10001/onap/vfc/multivimproxy:1.3.0
nexus3.onap.org:10001/onap/vfc/nfvo/svnfm/huawei:1.3.0
nexus3.onap.org:10001/onap/vfc/nfvo/svnfm/nokiav2:1.3.0
-nexus3.onap.org:10001/onap/vfc/nslcm:1.3.2
+nexus3.onap.org:10001/onap/vfc/nslcm:1.3.4
nexus3.onap.org:10001/onap/vfc/resmanagement:1.3.0
-nexus3.onap.org:10001/onap/vfc/vnflcm:1.3.2
-nexus3.onap.org:10001/onap/vfc/vnfmgr:1.3.3
-nexus3.onap.org:10001/onap/vfc/vnfres:1.3.2
+nexus3.onap.org:10001/onap/vfc/vnflcm:1.3.4
+nexus3.onap.org:10001/onap/vfc/vnfmgr:1.3.4
+nexus3.onap.org:10001/onap/vfc/vnfres:1.3.4
nexus3.onap.org:10001/onap/vfc/wfengine-activiti:1.3.0
nexus3.onap.org:10001/onap/vfc/wfengine-mgrservice:1.3.0
nexus3.onap.org:10001/onap/vfc/ztesdncdriver:1.3.0
nexus3.onap.org:10001/onap/vfc/ztevnfmdriver:1.3.1
-nexus3.onap.org:10001/onap/vid:4.3.1
+nexus3.onap.org:10001/onap/vid:5.0.3
nexus3.onap.org:10001/onap/vnfsdk/refrepo:1.3.2
-nexus3.onap.org:10001/onap/workflow-backend:1.4.1
-nexus3.onap.org:10001/onap/workflow-frontend:1.4.1
-nexus3.onap.org:10001/onap/workflow-init:1.4.1
-nexus3.onap.org:10001/sonatype/nexus:2.14.8-01
+nexus3.onap.org:10001/onap/workflow-backend:1.5.1
+nexus3.onap.org:10001/onap/workflow-frontend:1.5.1
+nexus3.onap.org:10001/onap/workflow-init:1.5.1
+nexus3.onap.org:10001/sonatype/nexus:2.14.13-01
nexus3.onap.org:10001/zookeeper:3.4
oomk8s/mariadb-client-init:3.0.0
oomk8s/readiness-check:2.0.0
@@ -222,4 +219,6 @@ rabbitmq:alpine
registry.hub.docker.com/library/busybox:latest
registry.hub.docker.com/library/consul:1.4.3
registry.hub.docker.com/library/mongo:4.0.8
+registry.hub.docker.com/onap/multicloud-framework-artifactbroker:1.4.2
+registry.hub.docker.com/onap/multicloud-k8s:0.5.0
registry.hub.docker.com/oomk8s/ubuntu-init:2.0.0
diff --git a/build/docker-entrypoint.sh b/build/docker-entrypoint.sh
index 1b4ec53c..14f6aaa7 100755
--- a/build/docker-entrypoint.sh
+++ b/build/docker-entrypoint.sh
@@ -14,7 +14,7 @@ help () {
echo -e "-l --list: set path where rpm list is stored in container\n"
echo "Both paths have to be set with shared volume between"
echo "container and host computer. Default path in container is: /tmp/"
- echo "Repository will be created at: /<path>/resources/pkg/rhel/"
+ echo "Repository will be created at: /<path>/resources/pkg/rpm/"
echo "RMP list is stored at: ./data_list/"
}
@@ -52,7 +52,7 @@ do
done
# Testing if directory parameter was used
-# If not, variable is set to default value /tmp/repo/resources/pkg/rhel
+# If not, variable is set to default value /tmp/repo/resources/pkg/rpm
if test -z "$OFFLINE_REPO_DIR"
then
OFFLINE_REPO_DIR="/tmp/repo/"
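The test -z fallback above can equally be written with bash parameter expansion; an equivalent one-line sketch::

    OFFLINE_REPO_DIR="${OFFLINE_REPO_DIR:-/tmp/repo/}"
    echo "repository will be created at: ${OFFLINE_REPO_DIR}resources/pkg/rpm/"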
diff --git a/build/download/docker_downloader.py b/build/download/docker_downloader.py
index d83f682c..27dde12f 100755
--- a/build/download/docker_downloader.py
+++ b/build/download/docker_downloader.py
@@ -40,7 +40,7 @@ class DockerDownloader(ConcurrentDownloader):
self._save = save
try:
# big timeout in case of massive images like pnda-mirror-container:5.0.0 (11.4GB)
- self._docker_client = docker.client.DockerClient(version='auto', timeout=300)
+ self._docker_client = docker.from_env(timeout=300)
except docker.errors.DockerException as err:
log.exception(
'Error creating docker client. Check if is docker installed and running'
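docker.from_env() builds the client from the standard DOCKER_HOST/DOCKER_TLS_VERIFY/DOCKER_CERT_PATH environment variables rather than assuming only the default local socket, while keeping the enlarged timeout. A quick smoke test from a shell (assumes the docker SDK for Python is installed; the tcp endpoint is hypothetical)::

    python3 -c "import docker; print(docker.from_env(timeout=300).version()['Version'])"
    DOCKER_HOST=tcp://127.0.0.1:2375 python3 -c "import docker; docker.from_env(timeout=300).ping()"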
diff --git a/build/package.py b/build/package.py
index ad921ed3..47f78b87 100755
--- a/build/package.py
+++ b/build/package.py
@@ -33,7 +33,8 @@ import tarfile
import git
log = logging.getLogger(__name__)
-script_location = os.path.dirname(os.path.realpath(__file__))
+script_location = os.path.abspath(os.path.join(__file__, '..'))
+offline_repository_dir = os.path.abspath(os.path.join(script_location, '..'))
def prepare_application_repository(directory, url, refspec, patch_path):
@@ -73,6 +74,7 @@ def create_package_info_file(output_file, repository_list, tag):
Generates text file in json format containing basic information about the build
:param output_file:
:param repository_list: list of repositories to be included in package info
+ :param tag: build version of packages
:return:
"""
log.info('Generating package.info file')
@@ -100,7 +102,7 @@ def create_package(tar_content, file_name):
with tarfile.open(file_name, 'w') as output_tar_file:
for src, dst in tar_content.items():
if src != '':
- output_tar_file.add(src, dst)
+ output_tar_file.add(src, dst)
def build_offline_deliverables(build_version,
@@ -139,11 +141,10 @@ def build_offline_deliverables(build_version,
if os.path.exists(output_dir) and os.listdir(output_dir):
if not overwrite:
log.error('Output directory is not empty, use overwrite to force build')
- raise FileExistsError
+ raise FileExistsError(output_dir)
shutil.rmtree(output_dir)
# Git
- offline_repository_dir = os.path.join(script_location, '..')
offline_repository = git.Repo(offline_repository_dir)
application_dir = os.path.join(output_dir, 'application_repository')
@@ -173,6 +174,9 @@ def build_offline_deliverables(build_version,
info_file: 'package.info'
}
+ # add separator if build version not empty
+ build_version = "-" + build_version if build_version != "" else ""
+
if not skip_sw:
log.info('Building offline installer')
os.chdir(os.path.join(offline_repository_dir, 'ansible', 'docker'))
@@ -194,7 +198,7 @@ def build_offline_deliverables(build_version,
log.info('Binaries - workaround')
download_dir_path = os.path.join(resources_directory, 'downloads')
os.chdir(download_dir_path)
- for file in os.listdir():
+ for file in os.listdir(download_dir_path):
if os.path.islink(file):
os.unlink(file)
@@ -214,7 +218,7 @@ def build_offline_deliverables(build_version,
create_package(resources_content, resources_package_tar_path)
if not skip_aux:
- aux_package_tar_path = os.path.join(output_dir, 'aux_package'+ build_version + '.tar')
+ aux_package_tar_path = os.path.join(output_dir, 'aux_package' + build_version + '.tar')
create_package(aux_content, aux_package_tar_path)
shutil.rmtree(application_dir)
@@ -226,7 +230,7 @@ def run_cli():
"""
parser = argparse.ArgumentParser(description='Create Package For Offline Installer')
parser.add_argument('--build-version',
- help='version of the build', default='custom')
+ help='version of the build', default='')
parser.add_argument('application_repository_url', metavar='application-repository-url',
help='git repository hosting application helm charts')
parser.add_argument('--application-repository_reference', default='master',
@@ -234,16 +238,17 @@ def run_cli():
parser.add_argument('--application-patch_file',
help='git patch file to be applied over application repository', default='')
parser.add_argument('--application-charts_dir',
- help='path to directory under application repository containing helm charts ', default='kubernetes')
+ help='path to directory under application repository containing helm charts ',
+ default='kubernetes')
parser.add_argument('--application-configuration',
help='path to application configuration file (helm override configuration)',
- default='')
+ default=os.path.join(offline_repository_dir, 'config/application_configuration.yml'))
parser.add_argument('--application-patch-role',
help='path to application patch role file (ansible role) to be executed right before installation',
default='')
- parser.add_argument('--output-dir', '-o', default=os.path.join(script_location, '..', '..'),
+ parser.add_argument('--output-dir', '-o', default=os.path.join(offline_repository_dir, '../packages'),
help='Destination directory for saving packages')
- parser.add_argument('--resources-directory', default='',
+ parser.add_argument('--resources-directory', default=os.path.join(offline_repository_dir, '../resources'),
help='Path to resource directory')
parser.add_argument('--aux-directory',
help='Path to aux binary directory', default='')
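With the new separator logic the version suffix (with its leading dash) appears in package names only when --build-version is non-empty; the default is now an empty version, i.e. no suffix. For example, assuming a made-up version string of 19.09::

    # default: resources_package.tar, aux_package.tar, ... in ../packages
    ./build/package.py https://gerrit.onap.org/r/oom --application-repository_reference master
    # versioned: resources_package-19.09.tar, aux_package-19.09.tar, ...
    ./build/package.py https://gerrit.onap.org/r/oom --build-version 19.09 --application-repository_reference master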
diff --git a/docs/BuildGuide.rst b/docs/BuildGuide.rst
index 27c0835e..12426b7c 100644
--- a/docs/BuildGuide.rst
+++ b/docs/BuildGuide.rst
@@ -97,13 +97,12 @@ Part 2. Download artifacts for offline installer
# clone the OOM repository
git clone https://gerrit.onap.org/r/oom -b master --recurse-submodules /tmp/oom
- # enable subsystems in oom/kubernetes/onap/values.yaml as required
+ # docker-images-collector.sh script uses the oom/kubernetes/onap/resources/overrides/onap-all.yaml file to find which subsystems
+ # are enabled. By default all subsystems are enabled there. Modify the file if you want to drop some subsystems.
#run the collector providing the path to the project
./build/creating_data/docker-images-collector.sh /tmp/oom/kubernetes/onap
- If the list does not contain any image, no subsystem is enabled in the oom/kubernetes/onap/values.yaml
-
For the other options check the usage of the script.
.. note:: Skip this step if you have already all necessary resources and continue with Part 3. Populate local nexus
@@ -128,14 +127,18 @@ so one might try following command to download most of the required artifacts in
::
# following arguments are provided
- # all data lists are taken in ./build/data_lists/ folder
+ # all data lists are taken from ./build/data_lists/ folder
# all resources will be stored in expected folder structure within ../resources folder
./build/download/download.py --docker ./build/data_lists/infra_docker_images.list ../resources/offline_data/docker_images_infra \
- --docker ./build/data_lists/rke_docker_images.list \
+ --http ./build/data_lists/infra_bin_utils.list ../resources/downloads
+
+ # the following docker images do not necessarily need to be stored under resources, as they are loaded into the repository in the next part
+ # if the second argument for --docker is not present, images are just pulled and cached.
+ # Warning: the script must be run twice separately; for more details run download.py --help
+ ./build/download/download.py --docker ./build/data_lists/rke_docker_images.list \
--docker ./build/data_lists/k8s_docker_images.list \
--docker ./build/data_lists/onap_docker_images.list \
- --http ./build/data_lists/infra_bin_utils.list ../resources/downloads
Alternatively, step-by-step procedure is described in Appendix 1.
@@ -148,7 +151,7 @@ Part 3. Populate local nexus
Prerequisites:
- All data lists and resources which are pushed to local nexus repository are available
-- Following ports are not occupied buy another service: 80, 8081, 8082, 10001
+- Following ports are not occupied by another service: 80, 8081, 8082, 10001
- There's no docker container called "nexus"
.. note:: In case you skipped the Part 2 for the artifacts download, please ensure that the onap docker images are cached and copy of resources data are untarred in *./onap-offline/../resources/*
@@ -185,13 +188,13 @@ From onap-offline directory run:
::
- ./build/package.py <helm charts repo> --build_version "" --application-repository_reference <commit/tag/branch> --output-dir <target\_dir> --resources-directory <target\_dir>
+ ./build/package.py <helm charts repo> --build-version <version> --application-repository_reference <commit/tag/branch> --output-dir <target\_dir> --resources-directory <target\_dir>
For example:
::
- ./build/package.py https://gerrit.onap.org/r/oom --build_version "" --application-repository_reference master --output-dir /tmp/packages --resources-directory /tmp/resources
+ ./build/package.py https://gerrit.onap.org/r/oom --application-repository_reference master --output-dir /tmp/packages --resources-directory /tmp/resources
In the target directory you should find tar files:
diff --git a/docs/InstallGuide.rst b/docs/InstallGuide.rst
index fc527374..4bd65fac 100644
--- a/docs/InstallGuide.rst
+++ b/docs/InstallGuide.rst
@@ -124,17 +124,12 @@ Change the current directory to the ``'ansible'``::
You can see multiple files and directories inside - this is the *offline-installer*. It is implemented as a set of ansible playbooks.
-If you created the ``'sw'`` package according to the *Build Guide* then you should have had the ``'application'`` directory populated with at least the following files:
+If you created the ``'sw'`` package according to the *Build Guide* then you should have the *offline-installer* populated with at least the following files:
-- ``application_configuration.yml``
-- ``hosts.yml``
+- ``application/application_configuration.yml``
+- ``inventory/hosts.yml``
-**NOTE:** The following paragraph describes a way how to create or fine-tune your own ``'application_configuration.yml'`` - we are discouraging you from executing this step. The recommended way is to use the packaged files inside the ``'application'`` directory.
-
-**NOT RECOMMENDED:** If for some reason you don't have these files inside the ``'application'`` directory or you simply want to do things the hard way then you can recreate them from their templates. It is better to keep the originals (templates) intact - so we will copy them to the ``'application'`` directory::
-
- $ cp ../config/application_configuration.yml application/
- $ cp inventory/hosts.yml application/
+The following paragraphs describe fine-tuning of ``'inventory/hosts.yml'`` and ``'application_configuration.yml'`` to reflect your VM setup.
.. _oooi_installguide_config_hosts:
@@ -233,7 +228,7 @@ After all the changes, the ``'hosts.yml'`` should look similar to this::
infrastructure:
hosts:
infrastructure-server:
- ansible_host: 10.8.8.13
+ ansible_host: 10.8.8.100
#IP used for communication between infra and kubernetes nodes, must be specified.
cluster_ip: 10.8.8.100
@@ -326,7 +321,7 @@ Second one controls time zone setting on host. Its value should be time zone name
Final configuration can resemble the following::
resources_dir: /data
- resources_filename: resources-package.tar
+ resources_filename: resources_package.tar
app_data_path: /opt/onap
app_name: onap
timesync:
@@ -367,7 +362,7 @@ We are almost finished with the configuration and we are close to start the inst
You can use the ansible playbook ``'setup.yml'`` like this::
- $ ./run_playbook.sh -i application/hosts.yml setup.yml -u root --ask-pass
+ $ ./run_playbook.sh -i inventory/hosts.yml setup.yml -u root --ask-pass
You will be asked for a password for each node and the playbook will generate an unprotected ssh key-pair ``'~/.ssh/offline_ssh_key'``, which will be distributed to the nodes.
@@ -383,7 +378,7 @@ This command behaves almost identically to the ``'setup.yml'`` playbook.
If you generated the ssh key manually then you can now run the ``'setup.yml'`` playbook like this and achieve the same result as in the first execution::
- $ ./run_playbook.sh -i application/hosts.yml setup.yml
+ $ ./run_playbook.sh -i inventory/hosts.yml setup.yml
This time it should not ask you for any password - of course this is very redundant, because you just distributed two ssh keys for no good reason.
@@ -412,7 +407,7 @@ We will use the default chroot option so we don't need any docker service to be
Installation is actually very straightforward now::
- $ ./run_playbook.sh -i application/hosts.yml -e @application/application_configuration.yml site.yml
+ $ ./run_playbook.sh -i inventory/hosts.yml -e @application/application_configuration.yml site.yml
This will take a while so be patient.
@@ -432,7 +427,7 @@ Part 4. Post-installation and troubleshooting
After all of the playbooks are run successfully, it will still take a lot of time until all pods are up and running. You can monitor your newly created kubernetes cluster for example like this::
- $ ssh -i ~/.ssh/offline_ssh_key root@10.8.8.4 # tailor this command to connect to your infra-node
+ $ ssh -i ~/.ssh/offline_ssh_key root@10.8.8.100 # tailor this command to connect to your infra-node
$ watch -d -n 5 'kubectl get pods --all-namespaces'
Alternatively you can monitor progress with ``helm_deployment_status.py`` script located in offline-installer directory. Transfer it to infra-node and run::
diff --git a/tools/helm-healer.sh b/tools/helm-healer.sh
index b030fcac..a6e5b398 100755
--- a/tools/helm-healer.sh
+++ b/tools/helm-healer.sh
@@ -54,72 +54,89 @@ USAGE
(-D|--delete-all)]
[-C|--clean-only]
- Usage 1 (simple heuristics - redeploy failed components):
+EXAMPLES
+
+ Usage 1: (simple heuristics - redeploy failed components):
${CMD} -n onap -f /some/override1.yml -s /dockerdata-nfs
- Usage 2 (redeploy ONLY explicit listed components):
- ${CMD} -n onap -f /some/override1.yml -s /dockerdata-nfs \
+ Usage 2: (redeploy ONLY explicitly listed components):
+ ${CMD} -n onap -f /some/override1.yml -s /dockerdata-nfs \\
-c onap-aaf -c onap-sdc -c onap-portal
- Usage 3 (delete EVERYTHING and redeploy):
- ${CMD} -n onap -f /some/override1.yml -s /dockerdata-nfs \
- --delete-all
-
- Usage 4 (just clean - do not redeploy)
- ${CMD} -n onap -f /some/override1.yml -s /dockerdata-nfs \
- --delete-all --clean-only
-
- Namespace argument and at least one override file are mandatory
- for this script to execute. Also you must provide path to the
- storage or explicitly request to not delete file storage of the
- component.
-
- Storage should be directory where persistent volume resides. It
- will work only if component created a persistent volume with the
- same filename as its release name. Otherwise no effect. The
- exception is when '--delete-all' is used - in that case all
- content of the storage is deleted (because ONAP is not consistent
- with the volume directory names - eg.: sdnc).
-
- CAUTION 1: filename of an override file cannot contain whitespace!
- This is actually helm/onap deploy plugin issue which does not
- handle such files. So I dropped the more complicated version of
- this script when there is no reason to support something on what
- will helm deploy choke anyway.
-
- '--prefix' option is helm release argument - it is actually prefix
- when you list the helm releases - helm is little confusing here.
-
- CAUTION 2: By default release prefix is 'onap' - if you deployed
- release 'onap' and now run this script with different prefix then
- it will skip all 'onap-*' components and will deploy a new release
- with new prefix - BEWARE TO USE PROPER RELEASE PREFIX!
-
- Timeout set the waiting time for helm deploy per component.
-
- '--component' references to release name of the chart which you
- want to redeploy excplicitly - otherwise 'ALL FAILED' components
- will be redeployed. You can target more than one component at once
- - just use the argument multiple times.
-
- Component option is mutually exclusive with the '--delete-all'
- which will delete all components - healthy or not. Actually it will
- delete the whole NAMESPACE and everything in it.
-
- '--clean-only' can be used with any usage: heuristics, explicit
- component list or with '--delete-all'. It basically just skips the
- last step - the actual redeploy.
+ Usage 3: (delete EVERYTHING and redeploy):
+ ${CMD} -n onap -f /some/override1.yml -s /dockerdata-nfs --delete-all
+
+ Usage 4: (delete EVERYTHING and DO NOT redeploy - clean env.)
+ ${CMD} -n onap -s /dockerdata-nfs --delete-all --clean-only
+
+NOTES
+
+ Namespace argument (always) and at least one override file (if you don't
+ use '--delete-all') are mandatory for this script to execute. Also you must
+ provide path to the storage ('--storage') OR explicitly request to not
+ delete file storage of the component ('--no-storage-deletion').
+
+ The storage should be a directory where persistent volume resides. It will
+ work only if the component created the persistent volume with the same
+ filename as its release name. Otherwise no files are deleted. The exception
+ is when '--delete-all' is used - in that case all content of the storage is
+ deleted (because ONAP is not consistent with the volume directory names
+ - e.g.: sdnc).
+
+ '--file' can be used multiple times and it is used for override files
+ which are passed on to helm. The order is significant because if two
+ override files modify one value the latest one is used. This option is
+ ignored if '--clean-only' is used.
+
+ CAUTION 1: filename of an override file cannot contain whitespace! This is
+ actually helm/onap deploy plugin issue which does not handle such files. So
+ I dropped the more complicated version of this script since there is no
+ reason to support something that helm deploy will choke on anyway.
+
+ '--prefix' option is helm release argument - it is actually prefix when you
+ list the helm releases - helm is a little confusing here.
+
+ CAUTION 2: By default release prefix is 'onap' - if you deployed release
+ 'onap' and now run this script with different prefix then it will skip all
+ 'onap-*' components and will deploy a new release with new prefix - BEWARE
+ TO USE PROPER RELEASE PREFIX!
+
+ Timeout sets the waiting time for helm deploy per component.
+
+ '--component' refers to the release name of the chart which you want to
+ redeploy explicitly - otherwise 'ALL FAILED' components will be
+ redeployed. You can target more than one component at once - just use the
+ argument multiple times.
+
+ Component option is mutually exclusive with the '--delete-all' which will
+ delete all components - healthy or not. Actually it will delete the whole
+ NAMESPACE and everything in it. Also, to be sure, it will clean up all
+ orphaned images and volumes on all kubernetes nodes.
+
+ '--clean-only' can be used with any usage: heuristics, explicit component
+ list or with '--delete-all'. It basically just skips the last step - the
+ actual redeploy.
EOF
}
+use_help()
+{
+ printf "Try help: ${CMD} --help\n"
+}
+
msg()
{
- echo -e "${COLOR_ON_GREEN}INFO: $@ ${COLOR_OFF}"
+ printf "${COLOR_ON_GREEN}INFO: $@ ${COLOR_OFF}\n"
}
error()
{
- echo -e "${COLOR_ON_RED}ERROR: $@ ${COLOR_OFF}"
+ printf "${COLOR_ON_RED}ERROR: $@ ${COLOR_OFF}\n"
+}
+
+on_exit()
+{
+ printf "$COLOR_OFF"
}
# remove all successfully completed jobs
@@ -153,7 +170,6 @@ get_labels()
kubectl get pods -n ${NAMESPACE} \
--show-labels=true \
- --include-uninitialized=true \
${_selector} \
--ignore-not-found=true \
--no-headers=true | \
@@ -183,7 +199,6 @@ delete_job()
kubectl delete job -n ${NAMESPACE} \
--cascade=true \
--now=true \
- --include-uninitialized=true \
--wait=true \
${1}
@@ -213,13 +228,12 @@ delete_resource()
# this is due to missing "release" label in some pods
# grep for the rescue...
kubectl get ${_resource} -n ${NAMESPACE} \
- --no-headers=true | grep "^${_release}"
+ --no-headers=true | grep "^${_release}[-]"
} | awk '{print $1}' | sort -u | while read -r _name _rest ; do
echo "Deleting '${_name}'"
kubectl delete ${_resource} -n ${NAMESPACE} \
--cascade=true \
--now=true \
- --include-uninitialized=true \
--wait=true \
${_name} \
2>&1 | grep -iv 'not[[:space:]]*found'
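The added '[-]' anchors the release prefix at a literal dash, so a release whose name is a prefix of another release can no longer pick up the other's resources. Illustration with made-up resource names::

    $ printf 'onap-aaf-cs\nonap-aafs-x\n' | grep "^onap-aaf"
    onap-aaf-cs
    onap-aafs-x          # false positive from the unanchored prefix
    $ printf 'onap-aaf-cs\nonap-aafs-x\n' | grep "^onap-aaf[-]"
    onap-aaf-cs          # only resources of the onap-aaf release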
@@ -241,7 +255,6 @@ delete_namespace()
kubectl delete namespace \
--cascade=true \
--now=true \
- --include-uninitialized=true \
--wait=true \
"$NAMESPACE"
@@ -280,13 +293,67 @@ EOF
fi
}
+docker_cleanup()
+{
+ _nodes=$(kubectl get nodes \
+ --selector=node-role.kubernetes.io/worker \
+ -o wide \
+ --no-headers=true | \
+ awk '{print $6}')
+
+ if [ -z "$_nodes" ] ; then
+ error "Could not list kubernetes nodes - SKIPPING docker cleanup"
+ return
+ fi
+
+ for _node in $_nodes ; do
+ msg "Docker cleanup on $_node"
+ {
+ ssh -T $_node >/dev/null <<EOF
+if which docker >/dev/null ; then
+ docker system prune --force --all --volumes
+fi
+EOF
+ } &
+ done
+
+ msg "We are waiting now for docker cleanup to finish on all nodes..."
+ wait
+}
+
+is_helm_serve_running()
+{
+ # healthy result: HTTP/1.1 200 OK
+ _helm_serve_result=$(\
+ curl --head --silent --connect-timeout 3 http://127.0.0.1:8879 | \
+ head -n 1 | cut -d" " -f 3 | tr '[:upper:]' '[:lower:]' | tr -d '\r' )
+
+ if [ "$_helm_serve_result" == ok ] ; then
+ return 0
+ else
+ return 1
+ fi
+}
+
# arg: <release name>
-redeploy_component()
+undeploy_component()
{
_chart=$(echo "$1" | sed 's/[^-]*-//')
helm_undeploy ${1}
+
+ # for all kubernetes resources: kubectl api-resources
# TODO: does deleted secret per component break something?
- for x in jobs deployments pods pvc pv ; do
+ for x in jobs \
+ deployments \
+ services \
+ replicasets \
+ statefulsets \
+ daemonsets \
+ pods \
+ pvc \
+ pv \
+ ;
+ do
delete_resource ${x} ${1}
done
@@ -294,10 +361,15 @@ redeploy_component()
msg "Persistent volume data deletion in directory: ${VOLUME_STORAGE}/${1}"
delete_storage "$1"
fi
+}
+# arg: <release name>
+deploy_component()
+{
# TODO: until I can verify that this does the same for this component as helm deploy
#msg "Redeployment of the component ${1}..."
#helm install "local/${_chart}" --name ${1} --namespace ${NAMESPACE} --wait --timeout ${HELM_TIMEOUT}
+ error "NOT IMPLEMENTED"
}
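is_helm_serve_running reduces curl's HTTP status line to the lowercased reason phrase and compares it with "ok". Against a healthy 'helm serve' the pipeline behaves like this::

    $ curl --head --silent --connect-timeout 3 http://127.0.0.1:8879 | head -n 1
    HTTP/1.1 200 OK
    $ curl --head --silent --connect-timeout 3 http://127.0.0.1:8879 | \
        head -n 1 | cut -d" " -f 3 | tr '[:upper:]' '[:lower:]' | tr -d '\r'
    ok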
@@ -338,6 +410,7 @@ while [ -n "$1" ] ; do
--no-storage-deletion)
if [ -n "$arg_storage" ] ; then
error "Usage of storage argument together with no storage deletion option!"
+ use_help
exit 1
elif [ -z "$arg_nostorage" ] ; then
arg_nostorage=nostorage
@@ -348,6 +421,7 @@ while [ -n "$1" ] ; do
-c|--component)
if [ -n "$arg_deleteall" ] ; then
error "'Delete all components' used already - argument mismatch"
+ use_help
exit 1
fi
state=component
@@ -355,6 +429,7 @@ while [ -n "$1" ] ; do
-D|--delete-all)
if [ -n "$arg_components" ] ; then
error "Explicit component(s) provided already - argument mismatch"
+ use_help
exit 1
elif [ -z "$arg_deleteall" ] ; then
arg_deleteall=deleteall
@@ -374,6 +449,7 @@ while [ -n "$1" ] ; do
;;
*)
error "Unknown parameter: $1"
+ use_help
exit 1
;;
esac
@@ -384,12 +460,14 @@ while [ -n "$1" ] ; do
state=nil
else
error "Duplicit argument for namespace!"
+ use_help
exit 1
fi
;;
override)
if ! [ -f "$1" ] ; then
error "Wrong filename for override file: $1"
+ use_help
exit 1
fi
arg_overrides="${arg_overrides} -f $1"
@@ -405,6 +483,7 @@ while [ -n "$1" ] ; do
state=nil
else
error "Duplicit argument for release prefix!"
+ use_help
exit 1
fi
;;
@@ -412,24 +491,28 @@ while [ -n "$1" ] ; do
if [ -z "$arg_timeout" ] ; then
if ! echo "$1" | grep -q '^[0-9]\+$' ; then
error "Timeout must be an integer: $1"
+ use_help
exit 1
fi
arg_timeout="$1"
state=nil
else
error "Duplicit argument for timeout!"
+ use_help
exit 1
fi
;;
storage)
if [ -n "$arg_nostorage" ] ; then
error "Usage of storage argument together with no storage deletion option!"
+ use_help
exit 1
elif [ -z "$arg_storage" ] ; then
arg_storage="$1"
state=nil
else
error "Duplicit argument for storage!"
+ use_help
exit 1
fi
;;
@@ -437,18 +520,19 @@ while [ -n "$1" ] ; do
shift
done
-# sanity check
+# sanity checks
+
if [ -z "$arg_namespace" ] ; then
error "Missing namespace"
- help
+ use_help
exit 1
else
NAMESPACE="$arg_namespace"
fi
-if [ -z "$arg_overrides" ] ; then
- error "Missing override file(s)"
- help
+if [ -z "$arg_overrides" ] && [ -z "$arg_cleanonly" ] ; then
+ error "Missing override file(s) or use '--clean-only'"
+ use_help
exit 1
else
OVERRIDES="$arg_overrides"
@@ -466,6 +550,7 @@ if [ -n "$arg_storage" ] ; then
VOLUME_STORAGE="$arg_storage"
elif [ -z "$arg_nostorage" ] ; then
error "Missing storage argument! If it is intended then use '--no-storage-deletion' option"
+ use_help
exit 1
fi
@@ -486,6 +571,22 @@ fi
# main
#
+# set trap for this script cleanup
+trap on_exit INT QUIT TERM EXIT
+
+# another sanity checks
+for tool in helm kubectl curl ; do
+ if ! which "$tool" >/dev/null 2>&1 ; then
+ error "Missing '${tool}' command"
+ exit 1
+ fi
+done
+
+if ! is_helm_serve_running ; then
+ error "'helm serve' is not running (http://localhost:8879)"
+ exit 1
+fi
+
# if --delete-all is used then redeploy all components (the current namespace is deleted)
if [ -n "$HELM_DELETE_ALL" ] ; then
# undeploy helm release (prefix)
@@ -494,6 +595,10 @@ if [ -n "$HELM_DELETE_ALL" ] ; then
# we will delete the whole namespace
delete_namespace
+ # we will cleanup docker on each node
+ docker_cleanup
+
+ # we will delete the content of storage (volumes)
if [ -n "$VOLUME_STORAGE" ] ; then
delete_storage
fi
@@ -515,7 +620,7 @@ else
for _component in ${_COMPONENTS} ; do
if echo "$_component" | grep -q "^${RELEASE_PREFIX}-" ; then
msg "Redeploy component: ${_component}"
- redeploy_component ${_component}
+ undeploy_component ${_component}
else
error "Component release name '${_component}' does not match release prefix: ${RELEASE_PREFIX} (SKIP)"
fi
diff --git a/tools/nexus_image_add.sh b/tools/nexus_image_add.sh
new file mode 100755
index 00000000..35940817
--- /dev/null
+++ b/tools/nexus_image_add.sh
@@ -0,0 +1,103 @@
+#!/usr/bin/env bash
+
+#Variables
+SCRIPT_PATH=$0
+CMD=$(basename "$0")
+
+REPO_IMAGE=${1}
+
+#Server with connection to internet
+SERVER=${2}
+
+#User and password for docker login on $SERVER
+REMOTE_USER=
+REMOTE_PASSWD=
+
+#User and password for docker login on localhost
+LOCAL_USER=
+LOCAL_PASSWD=
+IMAGE=`sed 's/^[^/]*\///g' <<< ${1}`
+
+help()
+{
+cat <<EOF
+${CMD} - tool for downloading image and adding it to offline nexus repository
+
+Usage
+ $SCRIPT_PATH <repository>/<image>:<tag> <server>
+
+ $SCRIPT_PATH <repository>/<image>:<tag> <server> --local-repo example.repository
+
+MANDATORY PARAMETERS:
+<repository>/<image>:<tag> == image to be downloaded (e.g. nexus3.onap.org:10001/library/busybox:latest)
+<server> == server with connection to internet and docker installed
+
+OPTIONAL PARAMETERS:
+--remote-user == user for docker login on <server>
+--remote-passwd == password for remote-user
+--local-user == user for docker login on localhost
+--local-passwd == password for local-user
+--local-repo == local repository where new image should be pushed
+
+EOF
+}
+
+if [ $# -lt 2 ]; then
+ help
+ exit 1
+fi
+
+#Set up parameters
+while [[ $# -gt 2 ]]; do
+ case $3 in
+ --remote-user) REMOTE_USER=$4
+ ;;
+ --remote-passwd) REMOTE_PASSWD=$4
+ ;;
+ --local-user) LOCAL_USER=$4
+ ;;
+ --local-passwd) LOCAL_PASSWD=$4
+ ;;
+ --local-repo) LOCAL_REPO=$4
+ ;;
+ -h | --help) help
+ exit 0
+ ;;
+ *) help
+ exit 1
+ ;;
+ esac
+ shift 2
+done
+
+REMOTE_USER=${REMOTE_USER:-jenkins}
+REMOTE_PASSWD=${REMOTE_PASSWD:-jenkins}
+LOCAL_USER=${LOCAL_USER:-admin}
+LOCAL_PASSWD=${LOCAL_PASSWD:-admin123}
+LOCAL_REPO=${LOCAL_REPO:-nexus3.onap.org:10001}
+
+# Login to CICD server, pull image and push it into CICD nexus repo
+/usr/bin/ssh -oStrictHostKeyChecking=no $SERVER << EOF
+ set -e
+ docker pull $REPO_IMAGE
+ docker tag $REPO_IMAGE $SERVER/$IMAGE
+ docker login -u $REMOTE_USER -p $REMOTE_PASSWD $SERVER
+ docker push $SERVER/$IMAGE
+ docker rmi $REPO_IMAGE
+ docker rmi $SERVER/$IMAGE
+EOF
+
+if [ $? -eq 1 ]
+then
+ exit 1
+fi
+
+# Download image from CICD nexus repo and push it into local repo
+docker pull $SERVER/$IMAGE
+docker tag $SERVER/$IMAGE $LOCAL_REPO/$IMAGE
+docker login -u $LOCAL_USER -p $LOCAL_PASSWD $LOCAL_REPO
+docker push $LOCAL_REPO/$IMAGE
+docker rmi $SERVER/$IMAGE
+docker rmi $LOCAL_REPO/$IMAGE
+
+echo 'Done Successfully'
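The IMAGE assignment at the top of the script strips the source registry (everything up to the first slash), so the image can be re-tagged first for the transfer server and then for the local repository. A demonstration of the sed expression plus a typical invocation (hostname hypothetical)::

    $ sed 's/^[^/]*\///g' <<< "nexus3.onap.org:10001/library/busybox:latest"
    library/busybox:latest
    # pull via the connected server, then push into the offline nexus:
    ./tools/nexus_image_add.sh nexus3.onap.org:10001/library/busybox:latest build-server.example.com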