-rw-r--r--  ansible/roles/nfs/molecule/default/molecule.yml                  |   2
-rw-r--r--  ansible/roles/rke/.yamllint                                      |  11
-rw-r--r--  ansible/roles/rke/molecule/default/destroy.yml                   |  34
-rw-r--r--  ansible/roles/rke/molecule/default/molecule.yml                  |  78
-rw-r--r--  ansible/roles/rke/molecule/default/playbook.yml                  |  30
-rw-r--r--  ansible/roles/rke/molecule/default/prepare.yml                   |  15
-rw-r--r--  ansible/roles/rke/molecule/default/tests/test_controlplane.py   |  14
-rw-r--r--  ansible/roles/rke/molecule/default/tests/test_infrastructure.py |  56
-rw-r--r--  ansible/roles/rke/molecule/default/tests/test_kubernetes.py     |  13
-rw-r--r--  ansible/roles/rke/tasks/rke_config.yml                           |   1
-rw-r--r--  ansible/roles/rke/tasks/rke_deploy.yml                           |   9
-rw-r--r--  ansible/test/play-resources/molecule/default/cleanup.yml        |  23
-rw-r--r--  ansible/test/play-resources/molecule/nfs/molecule.yml           |   2
-rw-r--r--  ansible/test/roles/cleanup-directories/tasks/main.yml           |   7
-rw-r--r--  ansible/test/roles/prepare-rke/defaults/main.yml                |   5
-rw-r--r--  ansible/test/roles/prepare-rke/tasks/all.yml                    |   6
-rw-r--r--  ansible/test/roles/prepare-rke/tasks/infra.yml                  |  16
-rw-r--r--  ansible/test/roles/prepare-rke/tasks/main.yml                   |   2
-rwxr-xr-x  build/build_nexus_blob.sh                                       |  26
-rwxr-xr-x  build/creating_data/docker-images-collector.sh                  |  42
-rw-r--r--  build/data_lists/onap_docker_images.list                        |  96
-rw-r--r--  build/data_lists/onap_pip_packages.list                         |   1
-rw-r--r--  docs/InstallGuide.rst                                           |  14
-rwxr-xr-x  helm_deployment_status.py                                      | 167
24 files changed, 496 insertions(+), 174 deletions(-)
diff --git a/ansible/roles/nfs/molecule/default/molecule.yml b/ansible/roles/nfs/molecule/default/molecule.yml
index 7bacf3c4..a8ca6a30 100644
--- a/ansible/roles/nfs/molecule/default/molecule.yml
+++ b/ansible/roles/nfs/molecule/default/molecule.yml
@@ -19,7 +19,7 @@ platforms:
- name: nfs-net
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
- - ${HOME}/data:/dockerdata-nfs:rw
+ - /dockerdata-nfs
- name: kubernetes-node-2
image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
diff --git a/ansible/roles/rke/.yamllint b/ansible/roles/rke/.yamllint
new file mode 100644
index 00000000..ad0be760
--- /dev/null
+++ b/ansible/roles/rke/.yamllint
@@ -0,0 +1,11 @@
+extends: default
+
+rules:
+ braces:
+ max-spaces-inside: 1
+ level: error
+ brackets:
+ max-spaces-inside: 1
+ level: error
+ line-length: disable
+ truthy: disable
diff --git a/ansible/roles/rke/molecule/default/destroy.yml b/ansible/roles/rke/molecule/default/destroy.yml
new file mode 100644
index 00000000..591da82e
--- /dev/null
+++ b/ansible/roles/rke/molecule/default/destroy.yml
@@ -0,0 +1,34 @@
+---
+- name: Destroy
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ no_log: "{{ not (lookup('env', 'MOLECULE_DEBUG') | bool or molecule_yml.provisioner.log|default(false) | bool) }}"
+ tasks:
+ - name: Destroy molecule instance(s)
+ docker_container:
+ name: "{{ item.name }}"
+ docker_host: "{{ item.docker_host | default(lookup('env', 'DOCKER_HOST') or 'unix://var/run/docker.sock') }}"
+ state: absent
+ force_kill: "{{ item.force_kill | default(true) }}"
+ # Modification: we want to clean up old volumes.
+ keep_volumes: false
+ register: server
+ with_items: "{{ molecule_yml.platforms }}"
+ async: 7200
+ poll: 0
+
+ - name: Wait for instance(s) deletion to complete
+ async_status:
+ jid: "{{ item.ansible_job_id }}"
+ register: docker_jobs
+ until: docker_jobs.finished
+ retries: 300
+ with_items: "{{ server.results }}"
+
+ - name: Delete docker network(s)
+ docker_network:
+ name: "{{ item }}"
+ docker_host: "{{ item.docker_host | default(lookup('env', 'DOCKER_HOST') or 'unix://var/run/docker.sock') }}"
+ state: absent
+ with_items: "{{ molecule_yml.platforms | molecule_get_docker_networks }}"
diff --git a/ansible/roles/rke/molecule/default/molecule.yml b/ansible/roles/rke/molecule/default/molecule.yml
new file mode 100644
index 00000000..e8e5ad76
--- /dev/null
+++ b/ansible/roles/rke/molecule/default/molecule.yml
@@ -0,0 +1,78 @@
+---
+dependency:
+ name: galaxy
+driver:
+ name: docker
+lint:
+ name: yamllint
+platforms:
+ - name: infrastructure-server
+ image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+ pre_build_image: true
+ privileged: true
+ override_command: false
+ restart_policy: unless-stopped
+ volumes:
+ - /var/lib/kubelet
+ - /var/lib/docker
+ env:
+ container: docker
+ groups:
+ - infrastructure
+ - kubernetes-control-plane
+ networks:
+ - name: rke
+ purge_networks: true
+
+ - name: kubernetes-node-1
+ image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+ pre_build_image: true
+ privileged: true
+ override_command: false
+ restart_policy: unless-stopped
+ env:
+ container: docker
+ volumes:
+ - /var/lib/kubelet
+ - /var/lib/docker
+ groups:
+ - kubernetes
+ networks:
+ - name: rke
+ purge_networks: true
+
+ - name: kubernetes-node-2
+ image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+ pre_build_image: true
+ privileged: true
+ override_command: false
+ restart_policy: unless-stopped
+ env:
+ container: docker
+ volumes:
+ - /var/lib/kubelet
+ - /var/lib/docker
+ groups:
+ - kubernetes
+ networks:
+ - name: rke
+ purge_networks: true
+
+provisioner:
+ name: ansible
+ env:
+ ANSIBLE_ROLES_PATH: ../../../../test/roles
+ ANSIBLE_LIBRARY: ../../../../library
+ inventory:
+ links:
+ group_vars: ../../../../group_vars
+ options:
+ e: "app_data_path=/opt/onap"
+ lint:
+ name: ansible-lint
+scenario:
+ name: default
+verifier:
+ name: testinfra
+ lint:
+ name: flake8
diff --git a/ansible/roles/rke/molecule/default/playbook.yml b/ansible/roles/rke/molecule/default/playbook.yml
new file mode 100644
index 00000000..09dbfb8e
--- /dev/null
+++ b/ansible/roles/rke/molecule/default/playbook.yml
@@ -0,0 +1,30 @@
+---
+- name: "Set cluster_ip"
+ hosts: all
+ tasks:
+ - name: "Set cluster_ip fact"
+ set_fact:
+ cluster_ip: "{{ ansible_default_ipv4.address }}"
+
+- name: Configure kubernetes cluster (RKE)
+ hosts: infrastructure
+ roles:
+ - role: rke
+ vars:
+ mode: config
+
+- name: Prepare kubernetes nodes (RKE)
+ hosts:
+ - kubernetes
+ - kubernetes-control-plane
+ roles:
+ - role: rke
+ vars:
+ mode: node
+
+- name: Deploy kubernetes cluster (RKE)
+ hosts: infrastructure
+ roles:
+ - role: rke
+ vars:
+ mode: deploy
diff --git a/ansible/roles/rke/molecule/default/prepare.yml b/ansible/roles/rke/molecule/default/prepare.yml
new file mode 100644
index 00000000..6bad2b80
--- /dev/null
+++ b/ansible/roles/rke/molecule/default/prepare.yml
@@ -0,0 +1,15 @@
+---
+- name: "Prepare hosts"
+ hosts: all
+ roles:
+ - role: prepare-rke
+ vars:
+ mode: all
+ - prepare-docker-dind
+
+- name: "Infra specific preparations"
+ hosts: infrastructure
+ roles:
+ - role: prepare-rke
+ vars:
+ mode: infra
diff --git a/ansible/roles/rke/molecule/default/tests/test_controlplane.py b/ansible/roles/rke/molecule/default/tests/test_controlplane.py
new file mode 100644
index 00000000..0bfbca2d
--- /dev/null
+++ b/ansible/roles/rke/molecule/default/tests/test_controlplane.py
@@ -0,0 +1,14 @@
+import os
+import pytest
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+ os.environ['MOLECULE_INVENTORY_FILE']).get_hosts(
+ 'kubernetes-control-plane')
+
+
+@pytest.mark.parametrize('container_name', [
+ 'kube-apiserver', 'kube-controller-manager', 'kube-scheduler', 'kubelet'])
+def test_container_running(host, container_name):
+ assert host.docker(container_name).is_running
diff --git a/ansible/roles/rke/molecule/default/tests/test_infrastructure.py b/ansible/roles/rke/molecule/default/tests/test_infrastructure.py
new file mode 100644
index 00000000..9ba11d6e
--- /dev/null
+++ b/ansible/roles/rke/molecule/default/tests/test_infrastructure.py
@@ -0,0 +1,56 @@
+import os
+import pytest
+import json
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+ os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('infrastructure')
+
+
+@pytest.mark.parametrize('filename', [
+ '/root/.kube/config',
+ '/opt/onap/cluster/cluster.yml',
+ '/opt/onap/cluster/cluster.rkestate'])
+def test_file_existence(host, filename):
+ assert host.file(filename).exists
+
+
+def test_rke_in_path(host):
+ assert host.find_command('rke') == '/usr/local/bin/rke'
+
+
+def test_rke_version_works(host):
+ # Note that we need to cd to the cluster data dir first.
+ assert host.run('cd /opt/onap/cluster && rke version').rc == 0
+
+
+def test_nodes_ready(host):
+ # Retrieve all node names.
+ nodecmdres = host.run('kubectl get nodes -o name')
+ assert nodecmdres.rc == 0
+ nodes = nodecmdres.stdout.splitlines()
+ for node in nodes:
+ assert host.run(
+ 'kubectl wait --timeout=0 --for=condition=ready ' + node).rc == 0
+
+
+def test_pods_ready(host):
+ # Retrieve all pods from all namespaces.
+ # Because we need pod and namespace name, we get full json representation.
+ podcmdres = host.run('kubectl get pods --all-namespaces -o json')
+ assert podcmdres.rc == 0
+ pods = json.loads(podcmdres.stdout)['items']
+ for pod in pods:
+ # Each pod may be either created by a job or not.
+ # In job case they should already be completed
+ # when we are here so we ignore them.
+ namespace = pod['metadata']['namespace']
+ podname = pod['metadata']['name']
+ condition = 'Ready'
+ owners = pod['metadata'].get('ownerReferences', [])
+ if len(owners) == 1 and owners[0]['kind'] == 'Job':
+ continue
+ assert host.run(
+ 'kubectl wait --timeout=120s --for=condition=' + condition + ' -n ' +
+ namespace + ' pods/' + podname).rc == 0
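
The test above shells out to 'kubectl wait' once per pod. For comparison, a minimal sketch (hypothetical, not part of this change) that evaluates the same Ready condition directly from the JSON snapshot already fetched; note it checks the current state rather than waiting up to a timeout:

    import json

    def pod_is_ready(pod):
        # Job-owned pods are expected to have completed already, so treat
        # them as ready, mirroring the skip in test_pods_ready above.
        owners = pod['metadata'].get('ownerReferences', [])
        if len(owners) == 1 and owners[0]['kind'] == 'Job':
            return True
        # A pod publishes its readiness in status.conditions.
        for condition in pod.get('status', {}).get('conditions', []):
            if condition['type'] == 'Ready':
                return condition['status'] == 'True'
        return False

    def test_pods_ready_snapshot(host):
        podcmdres = host.run('kubectl get pods --all-namespaces -o json')
        assert podcmdres.rc == 0
        for pod in json.loads(podcmdres.stdout)['items']:
            assert pod_is_ready(pod), pod['metadata']['name']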
diff --git a/ansible/roles/rke/molecule/default/tests/test_kubernetes.py b/ansible/roles/rke/molecule/default/tests/test_kubernetes.py
new file mode 100644
index 00000000..887494fa
--- /dev/null
+++ b/ansible/roles/rke/molecule/default/tests/test_kubernetes.py
@@ -0,0 +1,13 @@
+import os
+import pytest
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+ os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('kubernetes')
+
+
+@pytest.mark.parametrize('container_name', [
+ 'etcd', 'kubelet', 'kube-proxy'])
+def test_container_running(host, container_name):
+ assert host.docker(container_name).is_running
diff --git a/ansible/roles/rke/tasks/rke_config.yml b/ansible/roles/rke/tasks/rke_config.yml
index 4112e107..9dc0d8c6 100644
--- a/ansible/roles/rke/tasks/rke_config.yml
+++ b/ansible/roles/rke/tasks/rke_config.yml
@@ -37,6 +37,7 @@
template:
src: cluster.yml.j2
dest: "{{ cluster_config_dir }}/cluster.yml"
+ register: cluster_yml
- name: Prepare rke addon manifest (dashboard)
template:
diff --git a/ansible/roles/rke/tasks/rke_deploy.yml b/ansible/roles/rke/tasks/rke_deploy.yml
index 9983d08a..7b36f55d 100644
--- a/ansible/roles/rke/tasks/rke_deploy.yml
+++ b/ansible/roles/rke/tasks/rke_deploy.yml
@@ -1,8 +1,17 @@
---
+- name: "Check if rke is deployed"
+ command: "rke version"
+ args:
+ chdir: "{{ cluster_config_dir }}"
+ failed_when: false
+ changed_when: false
+ register: rke_deployed
+
- name: Run rke up
command: "{{ rke_bin_dir }}/rke up --config cluster.yml"
args:
chdir: "{{ cluster_config_dir }}"
+ when: rke_deployed.rc != 0 or cluster_yml.changed # noqa 503
- name: Ensure .kube directory is present
file:
diff --git a/ansible/test/play-resources/molecule/default/cleanup.yml b/ansible/test/play-resources/molecule/default/cleanup.yml
deleted file mode 100644
index e0c0b624..00000000
--- a/ansible/test/play-resources/molecule/default/cleanup.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Cleanup data from instance (doing it from hosts requires root access).
- hosts: resources
- gather_facts: false
- ignore_unreachable: true
- pre_tasks:
- - name: Find files and dirs to delete
- find:
- paths: /data/ # Only deleting content not dir itself as we get "Device or resource busy" error as it's mounted to container doing the deletion
- patterns: "*"
- recurse: true
- file_type: any
- register: files_to_delete
- - name: Make file/dir path list
- set_fact:
- to_delete_paths: "{{ to_delete_paths | default([]) + [item.path] }}"
- loop: "{{ files_to_delete.files }}"
- when: files_to_delete.files is defined
- roles:
- - role: cleanup-directories
- vars:
- directories_files_list_to_remove: "{{ to_delete_paths }}"
- when: to_delete_paths is defined
diff --git a/ansible/test/play-resources/molecule/nfs/molecule.yml b/ansible/test/play-resources/molecule/nfs/molecule.yml
index ffaabb07..9cff6b8d 100644
--- a/ansible/test/play-resources/molecule/nfs/molecule.yml
+++ b/ansible/test/play-resources/molecule/nfs/molecule.yml
@@ -13,7 +13,7 @@ platforms:
override_command: false
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
- - ${HOME}/data:/data:rw # mount fs from host to get nfs exportfs task working
+ - /data
groups:
- resources
networks:
diff --git a/ansible/test/roles/cleanup-directories/tasks/main.yml b/ansible/test/roles/cleanup-directories/tasks/main.yml
deleted file mode 100644
index 8e79ea0c..00000000
--- a/ansible/test/roles/cleanup-directories/tasks/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Remove directories/files
- file:
- path: "{{ item }}"
- state: absent
- become: true
- loop: "{{ directories_files_list_to_remove }}"
diff --git a/ansible/test/roles/prepare-rke/defaults/main.yml b/ansible/test/roles/prepare-rke/defaults/main.yml
new file mode 100644
index 00000000..2cf85635
--- /dev/null
+++ b/ansible/test/roles/prepare-rke/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+# The rke version.
+rke_version: 0.2.0
+# The kubectl version.
+kubectl_version: 1.13.5
diff --git a/ansible/test/roles/prepare-rke/tasks/all.yml b/ansible/test/roles/prepare-rke/tasks/all.yml
new file mode 100644
index 00000000..d4b67c1f
--- /dev/null
+++ b/ansible/test/roles/prepare-rke/tasks/all.yml
@@ -0,0 +1,6 @@
+# This is needed because logins from non-root users are blocked by default.
+- name: "Allow non-root logins"
+ service:
+ name: systemd-user-sessions
+ state: started
+
diff --git a/ansible/test/roles/prepare-rke/tasks/infra.yml b/ansible/test/roles/prepare-rke/tasks/infra.yml
new file mode 100644
index 00000000..55ab7f16
--- /dev/null
+++ b/ansible/test/roles/prepare-rke/tasks/infra.yml
@@ -0,0 +1,16 @@
+---
+- name: "Ensure {{ app_data_path }} exists"
+ file:
+ path: "{{ app_data_path }}/downloads"
+ state: directory
+
+- name: "Install rke-{{ rke_version }}"
+ get_url:
+ url: "https://github.com/rancher/rke/releases/download/v{{ rke_version }}/rke_linux-amd64"
+ dest: "{{ app_data_path }}/downloads/rke"
+
+- name: "Install kubectl-{{ kubectl_version }}"
+ get_url:
+ url: "https://storage.googleapis.com/kubernetes-release/release/v{{ kubectl_version }}/bin/linux/amd64/kubectl"
+ dest: "/usr/local/bin/kubectl"
+ mode: 0755
diff --git a/ansible/test/roles/prepare-rke/tasks/main.yml b/ansible/test/roles/prepare-rke/tasks/main.yml
new file mode 100644
index 00000000..210c9b57
--- /dev/null
+++ b/ansible/test/roles/prepare-rke/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: "{{ mode }}.yml"
diff --git a/build/build_nexus_blob.sh b/build/build_nexus_blob.sh
index 09ed8969..f3edb482 100755
--- a/build/build_nexus_blob.sh
+++ b/build/build_nexus_blob.sh
@@ -20,7 +20,7 @@
### This script prepares Nexus repositories data blobs for ONAP
-## The script requires following dependencies are installed: nodejs, jq, docker
+## The script requires following dependencies are installed: nodejs, jq, docker, twine
## All required resources are expected in the upper directory created during
## download procedure as DATA_DIR or in the directory given as --input-directory
## All lists used must be in project data_lists directory or in the directory given
@@ -58,22 +58,18 @@ NEXUS_DATA_DIR="${DATA_DIR}/nexus_data"
LISTS_DIR="${LOCAL_PATH}/data_lists"
usage () {
- echo " Example usage: build_nexus_blob.sh -t <tag> --input-directory </path/to/downloaded/files/dir> --output-directory
+ echo " Example usage: build_nexus_blob.sh --input-directory </path/to/downloaded/files/dir> --output-directory
</path/to/output/dir> --resource-list-directory </path/to/dir/with/resource/list>
- -t | --tag release tag, taken from available on git or placed by data generating script (mandatory) must fallow scheme onap_<semver>
-i | --input-directory directory containing file needed to create nexus blob. The structure of this directory must organized as described in build guide
-o | --output-directory
- -rl | --resource-list-directory directory with files containing docker, pypi and rpm lists
+ -rl | --resource-list-directory directory with files containing docker, pypi and npm lists
"
exit 1
}
while [ "$1" != "" ]; do
case $1 in
- -t | --tag ) shift
- TAG=$1
- ;;
-i | --input-directory ) shift
DATA_DIR=$1
;;
@@ -90,22 +86,15 @@ while [ "$1" != "" ]; do
shift
done
-
-# exit if no tag given
-if [ -z ${TAG} ]; then
- usage
- exit 1
-fi
-
# Setup directories with resources for docker, npm and pypi
NXS_SRC_DOCKER_IMG_DIR="${DATA_DIR}/offline_data/docker_images_for_nexus"
NXS_SRC_NPM_DIR="${DATA_DIR}/offline_data/npm_tar"
NXS_SRC_PYPI_DIR="${DATA_DIR}/offline_data/pypi"
-# Setup specific resources list based on the tag provided
-NXS_DOCKER_IMG_LIST="${LISTS_DIR}/${TAG}-docker_images.list"
-NXS_NPM_LIST="${LISTS_DIR}/$(sed 's/.$/x/' <<< ${TAG})-npm.list"
-NXS_PYPI_LIST="${LISTS_DIR}/$(sed 's/.$/x/' <<< ${TAG})-pip_packages.list"
+# Setup specific resources lists
+NXS_DOCKER_IMG_LIST="${LISTS_DIR}/onap_docker_images.list"
+NXS_NPM_LIST="${LISTS_DIR}/onap_npm.list"
+NXS_PYPI_LIST="${LISTS_DIR}/onap_pip_packages.list"
# Setup Nexus image used for build and install infra
INFRA_LIST="${LISTS_DIR}/infra_docker_images.list"
@@ -340,4 +329,3 @@ npm config set registry "https://registry.npmjs.org"
echo "Nexus blob is built"
exit 0
-
diff --git a/build/creating_data/docker-images-collector.sh b/build/creating_data/docker-images-collector.sh
index e13b9150..9206b0bb 100755
--- a/build/creating_data/docker-images-collector.sh
+++ b/build/creating_data/docker-images-collector.sh
@@ -30,9 +30,9 @@ usage () {
echo " "
echo " This script is preparing docker images list based on kubernetes project"
echo " Usage:"
- echo " ./$(basename $0) <project version> <path to project> [<output list file>]"
+ echo " ./$(basename $0) <path to project> [<output list file>]"
echo " "
- echo " Example: ./$(basename $0) onap_3.0.2 /root/oom/kubernetes/onap"
+ echo " Example: ./$(basename $0) /root/oom/kubernetes/onap"
echo " "
echo " Dependencies: helm, python-yaml, make"
echo " "
@@ -55,26 +55,35 @@ PYP
}
create_list() {
- helm template "${PROJECT_DIR}/../${1}" | grep 'image:\ \|tag_version:\ \|h._image' |
+ if [ -d "${PROJECT_DIR}/../${1}" ]; then
+ SUBSYS_DIR="${PROJECT_DIR}/../${1}"
+ elif [ -d "${PROJECT_DIR}/../common/${1}" ]; then
+ SUBSYS_DIR="${PROJECT_DIR}/../common/${1}"
+ else
+ >&2 echo -e "\n !!! ${1} subsystem does not exist !!!\n"
+ fi
+ helm template "${SUBSYS_DIR}" | grep 'image:\ \|tag_version:\ \|h._image' |
sed -e 's/^.*\"h._image\"\ :\ //; s/^.*\"\(.*\)\".*$/\1/' \
-e 's/\x27\|,//g; s/^.*\(image\|tag_version\):\ //' | tr -d '\r'
}
# Configuration
-TAG="${1}"
-PROJECT_DIR="${2}"
-LIST="${3}"
+if [ "${1}" == "-h" ] || [ "${1}" == "--help" ] || [ $# -lt 1 ]; then
+ usage
+fi
+
+PROJECT_DIR="${1}"
+LIST="${2}"
LISTS_DIR="$(readlink -f $(dirname ${0}))/../data_lists"
HELM_REPO="local http://127.0.0.1:8879"
+PROJECT="$(basename ${1})"
-if [ "${1}" == "-h" ] || [ "${1}" == "--help" ] || [ $# -lt 2 ]; then
- usage
-elif [ ! -f "${PROJECT_DIR}/../Makefile" ]; then
+if [ ! -f "${PROJECT_DIR}/../Makefile" ]; then
echo "Wrong path to project directory entered"
exit 1
elif [ -z "${LIST}" ]; then
mkdir -p ${LISTS_DIR}
- LIST="${LISTS_DIR}/${TAG}-docker_images.list"
+ LIST="${LISTS_DIR}/${PROJECT}_docker_images.list"
fi
if [ -e "${LIST}" ]; then
@@ -82,8 +91,6 @@ if [ -e "${LIST}" ]; then
MSG="$(realpath ${LIST}) already existed\nCreated backup $(realpath ${LIST}).bk\n"
fi
-PROJECT="$(basename ${2})"
-
# Setup helm
if pgrep -x "helm" > /dev/null; then
echo "helm is already running"
@@ -106,12 +113,17 @@ popd
# Create the list from all enabled subsystems
echo "Creating the list..."
if [ "${PROJECT}" == "onap" ]; then
+ COMMENT="OOM commit $(git --git-dir="${PROJECT_DIR}/../../.git" rev-parse HEAD)"
for subsystem in `parse_yaml "${PROJECT_DIR}/values.yaml"`; do
create_list ${subsystem}
- done
+ done | sort -u > ${LIST}
else
- create_list ${PROJECT}
-fi | sort -u > ${LIST}
+ COMMENT="${PROJECT}"
+ create_list ${PROJECT} | sort -u > ${LIST}
+fi
+
+# Add a comment referring to the project
+sed -i "1i# generated from ${COMMENT}" "${LIST}"
echo -e ${MSG}
echo -e 'The list has been created:\n '"${LIST}"
diff --git a/build/data_lists/onap_docker_images.list b/build/data_lists/onap_docker_images.list
index 3ac67ed6..451f6172 100644
--- a/build/data_lists/onap_docker_images.list
+++ b/build/data_lists/onap_docker_images.list
@@ -1,13 +1,13 @@
-# generated from OOM commit 94664fb4457c61076cc7e65ed40dda5cf696bcbe
+# generated from OOM commit 0b904977dde761d189874d6dc6c527cd45928d92
alpine:3.6
busybox
crunchydata/crunchy-pgpool:centos7-10.4-2.0.0
crunchydata/crunchy-postgres:centos7-10.3-1.8.2
crunchydata/crunchy-postgres:centos7-10.4-2.0.0
docker.elastic.co/beats/filebeat:5.5.0
+docker.elastic.co/elasticsearch/elasticsearch-oss:6.6.2
docker.elastic.co/elasticsearch/elasticsearch:5.5.0
docker.elastic.co/elasticsearch/elasticsearch:6.6.2
-docker.elastic.co/elasticsearch/elasticsearch-oss:6.6.2
docker.elastic.co/kibana/kibana:5.5.0
docker.elastic.co/kibana/kibana:6.6.2
docker.elastic.co/logstash/logstash:5.4.3
@@ -45,8 +45,8 @@ nexus3.onap.org:10001/onap/aaf/aaf_locate:2.1.13
nexus3.onap.org:10001/onap/aaf/aaf_oauth:2.1.13
nexus3.onap.org:10001/onap/aaf/aaf_service:2.1.13
nexus3.onap.org:10001/onap/aaf/distcenter:4.0.0
-nexus3.onap.org:10001/onap/aaf/sms:4.0.0
nexus3.onap.org:10001/onap/aaf/smsquorumclient:4.0.0
+nexus3.onap.org:10001/onap/aaf/sms:4.0.0
nexus3.onap.org:10001/onap/aaf/testcaservice:4.0.0
nexus3.onap.org:10001/onap/aai/esr-gui:1.4.0
nexus3.onap.org:10001/onap/aai/esr-server:1.4.0
@@ -54,23 +54,22 @@ nexus3.onap.org:10001/onap/aai-graphadmin:1.1.0
nexus3.onap.org:10001/onap/aai-resources:1.4.0
nexus3.onap.org:10001/onap/aai-schema-service:1.0.6
nexus3.onap.org:10001/onap/aai-traversal:1.4.1
-nexus3.onap.org:10001/onap/admportal-sdnc-image:1.5.1
-nexus3.onap.org:10001/onap/appc-cdt-image:1.5.0
-nexus3.onap.org:10001/onap/appc-image:1.5.0
+nexus3.onap.org:10001/onap/admportal-sdnc-image:1.5.2
+nexus3.onap.org:10001/onap/appc-cdt-image:1.5.1
+nexus3.onap.org:10001/onap/appc-image:1.5.1
nexus3.onap.org:10001/onap/babel:1.4.2
nexus3.onap.org:10001/onap/ccsdk-ansible-server-image:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-apps-ms-neng:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-blueprintsprocessor:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-cds-ui-server:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-commandexecutor:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-controllerblueprints:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-dgbuilder-image:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-sdclistener:0.4.2
-nexus3.onap.org:10001/onap/champ:1.4.0
-nexus3.onap.org:10001/onap/clamp:4.0.2
+nexus3.onap.org:10001/onap/ccsdk-apps-ms-neng:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-blueprintsprocessor:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-cds-ui-server:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-commandexecutor:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-controllerblueprints:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-dgbuilder-image:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-sdclistener:0.4.3
nexus3.onap.org:10001/onap/clamp-dashboard-kibana:4.0.1
nexus3.onap.org:10001/onap/clamp-dashboard-logstash:4.0.1
-nexus3.onap.org:10001/onap/cli:2.0.4
+nexus3.onap.org:10001/onap/clamp:4.0.2
+nexus3.onap.org:10001/onap/cli:3.0.0
nexus3.onap.org:10001/onap/data-router:1.3.3
nexus3.onap.org:10001/onap/data-router:1.4.0
nexus3.onap.org:10001/onap/dcae-be:1.3.0
@@ -90,24 +89,25 @@ nexus3.onap.org:10001/onap/externalapi/nbi:4.0.0
nexus3.onap.org:10001/onap/gizmo:1.4.0
nexus3.onap.org:10001/onap/holmes/engine-management:1.2.5
nexus3.onap.org:10001/onap/holmes/rule-management:1.2.6
+nexus3.onap.org:10001/onap/champ:1.4.0
nexus3.onap.org:10001/onap/modeling/genericparser:1.0.2
nexus3.onap.org:10001/onap/model-loader:1.4.0
nexus3.onap.org:10001/onap/msb/msb_apigateway:1.2.4
nexus3.onap.org:10001/onap/msb/msb_discovery:1.2.3
-nexus3.onap.org:10001/onap/multicloud/azure:1.2.1
-nexus3.onap.org:10001/onap/multicloud/framework:1.3.1
+nexus3.onap.org:10001/onap/multicloud/azure:1.2.2
nexus3.onap.org:10001/onap/multicloud/framework-artifactbroker:1.3.3
-nexus3.onap.org:10001/onap/multicloud/k8s:0.2.0
-nexus3.onap.org:10001/onap/multicloud/openstack-fcaps:1.3.2
-nexus3.onap.org:10001/onap/multicloud/openstack-lenovo:1.3.1
-nexus3.onap.org:10001/onap/multicloud/openstack-ocata:1.3.2
-nexus3.onap.org:10001/onap/multicloud/openstack-pike:1.3.2
-nexus3.onap.org:10001/onap/multicloud/openstack-starlingx:1.3.2
-nexus3.onap.org:10001/onap/multicloud/openstack-windriver:1.3.2
+nexus3.onap.org:10001/onap/multicloud/framework:1.3.3
+nexus3.onap.org:10001/onap/multicloud/k8s:0.4.0
+nexus3.onap.org:10001/onap/multicloud/openstack-fcaps:1.3.4
+nexus3.onap.org:10001/onap/multicloud/openstack-lenovo:1.3.4
+nexus3.onap.org:10001/onap/multicloud/openstack-ocata:1.3.4
+nexus3.onap.org:10001/onap/multicloud/openstack-pike:1.3.4
+nexus3.onap.org:10001/onap/multicloud/openstack-starlingx:1.3.4
+nexus3.onap.org:10001/onap/multicloud/openstack-windriver:1.3.4
nexus3.onap.org:10001/onap/multicloud/vio:1.3.1
-nexus3.onap.org:10001/onap/music/cassandra_3_11:3.0.24
nexus3.onap.org:10001/onap/music/cassandra_job:3.0.24
nexus3.onap.org:10001/onap/music/cassandra_music:3.0.0
+nexus3.onap.org:10001/onap/music/cassandra_3_11:3.0.24
nexus3.onap.org:10001/onap/music/music:3.0.24
nexus3.onap.org:10001/onap/network-discovery:1.5.1
nexus3.onap.org:10001/onap/oom/kube2msb:1.1.0
@@ -118,17 +118,17 @@ nexus3.onap.org:10001/onap/optf-cmso-ticketmgt:2.0.0
nexus3.onap.org:10001/onap/optf-cmso-topology:2.0.0
nexus3.onap.org:10001/onap/optf-has:1.3.0
nexus3.onap.org:10001/onap/optf-osdf:1.3.0
-nexus3.onap.org:10001/onap/org.onap.ccsdk.dashboard.ccsdk-app-os:1.1.0-SNAPSHOT-latest
+nexus3.onap.org:10001/onap/org.onap.ccsdk.dashboard.ccsdk-app-os:1.1.0
nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main:1.1.0
nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.snmptrap:1.4.0
nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.ves.vescollector:1.4.4
nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.cm-container:1.6.2
nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.consul-loader-container:1.0.0
nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.healthcheck-container:1.2.4
-nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container:1.4.17
+nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container:1.4.18
nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.multisite-init-container:1.0.0
nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.redis-cluster-container:1.0.0
-nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.tca-cdap-container:1.1.1
+nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.tca-cdap-container:1.1.2
nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.tls-init-container:1.0.3
nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.configbinding.app-app:2.3.0
nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.deployment-handler:4.0.1
@@ -152,19 +152,19 @@ nexus3.onap.org:10001/onap/portal-app:2.5.0
nexus3.onap.org:10001/onap/portal-db:2.5.0
nexus3.onap.org:10001/onap/portal-sdk:2.5.0
nexus3.onap.org:10001/onap/portal-wms:2.5.0
-nexus3.onap.org:10001/onap/sdc-backend:1.4.0
-nexus3.onap.org:10001/onap/sdc-backend-init:1.4.0
-nexus3.onap.org:10001/onap/sdc-cassandra-init:1.4.0
-nexus3.onap.org:10001/onap/sdc-elasticsearch:1.4.0
-nexus3.onap.org:10001/onap/sdc-frontend:1.4.0
-nexus3.onap.org:10001/onap/sdc-init-elasticsearch:1.4.0
-nexus3.onap.org:10001/onap/sdc-kibana:1.4.0
-nexus3.onap.org:10001/onap/sdc-onboard-backend:1.4.0
-nexus3.onap.org:10001/onap/sdc-onboard-cassandra-init:1.4.0
-nexus3.onap.org:10001/onap/sdnc-ansible-server-image:1.5.1
-nexus3.onap.org:10001/onap/sdnc-dmaap-listener-image:1.5.1
-nexus3.onap.org:10001/onap/sdnc-image:1.5.1
-nexus3.onap.org:10001/onap/sdnc-ueb-listener-image:1.5.1
+nexus3.onap.org:10001/onap/sdc-backend-init:1.4.1
+nexus3.onap.org:10001/onap/sdc-backend:1.4.1
+nexus3.onap.org:10001/onap/sdc-cassandra-init:1.4.1
+nexus3.onap.org:10001/onap/sdc-elasticsearch:1.4.1
+nexus3.onap.org:10001/onap/sdc-frontend:1.4.1
+nexus3.onap.org:10001/onap/sdc-init-elasticsearch:1.4.1
+nexus3.onap.org:10001/onap/sdc-kibana:1.4.1
+nexus3.onap.org:10001/onap/sdc-onboard-backend:1.4.1
+nexus3.onap.org:10001/onap/sdc-onboard-cassandra-init:1.4.1
+nexus3.onap.org:10001/onap/sdnc-ansible-server-image:1.5.2
+nexus3.onap.org:10001/onap/sdnc-dmaap-listener-image:1.5.2
+nexus3.onap.org:10001/onap/sdnc-image:1.5.2
+nexus3.onap.org:10001/onap/sdnc-ueb-listener-image:1.5.2
nexus3.onap.org:10001/onap/search-data-service:1.3.1
nexus3.onap.org:10001/onap/search-data-service:1.4.3
nexus3.onap.org:10001/onap/service-decomposition:1.5.1
@@ -182,8 +182,8 @@ nexus3.onap.org:10001/onap/so/vnfm-adapter:1.4.1
nexus3.onap.org:10001/onap/sparky-be:1.4.0
nexus3.onap.org:10001/onap/spike:1.4.0
nexus3.onap.org:10001/onap/testsuite:1.4.0
-nexus3.onap.org:10001/onap/usecase-ui:1.2.2
nexus3.onap.org:10001/onap/usecase-ui-server:1.2.1
+nexus3.onap.org:10001/onap/usecase-ui:1.2.2
nexus3.onap.org:10001/onap/validation:1.3.1
nexus3.onap.org:10001/onap/vfc/catalog:1.3.1
nexus3.onap.org:10001/onap/vfc/db:1.3.0
@@ -202,11 +202,11 @@ nexus3.onap.org:10001/onap/vfc/wfengine-activiti:1.3.0
nexus3.onap.org:10001/onap/vfc/wfengine-mgrservice:1.3.0
nexus3.onap.org:10001/onap/vfc/ztesdncdriver:1.3.0
nexus3.onap.org:10001/onap/vfc/ztevnfmdriver:1.3.1
-nexus3.onap.org:10001/onap/vid:4.0.0
-nexus3.onap.org:10001/onap/vnfsdk/refrepo:1.2.0
-nexus3.onap.org:10001/onap/workflow-backend:1.4.0
-nexus3.onap.org:10001/onap/workflow-frontend:1.4.0
-nexus3.onap.org:10001/onap/workflow-init:1.4.0
+nexus3.onap.org:10001/onap/vid:4.2.0
+nexus3.onap.org:10001/onap/vnfsdk/refrepo:1.3.0
+nexus3.onap.org:10001/onap/workflow-backend:1.4.1
+nexus3.onap.org:10001/onap/workflow-frontend:1.4.1
+nexus3.onap.org:10001/onap/workflow-init:1.4.1
nexus3.onap.org:10001/sonatype/nexus:2.14.8-01
nexus3.onap.org:10001/zookeeper:3.4
oomk8s/mariadb-client-init:3.0.0
diff --git a/build/data_lists/onap_pip_packages.list b/build/data_lists/onap_pip_packages.list
index ba5cdf7e..ab4949b2 100644
--- a/build/data_lists/onap_pip_packages.list
+++ b/build/data_lists/onap_pip_packages.list
@@ -7,6 +7,5 @@ itsdangerous==1.1.0
Jinja2==2.10.1
MarkupSafe==1.1.1
requests==2.22.0
-setuptools==40.7.1
urllib3==1.25.3
Werkzeug==0.15.4
diff --git a/docs/InstallGuide.rst b/docs/InstallGuide.rst
index 7849047d..95b5749f 100644
--- a/docs/InstallGuide.rst
+++ b/docs/InstallGuide.rst
@@ -406,23 +406,23 @@ This will take a while so be patient.
.. _oooi_installguide_postinstall:
-Part 4. Postinstallation and troubleshooting
---------------------------------------------
+Part 4. Post-installation and troubleshooting
+---------------------------------------------
-After all the playbooks are finished, it will still take a lot of time until all pods will be up and running. You can monitor your newly created kubernetes cluster for example like this::
+After all of the playbooks are run successfully, it will still take a lot of time until all pods are up and running. You can monitor your newly created kubernetes cluster for example like this::
$ ssh -i ~/.ssh/offline_ssh_key root@10.8.8.4 # tailor this command to connect to your infra-node
$ watch -d -n 5 'kubectl get pods --all-namespaces'
-Alternatively you can monitor progress with ``helm_deployment_status.py`` script located in offline-installer directory. While on infra-node this can be achieved like this::
+Alternatively you can monitor progress with ``helm_deployment_status.py`` script located in offline-installer directory. Transfer it to infra-node and run::
$ python helm_deployment_status.py -n <namespace_name> # namespace defaults to onap
-To automatically verify functionality, after deployment becomes ready or after timeout period expires, add path to healthcheck scripts::
+To automatically verify functionality with healthchecks after deployment becomes ready or after timeout period expires, append ``-hp`` switch followed by the full path to the healthcheck script and ``--health-mode`` optional switch with appropriate mode supported by that script (``health`` by default, ``--help`` displays available modes)::
- $ python helm_deployment_status.py -hp <app_data_path>/<app_name>/helm_charts/robot/ete-k8s.sh
+ $ python helm_deployment_status.py -hp <app_data_path>/<app_name>/helm_charts/robot/ete-k8s.sh --health-mode <healthcheck mode>
-It is strongly recommended to check help for ``helm_deployment_status.py`` to be able to tailor monitoring to your needs since default values might not be what you are looking for.
+It is strongly recommended to tailor ``helm_deployment_status.py`` to your needs since default values might not be what you'd expect. The defaults can be displayed with ``--help`` switch.
Final result of installation varies based on number of k8s nodes used and distribution of pods. In some dev envs we quite frequently hit problems with not all pods properly deployed. In successful deployments all jobs should be in successful state.
This can be verified using ::
diff --git a/helm_deployment_status.py b/helm_deployment_status.py
index 448263d5..8f9a931d 100755
--- a/helm_deployment_status.py
+++ b/helm_deployment_status.py
@@ -25,30 +25,21 @@ import sys
import argparse
import yaml
import requests
-import subprocess
+from subprocess import Popen, STDOUT, PIPE
import datetime
from time import sleep
from os.path import expanduser
from itertools import chain
import csv
from requests.packages.urllib3.exceptions import InsecureRequestWarning
-
+from base64 import b64decode
+from tempfile import NamedTemporaryFile
def add_resource_kind(resources, kind):
for item in resources:
item['kind'] = kind
return resources
-def get_resources(server, namespace, api, kind, ssl_verify=False):
- url = '/'.join([server, api, 'namespaces', namespace, kind])
- try:
- req = requests.get(url, verify=ssl_verify)
- except requests.exceptions.ConnectionError as err:
- sys.exit('Could not connect to {}'.format(server))
- json = req.json()
- # kind is <resource>List in response so [:-4] removes 'List' from value
- return add_resource_kind(json['items'], json['kind'][:-4])
-
def pods_by_parent(pods, parent):
for pod in pods:
if pod['metadata']['labels']['app'] == parent:
@@ -87,48 +78,44 @@ def analyze_k8s_controllers(resources_data):
return resources
-def get_k8s_controllers(namespace, k8s_url):
+def get_k8s_controllers(k8s):
k8s_controllers = {}
- k8s_controllers['deployments'] = {'data': get_resources(k8s_url, namespace,
+ k8s_controllers['deployments'] = {'data': k8s.get_resources(
'apis/apps/v1', 'deployments')}
- k8s_controllers['deployments'].update(analyze_k8s_controllers(k8s_controllers['deployments']['data']))
+ k8s_controllers['deployments'].update(analyze_k8s_controllers(
+ k8s_controllers['deployments']['data']))
- k8s_controllers['statefulsets'] = {'data': get_resources(k8s_url, namespace,
+ k8s_controllers['statefulsets'] = {'data': k8s.get_resources(
'apis/apps/v1', 'statefulsets')}
- k8s_controllers['statefulsets'].update(analyze_k8s_controllers(k8s_controllers['statefulsets']['data']))
+ k8s_controllers['statefulsets'].update(analyze_k8s_controllers(
+ k8s_controllers['statefulsets']['data']))
- k8s_controllers['jobs'] = {'data': get_resources(k8s_url, namespace,
+ k8s_controllers['jobs'] = {'data': k8s.get_resources(
'apis/batch/v1', 'jobs')}
- k8s_controllers['jobs'].update(analyze_k8s_controllers(k8s_controllers['jobs']['data']))
+ k8s_controllers['jobs'].update(analyze_k8s_controllers(
+ k8s_controllers['jobs']['data']))
not_ready_controllers = chain.from_iterable(
k8s_controllers[x]['not_ready_list'] for x in k8s_controllers)
return k8s_controllers, list(not_ready_controllers)
-def get_k8s_url(kube_config):
- # TODO: Get login info
- with open(kube_config) as f:
- config = yaml.load(f)
- # TODO: Support cluster by name
- return config['clusters'][0]['cluster']['server']
-
-def exec_healthcheck(hp_script, namespace):
- try:
- hc = subprocess.check_output(
- ['sh', hp_script, namespace, 'health'],
- stderr=subprocess.STDOUT)
- return 0, hc.output
- except subprocess.CalledProcessError as err:
- return err.returncode, err.output
+def exec_healthcheck(hp_script, namespace, hp_mode):
+ # spawn the healthcheck script and redirect its stderr to stdout
+ hc = Popen(['sh', hp_script, namespace, hp_mode], stdout=PIPE, stderr=STDOUT)
+ # Trace the output of subprocess until it has finished
+ for line in iter(hc.stdout.readline, ''):
+ print(line.strip())
+ hc.wait() # wait for the script to terminate and set returncode in Popen object
+ return hc.returncode
-def check_readiness(k8s_url, namespace, verbosity):
- k8s_controllers, not_ready_controllers = get_k8s_controllers(namespace, k8s_url)
+def check_readiness(k8s, verbosity):
+ k8s_controllers, not_ready_controllers = get_k8s_controllers(k8s)
# check pods only when it is explicitly wanted (judging readiness by deployment status)
if verbosity > 1:
- pods = get_resources(k8s_url, namespace, 'api/v1', 'pods')
+ pods = k8s.get_resources('api/v1', 'pods')
unready_pods = chain.from_iterable(
get_names(not_ready_pods(
pods_by_parent(pods, x)))
@@ -139,11 +126,11 @@ def check_readiness(k8s_url, namespace, verbosity):
print_status(verbosity, k8s_controllers, unready_pods)
return not not_ready_controllers
-def check_in_loop(k8s_url, namespace, max_time, sleep_time, verbosity):
+def check_in_loop(k8s, max_time, sleep_time, verbosity):
max_end_time = datetime.datetime.now() + datetime.timedelta(minutes=max_time)
ready = False
while datetime.datetime.now() < max_end_time:
- ready = check_readiness(k8s_url, namespace, verbosity)
+ ready = check_readiness(k8s, verbosity)
if ready:
return ready
sleep(sleep_time)
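
A portability note on the streaming loop in exec_healthcheck() above: under Python 3 the pipe yields bytes, so the '' sentinel passed to iter() never matches and the loop would not terminate. A minimal Python 3-safe sketch of the same pattern (a sketch, assuming the same script arguments):

    from subprocess import Popen, STDOUT, PIPE

    def exec_healthcheck_text(hp_script, namespace, hp_mode):
        # universal_newlines=True turns stdout into a text stream, so the
        # '' sentinel terminates the loop at EOF on both Python 2 and 3.
        hc = Popen(['sh', hp_script, namespace, hp_mode],
                   stdout=PIPE, stderr=STDOUT, universal_newlines=True)
        for line in iter(hc.stdout.readline, ''):
            print(line.strip())
        return hc.wait()  # block until the script exits, return its code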
@@ -184,7 +171,8 @@ def print_status(verbosity, resources, not_ready_pods):
print('\n'.join(status_strings), '\n')
def parse_args():
- parser = argparse.ArgumentParser(description='Monitor ONAP deployment progress')
+ parser = argparse.ArgumentParser(description='Monitor ONAP deployment progress',
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--namespace', '-n', default='onap',
help='Kubernetes namespace of ONAP')
parser.add_argument('--server', '-s', help='address of Kubernetes cluster')
@@ -192,6 +180,9 @@ def parse_args():
default=expanduser('~') + '/.kube/config',
help='path to .kube/config file')
parser.add_argument('--health-path', '-hp', help='path to ONAP robot ete-k8s.sh')
+ parser.add_argument('--health-mode', default='health', help='healthcheck mode',
+ choices=('health','healthdist','distribute','instantiate','instantiateVFWCL',
+ 'instantiateDemoVFWCL','portal'))
parser.add_argument('--no-helm', action='store_true', help='Do not check Helm')
parser.add_argument('--check-frequency', '-w', default=300, type=int,
help='time between readiness checks in seconds')
@@ -201,9 +192,86 @@ def parse_args():
help='run check loop only once')
parser.add_argument('-v', dest='verbosity', action='count', default=0,
help='increase output verbosity, e.g. -vv is more verbose than -v')
+ parser.add_argument('--no-ssl-auth', action='store_true',
+ help='Disable SSL certificate based authentication while connecting to server')
return parser.parse_args()
+class Kubernetes:
+ '''Class exposing the get_resources() routine for connecting to the kube API.
+ It keeps all attributes required by that call as internal
+ object state.'''
+
+ requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+
+ def __init__(self,args):
+
+ self.config = args.kubeconfig
+ self.url = args.server if args.server is not None else \
+ self._get_k8s_url()
+ self.no_ssl_auth = args.no_ssl_auth
+ self.certs = self._get_k8s_certs() if not self.no_ssl_auth else {}
+ self.namespace = args.namespace
+
+ # Setup tmp file with ca chain only if certs were gathered successfully
+ # and --no-ssl-auth wasn't set
+ if self.certs and not self.no_ssl_auth:
+ self._setup_cert_files()
+
+ def get_resources(self, api, kind):
+ '''Performs actual API call'''
+ url = '/'.join([self.url, api, 'namespaces', self.namespace, kind])
+ try:
+ if self.no_ssl_auth:
+ req = requests.get(url, verify=False)
+ else:
+ req = requests.get(url, verify=self.crt_tmp_file.name, cert=self.crt_tmp_file.name)
+ except requests.exceptions.ConnectionError as err:
+ sys.exit('Error: Could not connect to {}'.format(self.url))
+ if req.status_code == 200:
+ json = req.json()
+ # kind is <resource>List in response so [:-4] removes 'List' from value
+ return add_resource_kind(json['items'], json['kind'][:-4])
+ elif (req.status_code == 401):
+ sys.exit('Error: Server replied with "401 Unauthorized" while making connection')
+ else:
+ sys.exit("Error: There's been an unspecified issue while making a request to the API")
+
+ def _setup_cert_files(self):
+ '''Helper function to set up a named file for the requests.get() call
+ in self.get_resources(), which is able to read certificates only
+ from a file'''
+ ca_chain = NamedTemporaryFile()
+ for crt in self.certs.values():
+ ca_chain.write(crt)
+ ca_chain.flush() # flush the write buffer so the file is complete on disk
+ self.crt_tmp_file = ca_chain
+
+ def _get_k8s_url(self):
+ # TODO: Get login info
+ with open(self.config) as f:
+ config = yaml.load(f)
+ # TODO: Support cluster by name
+ return config['clusters'][0]['cluster']['server']
+
+ def _get_k8s_certs(self):
+ '''Helper function to read and decode certificates from kube config'''
+ with open(self.config) as f:
+ config = yaml.load(f)
+ certs = {}
+ try:
+ certs.update(dict(ca_cert=b64decode(
+ config['clusters'][0]['cluster']['certificate-authority-data'])))
+ certs.update(dict(client_cert=b64decode(
+ config['users'][0]['user']['client-certificate-data'])))
+ certs.update(dict(client_key=b64decode(
+ config['users'][0]['user']['client-key-data'])))
+ except KeyError as err:
+ print('Warning: could not get Kubernetes config for certificates. ' \
+ 'Turning off SSL authentication.')
+ self.no_ssl_auth = True
+ return certs
+
def main():
args = parse_args()
@@ -218,25 +286,20 @@ def main():
except IOError as err:
sys.exit(err.strerror)
- requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
- k8s_url = args.server if args.server is not None else get_k8s_url(args.kubeconfig)
+ k8s = Kubernetes(args)
ready = False
if args.single_run:
- ready = check_readiness(k8s_url, args.namespace, args.verbosity)
+ ready = check_readiness(k8s, args.verbosity)
else:
- if not check_in_loop(k8s_url, args.namespace, args.max_time, args.check_frequency, args.verbosity):
+ if not check_in_loop(k8s, args.max_time, args.check_frequency, args.verbosity):
# Double-check last 5 minutes and write verbosely in case it is not ready
- ready = check_readiness(k8s_url, args.namespace, 2)
+ ready = check_readiness(k8s, 2)
if args.health_path is not None:
- try:
- hc_rc, hc_output = exec_healthcheck(args.health_path, args.namespace)
- except IOError as err:
- sys.exit(err.strerror)
- if args.verbosity > 1 or hc_rc > 0:
- print(hc_output.decode('utf-8'))
- sys.exit(hc_rc)
+ hc_rc = exec_healthcheck(args.health_path, args.namespace, args.health_mode)
+ if hc_rc:
+ sys.exit(hc_rc)
if not ready:
sys.exit('Deployment is not ready')
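
The certificate handling in the Kubernetes class above relies on PEM data being concatenable: the CA certificate, client certificate and client key decoded from the kubeconfig are written into one temporary file that is then handed to requests. A standalone sketch of the same idea, with the CA bundle and the client identity kept in separate files for clarity (hypothetical helper, not part of this change):

    from base64 import b64decode
    from tempfile import NamedTemporaryFile

    import requests
    import yaml

    def kube_session(kubeconfig_path):
        # Decode the base64-embedded PEM blobs from a kubeconfig and write
        # them to temporary files that requests can read back.
        with open(kubeconfig_path) as f:
            config = yaml.safe_load(f)
        cluster = config['clusters'][0]['cluster']
        user = config['users'][0]['user']

        ca_file = NamedTemporaryFile(delete=False)
        ca_file.write(b64decode(cluster['certificate-authority-data']))
        ca_file.flush()

        # load_cert_chain() expects the client certificate before the key,
        # so keep that order when concatenating.
        id_file = NamedTemporaryFile(delete=False)
        id_file.write(b64decode(user['client-certificate-data']))
        id_file.write(b64decode(user['client-key-data']))
        id_file.flush()

        session = requests.Session()
        session.verify = ca_file.name  # CA bundle for server verification
        session.cert = id_file.name    # client cert + key for authentication
        return session

Requests made through the returned session, e.g. session.get(url) against the cluster endpoint taken from the same kubeconfig, then authenticate with the client certificate while verifying the server against the cluster CA.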