50 files changed, 1876 insertions, 556 deletions
diff --git a/.gitreview b/.gitreview new file mode 100644 index 00000000..7c81745f --- /dev/null +++ b/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=gerrit.onap.org +port=29418 +project=oom/offline-installer.git diff --git a/INFO.yaml b/INFO.yaml new file mode 100644 index 00000000..e2ddfecf --- /dev/null +++ b/INFO.yaml @@ -0,0 +1,72 @@ +--- +project: 'oom-offline-installer' +project_creation_date: '2018-12-14' +lifecycle_state: 'Incubation' +project_category: '' +project_lead: &onap_releng_ptl + name: 'Mike Elliott' + email: 'mike.elliott@amdocs.com' + id: 'melliott' + company: 'Amdocs' + timezone: 'Canada/Ontario' +primary_contact: *onap_releng_ptl +issue_tracking: + type: 'jira' + url: 'https://jira.onap.org/projects/OOM' + key: 'OOM' +mailing_list: + type: 'groups.io' + url: 'lists.onap.org' + tag: '<[sub-project_name]>' +realtime_discussion: '' +meetings: + - type: 'zoom' + agenda: 'https://wiki.onap.org/pages/viewpage.action?pageId=8228913' + url: 'https://wiki.onap.org/pages/viewpage.action?pageId=8228913' + server: 'n/a' + channel: 'n/a' + repeats: 'weekly' + time: '14:00 UTC' +repositories: + - 'oom' + - 'oom-offline-installer' + - 'oom-registrator' +committers: + - <<: *onap_releng_ptl + - name: 'Michal Ptacek' + email: 'm.ptacek@partner.samsung.com' + company: 'Samsung' + id: 'Michal.Ptacek' + timezone: 'Europe/Prague' + - name: 'Samuli Silvius' + email: 's.silvius@partner.samsung.com' + company: 'Samsung' + id: 'silvius' + timezone: 'Europe/Helsinky' + - name: 'Petr Ospaly' + email: 'p.ospaly@partner.samsung.com' + company: 'Samsung' + id: 'p.ospaly' + timezone: 'Europe/Prague' + - name: 'Tomas Levora' + email: 't.levora@partner.samsung.com' + company: 'Samsung' + id: 't.levora' + timezone: 'Europe/Prague' + - name: 'Milan Verespej' + email: 'm.verespej@partner.samsung.com' + company: 'Samsung' + id: 'milan.verespej' + timezone: 'Europe/Prague' + - name: 'Bartlomiej Grzybowski' + email: 'b.grzybowski@partner.samsung.com' + company: 'Samsung' + id: 'bgrzybowski' + timezone: 'Europe/Warsaw' + - name: 'Michal Zegan' + email: 'm.zegan@samsung.com' + company: 'Samsung' + id: 'm.zegan' + timezone: 'Europe/Warsaw' +tsc: + approval: 'https://lists.onap.org/pipermail/onap-tsc' diff --git a/ansible/application/README.md b/ansible/application/README.md index 36f69bd5..0e12da42 100644 --- a/ansible/application/README.md +++ b/ansible/application/README.md @@ -1,7 +1,7 @@ # Application specific configuration This directory is **empty** on purpose in git. Content in this folder is -populated packaging time (see package.sh/package.conf) and can be modified if needed +populated in packaging time by build/package.py and can be modified if needed also on target server where package is installed. ## Application configuration @@ -29,8 +29,6 @@ Helm charts transfer from packaging up to the target infra server. Installer supports optional custom pre and post install roles. Custom roles' code folders are placed into this directory at packaging time and names of those folders shall be configured in application_configuration.yml with variable `application_pre_install_role` and `application_post_install_role`. -Note that these directory names must correspond to those configured in APP_CONFIGURATION inside package.conf -during package build time. Example: ``` @@ -44,7 +42,7 @@ inventory file in git ansible/inventory/hosts.yml cannot be directly used anyway and at least ip addresses need to be changed according to target servers after installer installation and before starting installer execution. 
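To make the README guidance above concrete, here is a minimal sketch of populating this application directory before packaging. The directory and role names are placeholders; only the variable names (`application_pre_install_role`, `application_post_install_role`) and the idea of co-locating an edited `hosts.yml` come from the README text itself.

```
# Minimal sketch (names and paths are placeholders), run from the offline-installer clone:
mkdir -p ansible/application/my-pre-install-role ansible/application/my-post-install-role
cat > ansible/application/application_configuration.yml <<'EOF'
# Only variables documented in ansible/group_vars/*.yml belong here;
# the role names must match the directory names created above.
application_pre_install_role: my-pre-install-role
application_post_install_role: my-post-install-role
EOF
# Optionally keep an edited inventory next to it, as the README suggests:
cp ansible/inventory/hosts.yml ansible/application/hosts.yml
```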
-So it's better to place also hosts.yml to this application directory and edit it here. -That can be done either at packaging time same way as application_configuration.yml -or after package has been installed to server where ansible process are run just -before lauching any playbooks. +So it's better to place also hosts.yml to this application directory and edit it there. +That can be done either at packaging time same way as in application_configuration.yml +or after package has been installed to the install server where ansible process are run just +before launching any playbooks. diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml index d8fe37dd..d3cdfc1d 100755 --- a/ansible/group_vars/all.yml +++ b/ansible/group_vars/all.yml @@ -9,14 +9,11 @@ resources_dir: # tarfile name within resources_dir directory with offline infrastructure binaries. -# Content of APP_BINARY_RESOURCES_DIR (defined in package.conf) packaged by package.sh to single tar file. resources_filename: # tarfile name within resources_dir directory with auxiliary resources. -# Content of APP_AUX_BINARIES (defined in package.conf) packaged by package.sh to single tar file. # the purpose of auxiliary resources is to provide user an interface # to distribute to infra node tar file with application specific files. -# Docker images in tar format are currently the only supported content of aux_resources package. aux_resources_filename: # resources can be exported via nfs @@ -101,9 +98,6 @@ runtime_images: {} # Helm repository. # Content of the folder must be Helm chart directories of the app with Makefile. # In case of ONAP OOM it would be <oom_repo>/kubernetes folder content. -# NOTE: This default value should not be changed if not really needed and it -# must match with the variable "HELM_CHARTS_DIR_IN_PACKAGE" value in package.sh -# script! app_helm_charts_install_directory: application/helm_charts # Specify target dir where helm charts are copied into on infra node. diff --git a/ansible/rke.yml b/ansible/rke.yml index 13e7bb5b..ab6c0bb5 100644 --- a/ansible/rke.yml +++ b/ansible/rke.yml @@ -2,6 +2,13 @@ - name: Gather facts for all hosts hosts: all +- name: Configure etcd (RKE) + hosts: kubernetes-etcd + roles: + - role: rke + vars: + mode: etcd + - name: Configure kubernetes cluster (RKE) hosts: infrastructure roles: diff --git a/ansible/roles/rke/defaults/main.yml b/ansible/roles/rke/defaults/main.yml index 2f160fc2..d9c044b6 100644 --- a/ansible/roles/rke/defaults/main.yml +++ b/ansible/roles/rke/defaults/main.yml @@ -6,6 +6,46 @@ kube_config_dir: "{{ ansible_env.HOME }}/.kube" cluster_config_dir: "{{ app_data_path }}/cluster" # Whether dashboard is exposed. rke_dashboard_exposed: true +rke_etcd: + # By default rke creates bind mount: + # /var/lib/etcd -> /var/lib/rancher/etcd + # These parameters provide means of modifying it: + # - custom bind mount + # - option to use volatile storage + + # Custom bind mount + # + # I did not find a proper way (in the docs) how to override the + # defaults so I just abuse the extra_* args for the rke etcd + # service. It means that it will create another mount point in the + # container and you should use different pathnames than default... + # + # The custom bind mount is by default disabled. + enabled_custom_etcd_storage: false + + # Applicated only if custom mount is enabled. + # Paths must be absolute (start with '/') + # + # Path on the kubernetes/etcd node + storage_path: /var/lib/etcd-custom + # Path inside the container where it is mounted. 
+ storage_mountpoint: /var/lib/rancher/etcd-custom + + # On top of it (with or without custom mount) you can use tmpfs + # as a volatile storage. + # + # CAUTION: This will create temporary filesystem (in the memory) + # so if an etcd node will be poweroff then all etcd data will be + # lost!!! + # + # Don't use unless you don't care about your cluster! + # + # This is intended as an attempt to make deployment little bit + # more faster...by default it is disabled. + enabled_unsafe_volatile_storage: false + # Size of the volatile storage - tmpfs (this will eat your RAM) + tmpfs_size: 5G + rke: # rke (rancher) images etcd: rancher/coreos-etcd:v3.2.24-rancher1 diff --git a/ansible/roles/rke/molecule/default/prepare.yml b/ansible/roles/rke/molecule/default/prepare.yml index 6bad2b80..b012790a 100644 --- a/ansible/roles/rke/molecule/default/prepare.yml +++ b/ansible/roles/rke/molecule/default/prepare.yml @@ -13,3 +13,6 @@ - role: prepare-rke vars: mode: infra + - role: prepare-kubectl + vars: + kubectl_install: true diff --git a/ansible/roles/rke/tasks/rke_etcd.yml b/ansible/roles/rke/tasks/rke_etcd.yml new file mode 100644 index 00000000..3dddf9e8 --- /dev/null +++ b/ansible/roles/rke/tasks/rke_etcd.yml @@ -0,0 +1,9 @@ +--- +- name: "Mount tmpfs as etcd storage - non-persistent data (BEWARE)" + mount: + path: "{{ rke_etcd.storage_path if rke_etcd.enabled_custom_etcd_storage else '/var/lib/etcd' }}" + src: tmpfs + fstype: tmpfs + opts: "defaults,size={{ rke_etcd.tmpfs_size }},mode=700" + state: mounted + when: rke_etcd.enabled_unsafe_volatile_storage diff --git a/ansible/roles/rke/templates/cluster.yml.j2 b/ansible/roles/rke/templates/cluster.yml.j2 index 2012ab92..737d306f 100644 --- a/ansible/roles/rke/templates/cluster.yml.j2 +++ b/ansible/roles/rke/templates/cluster.yml.j2 @@ -25,8 +25,15 @@ nodes: services: etcd: image: "" +{% if rke_etcd.enabled_custom_etcd_storage %} + extra_args: + data-dir: "{{ rke_etcd.storage_mountpoint }}" + extra_binds: + - "{{ rke_etcd.storage_path }}:{{ rke_etcd.storage_mountpoint }}" +{% else %} extra_args: {} extra_binds: [] +{% endif %} extra_env: [] external_urls: [] ca_cert: "" diff --git a/ansible/test/roles/prepare-rke/tasks/infra.yml b/ansible/test/roles/prepare-rke/tasks/infra.yml index e9971f77..6e7bcb96 100644 --- a/ansible/test/roles/prepare-rke/tasks/infra.yml +++ b/ansible/test/roles/prepare-rke/tasks/infra.yml @@ -8,9 +8,3 @@ get_url: url: "https://github.com/rancher/rke/releases/download/v{{ rke_version }}/rke_linux-amd64" dest: "{{ app_data_path }}/downloads/rke_linux-amd64" - -- name: "Install kubectl-{{ kubectl_version }}" - get_url: - url: "https://storage.googleapis.com/kubernetes-release/release/v{{ kubectl_version }}/bin/linux/amd64/kubectl" - dest: "/usr/local/bin/kubectl" - mode: 0755 diff --git a/build/build_nexus_blob.sh b/build/build_nexus_blob.sh index 6ecab695..51ab05e7 100755 --- a/build/build_nexus_blob.sh +++ b/build/build_nexus_blob.sh @@ -62,31 +62,127 @@ LISTS_DIR="${LOCAL_PATH}/data_lists" COMMANDS=(jq docker expect npm twine) usage () { - echo " Example usage: build_nexus_blob.sh --input-directory </path/to/downloaded/files/dir> --output-directory - </path/to/output/dir> --resource-list-directory </path/to/dir/with/resource/list> [--load-docker-images] - - -i | --input-directory directory containing file needed to create nexus blob. 
The structure of this directory must organized as described in build guide - -ld | --load-docker-images load docker images from stored files in the input directory - -o | --output-directory - -rl | --resource-list-directory directory with files containing docker, pypi and npm lists + echo " + Usage: $(basename $0) [OPTION...] [FILE]... + + This script prepares Nexus repositories data blobs for ONAP + + Following dependencies are required: nodejs, jq, docker, twine, expect + By default, without any lists or dirs provided, the resources are expected as downloaded + during download process and default lists will be used to build the Nexus blob in the same + resources dir + + Examples: + $(basename $0) --input-directory </path/to/downloaded/files/dir> -ld --output-directory + </path/to/output/dir> --resource-list-directory </path/to/dir/with/resource/list> + # Docker images, npms and pypi packages will be loaded from specified directory + # and the blob is created + $(basename $0) -d </path/to/docker/images/list> -d </path/to/another/docker/images/list> + -n </path/to/npm/list> -p </path/to/pip/list> + # Docker images, npms and pypi packages will be pushed to Nexus based and provided data + # lists (multiple lists can be provided) + + -d | --docker use specific list of docker images to be pushed into Nexus + (in case of -ld used, this list will be used for loading of + the images) + -h | --help print this usage + -i | --input-directory use specific directory containing resources needed to + create nexus blob + The structure of this directory must organized as described + in build guide + -ld | --load-docker-images load docker images from resource directory + -n | --npm list of npm packages to be pushed into Nexus + -o | --output-directory use specific directory for the target blob + -p | --pypi use specific list of pypi packages to be pushed into Nexus + -rl | --resource-list-directory use specific directory with docker, pypi and npm lists " exit 1 } +publish_ports () { + for REGISTRY in $(sed -n '/\.[^/].*\//p' ${1} | sed -e 's/\/.*$//' | sort -u | grep -v ${DEFAULT_REGISTRY} || true) ${NEXUS_PORT}; do + if [[ ${REGISTRY} != *":"* ]]; then + if [[ ${PUBLISHED_PORTS} != *"80:${NEXUS_DOCKER_PORT}"* ]]; then + PUBLISHED_PORTS="${PUBLISHED_PORTS} -p 80:${NEXUS_DOCKER_PORT}" + fi + else + REGISTRY_PORT="$(sed 's/^.*\:\([[:digit:]]*\)$/\1/' <<< ${REGISTRY})" + if [[ ${PUBLISHED_PORTS} != *"${REGISTRY_PORT}:${NEXUS_DOCKER_PORT}"* ]]; then + PUBLISHED_PORTS="${PUBLISHED_PORTS} -p ${REGISTRY_PORT}:${NEXUS_DOCKER_PORT}" + fi + fi + done +} + +simulated_hosts () { + SIMUL_HOSTS=($(sed -n '/\.[^/].*\//p' ${1} | sed -e 's/\/.*$// ; s/:.*$//' | sort -u | grep -v ${DEFAULT_REGISTRY} || true ) ${NEXUS_DOMAIN}) + for HOST in "${SIMUL_HOSTS[@]}"; do + if ! 
grep -wq ${HOST} /etc/hosts; then + echo "127.0.0.1 ${HOST}" >> /etc/hosts + fi + done +} + load_docker_images () { for ARCHIVE in $(sed $'s/\r// ; /^#/d ; s/\:/\_/g ; s/\//\_/g ; s/$/\.tar/g' ${1} | awk '{ print $1 }'); do docker load -i ${NXS_SRC_DOCKER_IMG_DIR}/${ARCHIVE} done } +push_npm () { + for ARCHIVE in $(sed $'s/\r// ; s/\\@/\-/g ; s/$/\.tgz/g' ${1}); do + npm publish --access public ${ARCHIVE} > /dev/null + echo "NPM ${ARCHIVE} pushed to Nexus" + done +} + +push_pip () { + for PACKAGE in $(sed $'s/\r//; s/==/-/' ${NXS_PYPI_LIST}); do + twine upload -u "${NEXUS_USERNAME}" -p "${NEXUS_PASSWORD}" --repository-url ${PYPI_REGISTRY} ${PACKAGE}* + echo "PYPI ${PACKAGE} pushed to Nexus" + done +} + +docker_login () { + for REGISTRY in $(sed -n '/\.[^/].*\//p' ${1} | sed -e 's/\/.*$//' | sort -u | grep -v ${DEFAULT_REGISTRY}) ${DOCKER_REGISTRY}; do + if ! grep -wqs ${REGISTRY} ~/.docker/config.json; then + echo "Docker login to ${REGISTRY}" + echo -n "${NEXUS_PASSWORD}" | docker login -u "${NEXUS_USERNAME}" --password-stdin ${REGISTRY} > /dev/null + fi + done +} + +push_docker () { + for IMAGE in $(sed $'s/\r// ; /^#/d' ${1} | awk '{ print $1 }'); do + PUSH="" + if [[ ${IMAGE} != *"/"* ]]; then + PUSH="${DOCKER_REGISTRY}/library/${IMAGE}" + elif [[ ${IMAGE} == *"${DEFAULT_REGISTRY}"* ]]; then + if [[ ${IMAGE} == *"/"*"/"* ]]; then + PUSH="$(sed 's/'"${DEFAULT_REGISTRY}"'/'"${DOCKER_REGISTRY}"'/' <<< ${IMAGE})" + else + PUSH="$(sed 's/'"${DEFAULT_REGISTRY}"'/'"${DOCKER_REGISTRY}"'\/library/' <<< ${IMAGE})" + fi + elif [[ -z $(sed -n '/\.[^/].*\//p' <<< ${IMAGE}) ]]; then + PUSH="${DOCKER_REGISTRY}/${IMAGE}" + fi + if [[ ! -z ${PUSH} ]]; then + docker tag ${IMAGE} ${PUSH} + else + PUSH="${IMAGE}" + fi + docker push ${PUSH} + echo "${IMAGE} pushed as ${PUSH} to Nexus" + done +} + # Verify all dependencies are available in PATH FAILED_COMMANDS=() -for cmd in ${COMMANDS[*]}; -do +for cmd in ${COMMANDS[*]}; do command -v $cmd >/dev/null 2>&1 || FAILED_COMMANDS+=($cmd) done -if [ ${#FAILED_COMMANDS[*]} -gt 0 ]; -then + +if [ ${#FAILED_COMMANDS[*]} -gt 0 ]; then echo "Following commands where not found in PATH and are required:" echo ${FAILED_COMMANDS[*]} echo "Aborting." 
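As a usage sketch of the extended command-line interface described in the new usage text (all paths below are placeholders), the script can be driven either by the default lists or by explicitly named ones:

```
# Build the blob from a downloaded resources directory using the default lists:
./build_nexus_blob.sh -i /data/resources -ld -o /data/nexus_data

# Or push from explicitly named data lists (options such as -d may be repeated):
./build_nexus_blob.sh -ld -i /data/resources \
    -d /data/data_lists/onap_docker_images.list \
    -d /data/data_lists/rke_docker_images.list \
    -n /data/data_lists/onap_npm.list \
    -p /data/data_lists/onap_pip_packages.list
```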
@@ -95,14 +191,23 @@ fi while [ "${1}" != "" ]; do case ${1} in + -d | --docker ) shift + NXS_DOCKER_IMG_LISTS+=("${1}") + ;; -i | --input-directory ) shift DATA_DIR="${1}" ;; -ld | --load-docker-images ) DOCKER_LOAD="true" ;; + -n | --npm ) shift + NXS_NPM_LISTS+=("${1}") + ;; -o | --output-directory ) shift NEXUS_DATA_DIR="${1}" ;; + -p | --pypi ) shift + NXS_PYPI_LISTS+=("${1}") + ;; -rl | --resource-list-directory ) shift LISTS_DIR="${1}" ;; @@ -119,34 +224,42 @@ NXS_SRC_NPM_DIR="${DATA_DIR}/offline_data/npm_tar" NXS_SRC_PYPI_DIR="${DATA_DIR}/offline_data/pypi" # Setup specific resources lists +NXS_INFRA_LIST="${LISTS_DIR}/infra_docker_images.list" NXS_DOCKER_IMG_LIST="${LISTS_DIR}/onap_docker_images.list" +NXS_RKE_DOCKER_IMG_LIST="${LISTS_DIR}/rke_docker_images.list" NXS_NPM_LIST="${LISTS_DIR}/onap_npm.list" NXS_PYPI_LIST="${LISTS_DIR}/onap_pip_packages.list" # Setup Nexus image used for build and install infra -INFRA_LIST="${LISTS_DIR}/infra_docker_images.list" -NEXUS_IMAGE="$(grep sonatype/nexus3 ${INFRA_LIST})" +NEXUS_IMAGE="$(grep sonatype/nexus3 ${NXS_INFRA_LIST})" NEXUS_IMAGE_TAR="${DATA_DIR}/offline_data/docker_images_infra/$(sed 's/\//\_/ ; s/$/\.tar/ ; s/\:/\_/' <<< ${NEXUS_IMAGE})" +# Set default lists if nothing specific defined by user +if [ $((${#NXS_DOCKER_IMG_LISTS[@]} + ${#NXS_NPM_LISTS[@]} + ${#NXS_PYPI_LISTS[@]})) -eq 0 ]; then + NXS_DOCKER_IMG_LISTS=("${NXS_DOCKER_IMG_LIST}" "${NXS_RKE_DOCKER_IMG_LIST}") + NXS_NPM_LISTS[0]="${NXS_NPM_LIST}" + NXS_PYPI_LISTS[0]="${NXS_PYPI_LIST}" +fi + +# Backup /etc/hosts +HOSTS_BACKUP="$(eval ${TIMESTAMP}_hosts.bk)" +cp /etc/hosts /etc/${HOSTS_BACKUP} + +# Backup the current docker registry settings +if [ -f ~/.docker/config.json ]; then + DOCKER_CONF_BACKUP="$(eval ${TIMESTAMP}_config.json.bk)" + mv ~/.docker/config.json ~/.docker/${DOCKER_CONF_BACKUP} +fi + # Setup default ports published to host as docker registry PUBLISHED_PORTS="-p ${NEXUS_PORT}:${NEXUS_PORT} -p ${NEXUS_DOCKER_PORT}:${NEXUS_DOCKER_PORT}" # Setup additional ports published to host based on simulated docker registries -for REGISTRY in $(sed -n '/\.[^/].*\//p' ${NXS_DOCKER_IMG_LIST} | sed -e 's/\/.*$//' | sort -u | grep -v ${DEFAULT_REGISTRY} || true); do - if [[ ${REGISTRY} != *":"* ]]; then - if [[ ${PUBLISHED_PORTS} != *"80:${NEXUS_DOCKER_PORT}"* ]]; then - PUBLISHED_PORTS="${PUBLISHED_PORTS} -p 80:${NEXUS_DOCKER_PORT}" - fi - else - REGISTRY_PORT="$(sed 's/^.*\:\([[:digit:]]*\)$/\1/' <<< ${REGISTRY})" - if [[ ${PUBLISHED_PORTS} != *"${REGISTRY_PORT}:${NEXUS_DOCKER_PORT}"* ]]; then - PUBLISHED_PORTS="${PUBLISHED_PORTS} -p ${REGISTRY_PORT}:${NEXUS_DOCKER_PORT}" - fi - fi -done - # Setup simulated domain names to be able to push all to private Nexus repository -SIMUL_HOSTS="$(sed -n '/\.[^/].*\//p' ${NXS_DOCKER_IMG_LIST} | sed -e 's/\/.*$// ; s/:.*$//' | sort -u | grep -v ${DEFAULT_REGISTRY} || true) ${NEXUS_DOMAIN}" +for DOCKER_IMG_LIST in "${NXS_DOCKER_IMG_LISTS[@]}"; do + publish_ports "${DOCKER_IMG_LIST}" + simulated_hosts "${DOCKER_IMG_LIST}" +done # Nexus repository configuration setup NEXUS_CONFIG_GROOVY='import org.sonatype.nexus.security.realm.RealmManager @@ -186,23 +299,6 @@ repositoryManager.update(conf)' NEXUS_CONFIG=$(echo "${NEXUS_CONFIG_GROOVY}" | jq -Rsc '{"name":"configure", "type":"groovy", "content":.}') ################################# -# Prepare the local environment # -################################# - -# Add simulated domain names to /etc/hosts -HOSTS_BACKUP="$(eval ${TIMESTAMP}_hosts.bk)" -cp /etc/hosts /etc/${HOSTS_BACKUP} -for DNS 
in ${SIMUL_HOSTS}; do - echo "127.0.0.1 ${DNS}" >> /etc/hosts -done - -# Backup the current docker registry settings -if [ -f ~/.docker/config.json ]; then - DOCKER_CONF_BACKUP="$(eval ${TIMESTAMP}_config.json.bk)" - mv ~/.docker/config.json ~/.docker/${DOCKER_CONF_BACKUP} -fi - -################################# # Docker repository preparation # ################################# @@ -210,7 +306,9 @@ if [ "${DOCKER_LOAD}" == "true" ]; then # Load predefined Nexus image docker load -i ${NEXUS_IMAGE_TAR} # Load all necessary images - load_docker_images ${NXS_DOCKER_IMG_LIST} + for DOCKER_IMG_LIST in "${NXS_DOCKER_IMG_LISTS[@]}"; do + load_docker_images "${DOCKER_IMG_LIST}" + done fi ################################ @@ -282,20 +380,19 @@ if [[ ! -z "${PATCHED_NPM}" ]] && ! zgrep -aq "${NPM_REGISTRY}" "${PATCHED_NPM}" fi # Push NPM packages to Nexus repository -for ARCHIVE in $(sed $'s/\r// ; s/\\@/\-/g ; s/$/\.tgz/g' ${NXS_NPM_LIST});do - npm publish --access public ${ARCHIVE} > /dev/null - echo "NPM ${ARCHIVE} pushed to Nexus" +for NPM_LIST in "${NXS_NPM_LISTS[@]}"; do + push_npm "${NPM_LIST}" done popd +npm logout ############################### ## Populate PyPi repository # ############################### pushd ${NXS_SRC_PYPI_DIR} -for PACKAGE in $(sed $'s/\r//; s/==/-/' ${NXS_PYPI_LIST}); do - twine upload -u "${NEXUS_USERNAME}" -p "${NEXUS_PASSWORD}" --repository-url ${PYPI_REGISTRY} ${PACKAGE}* - echo "PYPI ${PACKAGE} pushed to Nexus" +for PYPI_LIST in "${NXS_PYPI_LISTS[@]}"; do + push_pip "${PYPI_LIST}" done popd @@ -304,34 +401,12 @@ popd ############################### # Login to simulated docker registries -for REGISTRY in $(sed -n '/\.[^/].*\//p' ${NXS_DOCKER_IMG_LIST} | sed -e 's/\/.*$//' | sort -u | grep -v ${DEFAULT_REGISTRY}) ${DOCKER_REGISTRY}; do - echo "Docker login to ${REGISTRY}" - docker login -u "${NEXUS_USERNAME}" -p "${NEXUS_PASSWORD}" ${REGISTRY} > /dev/null -done - # Push images to private nexus based on the list # Images from default registry need to be tagged to private registry # and those without defined repository in tag uses default repository 'library' -for IMAGE in $(sed $'s/\r// ; /^#/d' ${NXS_DOCKER_IMG_LIST} | awk '{ print $1 }'); do - PUSH="" - if [[ ${IMAGE} != *"/"* ]]; then - PUSH="${DOCKER_REGISTRY}/library/${IMAGE}" - elif [[ ${IMAGE} == *"${DEFAULT_REGISTRY}"* ]]; then - if [[ ${IMAGE} == *"/"*"/"* ]]; then - PUSH="$(sed 's/'"${DEFAULT_REGISTRY}"'/'"${DOCKER_REGISTRY}"'/' <<< ${IMAGE})" - else - PUSH="$(sed 's/'"${DEFAULT_REGISTRY}"'/'"${DOCKER_REGISTRY}"'\/library/' <<< ${IMAGE})" - fi - elif [[ -z $(sed -n '/\.[^/].*\//p' <<< ${IMAGE}) ]]; then - PUSH="${DOCKER_REGISTRY}/${IMAGE}" - fi - if [[ ! 
-z ${PUSH} ]]; then - docker tag ${IMAGE} ${PUSH} - else - PUSH="${IMAGE}" - fi - docker push ${PUSH} - echo "${IMAGE} pushed as ${PUSH} to Nexus" +for DOCKER_IMG_LIST in "${NXS_DOCKER_IMG_LISTS[@]}"; do + docker_login "${DOCKER_IMG_LIST}" + push_docker "${DOCKER_IMG_LIST}" done ############################## @@ -344,7 +419,7 @@ echo "Stopping Nexus and returning backups" docker stop ${NEXUS_CONT_ID} > /dev/null # Return backed up configuration files -mv -f /etc/${HOSTS_BACKUP} /etc/hosts +mv -f "/etc/${HOSTS_BACKUP}" /etc/hosts if [ -f ~/.docker/${DOCKER_CONF_BACKUP} ]; then mv -f ~/.docker/${DOCKER_CONF_BACKUP} ~/.docker/config.json diff --git a/build/data_lists/deb_packages.list b/build/data_lists/deb_packages.list deleted file mode 100644 index ef9b0229..00000000 --- a/build/data_lists/deb_packages.list +++ /dev/null @@ -1,22 +0,0 @@ -archive.ubuntu.com/ubuntu/ubuntu/pool/universe/a/ansible/ansible_2.0.0.2-2_all.deb -archive.ubuntu.com/ubuntu/ubuntu/pool/main/i/ieee-data/ieee-data_20150531.1_all.deb -archive.debian.org/debian/pool/main/liby/libyaml/libyaml-0-2_0.1.6-3_amd64.deb -security.ubuntu.com/ubuntu/pool/main/p/python-crypto/python-crypto_2.6.1-6ubuntu0.16.04.3_amd64.deb -archive.ubuntu.com/ubuntu/pool/universe/p/python-ecdsa/python-ecdsa_0.13-2_all.deb -archive.ubuntu.com/ubuntu/pool/main/p/python-httplib2/python-httplib2_0.9.1+dfsg-1_all.deb -archive.ubuntu.com/ubuntu/pool/main/j/jinja2/python-jinja2_2.8-1_all.deb -archive.ubuntu.com/ubuntu/pool/main/m/markupsafe/python-markupsafe_0.23-2build2_amd64.deb -archive.ubuntu.com/ubuntu/pool/main/p/python-netaddr/python-netaddr_0.7.18-1_all.deb -archive.ubuntu.com/ubuntu/pool/main/p/paramiko/python-paramiko_1.16.0-1ubuntu0.2_all.deb -archive.ubuntu.com/ubuntu/pool/universe/libs/libselinux/python-selinux_2.2.2-1_amd64.deb -archive.ubuntu.com/ubuntu/pool/main/s/six/python-six_1.10.0-3_all.deb -archive.ubuntu.com/ubuntu/pool/main/p/pyyaml/python-yaml_3.11-3build1_amd64.deb -security.ubuntu.com/ubuntu/pool/main/a/apt/apt-utils_1.2.29ubuntu0.1_amd64.deb -security.ubuntu.com/ubuntu/pool/main/c/cron/cron_3.0pl1-128ubuntu2_amd64.deb -archive.ubuntu.com/ubuntu/pool/main/g/gobject-introspection/libgirepository-1.0-1_1.46.0-3ubuntu1_amd64.deb -archive.ubuntu.com/ubuntu/pool/main/d/dbus-glib/libdbus-glib-1-2_0.106-1_amd64.deb -security.ubuntu.com/ubuntu/pool/main/a/apt/libapt-inst2.0_1.2.29ubuntu0.1_amd64.deb -archive.ubuntu.com/ubuntu/pool/main/i/iso-codes/iso-codes_3.65-1_all.deb -security.ubuntu.com/ubuntu/pool/main/d/dh-python/dh-python_2.20151103ubuntu1_all.deb -security.ubuntu.com/ubuntu/pool/main/d/distro-info-data/distro-info-data_0.28ubuntu0.9_all.deb -archive.ubuntu.com/ubuntu/pool/main/g/gobject-introspection/gir1.2-glib-2.0_1.46.0-3ubuntu1_amd64.deb diff --git a/build/download/requirements.txt b/build/download/requirements.txt index 3eee2a2f..681c0dd5 100644 --- a/build/download/requirements.txt +++ b/build/download/requirements.txt @@ -1,3 +1,3 @@ -docker==3.7.2 +docker>=3.7.2 prettytable==0.7.2 retrying==1.3.3 diff --git a/build/fetch_and_patch_charts.sh b/build/fetch_and_patch_charts.sh deleted file mode 100755 index 22d45e66..00000000 --- a/build/fetch_and_patch_charts.sh +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env bash - -# COPYRIGHT NOTICE STARTS HERE -# -# Copyright 2018 © Samsung Electronics Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# COPYRIGHT NOTICE ENDS HERE - -# This simple script should be used during build / packaging process -# and it should be referenced in BuildGuide. -# Patching of helm charts is the only way for OOM charts to be compatible -# with this offline installer. This will become obsolete once native -# solution is implemented in charts themselves and which is tracked -# in OOM-1610 - -# fail fast -set -e - -# colours -_R='\033[0;31;1m' #Red -_G='\033[0;32;1m' #Green -_Y='\033[0;33;1m' #Yellow -C_='\033[0m' #Color off - -usage () { - echo "Usage:" - echo -e "./$(basename $0) <helm charts repo> <commit/tag/branch> <patchfile> <target_dir>\n" - echo "Example: ./$(basename $0) https://gerrit.onap.org/r/oom master /root/offline-installer/patches/onap.patch /root/offline-installer/ansible/application/helm_charts" -} - -if [ "$#" -ne 4 ]; then - echo "This script should get exactly 4 arguments!" - echo -e "Wrong number of parameters provided\n" - usage - exit 1 -fi - -# main -# git and patch tools are preconditions for this to work -CURR=1 -TOTAL=5 -PATCH_FILE=$(realpath "${3}") - -echo -e "${_G}[Step $((CURR++))/${TOTAL} cloning repo with charts to be patched]${C_}" -git clone --recurse-submodules "${1}" "${4}" - -echo -e "${_G}[Step $((CURR++))/${TOTAL} setting working dir to ${4}]${C_}" -pushd "${4}" - -echo -e "${_G}[Step $((CURR++))/${TOTAL} git-checkout to correct base]${C_}" -git checkout "${2}" - -echo -e "${_G}[Step $((CURR++))/${TOTAL} patching charts]${C_}" -git apply "${PATCH_FILE}" - -echo -e "${_G}[Step $((CURR++))/${TOTAL} returning to original working directory]${C_}" -popd - diff --git a/build/package.conf b/build/package.conf deleted file mode 100644 index d74eac0a..00000000 --- a/build/package.conf +++ /dev/null @@ -1,80 +0,0 @@ -# For the packaging script it is expected that all artifacts are present on local file system. -# Artifacts include: -# - installer source code (this git repository content) -# - all binary artifacts pre-downloaded from internet (docker images, rpm packages, npm packages, Maven artifacts etc.) -# Script will create 3 packages: -# offline-${PROJECT_NAME}-${PROJECT_VERSION}-sw.tar -# - installer code (ansible dir in this git repo) -# - Files/dirs defined by APP_CONFIGURATION if any. -# - Directory content of HELM_CHARTS_DIR if defined. -# offline-${PROJECT_NAME}-${PROJECT_VERSION}-resources.tar -# - Directory content of APP_BINARY_RESOURCES_DIR if defined. -# offline-${PROJECT_NAME}-${PROJECT_VERSION}-aux-resources.tar -# - Files defined by APP_AUX_BINARIES if any. - -########################### -# Application Helm charts # -########################### - -# Provide application installed to Kubernetes cluster. Helm chart is the supported format https://helm.sh/. -# Directory provided here must contain all the Chart directories of the application (https://docs.helm.sh/developing_charts/#charts) and Makefile. -# E.g. in case of ONAP oom repo it will be the content of kubernetes directory. -# NOTE: Leaving this variable commented out will mean that no Helm application will be installed to -# offline Kubernetes cluster. This may be sometimes wanted. 
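The settings of the removed package.conf map onto command-line arguments of build/package.py introduced later in this change. An illustrative call, with placeholder paths and version string, might look like this:

```
# Illustrative replacement for the removed package.conf + package.sh flow
# (all paths and the --build-version value are placeholders):
./build/package.py https://gerrit.onap.org/r/oom \
    --application-repository_reference master \
    --application-patch_file /tmp/offline-installer/patches/onap.patch \
    --application-charts_dir kubernetes \
    --application-configuration /tmp/offline-installer/config/application_configuration.yml \
    --application-patch-role /tmp/offline-installer/patches/onap-patch-role \
    --resources-directory /tmp/onap-offline/resources \
    --output-dir /tmp/packages \
    --build-version 19.06
```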
-#HELM_CHARTS_DIR=<oom-clone>/kubernetes -HELM_CHARTS_DIR=/tmp/oom-clone/kubernetes - -##################################### -# Application install configuration # -##################################### - -# APP_CONFIGURATION array variable can be used to provide files/directories -# into sw package available for the Ansible process to consume. -# The main configuration file for your application is -# "application_configuration.yml" (name of file can be anything) where user -# shall provide values to control ansible installer process. Yml file is given -# as command line parameter to ansible run. -# See more from UserGuide documentation (LINK HERE) how to use installer. -# Available configuration parameters user can configure are seen from group_vars files: -# ansible/group_vars/all.yml -# ansible/group_vars/infrastucture.yml -# ansible/group_vars/kubernetes.yml -# Additionally user can optionally provide own ansible roles code to customize install process. -# At the moment 2 custom ansible roles are supported pre and post install roles, which are -# run by installer prior Helm install and after Kubernetes app has been installed. -# In application_configuration.yml those role names are configured with variables: -# application_pre_install_role: my-pre-install-role -# application_post_install_role: my-post-install-role -# And according to Ansible functionality, roles' code must be placed to directories -# with the same name: -#APP_CONFIGURATION=( -# <offline-installer-clone>/config/application_configuration.yml -# <offline-installer-clone>/patches/my-pre-install-role -# ~/myappfiles/my-post-install-role -#) -APP_CONFIGURATION=( - /tmp/offline-installer/config/application_configuration.yml - /tmp/offline-installer/patches/onap-patch-role -) - -# APP_BINARY_RESOURCES_DIR is variable to directory containing directories and files for offline -# installer infra and the the application run in that infra. -# Currently mixed with infra and app binaries to same, maybe to be separated in the future. -# Following directories and files are expected: -# downloads ... directory with exacutable binaries for offline infra usage (e.g. rancher, kubectl, jq, helm) -# git-repo ... directory with git repos for application needs to be simulated -# http ... directory with http simulation files (e.g. Maven artifacts are simulated here) -# offline_data ... directory with offline infra specific docker images -# pkg ... directory with rpm/deb packages mainly for offline infra -# nexus_data.tar ... tar file with pre-generated nexus blobs containing e.g. docker images for the application. -#APP_BINARY_RESOURCES_DIR=~/myappfiles/all_binaries -APP_BINARY_RESOURCES_DIR=/tmp/onap-offline/resources - -# APP_AUX_BINARIES is array variable for additional application files. -# Docker images supported currently in tar format. -#APP_AUX_BINARIES=( -# ~/myappfiles/docker_images_populated_runtime/aaa-component-0.0.1.tar -# ~/myappfiles/docker_images_populated_runtime/xyz-component-0.0.1.tar -#) -APP_AUX_BINARIES=() - diff --git a/build/package.py b/build/package.py index 8a1808b3..d30b40c1 100755 --- a/build/package.py +++ b/build/package.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! 
/usr/bin/env python3 # -*- coding: utf-8 -*- # COPYRIGHT NOTICE STARTS HERE @@ -68,7 +68,7 @@ def prepare_application_repository(directory, url, refspec, patch_path): return repository -def create_package_info_file(output_file, repository_list): +def create_package_info_file(output_file, repository_list, tag): """ Generates text file in json format containing basic information about the build :param output_file: @@ -78,7 +78,8 @@ def create_package_info_file(output_file, repository_list): log.info('Generating package.info file') build_info = { 'Build_info': { - 'build_date': datetime.now().strftime('%Y-%m-%d_%H-%M') + 'build_date': datetime.now().strftime('%Y-%m-%d_%H-%M'), + 'Version': tag } } for repository in repository_list: @@ -98,25 +99,36 @@ def create_package(tar_content, file_name): log.info('Creating package {}'.format(file_name)) with tarfile.open(file_name, 'w') as output_tar_file: for src, dst in tar_content.items(): - output_tar_file.add(src, dst) + if src != '': + output_tar_file.add(src, dst) -def build_offline_deliverables(application_repository_url, +def build_offline_deliverables(build_version, + application_repository_url, application_repository_reference, application_patch_file, + application_charts_dir, + application_configuration, + application_patch_role, output_dir, resources_directory, + aux_directory, skip_sw, skip_resources, skip_aux, overwrite): """ Prepares offline deliverables + :param build_version: Version for packages tagging :param application_repository_url: git repository hosting application helm charts :param application_repository_reference: git refspec for repository hosting application helm charts :param application_patch_file: git patch file to be applied over application repository + :param application_charts_dir: path to directory under application repository containing helm charts + :param application_configuration: path to application configuration file (helm override configuration) + :param application_patch_role: path to application patch role (executed just before helm deploy) :param output_dir: Destination directory for saving packages :param resources_directory: Path to resource directory + :param aux_directory: Path to aux binary directory :param skip_sw: skip sw package generation :param skip_resources: skip resources package generation :param skip_aux: skip aux package generation @@ -128,6 +140,7 @@ def build_offline_deliverables(application_repository_url, if not overwrite: log.error('Output directory is not empty, use overwrite to force build') raise FileExistsError + shutil.rmtree(output_dir) # Git offline_repository_dir = os.path.join(script_location, '..') @@ -141,23 +154,23 @@ def build_offline_deliverables(application_repository_url, # Package info info_file = os.path.join(output_dir, 'package.info') - create_package_info_file(info_file, [application_repository, offline_repository]) + create_package_info_file(info_file, [application_repository, offline_repository], build_version) # packages layout as dictionaries. 
<file> : <file location under tar archive> sw_content = { os.path.join(offline_repository_dir, 'ansible'): 'ansible', - os.path.join(offline_repository_dir, 'config', - 'application_configuration.yml'): 'ansible/application/application_configuration.yml', - os.path.join(offline_repository_dir, 'patches', 'onap-patch-role'): 'ansible/application/onap-patch-role', - os.path.join(application_dir, 'kubernetes'): 'ansible/application/helm_charts', - info_file: 'packge.info' + application_configuration: 'ansible/application/application_configuration.yml', + application_patch_role: 'ansible/application/onap-patch-role', + os.path.join(application_dir, application_charts_dir): 'ansible/application/helm_charts', + info_file: 'package.info' } resources_content = { resources_directory: '', - info_file: 'packge.info' + info_file: 'package.info' } aux_content = { - info_file: 'packge.info' + aux_directory: '', + info_file: 'package.info' } if not skip_sw: @@ -167,7 +180,7 @@ def build_offline_deliverables(application_repository_url, os.path.join(offline_repository_dir, 'ansible', 'docker', 'build_ansible_image.sh')) installer_build.check_returncode() os.chdir(script_location) - sw_package_tar_path = os.path.join(output_dir, 'sw_package.tar') + sw_package_tar_path = os.path.join(output_dir, 'sw_package' + build_version + '.tar') create_package(sw_content, sw_package_tar_path) if not skip_resources: @@ -201,11 +214,11 @@ def build_offline_deliverables(application_repository_url, createrepo = subprocess.run(['createrepo', os.path.join(resources_directory, 'pkg', 'rhel')]) createrepo.check_returncode() - resources_package_tar_path = os.path.join(output_dir, 'resources_package.tar') + resources_package_tar_path = os.path.join(output_dir, 'resources_package' + build_version + '.tar') create_package(resources_content, resources_package_tar_path) if not skip_aux: - aux_package_tar_path = os.path.join(output_dir, 'aux_package.tar') + aux_package_tar_path = os.path.join(output_dir, 'aux_package'+ build_version + '.tar') create_package(aux_content, aux_package_tar_path) shutil.rmtree(application_dir) @@ -216,16 +229,28 @@ def run_cli(): Run as cli tool """ parser = argparse.ArgumentParser(description='Create Package For Offline Installer') + parser.add_argument('--build-version', + help='version of the build', default='custom') parser.add_argument('application_repository_url', metavar='application-repository-url', help='git repository hosting application helm charts') parser.add_argument('--application-repository_reference', default='master', help='git refspec for repository hosting application helm charts') parser.add_argument('--application-patch_file', help='git patch file to be applied over application repository', default='') + parser.add_argument('--application-charts_dir', + help='path to directory under application repository containing helm charts ', default='kubernetes') + parser.add_argument('--application-configuration', + help='path to application configuration file (helm override configuration)', + default='') + parser.add_argument('--application-patch-role', + help='path to application patch role file (ansible role) to be executed right before installation', + default='') parser.add_argument('--output-dir', '-o', default=os.path.join(script_location, '..', '..'), help='Destination directory for saving packages') - parser.add_argument('--resources-directory', + parser.add_argument('--resources-directory', default='', help='Path to resource directory') + parser.add_argument('--aux-directory', + 
help='Path to aux binary directory', default='') parser.add_argument('--skip-sw', action='store_true', default=False, help='Set to skip sw package generation') parser.add_argument('--skip-resources', action='store_true', default=False, @@ -243,11 +268,16 @@ def run_cli(): else: logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(message)s') - build_offline_deliverables(args.application_repository_url, + build_offline_deliverables(args.build_version, + args.application_repository_url, args.application_repository_reference, args.application_patch_file, + args.application_charts_dir, + args.application_configuration, + args.application_patch_role, args.output_dir, args.resources_directory, + args.aux_directory, args.skip_sw, args.skip_resources, args.skip_aux, @@ -256,4 +286,3 @@ def run_cli(): if __name__ == '__main__': run_cli() - diff --git a/build/package.sh b/build/package.sh deleted file mode 100755 index a3c1ded2..00000000 --- a/build/package.sh +++ /dev/null @@ -1,267 +0,0 @@ -#! /usr/bin/env bash - -# COPYRIGHT NOTICE STARTS HERE -# -# Copyright 2018-2019 © Samsung Electronics Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# COPYRIGHT NOTICE ENDS HERE - - -# Scope of this packaging script is to generate tarfiles for offline installation -# Build of any additional artifacts is out of scope for this script -set -e - -crash () { - local exit_code="$1" - local cause="$2" - echo "Packaging script finished prematurely" - echo "Cause: $2" - exit "${exit_code}" -} - -crash_arguments () { - echo "Missing some mandatory arguments!" - usage - exit 1 -} - -usage () { - echo "Usage:" - echo " ./$(basename $0) <project_name> <version> <packaging_target_dir> [--conf <file>] [--force]" - echo "" - echo "Options:" - echo " --force Remove packaging_target_dir if exists prior to script execution" - echo " --conf Custom configuration file path for script" - echo "" - echo "Example:" - echo " ./$(basename $0) myproject 1.0.1 /tmp/package --conf ~/myproject.conf" - echo "" - echo "packaging_target_dir will be created if does not exist. All tars will be produced into it." -} - -function create_tar { - local tar_dir="$1" - local tar_name="$2" - - cd ${tar_dir} - touch ${tar_name} # Trick to avoid sporadic "tar: .: file changed as we read it" warning message - tar --exclude=${tar_name} -cf ../${tar_name} . - cd - &> /dev/null # Trick to avoid printing new dir on stdout - - # Remove packaged folders - find ${tar_dir}/* -maxdepth 0 -type d -exec rm -rf '{}' \; - # Remove packaged files - find ${tar_dir}/* ! 
-name ${tar_name} -exec rm '{}' \; - echo "Tar file created to $(dirname ${tar_dir})/${tar_name}" -} - -function create_pkg { - local pkg_type="$1" - echo "[Creating ${pkg_type} package]" - create_tar "${PKG_ROOT}" offline-${PROJECT_NAME}-${PROJECT_VERSION}-${pkg_type}.tar - rm -rf "${PKG_ROOT}" -} - -function add_metadata { - local metafile="$1" - echo "Project name: ${PROJECT_NAME}" >> "${metafile}" - echo "Project version: ${PROJECT_VERSION}" >> "${metafile}" - echo "Package date: ${TIMESTAMP}" >> "${metafile}" -} - -function add_additions { - local source="$1" - local target="$2" - if [ -d "${source}" ]; then - mkdir -p "${target}/$(basename $source)" - cp -r "${source}" "${target}" - echo "Adding directory ... $(basename $source)" - else - if [ -f "${source}" ]; then - cp "${source}" "${target}" - echo "Adding file ... $(basename $source)" - else - crash 4 "Invalid source specified for packaging: $1" - fi - fi -} - -function build_sw_artifacts { - cd ${LOCAL_PATH}/../ansible/docker - ./build_ansible_image.sh - if [ $? -ne 0 ]; then - crash 5 "Building of ansible runner image failed." - fi - cd - -} - -function create_sw_package { - PKG_ROOT="${PACKAGING_TARGET_DIR}/sw" - - # Create directory structure of the sw package - mkdir -p "${PKG_ROOT}" - cp -r ${LOCAL_PATH}/../ansible "${PKG_ROOT}" - - # Add application additional files/dirs into package based on package.conf - for item in "${APP_CONFIGURATION[@]}";do - # all SW package addons are expected within ./ansible/application folder - add_additions "${item}" "${PKG_ROOT}/${APPLICATION_FILES_IN_PACKAGE}" - done - - # Application Helm charts - # To be consistent with resources and aux dir, create charts dir even if no charts provided. - mkdir -p ${PKG_ROOT}/${HELM_CHARTS_DIR_IN_PACKAGE} - if [ ! -z "${HELM_CHARTS_DIR}" ]; - then - echo "Add application Helm charts" - # Copy charts available for ansible playbook to use/move them to target server/dir - cp -r "${HELM_CHARTS_DIR}"/* ${PKG_ROOT}/${HELM_CHARTS_DIR_IN_PACKAGE} - else - echo "No Helm charts defined, no application will be automatically installed by this package!" - fi - - # Add metadata to the package - add_metadata "${PKG_ROOT}"/package.info - - # Create sw tar package - create_pkg sw -} - -function create_resource_package { - PKG_ROOT="${PACKAGING_TARGET_DIR}/resources" - - # Create directory structure of the resource package - mkdir -p "${PKG_ROOT}" - - # Add artifacts into resource package based on package.conf config - if [ ! -z ${APP_BINARY_RESOURCES_DIR} ]; then - cp -r ${APP_BINARY_RESOURCES_DIR}/* ${PKG_ROOT} - fi - - # tar file with nexus_data is expected, we should find and untar it - # before resource.tar is created - for i in `ls -1 ${PKG_ROOT} | grep tar`; do - tar tvf "${PKG_ROOT}/${i}" | grep nexus_data &> /dev/null - if [ $? -eq 0 ]; then - echo "Debug: tar file with nexus blobs detected ${PKG_ROOT}/${i}. Start unarchive ..." - tar xf "${PKG_ROOT}/${i}" -C "${PKG_ROOT}" &> /dev/null - echo "Debug: unarchive finished. 
Removing original file" - rm -f "${PKG_ROOT}/${i}" - fi - done - - create_pkg resources -} - -function create_aux_package { - PKG_ROOT="${PACKAGING_TARGET_DIR}/aux" - - # Create directory structure of the aux resource package - mkdir -p "${PKG_ROOT}" - - # Add artifacts into resource packagee based on package.conf config - for item in "${APP_AUX_BINARIES[@]}";do - add_additions "${item}" "${PKG_ROOT}" - done - - create_pkg aux-resources -} - -# -# =================== Main =================== -# - -PROJECT_NAME="$1" -PROJECT_VERSION="$2" -PACKAGING_TARGET_DIR="$3" - -TIMESTAMP=$(date -u +%Y%m%dT%H%M%S) -SCRIPT_DIR=$(dirname "${0}") -LOCAL_PATH=$(readlink -f "$SCRIPT_DIR") - -# Relative location inside the package for application related files. -# Application means Kubernetes application installed by Helm charts on ready cluster (e.g. onap). -APPLICATION_FILES_IN_PACKAGE="ansible/application" - -# Relative location inside the package to place Helm charts to be available for -# Ansible process to transfer them into machine (infra node) running Helm repository. -# NOTE: This is quite hardcoded place to put them and agreement with Ansible code -# is done in ansible/group_vars/all.yml with variable "app_helm_charts_install_directory" -# whihc value must match to value of this variable (with exception of slash '/' -# prepended so that ansible docker/chroot process can see the dir). -# This variable can be of course changed in package.conf if really needed if -# corresponding ansible variable "app_helm_charts_install_directory" value -# adjusted accordingly. -HELM_CHARTS_DIR_IN_PACKAGE="${APPLICATION_FILES_IN_PACKAGE}/helm_charts" - -if [ $# -eq 0 ]; then - crash_arguments -fi - -CONF_FILE="" -FORCE_REMOVE=0 -arg_ind=0 -for arg in "$@"; do - shift - ((arg_ind+=1)) - if [[ ${arg} =~ ^[-]{1,2}[a-zA-Z-]+$ && ${arg_ind} -lt 4 ]]; then - echo "Non-positional parameters should follow mandatory arguments!" - usage - exit 1 - fi - case "$arg" in - -c|--conf) - CONF_FILE="$1" ;; - --force) - FORCE_REMOVE=1 ;; - *) - set -- "$@" "$arg" - if [ "$#" -lt 3 ]; then - crash_arguments - fi ;; - esac -done - -if [ -z ${CONF_FILE} ]; then - CONF_FILE=${LOCAL_PATH}/package.conf # Fall to default conf file -fi - -if [ ! -f ${CONF_FILE} ]; then - crash 2 "Mandatory config file missing! Provide it with --conf option or ${LOCAL_PATH}/package.conf" -fi - -source ${CONF_FILE} -pushd ${LOCAL_PATH} - -# checking bash capability of parsing arrays -whotest[0]='test' || (crash 3 "Arrays not supported in this version of bash.") - -# Prepare output directory for our packaging -# Check target dir exists and is not empty -if [ -d ${PACKAGING_TARGET_DIR} ] && [ "$(ls -A ${PACKAGING_TARGET_DIR})" ]; then - if [ ${FORCE_REMOVE} -eq 0 ]; then - crash 1 "Target directory not empty. Use --force to overwrite it." 
- else - rm -rf ${PACKAGING_TARGET_DIR} - fi -fi - -# Create all tars -build_sw_artifacts -create_sw_package -create_resource_package -create_aux_package - -popd diff --git a/build/requirements.txt b/build/requirements.txt index 2c404aed..39544458 100644 --- a/build/requirements.txt +++ b/build/requirements.txt @@ -1,2 +1,2 @@ -docker==3.7.2 +docker>=3.7.2 gitpython==2.1.11 diff --git a/tools/cicdansible/.gitignore b/tools/cicdansible/.gitignore new file mode 100644 index 00000000..bb3e4abb --- /dev/null +++ b/tools/cicdansible/.gitignore @@ -0,0 +1,2 @@ +*.retry +resources/ diff --git a/tools/cicdansible/ansible.cfg b/tools/cicdansible/ansible.cfg new file mode 100644 index 00000000..e74dda58 --- /dev/null +++ b/tools/cicdansible/ansible.cfg @@ -0,0 +1,18 @@ +#Ansible configuration used when running the playbook. +[defaults] +#Stdout callback. +stdout_callback=debug +#Default verbosity level, for logging all module outputs. +verbosity=1 + +[inventory] +#Fail when inventory parsing fails. +any_unparsed_is_failed=true + +[connection] +#Enable ansible pipelining. +pipelining=true + +[ssh_connection] +#Increase control persist settings. +ssh_args=-C -o ControlMaster=auto -o ControlPersist=30m diff --git a/tools/cicdansible/group_vars/all.yml b/tools/cicdansible/group_vars/all.yml new file mode 100644 index 00000000..f886b628 --- /dev/null +++ b/tools/cicdansible/group_vars/all.yml @@ -0,0 +1,66 @@ +--- +#General configuration, can be overridden in cmdline. +#Authentication/keystone url. +os_auth_url: "" +#Openstack username. +os_username: "" +#Password. +os_password: "" +#Domain name. +os_domain_name: "default" +#Project name. +os_project_name: "" +#The name or id of public network used to communicate with instances. +public_network: "" +#Floating ip address for first node instance +first_node_ip: "" +#Floating ip of infra instance. +infra_ip: "" +#Floating ip of installer. +installer_ip: "" +#Openstack flavor name for nodes. +node_flavor_name: "" +#Flavor name for infra instance. +infra_flavor_name: "" +#Flavor name for installer instance. +installer_flavor_name: "" +#Name of the image for instances. +image_name: "" +#Whether to use a volume for /dockerdata-nfs or to use ephemeral disk. +#True by default, most openstack providers offer ssd volumes probably. +use_volume_for_nfs: true +#Cidr of private subnet where instances are connected. +subnet_cidr: "10.1.0.0/24" +#Start of dhcp allocation range for subnet. +subnet_range_start: "10.1.0.4" +#Subnet allocation range end. +subnet_range_end: "10.1.0.254" +#Ip address of router used as a gateway to external network. +router_addr: "10.1.0.1" +#Cidr of external subnet to allow access to, 0.0.0.0/0 means allow internet access. +# For offline deployment it is recommended to set this to a cidr of intranet. +external_subnet_cidr: "" +#Address of cicd docker registry. +cicd_docker_registry: "" +#Number of nodes to deploy. +num_nodes: "3" +#Stack name to deploy on heat. +stack_name: "installer-test" +#Address of resource server with packages. +resource_host: "" +#Directory with all onap packages (on resource host). +resources_dir: "" +#Filename of software package. +resources_sw_filename: "sw_package.tar" +#Filename of binary resources. +resources_filename: "resources_package.tar" +#Filename of auxiliary resources. +aux_resources_filename: "aux_package.tar" +#Whether to deploy app. +#Setting it to false will skip deployment, but instance preconfiguration +#will still be done and sw resources uploaded to the installer host. 
+install_app: true +# This is a string containing base64-encoded yaml blob passed to offline installer via -e option. +# You can use it to override any variable in offline installer except those +# supported directly by cicdansible. +application_config: '' diff --git a/tools/cicdansible/group_vars/instances.yml b/tools/cicdansible/group_vars/instances.yml new file mode 100644 index 00000000..0d756a57 --- /dev/null +++ b/tools/cicdansible/group_vars/instances.yml @@ -0,0 +1,11 @@ +#Configuration for all instances. +#User to log in to instances as. +ansible_user: root +#Whether to become root using sudo or such like, no by default. +ansible_become: no +#Private key to use to access instances. +ansible_private_key_file: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa" +#Arguments to skip host key verification for instances, modify only if you know what you are doing. +disable_ssh_host_auth: "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" +ansible_ssh_common_args: "{{ disable_ssh_host_auth }}" + diff --git a/tools/cicdansible/group_vars/nodes.yml b/tools/cicdansible/group_vars/nodes.yml new file mode 100644 index 00000000..76a222c2 --- /dev/null +++ b/tools/cicdansible/group_vars/nodes.yml @@ -0,0 +1,5 @@ +#Configuration for kubernetes nodes. +#This redirects ssh connections through the installer instance, to allow connecting via internal ip. +#It should work even on openssh versions lacking -j option support. +#The value is based heavily on the default from parent group. +ansible_ssh_common_args: "{{ disable_ssh_host_auth }} -o ProxyCommand='ssh {{ disable_ssh_host_auth }} -i {{ ansible_private_key_file }} -W %h:%p root@{{ installer_ip }}'" diff --git a/tools/cicdansible/group_vars/resources.yml b/tools/cicdansible/group_vars/resources.yml new file mode 100644 index 00000000..e7c0f773 --- /dev/null +++ b/tools/cicdansible/group_vars/resources.yml @@ -0,0 +1,6 @@ +#Resource host configuration. +#Define used private key. +ansible_private_key_file: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa" +#User login data. +ansible_user: root +ansible_become: no diff --git a/tools/cicdansible/heat/config.yaml b/tools/cicdansible/heat/config.yaml new file mode 100644 index 00000000..e1f0309f --- /dev/null +++ b/tools/cicdansible/heat/config.yaml @@ -0,0 +1,10 @@ +#cloud-config +#Enable root login. +disable_root: false +#Output everything to /dev/console... +output: { all: "/dev/console" } +#Initialization. +runcmd: + - | + set -efxu -o pipefail + %{NOTIFY_COMMAND} --data-binary '{"status": "SUCCESS", "reason": "instance started successfully"}' diff --git a/tools/cicdansible/heat/installer.env b/tools/cicdansible/heat/installer.env new file mode 100644 index 00000000..9765ce30 --- /dev/null +++ b/tools/cicdansible/heat/installer.env @@ -0,0 +1 @@ +#Environment file diff --git a/tools/cicdansible/heat/installer.yaml b/tools/cicdansible/heat/installer.yaml new file mode 100644 index 00000000..7b3f10c0 --- /dev/null +++ b/tools/cicdansible/heat/installer.yaml @@ -0,0 +1,330 @@ +#This is the environment heat template, compatible with openstack ocata. 
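A small sketch of producing the base64-encoded `application_config` blob mentioned above, using the new `rke_etcd` settings from this change as the example override; the inventory and playbook names in the last line are hypothetical, not taken from this change:

```
# Encode an offline-installer override as the application_config value:
cat > overrides.yml <<'EOF'
rke_etcd:
  enabled_custom_etcd_storage: true
  storage_path: /var/lib/etcd-custom
  storage_mountpoint: /var/lib/rancher/etcd-custom
  enabled_unsafe_volatile_storage: false
  tmpfs_size: 5G
EOF
application_config=$(base64 -w0 < overrides.yml)
# Pass it alongside the other cicdansible variables
# (inventory and playbook names below are hypothetical):
ansible-playbook -i hosts heat_install.yml -e "application_config=${application_config}"
```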
+heat_template_version: 2017-02-24 +description: "Heat template for deploying onap env" +parameters: + auth_key: + label: "Auth public key" + description: "The public key used to authenticate to instances" + type: string + node_flavor_name: + label: "name of node flavor" + description: "The name of the flavor used to create kubernetes nodes" + type: string + constraints: + - custom_constraint: nova.flavor + description: "need to specify a valid flavor" + infra_flavor_name: + label: "name of infra flavor" + description: "flavor used to create infra instance" + type: string + constraints: + - custom_constraint: nova.flavor + description: "need to specify a valid flavor" + installer_flavor_name: + label: "name of installer flavor" + description: "flavor used to create installer instance" + type: string + constraints: + - custom_constraint: nova.flavor + description: "need to specify a valid flavor" + image_name: + label: "image name" + description: "name of the image from which to create all instances, should be rhel 7.6 or centos image" + type: string + constraints: + - custom_constraint: glance.image + description: "must specify a valid image name" + subnet_cidr: + label: "private subnet cidr" + description: "Cidr of a private subnet instances will be connected to" + type: string + constraints: + - custom_constraint: net_cidr + subnet_range_start: + label: "subnet dhcp allocation range start" + description: "Start of range of dhcp allocatable ips on private subnet" + type: string + constraints: + - custom_constraint: ip_addr + subnet_range_end: + label: "end of subnet dhcp allocation range" + description: "End of private subnet's dhcp allocation range" + type: string + constraints: + - custom_constraint: ip_addr + router_addr: + label: "ip address of router" + description: "IP address of the router allowing access to other networks incl. company network" + type: string + constraints: + - custom_constraint: ip_addr + public_network_name: + label: "name of the public network" + description: "Name of the public, internet facing network, also allowing access to company internal hosts" + type: string + constraints: + - custom_constraint: neutron.network + description: "Must specify a valid network name or id" + external_subnet_cidr: + label: "external subnet cidr" + description: "The CIDR of the external subnet, that should be accessible from instances, even when internet access is cut. Putting 0.0.0.0/0 here means access to internet." + type: string + constraints: + - custom_constraint: net_cidr + installer_ip: + label: "floating ip of the installer" + description: "a pre-allocated floating ip that will be associated with the installer instance" + type: string + infra_ip: + label: "floating ip of the infra" + description: "a pre-allocated floating ip that will be associated with the infrastructure instance" + type: string + node_ip: + label: "floating ip of the first node" + description: "a pre-allocated floating ip that will be associated with the first kubernetes node and allow accessing onap" + type: string + num_nodes: + label: "num nodes" + description: "the number of kubernetes nodes to create, min 1" + type: number + constraints: + - range: { min: 1 } + description: "must be a positive number" + use_volume_for_nfs: + type: boolean + label: "use volume for nfs storage" + description: "Indicates whether a cinder volume should be used for nfs storage or not. If not checked, the nfs would be stored in the root disk" +conditions: + #Condition for nfs volume usage. 
+ use_volume_for_nfs: { get_param: use_volume_for_nfs } +resources: + # Security group used to secure access to instances. + secgroup: + type: OS::Neutron::SecurityGroup + properties: + rules: + # Egress rule allowing access to external_subnet_cidr. + - direction: egress + ethertype: IPv4 + remote_ip_prefix: { get_param: external_subnet_cidr } + # Ingress rule, allowing also inbound access by external network. + - direction: ingress + ethertype: IPv4 + remote_ip_prefix: { get_param: external_subnet_cidr } + # Allow outbound communication with the internal subnet. + - direction: egress + ethertype: IPv4 + remote_ip_prefix: { get_param: subnet_cidr } + # Allow inbound communication from internal network. + - direction: ingress + ethertype: IPv4 + remote_ip_prefix: { get_param: subnet_cidr } + # Allow outbound access to 169.254.0.0/16, mainly for metadata. We do not need inbound. + - direction: egress + ethertype: IPv4 + remote_ip_prefix: 169.254.0.0/16 + #A network that our test environment will be connected to. + privnet: + type: OS::Neutron::Net + #Subnet that instances will live in. + privsubnet: + type: OS::Neutron::Subnet + properties: + network: { get_resource: privnet } + cidr: { get_param: subnet_cidr } + allocation_pools: + - { start: { get_param: subnet_range_start }, end: { get_param: subnet_range_end } } + gateway_ip: { get_param: router_addr } + ip_version: 4 + #A port connected to the private network, taken by router. + routerport: + type: OS::Neutron::Port + properties: + network: { get_resource: privnet } + fixed_ips: + - { subnet: { get_resource: privsubnet }, ip_address: { get_param: router_addr } } + security_groups: [{ get_resource: secgroup }] + #This is a router, routing between us and the internet. + #It has an external gateway to public network. + router: + type: OS::Neutron::Router + properties: + external_gateway_info: + network: { get_param: public_network_name } + #This is a router interface connecting it to our private subnet's router port. + routercon: + type: OS::Neutron::RouterInterface + properties: + router: { get_resource: router } + port: { get_resource: routerport } + + #Key used to authenticate to instances as root. + key: + type: OS::Nova::KeyPair + properties: + name: { get_param: "OS::stack_name" } + public_key: { get_param: auth_key } + #Handle to signal about starting up of instances. + instance_wait_handle: + type: OS::Heat::WaitConditionHandle + #Monitor waiting for all instances to start. + instance_wait: + type: OS::Heat::WaitCondition + properties: + handle: { get_resource: instance_wait_handle } + timeout: 1200 + count: + yaql: + data: { num_nodes: { get_param: num_nodes } } + #This is number of all nodes + 2 (infra instance and installer) + expression: "$.data.num_nodes + 2" + #Affinity Policy - nodes spread onto as many physical machines as possible (aka. .anti-affinity.). + anti_affinity_group: + type: OS::Nova::ServerGroup + properties: + name: k8s nodes on separate computes + policies: + - anti-affinity + #Resource group to deploy n nodes using node template for each, each node numbered starting from 0. 
+ nodes: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_nodes } + resource_def: + type: node.yaml + properties: + nodenum: "%index%" + key_name: { get_resource: key } + image_name: { get_param: image_name } + network: { get_resource: privnet } + subnet: { get_resource: privsubnet } + flavor_name: { get_param: node_flavor_name } + notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] } + security_group: { get_resource: secgroup } + scheduler_hints: + group: { get_resource: anti_affinity_group } + depends_on: [routercon, instance_wait_handle] + #Nfs storage volume for first node. + nfs_storage: + type: OS::Cinder::Volume + condition: use_volume_for_nfs + properties: + name: nfs_storage + size: 50 + #Attachment of volume to first node. + nfs_storage_attachment: + type: OS::Cinder::VolumeAttachment + condition: use_volume_for_nfs + properties: + instance_uuid: { get_attr: [nodes, "resource.0"] } + volume_id: { get_resource: nfs_storage } + #Floating ip association for node (first only). + node_fip_assoc: + type: OS::Neutron::FloatingIPAssociation + properties: + floatingip_id: { get_param: node_ip } + port_id: { get_attr: ["nodes", "resource.0.port_id"] } + #Openstack volume used for storing resources. + resources_storage: + type: "OS::Cinder::Volume" + properties: + name: "resources_storage" + size: 120 + #Instance representing infrastructure instance, created using subtemplate. + infra: + type: "instance.yaml" + properties: + instance_name: infra + network: { get_resource: privnet } + subnet: { get_resource: privsubnet } + key_name: { get_resource: key } + flavor_name: { get_param: infra_flavor_name } + image_name: { get_param: image_name } + notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] } + security_group: { get_resource: secgroup } + scheduler_hints: {} + depends_on: [instance_wait_handle] + #Volume attachment for infra node. + resources_storage_attachment: + type: OS::Cinder::VolumeAttachment + properties: + volume_id: { get_resource: resources_storage } + instance_uuid: { get_resource: infra } + #Floating ip association for infra. + infra_fip_assoc: + type: OS::Neutron::FloatingIPAssociation + properties: + floatingip_id: { get_param: infra_ip } + port_id: { get_attr: ["infra", "port_id"] } + #Small installer vm having access to other instances, used to install onap. + installer: + type: "instance.yaml" + properties: + instance_name: installer + image_name: { get_param: image_name } + flavor_name: { get_param: installer_flavor_name } + key_name: { get_resource: key } + network: { get_resource: privnet } + subnet: { get_resource: privsubnet } + notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] } + security_group: { get_resource: secgroup } + scheduler_hints: {} + depends_on: instance_wait_handle + #Floating ip for installer. + installer_fip_assoc: + type: OS::Neutron::FloatingIPAssociation + properties: + floatingip_id: { get_param: installer_ip } + port_id: { get_attr: [installer, port_id] } + #Map of node volumes, taken from volumes output param. + node_volumes: + type: OS::Heat::Value + properties: + type: json + #We need yaql transformation to be done on the volume map. + value: + yaql: + data: + #This is a map of node number to value of "volumes" attribute, that contains + #a list of volumes written as pairs [volumeid, mountpoint]. + volumes: { get_attr: [nodes, attributes, volumes] } + #We need yaql expressions to transform node numbers to node names in the form "node0" and similar. 
+ #However we don't need anything more complicated. + expression: "$.data.volumes?.items()?.toDict('node'+str($[0]), $[1])" + #List of infra specific volumes (not a map as above). + infra_volumes: + type: OS::Heat::Value + properties: + value: + - [{ get_resource: resources_storage }, "/opt/onap"] + #Contains node0 specific volume list. + node0_volumes: + type: OS::Heat::Value + properties: + #Note that it returns an empty list if nfs volume is disabled. + value: + if: + - use_volume_for_nfs + - - [{ get_resource: nfs_storage }, "/dockerdata-nfs"] + - [] +#Output values +outputs: + installer_ip: + value: { get_attr: [installer, ip] } + description: "Internal ip of installer instance" + infra_ip: + value: { get_attr: [infra, ip] } + description: "Internal ip of infra instance" + node_ips: + value: { get_attr: [nodes, ip] } + description: "Serialized json list of node internal ips starting at node0" + volumes: + description: "map of volumes per each instance" + value: + #Can do deep merging only with yaql. + yaql: + data: + node_volumes: { get_attr: [node_volumes, value]} + infra_volumes: { infra: { get_attr: [infra_volumes, value] }} + node0_volumes: {node0: { get_attr: [node0_volumes, value] }} + expression: "$.data.node_volumes?.mergeWith($.data.infra_volumes)?.mergeWith($.data.node0_volumes)" diff --git a/tools/cicdansible/heat/instance.yaml b/tools/cicdansible/heat/instance.yaml new file mode 100644 index 00000000..5429eb6e --- /dev/null +++ b/tools/cicdansible/heat/instance.yaml @@ -0,0 +1,62 @@ +#Template for instances. +heat_template_version: 2017-02-24 +description: "template instantiating and configuring a single instance (any)" +parameters: + instance_name: + type: string + network: + type: string + subnet: + type: string + image_name: + type: string + flavor_name: + type: string + key_name: + type: string + notify_command: + type: string + security_group: + type: string + scheduler_hints: + type: json + default: {} +#Resources. +resources: + #This is the network port to attach instance to. + port: + type: OS::Neutron::Port + properties: + network: { get_param: network } + security_groups: [ { get_param: security_group } ] + fixed_ips: + - { subnet: { get_param: subnet }} + #cloudinit configuration stuff. + config: + type: OS::Heat::SoftwareConfig + properties: + config: + str_replace_strict: + template: { get_file: config.yaml } + params: + "%{NOTIFY_COMMAND}": { get_param: notify_command } + #Actual instance to create. + instance: + type: OS::Nova::Server + properties: + name: { get_param: instance_name } + image: { get_param: image_name } + flavor: { get_param: flavor_name } + key_name: { get_param: key_name } + networks: + - port: { get_resource: port } + user_data_format: SOFTWARE_CONFIG + user_data: { get_resource: config } + scheduler_hints: { get_param: scheduler_hints } +outputs: + OS::stack_id: + value: { get_resource: instance } + port_id: + value: { get_resource: port } + ip: + value: { get_attr: ["port", "fixed_ips", 0, "ip_address"] } diff --git a/tools/cicdansible/heat/node.yaml b/tools/cicdansible/heat/node.yaml new file mode 100644 index 00000000..cd628eec --- /dev/null +++ b/tools/cicdansible/heat/node.yaml @@ -0,0 +1,62 @@ +#This yaml template instantiates kubernetes nodes (using instance.yaml subtemplate). +#It contains some node specific things, and has been split from main template +#to be able to do some late evaluation tricks. 
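An illustrative sketch (not part of the patch) of what the merged volumes output of the stack above ends up looking like for a two node deployment with use_volume_for_nfs enabled - the identifiers stand in for real Cinder volume UUIDs, the mountpoints come from the templates in this patch:

  infra:
    - ['<resources_storage id>', '/opt/onap']
  node0:
    - ['<docker_storage id>', '/var/lib/docker']
    - ['<nfs_storage id>', '/dockerdata-nfs']
  node1:
    - ['<docker_storage id>', '/var/lib/docker']

This per-host map is what the configure tasks of the setup_openstack_infrastructure role, further down in this patch, read back from the stack outputs in order to partition and mount each volume.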
+heat_template_version: 2017-02-24 +description: "This template instantiates a single kubernetes node using the instance.yaml subtemplate" +parameters: + key_name: + type: string + flavor_name: + type: string + nodenum: + type: number + image_name: + type: string + network: + type: string + subnet: + type: string + notify_command: + type: string + security_group: + type: string + scheduler_hints: + type: json +resources: + #Volume for storing /var/lib/docker for node. + docker_storage: + type: OS::Cinder::Volume + properties: + name: docker_storage + size: 120 + #Call generic instance template. + instance: + type: instance.yaml + properties: + instance_name: + str_replace_strict: + template: "node%index%" + params: { "%index%": { get_param: nodenum } } + key_name: { get_param: key_name } + image_name: { get_param: image_name } + network: { get_param: network } + subnet: { get_param: subnet } + flavor_name: { get_param: flavor_name } + notify_command: { get_param: notify_command } + security_group: { get_param: security_group } + scheduler_hints: { get_param: scheduler_hints } + #Attachment of docker volume to node. + docker_storage_attachment: + type: OS::Cinder::VolumeAttachment + properties: + volume_id: { get_resource: docker_storage } + instance_uuid: { get_resource: instance } +outputs: + OS::stack_id: + value: { get_resource: instance } + port_id: + value: { get_attr: ["instance", "port_id"] } + ip: + value: { get_attr: ["instance", "ip"] } + volumes: + value: [[{ get_resource: docker_storage }, "/var/lib/docker"]] diff --git a/tools/cicdansible/hosts.yml b/tools/cicdansible/hosts.yml new file mode 100644 index 00000000..e4c416cf --- /dev/null +++ b/tools/cicdansible/hosts.yml @@ -0,0 +1,28 @@ +#Default inventory. +#This file should not be modified, instead modify group_vars. +#NOTE +#All kubernetes nodes including the first node are added to inventory dynamically. +#Instances group with children. +instances: + hosts: + +#Installer instance. + installer: + #Do not modify. + ansible_host: "{{ installer_ip }}" + +#Infra instance. + infra: + #Do not modify. + ansible_host: "{{ infra_ip }}" + + children: + #Empty group for nodes, populated dynamically, do not modify please. + nodes: + +#The group for resource host, only first entry is considered. +#This host contains onap installer packages including scripts. +resources: + hosts: + resource_host: + ansible_host: "{{ resource_host }}" diff --git a/tools/cicdansible/install.yml b/tools/cicdansible/install.yml new file mode 100644 index 00000000..13071c31 --- /dev/null +++ b/tools/cicdansible/install.yml @@ -0,0 +1,36 @@ +--- +#Installation of onap on open stack driven by ansible. +#Default parameters are set in group_vars/*.yml. +#Inventory is in hosts.yml, and parameters specific to instances are set there. +#Deploy infrastructure. +- name: "deploy infrastructure" + hosts: localhost + gather_facts: false + roles: + - role: setup_openstack_infrastructure + vars: + mode: deploy +#Play that configures all instances. +- name: "Instance configuration" + hosts: instances + any_errors_fatal: true + roles: + - role: setup_openstack_infrastructure + vars: + mode: configure + - role: configure_instances +#Play that downloads sw resources. +- name: "Download resources" + hosts: resources + gather_facts: false + roles: + - role: install + vars: + mode: download_resources +#Perform installation. 
+- name: "Perform installation" + hosts: installer + roles: + - role: install + vars: + mode: install diff --git a/tools/cicdansible/library/os_floating_ips_facts.py b/tools/cicdansible/library/os_floating_ips_facts.py new file mode 100644 index 00000000..ad546004 --- /dev/null +++ b/tools/cicdansible/library/os_floating_ips_facts.py @@ -0,0 +1,61 @@ +#!/usr/bin/python +ANSIBLE_METADATA = { + 'METADATA_VERSION': '1.1', + 'supported_by': 'community', + 'status': 'preview' +} + +DOCUMENTATION = ''' +--- +module: "os_floating_ips_facts" +short_description: "Retrieves facts about floating ips" +description: + - "This module retrieves facts about one or more floating ips allocated to project." +version_added: "2.7" +author: + - "Michal Zegan" +requirements: + - "python => 2.7" + - "openstacksdk" +options: + floating_ip: + description: + - "The floating ip to retrieve facts for" + type: "str" + network: + description: + - "Name or id of the floating ip network to query." + required: true + type: "str" +notes: + - "Registers facts starting with openstack_floating_ips" +extends_documentation_fragment: openstack +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module + +def run_module(): + args=openstack_module_kwargs() + argspec=openstack_full_argument_spec( + floating_ip=dict(type=str), + network=dict(type=str, required=True)) + module=AnsibleModule(argument_spec=argspec, **args) + sdk, cloud = openstack_cloud_from_module(module) + try: + fip_network=cloud.network.find_network(module.params['network']) + filter=dict( + project_id=cloud.current_project_id, + floating_network_id=fip_network.id) + if not (module.params['floating_ip'] is None): + filter['floating_ip_address'] = module.params['floating_ip'] + ips=[dict(x) for x in cloud.network.ips(**filter)] + module.exit_json( + changed=False, + ansible_facts=dict(openstack_floating_ips=ips) + ) + except sdk.exceptions.OpenStackCloudException as e: + module.fail_json(msg=str(e)) + +if __name__ == '__main__': + run_module() diff --git a/tools/cicdansible/roles/configure_instances/tasks/cicd_registry.yml b/tools/cicdansible/roles/configure_instances/tasks/cicd_registry.yml new file mode 100644 index 00000000..f3c54ca3 --- /dev/null +++ b/tools/cicdansible/roles/configure_instances/tasks/cicd_registry.yml @@ -0,0 +1,10 @@ +#Configure access to cicd docker registry. +- name: "Ensure that docker config directory exists" + file: + path: /etc/docker + mode: 0700 + state: directory +- name: "Allow insecure access to cicd docker registry" + template: + src: daemon.json.j2 + dest: /etc/docker/daemon.json diff --git a/tools/cicdansible/roles/configure_instances/tasks/general.yml b/tools/cicdansible/roles/configure_instances/tasks/general.yml new file mode 100644 index 00000000..6ed9982e --- /dev/null +++ b/tools/cicdansible/roles/configure_instances/tasks/general.yml @@ -0,0 +1,26 @@ +#General instance configuration. +#Modify /etc/hosts on every instance to add every instance there including itself. +- name: "Add hosts to /etc/hosts" + lineinfile: + path: /etc/hosts + insertafter: EOF + regexp: "^[^ ]+ {{ item }}$" + state: present + line: "{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}" + loop: "{{ groups['instances'] }}" +#Copy private ssh key to instances for easy connecting between them. 
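Since the module above ships without an EXAMPLES section, a minimal illustrative task using it would look roughly as follows (mirroring how the deploy/prereq.yml tasks later in this patch call it; the network name is made up):

  - name: "get floating ip facts"
    os_floating_ips_facts:
      auth: "{{ os_auth }}"
      auth_type: token
      network: public
  - debug:
      var: openstack_floating_ips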
+- name: "Ensure ssh directory exists" + file: + path: /root/.ssh + owner: root + group: root + mode: 0700 + state: directory +- name: "Install ssh private key" + copy: + src: "{{ ansible_private_key_file }}" + dest: /root/.ssh/id_rsa + mode: 0400 +#Add public ssh host keys of all instances to trust them. +- name: "Add host keys of instances to known_hosts" + shell: "ssh-keyscan {{ groups['instances'] | join(' ') }} > /root/.ssh/known_hosts" diff --git a/tools/cicdansible/roles/configure_instances/tasks/main.yml b/tools/cicdansible/roles/configure_instances/tasks/main.yml new file mode 100644 index 00000000..fe5b4b7d --- /dev/null +++ b/tools/cicdansible/roles/configure_instances/tasks/main.yml @@ -0,0 +1,5 @@ +#Initial instance configuration. +- include_tasks: general.yml +#Configure cicd registry access, but skip installer. +- include_tasks: cicd_registry.yml + when: "inventory_hostname != 'installer'" diff --git a/tools/cicdansible/roles/configure_instances/templates/daemon.json.j2 b/tools/cicdansible/roles/configure_instances/templates/daemon.json.j2 new file mode 100644 index 00000000..1c3ca9bb --- /dev/null +++ b/tools/cicdansible/roles/configure_instances/templates/daemon.json.j2 @@ -0,0 +1,3 @@ +{ +"insecure-registries": ["{{ cicd_docker_registry }}"] +} diff --git a/tools/cicdansible/roles/install/defaults/main.yml b/tools/cicdansible/roles/install/defaults/main.yml new file mode 100644 index 00000000..b21e6323 --- /dev/null +++ b/tools/cicdansible/roles/install/defaults/main.yml @@ -0,0 +1,3 @@ +--- +installer_deploy_path: "{{ ansible_user_dir }}/installer" +install_timeout: 10600 diff --git a/tools/cicdansible/roles/install/tasks/download_resources.yml b/tools/cicdansible/roles/install/tasks/download_resources.yml new file mode 100644 index 00000000..7f042596 --- /dev/null +++ b/tools/cicdansible/roles/install/tasks/download_resources.yml @@ -0,0 +1,6 @@ +#Download resources/scripts to controller. +- name: "Download software resources" + fetch: + src: "{{ resources_dir }}/{{ resources_sw_filename }}" + flat: yes + dest: "resources/" diff --git a/tools/cicdansible/roles/install/tasks/install.yml b/tools/cicdansible/roles/install/tasks/install.yml new file mode 100644 index 00000000..35df7976 --- /dev/null +++ b/tools/cicdansible/roles/install/tasks/install.yml @@ -0,0 +1,48 @@ +#Onap installation tasks +#Copy ssh private key used for resource server access +- name: "Copy resource server access key" + copy: + src: "{{ hostvars[groups['resources'][0]].ansible_private_key_file }}" + dest: "{{ ansible_user_dir }}/.ssh/res.pem" + mode: 0600 +#Unarchive resources. +- name: "Ensure {{ installer_deploy_path }} directory exists" + file: + path: "{{ installer_deploy_path }}" + state: directory +- name: "Extract sw resources" + unarchive: + src: "resources/{{ hostvars[groups['resources'][0]].resources_sw_filename }}" + dest: "{{ installer_deploy_path }}" +#Generate ansible inventory and extra vars. +- name: "Generate ansible inventory for installer" + template: + src: inventory.yml.j2 + dest: "{{ installer_deploy_path }}/ansible/inventory/hosts.yml" +- name: "generate application specific config overrides" + copy: + content: "{{ application_config | b64decode }}" + dest: "{{ installer_deploy_path }}/ansible/application/application_overrides.yml" +# This generates a file with locations of resource files in resource host, we +# do it only to allow manually running offline installer without +# typing them by hand. 
We cannot use +# inventory template because it will be overridden +# by application_configuration.yml. +- name: Generate resource location file + copy: + content: | + resources_dir: {{ resources_dir }} + resources_filename: {{ resources_filename }} + aux_resources_filename: {{ aux_resources_filename }} + app_data_path: /opt/onap/resources + dest: "{{ installer_deploy_path }}/ansible/application/resources.yml" +#Run script. +- name: "Execute installation" + shell: + ./run_playbook.sh + -e @application/application_configuration.yml -e @application/application_overrides.yml + -e @application/resources.yml -i inventory/hosts.yml site.yml + args: + chdir: "{{ installer_deploy_path }}/ansible" + async: "{{ install_timeout }}" + when: install_app diff --git a/tools/cicdansible/roles/install/tasks/main.yml b/tools/cicdansible/roles/install/tasks/main.yml new file mode 100644 index 00000000..04ac4c3d --- /dev/null +++ b/tools/cicdansible/roles/install/tasks/main.yml @@ -0,0 +1 @@ +- include_tasks: "{{ mode }}.yml" diff --git a/tools/cicdansible/roles/install/templates/inventory.yml.j2 b/tools/cicdansible/roles/install/templates/inventory.yml.j2 new file mode 100644 index 00000000..36bf3bd3 --- /dev/null +++ b/tools/cicdansible/roles/install/templates/inventory.yml.j2 @@ -0,0 +1,36 @@ +all: + vars: + ansible_ssh_private_key_file: /root/.ssh/id_rsa + ansible_ssh_common_args: "-o StrictHostKeyChecking=no" + children: + resources: + vars: + ansible_ssh_private_key_file: /root/.ssh/res.pem + ansible_user: "{{ hostvars[groups['resources'][0]].ansible_user }}" + ansible_become: "{{ hostvars[groups['resources'][0]].ansible_become }}" + hosts: + resource_host: + ansible_host: {{ resource_host }} + infrastructure: + hosts: + infra_host: + ansible_host: infra + cluster_ip: {{ hostvars['infra'].ansible_default_ipv4.address }} + kubernetes: + children: + kubernetes-node: + hosts: +{% for h in groups['nodes'] %} + {{ h }}: + ansible_host: "{{ hostvars[h].ansible_default_ipv4.address }}" + cluster_ip: "{{ hostvars[h].ansible_default_ipv4.address }}" +{% endfor %} + kubernetes-control-plane: + hosts: + infra_host + kubernetes-etcd: + hosts: + infra_host + nfs-server: + hosts: + node0 diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/main.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/main.yml new file mode 100644 index 00000000..44de5795 --- /dev/null +++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/main.yml @@ -0,0 +1,11 @@ +#Openstack specific configuration running on instances. +#Get volumes. +- name: "get volume info" + set_fact: + volumes: "{{ (hostvars['localhost'].heat_stack.stack.outputs | selectattr('output_key', 'equalto', 'volumes') | list).0.output_value[inventory_hostname] | default([]) }}" +- name: "Configure volumes" + include_tasks: configure/volume.yml + vars: + volume_id: "{{ item[0] }}" + mountpoint: "{{ item[1] }}" + loop: "{{ volumes }}" diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/volume.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/volume.yml new file mode 100644 index 00000000..8c553850 --- /dev/null +++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/volume.yml @@ -0,0 +1,47 @@ +#Configure a single openstack volume. 
+- name: "Set volume path" + set_fact: + volume_path: "/dev/disk/by-id/virtio-{{ volume_id | truncate(20, True, '') }}" +- name: "Set partition path" + set_fact: + partition_path: "{{ volume_path }}-part1" +- name: "Wait for volume" + #We do not do it normally, because we want to trigger udev (workaround for some bugs). + shell: "udevadm trigger && udevadm settle && [[ -b {{ volume_path }} ]]" + register: result + retries: 30 + delay: 10 + until: result.rc == 0 +- name: "Partition volume" + parted: + device: "{{ volume_path }}" + number: 1 + label: msdos + flags: boot + part_type: primary + state: present +- name: "Wait for partition to appear" + stat: + path: "{{ partition_path }}" + follow: true + register: part_stat + delay: 1 + retries: 5 + until: part_stat.stat.isblk is defined and part_stat.stat.isblk +- name: "Create xfs filesystem on volume" + filesystem: + dev: "{{ partition_path }}" + type: xfs +- name: "Ensure that the mountpoint exists" + file: + path: "{{ mountpoint }}" + owner: root + group: root + mode: 0755 + state: directory +- name: "Mount filesystem" + mount: + src: "{{ partition_path }}" + path: "{{ mountpoint }}" + fstype: xfs + state: mounted diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml new file mode 100644 index 00000000..5f9bc4f6 --- /dev/null +++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml @@ -0,0 +1,37 @@ +#Tasks for stack redeployment. +#Delete the heat stack before deployment. +- name: "delete deployment to force redeploy" + os_stack: + auth: "{{ os_auth }}" + auth_type: token + name: "{{ stack_name }}" + state: absent +#Deploy heat stack with infrastructure. +- name: "Deploy the infrastructure via heat" + os_stack: + auth: "{{ os_auth }}" + auth_type: token + name: "{{ stack_name }}" + template: "heat/installer.yaml" + state: present + environment: + - "heat/installer.env" + parameters: + num_nodes: "{{ num_nodes }}" + public_network_name: "{{ public_network }}" + external_subnet_cidr: "{{ external_subnet_cidr }}" + subnet_cidr: "{{ subnet_cidr }}" + subnet_range_start: "{{ subnet_range_start }}" + subnet_range_end: "{{ subnet_range_end }}" + router_addr: "{{ router_addr }}" + auth_key: "{{ auth_public_key }}" + image_name: "{{ image_name }}" + node_flavor_name: "{{ node_flavor_name }}" + infra_flavor_name: "{{ infra_flavor_name }}" + installer_flavor_name: "{{ installer_flavor_name }}" + node_ip: "{{ floating_ips_by_address[first_node_ip].id }}" + infra_ip: "{{ floating_ips_by_address[infra_ip].id }}" + installer_ip: "{{ floating_ips_by_address[installer_ip].id }}" + use_volume_for_nfs: "{{ use_volume_for_nfs }}" + wait: true + register: heat_stack diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/main.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/main.yml new file mode 100644 index 00000000..324f5374 --- /dev/null +++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/main.yml @@ -0,0 +1,8 @@ +--- +#This mode expects some variables, and deploys infrastructure on open stack. +#Execute prerequisites. +- include_tasks: deploy/prereq.yml +#Deploy stack. +- include_tasks: deploy/heat.yml +#Register instances in inventory. 
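A note on the volume path logic at the top of configure/volume.yml above: the virtio disk serial is limited to 20 characters, which is why the Cinder volume id is truncated before building the /dev/disk/by-id path. Purely illustrative values:

  volume_id: 123e4567-e89b-12d3-a456-426614174000
  volume_path: /dev/disk/by-id/virtio-123e4567-e89b-12d3-a
  partition_path: /dev/disk/by-id/virtio-123e4567-e89b-12d3-a-part1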
+- include_tasks: deploy/register_instances.yml diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/prereq.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/prereq.yml new file mode 100644 index 00000000..2fe8717a --- /dev/null +++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/prereq.yml @@ -0,0 +1,41 @@ +#Prerequisite tasks before stack deployment. +#Authenticate to cloud. +- name: "authenticate to cloud" + os_auth: + auth: + auth_url: "{{ os_auth_url }}" + username: "{{ os_username }}" + password: "{{ os_password }}" + domain_name: "{{ os_domain_name }}" + project_name: "{{ os_project_name }}" + project_domain_name: "{{ os_domain_name }}" +#Will use the token from this point on. +- name: "set token" + set_fact: + os_auth: + auth_url: "{{ os_auth_url }}" + token: "{{ auth_token }}" + project_name: "{{ os_project_name }}" + project_domain_name: "{{ os_domain_name }}" +#Retrieve floating ip info. +- name: "get floating ip facts" + os_floating_ips_facts: + auth: "{{ os_auth }}" + auth_type: token + network: "{{ public_network }}" +#Group floating ips by ip address to allow looking them up. +- name: "group floating ips by address" + set_fact: + floating_ips_by_address: "{{ floating_ips_by_address | default({}) | combine({item.floating_ip_address: item}) }}" + loop: "{{ query('items', openstack_floating_ips) }}" +- name: "fail if required floating ips do not exist" + fail: msg="The required floating ips do not exist" + when: "(not (first_node_ip in floating_ips_by_address) + or not (infra_ip in floating_ips_by_address) + or not (installer_ip in floating_ips_by_address))" +#Get a ssh public key to be passed to heat, it requires ssh-keygen with -y option. +- name: "Retrieve public key from ssh private key" + command: "ssh-keygen -y -f {{ hostvars['installer'].ansible_private_key_file }}" + register: public_key_generation +- set_fact: + auth_public_key: "{{ public_key_generation.stdout }}" diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/register_instances.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/register_instances.yml new file mode 100644 index 00000000..a50ecd22 --- /dev/null +++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/register_instances.yml @@ -0,0 +1,9 @@ +#Register instances as hosts in inventory. +#Installer and infra are statically registered. +#Register node instances dynamically. 
+- name: "Register node instances"
+  add_host:
+    name: "node{{ item[0] }}"
+    groups: nodes
+    ansible_host: "{{ item[1] }}"
+  loop: "{{ query('indexed_items', (heat_stack.stack.outputs | selectattr('output_key', 'equalto', 'node_ips') | list).0.output_value) }}"
diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/main.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/main.yml
new file mode 100644
index 00000000..7a00abff
--- /dev/null
+++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/main.yml
@@ -0,0 +1 @@
+- include_tasks: "{{ mode }}/main.yml"
diff --git a/tools/helm-healer.sh b/tools/helm-healer.sh
new file mode 100755
index 00000000..b030fcac
--- /dev/null
+++ b/tools/helm-healer.sh
@@ -0,0 +1,537 @@
+#!/bin/sh
+
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+#
+# globals and defaults
+#
+
+NAMESPACE=
+OVERRIDES=
+HELM_CHART_RELEASE_NAME=
+HELM_DELETE_ALL=
+HELM_SKIP_DEPLOY=
+VOLUME_STORAGE=
+HELM_TIMEOUT=3600
+RELEASE_PREFIX=onap
+
+#
+# control variables
+#
+
+CMD=$(basename "$0")
+COLOR_ON_RED='\033[0;31;1m'
+COLOR_ON_GREEN='\033[0;32;1m'
+COLOR_OFF='\033[0m'
+
+
+#
+# functions
+#
+
+help()
+{
+cat <<EOF
+${CMD} - simple tool for fixing onap helm deployment
+
+DESCRIPTION
+    This script does nothing smart or special, it just tries to
+    redeploy an onap component. It can fix only problems related to
+    race conditions or timeouts. Nothing else. It will not fix a
+    broken ONAP - there is no such ambition - that effort should
+    be directed upstream.
+
+USAGE
+    ${CMD} -h|--help
+        This help
+
+    ${CMD} -n|--namespace <namespace>
+        (-f|--file <override>)...
+        (-s|--storage <directory>)|--no-storage-deletion
+        [-p|--release-prefix <release prefix>]
+        [-t|--timeout <secs>]
+        [(-c|--component <component release name>)...|
+        (-D|--delete-all)]
+        [-C|--clean-only]
+
+    Usage 1 (simple heuristics - redeploy failed components):
+        ${CMD} -n onap -f /some/override1.yml -s /dockerdata-nfs
+
+    Usage 2 (redeploy ONLY explicitly listed components):
+        ${CMD} -n onap -f /some/override1.yml -s /dockerdata-nfs \
+            -c onap-aaf -c onap-sdc -c onap-portal
+
+    Usage 3 (delete EVERYTHING and redeploy):
+        ${CMD} -n onap -f /some/override1.yml -s /dockerdata-nfs \
+            --delete-all
+
+    Usage 4 (just clean - do not redeploy):
+        ${CMD} -n onap -f /some/override1.yml -s /dockerdata-nfs \
+            --delete-all --clean-only
+
+    The namespace argument and at least one override file are mandatory
+    for this script to execute. You must also provide the path to the
+    storage or explicitly request that the component's file storage is
+    not deleted.
+
+    Storage should be the directory where the persistent volumes reside.
+    Deletion works only if the component created a persistent volume
+    with the same filename as its release name - otherwise it has no
+    effect. The exception is when '--delete-all' is used - in that case
+    the whole content of the storage is deleted (because ONAP is not
+    consistent with the volume directory names - e.g.: sdnc).
+
+    CAUTION 1: the filename of an override file cannot contain
+    whitespace! This is a limitation of the helm/onap deploy plugin,
+    which does not handle such files, so there is no reason for this
+    script to support something helm deploy would choke on anyway.
+
+    The '--prefix' option is the helm release argument - it is actually
+    the prefix you see when you list the helm releases (helm is a
+    little confusing here).
+
+    CAUTION 2: by default the release prefix is 'onap' - if you
+    deployed the release as 'onap' and now run this script with a
+    different prefix, it will skip all 'onap-*' components and deploy
+    a new release with the new prefix - BE SURE TO USE THE PROPER
+    RELEASE PREFIX!
+
+    Timeout sets the waiting time of helm deploy per component.
+
+    '--component' refers to the release name of the chart which you
+    want to redeploy explicitly - otherwise ALL FAILED components
+    will be redeployed. You can target more than one component at once
+    - just use the argument multiple times.
+
+    The component option is mutually exclusive with '--delete-all',
+    which deletes all components - healthy or not. In fact it deletes
+    the whole NAMESPACE and everything in it.
+
+    '--clean-only' can be used with any usage: heuristics, an explicit
+    component list or '--delete-all'. It simply skips the last step -
+    the actual redeploy.
+EOF
+}
+
+msg()
+{
+    echo -e "${COLOR_ON_GREEN}INFO: $@ ${COLOR_OFF}"
+}
+
+error()
+{
+    echo -e "${COLOR_ON_RED}ERROR: $@ ${COLOR_OFF}"
+}
+
+# remove all successfully completed jobs
+clean_jobs()
+{
+    kubectl get jobs -n ${NAMESPACE} \
+        --ignore-not-found=true \
+        --no-headers=true | \
+        while read -r _job _completion _duration _age ; do
+            _done=$(echo ${_completion} | awk 'BEGIN {FS="/";} {print $1;}')
+            _desired=$(echo ${_completion} | awk 'BEGIN {FS="/";} {print $2;}')
+            if [ "$_desired" -eq "$_done" ] ; then
+                delete_job "$_job"
+            fi
+        done
+}
+
+get_failed_labels()
+{
+    get_labels 'status.phase==Failed'
+}
+
+# arg: [optional: selector]
+get_labels()
+{
+    if [ -n "$1" ] ; then
+        _selector="--field-selector=${1}"
+    else
+        _selector=
+    fi
+
+    kubectl get pods -n ${NAMESPACE} \
+        --show-labels=true \
+        --include-uninitialized=true \
+        ${_selector} \
+        --ignore-not-found=true \
+        --no-headers=true | \
+        while read -r _pod _ready _status _restart _age _labels ; do
+            [ -z "$_labels" ] && break
+            for _label in $(echo "$_labels" | tr ',' ' ') ; do
+                case "$_label" in
+                    release=*)
+                        _label=$(echo "$_label" | sed 's/release=//')
+                        echo "$_label"
+                        ;;
+                esac
+            done
+        done | sort -u
+}
+
+# arg: <release name>
+helm_undeploy()
+{
+    msg "Undeploy helm release name: ${1}"
+    helm undeploy ${1} --purge
+}
+
+# arg: <job name>
+delete_job()
+{
+    kubectl delete job -n ${NAMESPACE} \
+        --cascade=true \
+        --now=true \
+        --include-uninitialized=true \
+        --wait=true \
+        ${1}
+
+    # wait for the job to be deleted
+    _output=start
+    while [ -n "$_output" ] && sleep 1 ; do
+        _output=$(kubectl get pods -n ${NAMESPACE} \
+            --ignore-not-found=true \
+            --no-headers=true \
+            --selector="job-name=${1}")
+    done
+}
+
+# arg: <resource> <release name>
+delete_resource()
+{
+    _resource="$1"
+    _release="$2"
+
+    msg "Delete ${_resource} for ${_release}..."
+    {
+        kubectl get ${_resource} -n ${NAMESPACE} \
+            --ignore-not-found=true \
+            --selector="release=${_release}" \
+            --no-headers=true
+
+        # this is due to the missing "release" label in some pods
+        # grep to the rescue...
+ kubectl get ${_resource} -n ${NAMESPACE} \ + --no-headers=true | grep "^${_release}" + } | awk '{print $1}' | sort -u | while read -r _name _rest ; do + echo "Deleting '${_name}'" + kubectl delete ${_resource} -n ${NAMESPACE} \ + --cascade=true \ + --now=true \ + --include-uninitialized=true \ + --wait=true \ + ${_name} \ + 2>&1 | grep -iv 'not[[:space:]]*found' + + # wait for resource to be deleted + _output=start + while [ -n "$_output" ] && sleep 1 ; do + _output=$(kubectl get ${_resource} -n ${NAMESPACE} \ + --ignore-not-found=true \ + --no-headers=true \ + --field-selector="metadata.name=${_name}") + done + done +} + +delete_namespace() +{ + msg "Delete the whole namespace: ${NAMESPACE}" + kubectl delete namespace \ + --cascade=true \ + --now=true \ + --include-uninitialized=true \ + --wait=true \ + "$NAMESPACE" + + # wait for namespace to be deleted + _output=start + while [ -n "$_output" ] && sleep 1 ; do + _output=$(kubectl get all -n ${NAMESPACE} \ + --ignore-not-found=true \ + --no-headers=true) + done +} + +# arg: [optional: subdir] +delete_storage() +{ + _node=$(kubectl get nodes \ + --selector=node-role.kubernetes.io/worker \ + -o wide \ + --no-headers=true | \ + awk '{print $6}' | head -n 1) + + if [ -z "$_node" ] ; then + error "Could not list kubernetes nodes - SKIPPING DELETION" + else + if [ -n "$1" ] ; then + msg "Delete directory '${VOLUME_STORAGE}/${1}' on $_node" + ssh -T $_node <<EOF +rm -rf "${VOLUME_STORAGE}/${1}" +EOF + else + msg "Delete directories '${VOLUME_STORAGE}/*' on $_node" + ssh -T $_node <<EOF +find "${VOLUME_STORAGE}" -maxdepth 1 -mindepth 1 -exec rm -rf '{}' \; +EOF + fi + fi +} + +# arg: <release name> +redeploy_component() +{ + _chart=$(echo "$1" | sed 's/[^-]*-//') + helm_undeploy ${1} + # TODO: does deleted secret per component break something? + for x in jobs deployments pods pvc pv ; do + delete_resource ${x} ${1} + done + + if [ -n "$VOLUME_STORAGE" ] ; then + msg "Persistent volume data deletion in directory: ${VOLUME_STORAGE}/${1}" + delete_storage "$1" + fi + + # TODO: until I can verify that this does the same for this component as helm deploy + #msg "Redeployment of the component ${1}..." + #helm install "local/${_chart}" --name ${1} --namespace ${NAMESPACE} --wait --timeout ${HELM_TIMEOUT} +} + + +# +# arguments +# + +state=nil +arg_namespace= +arg_overrides= +arg_timeout= +arg_storage= +arg_nostorage= +arg_components= +arg_prefix= +arg_deleteall= +arg_cleanonly= +while [ -n "$1" ] ; do + case $state in + nil) + case "$1" in + -h|--help) + help + exit 0 + ;; + -n|--namespace) + state=namespace + ;; + -f|--file) + state=override + ;; + -t|--timeout) + state=timeout + ;; + -s|--storage) + state=storage + ;; + --no-storage-deletion) + if [ -n "$arg_storage" ] ; then + error "Usage of storage argument together with no storage deletion option!" + exit 1 + elif [ -z "$arg_nostorage" ] ; then + arg_nostorage=nostorage + else + error "Duplicit argument for no storage option! (IGNORING)" + fi + ;; + -c|--component) + if [ -n "$arg_deleteall" ] ; then + error "'Delete all components' used already - argument mismatch" + exit 1 + fi + state=component + ;; + -D|--delete-all) + if [ -n "$arg_components" ] ; then + error "Explicit component(s) provided already - argument mismatch" + exit 1 + elif [ -z "$arg_deleteall" ] ; then + arg_deleteall=deleteall + else + error "Duplicit argument for 'delete all' option! 
(IGNORING)" + fi + ;; + -p|--prefix) + state=prefix + ;; + -C|--clean-only) + if [ -z "$arg_cleanonly" ] ; then + arg_cleanonly=cleanonly + else + error "Duplicit argument for 'clean only' option! (IGNORING)" + fi + ;; + *) + error "Unknown parameter: $1" + exit 1 + ;; + esac + ;; + namespace) + if [ -z "$arg_namespace" ] ; then + arg_namespace="$1" + state=nil + else + error "Duplicit argument for namespace!" + exit 1 + fi + ;; + override) + if ! [ -f "$1" ] ; then + error "Wrong filename for override file: $1" + exit 1 + fi + arg_overrides="${arg_overrides} -f $1" + state=nil + ;; + component) + arg_components="${arg_components} $1" + state=nil + ;; + prefix) + if [ -z "$arg_prefix" ] ; then + arg_prefix="$1" + state=nil + else + error "Duplicit argument for release prefix!" + exit 1 + fi + ;; + timeout) + if [ -z "$arg_timeout" ] ; then + if ! echo "$1" | grep -q '^[0-9]\+$' ; then + error "Timeout must be an integer: $1" + exit 1 + fi + arg_timeout="$1" + state=nil + else + error "Duplicit argument for timeout!" + exit 1 + fi + ;; + storage) + if [ -n "$arg_nostorage" ] ; then + error "Usage of storage argument together with no storage deletion option!" + exit 1 + elif [ -z "$arg_storage" ] ; then + arg_storage="$1" + state=nil + else + error "Duplicit argument for storage!" + exit 1 + fi + ;; + esac + shift +done + +# sanity check +if [ -z "$arg_namespace" ] ; then + error "Missing namespace" + help + exit 1 +else + NAMESPACE="$arg_namespace" +fi + +if [ -z "$arg_overrides" ] ; then + error "Missing override file(s)" + help + exit 1 +else + OVERRIDES="$arg_overrides" +fi + +if [ -n "$arg_prefix" ] ; then + RELEASE_PREFIX="$arg_prefix" +fi + +if [ -n "$arg_timeout" ] ; then + HELM_TIMEOUT="$arg_timeout" +fi + +if [ -n "$arg_storage" ] ; then + VOLUME_STORAGE="$arg_storage" +elif [ -z "$arg_nostorage" ] ; then + error "Missing storage argument! If it is intended then use '--no-storage-deletion' option" + exit 1 +fi + +if [ -n "$arg_components" ] ; then + HELM_CHART_RELEASE_NAME="$arg_components" +fi + +if [ -n "$arg_deleteall" ] ; then + HELM_DELETE_ALL=yes +fi + +if [ -n "$arg_cleanonly" ] ; then + HELM_SKIP_DEPLOY=yes +fi + + +# +# main +# + +# if --delete-all is used then redeploy all components (the current namespace is deleted) +if [ -n "$HELM_DELETE_ALL" ] ; then + # undeploy helm release (prefix) + helm_undeploy "$RELEASE_PREFIX" + + # we will delete the whole namespace + delete_namespace + + if [ -n "$VOLUME_STORAGE" ] ; then + delete_storage + fi +# delete and redeploy explicit or failed components... +else + # if a helm chart release name was given then just redeploy said component and quit + if [ -n "$HELM_CHART_RELEASE_NAME" ] ; then + msg "Explicitly asked for component redeploy: ${HELM_CHART_RELEASE_NAME}" + _COMPONENTS="$HELM_CHART_RELEASE_NAME" + # simple heuristics: redeploy only failed components + else + msg "Delete successfully completed jobs..." + clean_jobs + + msg "Find failed components..." + _COMPONENTS=$(get_failed_labels) + fi + + for _component in ${_COMPONENTS} ; do + if echo "$_component" | grep -q "^${RELEASE_PREFIX}-" ; then + msg "Redeploy component: ${_component}" + redeploy_component ${_component} + else + error "Component release name '${_component}' does not match release prefix: ${RELEASE_PREFIX} (SKIP)" + fi + done +fi + +if [ -z "$HELM_SKIP_DEPLOY" ] ; then + # TODO: this is suboptimal - find a way how to deploy only the affected component... + msg "Redeploy onap..." 
+ msg helm deploy ${RELEASE_PREFIX} local/onap --namespace ${NAMESPACE} ${OVERRIDES} --timeout ${HELM_TIMEOUT} + helm deploy ${RELEASE_PREFIX} local/onap --namespace ${NAMESPACE} ${OVERRIDES} --timeout ${HELM_TIMEOUT} +else + msg "Clean only option used: Skipping redeploy..." +fi + +msg DONE + +exit $? + diff --git a/helm_deployment_status.py b/tools/helm_deployment_status.py index 8917e992..8917e992 100755 --- a/helm_deployment_status.py +++ b/tools/helm_deployment_status.py |
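For reference, the cicdansible plays and templates above consume roughly the following input variables; the names are taken from this patch, while the values are purely illustrative and are normally supplied through group_vars (or -e extra vars) together with the inventory:

  # openstack credentials and stack identification
  os_auth_url: https://keystone.example.com:5000/v3
  os_username: onap_ci
  os_password: secret
  os_domain_name: Default
  os_project_name: onap_ci
  stack_name: onap-ci
  # image, flavors and network layout used by heat/installer.yaml
  image_name: centos-7.6
  node_flavor_name: m1.xlarge
  infra_flavor_name: m1.large
  installer_flavor_name: m1.medium
  num_nodes: 3
  public_network: public
  external_subnet_cidr: 0.0.0.0/0
  subnet_cidr: 10.1.0.0/24
  subnet_range_start: 10.1.0.10
  subnet_range_end: 10.1.0.200
  router_addr: 10.1.0.1
  use_volume_for_nfs: true
  # pre-allocated floating ips
  installer_ip: 10.0.0.10
  infra_ip: 10.0.0.11
  first_node_ip: 10.0.0.12
  # resource host with the offline installer packages
  resource_host: 10.0.0.20
  resources_dir: /data/onap
  resources_sw_filename: sw_package.tar
  # registry and installer behaviour
  cicd_docker_registry: registry.example.com:5000
  install_app: true
  application_config: ''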