From d95bf2b91a8fc696fb3f002df589e90c460393df Mon Sep 17 00:00:00 2001 From: Gary Wu Date: Fri, 21 Jun 2019 15:35:18 -0700 Subject: Rename rancher VM to nfs Issue-ID: INT-1117 Signed-off-by: Gary Wu Change-Id: I97b333d61c2f12036f5a2e864fab160eacfb8b69 --- .../heat/onap-rke/env/windriver/onap-oom.env | 5 +- deployment/heat/onap-rke/k8s_vm_install.sh | 6 +- deployment/heat/onap-rke/nfs_vm_entrypoint.sh | 235 ++++++++++++++++++++ deployment/heat/onap-rke/onap-oom.yaml | 122 ++++------- deployment/heat/onap-rke/parts/onap-oom-1.yaml | 20 +- deployment/heat/onap-rke/parts/onap-oom-2.yaml | 5 +- deployment/heat/onap-rke/parts/onap-oom-3.yaml | 12 +- deployment/heat/onap-rke/rancher_vm_entrypoint.sh | 241 --------------------- deployment/heat/onap-rke/scripts/deploy.sh | 14 +- .../heat/onap-rke/scripts/gen-onap-oom-yaml.sh | 15 +- deployment/heat/onap-rke/scripts/redeploy.sh | 4 - 11 files changed, 315 insertions(+), 364 deletions(-) create mode 100644 deployment/heat/onap-rke/nfs_vm_entrypoint.sh delete mode 100644 deployment/heat/onap-rke/rancher_vm_entrypoint.sh (limited to 'deployment/heat') diff --git a/deployment/heat/onap-rke/env/windriver/onap-oom.env b/deployment/heat/onap-rke/env/windriver/onap-oom.env index fe3a3d350..db7459ed2 100644 --- a/deployment/heat/onap-rke/env/windriver/onap-oom.env +++ b/deployment/heat/onap-rke/env/windriver/onap-oom.env @@ -5,9 +5,8 @@ parameters: apt_proxy: 10.12.5.2:8000 docker_proxy: 10.12.5.2:5000 - rancher_vm_flavor: m1.large + nfs_vm_flavor: m1.large k8s_vm_flavor: m1.xlarge - etcd_vm_flavor: m1.medium orch_vm_flavor: m1.large public_net_id: 971040b2-7059-49dc-b220-4fab50cb2ad4 @@ -44,7 +43,7 @@ parameters: demoArtifactsVersion: "1.4.0" demoArtifactsRepoUrl: "https://nexus.onap.org/content/repositories/releases" scriptVersion: "1.4.0" - rancherIpAddress: "__rancher_ip_addr__" + nfsIpAddress: "__nfs_ip_addr__" config: openStackEncryptedPasswordHere: "${OS_PASSWORD_ENCRYPTED_FOR_ROBOT}" so: diff --git a/deployment/heat/onap-rke/k8s_vm_install.sh b/deployment/heat/onap-rke/k8s_vm_install.sh index cbd7be3d9..d8f833eed 100644 --- a/deployment/heat/onap-rke/k8s_vm_install.sh +++ b/deployment/heat/onap-rke/k8s_vm_install.sh @@ -13,8 +13,8 @@ printenv mkdir -p /opt/config echo "__docker_version__" > /opt/config/docker_version.txt -echo "__rancher_ip_addr__" > /opt/config/rancher_ip_addr.txt -echo "__rancher_private_ip_addr__" > /opt/config/rancher_private_ip_addr.txt +echo "__nfs_ip_addr__" > /opt/config/nfs_ip_addr.txt +echo "__nfs_private_ip_addr__" > /opt/config/nfs_private_ip_addr.txt echo "__host_private_ip_addr__" > /opt/config/host_private_ip_addr.txt echo "__mtu__" > /opt/config/mtu.txt @@ -42,7 +42,7 @@ fi mkdir -p /dockerdata-nfs -echo "__rancher_private_ip_addr__:/dockerdata-nfs /dockerdata-nfs nfs noauto,noatime,fg,retry=1,x-systemd.automount,_netdev,soft,nolock,intr,tcp,actimeo=1800 0 0" | tee -a /etc/fstab +echo "__nfs_private_ip_addr__:/dockerdata-nfs /dockerdata-nfs nfs noauto,noatime,fg,retry=1,x-systemd.automount,_netdev,soft,nolock,intr,tcp,actimeo=1800 0 0" | tee -a /etc/fstab # workaround for OpenStack intermittent failure to change default apt mirrors sed -i 's|http://archive.ubuntu.com|http://nova.clouds.archive.ubuntu.com|g' /etc/apt/sources.list diff --git a/deployment/heat/onap-rke/nfs_vm_entrypoint.sh b/deployment/heat/onap-rke/nfs_vm_entrypoint.sh new file mode 100644 index 000000000..9dada99bc --- /dev/null +++ b/deployment/heat/onap-rke/nfs_vm_entrypoint.sh @@ -0,0 +1,235 @@ +#!/bin/bash -x +# +# Copyright 2018 Huawei 
Technologies Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# + +# allow root login +export HOME=/root +mkdir -p ~/.ssh +cp ~ubuntu/.ssh/authorized_keys ~/.ssh + +export DEBIAN_FRONTEND=noninteractive +HOST_IP=$(hostname -I) +echo $HOST_IP `hostname` >> /etc/hosts +printenv + +mkdir -p /opt/config +echo "__nfs_ip_addr__" > /opt/config/nfs_ip_addr.txt +echo "__k8s_vm_ips__" > /opt/config/k8s_vm_ips.txt +echo "__k8s_private_ips__" > /opt/config/k8s_private_ips.txt +echo "__public_net_id__" > /opt/config/public_net_id.txt +echo "__oam_network_cidr__" > /opt/config/oam_network_cidr.txt +echo "__oam_network_id__" > /opt/config/oam_network_id.txt +echo "__oam_subnet_id__" > /opt/config/oam_subnet_id.txt +echo "__sec_group__" > /opt/config/sec_group.txt +echo "__integration_gerrit_branch__" > /opt/config/integration_gerrit_branch.txt +echo "__integration_gerrit_refspec__" > /opt/config/integration_gerrit_refspec.txt +echo "__oom_gerrit_branch__" > /opt/config/oom_gerrit_branch.txt +echo "__oom_gerrit_refspec__" > /opt/config/oom_gerrit_refspec.txt +echo "__docker_manifest__" > /opt/config/docker_manifest.txt +echo "__docker_proxy__" > /opt/config/docker_proxy.txt +echo "__docker_version__" > /opt/config/docker_version.txt +echo "__kubectl_version__" > /opt/config/kubectl_version.txt +echo "__helm_version__" > /opt/config/helm_version.txt +echo "__helm_deploy_delay__" > /opt/config/helm_deploy_delay.txt +echo "__mtu__" > /opt/config/mtu.txt +echo "__portal_hostname__" > /opt/config/portal_hostname.txt + +cat < /opt/config/integration-override.yaml +__integration_override_yaml__ +EOF +sed -i 's/\_\_portal_hostname__/__portal_hostname__/g' /opt/config/integration-override.yaml +sed -i 's/\_\_public_net_id__/__public_net_id__/g' /opt/config/integration-override.yaml +sed -i 's|\_\_oam_network_cidr__|__oam_network_cidr__|g' /opt/config/integration-override.yaml +sed -i 's/\_\_oam_network_id__/__oam_network_id__/g' /opt/config/integration-override.yaml +sed -i 's/\_\_oam_subnet_id__/__oam_subnet_id__/g' /opt/config/integration-override.yaml +sed -i 's/\_\_sec_group__/__sec_group__/g' /opt/config/integration-override.yaml +sed -i 's/\_\_nfs_ip_addr__/__nfs_ip_addr__/g' /opt/config/integration-override.yaml +sed -i 's/\_\_k8s_01_vm_ip__/__k8s_01_vm_ip__/g' /opt/config/integration-override.yaml +sed -i 's/\_\_docker_proxy__/__docker_proxy__/g' /opt/config/integration-override.yaml +cp /opt/config/integration-override.yaml /root +cat /root/integration-override.yaml + +mkdir -p /etc/docker +if [ ! 
-z "__docker_proxy__" ]; then + cat > /etc/docker/daemon.json < /etc/docker/daemon.json < /etc/apt/apt.conf.d/30proxy< /dev/null; do + apt-get -y update + apt-get -y install curl jq make nfs-kernel-server moreutils + sleep 10 +done + +mkdir -p /dockerdata-nfs + +# update and initialize git +git config --global user.email root@nfs +git config --global user.name root@nfs +git config --global log.decorate auto + +# version control the persistence volume to see what's happening +chmod 777 /dockerdata-nfs/ +chown nobody:nogroup /dockerdata-nfs/ +cd /dockerdata-nfs/ +git init +git add -A +git commit -m "initial commit" + +# export NFS mount +echo "/dockerdata-nfs *(rw,fsid=1,async,no_root_squash,no_subtree_check)" | tee /etc/exports +exportfs -a +systemctl restart nfs-kernel-server + + + +cd ~ + +# install kubectl __kubectl_version__ +curl -s -LO https://storage.googleapis.com/kubernetes-release/release/v__kubectl_version__/bin/linux/amd64/kubectl +chmod +x ./kubectl +sudo mv ./kubectl /usr/local/bin/kubectl +mkdir -p ~/.kube + +# install helm __helm_version__ +mkdir -p helm +pushd helm +wget -q http://storage.googleapis.com/kubernetes-helm/helm-v__helm_version__-linux-amd64.tar.gz +tar -zxvf helm-v__helm_version__-linux-amd64.tar.gz +sudo cp linux-amd64/helm /usr/local/bin/helm +popd + + + + +# Clone OOM repo +cd ~ +git clone --recurse-submodules -b __oom_gerrit_branch__ https://gerrit.onap.org/r/oom +cd oom +if [ ! -z "__oom_gerrit_refspec__" ]; then + git fetch https://gerrit.onap.org/r/oom __oom_gerrit_refspec__ + git checkout FETCH_HEAD +fi +git checkout -b workarounds +git log -1 + +# Clone integration repo +cd ~ +git clone -b __integration_gerrit_branch__ https://gerrit.onap.org/r/integration +cd integration +if [ ! -z "__integration_gerrit_refspec__" ]; then + git fetch https://gerrit.onap.org/r/integration __integration_gerrit_refspec__ + git checkout FETCH_HEAD +fi + + +if [ ! -z "__docker_manifest__" ]; then + cd version-manifest/src/main/scripts + ./update-oom-image-versions.sh ../resources/__docker_manifest__ ~/oom/ +fi + +cd ~/oom +git diff +git submodule foreach --recursive 'git commit -a -m "apply manifest versions" || :' +git commit -a -m "apply manifest versions" + +cd ~/oom +# workaround to change onap portal cookie domain +#sed -i "s/^cookie_domain.*=.*/cookie_domain = __portal_hostname__/g" ./kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/system.properties +#sed -i "s/^cookie_domain.*=.*/cookie_domain = __portal_hostname__/g" ./kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/system.properties +git diff +git commit -a -m "set portal cookie domain" + +git tag -a "deploy0" -m "initial deployment" + + + + + + + +# wait for /root/.kube/config to show up; will be placed by deploy script after RKE completes +while [ ! 
-e /root/.kube/config ]; do + sleep 1m +done + + +NAMESPACE=onap +export KUBECONFIG=/root/.kube/config +kubectl config set-context $(kubectl config current-context) --namespace=$NAMESPACE +kubectl config view + + +# Enable auto-completion for kubectl +echo "source <(kubectl completion bash)" >> ~/.bashrc + + +until [ $(kubectl get cs | tail -n +2 | grep -c Healthy) -ge 5 ]; do + sleep 1m +done + + +# install tiller/helm +kubectl -n kube-system create serviceaccount tiller +kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller +helm init --service-account tiller +kubectl -n kube-system rollout status deploy/tiller-deploy +helm serve & +sleep 10 + +# Make ONAP helm charts +cd ~/oom/kubernetes/ +helm repo add local http://127.0.0.1:8879 +helm repo list +make all +helm search -l | grep local + +# install helm deploy plugin +rsync -avt ~/oom/kubernetes/helm/plugins ~/.helm/ +# temporary workaround to throttle the helm deploy to alleviate startup disk contention issues +if [ ! -z "__helm_deploy_delay__" ]; then + sed -i "/\^enabled:/a\ echo sleep __helm_deploy_delay__\n sleep __helm_deploy_delay__" ~/.helm/plugins/deploy/deploy.sh + sed -i 's/for subchart in \*/for subchart in aaf cassandra mariadb-galera dmaap */' ~/.helm/plugins/deploy/deploy.sh +fi + +# Deploy ONAP +helm deploy dev local/onap -f ~/oom/kubernetes/onap/resources/environments/public-cloud.yaml -f ~/integration-override.yaml --namespace $NAMESPACE --verbose + +# re-install original helm deploy plugin +rsync -avt ~/oom/kubernetes/helm/plugins ~/.helm/ + +helm list + + + +# Check ONAP status: +sleep 10 +kubectl get pods --all-namespaces +kubectl get nodes +kubectl top nodes diff --git a/deployment/heat/onap-rke/onap-oom.yaml b/deployment/heat/onap-rke/onap-oom.yaml index baab5570e..075ed66d2 100644 --- a/deployment/heat/onap-rke/onap-oom.yaml +++ b/deployment/heat/onap-rke/onap-oom.yaml @@ -27,18 +27,14 @@ parameters: type: string description: Name of the Ubuntu 18.04 image - rancher_vm_flavor: + nfs_vm_flavor: type: string - description: VM flavor for Rancher + description: VM flavor for Nfs k8s_vm_flavor: type: string description: VM flavor for k8s hosts - etcd_vm_flavor: - type: string - description: VM flavor for etcd hosts - orch_vm_flavor: type: string description: VM flavor for orch hosts @@ -87,11 +83,6 @@ parameters: type: string default: "3m" - use_ramdisk: - type: string - description: Set to "true" if you want to use a RAM disk for /dockerdata-nfs/. 
- default: "false" - mtu: type: number default: 1500 @@ -194,7 +185,7 @@ resources: router_id: { get_resource: router } subnet_id: { get_resource: oam_ext_subnet } - rancher_private_port: + nfs_private_port: type: OS::Neutron::Port properties: network: { get_resource: oam_network } @@ -202,32 +193,31 @@ resources: security_groups: - { get_resource: onap_sg } - rancher_floating_ip: + nfs_floating_ip: type: OS::Neutron::FloatingIP properties: floating_network_id: { get_param: public_net_id } - port_id: { get_resource: rancher_private_port } - - rancher_vm: + port_id: { get_resource: nfs_private_port } + nfs_vm: type: OS::Nova::Server properties: name: - list_join: ['-', [{ get_param: 'OS::stack_name' }, 'rancher']] + list_join: ['-', [{ get_param: 'OS::stack_name' }, 'nfs']] image: { get_param: ubuntu_1804_image } - flavor: { get_param: rancher_vm_flavor } + flavor: { get_param: nfs_vm_flavor } key_name: { get_param: key_name } networks: - - port: { get_resource: rancher_private_port } + - port: { get_resource: nfs_private_port } user_data_format: RAW user_data: str_replace: template: - get_file: rancher_vm_entrypoint.sh + get_file: nfs_vm_entrypoint.sh params: __docker_proxy__: { get_param: docker_proxy } __apt_proxy__: { get_param: apt_proxy } - __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } - __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + __nfs_ip_addr__: { get_attr: [nfs_floating_ip, floating_ip_address] } + __nfs_private_ip_addr__: { get_attr: [nfs_floating_ip, fixed_ip_address] } __integration_override_yaml__: { get_param: integration_override_yaml } __integration_gerrit_branch__: { get_param: integration_gerrit_branch } __integration_gerrit_refspec__: { get_param: integration_gerrit_refspec } @@ -238,7 +228,6 @@ resources: __kubectl_version__: { get_param: kubectl_version } __helm_version__: { get_param: helm_version } __helm_deploy_delay__: { get_param: helm_deploy_delay } - __use_ramdisk__: { get_param: use_ramdisk } __mtu__: { get_param: mtu } __portal_hostname__: { get_param: portal_hostname } __public_net_id__: { get_param: public_net_id } @@ -306,8 +295,8 @@ resources: __docker_proxy__: { get_param: docker_proxy } __apt_proxy__: { get_param: apt_proxy } __docker_version__: { get_param: docker_version } - __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } - __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + __nfs_ip_addr__: { get_attr: [nfs_floating_ip, floating_ip_address] } + __nfs_private_ip_addr__: { get_attr: [nfs_floating_ip, fixed_ip_address] } __host_private_ip_addr__: { get_attr: [k8s_01_floating_ip, fixed_ip_address] } __mtu__: { get_param: mtu } template: @@ -344,7 +333,6 @@ resources: - port: { get_resource: k8s_01_private_port } user_data_format: SOFTWARE_CONFIG user_data: { get_resource: k8s_01_vm_config } - k8s_02_private_port: type: OS::Neutron::Port properties: @@ -376,8 +364,8 @@ resources: __docker_proxy__: { get_param: docker_proxy } __apt_proxy__: { get_param: apt_proxy } __docker_version__: { get_param: docker_version } - __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } - __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + __nfs_ip_addr__: { get_attr: [nfs_floating_ip, floating_ip_address] } + __nfs_private_ip_addr__: { get_attr: [nfs_floating_ip, fixed_ip_address] } __host_private_ip_addr__: { get_attr: [k8s_02_floating_ip, fixed_ip_address] } __mtu__: { get_param: mtu 
} template: @@ -414,7 +402,6 @@ resources: - port: { get_resource: k8s_02_private_port } user_data_format: SOFTWARE_CONFIG user_data: { get_resource: k8s_02_vm_config } - k8s_03_private_port: type: OS::Neutron::Port properties: @@ -446,8 +433,8 @@ resources: __docker_proxy__: { get_param: docker_proxy } __apt_proxy__: { get_param: apt_proxy } __docker_version__: { get_param: docker_version } - __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } - __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + __nfs_ip_addr__: { get_attr: [nfs_floating_ip, floating_ip_address] } + __nfs_private_ip_addr__: { get_attr: [nfs_floating_ip, fixed_ip_address] } __host_private_ip_addr__: { get_attr: [k8s_03_floating_ip, fixed_ip_address] } __mtu__: { get_param: mtu } template: @@ -484,7 +471,6 @@ resources: - port: { get_resource: k8s_03_private_port } user_data_format: SOFTWARE_CONFIG user_data: { get_resource: k8s_03_vm_config } - k8s_04_private_port: type: OS::Neutron::Port properties: @@ -516,8 +502,8 @@ resources: __docker_proxy__: { get_param: docker_proxy } __apt_proxy__: { get_param: apt_proxy } __docker_version__: { get_param: docker_version } - __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } - __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + __nfs_ip_addr__: { get_attr: [nfs_floating_ip, floating_ip_address] } + __nfs_private_ip_addr__: { get_attr: [nfs_floating_ip, fixed_ip_address] } __host_private_ip_addr__: { get_attr: [k8s_04_floating_ip, fixed_ip_address] } __mtu__: { get_param: mtu } template: @@ -554,7 +540,6 @@ resources: - port: { get_resource: k8s_04_private_port } user_data_format: SOFTWARE_CONFIG user_data: { get_resource: k8s_04_vm_config } - k8s_05_private_port: type: OS::Neutron::Port properties: @@ -586,8 +571,8 @@ resources: __docker_proxy__: { get_param: docker_proxy } __apt_proxy__: { get_param: apt_proxy } __docker_version__: { get_param: docker_version } - __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } - __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + __nfs_ip_addr__: { get_attr: [nfs_floating_ip, floating_ip_address] } + __nfs_private_ip_addr__: { get_attr: [nfs_floating_ip, fixed_ip_address] } __host_private_ip_addr__: { get_attr: [k8s_05_floating_ip, fixed_ip_address] } __mtu__: { get_param: mtu } template: @@ -624,7 +609,6 @@ resources: - port: { get_resource: k8s_05_private_port } user_data_format: SOFTWARE_CONFIG user_data: { get_resource: k8s_05_vm_config } - k8s_06_private_port: type: OS::Neutron::Port properties: @@ -656,8 +640,8 @@ resources: __docker_proxy__: { get_param: docker_proxy } __apt_proxy__: { get_param: apt_proxy } __docker_version__: { get_param: docker_version } - __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } - __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + __nfs_ip_addr__: { get_attr: [nfs_floating_ip, floating_ip_address] } + __nfs_private_ip_addr__: { get_attr: [nfs_floating_ip, fixed_ip_address] } __host_private_ip_addr__: { get_attr: [k8s_06_floating_ip, fixed_ip_address] } __mtu__: { get_param: mtu } template: @@ -694,7 +678,6 @@ resources: - port: { get_resource: k8s_06_private_port } user_data_format: SOFTWARE_CONFIG user_data: { get_resource: k8s_06_vm_config } - k8s_07_private_port: type: OS::Neutron::Port properties: @@ -726,8 +709,8 @@ resources: __docker_proxy__: { get_param: 
docker_proxy } __apt_proxy__: { get_param: apt_proxy } __docker_version__: { get_param: docker_version } - __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } - __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + __nfs_ip_addr__: { get_attr: [nfs_floating_ip, floating_ip_address] } + __nfs_private_ip_addr__: { get_attr: [nfs_floating_ip, fixed_ip_address] } __host_private_ip_addr__: { get_attr: [k8s_07_floating_ip, fixed_ip_address] } __mtu__: { get_param: mtu } template: @@ -764,7 +747,6 @@ resources: - port: { get_resource: k8s_07_private_port } user_data_format: SOFTWARE_CONFIG user_data: { get_resource: k8s_07_vm_config } - k8s_08_private_port: type: OS::Neutron::Port properties: @@ -796,8 +778,8 @@ resources: __docker_proxy__: { get_param: docker_proxy } __apt_proxy__: { get_param: apt_proxy } __docker_version__: { get_param: docker_version } - __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } - __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + __nfs_ip_addr__: { get_attr: [nfs_floating_ip, floating_ip_address] } + __nfs_private_ip_addr__: { get_attr: [nfs_floating_ip, fixed_ip_address] } __host_private_ip_addr__: { get_attr: [k8s_08_floating_ip, fixed_ip_address] } __mtu__: { get_param: mtu } template: @@ -834,7 +816,6 @@ resources: - port: { get_resource: k8s_08_private_port } user_data_format: SOFTWARE_CONFIG user_data: { get_resource: k8s_08_vm_config } - k8s_09_private_port: type: OS::Neutron::Port properties: @@ -866,8 +847,8 @@ resources: __docker_proxy__: { get_param: docker_proxy } __apt_proxy__: { get_param: apt_proxy } __docker_version__: { get_param: docker_version } - __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } - __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + __nfs_ip_addr__: { get_attr: [nfs_floating_ip, floating_ip_address] } + __nfs_private_ip_addr__: { get_attr: [nfs_floating_ip, fixed_ip_address] } __host_private_ip_addr__: { get_attr: [k8s_09_floating_ip, fixed_ip_address] } __mtu__: { get_param: mtu } template: @@ -904,7 +885,6 @@ resources: - port: { get_resource: k8s_09_private_port } user_data_format: SOFTWARE_CONFIG user_data: { get_resource: k8s_09_vm_config } - k8s_10_private_port: type: OS::Neutron::Port properties: @@ -936,8 +916,8 @@ resources: __docker_proxy__: { get_param: docker_proxy } __apt_proxy__: { get_param: apt_proxy } __docker_version__: { get_param: docker_version } - __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } - __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + __nfs_ip_addr__: { get_attr: [nfs_floating_ip, floating_ip_address] } + __nfs_private_ip_addr__: { get_attr: [nfs_floating_ip, fixed_ip_address] } __host_private_ip_addr__: { get_attr: [k8s_10_floating_ip, fixed_ip_address] } __mtu__: { get_param: mtu } template: @@ -974,7 +954,6 @@ resources: - port: { get_resource: k8s_10_private_port } user_data_format: SOFTWARE_CONFIG user_data: { get_resource: k8s_10_vm_config } - k8s_11_private_port: type: OS::Neutron::Port properties: @@ -1006,8 +985,8 @@ resources: __docker_proxy__: { get_param: docker_proxy } __apt_proxy__: { get_param: apt_proxy } __docker_version__: { get_param: docker_version } - __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } - __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + __nfs_ip_addr__: { 
get_attr: [nfs_floating_ip, floating_ip_address] } + __nfs_private_ip_addr__: { get_attr: [nfs_floating_ip, fixed_ip_address] } __host_private_ip_addr__: { get_attr: [k8s_11_floating_ip, fixed_ip_address] } __mtu__: { get_param: mtu } template: @@ -1044,7 +1023,6 @@ resources: - port: { get_resource: k8s_11_private_port } user_data_format: SOFTWARE_CONFIG user_data: { get_resource: k8s_11_vm_config } - k8s_12_private_port: type: OS::Neutron::Port properties: @@ -1076,8 +1054,8 @@ resources: __docker_proxy__: { get_param: docker_proxy } __apt_proxy__: { get_param: apt_proxy } __docker_version__: { get_param: docker_version } - __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } - __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + __nfs_ip_addr__: { get_attr: [nfs_floating_ip, floating_ip_address] } + __nfs_private_ip_addr__: { get_attr: [nfs_floating_ip, fixed_ip_address] } __host_private_ip_addr__: { get_attr: [k8s_12_floating_ip, fixed_ip_address] } __mtu__: { get_param: mtu } template: @@ -1114,7 +1092,6 @@ resources: - port: { get_resource: k8s_12_private_port } user_data_format: SOFTWARE_CONFIG user_data: { get_resource: k8s_12_vm_config } - orch_1_private_port: type: OS::Neutron::Port properties: @@ -1146,8 +1123,8 @@ resources: __docker_proxy__: { get_param: docker_proxy } __apt_proxy__: { get_param: apt_proxy } __docker_version__: { get_param: docker_version } - __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } - __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + __nfs_ip_addr__: { get_attr: [nfs_floating_ip, floating_ip_address] } + __nfs_private_ip_addr__: { get_attr: [nfs_floating_ip, fixed_ip_address] } __host_private_ip_addr__: { get_attr: [orch_1_floating_ip, fixed_ip_address] } __mtu__: { get_param: mtu } template: @@ -1184,7 +1161,6 @@ resources: - port: { get_resource: orch_1_private_port } user_data_format: SOFTWARE_CONFIG user_data: { get_resource: orch_1_vm_config } - orch_2_private_port: type: OS::Neutron::Port properties: @@ -1216,8 +1192,8 @@ resources: __docker_proxy__: { get_param: docker_proxy } __apt_proxy__: { get_param: apt_proxy } __docker_version__: { get_param: docker_version } - __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } - __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + __nfs_ip_addr__: { get_attr: [nfs_floating_ip, floating_ip_address] } + __nfs_private_ip_addr__: { get_attr: [nfs_floating_ip, fixed_ip_address] } __host_private_ip_addr__: { get_attr: [orch_2_floating_ip, fixed_ip_address] } __mtu__: { get_param: mtu } template: @@ -1254,7 +1230,6 @@ resources: - port: { get_resource: orch_2_private_port } user_data_format: SOFTWARE_CONFIG user_data: { get_resource: orch_2_vm_config } - orch_3_private_port: type: OS::Neutron::Port properties: @@ -1286,8 +1261,8 @@ resources: __docker_proxy__: { get_param: docker_proxy } __apt_proxy__: { get_param: apt_proxy } __docker_version__: { get_param: docker_version } - __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } - __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + __nfs_ip_addr__: { get_attr: [nfs_floating_ip, floating_ip_address] } + __nfs_private_ip_addr__: { get_attr: [nfs_floating_ip, fixed_ip_address] } __host_private_ip_addr__: { get_attr: [orch_3_floating_ip, fixed_ip_address] } __mtu__: { get_param: mtu } template: @@ -1324,18 +1299,17 @@ resources: - 
port: { get_resource: orch_3_private_port } user_data_format: SOFTWARE_CONFIG user_data: { get_resource: orch_3_vm_config } - outputs: docker_proxy: value: { get_param: docker_proxy } - rancher_vm_ip: - description: The IP address of the rancher instance - value: { get_attr: [rancher_floating_ip, floating_ip_address] } + nfs_vm_ip: + description: The IP address of the nfs instance + value: { get_attr: [nfs_floating_ip, floating_ip_address] } - rancher_vm_private_ip: - description: The private IP address of the rancher instance - value: { get_attr: [rancher_floating_ip, fixed_ip_address] } + nfs_vm_private_ip: + description: The private IP address of the nfs instance + value: { get_attr: [nfs_floating_ip, fixed_ip_address] } k8s_01_vm_ip: description: The IP address of the k8s_01 instance diff --git a/deployment/heat/onap-rke/parts/onap-oom-1.yaml b/deployment/heat/onap-rke/parts/onap-oom-1.yaml index 0e0cb0fcd..d210f985d 100644 --- a/deployment/heat/onap-rke/parts/onap-oom-1.yaml +++ b/deployment/heat/onap-rke/parts/onap-oom-1.yaml @@ -24,18 +24,14 @@ parameters: type: string description: Name of the Ubuntu 18.04 image - rancher_vm_flavor: + nfs_vm_flavor: type: string - description: VM flavor for Rancher + description: VM flavor for Nfs k8s_vm_flavor: type: string description: VM flavor for k8s hosts - etcd_vm_flavor: - type: string - description: VM flavor for etcd hosts - orch_vm_flavor: type: string description: VM flavor for orch hosts @@ -84,11 +80,6 @@ parameters: type: string default: "3m" - use_ramdisk: - type: string - description: Set to "true" if you want to use a RAM disk for /dockerdata-nfs/. - default: "false" - mtu: type: number default: 1500 @@ -191,7 +182,7 @@ resources: router_id: { get_resource: router } subnet_id: { get_resource: oam_ext_subnet } - rancher_private_port: + nfs_private_port: type: OS::Neutron::Port properties: network: { get_resource: oam_network } @@ -199,9 +190,8 @@ resources: security_groups: - { get_resource: onap_sg } - rancher_floating_ip: + nfs_floating_ip: type: OS::Neutron::FloatingIP properties: floating_network_id: { get_param: public_net_id } - port_id: { get_resource: rancher_private_port } - + port_id: { get_resource: nfs_private_port } diff --git a/deployment/heat/onap-rke/parts/onap-oom-2.yaml b/deployment/heat/onap-rke/parts/onap-oom-2.yaml index bd4ba1fc0..1b63e4165 100644 --- a/deployment/heat/onap-rke/parts/onap-oom-2.yaml +++ b/deployment/heat/onap-rke/parts/onap-oom-2.yaml @@ -29,8 +29,8 @@ __docker_proxy__: { get_param: docker_proxy } __apt_proxy__: { get_param: apt_proxy } __docker_version__: { get_param: docker_version } - __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } - __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + __nfs_ip_addr__: { get_attr: [nfs_floating_ip, floating_ip_address] } + __nfs_private_ip_addr__: { get_attr: [nfs_floating_ip, fixed_ip_address] } __host_private_ip_addr__: { get_attr: [${VM_TYPE}_${VM_NUM}_floating_ip, fixed_ip_address] } __mtu__: { get_param: mtu } template: @@ -67,4 +67,3 @@ - port: { get_resource: ${VM_TYPE}_${VM_NUM}_private_port } user_data_format: SOFTWARE_CONFIG user_data: { get_resource: ${VM_TYPE}_${VM_NUM}_vm_config } - diff --git a/deployment/heat/onap-rke/parts/onap-oom-3.yaml b/deployment/heat/onap-rke/parts/onap-oom-3.yaml index ca71d61fd..b70150240 100644 --- a/deployment/heat/onap-rke/parts/onap-oom-3.yaml +++ b/deployment/heat/onap-rke/parts/onap-oom-3.yaml @@ -2,11 +2,11 @@ outputs: docker_proxy: value: { 
get_param: docker_proxy } - rancher_vm_ip: - description: The IP address of the rancher instance - value: { get_attr: [rancher_floating_ip, floating_ip_address] } + nfs_vm_ip: + description: The IP address of the nfs instance + value: { get_attr: [nfs_floating_ip, floating_ip_address] } - rancher_vm_private_ip: - description: The private IP address of the rancher instance - value: { get_attr: [rancher_floating_ip, fixed_ip_address] } + nfs_vm_private_ip: + description: The private IP address of the nfs instance + value: { get_attr: [nfs_floating_ip, fixed_ip_address] } diff --git a/deployment/heat/onap-rke/rancher_vm_entrypoint.sh b/deployment/heat/onap-rke/rancher_vm_entrypoint.sh deleted file mode 100644 index 47cd01230..000000000 --- a/deployment/heat/onap-rke/rancher_vm_entrypoint.sh +++ /dev/null @@ -1,241 +0,0 @@ -#!/bin/bash -x -# -# Copyright 2018 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# - -# allow root login -export HOME=/root -mkdir -p ~/.ssh -cp ~ubuntu/.ssh/authorized_keys ~/.ssh - -export DEBIAN_FRONTEND=noninteractive -HOST_IP=$(hostname -I) -echo $HOST_IP `hostname` >> /etc/hosts -printenv - -mkdir -p /opt/config -echo "__rancher_ip_addr__" > /opt/config/rancher_ip_addr.txt -echo "__k8s_vm_ips__" > /opt/config/k8s_vm_ips.txt -echo "__k8s_private_ips__" > /opt/config/k8s_private_ips.txt -echo "__public_net_id__" > /opt/config/public_net_id.txt -echo "__oam_network_cidr__" > /opt/config/oam_network_cidr.txt -echo "__oam_network_id__" > /opt/config/oam_network_id.txt -echo "__oam_subnet_id__" > /opt/config/oam_subnet_id.txt -echo "__sec_group__" > /opt/config/sec_group.txt -echo "__integration_gerrit_branch__" > /opt/config/integration_gerrit_branch.txt -echo "__integration_gerrit_refspec__" > /opt/config/integration_gerrit_refspec.txt -echo "__oom_gerrit_branch__" > /opt/config/oom_gerrit_branch.txt -echo "__oom_gerrit_refspec__" > /opt/config/oom_gerrit_refspec.txt -echo "__docker_manifest__" > /opt/config/docker_manifest.txt -echo "__docker_proxy__" > /opt/config/docker_proxy.txt -echo "__docker_version__" > /opt/config/docker_version.txt -echo "__kubectl_version__" > /opt/config/kubectl_version.txt -echo "__helm_version__" > /opt/config/helm_version.txt -echo "__helm_deploy_delay__" > /opt/config/helm_deploy_delay.txt -echo "__mtu__" > /opt/config/mtu.txt -echo "__portal_hostname__" > /opt/config/portal_hostname.txt - -cat < /opt/config/integration-override.yaml -__integration_override_yaml__ -EOF -sed -i 's/\_\_portal_hostname__/__portal_hostname__/g' /opt/config/integration-override.yaml -sed -i 's/\_\_public_net_id__/__public_net_id__/g' /opt/config/integration-override.yaml -sed -i 's|\_\_oam_network_cidr__|__oam_network_cidr__|g' /opt/config/integration-override.yaml -sed -i 's/\_\_oam_network_id__/__oam_network_id__/g' /opt/config/integration-override.yaml -sed -i 's/\_\_oam_subnet_id__/__oam_subnet_id__/g' /opt/config/integration-override.yaml -sed -i 's/\_\_sec_group__/__sec_group__/g' /opt/config/integration-override.yaml -sed -i 's/\_\_rancher_ip_addr__/__rancher_ip_addr__/g' /opt/config/integration-override.yaml -sed -i 's/\_\_k8s_01_vm_ip__/__k8s_01_vm_ip__/g' /opt/config/integration-override.yaml -sed -i 's/\_\_docker_proxy__/__docker_proxy__/g' /opt/config/integration-override.yaml -cp /opt/config/integration-override.yaml /root -cat 
/root/integration-override.yaml - -mkdir -p /etc/docker -if [ ! -z "__docker_proxy__" ]; then - cat > /etc/docker/daemon.json < /etc/docker/daemon.json < /etc/apt/apt.conf.d/30proxy< /dev/null; do - apt-get -y update - apt-get -y install curl jq make nfs-kernel-server moreutils - sleep 10 -done - -mkdir -p /dockerdata-nfs - -# use RAM disk for /dockerdata-nfs for testing -if [ "__use_ramdisk__" = "true" ]; then - echo "tmpfs /dockerdata-nfs tmpfs noatime,size=75% 1 2" >> /etc/fstab - mount /dockerdata-nfs -fi - -# update and initialize git -git config --global user.email root@rancher -git config --global user.name root@rancher -git config --global log.decorate auto - -# version control the persistence volume to see what's happening -chmod 777 /dockerdata-nfs/ -chown nobody:nogroup /dockerdata-nfs/ -cd /dockerdata-nfs/ -git init -git add -A -git commit -m "initial commit" - -# export NFS mount -echo "/dockerdata-nfs *(rw,fsid=1,async,no_root_squash,no_subtree_check)" | tee /etc/exports -exportfs -a -systemctl restart nfs-kernel-server - - - -cd ~ - -# install kubectl __kubectl_version__ -curl -s -LO https://storage.googleapis.com/kubernetes-release/release/v__kubectl_version__/bin/linux/amd64/kubectl -chmod +x ./kubectl -sudo mv ./kubectl /usr/local/bin/kubectl -mkdir -p ~/.kube - -# install helm __helm_version__ -mkdir -p helm -pushd helm -wget -q http://storage.googleapis.com/kubernetes-helm/helm-v__helm_version__-linux-amd64.tar.gz -tar -zxvf helm-v__helm_version__-linux-amd64.tar.gz -sudo cp linux-amd64/helm /usr/local/bin/helm -popd - - - - -# Clone OOM repo -cd ~ -git clone --recurse-submodules -b __oom_gerrit_branch__ https://gerrit.onap.org/r/oom -cd oom -if [ ! -z "__oom_gerrit_refspec__" ]; then - git fetch https://gerrit.onap.org/r/oom __oom_gerrit_refspec__ - git checkout FETCH_HEAD -fi -git checkout -b workarounds -git log -1 - -# Clone integration repo -cd ~ -git clone -b __integration_gerrit_branch__ https://gerrit.onap.org/r/integration -cd integration -if [ ! -z "__integration_gerrit_refspec__" ]; then - git fetch https://gerrit.onap.org/r/integration __integration_gerrit_refspec__ - git checkout FETCH_HEAD -fi - - -if [ ! -z "__docker_manifest__" ]; then - cd version-manifest/src/main/scripts - ./update-oom-image-versions.sh ../resources/__docker_manifest__ ~/oom/ -fi - -cd ~/oom -git diff -git submodule foreach --recursive 'git commit -a -m "apply manifest versions" || :' -git commit -a -m "apply manifest versions" - -cd ~/oom -# workaround to change onap portal cookie domain -#sed -i "s/^cookie_domain.*=.*/cookie_domain = __portal_hostname__/g" ./kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/system.properties -#sed -i "s/^cookie_domain.*=.*/cookie_domain = __portal_hostname__/g" ./kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/system.properties -git diff -git commit -a -m "set portal cookie domain" - -git tag -a "deploy0" -m "initial deployment" - - - - - - - -# wait for /root/.kube/config to show up; will be placed by deploy script after RKE completes -while [ ! 
-e /root/.kube/config ]; do - sleep 1m -done - - -NAMESPACE=onap -export KUBECONFIG=/root/.kube/config -kubectl config set-context $(kubectl config current-context) --namespace=$NAMESPACE -kubectl config view - - -# Enable auto-completion for kubectl -echo "source <(kubectl completion bash)" >> ~/.bashrc - - -until [ $(kubectl get cs | tail -n +2 | grep -c Healthy) -ge 5 ]; do - sleep 1m -done - - -# install tiller/helm -kubectl -n kube-system create serviceaccount tiller -kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller -helm init --service-account tiller -kubectl -n kube-system rollout status deploy/tiller-deploy -helm serve & -sleep 10 - -# Make ONAP helm charts -cd ~/oom/kubernetes/ -helm repo add local http://127.0.0.1:8879 -helm repo list -make all -helm search -l | grep local - -# install helm deploy plugin -rsync -avt ~/oom/kubernetes/helm/plugins ~/.helm/ -# temporary workaround to throttle the helm deploy to alleviate startup disk contention issues -if [ ! -z "__helm_deploy_delay__" ]; then - sed -i "/\^enabled:/a\ echo sleep __helm_deploy_delay__\n sleep __helm_deploy_delay__" ~/.helm/plugins/deploy/deploy.sh - sed -i 's/for subchart in \*/for subchart in aaf cassandra mariadb-galera dmaap */' ~/.helm/plugins/deploy/deploy.sh -fi - -# Deploy ONAP -helm deploy dev local/onap -f ~/oom/kubernetes/onap/resources/environments/public-cloud.yaml -f ~/integration-override.yaml --namespace $NAMESPACE --verbose - -# re-install original helm deploy plugin -rsync -avt ~/oom/kubernetes/helm/plugins ~/.helm/ - -helm list - - - -# Check ONAP status: -sleep 10 -kubectl get pods --all-namespaces -kubectl get nodes -kubectl top nodes diff --git a/deployment/heat/onap-rke/scripts/deploy.sh b/deployment/heat/onap-rke/scripts/deploy.sh index ecefe2aaf..d26916e87 100755 --- a/deployment/heat/onap-rke/scripts/deploy.sh +++ b/deployment/heat/onap-rke/scripts/deploy.sh @@ -156,18 +156,18 @@ for n in $(seq 1 5); do for i in $(seq 1 30); do sleep 30 - RANCHER_IP=$(openstack stack output show $stack_name rancher_vm_ip -c output_value -f value) + NFS_IP=$(openstack stack output show $stack_name nfs_vm_ip -c output_value -f value) K8S_IP=$(openstack stack output show $stack_name k8s_01_vm_ip -c output_value -f value) - timeout 1 ping -c 1 "$RANCHER_IP" && break + timeout 1 ping -c 1 "$NFS_IP" && break done - timeout 1 ping -c 1 "$RANCHER_IP" && break + timeout 1 ping -c 1 "$NFS_IP" && break - echo Error: OpenStack infrastructure issue: unable to reach rancher "$RANCHER_IP" + echo Error: OpenStack infrastructure issue: unable to reach NFS server "$NFS_IP" sleep 10 done -if ! timeout 1 ping -c 1 "$RANCHER_IP"; then +if ! 
timeout 1 ping -c 1 "$NFS_IP"; then exit 2 fi @@ -270,11 +270,11 @@ until ./rke up; do ./rke remove done -scp -i $SSH_KEY ./kube_config_cluster.yml root@$RANCHER_IP:/root/.kube/config +scp -i $SSH_KEY ./kube_config_cluster.yml root@$NFS_IP:/root/.kube/config popd sleep 2m -ssh -o StrictHostKeychecking=no -i $SSH_KEY ubuntu@$RANCHER_IP "sed -u '/Cloud-init.*finished/q' <(tail -n+0 -f /var/log/cloud-init-output.log)" +ssh -o StrictHostKeychecking=no -i $SSH_KEY ubuntu@$NFS_IP "sed -u '/Cloud-init.*finished/q' <(tail -n+0 -f /var/log/cloud-init-output.log)" exit 0 diff --git a/deployment/heat/onap-rke/scripts/gen-onap-oom-yaml.sh b/deployment/heat/onap-rke/scripts/gen-onap-oom-yaml.sh index 45a0af0f2..32fe147dc 100755 --- a/deployment/heat/onap-rke/scripts/gen-onap-oom-yaml.sh +++ b/deployment/heat/onap-rke/scripts/gen-onap-oom-yaml.sh @@ -30,26 +30,26 @@ EOF cat $PARTS_DIR/onap-oom-1.yaml cat <