aboutsummaryrefslogtreecommitdiffstats
path: root/test
diff options
context:
space:
mode:
Diffstat (limited to 'test')
-rw-r--r--test/ete/labs/huawei/k8s_vm_entrypoint.sh173
-rw-r--r--test/ete/labs/huawei/onap-oom.yaml197
-rw-r--r--test/ete/labs/huawei/rancher_vm_entrypoint.sh18
3 files changed, 195 insertions, 193 deletions
diff --git a/test/ete/labs/huawei/k8s_vm_entrypoint.sh b/test/ete/labs/huawei/k8s_vm_entrypoint.sh
new file mode 100644
index 000000000..0dc432b10
--- /dev/null
+++ b/test/ete/labs/huawei/k8s_vm_entrypoint.sh
@@ -0,0 +1,173 @@
+#!/bin/bash -x
+printenv
+
+mkdir -p /opt/config
+echo "__rancher_ip_addr__" > /opt/config/rancher_ip_addr.txt
+echo `hostname -I` `hostname` >> /etc/hosts
+mkdir -p /etc/docker
+cat > /etc/docker/daemon.json <<EOF
+{
+ "insecure-registries" : ["__docker_proxy__"]
+}
+EOF
+cat > /etc/apt/apt.conf.d/30proxy<<EOF
+Acquire::http { Proxy "http://__apt_proxy__"; };
+Acquire::https::Proxy "DIRECT";
+EOF
+apt-get -y update
+apt-get -y install jq
+
+cd ~
+
+# install docker 1.12
+curl -s https://releases.rancher.com/install-docker/1.12.sh | sh
+usermod -aG docker ubuntu
+
+# install kubernetes 1.8.6
+curl -s -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.6/bin/linux/amd64/kubectl
+chmod +x ./kubectl
+sudo mv ./kubectl /usr/local/bin/kubectl
+mkdir ~/.kube
+
+# install helm 2.3
+wget -q http://storage.googleapis.com/kubernetes-helm/helm-v2.3.0-linux-amd64.tar.gz
+tar -zxvf helm-v2.3.0-linux-amd64.tar.gz
+sudo mv linux-amd64/helm /usr/local/bin/helm
+
+# Fix virtual memory allocation for onap-log:elasticsearch:
+echo "vm.max_map_count=262144" >> /etc/sysctl.conf
+sysctl -p
+
+# install rancher agent
+echo export RANCHER_IP=__rancher_ip_addr__ > api-keys-rc
+source api-keys-rc
+
+sleep 50
+until curl -s -o projects.json -H "Accept: application/json" http://$RANCHER_IP:8080/v2-beta/projects; do
+ sleep 10
+done
+OLD_PID=$(jq -r '.data[0].id' projects.json)
+
+curl -s -H "Accept: application/json" -H "Content-Type: application/json" -d '{"accountId":"1a1"}' http://$RANCHER_IP:8080/v2-beta/apikeys | tee apikeys.json
+echo export RANCHER_ACCESS_KEY=`jq -r '.publicValue' apikeys.json` >> api-keys-rc
+echo export RANCHER_SECRET_KEY=`jq -r '.secretValue' apikeys.json` >> api-keys-rc
+source api-keys-rc
+
+curl -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -X DELETE -H 'Content-Type: application/json' "http://$RANCHER_IP:8080/v2-beta/projects/$OLD_PID"
+
+until [ ! -z "$TEMPLATE_ID" ] && [ "$TEMPLATE_ID" != "null" ]; do
+ sleep 5
+ curl -s -H "Accept: application/json" http://$RANCHER_IP:8080/v2-beta/projectTemplates?name=Kubernetes | tee projectTemplatesKubernetes.json
+ TEMPLATE_ID=$(jq -r '.data[0].id' projectTemplatesKubernetes.json)
+done
+
+curl -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -X POST -H 'Content-Type: application/json' -d '{ "name":"oom", "projectTemplateId":"'$TEMPLATE_ID'" }' "http://$RANCHER_IP:8080/v2-beta/projects" | tee project.json
+PID=`jq -r '.id' project.json`
+echo export RANCHER_URL=http://$RANCHER_IP:8080/v1/projects/$PID >> api-keys-rc
+source api-keys-rc
+
+until [ $(jq -r '.state' project.json) == "active" ]; do
+ sleep 5
+ curl -s -H "Accept: application/json" http://$RANCHER_IP:8080/v1/projects/$PID | tee project.json
+done
+
+TID=$(curl -s -X POST -H "Accept: application/json" -H "Content-Type: application/json" http://$RANCHER_IP:8080/v1/projects/$PID/registrationTokens | jq -r '.id')
+touch token.json
+while [ $(jq -r .command token.json | wc -c) -lt 10 ]; do
+ sleep 5
+ curl -s -X GET -H "Accept: application/json" http://$RANCHER_IP:8080/v1/projects/$PID/registrationToken/$TID | tee token.json
+done
+CMD=$(jq -r .command token.json)
+eval $CMD
+
+# download rancher CLI
+wget -q https://github.com/rancher/cli/releases/download/v0.6.7/rancher-linux-amd64-v0.6.7.tar.xz
+unxz rancher-linux-amd64-v0.6.7.tar.xz
+tar xvf rancher-linux-amd64-v0.6.7.tar
+
+# Clone OOM:
+cd ~
+git clone -b amsterdam http://gerrit.onap.org/r/oom
+
+# Update values.yaml to point to docker-proxy instead of nexus3:
+cd ~/oom/kubernetes
+perl -p -i -e 's/nexus3.onap.org:10001/__docker_proxy__/g' `find ./ -name values.yaml`
+
+KUBETOKEN=$(echo -n 'Basic '$(echo -n "$RANCHER_ACCESS_KEY:$RANCHER_SECRET_KEY" | base64 -w 0) | base64 -w 0)
+
+# create .kube/config
+cat > ~/.kube/config <<EOF
+apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+ api-version: v1
+ insecure-skip-tls-verify: true
+ server: "https://$RANCHER_IP:8080/r/projects/$PID/kubernetes:6443"
+ name: "oom"
+contexts:
+- context:
+ cluster: "oom"
+ user: "oom"
+ name: "oom"
+current-context: "oom"
+users:
+- name: "oom"
+ user:
+ token: "$KUBETOKEN"
+EOF
+cat ~/.kube/config
+
+# Update ~/oom/kubernetes/kube2msb/values.yaml kubeMasterAuthToken to use the token from ~/.kube/config
+sed -i "s/kubeMasterAuthToken:.*/kubeMasterAuthToken: $KUBETOKEN/" ~/oom/kubernetes/kube2msb/values.yaml
+
+export KUBECONFIG=/root/.kube/config
+kubectl config view
+
+# wait for kubernetes to initialize
+sleep 100
+until [ $(kubectl get pods --all-namespaces | tail -n +2 | grep -c Running) -ge 6 ]; do
+ sleep 10
+done
+
+# Put your onap_key ssh private key in ~/.ssh/onap_key
+
+# Create or edit ~/oom/kubernetes/config/onap-parameters.yaml
+cp ~/oom/kubernetes/config/onap-parameters-sample.yaml ~/oom/kubernetes/config/onap-parameters.yaml
+cat >> ~/oom/kubernetes/config/onap-parameters.yaml <<EOF
+OPENSTACK_UBUNTU_14_IMAGE: "trusty"
+OPENSTACK_PUBLIC_NET_ID: "024582bd-ef9b-48b9-9e70-e6732559d9df"
+OPENSTACK_OAM_NETWORK_ID: "a899f36c-28e1-4aa9-9451-1b9f41feefa5"
+OPENSTACK_OAM_SUBNET_ID: "b9627602-2908-4aee-94b5-4f1dc92017df"
+OPENSTACK_OAM_NETWORK_CIDR: "172.16.1.0/24"
+OPENSTACK_USERNAME: "demo"
+OPENSTACK_API_KEY: "demo"
+OPENSTACK_TENANT_NAME: "demo"
+OPENSTACK_TENANT_ID: "__public_net_id__"
+OPENSTACK_REGION: "RegionOne"
+OPENSTACK_KEYSTONE_URL: "http://192.168.1.11:5000"
+OPENSTACK_FLAVOUR_MEDIUM: "m1.medium"
+OPENSTACK_SERVICE_TENANT_NAME: "service"
+DMAAP_TOPIC: "AUTO"
+DEMO_ARTIFACTS_VERSION: "1.1.0-SNAPSHOT"
+EOF
+
+# Source the environment file:
+cd ~/oom/kubernetes/oneclick/
+source setenv.bash
+
+# run the config pod creation
+cd ~/oom/kubernetes/config
+./createConfig.sh -n onap
+
+# Wait until the config container completes.
+sleep 200
+until [ $(kubectl get pods --namespace onap -a | tail -n +2 | grep -c Completed) -eq 1 ]; do
+ sleep 10
+done
+
+# Run ONAP:
+cd ~/oom/kubernetes/oneclick/
+./createAll.bash -n onap
+
+# Check ONAP status:
diff --git a/test/ete/labs/huawei/onap-oom.yaml b/test/ete/labs/huawei/onap-oom.yaml
index ccc8540c7..ff4d83550 100644
--- a/test/ete/labs/huawei/onap-oom.yaml
+++ b/test/ete/labs/huawei/onap-oom.yaml
@@ -28,25 +28,8 @@ resources:
params:
__docker_proxy__: { get_param: docker_proxy }
__apt_proxy__: { get_param: apt_proxy }
- template: |
- #!/bin/bash -x
- printenv
-
- echo `hostname -I` `hostname` >> /etc/hosts
- mkdir -p /etc/docker
- cat > /etc/docker/daemon.json <<EOF
- {
- "insecure-registries" : ["__docker_proxy__"]
- }
- EOF
- cat > /etc/apt/apt.conf.d/30proxy<<EOF
- Acquire::http { Proxy "http://__apt_proxy__"; };
- Acquire::https::Proxy "DIRECT";
- EOF
- apt-get -y update
- apt-get -y install docker.io
- usermod -aG docker ubuntu
- docker run --restart unless-stopped -d -p 8080:8080 rancher/server:v1.6.10
+ template:
+ get_file: rancher_vm_entrypoint.sh
k8s_vm:
type: OS::Nova::Server
@@ -64,177 +47,5 @@ resources:
__docker_proxy__: { get_param: docker_proxy }
__apt_proxy__: { get_param: apt_proxy }
__rancher_ip_addr__: { get_attr: [rancher_vm, first_address] }
- template: |
- #!/bin/bash -x
- printenv
-
- mkdir -p /opt/config
- echo "__rancher_ip_addr__" > /opt/config/rancher_ip_addr.txt
- echo `hostname -I` `hostname` >> /etc/hosts
- mkdir -p /etc/docker
- cat > /etc/docker/daemon.json <<EOF
- {
- "insecure-registries" : ["__docker_proxy__"]
- }
- EOF
- cat > /etc/apt/apt.conf.d/30proxy<<EOF
- Acquire::http { Proxy "http://__apt_proxy__"; };
- Acquire::https::Proxy "DIRECT";
- EOF
- apt-get -y update
- apt-get -y install jq
-
- cd ~
-
- # install docker 1.12
- curl -s https://releases.rancher.com/install-docker/1.12.sh | sh
- usermod -aG docker ubuntu
-
- # install kubernetes 1.8.6
- curl -s -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.6/bin/linux/amd64/kubectl
- chmod +x ./kubectl
- sudo mv ./kubectl /usr/local/bin/kubectl
- mkdir ~/.kube
-
- # install helm 2.3
- wget -q http://storage.googleapis.com/kubernetes-helm/helm-v2.3.0-linux-amd64.tar.gz
- tar -zxvf helm-v2.3.0-linux-amd64.tar.gz
- sudo mv linux-amd64/helm /usr/local/bin/helm
-
- # Fix virtual memory allocation for onap-log:elasticsearch:
- echo "vm.max_map_count=262144" >> /etc/sysctl.conf
- sysctl -p
-
- # install rancher agent
- echo export RANCHER_IP=__rancher_ip_addr__ > api-keys-rc
- source api-keys-rc
-
- sleep 50
- until curl -s -o projects.json -H "Accept: application/json" http://$RANCHER_IP:8080/v2-beta/projects; do
- sleep 10
- done
- OLD_PID=$(jq -r '.data[0].id' projects.json)
-
- curl -s -H "Accept: application/json" -H "Content-Type: application/json" -d '{"accountId":"1a1"}' http://$RANCHER_IP:8080/v2-beta/apikeys | tee apikeys.json
- echo export RANCHER_ACCESS_KEY=`jq -r '.publicValue' apikeys.json` >> api-keys-rc
- echo export RANCHER_SECRET_KEY=`jq -r '.secretValue' apikeys.json` >> api-keys-rc
- source api-keys-rc
-
- curl -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -X DELETE -H 'Content-Type: application/json' "http://$RANCHER_IP:8080/v2-beta/projects/$OLD_PID"
-
- until [ ! -z "$TEMPLATE_ID" ] && [ "$TEMPLATE_ID" != "null" ]; do
- sleep 5
- curl -s -H "Accept: application/json" http://$RANCHER_IP:8080/v2-beta/projectTemplates?name=Kubernetes | tee projectTemplatesKubernetes.json
- TEMPLATE_ID=$(jq -r '.data[0].id' projectTemplatesKubernetes.json)
- done
-
- curl -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -X POST -H 'Content-Type: application/json' -d '{ "name":"oom", "projectTemplateId":"'$TEMPLATE_ID'" }' "http://$RANCHER_IP:8080/v2-beta/projects" | tee project.json
- PID=`jq -r '.id' project.json`
- echo export RANCHER_URL=http://$RANCHER_IP:8080/v1/projects/$PID >> api-keys-rc
- source api-keys-rc
-
- until [ $(jq -r '.state' project.json) == "active" ]; do
- sleep 5
- curl -s -H "Accept: application/json" http://$RANCHER_IP:8080/v1/projects/$PID | tee project.json
- done
-
- TID=$(curl -s -X POST -H "Accept: application/json" -H "Content-Type: application/json" http://$RANCHER_IP:8080/v1/projects/$PID/registrationTokens | jq -r '.id')
- touch token.json
- while [ $(jq -r .command token.json | wc -c) -lt 10 ]; do
- sleep 5
- curl -s -X GET -H "Accept: application/json" http://$RANCHER_IP:8080/v1/projects/$PID/registrationToken/$TID | tee token.json
- done
- CMD=$(jq -r .command token.json)
- eval $CMD
-
- # download rancher CLI
- wget -q https://github.com/rancher/cli/releases/download/v0.6.7/rancher-linux-amd64-v0.6.7.tar.xz
- unxz rancher-linux-amd64-v0.6.7.tar.xz
- tar xvf rancher-linux-amd64-v0.6.7.tar
-
- # Clone OOM:
- cd ~
- git clone -b amsterdam http://gerrit.onap.org/r/oom
-
- # Update values.yaml to point to docker-proxy instead of nexus3:
- cd ~/oom/kubernetes
- perl -p -i -e 's/nexus3.onap.org:10001/__docker_proxy__/g' `find ./ -name values.yaml`
-
- KUBETOKEN=$(echo -n 'Basic '$(echo -n "$RANCHER_ACCESS_KEY:$RANCHER_SECRET_KEY" | base64 -w 0) | base64 -w 0)
-
- # create .kube/config
- cat > ~/.kube/config <<EOF
- apiVersion: v1
- kind: Config
- clusters:
- - cluster:
- api-version: v1
- insecure-skip-tls-verify: true
- server: "https://$RANCHER_IP:8080/r/projects/$PID/kubernetes:6443"
- name: "oom"
- contexts:
- - context:
- cluster: "oom"
- user: "oom"
- name: "oom"
- current-context: "oom"
- users:
- - name: "oom"
- user:
- token: "$KUBETOKEN"
- EOF
- cat ~/.kube/config
-
- # Update ~/oom/kubernetes/kube2msb/values.yaml kubeMasterAuthToken to use the token from ~/.kube/config
- sed -i "s/kubeMasterAuthToken:.*/kubeMasterAuthToken: $KUBETOKEN/" ~/oom/kubernetes/kube2msb/values.yaml
-
- export KUBECONFIG=/root/.kube/config
- kubectl config view
-
- # wait for kubernetes to initialze
- sleep 100
- until [ $(kubectl get pods --all-namespaces | tail -n +2 | grep -c Running) -ge 6 ]; do
- sleep 10
- done
-
- # Put your onap_key ssh private key in ~/.ssh/onap_key
-
- # Create or edit ~/oom/kubernetes/config/onap-parameters.yaml
- cp ~/oom/kubernetes/config/onap-parameters-sample.yaml ~/oom/kubernetes/config/onap-parameters.yaml
- cat >> ~/oom/kubernetes/config/onap-parameters.yaml <<EOF
- OPENSTACK_UBUNTU_14_IMAGE: "trusty"
- OPENSTACK_PUBLIC_NET_ID: "024582bd-ef9b-48b9-9e70-e6732559d9df"
- OPENSTACK_OAM_NETWORK_ID: "a899f36c-28e1-4aa9-9451-1b9f41feefa5"
- OPENSTACK_OAM_SUBNET_ID: "b9627602-2908-4aee-94b5-4f1dc92017df"
- OPENSTACK_OAM_NETWORK_CIDR: "172.16.1.0/24"
- OPENSTACK_USERNAME: "demo"
- OPENSTACK_API_KEY: "demo"
- OPENSTACK_TENANT_NAME: "demo"
- OPENSTACK_TENANT_ID: "__public_net_id__"
- OPENSTACK_REGION: "RegionOne"
- OPENSTACK_KEYSTONE_URL: "http://192.168.1.11:5000"
- OPENSTACK_FLAVOUR_MEDIUM: "m1.medium"
- OPENSTACK_SERVICE_TENANT_NAME: "service"
- DMAAP_TOPIC: "AUTO"
- DEMO_ARTIFACTS_VERSION: "1.1.0-SNAPSHOT"
- EOF
-
- # Source the environment file:
- cd ~/oom/kubernetes/oneclick/
- source setenv.bash
-
- # run the config pod creation
- cd ~/oom/kubernetes/config
- ./createConfig.sh -n onap
-
- # Wait until the config container completes.
- sleep 200
- until [ $(kubectl get pods --namespace onap -a | tail -n +2 | grep -c Completed) -eq 1 ]; do
- sleep 10
- done
-
- # Run ONAP:
- cd ~/oom/kubernetes/oneclick/
- ./createAll.bash -n onap
-
- # Check ONAP status:
+ template:
+ get_file: k8s_vm_entrypoint.sh
diff --git a/test/ete/labs/huawei/rancher_vm_entrypoint.sh b/test/ete/labs/huawei/rancher_vm_entrypoint.sh
new file mode 100644
index 000000000..da1955446
--- /dev/null
+++ b/test/ete/labs/huawei/rancher_vm_entrypoint.sh
@@ -0,0 +1,18 @@
+#!/bin/bash -x
+printenv
+
+echo `hostname -I` `hostname` >> /etc/hosts
+mkdir -p /etc/docker
+cat > /etc/docker/daemon.json <<EOF
+{
+ "insecure-registries" : ["__docker_proxy__"]
+}
+EOF
+cat > /etc/apt/apt.conf.d/30proxy<<EOF
+Acquire::http { Proxy "http://__apt_proxy__"; };
+Acquire::https::Proxy "DIRECT";
+EOF
+apt-get -y update
+apt-get -y install docker.io
+usermod -aG docker ubuntu
+docker run --restart unless-stopped -d -p 8080:8080 rancher/server:v1.6.10