Diffstat (limited to 'vagrant/tests')
-rw-r--r--  vagrant/tests/generic_simulator/Dockerfile        |  27
-rw-r--r--  vagrant/tests/generic_simulator/aai/responses.yml | 189
-rw-r--r--  vagrant/tests/generic_simulator/generic_sim.py    | 109
-rw-r--r--  vagrant/tests/generic_simulator/requirements.txt  |  11
-rwxr-xr-x  vagrant/tests/integration_cFW.sh                  | 194
-rwxr-xr-x  vagrant/tests/integration_vFW.sh                  | 295
-rwxr-xr-x  vagrant/tests/multus.sh                           | 123
-rwxr-xr-x  vagrant/tests/nfd.sh                              |  62
-rwxr-xr-x  vagrant/tests/ovn-kubernetes.sh                   | 136
-rwxr-xr-x  vagrant/tests/plugin.sh                           |  97
-rwxr-xr-x  vagrant/tests/virtlet.sh                          | 145
11 files changed, 1388 insertions, 0 deletions
diff --git a/vagrant/tests/generic_simulator/Dockerfile b/vagrant/tests/generic_simulator/Dockerfile
new file mode 100644
index 00000000..202cafc6
--- /dev/null
+++ b/vagrant/tests/generic_simulator/Dockerfile
@@ -0,0 +1,27 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+FROM python:2.7
+
+ARG HTTP_PROXY=${HTTP_PROXY}
+ARG HTTPS_PROXY=${HTTPS_PROXY}
+
+ENV http_proxy $HTTP_PROXY
+ENV https_proxy $HTTPS_PROXY
+
+EXPOSE 8080
+
+# RUN uses /bin/sh (dash), which does not perform brace expansion,
+# so both directories are spelled out explicitly
+RUN mkdir -p /tmp/generic_sim /etc/generic_sim
+
+WORKDIR /opt/generic_sim/
+
+COPY . .
+RUN pip install --no-cache-dir -r requirements.txt
+
+CMD [ "python", "generic_sim.py" ]
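This image is what plugin.sh (later in this patch) builds and runs as the A&AI mock. To poke at it in isolation, a minimal sketch — host port 8443 mirrors plugin.sh, and the volume paths follow this patch's conventions:

    docker build -t generic_sim:latest vagrant/tests/generic_simulator
    docker run --name aai -d \
        -v "$PWD/vagrant/tests/generic_simulator/aai:/etc/generic_sim" \
        -v "$PWD/output:/tmp/generic_sim" \
        -p 8443:8080 generic_sim
    curl -s http://localhost:8443/aai/v13/cloud-infrastructure/pservers/pserver/compute-0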
(ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/100","Node + Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/100","ERR.5.4.6114"]}}}' + content_type: application/json + status_code: 200 + PUT: + body: '' + content_type: application/json + status_code: 200 +aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/110: + GET: + body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource + not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/110","Node + Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/110","ERR.5.4.6114"]}}}' + content_type: application/json + status_code: 200 + PUT: + body: '' + content_type: application/json + status_code: 200 +aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/111: + GET: + body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource + not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/111","Node + Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/111","ERR.5.4.6114"]}}}' + content_type: application/json + status_code: 200 + PUT: + body: '' + content_type: application/json + status_code: 200 +aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/112: + GET: + body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource + not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/112","Node + Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/112","ERR.5.4.6114"]}}}' + content_type: application/json + status_code: 200 + PUT: + body: '' + content_type: application/json + status_code: 200 +aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/113: + GET: + body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource + not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/113","Node + Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/113","ERR.5.4.6114"]}}}' + content_type: application/json + status_code: 200 + PUT: + body: '' + content_type: application/json + status_code: 200 +aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/114: + GET: + body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource + not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/114","Node + Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/114","ERR.5.4.6114"]}}}' + content_type: application/json + status_code: 200 + PUT: + body: '' + content_type: application/json + status_code: 200 
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/115: + GET: + body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource + not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/115","Node + Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/115","ERR.5.4.6114"]}}}' + content_type: application/json + status_code: 200 + PUT: + body: '' + content_type: application/json + status_code: 200 +aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/116: + GET: + body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource + not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/116","Node + Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/116","ERR.5.4.6114"]}}}' + content_type: application/json + status_code: 200 + PUT: + body: '' + content_type: application/json + status_code: 200 +aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/117: + GET: + body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource + not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/117","Node + Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/117","ERR.5.4.6114"]}}}' + content_type: application/json + status_code: 200 + PUT: + body: '' + content_type: application/json + status_code: 200 +aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/images/image/660709df-e90b-471f-ac57-d8c2555e573d: + GET: + body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource + not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/images/image/660709df-e90b-471f-ac57-d8c2555e573d","Node + Not Found:No Node of type image found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/images/image/660709df-e90b-471f-ac57-d8c2555e573d","ERR.5.4.6114"]}}}' + content_type: application/json + status_code: 200 + PUT: + body: '' + content_type: application/json + status_code: 200 +aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/3543226ffed44daf90a2f71f36c00b8d: + GET: + body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource + not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/3543226ffed44daf90a2f71f36c00b8d","Node + Not Found:No Node of type tenant found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/3543226ffed44daf90a2f71f36c00b8d","ERR.5.4.6114"]}}}' + content_type: application/json + status_code: 200 + PUT: + body: '' + content_type: application/json + status_code: 200 +aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/b8f5d85bbcd84af28d7caa62d39f05c7: + GET: + body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource + not 
found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/b8f5d85bbcd84af28d7caa62d39f05c7","Node + Not Found:No Node of type tenant found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/b8f5d85bbcd84af28d7caa62d39f05c7","ERR.5.4.6114"]}}}' + content_type: application/json + status_code: 200 + PUT: + body: '' + content_type: application/json + status_code: 200 +aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne: + GET: + body: '{"cloud-owner":"CloudOwner","cloud-region-id":"RegionOne","cloud-type":"openstack","owner-defined-type":"t1","cloud-region-version":"RegionOne","identity-url":"http://multicloud-ocata:80/api/multicloud-titanium_cloud/v0/CloudOwner_RegionOne/identity/v2.0","cloud-zone":"z1","complex-name":"clli1","sriov-automation":false,"cloud-extra-info":"","resource-version":"1524845276291"}' + content_type: application/json + status_code: 200 +aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/esr-system-info-list: + GET: + body: '{"esr-system-info":[{"esr-system-info-id":"4ce895ad-82f7-4476-b5eb-d19d19585da2","service-url":"http://keystone:8080/v3","user-name":"admin","password":"secret","system-type":"VIM","ssl-insecure":true,"cloud-domain":"Default","default-tenant":"admin","resource-version":"1524845155617"}]}' + content_type: application/json + status_code: 200 +aai/v13/cloud-infrastructure/pservers/pserver/compute-0: + GET: + body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource + not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/pservers/pserver/compute-0","Node + Not Found:No Node of type pserver found at: cloud-infrastructure/pservers/pserver/compute-0","ERR.5.4.6114"]}}}' + content_type: application/json + status_code: 200 + PUT: + body: '' + content_type: application/json + status_code: 200 +aai/v13/cloud-infrastructure/pservers/pserver/compute-0/relationship-list/relationship: + PUT: + body: '' + content_type: application/json + status_code: 200 diff --git a/vagrant/tests/generic_simulator/generic_sim.py b/vagrant/tests/generic_simulator/generic_sim.py new file mode 100644 index 00000000..4392b652 --- /dev/null +++ b/vagrant/tests/generic_simulator/generic_sim.py @@ -0,0 +1,109 @@ +# Copyright 2018 Intel Corporation, Inc +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
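Each top-level key in responses.yml is a request path and each nested key an HTTP verb, carrying the canned body, content_type and status_code that generic_sim.py replays. Note that the A&AI "Resource not found" payloads are served with status 200, and that the RegionOne key appears twice — with yaml.safe_load the second (multicloud identity-url) entry silently wins. Stubbing a new path is just another entry; a hypothetical example, not part of this patch:

    cat >> vagrant/tests/generic_simulator/aai/responses.yml << 'EOF'
    aai/v13/cloud-infrastructure/complexes/complex/clli1:
      GET:
        body: '{"physical-location-id":"clli1"}'
        content_type: application/json
        status_code: 200
    EOF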
diff --git a/vagrant/tests/generic_simulator/generic_sim.py b/vagrant/tests/generic_simulator/generic_sim.py
new file mode 100644
index 00000000..4392b652
--- /dev/null
+++ b/vagrant/tests/generic_simulator/generic_sim.py
@@ -0,0 +1,109 @@
+# Copyright 2018 Intel Corporation, Inc
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import logging
+
+import web
+from web import webapi
+import yaml
+
+# route every path to the single mock controller
+urls = (
+    '/(.*)', 'MockController'
+)
+
+def setup_logger(name, log_file, level=logging.DEBUG):
+    print("Configuring the logger...")
+    handler = logging.FileHandler(log_file)
+    formatter = logging.Formatter('%(message)s')
+    handler.setFormatter(formatter)
+
+    logger = logging.getLogger(name)
+    logger.setLevel(level)
+    logger.addHandler(handler)
+
+    return logger
+
+
+class MockResponse:
+    def __init__(self, http_verb, status_code,
+                 content_type="application/json", body="{}",
+                 headers=None):
+        self.http_verb = http_verb.lower()
+        self.status_code = status_code
+        self.content_type = content_type
+        self.body = body
+        # avoid sharing a mutable default dict between instances
+        self.headers = headers or {}
+
+def _parse_responses(parsed_responses):
+    result = {}
+    for path, responses in parsed_responses.iteritems():
+        # normalize paths so lookups never depend on a leading slash
+        new_path = path
+        if path.startswith("/"):
+            new_path = path[1:]
+
+        result[new_path] = []
+        for http_verb, response in responses.iteritems():
+            result[new_path].append(MockResponse(http_verb, **response))
+    return result
+
+def load_responses(filename):
+    print("Loading responses from configuration file..")
+    with open(filename) as yaml_file:
+        responses_file = yaml.safe_load(yaml_file)
+    responses_map = _parse_responses(responses_file)
+    return responses_map
+
+
+class MockController:
+
+    def _do_action(self, action):
+        # log every request body so the tests can inspect what was sent
+        logger.info('{}'.format(web.ctx.env.get('wsgi.input').read()))
+        action = action.lower()
+        url = web.ctx['fullpath']
+        try:
+            if url.startswith("/"):
+                url = url[1:]
+            response = [r for r in responses_map[url]
+                        if r.http_verb == action][0]
+            for header, value in response.headers.iteritems():
+                web.header(header, value)
+            web.header('Content-Type', response.content_type)
+            print(response.body)
+            return response.body
+        except (KeyError, IndexError):
+            # no canned response for this path/verb pair; the exception
+            # must be raised, not just instantiated, to produce a 404
+            raise webapi.NotFound()
+
+    def DELETE(self, url):
+        return self._do_action("delete")
+
+    def HEAD(self, url):
+        return self._do_action("head")
+
+    def PUT(self, url):
+        return self._do_action("put")
+
+    def GET(self, url):
+        return self._do_action("get")
+
+    def POST(self, url):
+        return self._do_action("post")
+
+    def PATCH(self, url):
+        return self._do_action("patch")
+
+
+logger = setup_logger('mock_controller', '/tmp/generic_sim/output.log')
+responses_map = load_responses('/etc/generic_sim/responses.yml')
+app = web.application(urls, globals())
+if __name__ == "__main__":
+    app.run()
diff --git a/vagrant/tests/generic_simulator/requirements.txt b/vagrant/tests/generic_simulator/requirements.txt
new file mode 100644
index 00000000..a0b6aae2
--- /dev/null
+++ b/vagrant/tests/generic_simulator/requirements.txt
@@ -0,0 +1,11 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+PyYAML
+web.py
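For quick iteration outside Docker, the app can also run directly under Python 2 — a sketch, assuming web.py and PyYAML are installed and the two directories the module hard-codes exist (web.py takes an optional port as its first argument):

    mkdir -p /tmp/generic_sim /etc/generic_sim
    cp vagrant/tests/generic_simulator/aai/responses.yml /etc/generic_sim/
    python vagrant/tests/generic_simulator/generic_sim.py 8080 &
    curl -s http://localhost:8080/aai/v13/cloud-infrastructure/pservers/pserver/compute-0
    tail /tmp/generic_sim/output.log   # every request body is logged here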
diff --git a/vagrant/tests/integration_cFW.sh b/vagrant/tests/integration_cFW.sh
new file mode 100755
index 00000000..e4b305f4
--- /dev/null
+++ b/vagrant/tests/integration_cFW.sh
@@ -0,0 +1,194 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+rm -f $HOME/*.yaml
+packetgen_deployment_name=packetgen
+sink_deployment_name=sink
+firewall_deployment_name=firewall
+
+cat << NET > $HOME/unprotected-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+  name: unprotected-private-net-cidr
+spec:
+  config: '{
+    "name": "unprotected",
+    "type": "bridge",
+    "ipam": {
+        "type": "host-local",
+        "subnet": "192.168.10.0/24"
+    }
+}'
+NET
+
+cat << NET > $HOME/protected-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+  name: protected-private-net-cidr
+spec:
+  config: '{
+    "name": "protected",
+    "type": "bridge",
+    "ipam": {
+        "type": "host-local",
+        "subnet": "192.168.20.0/24"
+    }
+}'
+NET
+
+cat << NET > $HOME/onap-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+  name: onap-private-net-cidr
+spec:
+  config: '{
+    "name": "onap",
+    "type": "bridge",
+    "ipam": {
+        "type": "host-local",
+        "subnet": "10.10.0.0/16"
+    }
+}'
+NET
+
+cat << DEPLOYMENT > $HOME/$packetgen_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: $packetgen_deployment_name
+  labels:
+    app: vFirewall
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: vFirewall
+  template:
+    metadata:
+      labels:
+        app: vFirewall
+      annotations:
+        kubernetes.v1.cni.cncf.io/networks: '[
+            { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
+            { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
+        ]'
+    spec:
+      containers:
+      - name: $packetgen_deployment_name
+        image: electrocucaracha/packetgen
+        imagePullPolicy: IfNotPresent
+        tty: true
+        stdin: true
+        resources:
+          limits:
+            memory: 256Mi
+DEPLOYMENT
+
+cat << DEPLOYMENT > $HOME/$firewall_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: $firewall_deployment_name
+  labels:
+    app: vFirewall
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: vFirewall
+  template:
+    metadata:
+      labels:
+        app: vFirewall
+      annotations:
+        kubernetes.v1.cni.cncf.io/networks: '[
+            { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
+            { "name": "protected-private-net-cidr", "interfaceRequest": "eth2" },
+            { "name": "onap-private-net-cidr", "interfaceRequest": "eth3" }
+        ]'
+    spec:
+      containers:
+      - name: $firewall_deployment_name
+        image: electrocucaracha/firewall
+        imagePullPolicy: IfNotPresent
+        tty: true
+        stdin: true
+        resources:
+          limits:
+            memory: 160Mi
+DEPLOYMENT
+
+cat << DEPLOYMENT > $HOME/$sink_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: $sink_deployment_name
+  labels:
+    app: vFirewall
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: vFirewall
+  template:
+    metadata:
+      labels:
+        app: vFirewall
+      annotations:
+        kubernetes.v1.cni.cncf.io/networks: '[
+            { "name": "protected-private-net-cidr", "interfaceRequest": "eth1" },
+            { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
+        ]'
+    spec:
+      containers:
+      - name: $sink_deployment_name
+        image: electrocucaracha/sink
+        imagePullPolicy: IfNotPresent
+        tty: true
+        stdin: true
+        resources:
+          limits:
+            memory: 160Mi
+DEPLOYMENT
+
+if kubectl version &>/dev/null; then
+    kubectl apply -f $HOME/unprotected-private-net-cidr-network.yaml
+    kubectl apply -f $HOME/protected-private-net-cidr-network.yaml
+    kubectl apply -f $HOME/onap-private-net-cidr-network.yaml
+
+    for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
+        kubectl delete deployment $deployment_name --ignore-not-found=true --now
+        while kubectl get deployment $deployment_name &>/dev/null; do
+            sleep 5
+        done
+        kubectl create -f $HOME/$deployment_name.yaml
+    done
+
+    for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
+        status_phase=""
+        while [[ $status_phase != "Running" ]]; do
+            new_phase=$(kubectl get pods | grep $deployment_name | awk '{print $3}')
+            if [[ $new_phase != $status_phase ]]; then
+                echo "$(date +%H:%M:%S) - $deployment_name : $new_phase"
+                status_phase=$new_phase
+            fi
+            if [[ $new_phase == "Err"* ]]; then
+                exit 1
+            fi
+        done
+    done
+fi
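The loop above only asserts that the three pods reach Running; it does not verify that traffic actually crosses the firewall. A follow-up spot check is to watch the sink's protected interface counters grow while packetgen runs — a sketch, assuming the Multus NIC names from the annotations above:

    sink_pod=$(kubectl get pods -l app=vFirewall -o jsonpath='{.items[*].metadata.name}' | tr ' ' '\n' | grep sink)
    kubectl exec $sink_pod -- ip -s link show eth1   # RX counters should increase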
diff --git a/vagrant/tests/integration_vFW.sh b/vagrant/tests/integration_vFW.sh
new file mode 100755
index 00000000..fa48d7c5
--- /dev/null
+++ b/vagrant/tests/integration_vFW.sh
@@ -0,0 +1,295 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+rm -f $HOME/*.yaml
+packetgen_deployment_name=packetgen
+sink_deployment_name=sink
+firewall_deployment_name=firewall
+image_name=virtlet.cloud/ubuntu/16.04
+
+if [[ ! -f $HOME/.ssh/id_rsa.pub ]]; then
+    echo -e "\n\n\n" | ssh-keygen -t rsa -N ""
+fi
+ssh_key=$(cat $HOME/.ssh/id_rsa.pub)
+
+cat << NET > $HOME/unprotected-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+  name: unprotected-private-net-cidr
+spec:
+  config: '{
+    "name": "unprotected",
+    "type": "bridge",
+    "ipam": {
+        "type": "host-local",
+        "subnet": "192.168.10.0/24"
+    }
+}'
+NET
+
+cat << NET > $HOME/protected-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+  name: protected-private-net-cidr
+spec:
+  config: '{
+    "name": "protected",
+    "type": "bridge",
+    "ipam": {
+        "type": "host-local",
+        "subnet": "192.168.20.0/24"
+    }
+}'
+NET
+
+cat << NET > $HOME/onap-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+  name: onap-private-net-cidr
+spec:
+  config: '{
+    "name": "onap",
+    "type": "bridge",
+    "ipam": {
+        "type": "host-local",
+        "subnet": "10.10.0.0/16"
+    }
+}'
+NET
+
+proxy="#!/bin/bash"
+if [[ -n "${http_proxy+x}" ]]; then
+    proxy+="
+    export http_proxy=$http_proxy
+    echo \"Acquire::http::Proxy \\\"$http_proxy\\\";\" | sudo tee --append /etc/apt/apt.conf.d/01proxy
+"
+fi
+if [[ -n "${https_proxy+x}" ]]; then
+    proxy+="
+    export https_proxy=$https_proxy
+    echo \"Acquire::https::Proxy \\\"$https_proxy\\\";\" | sudo tee --append /etc/apt/apt.conf.d/01proxy
+"
+fi
+if [[ -n "${no_proxy+x}" ]]; then
+    proxy+="
+    export no_proxy=$no_proxy"
+fi
+
+cat << DEPLOYMENT > $HOME/$packetgen_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: $packetgen_deployment_name
+  labels:
+    app: vFirewall
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: vFirewall
+  template:
+    metadata:
+      labels:
+        app: vFirewall
+      annotations:
+        VirtletCloudInitUserData: |
+          users:
+          - default
+          - name: admin
+            sudo: ALL=(ALL) NOPASSWD:ALL
+            plain_text_passwd: secret
+            groups: sudo
+            ssh_authorized_keys:
+            - $ssh_key
+        VirtletCloudInitUserDataScript: |
+          $proxy
+
+          wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$packetgen_deployment_name | sudo -E bash
+        kubernetes.v1.cni.cncf.io/networks: '[
+            { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
+            { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
+        ]'
+        kubernetes.io/target-runtime: virtlet.cloud
+    spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: extraRuntime
+                operator: In
+                values:
+                - virtlet
+      containers:
+      - name: $packetgen_deployment_name
+        image: $image_name
+        imagePullPolicy: IfNotPresent
+        tty: true
+        stdin: true
+        resources:
+          limits:
+            memory: 256Mi
+DEPLOYMENT
+
+cat << DEPLOYMENT > $HOME/$firewall_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: $firewall_deployment_name
+  labels:
+    app: vFirewall
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: vFirewall
+  template:
+    metadata:
+      labels:
+        app: vFirewall
+      annotations:
+        VirtletCloudInitUserData: |
+          users:
+          - default
+          - name: admin
+            sudo: ALL=(ALL) NOPASSWD:ALL
+            plain_text_passwd: secret
+            groups: sudo
+            ssh_authorized_keys:
+            - $ssh_key
+        VirtletCloudInitUserDataScript: |
+          $proxy
+
+          wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$firewall_deployment_name | sudo -E bash
+        kubernetes.v1.cni.cncf.io/networks: '[
+            { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
+            { "name": "protected-private-net-cidr", "interfaceRequest": "eth2" },
+            { "name": "onap-private-net-cidr", "interfaceRequest": "eth3" }
+        ]'
+        kubernetes.io/target-runtime: virtlet.cloud
+    spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: extraRuntime
+                operator: In
+                values:
+                - virtlet
+      containers:
+      - name: $firewall_deployment_name
+        image: $image_name
+        imagePullPolicy: IfNotPresent
+        tty: true
+        stdin: true
+        resources:
+          limits:
+            memory: 160Mi
+DEPLOYMENT
+
+cat << DEPLOYMENT > $HOME/$sink_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: $sink_deployment_name
+  labels:
+    app: vFirewall
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: vFirewall
+  template:
+    metadata:
+      labels:
+        app: vFirewall
+      annotations:
+        VirtletCloudInitUserData: |
+          users:
+          - default
+          - name: admin
+            sudo: ALL=(ALL) NOPASSWD:ALL
+            plain_text_passwd: secret
+            groups: sudo
+            ssh_authorized_keys:
+            - $ssh_key
+        VirtletCloudInitUserDataScript: |
+          $proxy
+
+          wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$sink_deployment_name | sudo -E bash
+        kubernetes.v1.cni.cncf.io/networks: '[
+            { "name": "protected-private-net-cidr", "interfaceRequest": "eth1" },
+            { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
+        ]'
+        kubernetes.io/target-runtime: virtlet.cloud
+    spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: extraRuntime
+                operator: In
+                values:
+                - virtlet
+      containers:
+      - name: $sink_deployment_name
+        image: $image_name
+        imagePullPolicy: IfNotPresent
+        tty: true
+        stdin: true
+        resources:
+          limits:
+            memory: 160Mi
+DEPLOYMENT
+
+if kubectl version &>/dev/null; then
+    kubectl apply -f $HOME/unprotected-private-net-cidr-network.yaml
+    kubectl apply -f $HOME/protected-private-net-cidr-network.yaml
+    kubectl apply -f $HOME/onap-private-net-cidr-network.yaml
+
+    for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
+        kubectl delete deployment $deployment_name --ignore-not-found=true --now
+        while kubectl get deployment $deployment_name &>/dev/null; do
+            sleep 5
+        done
+        kubectl create -f $HOME/$deployment_name.yaml
+    done
+
+    for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
+        status_phase=""
+        while [[ $status_phase != "Running" ]]; do
+            new_phase=$(kubectl get pods | grep $deployment_name | awk '{print $3}')
+            if [[ $new_phase != $status_phase ]]; then
+                echo "$(date +%H:%M:%S) - $deployment_name : $new_phase"
+                status_phase=$new_phase
+            fi
+            if [[ $new_phase == "Err"* ]]; then
+                exit 1
+            fi
+        done
+    done
+    for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
+        pod_name=$(kubectl get pods | grep $deployment_name | awk '{print $1}')
+        vm=$(kubectl plugin virt virsh list | grep ".*$deployment_name" | awk '{print $2}')
+        echo "Pod name: $pod_name Virsh domain: $vm"
+        # ssh takes the private key; the .pub file is what cloud-init installs
+        echo "ssh -i ~/.ssh/id_rsa admin@$(kubectl get pods $pod_name -o jsonpath="{.status.podIP}")"
+        echo "=== Virtlet details ===="
+        echo -e "$(kubectl plugin virt virsh dumpxml $vm | grep VIRTLET_)\n"
+    done
+fi
diff --git a/vagrant/tests/multus.sh b/vagrant/tests/multus.sh
new file mode 100755
index 00000000..c5f7fc71
--- /dev/null
+++ b/vagrant/tests/multus.sh
@@ -0,0 +1,123 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+rm -f $HOME/*.yaml
+
+pod_name=multus-pod
+deployment_name=multus-deployment
+
+cat << NET > $HOME/bridge-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+  name: bridge-conf
+spec:
+  config: '{
+    "name": "mynet",
+    "type": "bridge",
+    "ipam": {
+        "type": "host-local",
+        "subnet": "10.10.0.0/16"
+    }
+}'
+NET
+
+cat << POD > $HOME/$pod_name.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: $pod_name
+  annotations:
+    kubernetes.v1.cni.cncf.io/networks: '[
+        { "name": "bridge-conf", "interfaceRequest": "eth1" },
+        { "name": "bridge-conf", "interfaceRequest": "eth2" }
+    ]'
+spec:  # specification of the pod's contents
+  containers:
+  - name: $pod_name
+    image: "busybox"
+    command: ["top"]
+    stdin: true
+    tty: true
+POD
+
+cat << DEPLOYMENT > $HOME/$deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: $deployment_name
+  labels:
+    app: multus
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: multus
+  template:
+    metadata:
+      labels:
+        app: multus
+      annotations:
+        kubernetes.v1.cni.cncf.io/networks: '[
+            { "name": "bridge-conf", "interfaceRequest": "eth1" },
+            { "name": "bridge-conf", "interfaceRequest": "eth2" }
+        ]'
+    spec:
+      containers:
+      - name: $deployment_name
+        image: "busybox"
+        command: ["top"]
+        stdin: true
+        tty: true
+DEPLOYMENT
+
+if kubectl version &>/dev/null; then
+    kubectl apply -f $HOME/bridge-network.yaml
+
+    kubectl delete pod $pod_name --ignore-not-found=true --now
+    kubectl delete deployment $deployment_name --ignore-not-found=true --now
+    while kubectl get pod $pod_name &>/dev/null; do
+        sleep 5
+    done
+    kubectl create -f $HOME/$pod_name.yaml
+    while kubectl get deployment $deployment_name &>/dev/null; do
+        sleep 5
+    done
+    kubectl create -f $HOME/$deployment_name.yaml
+    sleep 5
+
+    deployment_pod=$(kubectl get pods | grep $deployment_name | awk '{print $1}')
+    for pod in $pod_name $deployment_pod; do
+        status_phase=""
+        while [[ $status_phase != "Running" ]]; do
+            new_phase=$(kubectl get pods $pod | awk 'NR==2{print $3}')
+            if [[ $new_phase != $status_phase ]]; then
+                echo "$(date +%H:%M:%S) - $pod : $new_phase"
+                status_phase=$new_phase
+            fi
+            if [[ $new_phase == "Err"* ]]; then
+                exit 1
+            fi
+        done
+    done
+
+    for pod in $pod_name $deployment_pod; do
+        echo "===== $pod details ====="
+        kubectl exec -it $pod -- ip a
+        # '|| true' keeps errexit from aborting before the emptiness check
+        multus_nic=$(kubectl exec -it $pod -- ifconfig | grep "eth1" || true)
+        if [ -z "$multus_nic" ]; then
+            exit 1
+        fi
+    done
+fi
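Each entry in the networks annotation becomes one extra interface, so the pod ends up with the cluster-default eth0 plus eth1/eth2 on mynet (10.10.0.0/16). The ifconfig grep above is coarse; an equivalent spot check with iproute2:

    kubectl exec $pod_name -- ip -o -4 addr show | grep -e eth1 -e eth2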
diff --git a/vagrant/tests/nfd.sh b/vagrant/tests/nfd.sh
new file mode 100755
index 00000000..17548206
--- /dev/null
+++ b/vagrant/tests/nfd.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+rm -f $HOME/*.yaml
+
+pod_name=nfd-pod
+
+cat << POD > $HOME/$pod_name.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: $pod_name
+  labels:
+    env: test
+spec:
+  containers:
+  - name: nginx
+    image: nginx
+  nodeSelector:
+    node.alpha.kubernetes-incubator.io/nfd-network-SRIOV: "true"
+POD
+
+if kubectl version &>/dev/null; then
+    labels=$(kubectl get nodes -o json | jq .items[].metadata.labels)
+
+    echo $labels
+    if [[ $labels != *"node.alpha.kubernetes-incubator.io"* ]]; then
+        exit 1
+    fi
+
+    kubectl delete pod $pod_name --ignore-not-found=true --now
+    while kubectl get pod $pod_name &>/dev/null; do
+        sleep 5
+    done
+    kubectl create -f $HOME/$pod_name.yaml
+
+    for pod in $pod_name; do
+        status_phase=""
+        while [[ $status_phase != "Running" ]]; do
+            new_phase=$(kubectl get pods $pod | awk 'NR==2{print $3}')
+            if [[ $new_phase != $status_phase ]]; then
+                echo "$(date +%H:%M:%S) - $pod : $new_phase"
+                status_phase=$new_phase
+            fi
+            if [[ $new_phase == "Err"* ]]; then
+                exit 1
+            fi
+        done
+    done
+fi
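node-feature-discovery advertises one label per detected feature under the node.alpha.kubernetes-incubator.io prefix, and the nodeSelector above restricts the pod to SR-IOV capable nodes (label values must be strings, hence the quoted "true"). To list everything NFD published per node — the jq filter is illustrative:

    kubectl get nodes -o json | \
        jq '.items[].metadata.labels | with_entries(select(.key | startswith("node.alpha.kubernetes-incubator.io")))'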
diff --git a/vagrant/tests/ovn-kubernetes.sh b/vagrant/tests/ovn-kubernetes.sh
new file mode 100755
index 00000000..95d216bf
--- /dev/null
+++ b/vagrant/tests/ovn-kubernetes.sh
@@ -0,0 +1,136 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+apache_pod_name=apachetwin
+nginx_pod_name=nginxtwin
+
+cat << APACHEPOD > $HOME/apache-pod.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: $apache_pod_name
+  labels:
+    name: webserver
+spec:
+  containers:
+  - name: apachetwin
+    image: "busybox"
+    command: ["top"]
+    stdin: true
+    tty: true
+APACHEPOD
+
+cat << NGINXPOD > $HOME/nginx-pod.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: $nginx_pod_name
+  labels:
+    name: webserver
+spec:
+  containers:
+  - name: nginxtwin
+    image: "busybox"
+    command: ["top"]
+    stdin: true
+    tty: true
+NGINXPOD
+
+cat << APACHEEW > $HOME/apache-e-w.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    name: apacheservice
+    role: service
+  name: apacheservice
+spec:
+  ports:
+    - port: 8800
+      targetPort: 80
+      protocol: TCP
+      name: tcp
+  selector:
+    name: webserver
+APACHEEW
+
+cat << APACHENS > $HOME/apache-n-s.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    name: apacheexternal
+    role: service
+  name: apacheexternal
+spec:
+  ports:
+    - port: 8800
+      targetPort: 80
+      protocol: TCP
+      name: tcp
+  selector:
+    name: webserver
+  type: NodePort
+APACHENS
+
+if kubectl version &>/dev/null; then
+    kubectl apply -f $HOME/apache-e-w.yaml
+    kubectl apply -f $HOME/apache-n-s.yaml
+
+    kubectl delete pod $apache_pod_name --ignore-not-found=true --now
+    kubectl delete pod $nginx_pod_name --ignore-not-found=true --now
+    while kubectl get pod $apache_pod_name &>/dev/null; do
+        sleep 5
+    done
+    while kubectl get pod $nginx_pod_name &>/dev/null; do
+        sleep 5
+    done
+    kubectl create -f $HOME/apache-pod.yaml
+    kubectl create -f $HOME/nginx-pod.yaml
+
+    status_phase=""
+    while [[ $status_phase != "Running" ]]; do
+        new_phase=$(kubectl get pods $apache_pod_name | awk 'NR==2{print $3}')
+        if [[ $new_phase != $status_phase ]]; then
+            echo "$(date +%H:%M:%S) - $new_phase"
+            status_phase=$new_phase
+        fi
+        if [[ $new_phase == "Err"* ]]; then
+            exit 1
+        fi
+    done
+    status_phase=""
+    while [[ $status_phase != "Running" ]]; do
+        new_phase=$(kubectl get pods $nginx_pod_name | awk 'NR==2{print $3}')
+        if [[ $new_phase != $status_phase ]]; then
+            echo "$(date +%H:%M:%S) - $new_phase"
+            status_phase=$new_phase
+        fi
+        if [[ $new_phase == "Err"* ]]; then
+            exit 1
+        fi
+    done
+    apache_ovn=$(kubectl get pod $apache_pod_name -o jsonpath="{.metadata.annotations.ovn}")
+    nginx_ovn=$(kubectl get pod $nginx_pod_name -o jsonpath="{.metadata.annotations.ovn}")
+
+    echo $apache_ovn
+    if [[ $apache_ovn != *"\"ip_address\":\"11.11."* ]]; then
+        exit 1
+    fi
+
+    echo $nginx_ovn
+    if [[ $nginx_ovn != *"\"ip_address\":\"11.11."* ]]; then
+        exit 1
+    fi
+fi
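ovn-kubernetes records each pod's logical-switch allocation in the ovn annotation that the test greps; 11.11. is the overlay subnet prefix this environment hands to OVN. To inspect the whole allocation — assuming the annotation is valid JSON, as the ip_address substring match implies:

    kubectl get pod $apache_pod_name -o jsonpath='{.metadata.annotations.ovn}' | jq .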
diff --git a/vagrant/tests/plugin.sh b/vagrant/tests/plugin.sh
new file mode 100755
index 00000000..a40cb60c
--- /dev/null
+++ b/vagrant/tests/plugin.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+base_url="http://localhost:8081/v1/vnf_instances/"
+cloud_region_id="krd"
+namespace="default"
+csar_id="94e414f6-9ca4-11e8-bb6a-52540067263b"
+
+if [[ -z $(docker images -q generic_sim) ]]; then
+    BUILD_ARGS="--no-cache"
+    # default expansions keep 'set -o nounset' from aborting when the
+    # proxy variables are not exported
+    if [ "${HTTP_PROXY:-}" ]; then
+        BUILD_ARGS+=" --build-arg HTTP_PROXY=${HTTP_PROXY}"
+    fi
+    if [ "${HTTPS_PROXY:-}" ]; then
+        BUILD_ARGS+=" --build-arg HTTPS_PROXY=${HTTPS_PROXY}"
+    fi
+    pushd generic_simulator
+    docker build ${BUILD_ARGS} -t generic_sim:latest .
+    popd
+fi
+
+if [[ $(docker ps -q --all --filter "name=aai") ]]; then
+    docker rm aai -f
+fi
+docker run --name aai -v $(pwd)/output:/tmp/generic_sim/ -v $(pwd)/generic_simulator/aai/:/etc/generic_sim/ -p 8443:8080 -d generic_sim
+
+vnf_id_list=$(curl -s "${base_url}${cloud_region_id}/${namespace}" | jq -r '.vnf_id_list')
+
+mkdir -p ${CSAR_DIR}/${csar_id}
+cat << SEQ > ${CSAR_DIR}/${csar_id}/sequence.yaml
+deployment:
+  - deployment.yaml
+service:
+  - service.yaml
+SEQ
+cat << DEPLOYMENT > ${CSAR_DIR}/${csar_id}/deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: multus-deployment
+  labels:
+    app: multus
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: multus
+  template:
+    metadata:
+      labels:
+        app: multus
+      annotations:
+        kubernetes.v1.cni.cncf.io/networks: '[
+            { "name": "bridge-conf", "interfaceRequest": "eth1" },
+            { "name": "bridge-conf", "interfaceRequest": "eth2" }
+        ]'
+    spec:
+      containers:
+      - name: multus-deployment
+        image: "busybox"
+        command: ["top"]
+        stdin: true
+        tty: true
+DEPLOYMENT
+cat << SERVICE > ${CSAR_DIR}/${csar_id}/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: sise-svc
+spec:
+  ports:
+  - port: 80
+    protocol: TCP
+  selector:
+    app: sise
+SERVICE
+
+payload_raw="
+{
+    \"cloud_region_id\": \"$cloud_region_id\",
+    \"namespace\": \"$namespace\",
+    \"csar_id\": \"$csar_id\"
+}
+"
+payload=$(echo $payload_raw | tr '\n' ' ')
+curl -v -X POST -d "$payload" "${base_url}"
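The POST above asks the k8s plugin to instantiate the CSAR against the mocked A&AI. Only the list endpoint is exercised by this script; the response field and delete route below are assumptions, sketched to show the natural round-trip:

    vnf_id=$(curl -s -X POST -d "$payload" "${base_url}" | jq -r '.vnf_id')   # field name assumed
    curl -s "${base_url}${cloud_region_id}/${namespace}" | jq -r '.vnf_id_list'
    curl -s -X DELETE "${base_url}${cloud_region_id}/${namespace}/${vnf_id}"  # route shape assumed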
diff --git a/vagrant/tests/virtlet.sh b/vagrant/tests/virtlet.sh
new file mode 100755
index 00000000..a8af071f
--- /dev/null
+++ b/vagrant/tests/virtlet.sh
@@ -0,0 +1,145 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+rm -f $HOME/*.yaml
+
+virtlet_image=virtlet.cloud/fedora
+pod_name=virtlet-pod
+deployment_name=virtlet-deployment
+
+cat << POD > $HOME/$pod_name.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: $pod_name
+  annotations:
+    # This tells CRI Proxy that this pod belongs to Virtlet runtime
+    kubernetes.io/target-runtime: virtlet.cloud
+    VirtletCloudInitUserDataScript: |
+      #!/bin/sh
+      echo hello world
+spec:
+  # This nodeAffinity specification tells Kubernetes to run this
+  # pod only on the nodes that have extraRuntime=virtlet label.
+  # This label is used by Virtlet DaemonSet to select nodes
+  # that must have Virtlet runtime
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+        - matchExpressions:
+          - key: extraRuntime
+            operator: In
+            values:
+            - virtlet
+  containers:
+  - name: $pod_name
+    # This specifies the image to use.
+    # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
+    # of the image name is prepended with https:// and used to download the image
+    image: $virtlet_image
+    imagePullPolicy: IfNotPresent
+    # tty and stdin required for "kubectl attach -t" to work
+    tty: true
+    stdin: true
+    resources:
+      limits:
+        # This memory limit is applied to the libvirt domain definition
+        memory: 160Mi
+POD
+
+cat << DEPLOYMENT > $HOME/$deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: $deployment_name
+  labels:
+    app: virtlet
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: virtlet
+  template:
+    metadata:
+      labels:
+        app: virtlet
+      annotations:
+        # This tells CRI Proxy that this pod belongs to Virtlet runtime
+        kubernetes.io/target-runtime: virtlet.cloud
+        VirtletCloudInitUserDataScript: |
+          #!/bin/sh
+          echo hello world
+    spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: extraRuntime
+                operator: In
+                values:
+                - virtlet
+      containers:
+      - name: $deployment_name
+        # This specifies the image to use.
+        # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
+        # of the image name is prepended with https:// and used to download the image
+        image: $virtlet_image
+        imagePullPolicy: IfNotPresent
+        # tty and stdin required for "kubectl attach -t" to work
+        tty: true
+        stdin: true
+        resources:
+          limits:
+            # This memory limit is applied to the libvirt domain definition
+            memory: 160Mi
+DEPLOYMENT
+
+if kubectl version &>/dev/null; then
+    kubectl delete pod $pod_name --ignore-not-found=true --now
+    kubectl delete deployment $deployment_name --ignore-not-found=true --now
+    while kubectl get pod $pod_name &>/dev/null; do
+        sleep 5
+    done
+    kubectl create -f $HOME/$pod_name.yaml
+    while kubectl get deployment $deployment_name &>/dev/null; do
+        sleep 5
+    done
+    kubectl create -f $HOME/$deployment_name.yaml
+    sleep 5
+
+    deployment_pod=$(kubectl get pods | grep $deployment_name | awk '{print $1}')
+    for pod in $pod_name $deployment_pod; do
+        status_phase=""
+        while [[ $status_phase != "Running" ]]; do
+            new_phase=$(kubectl get pods $pod | awk 'NR==2{print $3}')
+            if [[ $new_phase != $status_phase ]]; then
+                echo "$(date +%H:%M:%S) - $pod : $new_phase"
+                status_phase=$new_phase
+            fi
+            if [[ $new_phase == "Err"* ]]; then
+                exit 1
+            fi
+        done
+    done
+
+    kubectl plugin virt virsh list
+    for pod in $pod_name $deployment_name; do
+        # '|| true' keeps errexit from aborting before the emptiness check
+        virsh_image=$(kubectl plugin virt virsh list | grep "virtlet-.*-$pod" || true)
+        if [[ -z "$virsh_image" ]]; then
+            exit 1
+        fi
+    done
+fi
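Because Virtlet pods are libvirt domains, the usual VM tooling applies on top of kubectl; the cloud-init script above just echoes hello world, which lands in the boot console. Two quick checks, using the same virt kubectl plugin the script already relies on:

    kubectl attach -it $pod_name   # serial console of the VM pod
    kubectl plugin virt virsh dumpxml $(kubectl plugin virt virsh list | grep "$pod_name" | awk '{print $2}')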