From 14c7c32e293fffb92b6aab7a81c446eaeb087cde Mon Sep 17 00:00:00 2001
From: "pawel.denst"
Date: Thu, 4 May 2023 09:03:17 +0000
Subject: Migration of the healthchecks to gerrit

Set a default directory for results when none is provided

Issue-ID: INT-2226

Signed-off-by: pawel.denst
Change-Id: I5337a55f3271ebb5e58298e1fb1aa3b665713909
---
 src/onaptests/scenario/resources.py                 | 155 +++
 src/onaptests/scenario/status.py                    | 739 +++++++++++++++++++++
 src/onaptests/templates/status/base.html.j2         | 261 ++++
 .../templates/status/container_log.html.j2          | 104 +++
 .../templates/status/container_versions.html.j2     |  38 ++
 src/onaptests/templates/status/daemonset.html.j2    |  15 +
 src/onaptests/templates/status/deployment.html.j2   |  15 +
 src/onaptests/templates/status/index.html.j2        | 376 +++++++++++
 src/onaptests/templates/status/job.html.j2          |  15 +
 src/onaptests/templates/status/pod.html.j2          | 101 +++
 src/onaptests/templates/status/raw_output.txt.j2    |  28 +
 src/onaptests/templates/status/replicaset.html.j2   |  15 +
 src/onaptests/templates/status/service.html.j2      |  45 ++
 src/onaptests/templates/status/statefulset.html.j2  |  15 +
 src/onaptests/templates/status/version.html.j2      |  28 +
 15 files changed, 1950 insertions(+)
 create mode 100644 src/onaptests/scenario/resources.py
 create mode 100644 src/onaptests/scenario/status.py
 create mode 100644 src/onaptests/templates/status/base.html.j2
 create mode 100644 src/onaptests/templates/status/container_log.html.j2
 create mode 100644 src/onaptests/templates/status/container_versions.html.j2
 create mode 100644 src/onaptests/templates/status/daemonset.html.j2
 create mode 100644 src/onaptests/templates/status/deployment.html.j2
 create mode 100644 src/onaptests/templates/status/index.html.j2
 create mode 100644 src/onaptests/templates/status/job.html.j2
 create mode 100644 src/onaptests/templates/status/pod.html.j2
 create mode 100644 src/onaptests/templates/status/raw_output.txt.j2
 create mode 100644 src/onaptests/templates/status/replicaset.html.j2
 create mode 100644 src/onaptests/templates/status/service.html.j2
 create mode 100644 src/onaptests/templates/status/statefulset.html.j2
 create mode 100644 src/onaptests/templates/status/version.html.j2

diff --git a/src/onaptests/scenario/resources.py b/src/onaptests/scenario/resources.py
new file mode 100644
index 0000000..2b0352e
--- /dev/null
+++ b/src/onaptests/scenario/resources.py
@@ -0,0 +1,155 @@
+"""Resources module."""
+
+
+class K8sResource:
+    """K8sResource class."""
+
+    def __init__(self, k8s=None):
+        """Init the k8s resource."""
+        self.k8s = k8s
+        self.name = ""
+        self.events = []
+        if self.k8s:
+            self.name = self.k8s.metadata.name
+            self.specific_k8s_init()
+
+    def specific_k8s_init(self):
+        """Do the specific part for k8s resource when k8s object is present."""
+
+    def __repr__(self):
+        return self.name
+
+    def __str__(self):
+        return self.name
+
+    def __eq__(self, other):
+        if isinstance(other, K8sResource):
+            return self.name == other.name
+        return False
+
+
+class K8sPodParentResource(K8sResource):
+    """K8sPodParentResource class."""
+
+    def __init__(self, k8s=None):
+        """Init the k8s pod parent resource."""
+        self.pods = []
+        self.failed_pods = 0
+        super().__init__(k8s=k8s)
+
+
+class Pod(K8sResource):
+    """Pod class."""
+
+    def __init__(self, k8s=None):
+        """Init the pod."""
+        self.containers = []
+        self.init_containers = []
+        self.running_containers = 0
+        self.runned_init_containers = 0
+        self.volumes = {}
+        self.restart_count = 0
+        self.init_restart_count = 0
+        self.init_done = True
+        super().__init__(k8s=k8s)
+
+    def specific_k8s_init(self):
+        """Specific k8s init."""
+        self.set_volumes(self.k8s.spec.volumes)
+
+    def set_volumes(self, volumes):
+        """Generate the volume list."""
+        for volume in volumes:
+            volume_name = volume.name
+            self.volumes[volume_name] = {}
+            for volume_type in volume.attribute_map:
+                if volume_type != "name" and getattr(volume, volume_type):
+                    self._parse_volume_type(volume, volume_name, volume_type)
+
+    def _parse_volume_type(self, volume, name, volume_type):
+        """Parse volume type information."""
+        self.volumes[name][volume_type] = {}
+        infos = getattr(volume, volume_type)
+        for details in infos.attribute_map:
+            self.volumes[name][volume_type][details] = getattr(infos, details)
+
+    def ready(self):
+        """Calculate if Pod is ready."""
+        if self.init_done and self.running_containers == len(self.containers):
+            return True
+        return False
+
+
+class Container:
+    """Container class."""
+
+    def __init__(self, name=""):
+        """Init the container."""
+        self.name = name
+        self.status = ""
+        self.ready = False
+        self.restart_count = 0
+        self.image = ""
+
+    def set_status(self, status):
+        """Generate status for container."""
+        if status.running:
+            self.status = "Running"
+        elif status.terminated:
+            self.status = "Terminated ({})".format(status.terminated.reason)
+        elif status.waiting:
+            self.status = "Waiting ({})".format(status.waiting.reason)
+        else:
+            self.status = "Unknown"
+
+
+class Service(K8sPodParentResource):
+    """Service class."""
+
+    def __init__(self, k8s=None):
+        """Init the service."""
+        self.type = ""
+        super().__init__(k8s=k8s)
+
+    def specific_k8s_init(self):
+        """Do the specific part for service when k8s object is present."""
+        self.type = self.k8s.spec.type
+
+
+class Job(K8sPodParentResource):
+    """Job class."""
+
+
+class Deployment(K8sPodParentResource):
+    """Deployment class."""
+
+
+class ReplicaSet(K8sPodParentResource):
+    """ReplicaSet class."""
+
+
+class StatefulSet(K8sPodParentResource):
+    """StatefulSet class."""
+
+
+class DaemonSet(K8sPodParentResource):
+    """DaemonSet class."""
+
+
+class Pvc(K8sResource):
+    """Pvc class."""
+
+
+class ConfigMap(K8sResource):
+    """ConfigMap class."""
+
+
+class Secret(K8sResource):
+    """Secret class."""
+
+
+class Ingress(K8sResource):
+    """Ingress class."""
\ No newline at end of file
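
For illustration — a minimal sketch, not part of the patch, showing the readiness contract these wrappers expose (the pod and container names are hypothetical):

    from onaptests.scenario.resources import Container, Pod

    pod = Pod()                        # no k8s object: name stays "", volume parsing is skipped
    pod.name = "demo-pod"              # hypothetical name
    container = Container(name="app")  # hypothetical container
    container.ready = True
    pod.containers.append(container)
    pod.running_containers += 1
    print(pod.ready())                 # True: init_done defaults to True, 1/1 containers running

Pod.ready() is the single predicate the status checks below rely on: all init containers done and all app containers running.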
diff --git a/src/onaptests/scenario/status.py b/src/onaptests/scenario/status.py
new file mode 100644
index 0000000..c8aea74
--- /dev/null
+++ b/src/onaptests/scenario/status.py
@@ -0,0 +1,739 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: Apache-2.0
+import json
+import logging
+import os
+import re
+import time
+
+from natural.date import delta
+from xtesting.core import testcase
+from kubernetes import client, config
+from kubernetes.stream import stream
+from urllib3.exceptions import MaxRetryError, NewConnectionError
+from jinja2 import Environment, PackageLoader, select_autoescape
+
+from onaptests.scenario.resources import Pod, Container, Service, Job
+from onaptests.scenario.resources import Deployment, StatefulSet, DaemonSet, Pvc, ReplicaSet
+from onaptests.scenario.resources import ConfigMap, Secret, Ingress
+
+NAMESPACE = os.getenv('K8S_NAMESPACE', 'onap')
+FULL_LOGS_CONTAINERS = [
+    'dcae-bootstrap', 'dcae-cloudify-manager', 'aai-resources',
+    'aai-traversal', 'aai-modelloader', 'sdnc', 'so', 'so-bpmn-infra',
+    'so-openstack-adapter', 'so-sdc-controller', 'mariadb-galera', 'sdc-be',
+    'sdc-fe'
+]
+
+# patterns to be excluded from the check
+WAIVER_LIST = ['integration']
+
+SPECIFIC_LOGS_CONTAINERS = {
+    'sdc-onboarding-be': ['/var/log/onap/sdc/sdc-onboarding-be/error.log'],
+    'aaf-cm': [
+        '/opt/app/osaaf/logs/cm/cm-service.log',
+        '/opt/app/osaaf/logs/cm/cm-init.log'
+    ],
+    'aaf-fs': [
+        '/opt/app/osaaf/logs/fs/fs-service.log',
+        '/opt/app/osaaf/logs/fs/fs-init.log'
+    ],
+    'aaf-locate': [
+        '/opt/app/osaaf/logs/locate/locate-service.log',
+        '/opt/app/osaaf/logs/locate/locate-init.log'
+    ],
+    'aaf-service': [
+        '/opt/app/osaaf/logs/service/authz-service.log',
+        '/opt/app/osaaf/logs/service/authz-init.log'
+    ],
+    'sdc-be': [
+        '/var/log/onap/sdc/sdc-be/debug.log',
+        '/var/log/onap/sdc/sdc-be/error.log'
+    ],
+    'sdc-fe': [
+        '/var/log/onap/sdc/sdc-fe/debug.log',
+        '/var/log/onap/sdc/sdc-fe/error.log'
+    ],
+    'vid': [
+        '/var/log/onap/vid/audit.log',
+        '/var/log/onap/vid/application.log',
+        '/var/log/onap/vid/debug.log',
+        '/var/log/onap/vid/error.log'
+    ],
+}
+
+DOCKER_REPOSITORIES = [
+    'nexus3.onap.org:10001', 'docker.elastic.co', 'docker.io', 'library',
+    'registry.gitlab.com', 'registry.hub.docker.com', 'k8s.gcr.io', 'gcr.io'
+]
+DOCKER_REPOSITORIES_NICKNAMES = {
+    'nexus3.onap.org:10001': 'onap',
+    'docker.elastic.co': 'elastic',
+    'docker.io': 'dockerHub (docker.io)',
+    'registry.hub.docker.com': 'dockerHub (registry)',
+    'registry.gitlab.com': 'gitlab',
+    'library': 'dockerHub (library)',
+    'default': 'dockerHub',
+    'k8s.gcr.io': 'google (k8s.gcr)',
+    'gcr.io': 'google (gcr)'
+}
+
+GENERIC_NAMES = {
+    'postgreSQL': ['crunchydata/crunchy-postgres', 'postgres'],
+    'mariadb': ['adfinissygroup/k8s-mariadb-galera-centos', 'mariadb'],
+    'elasticsearch': [
+        'bitnami/elasticsearch', 'elasticsearch/elasticsearch',
+        'onap/clamp-dashboard-elasticsearch'
+    ],
+    'nginx': ['bitnami/nginx', 'nginx'],
+    'cassandra': [
+        'cassandra', 'onap/music/cassandra_3_11', 'onap/music/cassandra_music',
+        'onap/aaf/aaf_cass'
+    ],
+    'zookeeper': ['google_samples/k8szk', 'onap/dmaap/zookeeper', 'zookeeper'],
+    'redis': [
+        'onap/vfc/db',
+        'onap/org.onap.dcaegen2.deployments.redis-cluster-container'
+    ],
+    'consul': ['consul', 'oomk8s/consul'],
+    'rabbitmq': ['ansible/awx_rabbitmq', 'rabbitmq']
+}
+
+MAX_LOG_BYTES = 512000
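
For illustration — a sketch, not part of the patch, of how SPECIFIC_LOGS_CONTAINERS is consumed: each listed file is `cat`-ed inside the container over the Kubernetes exec API. It assumes a reachable cluster; the pod name is hypothetical:

    from kubernetes import client, config
    from kubernetes.stream import stream

    from onaptests.scenario.status import NAMESPACE, SPECIFIC_LOGS_CONTAINERS

    config.load_kube_config()
    core = client.CoreV1Api()
    for log_file in SPECIFIC_LOGS_CONTAINERS['sdc-be']:
        output = stream(core.connect_get_namespaced_pod_exec,
                        'dev-sdc-be-0', NAMESPACE,  # hypothetical pod name
                        container='sdc-be',
                        command=['/bin/sh', '-c', 'cat {}'.format(log_file)],
                        stderr=True, stdin=False, stdout=True, tty=False)
        print(log_file, len(output), 'bytes')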
+
+
+class Status(testcase.TestCase):
+    """Retrieve status of Kubernetes resources."""
+
+    __logger = logging.getLogger(__name__)
+
+    def __init__(self, kubeconfig=None, dir_result=None, **kwargs):
+        """Init the testcase."""
+        if "case_name" not in kwargs:
+            kwargs["case_name"] = 'namespace_status'
+        super().__init__(**kwargs)
+        if kubeconfig is not None:
+            config.load_kube_config(config_file=kubeconfig)
+        else:
+            config.load_kube_config()
+        self.core = client.CoreV1Api()
+        self.batch = client.BatchV1Api()
+        self.app = client.AppsV1Api()
+        self.networking = client.NetworkingV1Api()
+        if dir_result:
+            self.res_dir = f"{dir_result}/kubernetes-status"
+        else:
+            self.res_dir = f"{self.dir_results}/kubernetes-status"
+
+        self.__logger.debug("namespace status init started")
+        self.start_time = None
+        self.stop_time = None
+        self.result = 0
+        self.pods = []
+        self.services = []
+        self.jobs = []
+        self.deployments = []
+        self.replicasets = []
+        self.statefulsets = []
+        self.daemonsets = []
+        self.pvcs = []
+        self.configmaps = []
+        self.secrets = []
+        self.ingresses = []
+        self.details = {}
+
+    def run(self):
+        """Run tests."""
+        self.start_time = time.time()
+        os.makedirs(self.res_dir, exist_ok=True)
+        self.__logger.debug("start test")
+        try:
+            self.k8s_pods = self.core.list_namespaced_pod(NAMESPACE).items
+            self.__logger.info("%4s Pods in the namespace", len(self.k8s_pods))
+
+            self.k8s_jobs = self.batch.list_namespaced_job(NAMESPACE).items
+            self.__logger.info("%4s Jobs in the namespace", len(self.k8s_jobs))
+
+            self.k8s_deployments = self.app.list_namespaced_deployment(
+                NAMESPACE).items
+            self.__logger.info("%4s Deployments in the namespace",
+                               len(self.k8s_deployments))
+
+            self.k8s_replicasets = self.app.list_namespaced_replica_set(
+                NAMESPACE).items
+            self.__logger.info("%4s Replicasets in the namespace",
+                               len(self.k8s_replicasets))
+
+            self.k8s_statefulsets = self.app.list_namespaced_stateful_set(
+                NAMESPACE).items
+            self.__logger.info("%4s StatefulSets in the namespace",
+                               len(self.k8s_statefulsets))
+
+            self.k8s_daemonsets = self.app.list_namespaced_daemon_set(
+                NAMESPACE).items
+            self.__logger.info("%4s DaemonSets in the namespace",
+                               len(self.k8s_daemonsets))
+
+            self.k8s_services = self.core.list_namespaced_service(
+                NAMESPACE).items
+            self.__logger.info("%4s Services in the namespace",
+                               len(self.k8s_services))
+
+            self.k8s_pvcs = self.core.list_namespaced_persistent_volume_claim(
+                NAMESPACE).items
+            self.__logger.info("%4s PVCs in the namespace",
+                               len(self.k8s_pvcs))
+
+            self.k8s_configmaps = self.core.list_namespaced_config_map(
+                NAMESPACE).items
+            self.__logger.info("%4s ConfigMaps in the namespace",
+                               len(self.k8s_configmaps))
+
+            self.k8s_secrets = self.core.list_namespaced_secret(
+                NAMESPACE).items
+            self.__logger.info("%4s Secrets in the namespace",
+                               len(self.k8s_secrets))
+
+            self.k8s_ingresses = self.networking.list_namespaced_ingress(
+                NAMESPACE).items
+            self.__logger.info("%4s Ingresses in the namespace",
+                               len(self.k8s_ingresses))
+        except (ConnectionRefusedError, MaxRetryError, NewConnectionError):
+            self.__logger.error("namespace status test failed.")
+            self.__logger.error("cannot connect to Kubernetes.")
+            return testcase.TestCase.EX_TESTCASE_FAILED
+
+        self.failing_statefulsets = []
+        self.failing_jobs = []
+        self.failing_deployments = []
+        self.failing_replicasets = []
+        self.failing_daemonsets = []
+        self.failing_pvcs = []
+        self.failing = False
+
+        self.jinja_env = Environment(autoescape=select_autoescape(['html']),
+                                     loader=PackageLoader(
+                                         'onaptests.templates', 'status'))
+        self.parse_services()
+        jobs_pods = self.parse_jobs()
+        self.parse_pods(excluded_pods=jobs_pods)
+        self.parse_deployments()
+        self.parse_replicasets()
+        self.parse_statefulsets()
+        self.parse_daemonsets()
+        self.parse_pvcs()
+        self.parse_configmaps()
+        self.parse_secrets()
+        self.parse_ingresses()
+        self.parse_versions()
+
+        self.jinja_env.get_template('index.html.j2').stream(
+            ns=self,
+            delta=delta).dump('{}/index.html'.format(self.res_dir))
+        self.jinja_env.get_template('raw_output.txt.j2').stream(
+            ns=self, namespace=NAMESPACE).dump('{}/onap-k8s.log'.format(
+                self.res_dir))
+
+        self.stop_time = time.time()
+        if len(self.jobs) > 0:
+            self.details['jobs'] = {
+                'number': len(self.jobs),
+                'number_failing': len(self.failing_jobs),
+                'failing': self.map_by_name(self.failing_jobs)
+            }
+        if len(self.deployments) > 0:
+            self.details['deployments'] = {
+                'number': len(self.deployments),
+                'number_failing': len(self.failing_deployments),
+                'failing': self.map_by_name(self.failing_deployments)
+            }
+        if len(self.replicasets) > 0:
+            self.details['replicasets'] = {
+                'number': len(self.replicasets),
+                'number_failing': len(self.failing_replicasets),
+                'failing': self.map_by_name(self.failing_replicasets)
+            }
+        if len(self.statefulsets) > 0:
+            self.details['statefulsets'] = {
+                'number': len(self.statefulsets),
+                'number_failing': len(self.failing_statefulsets),
+                'failing': self.map_by_name(self.failing_statefulsets)
+            }
+        if len(self.daemonsets) > 0:
+            self.details['daemonsets'] = {
+                'number': len(self.daemonsets),
+                'number_failing': len(self.failing_daemonsets),
+                'failing': self.map_by_name(self.failing_daemonsets)
+            }
+        if len(self.pvcs) > 0:
+            self.details['pvcs'] = {
+                'number': len(self.pvcs),
+                'number_failing': len(self.failing_pvcs),
+                'failing': self.map_by_name(self.failing_pvcs)
+            }
+        if self.failing:
+            self.__logger.error("namespace status test failed.")
+            self.__logger.error("number of errored Jobs: %s",
+                                len(self.failing_jobs))
+            self.__logger.error("number of errored Deployments: %s",
+                                len(self.failing_deployments))
+            self.__logger.error("number of errored Replicasets: %s",
+                                len(self.failing_replicasets))
+            self.__logger.error("number of errored StatefulSets: %s",
+                                len(self.failing_statefulsets))
+            self.__logger.error("number of errored DaemonSets: %s",
+                                len(self.failing_daemonsets))
+            self.__logger.error("number of errored PVCs: %s",
+                                len(self.failing_pvcs))
+            return testcase.TestCase.EX_TESTCASE_FAILED
+
+        self.result = 100
+        return testcase.TestCase.EX_OK
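
A usage sketch, not part of the patch: this is how the testcase can be driven standalone. Per the commit message, dir_result is optional and the reports fall back under xtesting's default results directory when it is None; the paths below are assumptions:

    import logging

    from onaptests.scenario.status import Status

    logging.basicConfig(level=logging.INFO)
    status = Status(dir_result='/tmp/results')  # or dir_result=None for the default
    if status.run() == Status.EX_OK:
        print('healthcheck reports written under', status.res_dir)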
+
+    def parse_pods(self, excluded_pods=None):
+        """Parse the pods status."""
+        self.__logger.info("%4s pods to parse", len(self.k8s_pods))
+        for k8s in self.k8s_pods:
+            pod = Pod(k8s=k8s)
+
+            if excluded_pods and pod in excluded_pods:
+                continue
+
+            if k8s.status.init_container_statuses:
+                for k8s_container in k8s.status.init_container_statuses:
+                    pod.runned_init_containers += self.parse_container(
+                        pod, k8s_container, init=True)
+            if k8s.status.container_statuses:
+                for k8s_container in k8s.status.container_statuses:
+                    pod.running_containers += self.parse_container(
+                        pod, k8s_container)
+            pod.events = self.core.list_namespaced_event(
+                NAMESPACE,
+                field_selector="involvedObject.name={}".format(pod.name)).items
+            self.jinja_env.get_template('pod.html.j2').stream(pod=pod).dump(
+                '{}/pod-{}.html'.format(self.res_dir, pod.name))
+            if any(waiver_elt in pod.name for waiver_elt in WAIVER_LIST):
+                self.__logger.warning(
+                    "Waiver pattern found in pod, exclude %s", pod.name)
+            else:
+                self.pods.append(pod)
+
+    def parse_container(self, pod, k8s_container, init=False):
+        """Get the logs of a container."""
+        logs = ""
+        old_logs = ""
+        prefix = ""
+        containers_list = pod.containers
+        container = Container(name=k8s_container.name)
+        container.restart_count = k8s_container.restart_count
+        container.set_status(k8s_container.state)
+        container.ready = k8s_container.ready
+        container.image = k8s_container.image
+        if init:
+            prefix = "init "
+            containers_list = pod.init_containers
+            if container.restart_count > pod.init_restart_count:
+                pod.init_restart_count = container.restart_count
+            if not container.ready:
+                pod.init_done = False
+        else:
+            if container.restart_count > pod.restart_count:
+                pod.restart_count = container.restart_count
+
+        try:
+            log_files = {}
+            logs = ""
+            try:
+                logs = self.core.read_namespaced_pod_log(
+                    pod.name,
+                    NAMESPACE,
+                    container=container.name,
+                    limit_bytes=MAX_LOG_BYTES,
+                )
+            except UnicodeDecodeError:
+                logs = "{} has a unicode decode error...".format(pod.name)
+                self.__logger.error(
+                    "%s has a unicode decode error in the logs...", pod.name)
+            with open(
+                    "{}/pod-{}-{}.log".format(self.res_dir,
+                                              pod.name, container.name),
+                    'w') as log_result:
+                log_result.write(logs)
+            if (not container.ready) and container.restart_count > 0:
+                old_logs = self.core.read_namespaced_pod_log(
+                    pod.name,
+                    NAMESPACE,
+                    container=container.name,
+                    previous=True)
+                with open(
+                        "{}/pod-{}-{}.old.log".format(self.res_dir,
+                                                      pod.name,
+                                                      container.name),
+                        'w') as log_result:
+                    log_result.write(old_logs)
+            if container.name in FULL_LOGS_CONTAINERS:
+                logs = self.core.read_namespaced_pod_log(
+                    pod.name, NAMESPACE, container=container.name)
+                with open(
+                        "{}/pod-{}-{}.log".format(self.res_dir,
+                                                  pod.name, container.name),
+                        'w') as log_result:
+                    log_result.write(logs)
+            if container.name in SPECIFIC_LOGS_CONTAINERS:
+                for log_file in SPECIFIC_LOGS_CONTAINERS[container.name]:
+                    exec_command = ['/bin/sh', '-c', "cat {}".format(log_file)]
+                    log_files[log_file] = stream(
+                        self.core.connect_get_namespaced_pod_exec,
+                        pod.name,
+                        NAMESPACE,
+                        container=container.name,
+                        command=exec_command,
+                        stderr=True,
+                        stdin=False,
+                        stdout=True,
+                        tty=False)
+                    log_file_slug = log_file.split('.')[0].split('/')[-1]
+                    with open(
+                            "{}/pod-{}-{}-{}.log".format(
+                                self.res_dir, pod.name,
+                                container.name, log_file_slug),
+                            'w') as log_result:
+                        log_result.write(log_files[log_file])
+        except client.rest.ApiException as exc:
+            self.__logger.warning(
+                "%scontainer %s of pod %s has an exception: %s",
+                prefix, container.name, pod.name, exc.reason)
+        self.jinja_env.get_template('container_log.html.j2').stream(
+            container=container,
+            pod_name=pod.name,
+            logs=logs,
+            old_logs=old_logs,
+            log_files=log_files).dump('{}/pod-{}-{}-logs.html'.format(
+                self.res_dir, pod.name, container.name))
+        if any(waiver_elt in container.name for waiver_elt in WAIVER_LIST):
+            self.__logger.warning(
+                "Waiver pattern found in container, exclude %s",
+                container.name)
+        else:
+            containers_list.append(container)
+        if k8s_container.ready:
+            return 1
+        return 0
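
For reference, a sketch (not part of the patch) of the three read_namespaced_pod_log() variants used above: a capped default capture, the previous-instance logs after a restart, and the uncapped dump reserved for FULL_LOGS_CONTAINERS. Pod and container names are hypothetical:

    from kubernetes import client, config

    from onaptests.scenario.status import MAX_LOG_BYTES, NAMESPACE

    config.load_kube_config()
    core = client.CoreV1Api()
    capped = core.read_namespaced_pod_log('dev-aai-resources-0', NAMESPACE,
                                          container='aai-resources',
                                          limit_bytes=MAX_LOG_BYTES)
    previous = core.read_namespaced_pod_log('dev-aai-resources-0', NAMESPACE,
                                            container='aai-resources',
                                            previous=True)  # last crashed instance
    full = core.read_namespaced_pod_log('dev-aai-resources-0', NAMESPACE,
                                        container='aai-resources')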
+
+    def parse_services(self):
+        """Parse the services."""
+        self.__logger.info("%4s services to parse", len(self.k8s_services))
+        for k8s in self.k8s_services:
+            service = Service(k8s=k8s)
+
+            (service.pods,
+             service.failed_pods) = self._find_child_pods(k8s.spec.selector)
+
+            self.jinja_env.get_template('service.html.j2').stream(
+                service=service).dump('{}/service-{}.html'.format(
+                    self.res_dir, service.name))
+            self.services.append(service)
+
+    def parse_jobs(self):
+        """Parse the jobs.
+
+        Return a list of Pods that were created to perform jobs.
+        """
+        self.__logger.info("%4s jobs to parse", len(self.k8s_jobs))
+        jobs_pods = []
+        for k8s in self.k8s_jobs:
+            job = Job(k8s=k8s)
+            job_pods = []
+
+            if k8s.spec.selector and k8s.spec.selector.match_labels:
+                (job.pods, job.failed_pods) = self._find_child_pods(
+                    k8s.spec.selector.match_labels)
+                job_pods += job.pods
+            field_selector = "involvedObject.name={}".format(job.name)
+            field_selector += ",involvedObject.kind=Job"
+            job.events = self.core.list_namespaced_event(
+                NAMESPACE,
+                field_selector=field_selector).items
+
+            self.jinja_env.get_template('job.html.j2').stream(job=job).dump(
+                '{}/job-{}.html'.format(self.res_dir, job.name))
+
+            # timed-out job
+            if not k8s.status.completion_time:
+                self.__logger.warning("a Job is in error: %s", job.name)
+                if all(waiver_elt not in job.name
+                       for waiver_elt in WAIVER_LIST):
+                    self.failing_jobs.append(job)
+                    self.failing = True
+            # completed job
+            if all(waiver_elt not in job.name for waiver_elt in WAIVER_LIST):
+                self.jobs.append(job)
+            jobs_pods += job_pods
+        return jobs_pods
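
A small sketch, not part of the patch, of the completion test parse_jobs() applies: a V1Job that never finished has no status.completion_time, which is exactly what flags it as failing here:

    from kubernetes import client, config

    from onaptests.scenario.status import NAMESPACE

    config.load_kube_config()
    for job in client.BatchV1Api().list_namespaced_job(NAMESPACE).items:
        state = 'completed' if job.status.completion_time else 'not completed'
        print(job.metadata.name, state)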
+ """ + self.__logger.info("%4s jobs to parse", len(self.k8s_jobs)) + jobs_pods = [] + for i in range(len(self.k8s_jobs)): + k8s = self.k8s_jobs[i] + job = Job(k8s=k8s) + job_pods = [] + + if k8s.spec.selector and k8s.spec.selector.match_labels: + (job.pods, job.failed_pods) = self._find_child_pods( + k8s.spec.selector.match_labels) + job_pods += job.pods + field_selector = "involvedObject.name={}".format(job.name) + field_selector += ",involvedObject.kind=Job" + job.events = self.core.list_namespaced_event( + NAMESPACE, + field_selector=field_selector).items + + self.jinja_env.get_template('job.html.j2').stream(job=job).dump( + '{}/job-{}.html'.format(self.res_dir, job.name)) + + # timemout job + if not k8s.status.completion_time: + self.__logger.warning("a Job is in error: {}".format(job.name)) + if any( + waiver_elt not in job.name for waiver_elt in WAIVER_LIST): + self.failing_jobs.append(job) + self.failing = True + # completed job + if any(waiver_elt not in job.name for waiver_elt in WAIVER_LIST): + self.jobs.append(job) + jobs_pods += job_pods + return jobs_pods + + def parse_deployments(self): + """Parse the deployments.""" + self.__logger.info("%4s deployments to parse", + len(self.k8s_deployments)) + for i in range(len(self.k8s_deployments)): + k8s = self.k8s_deployments[i] + deployment = Deployment(k8s=k8s) + + if k8s.spec.selector and k8s.spec.selector.match_labels: + (deployment.pods, + deployment.failed_pods) = self._find_child_pods( + k8s.spec.selector.match_labels) + field_selector = "involvedObject.name={}".format(deployment.name) + field_selector += ",involvedObject.kind=Deployment" + deployment.events = self.core.list_namespaced_event( + NAMESPACE, + field_selector=field_selector).items + + self.jinja_env.get_template('deployment.html.j2').stream( + deployment=deployment).dump('{}/deployment-{}.html'.format( + self.res_dir, deployment.name)) + + if k8s.status.unavailable_replicas: + self.__logger.warning("a Deployment is in error: {}".format(deployment.name)) + self.failing_deployments.append(deployment) + self.failing = True + + self.deployments.append(deployment) + + def parse_replicasets(self): + """Parse the replicasets.""" + self.__logger.info("%4s replicasets to parse", + len(self.k8s_replicasets)) + for i in range(len(self.k8s_replicasets)): + k8s = self.k8s_replicasets[i] + replicaset = ReplicaSet(k8s=k8s) + + if k8s.spec.selector and k8s.spec.selector.match_labels: + (replicaset.pods, + replicaset.failed_pods) = self._find_child_pods( + k8s.spec.selector.match_labels) + field_selector = "involvedObject.name={}".format(replicaset.name) + field_selector += ",involvedObject.kind=ReplicaSet" + replicaset.events = self.core.list_namespaced_event( + NAMESPACE, + field_selector=field_selector).items + + self.jinja_env.get_template('replicaset.html.j2').stream( + replicaset=replicaset).dump('{}/replicaset-{}.html'.format( + self.res_dir, replicaset.name)) + + if (not k8s.status.ready_replicas + or (k8s.status.ready_replicas < k8s.status.replicas)): + self.__logger.warning("a ReplicaSet is in error: {}".format(replicaset.name)) + self.failing_replicasets.append(replicaset) + self.failing = True + + self.replicasets.append(replicaset) + + def parse_statefulsets(self): + """Parse the statefulsets.""" + self.__logger.info("%4s statefulsets to parse", + len(self.k8s_statefulsets)) + for i in range(len(self.k8s_statefulsets)): + k8s = self.k8s_statefulsets[i] + statefulset = StatefulSet(k8s=k8s) + + if k8s.spec.selector and k8s.spec.selector.match_labels: + (statefulset.pods, 
+
+    def parse_daemonsets(self):
+        """Parse the daemonsets."""
+        self.__logger.info("%4s daemonsets to parse", len(self.k8s_daemonsets))
+        for k8s in self.k8s_daemonsets:
+            daemonset = DaemonSet(k8s=k8s)
+
+            if k8s.spec.selector and k8s.spec.selector.match_labels:
+                (daemonset.pods,
+                 daemonset.failed_pods) = self._find_child_pods(
+                     k8s.spec.selector.match_labels)
+            field_selector = "involvedObject.name={}".format(daemonset.name)
+            field_selector += ",involvedObject.kind=DaemonSet"
+            daemonset.events = self.core.list_namespaced_event(
+                NAMESPACE,
+                field_selector=field_selector).items
+
+            self.jinja_env.get_template('daemonset.html.j2').stream(
+                daemonset=daemonset).dump('{}/daemonset-{}.html'.format(
+                    self.res_dir, daemonset.name))
+
+            if k8s.status.number_ready < k8s.status.desired_number_scheduled:
+                self.__logger.warning("a DaemonSet is in error: %s",
+                                      daemonset.name)
+                self.failing_daemonsets.append(daemonset)
+                self.failing = True
+
+            self.daemonsets.append(daemonset)
+
+    def parse_pvcs(self):
+        """Parse the persistent volume claims."""
+        self.__logger.info("%4s pvcs to parse", len(self.k8s_pvcs))
+        for k8s in self.k8s_pvcs:
+            pvc = Pvc(k8s=k8s)
+            field_selector = (f"involvedObject.name={pvc.name},"
+                              "involvedObject.kind=PersistentVolumeClaim")
+            pvc.events = self.core.list_namespaced_event(
+                NAMESPACE,
+                field_selector=field_selector).items
+
+            if k8s.status.phase != "Bound":
+                self.__logger.warning("a PVC is in error: %s", pvc.name)
+                self.failing_pvcs.append(pvc)
+                self.failing = True
+
+            self.pvcs.append(pvc)
+
+    def parse_configmaps(self):
+        """Parse the config maps."""
+        self.__logger.info("%4s config maps to parse",
+                           len(self.k8s_configmaps))
+        for k8s in self.k8s_configmaps:
+            configmap = ConfigMap(k8s=k8s)
+            self.configmaps.append(configmap)
+
+    def parse_secrets(self):
+        """Parse the secrets."""
+        self.__logger.info("%4s secrets to parse", len(self.k8s_secrets))
+        for k8s in self.k8s_secrets:
+            secret = Secret(k8s=k8s)
+            self.secrets.append(secret)
+
+    def parse_ingresses(self):
+        """Parse the ingresses."""
+        self.__logger.info("%4s ingresses to parse", len(self.k8s_ingresses))
+        for k8s in self.k8s_ingresses:
+            ingress = Ingress(k8s=k8s)
+            self.ingresses.append(ingress)
+
+    def parse_versions(self):
+        """Parse the versions of the pods."""
+        self.__logger.info("%4s pods to parse", len(self.k8s_pods))
+        pod_versions = []
+        containers = {}
+        for pod in self.k8s_pods:
+            pod_component = pod.metadata.name
+            if 'app' in pod.metadata.labels:
+                pod_component = pod.metadata.labels['app']
+            elif 'app.kubernetes.io/name' in pod.metadata.labels:
+                pod_component = pod.metadata.labels['app.kubernetes.io/name']
+            else:
+                self.__logger.error(
+                    "pod %s has no 'app' or 'app.kubernetes.io/name' in "
+                    "metadata: %s", pod_component, pod.metadata.labels)
+
+            # looks for docker version
+            for container in pod.spec.containers:
+                pod_version = {}
+                pod_container_version = container.image.rsplit(":", 1)
+                pod_container_image = pod_container_version[0]
+                pod_container_tag = "latest"
+                if len(pod_container_version) > 1:
+                    pod_container_tag = pod_container_version[1]
+
+                pod_version.update({
+                    'container': container.name,
+                    'component': pod_component,
+                    'image': pod_container_image,
+                    'version': pod_container_tag
+                })
+                pod_versions.append(pod_version)
+
+                search_rule = \
+                    "^(?P<source>[^/]*)/*(?P<container>[^:]*):*(?P<version>.*)$"
+                search = re.search(search_rule, container.image)
+                name = "{}/{}".format(search.group('source'),
+                                      search.group('container'))
+                version = search.group('version')
+                if name[-1] == '/':
+                    name = name[:-1]
+                source = "default"
+                if search.group('source') in DOCKER_REPOSITORIES:
+                    source = search.group('source')
+                    name = search.group('container')
+                container_search_rule = "^library/(?P<real_container>[^:]*)$"
+                container_search = re.search(container_search_rule, name)
+                if container_search:
+                    name = container_search.group('real_container')
+                for common_component, generic_images in GENERIC_NAMES.items():
+                    if name in generic_images:
+                        version = "{}:{}".format(name, version)
+                        name = common_component
+                        break
+
+                repository = DOCKER_REPOSITORIES_NICKNAMES[source]
+                if name in containers:
+                    if version in containers[name]['versions']:
+                        if not (pod_component in containers[name]['versions']
+                                [version]['components']):
+                            containers[name]['versions'][version][
+                                'components'].append(pod_component)
+                            containers[name]['number_components'] += 1
+                        if not (repository in containers[name]['versions']
+                                [version]['repositories']):
+                            containers[name]['versions'][version][
+                                'repositories'].append(repository)
+                    else:
+                        containers[name]['versions'][version] = {
+                            'repositories': [repository],
+                            'components': [pod_component]
+                        }
+                        containers[name]['number_components'] += 1
+                else:
+                    containers[name] = {
+                        'versions': {
+                            version: {
+                                'repositories': [repository],
+                                'components': [pod_component]
+                            }
+                        },
+                        'number_components': 1
+                    }
+
+        self.jinja_env.get_template('version.html.j2').stream(
+            pod_versions=pod_versions).dump('{}/versions.html'.format(
+                self.res_dir))
+        self.jinja_env.get_template('container_versions.html.j2').stream(
+            containers=containers).dump('{}/container_versions.html'.format(
+                self.res_dir))
+        # create a json file for version tracking
+        with open(self.res_dir + "/onap_versions.json", "w") as write_file:
+            json.dump(pod_versions, write_file)
+
+    def _find_child_pods(self, selector):
+        pods_list = []
+        failed_pods = 0
+        if selector:
+            raw_selector = ','.join(
+                '{}={}'.format(key, value) for key, value in selector.items())
+            pods = self.core.list_namespaced_pod(
+                NAMESPACE, label_selector=raw_selector).items
+            for pod in pods:
+                for known_pod in self.pods:
+                    if known_pod.name == pod.metadata.name:
+                        pods_list.append(known_pod)
+                        if not known_pod.ready():
+                            failed_pods += 1
+        return (pods_list, failed_pods)
+
+    def map_by_name(self, resources):
+        return [resource.name for resource in resources]
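
To make the image-reference parsing in parse_versions() concrete, a standalone sketch (not part of the patch) of the regex on three representative images:

    import re

    SEARCH_RULE = r"^(?P<source>[^/]*)/*(?P<container>[^:]*):*(?P<version>.*)$"
    for image in ('nexus3.onap.org:10001/onap/aai-resources:1.9.2',
                  'library/postgres:13',
                  'nginx'):
        match = re.search(SEARCH_RULE, image)
        print(match.group('source'), '|', match.group('container'),
              '|', match.group('version'))

The first path element is treated as a registry only when it appears in DOCKER_REPOSITORIES; a bare name such as 'nginx' lands entirely in the 'source' group, which is why the code above falls back to the 'default' repository nickname for it.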
diff --git a/src/onaptests/templates/status/base.html.j2 b/src/onaptests/templates/status/base.html.j2
new file mode 100644
index 0000000..41e55de
--- /dev/null
+++ b/src/onaptests/templates/status/base.html.j2
@@ -0,0 +1,261 @@
+{% macro color(failing, total) %}
+{% if failing == 0 %}
+is-success
+{% else %}
+{% if (failing / total) <= 0.1 %}
+is-warning
+{% else %}
+is-danger
+{% endif %}
+{% endif %}
+{% endmacro %}
+
+{% macro percentage(failing, total) %}
+{{ ((total - failing) / total * 100) | round }}
+{% endmacro %}
+
+{% macro statistic(resource_name, failing, total) %}
+{% set success = total - failing %}
+
+
+

{{ resource_name | capitalize }}

+

{{ success }}/{{ total }}

+ {{ percentage(failing, total) }} +
+
+{% endmacro %} + +{% macro pods_table(pods) %} +
+ + + + + + + + + + + + {% for pod in pods %} + + + {% if pod.init_done %} + + {% else %} + + {% endif %} + + + {% if pod.init_done %} + + {% else %} + + {% endif %} + + {% endfor %} + +
NameReadyStatusReasonRestarts
{{ pod.k8s.metadata.name }}{{ pod.running_containers }}/{{ (pod.containers | length) }}Init:{{ pod.runned_init_containers }}/{{ (pod.init_containers | length) }}{{ pod.k8s.status.phase }}{{ pod.k8s.status.reason }}{{ pod.restart_count }}{{ pod.init_restart_count }}
+
+{% endmacro %} + +{% macro key_value_description_list(title, dict) %} +
{{ title | capitalize }}:
+
+ {% if dict %} + {% for key, value in dict.items() %} + {% if loop.first %} +
+ {% endif %} +
{{ key }}:
+
{{ value }}
+ {% if loop.last %} +
+ {% endif %} + {% endfor %} + {% endif %} +
+{% endmacro %} + +{% macro description(k8s) %} +
+

Description

+
+
+ {% if k8s.spec.type %} +
Type:
+
{{ k8s.spec.type }}
+ {% if (k8s.spec.type | lower) == "clusterip" %} +
Headless:
+
{% if (k8s.spec.cluster_ip | lower) == "none" %}Yes{% else %}No{% endif %}
+ {% endif %} + {% endif %} + {{ key_value_description_list('Labels', k8s.metadata.labels) | indent(width=6) }} + {{ key_value_description_list('Annotations', k8s.metadata.annotations) | indent(width=6) }} + {% if k8s.spec.selector %} + {% if k8s.spec.selector.match_labels %} + {{ key_value_description_list('Selector', k8s.spec.selector.match_labels) | indent(width=6) }} + {% else %} + {{ key_value_description_list('Selector', k8s.spec.selector) | indent(width=6) }} + {% endif %} + {% endif %} + {% if k8s.phase %} +
Status:
+
{{ k8s.phase }}
+ {% endif %} + {% if k8s.metadata.owner_references %} +
Controlled By:
+
{{ k8s.metadata.owner_references[0].kind }}/{{ k8s.metadata.owner_references[0].name }}
+ {% endif %} +
+
+
+{% endmacro %} + +{% macro pods_container(pods, parent, has_title=True) %} +
+ {% if has_title %} +

Pods

+ {% endif %} + {% if (pods | length) > 0 %} + {{ pods_table(pods) | indent(width=2) }} + {% else %} +
{{ parent }} has no pods!
+ {% endif %} +
+{% endmacro %} + +{% macro two_level_breadcrumb(title, name) %} +
+ +
+{% endmacro %} + +{% macro pod_parent_summary(title, name, failed_pods, pods) %} +{{ summary(title, name, [{'title': 'Pod', 'failing': failed_pods, 'total': (pods | length)}]) }} +{% endmacro %} + +{% macro number_ok(number, none_value, total=None) %} +{% if number %} +{% if total and number < total %} +{{ number }} +{% else %} +{{ number }} +{% endif %} +{% else %} +{{ none_value }} +{% endif %} +{% endmacro %} + +{% macro summary(title, name, statistics) %} +
+
+
+

+ {{ title | capitalize }} {{ name }} Summary +

+ +
+
+
+{% endmacro %} + +{% macro events(events) %} +{% if events %} +
+

Events

+
+ + + + + + + + + + + {% for event in events %} + + + + + + + {% endfor %} + +
TypeCountReasonMessage
{{ event.type }}{{ event.count }}{{ event.reason }}{{ event.message }}
+
+
+{% endif %} +{% endmacro %} + + + + + + + Tests results - {% block title %}{% endblock %} + + + {% block more_head %}{% endblock %} + + + + + {% block content %}{% endblock %} + + + + diff --git a/src/onaptests/templates/status/container_log.html.j2 b/src/onaptests/templates/status/container_log.html.j2 new file mode 100644 index 0000000..454dee7 --- /dev/null +++ b/src/onaptests/templates/status/container_log.html.j2 @@ -0,0 +1,104 @@ +{% extends "base.html.j2" %} +{% block title %}Container {{ container.name }} from pod {{ pod_name }} logs{% endblock %} +{% block content %} +
+ +
+
+
+

+ Results +

+

+ By type +

+
+
    +
  • Logs
  • + {% if old_logs %} +
  • Previous Logs
  • + {% endif %} + {% if log_files %} + {% for file in log_files %} +
  • {{ file }}
  • + {% endfor %} + {% endif %} +
+
+
+
+
+ +
+
+          {{ logs }}
+        
+      
+ {% if old_logs %} +
+
+ +
+
+          {{ old_logs }}
+        
+      
+ {% endif %} + {% if log_files %} + {% for file in log_files %} +
+
+ +
+
+          {{ log_files[file] }}
+        
+      
+ {% endfor %} + {% endif %} +
+{% endblock %} + +{% block more_head %} + +{% endblock %} diff --git a/src/onaptests/templates/status/container_versions.html.j2 b/src/onaptests/templates/status/container_versions.html.j2 new file mode 100644 index 0000000..d3d283d --- /dev/null +++ b/src/onaptests/templates/status/container_versions.html.j2 @@ -0,0 +1,38 @@ + +{% extends "base.html.j2" %} +{% block title %}ONAP Docker Versions{% endblock %} + +{% block content %} +
+
+

ONAP Docker versions

+ + + + + + + + + + + {% for container in containers.keys()|sort %} + + + {% for version in containers[container]['versions'].keys()|sort %} + + + {% for component in containers[container]['versions'][version]['components']|sort %} + {% if not loop.first %} + + {% endif %} + + + {% endfor %} + {% endfor %} + {% endfor %} + +
ContainerVersionRepositoriesComponents using it
{{ container }}{{ version }}{% for repository in containers[container]['versions'][version]['repositories'] %}{{ repository }}{% if not loop.last %}, {% endif %}{% endfor %}
{{ component }}
+
+
+{% endblock %} diff --git a/src/onaptests/templates/status/daemonset.html.j2 b/src/onaptests/templates/status/daemonset.html.j2 new file mode 100644 index 0000000..2d76280 --- /dev/null +++ b/src/onaptests/templates/status/daemonset.html.j2 @@ -0,0 +1,15 @@ +{% extends "base.html.j2" %} +{% block title %}DaemonSet {{ daemonset.name }}{% endblock %} +{% block content %} + {{ two_level_breadcrumb('DaemonSet', daemonset.name) | indent(width=4) }} + + {{ pod_parent_summary('DaemonSet', daemonset.name, daemonset.failed_pods, daemonset.pods) }} + +
+ {{ description(daemonset.k8s) | indent(width=6) }} + + {{ pods_container(daemonset.pods, "DaemonSet") | indent(width=6) }} + + {{ events(daemonset.events) }} +
+{% endblock %} diff --git a/src/onaptests/templates/status/deployment.html.j2 b/src/onaptests/templates/status/deployment.html.j2 new file mode 100644 index 0000000..53a0bbb --- /dev/null +++ b/src/onaptests/templates/status/deployment.html.j2 @@ -0,0 +1,15 @@ +{% extends "base.html.j2" %} +{% block title %}Deployment {{ deployment.name }}{% endblock %} +{% block content %} + {{ two_level_breadcrumb('Deployment', deployment.name) | indent(width=4) }} + + {{ pod_parent_summary('Deployment', deployment.name, deployment.failed_pods, deployment.pods) }} + +
+ {{ description(deployment.k8s) | indent(width=6) }} + + {{ pods_container(deployment.pods, "Deployment") | indent(width=6) }} + + {{ events(deployment.events) }} +
+{% endblock %} diff --git a/src/onaptests/templates/status/index.html.j2 b/src/onaptests/templates/status/index.html.j2 new file mode 100644 index 0000000..fe49abf --- /dev/null +++ b/src/onaptests/templates/status/index.html.j2 @@ -0,0 +1,376 @@ +{% extends "base.html.j2" %} +{% block title %}Summary{% endblock %} +{% block content %} +
+
+ +
+
+ + {{ summary('Results', "", [ + { 'title': 'Jobs', 'failing': (ns.failing_jobs | length), 'total': (ns.jobs | length)}, + { 'title': 'Deployments', 'failing': (ns.failing_deployments | length), 'total': (ns.deployments | length)}, + { 'title': 'Replicasets', 'failing': (ns.failing_replicasets | length), 'total': (ns.replicasets | length)}, + { 'title': 'StatefulSets', 'failing': (ns.failing_statefulsets | length), 'total': (ns.statefulsets | length)}, + { 'title': 'DaemonSets', 'failing': (ns.failing_daemonsets | length), 'total': (ns.daemonsets | length)}, + { 'title': 'Persistent Volume Claims', 'failing': (ns.failing_pvcs | length), 'total': (ns.pvcs | length)}]) + }} + +
+
+

+ Results +

+

+ By type +

+
+
    +
  • Pods
  • +
  • Services
  • + {% if (ns.jobs | length) > 0 %} +
  • Jobs
  • + {% endif %} + {% if (ns.deployments | length) > 0 %} +
  • Deployments
  • + {% endif %} + {% if (ns.replicasets | length) > 0 %} +
  • Replicasets
  • + {% endif %} + {% if (ns.statefulsets | length) > 0 %} +
  • StatefulSets
  • + {% endif %} + {% if (ns.daemonsets | length) > 0 %} +
  • DaemonSets
  • + {% endif %} + {% if (ns.pvcs | length) > 0 %} +
  • Persistent Volume Claims
  • + {% endif %} + {% if (ns.configmaps | length) > 0 %} +
  • Config Maps
  • + {% endif %} + {% if (ns.secrets | length) > 0 %} +
  • Secrets
  • + {% endif %} + {% if (ns.ingresses | length) > 0 %} +
  • Ingresses
  • + {% endif %} +
+
+ + + {{ pods_container(ns.pods, "Namespace", has_title=False) | indent(width=6) }} + + +
+ + + + + + + + + + + {% for service in ns.services %} + + + + + + + {% endfor %} + +
NameTypePortsPods selected
{{ service.name }}{{ service.type }} + {% if service.k8s.spec.ports %} + {% for port in service.k8s.spec.ports %} + {{ port.port }}{% if port.node_port %}:{{ port.node_port }}{% endif %}/{{ port.protocol }}{% if not loop.last %},{% endif %} + {% endfor %} + {% else %} + No Ports! + {% endif %} + {% if (service.pods | length) > 0 %}{{ service.pods | length }}{% else %}0{% endif %}
+
+ + {% if (ns.jobs | length) > 0 %} + +
+ + + + + + + + + + {% for job in ns.jobs %} + + + + + + {% endfor %} + +
NameCompletionsDuration
{{ job.name }}{% if job.k8s.status.succeeded %}{{ job.k8s.status.succeeded }}{% else %}0{% endif %}/{{ job.k8s.spec.completions }}{% if job.k8s.status.completion_time %}{{ delta(job.k8s.status.completion_time, job.k8s.status.start_time)[0] }}{% else %}N/A{% endif %}
+
+ {% endif %} + + {% if (ns.deployments | length) > 0 %} + +
+ + + + + + + + + + + {% for deployment in ns.deployments %} + + + + + + + {% endfor %} + +
NameReadyUp to DateAvailable
{{ deployment.name }}{% if deployment.k8s.status.ready_replicas %}{{ deployment.k8s.status.ready_replicas }}{% else %}0{% endif %}/{{ deployment.k8s.spec.replicas }}{{ number_ok(deployment.k8s.status.updated_replicas, '0', total=deployment.k8s.spec.replicas) }}{{ number_ok(deployment.k8s.status.available_replicas, '0', total=deployment.k8s.spec.replicas) }}
+
+ {% endif %} + + {% if (ns.replicasets | length) > 0 %} + +
+ + + + + + + + + + {% for rs in ns.replicasets %} + + + + + + {% endfor %} + +
NameReadyAvailable
{{ rs.name }}{% if rs.k8s.status.ready_replicas %}{{ rs.k8s.status.ready_replicas }}{% else %}0{% endif %}/{{ rs.k8s.spec.replicas }}{{ number_ok(rs.k8s.status.available_replicas, '0', total=rs.k8s.spec.replicas) }}
+
+ {% endif %} + + {% if (ns.statefulsets | length) > 0 %} + +
+ + + + + + + + + + {% for sts in ns.statefulsets %} + + + + + + {% endfor %} + +
NameReadyUp to Date
{{ sts.name }}{% if sts.k8s.status.ready_replicas %}{{ sts.k8s.status.ready_replicas }}{% else %}0{% endif %}/{{ sts.k8s.spec.replicas }}{{ number_ok(sts.k8s.status.updated_replicas, '0', total=sts.k8s.spec.replicas) }}
+
+ {% endif %} + + {% if (ns.daemonsets | length) > 0 %} + +
+ + + + + + + + + + + + + {% for ds in ns.daemonsets %} + + + + + + + + + {% endfor %} + +
NameDesiredCurrentReadyUp to DateAvailable
{{ ds.name }}{{ ds.k8s.status.desired_number_scheduled }}{{ number_ok(ds.k8s.status.current_number_scheduled, '0', total=ds.k8s.status.desired_number_scheduled) }}{{ number_ok(ds.k8s.status.number_ready, '0', total=ds.k8s.status.desired_number_scheduled) }}{{ number_ok(ds.k8s.status.updated_number_scheduled, '0', total=ds.k8s.status.desired_number_scheduled) }}{{ number_ok(ds.k8s.status.number_available, '0', total=ds.k8s.status.desired_number_scheduled) }}
+
+ {% endif %} + + {% if (ns.pvcs | length) > 0 %} + +
+ + + + + + + + + + + + + {% for pvc in ns.pvcs %} + + + + + + + + + {% endfor %} + +
NameStatusVolumeCapacityAccess ModesStorage Class
{{ pvc.name }}{% if (pvc.k8s.status.phase | lower) == "bound" %}{{ pvc.k8s.status.phase }}{% else %}{{ pvc.k8s.status.phase }}{% endif %}{% if pvc.k8s.spec.volume_name %}{{ pvc.k8s.spec.volume_name }}{% endif %}{% if pvc.k8s.status.capacity %}{{ pvc.k8s.status.capacity.storage }}{% endif %}{% if pvc.k8s.status.access_modes %}{{ pvc.k8s.status.access_modes | join(', ') }}{% endif %}{% if pvc.k8s.spec.storage_class_name %}{{ pvc.k8s.spec.storage_class_name }}{% endif %}
+
+ {% endif %} + + {% if (ns.configmaps | length) > 0 %} + +
+ + + + + + + + {% for cm in ns.configmaps %} + + + + {% endfor %} + +
Name
{{ cm.name }}
+
+ {% endif %} + + {% if (ns.secrets | length) > 0 %} + +
+ + + + + + + + {% for secret in ns.secrets %} + + + + {% endfor %} + +
Name
{{ secret.name }}
+
+ {% endif %} + + {% if (ns.ingresses | length) > 0 %} +
+ {% endif %} + +
+{% endblock %} + +{% block more_head %} + +{% endblock %} diff --git a/src/onaptests/templates/status/job.html.j2 b/src/onaptests/templates/status/job.html.j2 new file mode 100644 index 0000000..7915ff2 --- /dev/null +++ b/src/onaptests/templates/status/job.html.j2 @@ -0,0 +1,15 @@ +{% extends "base.html.j2" %} +{% block title %}Job {{ job.name }}{% endblock %} +{% block content %} + {{ two_level_breadcrumb('Job', job.name) | indent(width=4) }} + + {{ pod_parent_summary('Job', job.name, job.failed_pods, job.pods) }} + +
+ {{ description(job.k8s) | indent(width=6) }} + + {{ pods_container(job.pods, "Job") | indent(width=6) }} + + {{ events(job.events) }} +
+{% endblock %} diff --git a/src/onaptests/templates/status/pod.html.j2 b/src/onaptests/templates/status/pod.html.j2 new file mode 100644 index 0000000..d922206 --- /dev/null +++ b/src/onaptests/templates/status/pod.html.j2 @@ -0,0 +1,101 @@ +{% macro container_table(title, containers_list) %} +
+

{{ title }}

+
+ + + + + + + + + + + {% for container in containers_list %} + + + + + + + + {% endfor %} + +
NameImageStateReadyRestart Count
{{ container.name }}{{ container.image }}{{ container.status }}{{ container.ready }}{{ container.restart_count }}
+
+
+{% endmacro %} + +{% extends "base.html.j2" %} +{% block title %}Pod {{ pod.name }}{% endblock %} +{% block content %} + {{ two_level_breadcrumb('Pod', pod.name) | indent(width=4) }} + + {{ summary('Pod', pod.name, [ + { + 'title': 'Init containers', + 'failing': ((pod.init_containers | length) - pod.runned_init_containers), + 'total': (pod.init_containers | length) + }, + { + 'title': 'Containers', + 'failing': ((pod.containers | length) - pod.running_containers), + 'total': (pod.containers | length) + }]) + }} + +
+ {{ description(pod.k8s) | indent(width=6) }} + + {% if (pod.init_containers | length) > 0 %} + {{ container_table("Init Containers", pod.init_containers) | indent(width=6) }} + {% endif %} + + + {% if (pod.containers | length) > 0 %} + {{ container_table("Containers", pod.containers) | indent(width=8) }} + {% endif %} + + {% if pod.k8s.spec.volumes %} +
+

Volumes

+
+ + + + + + + + + + {% for volume_name, volume in pod.volumes.items() %} + {% for volume_type, details in volume.items() %} + + + + + + {% endfor %} + {% endfor %} + +
NameTypeProperties
{{ volume_name }}{{ volume_type }} + + + {% for key, value in details.items() %} + + + + + {% endfor %} + +
{{ key }}{{ value }}
+
+
+
+ {% endif %} + + {{ events(pod.events) }} +
+{% endblock %} diff --git a/src/onaptests/templates/status/raw_output.txt.j2 b/src/onaptests/templates/status/raw_output.txt.j2 new file mode 100644 index 0000000..1c52531 --- /dev/null +++ b/src/onaptests/templates/status/raw_output.txt.j2 @@ -0,0 +1,28 @@ +{%- macro statistic(resource_name, failing, total, failing_list) %} +>>> Nb {{ resource_name }}: {{ total }} +>>> Nb Failed {{ resource_name }}: {{ failing }} +{%- if failing > 0 %} +>>> List of Failed {{ resource_name }}: [{{ failing_list | map(attribute='name') | join(", ") }}] +{%- endif %} +{%- endmacro %} +------------------------------------------------ +------- {{ namespace }} kubernetes tests ------------------ +------------------------------------------------ +{%- if (ns.jobs | length) > 0 %} +{{ statistic("Jobs", (ns.failing_jobs | length), (ns.jobs | length), ns.failing_jobs) }} +{%- endif %} +{%- if (ns.deployments | length) > 0 %} +{{ statistic("Deployments", (ns.failing_deployments | length), (ns.deployments | length), ns.failing_deployments) }} +{%- endif %} +{%- if (ns.statefulsets | length) > 0 %} +{{ statistic("StatefulSets", (ns.failing_statefulsets | length), (ns.statefulsets | length), ns.failing_statefulsets) }} +{%- endif %} +{%- if (ns.daemonsets | length) > 0 %} +{{ statistic("DaemonSets", (ns.failing_daemonsets | length), (ns.daemonsets | length), ns.failing_daemonsets) }} +{%- endif %} +{%- if (ns.pvcs | length) > 0 %} +{{ statistic("Persistent Volume Claims", (ns.failing_pvcs | length), (ns.pvcs | length), ns.failing_pvcs) }} +{%- endif %} +------------------------------------------------ +------------------------------------------------ +------------------------------------------------ diff --git a/src/onaptests/templates/status/replicaset.html.j2 b/src/onaptests/templates/status/replicaset.html.j2 new file mode 100644 index 0000000..f26f2fd --- /dev/null +++ b/src/onaptests/templates/status/replicaset.html.j2 @@ -0,0 +1,15 @@ +{% extends "base.html.j2" %} +{% block title %}ReplicaSet {{ replicaset.name }}{% endblock %} +{% block content %} + {{ two_level_breadcrumb('ReplicaSet', replicaset.name) | indent(width=4) }} + + {{ pod_parent_summary('ReplicaSet', replicaset.name, replicaset.failed_pods, replicaset.pods) }} + +
+ {{ description(replicaset.k8s) | indent(width=6) }} + + {{ pods_container(replicaset.pods, "ReplicaSet") | indent(width=6) }} + + {{ events(replicaset.events) }} +
+{% endblock %} diff --git a/src/onaptests/templates/status/service.html.j2 b/src/onaptests/templates/status/service.html.j2 new file mode 100644 index 0000000..31b239a --- /dev/null +++ b/src/onaptests/templates/status/service.html.j2 @@ -0,0 +1,45 @@ +{% extends "base.html.j2" %} +{% block title %}Service {{ service.name }}{% endblock %} +{% block content %} + {{ two_level_breadcrumb('Service', service.name) | indent(width=4) }} + + {{ pod_parent_summary('Service', service.name, service.failed_pods, service.pods) }} + +
+ {{ description(service.k8s) | indent(width=6) }} + + {{ pods_container(service.pods, "Service") | indent(width=6) }} + +
+

Ports

+ {% if service.k8s.spec.ports %} +
+ + + + + + + + + + + + {% for port in service.k8s.spec.ports %} + + + + + + + + {% endfor %} + +
NamePortNode PortTarget PortProtocol
{{ port.name }}{{ port.port }}{{ port.node_port }}{{ port.target_port }}{{ port.protocol }}
+
+ {% else %} +
Service has no ports!
+ {% endif %} +
+
+{% endblock %} diff --git a/src/onaptests/templates/status/statefulset.html.j2 b/src/onaptests/templates/status/statefulset.html.j2 new file mode 100644 index 0000000..1aac8eb --- /dev/null +++ b/src/onaptests/templates/status/statefulset.html.j2 @@ -0,0 +1,15 @@ +{% extends "base.html.j2" %} +{% block title %}StatefulSet {{ statefulset.name }}{% endblock %} +{% block content %} + {{ two_level_breadcrumb('StatefulSet', statefulset.name) | indent(width=4) }} + + {{ pod_parent_summary('StatefulSet', statefulset.name, statefulset.failed_pods, statefulset.pods) }} + +
+ {{ description(statefulset.k8s) | indent(width=6) }} + + {{ pods_container(statefulset.pods, "StatefulSet") | indent(width=6) }} + + {{ events(statefulset.events) }} +
+{% endblock %} diff --git a/src/onaptests/templates/status/version.html.j2 b/src/onaptests/templates/status/version.html.j2 new file mode 100644 index 0000000..40348a4 --- /dev/null +++ b/src/onaptests/templates/status/version.html.j2 @@ -0,0 +1,28 @@ + +{% extends "base.html.j2" %} +{% block title %}ONAP Docker Versions{% endblock %} + +{% block content %} +

ONAP Docker versions

+ + + + + + + + + + + + {% for pod in pod_versions %} + + + + + + + {% endfor %} + +
ComponentContainerImageVersion
{{ pod.component }}{{ pod.container }}{{ pod.image }}{{ pod.version }}
+{% endblock %} -- cgit 1.2.3-korg