author    Lukasz Rajewski <lukasz.rajewski@t-mobile.pl>  2023-06-20 18:18:39 +0000
committer Lukasz Rajewski <lukasz.rajewski@t-mobile.pl>  2023-06-21 14:52:08 +0000
commit    d553ee4c89f602263f7a973654244f61b07fbca5 (patch)
tree      1b4ff17e9cb6cba19adbba9dc2550fe1d2129346
parent    66cb2f9168c8a8f0c19e662491019d01f16de146 (diff)
Namespace status verification 2.0

- separate steps per resource type
- optional version check
- new file with details of failed resources

Issue-ID: INT-2246
Signed-off-by: Lukasz Rajewski <lukasz.rajewski@t-mobile.pl>
Change-Id: I538f877ab4d0824fd8dd5e65255c7ecf9545a26b
-rw-r--r--  run_status.py                                      11
-rw-r--r--  src/onaptests/configuration/status_settings.py     91
-rw-r--r--  src/onaptests/steps/cloud/check_status.py         1012
-rw-r--r--  src/onaptests/templates/status/index.html.j2        18
4 files changed, 635 insertions, 497 deletions
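
The change splits the former monolithic namespace check into one step per Kubernetes resource type. A minimal sketch of the resulting wiring, using only class names that appear in the check_status.py diff below (a simplification of CheckNamespaceStatusStep.__init__, which also loads the kube config before creating the steps):

from onaptests.steps.cloud.check_status import (
    CheckK8sConfigMapsStep, CheckK8sDaemonSetsStep, CheckK8sDeploymentsStep,
    CheckK8sIngressesStep, CheckK8sJobsStep, CheckK8sPodsStep, CheckK8sPvcsStep,
    CheckK8sResplicaSetsStep, CheckK8sSecretsStep, CheckK8sServicesStep,
    CheckK8sStatefulSetsStep)

# Jobs are parsed first so that job-owned pods can be excluded from the pod
# check; pod-backed resources then reuse the pod step's parsed results.
jobs = CheckK8sJobsStep()
pods = CheckK8sPodsStep(jobs)
sub_steps = [
    jobs, pods,
    CheckK8sServicesStep(pods), CheckK8sDeploymentsStep(pods),
    CheckK8sResplicaSetsStep(pods), CheckK8sStatefulSetsStep(pods),
    CheckK8sDaemonSetsStep(pods), CheckK8sConfigMapsStep(),
    CheckK8sSecretsStep(), CheckK8sIngressesStep(), CheckK8sPvcsStep(),
]
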
diff --git a/run_status.py b/run_status.py
index 0c3aaf1..82c42c9 100644
--- a/run_status.py
+++ b/run_status.py
@@ -1,8 +1,7 @@
import logging.config
import onaptests.utils.exceptions as onap_test_exceptions
from onapsdk.configuration import settings
-from onaptests.steps.cloud.check_status import CheckNamespaceStatusStep
-
+from onaptests.scenario.status import Status
if __name__ == "__main__":
@@ -11,11 +10,9 @@ if __name__ == "__main__":
logging.config.dictConfig(settings.LOG_CONFIG)
logger = logging.getLogger("Status Check")
- status = CheckNamespaceStatusStep(
- cleanup=settings.CLEANUP_FLAG)
+ status = Status()
try:
- status.execute()
- status.cleanup()
+ status.run()
+ status.clean()
except onap_test_exceptions.TestConfigurationException:
logger.error("Status Check configuration error")
-    status.reports_collection.generate_report()
\ No newline at end of file
diff --git a/src/onaptests/configuration/status_settings.py b/src/onaptests/configuration/status_settings.py
index 9b15a99..686bb3c 100644
--- a/src/onaptests/configuration/status_settings.py
+++ b/src/onaptests/configuration/status_settings.py
@@ -1,8 +1,95 @@
from .settings import * # pylint: disable=W0614
-""" Specific Status Check settings."""
+""" Specific Status Check """
SERVICE_NAME = "Status Check"
SERVICE_DETAILS = "Checks status of all k8s resources in the selected namespace"
SERVICE_COMPONENTS = "ALL"
STATUS_RESULTS_DIRECTORY = "/tmp"
-STORE_ARTIFACTS = True
\ No newline at end of file
+STORE_ARTIFACTS = True
+CHECK_POD_VERSIONS = True
+IGNORE_EMPTY_REPLICAS = False
+STATUS_DETAILS_JSON = "status-details.json"
+
+FULL_LOGS_CONTAINERS = [
+ 'dcae-bootstrap', 'dcae-cloudify-manager', 'aai-resources',
+ 'aai-traversal', 'aai-modelloader', 'sdnc', 'so', 'so-bpmn-infra',
+ 'so-openstack-adapter', 'so-sdc-controller', 'mariadb-galera', 'sdc-be',
+ 'sdc-fe'
+]
+
+# patterns to be excluded from the check
+WAIVER_LIST = ['integration']
+
+SPECIFIC_LOGS_CONTAINERS = {
+ 'sdc-be': ['/var/log/onap/sdc/sdc-be/error.log'],
+ 'sdc-onboarding-be': ['/var/log/onap/sdc/sdc-onboarding-be/error.log'],
+ 'aaf-cm': [
+ '/opt/app/osaaf/logs/cm/cm-service.log',
+ '/opt/app/osaaf/logs/cm/cm-init.log'
+ ],
+ 'aaf-fs': [
+ '/opt/app/osaaf/logs/fs/fs-service.log',
+ '/opt/app/osaaf/logs/fs/fs-init.log'
+ ],
+ 'aaf-locate': [
+ '/opt/app/osaaf/logs/locate/locate-service.log',
+ '/opt/app/osaaf/logs/locate/locate-init.log'
+ ],
+ 'aaf-service': [
+ '/opt/app/osaaf/logs/service/authz-service.log',
+ '/opt/app/osaaf/logs/service/authz-init.log'
+ ],
+ 'sdc-be': [
+ '/var/log/onap/sdc/sdc-be/debug.log',
+ '/var/log/onap/sdc/sdc-be/error.log'
+ ],
+ 'sdc-fe': [
+ '/var/log/onap/sdc/sdc-fe/debug.log',
+ '/var/log/onap/sdc/sdc-fe/error.log'
+ ],
+ 'vid': [
+ '/var/log/onap/vid/audit.log',
+ '/var/log/onap/vid/application.log',
+ '/var/log/onap/vid/debug.log',
+ '/var/log/onap/vid/error.log'
+ ],
+}
+
+DOCKER_REPOSITORIES = [
+ 'nexus3.onap.org:10001', 'docker.elastic.co', 'docker.io', 'library',
+ 'registry.gitlab.com', 'registry.hub.docker.com', 'k8s.gcr.io', 'gcr.io'
+]
+DOCKER_REPOSITORIES_NICKNAMES = {
+ 'nexus3.onap.org:10001': 'onap',
+ 'docker.elastic.co': 'elastic',
+ 'docker.io': 'dockerHub (docker.io)',
+ 'registry.hub.docker.com': 'dockerHub (registry)',
+ 'registry.gitlab.com': 'gitlab',
+ 'library': 'dockerHub (library)',
+ 'default': 'dockerHub',
+ 'k8s.gcr.io': 'google (k8s.gcr)',
+ 'gcr.io': 'google (gcr)'
+}
+
+GENERIC_NAMES = {
+ 'postgreSQL': ['crunchydata/crunchy-postgres', 'postgres'],
+ 'mariadb': ['adfinissygroup/k8s-mariadb-galera-centos', 'mariadb'],
+ 'elasticsearch': [
+ 'bitnami/elasticsearch', 'elasticsearch/elasticsearch',
+ 'onap/clamp-dashboard-elasticsearch'
+ ],
+ 'nginx': ['bitnami/nginx', 'nginx'],
+ 'cassandra': [
+ 'cassandra', 'onap/music/cassandra_3_11', 'onap/music/cassandra_music',
+ 'onap/aaf/aaf_cass'
+ ],
+ 'zookeeper': ['google_samples/k8szk', 'onap/dmaap/zookeeper', 'zookeeper'],
+ 'redis': [
+ 'onap/vfc/db',
+ 'onap/org.onap.dcaegen2.deployments.redis-cluster-container'
+ ],
+ 'consul': ['consul', 'oomk8s/consul'],
+ 'rabbitmq': ['ansible/awx_rabbitmq', 'rabbitmq']
+}
+
+MAX_LOG_BYTES = 512000
\ No newline at end of file
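
The new switches above (CHECK_POD_VERSIONS, IGNORE_EMPTY_REPLICAS, STATUS_DETAILS_JSON, MAX_LOG_BYTES) are plain onapsdk settings, so they can be tuned from a custom settings module. A minimal sketch, assuming the standard onapsdk loader that picks the module named by the ONAP_PYTHON_SDK_SETTINGS environment variable; the module name is hypothetical:

# my_status_settings.py - hypothetical override module, not part of this change
from onaptests.configuration.status_settings import *  # noqa: F401,F403

CHECK_POD_VERSIONS = False     # skip the per-container image/version report
IGNORE_EMPTY_REPLICAS = True   # do not flag workloads deliberately scaled to 0
STATUS_RESULTS_DIRECTORY = "/var/lib/xtesting/results"

It would then be selected at run time with something like ONAP_PYTHON_SDK_SETTINGS=my_status_settings python run_status.py.
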
diff --git a/src/onaptests/steps/cloud/check_status.py b/src/onaptests/steps/cloud/check_status.py
index 0bfce35..8b7ac46 100644
--- a/src/onaptests/steps/cloud/check_status.py
+++ b/src/onaptests/steps/cloud/check_status.py
@@ -2,333 +2,394 @@
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
import json
-import os
import logging
+import os
import re
-from onapsdk.configuration import settings
-from natural.date import delta
-from xtesting.core import testcase
+from pathlib import Path
+
+from jinja2 import Environment, PackageLoader, select_autoescape
from kubernetes import client, config
from kubernetes.stream import stream
+from natural.date import delta
from urllib3.exceptions import MaxRetryError, NewConnectionError
-from jinja2 import Environment, PackageLoader, select_autoescape
+from xtesting.core import testcase
-from ..base import BaseStep
-from .resources import Pod, Container, Service, Job
-from .resources import Deployment, StatefulSet, DaemonSet, Pvc, ReplicaSet
-from .resources import ConfigMap, Secret, Ingress
+from onapsdk.configuration import settings
from onaptests.utils.exceptions import StatusCheckException
+from ..base import BaseStep
+from .resources import (ConfigMap, Container, DaemonSet, Deployment, Ingress,
+ Job, Pod, Pvc, ReplicaSet, Secret, Service,
+ StatefulSet)
+
NAMESPACE = settings.K8S_ONAP_NAMESPACE
-FULL_LOGS_CONTAINERS = [
- 'dcae-bootstrap', 'dcae-cloudify-manager', 'aai-resources',
- 'aai-traversal', 'aai-modelloader', 'sdnc', 'so', 'so-bpmn-infra',
- 'so-openstack-adapter', 'so-sdc-controller', 'mariadb-galera', 'sdc-be',
- 'sdc-fe'
-]
-
-# patterns to be excluded from the check
-WAIVER_LIST = ['integration']
-
-SPECIFIC_LOGS_CONTAINERS = {
- 'sdc-be': ['/var/log/onap/sdc/sdc-be/error.log'],
- 'sdc-onboarding-be': ['/var/log/onap/sdc/sdc-onboarding-be/error.log'],
- 'aaf-cm': [
- '/opt/app/osaaf/logs/cm/cm-service.log',
- '/opt/app/osaaf/logs/cm/cm-init.log'
- ],
- 'aaf-fs': [
- '/opt/app/osaaf/logs/fs/fs-service.log',
- '/opt/app/osaaf/logs/fs/fs-init.log'
- ],
- 'aaf-locate': [
- '/opt/app/osaaf/logs/locate/locate-service.log',
- '/opt/app/osaaf/logs/locate/locate-init.log'
- ],
- 'aaf-service': [
- '/opt/app/osaaf/logs/service/authz-service.log',
- '/opt/app/osaaf/logs/service/authz-init.log'
- ],
- 'sdc-be': [
- '/var/log/onap/sdc/sdc-be/debug.log',
- '/var/log/onap/sdc/sdc-be/error.log'
- ],
- 'sdc-fe': [
- '/var/log/onap/sdc/sdc-fe/debug.log',
- '/var/log/onap/sdc/sdc-fe/error.log'
- ],
- 'vid': [
- '/var/log/onap/vid/audit.log',
- '/var/log/onap/vid/application.log',
- '/var/log/onap/vid/debug.log',
- '/var/log/onap/vid/error.log'
- ],
-}
-
-DOCKER_REPOSITORIES = [
- 'nexus3.onap.org:10001', 'docker.elastic.co', 'docker.io', 'library',
- 'registry.gitlab.com', 'registry.hub.docker.com', 'k8s.gcr.io', 'gcr.io'
-]
-DOCKER_REPOSITORIES_NICKNAMES = {
- 'nexus3.onap.org:10001': 'onap',
- 'docker.elastic.co': 'elastic',
- 'docker.io': 'dockerHub (docker.io)',
- 'registry.hub.docker.com': 'dockerHub (registry)',
- 'registry.gitlab.com': 'gitlab',
- 'library': 'dockerHub (library)',
- 'default': 'dockerHub',
- 'k8s.gcr.io': 'google (k8s.gcr)',
- 'gcr.io': 'google (gcr)'
-}
-
-GENERIC_NAMES = {
- 'postgreSQL': ['crunchydata/crunchy-postgres', 'postgres'],
- 'mariadb': ['adfinissygroup/k8s-mariadb-galera-centos', 'mariadb'],
- 'elasticsearch': [
- 'bitnami/elasticsearch', 'elasticsearch/elasticsearch',
- 'onap/clamp-dashboard-elasticsearch'
- ],
- 'nginx': ['bitnami/nginx', 'nginx'],
- 'cassandra': [
- 'cassandra', 'onap/music/cassandra_3_11', 'onap/music/cassandra_music',
- 'onap/aaf/aaf_cass'
- ],
- 'zookeeper': ['google_samples/k8szk', 'onap/dmaap/zookeeper', 'zookeeper'],
- 'redis': [
- 'onap/vfc/db',
- 'onap/org.onap.dcaegen2.deployments.redis-cluster-container'
- ],
- 'consul': ['consul', 'oomk8s/consul'],
- 'rabbitmq': ['ansible/awx_rabbitmq', 'rabbitmq']
-}
-
-MAX_LOG_BYTES = 512000
-
-
-class CheckNamespaceStatusStep(BaseStep):
- """Check status of all k8s resources in the selected namespace."""
+
+class CheckK8sResourcesStep(BaseStep):
__logger = logging.getLogger(__name__)
- def __init__(self, cleanup: bool = False,**kwargs):
- """Init CheckNamespaceStatusStep."""
- super().__init__(cleanup=cleanup)
+ def __init__(self, resource_type: str, **kwargs):
+ """Init CheckK8sResourcesStep."""
+ super().__init__(cleanup=False)
+ self.core = client.CoreV1Api()
+ self.batch = client.BatchV1Api()
+ self.app = client.AppsV1Api()
+ self.networking = client.NetworkingV1Api()
if settings.STATUS_RESULTS_DIRECTORY:
self.res_dir = f"{settings.STATUS_RESULTS_DIRECTORY}"
else:
self.res_dir = f"{testcase.TestCase.dir_results}/kubernetes-status"
- if settings.IN_CLUSTER:
- config.load_incluster_config()
- else:
- config.load_kube_config(config_file=settings.K8S_CONFIG)
-
- self.core = client.CoreV1Api()
- self.batch = client.BatchV1Api()
- self.app = client.AppsV1Api()
- self.networking = client.NetworkingV1Api()
+ self.failing = False
+ self.resource_type = resource_type
+ self.k8s_resources = []
+ self.all_resources = []
+ self.failing_resources = []
+ self.jinja_env = Environment(autoescape=select_autoescape(['html']),
+ loader=PackageLoader('onaptests.templates','status'))
- self.__logger.debug("namespace status init started")
- self.pods = []
- self.services = []
- self.jobs = []
- self.deployments = []
- self.replicasets =[]
- self.statefulsets = []
- self.daemonsets = []
- self.pvcs = []
- self.configmaps = []
- self.secrets = []
- self.ingresses = []
- self.details = {}
+ @property
+ def component(self) -> str:
+ """Component name."""
+ return "ALL"
@property
def description(self) -> str:
"""Step description."""
- return "Check status of all k8s resources in the selected namespace."
+ return f"Check status of all k8s {self.resource_type}s in the {NAMESPACE} namespace."
- @property
- def component(self) -> str:
- """Component name."""
- return "ALL"
+ def _init_resources(self):
+ self.__logger.debug(f"Loading all k8s {self.resource_type}s in the {NAMESPACE} namespace")
+
+ def _parse_resources(self):
+ """Parse the resources."""
+ return []
+
+ def execute(self):
+ super().execute()
+ os.makedirs(self.res_dir, exist_ok=True)
+ try:
+ self._init_resources()
+ if len(self.k8s_resources) > 0:
+ self.__logger.info("%4s %ss in the namespace", len(self.k8s_resources), self.resource_type)
+ self._parse_resources()
+ self.__logger.info("%4s %ss parsed, %s failing", len(self.all_resources), self.resource_type,
+ len(self.failing_resources))
+ except (ConnectionRefusedError, MaxRetryError, NewConnectionError):
+ self.__logger.error("Test of k8s %ss failed.", self.resource_type)
+ self.__logger.error("Cannot connect to Kubernetes.")
+
+class CheckBasicK8sResourcesStep(CheckK8sResourcesStep):
+
+ __logger = logging.getLogger(__name__)
+
+ def __init__(self, resource_type: str, k8s_res_class, cleanup: bool = False,**kwargs):
+ """Init CheckBasicK8sResourcesStep."""
+ super().__init__(resource_type)
+ self.k8s_res_class = k8s_res_class
+
+ def _parse_resources(self):
+ """Parse simple k8s resources."""
+ super()._parse_resources()
+ for k8s in self.k8s_resources:
+ resource = self.k8s_res_class(k8s=k8s)
+ self.all_resources.append(resource)
@BaseStep.store_state
def execute(self):
- """Check status of all k8s resources in the selected namespace.
+ super().execute()
- Use settings values:
- - K8S_ONAP_NAMESPACE.
+class CheckK8sConfigMapsStep(CheckBasicK8sResourcesStep):
+
+ __logger = logging.getLogger(__name__)
+
+ def __init__(self, cleanup: bool = False,**kwargs):
+ """Init CheckK8sConfigMapsStep."""
+ super().__init__("configmap", ConfigMap)
+
+ def _init_resources(self):
+ super()._init_resources()
+ self.k8s_resources = self.core.list_namespaced_config_map(NAMESPACE).items
+
+class CheckK8sSecretsStep(CheckBasicK8sResourcesStep):
+
+ __logger = logging.getLogger(__name__)
+
+ def __init__(self, cleanup: bool = False,**kwargs):
+ """Init CheckK8sSecretsStep."""
+ super().__init__("secret", Secret)
+
+ def _init_resources(self):
+ super()._init_resources()
+ self.k8s_resources = self.core.list_namespaced_secret(NAMESPACE).items
+
+class CheckK8sIngressesStep(CheckBasicK8sResourcesStep):
+
+ __logger = logging.getLogger(__name__)
+
+ def __init__(self, cleanup: bool = False,**kwargs):
+ """Init CheckK8sIngressesStep."""
+ super().__init__("ingress", Ingress)
+
+ def _init_resources(self):
+ super()._init_resources()
+ self.k8s_resources = self.networking.list_namespaced_ingress(NAMESPACE).items
+
+class CheckK8sPvcsStep(CheckK8sResourcesStep):
+
+ __logger = logging.getLogger(__name__)
+
+ def __init__(self, cleanup: bool = False,**kwargs):
+ """Init CheckK8sPvcsStep."""
+ super().__init__("pvc")
+
+ def _init_resources(self):
+ super()._init_resources()
+ self.k8s_resources = self.core.list_namespaced_persistent_volume_claim(NAMESPACE).items
+    def _parse_resources(self):
+        """Parse the PVCs.
+        Flag any persistent volume claim that is not in the Bound phase.
"""
+ super()._parse_resources()
+ jobs_pods = []
+ for k8s in self.k8s_resources:
+ pvc = Pvc(k8s=k8s)
+ field_selector = f"involvedObject.name={pvc.name},involvedObject.kind=PersistentVolumeClaim"
+ pvc.events = self.core.list_namespaced_event(
+ NAMESPACE,
+ field_selector=field_selector).items
+
+ if k8s.status.phase != "Bound":
+ self.__logger.warning("a PVC is in error: {}".format(pvc.name))
+ self.failing_resources.append(pvc)
+ self.failing = True
+ self.all_resources.append(pvc)
+
+ @BaseStep.store_state
+ def execute(self):
super().execute()
- os.makedirs(self.res_dir, exist_ok=True)
- self.__logger.debug("start test")
- try:
- self.k8s_pods = self.core.list_namespaced_pod(NAMESPACE).items
- self.__logger.info("%4s Pods in the namespace", len(self.k8s_pods))
-
- self.k8s_jobs = self.batch.list_namespaced_job(NAMESPACE).items
- self.__logger.info("%4s Jobs in the namespace", len(self.k8s_jobs))
-
- self.k8s_deployments = self.app.list_namespaced_deployment(
- NAMESPACE).items
- self.__logger.info("%4s Deployments in the namespace",
- len(self.k8s_deployments))
-
- self.k8s_replicasets = self.app.list_namespaced_replica_set(
- NAMESPACE).items
- self.__logger.info("%4s Replicasets in the namespace",
- len(self.k8s_replicasets))
-
- self.k8s_statefulsets = self.app.list_namespaced_stateful_set(
- NAMESPACE).items
- self.__logger.info("%4s StatefulSets in the namespace",
- len(self.k8s_statefulsets))
-
- self.k8s_daemonsets = self.app.list_namespaced_daemon_set(
- NAMESPACE).items
- self.__logger.info("%4s DaemonSets in the namespace",
- len(self.k8s_daemonsets))
-
- self.k8s_services = self.core.list_namespaced_service(
- NAMESPACE).items
- self.__logger.info("%4s Services in the namespace",
- len(self.k8s_services))
-
- self.k8s_pvcs = self.core.list_namespaced_persistent_volume_claim(
- NAMESPACE).items
- self.__logger.info("%4s PVCs in the namespace", len(self.pvcs))
-
- self.k8s_configmaps = self.core.list_namespaced_config_map(
- NAMESPACE).items
- self.__logger.info("%4s ConfigMaps in the namespace",
- len(self.configmaps))
-
- self.k8s_secrets = self.core.list_namespaced_secret(
- NAMESPACE).items
- self.__logger.info("%4s Secrets in the namespace",
- len(self.secrets))
-
- self.k8s_ingresses = self.networking.list_namespaced_ingress(
- NAMESPACE).items
- self.__logger.info("%4s Ingresses in the namespace",
- len(self.ingresses))
- except (ConnectionRefusedError, MaxRetryError, NewConnectionError):
- self.__logger.error("namespace status test failed.")
- self.__logger.error("cannot connect to Kubernetes.")
- return testcase.TestCase.EX_TESTCASE_FAILED
-
- self.failing_statefulsets = []
- self.failing_jobs = []
- self.failing_deployments = []
- self.failing_replicasets = []
- self.failing_daemonsets = []
- self.failing_pvcs = []
- self.failing = False
- self.jinja_env = Environment(autoescape=select_autoescape(['html']),
- loader=PackageLoader('onaptests.templates','status'))
- self.parse_services()
- jobs_pods = self.parse_jobs()
- self.parse_pods(excluded_pods=jobs_pods)
- self.parse_deployments()
- self.parse_replicasets()
- self.parse_statefulsets()
- self.parse_daemonsets()
- self.parse_pvcs()
- self.parse_configmaps()
- self.parse_secrets()
- self.parse_ingresses()
- self.parse_versions()
- self.jinja_env.get_template('index.html.j2').stream(
- ns=self,
- delta=delta).dump('{}/index.html'.format(self.res_dir))
- self.jinja_env.get_template('raw_output.txt.j2').stream(
- ns=self, namespace=NAMESPACE).dump('{}/onap-k8s.log'.format(
- self.res_dir))
+class CheckK8sResourcesUsingPodsStep(CheckK8sResourcesStep):
- if len(self.jobs) > 0:
- self.details['jobs'] = {
- 'number': len(self.jobs),
- 'number_failing': len(self.failing_jobs),
- 'failing': self.map_by_name(self.failing_jobs)
- }
- if len(self.deployments) > 0:
- self.details['deployments'] = {
- 'number': len(self.deployments),
- 'number_failing': len(self.failing_deployments),
- 'failing': self.map_by_name(self.failing_deployments)
- }
- if len(self.replicasets) > 0:
- self.details['replicasets'] = {
- 'number': len(self.replicasets),
- 'number_failing': len(self.failing_replicasets),
- 'failing': self.map_by_name(self.failing_replicasets)
- }
- if len(self.statefulsets) > 0:
- self.details['statefulsets'] = {
- 'number': len(self.statefulsets),
- 'number_failing': len(self.failing_statefulsets),
- 'failing': self.map_by_name(self.failing_statefulsets)
- }
- if len(self.daemonsets) > 0:
- self.details['daemonsets'] = {
- 'number': len(self.daemonsets),
- 'number_failing': len(self.failing_daemonsets),
- 'failing': self.map_by_name(self.failing_daemonsets)
- }
- if len(self.pvcs) > 0:
- self.details['pvcs'] = {
- 'number': len(self.pvcs),
- 'number_failing': len(self.failing_pvcs),
- 'failing': self.map_by_name(self.failing_pvcs)
- }
- if self.failing:
- self.__logger.error("namespace status test failed.")
- self.__logger.error("number of errored Jobs: %s",
- len(self.failing_jobs))
- self.__logger.error("number of errored Deployments: %s",
- len(self.failing_deployments))
- self.__logger.error("number of errored Replicasets: %s",
- len(self.failing_replicasets))
- self.__logger.error("number of errored StatefulSets: %s",
- len(self.failing_statefulsets))
- self.__logger.error("number of errored DaemonSets: %s",
- len(self.failing_daemonsets))
- self.__logger.error("number of errored PVCs: %s",
- len(self.failing_pvcs))
- raise StatusCheckException
+ __logger = logging.getLogger(__name__)
+
+ def __init__(self, resource_type: str, pods_source, cleanup: bool = False,**kwargs):
+ """Init CheckK8sResourcesUsingPodsStep."""
+ super().__init__(resource_type)
+ self.pods_source = pods_source
+
+ def _get_used_pods(self):
+ pods = []
+ if self.pods_source is not None:
+ pods = self.pods_source.all_resources
+ return pods
+
+ def _find_child_pods(self, selector):
+ pods_used = self._get_used_pods()
+ pods_list = []
+ failed_pods = 0
+ if selector:
+ raw_selector = ''
+ for key, value in selector.items():
+ raw_selector += key + '=' + value + ','
+ raw_selector = raw_selector[:-1]
+ pods = self.core.list_namespaced_pod(
+ NAMESPACE, label_selector=raw_selector).items
+ for pod in pods:
+ for known_pod in pods_used:
+ if known_pod.name == pod.metadata.name:
+ pods_list.append(known_pod)
+ if not known_pod.ready():
+ failed_pods += 1
+ return (pods_list, failed_pods)
+
+ @BaseStep.store_state
+ def execute(self):
+ super().execute()
+
+class CheckK8sJobsStep(CheckK8sResourcesUsingPodsStep):
+
+ __logger = logging.getLogger(__name__)
+
+ def __init__(self, cleanup: bool = False,**kwargs):
+ """Init CheckK8sJobsStep."""
+ super().__init__("job", None)
+
+ def _init_resources(self):
+ super()._init_resources()
+ self.k8s_resources = self.batch.list_namespaced_job(NAMESPACE).items
+
+ def _parse_resources(self):
+ """Parse the jobs.
+        Collect the pods that were created to perform the jobs.
+ """
+ super()._parse_resources()
+ jobs_pods = []
+ for k8s in self.k8s_resources:
+ job = Job(k8s=k8s)
+ job_pods = []
+
+ if k8s.spec.selector and k8s.spec.selector.match_labels:
+ (job.pods, job.failed_pods) = self._find_child_pods(
+ k8s.spec.selector.match_labels)
+ job_pods += job.pods
+ field_selector = "involvedObject.name={}".format(job.name)
+ field_selector += ",involvedObject.kind=Job"
+ job.events = self.core.list_namespaced_event(
+ NAMESPACE,
+ field_selector=field_selector).items
+
+ self.jinja_env.get_template('job.html.j2').stream(job=job).dump(
+ '{}/job-{}.html'.format(self.res_dir, job.name))
+
+            # timeout job
+ if not k8s.status.completion_time:
+ self.__logger.warning("a Job is in error: {}".format(job.name))
+ if any(
+ waiver_elt not in job.name for waiver_elt in settings.WAIVER_LIST):
+ self.failing_resources.append(job)
+ self.failing = True
+ # completed job
+ if any(waiver_elt not in job.name for waiver_elt in settings.WAIVER_LIST):
+ self.all_resources.append(job)
+ jobs_pods += job_pods
+
+class CheckK8sPodsStep(CheckK8sResourcesUsingPodsStep):
+
+ __logger = logging.getLogger(__name__)
+
+ def __init__(self, pods, cleanup: bool = False,**kwargs):
+ """Init CheckK8sPodsStep."""
+ super().__init__("pod", pods)
- def parse_pods(self, excluded_pods=None):
- """Parse the pods status."""
- self.__logger.info("%4s pods to parse", len(self.k8s_pods))
- for k8s in self.k8s_pods:
+ def _init_resources(self):
+ super()._init_resources()
+ self.k8s_resources = self.core.list_namespaced_pod(NAMESPACE).items
+
+ def _parse_resources(self):
+ """Parse the pods."""
+ super()._parse_resources()
+ excluded_pods = self._get_used_pods()
+ pod_versions = []
+ containers = {}
+ for k8s in self.k8s_resources:
pod = Pod(k8s=k8s)
+ # check version firstly
+ if settings.CHECK_POD_VERSIONS:
+ pod_component = k8s.metadata.name
+ if 'app' in k8s.metadata.labels:
+ pod_component = k8s.metadata.labels['app']
+ else:
+ if 'app.kubernetes.io/name' in k8s.metadata.labels:
+ pod_component = k8s.metadata.labels[
+ 'app.kubernetes.io/name']
+ else:
+ self.__logger.error("pod %s has no 'app' or 'app.kubernetes.io/name' in metadata: %s", pod_component, k8s.metadata.labels)
+
+ ## looks for docker version
+ for container in k8s.spec.containers:
+ pod_version = {}
+ pod_container_version = container.image.rsplit(":", 1)
+ pod_container_image = pod_container_version[0]
+ pod_container_tag = "latest"
+ if len(pod_container_version) > 1:
+ pod_container_tag = pod_container_version[1]
+
+ pod_version.update({
+ 'container': container.name,
+ 'component': pod_component,
+ 'image': pod_container_image,
+ 'version': pod_container_tag
+ })
+ pod_versions.append(pod_version)
+
+ search_rule = "^(?P<source>[^/]*)/*(?P<container>[^:]*):*(?P<version>.*)$"
+ search = re.search(search_rule, container.image)
+ name = "{}/{}".format(search.group('source'),
+ search.group('container'))
+ version = search.group('version')
+ if name[-1] == '/':
+ name = name[0:-1]
+ source = "default"
+ if search.group('source') in settings.DOCKER_REPOSITORIES:
+ source = search.group('source')
+ name = search.group('container')
+ container_search_rule = "^library/(?P<real_container>[^:]*)$"
+ container_search = re.search(container_search_rule, name)
+ if container_search:
+ name = container_search.group('real_container')
+ for common_component in settings.GENERIC_NAMES.keys():
+ if name in settings.GENERIC_NAMES[common_component]:
+ version = "{}:{}".format(name, version)
+ name = common_component
+ break
+
+ repository = settings.DOCKER_REPOSITORIES_NICKNAMES[source]
+ if name in containers:
+ if version in containers[name]['versions']:
+ if not (pod_component in containers[name]['versions']
+ [version]['components']):
+ containers[name]['versions'][version][
+ 'components'].append(pod_component)
+ containers[name]['number_components'] += 1
+ if not (repository in containers[name]['versions']
+ [version]['repositories']):
+ containers[name]['versions'][version][
+ 'repositories'].append(repository)
+ else:
+ containers[name]['versions'][version] = {
+ 'repositories': [repository],
+ 'components': [pod_component]
+ }
+ containers[name]['number_components'] += 1
+ else:
+ containers[name] = {
+ 'versions': {
+ version: {
+ 'repositories': [repository],
+ 'components': [pod_component]
+ }
+ },
+ 'number_components': 1
+ }
+ # pod version check end
if excluded_pods and pod in excluded_pods:
continue
if k8s.status.init_container_statuses:
for k8s_container in k8s.status.init_container_statuses:
- pod.runned_init_containers += self.parse_container(
+ pod.runned_init_containers += self._parse_container(
pod, k8s_container, init=True)
if k8s.status.container_statuses:
for k8s_container in k8s.status.container_statuses:
- pod.running_containers += self.parse_container(
+ pod.running_containers += self._parse_container(
pod, k8s_container)
pod.events = self.core.list_namespaced_event(
NAMESPACE,
field_selector="involvedObject.name={}".format(pod.name)).items
self.jinja_env.get_template('pod.html.j2').stream(pod=pod).dump(
'{}/pod-{}.html'.format(self.res_dir, pod.name))
- if any(waiver_elt in pod.name for waiver_elt in WAIVER_LIST):
+ if any(waiver_elt in pod.name for waiver_elt in settings.WAIVER_LIST):
self.__logger.warn("Waiver pattern found in pod, exclude %s", pod.name)
else:
- self.pods.append(pod)
-
- def parse_container(self, pod, k8s_container, init=False):
+ self.all_resources.append(pod)
+
+ if settings.CHECK_POD_VERSIONS:
+ self.jinja_env.get_template('version.html.j2').stream(
+ pod_versions=pod_versions).dump('{}/versions.html'.format(
+ self.res_dir))
+ self.jinja_env.get_template('container_versions.html.j2').stream(
+ containers=containers).dump('{}/container_versions.html'.format(
+ self.res_dir))
+ # create a json file for version tracking
+ with open(self.res_dir + "/onap_versions.json", "w") as write_file:
+ json.dump(pod_versions, write_file)
+
+ def _parse_container(self, pod, k8s_container, init=False):
"""Get the logs of a container."""
logs = ""
old_logs = ""
@@ -358,7 +419,7 @@ class CheckNamespaceStatusStep(BaseStep):
pod.name,
NAMESPACE,
container=container.name,
- limit_bytes=MAX_LOG_BYTES,
+ limit_bytes=settings.MAX_LOG_BYTES,
)
except UnicodeDecodeError:
logs= "{0} has an unicode decode error...".format(pod.name)
@@ -382,7 +443,7 @@ class CheckNamespaceStatusStep(BaseStep):
container.name),
'w') as log_result:
log_result.write(old_logs)
- if (container.name in FULL_LOGS_CONTAINERS):
+ if (container.name in settings.FULL_LOGS_CONTAINERS):
logs = self.core.read_namespaced_pod_log(
pod.name, NAMESPACE, container=container.name)
with open(
@@ -390,8 +451,8 @@ class CheckNamespaceStatusStep(BaseStep):
pod.name, container.name),
'w') as log_result:
log_result.write(logs)
- if (container.name in SPECIFIC_LOGS_CONTAINERS):
- for log_file in SPECIFIC_LOGS_CONTAINERS[container.name]:
+ if (container.name in settings.SPECIFIC_LOGS_CONTAINERS):
+ for log_file in settings.SPECIFIC_LOGS_CONTAINERS[container.name]:
exec_command = ['/bin/sh', '-c', "cat {}".format(log_file)]
log_files[log_file] = stream(
self.core.connect_get_namespaced_pod_exec,
@@ -420,7 +481,7 @@ class CheckNamespaceStatusStep(BaseStep):
old_logs=old_logs,
log_files=log_files).dump('{}/pod-{}-{}-logs.html'.format(
self.res_dir, pod.name, container.name))
- if any(waiver_elt in container.name for waiver_elt in WAIVER_LIST):
+ if any(waiver_elt in container.name for waiver_elt in settings.WAIVER_LIST):
self.__logger.warn(
"Waiver pattern found in container, exclude %s", container.name)
else:
@@ -429,10 +490,22 @@ class CheckNamespaceStatusStep(BaseStep):
return 1
return 0
- def parse_services(self):
+class CheckK8sServicesStep(CheckK8sResourcesUsingPodsStep):
+
+ __logger = logging.getLogger(__name__)
+
+ def __init__(self, pods, cleanup: bool = False,**kwargs):
+ """Init CheckK8sServicesStep."""
+ super().__init__("service", pods)
+
+ def _init_resources(self):
+ super()._init_resources()
+ self.k8s_resources = self.core.list_namespaced_service(NAMESPACE).items
+
+ def _parse_resources(self):
"""Parse the services."""
- self.__logger.info("%4s services to parse", len(self.k8s_services))
- for k8s in self.k8s_services:
+ super()._parse_resources()
+ for k8s in self.k8s_resources:
service = Service(k8s=k8s)
(service.pods,
@@ -441,53 +514,28 @@ class CheckNamespaceStatusStep(BaseStep):
self.jinja_env.get_template('service.html.j2').stream(
service=service).dump('{}/service-{}.html'.format(
self.res_dir, service.name))
- self.services.append(service)
+ self.all_resources.append(service)
- def parse_jobs(self):
- """Parse the jobs.
- Return a list of Pods that were created to perform jobs.
- """
- self.__logger.info("%4s jobs to parse", len(self.k8s_jobs))
- jobs_pods = []
- for i in range(len(self.k8s_jobs)):
- k8s = self.k8s_jobs[i]
- job = Job(k8s=k8s)
- job_pods = []
+class CheckK8sDeploymentsStep(CheckK8sResourcesUsingPodsStep):
- if k8s.spec.selector and k8s.spec.selector.match_labels:
- (job.pods, job.failed_pods) = self._find_child_pods(
- k8s.spec.selector.match_labels)
- job_pods += job.pods
- field_selector = "involvedObject.name={}".format(job.name)
- field_selector += ",involvedObject.kind=Job"
- job.events = self.core.list_namespaced_event(
- NAMESPACE,
- field_selector=field_selector).items
+ __logger = logging.getLogger(__name__)
- self.jinja_env.get_template('job.html.j2').stream(job=job).dump(
- '{}/job-{}.html'.format(self.res_dir, job.name))
+ def __init__(self, pods, cleanup: bool = False,**kwargs):
+ """Init CheckK8sDeploymentsStep."""
+ super().__init__("deployment", pods)
- # timemout job
- if not k8s.status.completion_time:
- self.__logger.warning("a Job is in error: {}".format(job.name))
- if any(
- waiver_elt not in job.name for waiver_elt in WAIVER_LIST):
- self.failing_jobs.append(job)
- self.failing = True
- # completed job
- if any(waiver_elt not in job.name for waiver_elt in WAIVER_LIST):
- self.jobs.append(job)
- jobs_pods += job_pods
- return jobs_pods
+ def _init_resources(self):
+ super()._init_resources()
+ self.k8s_resources = self.app.list_namespaced_deployment(NAMESPACE).items
- def parse_deployments(self):
+ def _parse_resources(self):
"""Parse the deployments."""
- self.__logger.info("%4s deployments to parse",
- len(self.k8s_deployments))
- for i in range(len(self.k8s_deployments)):
- k8s = self.k8s_deployments[i]
+ super()._parse_resources()
+ for k8s in self.k8s_resources:
deployment = Deployment(k8s=k8s)
+ if settings.IGNORE_EMPTY_REPLICAS and k8s.spec.replicas == 0:
+ continue
if k8s.spec.selector and k8s.spec.selector.match_labels:
(deployment.pods,
deployment.failed_pods) = self._find_child_pods(
@@ -504,19 +552,32 @@ class CheckNamespaceStatusStep(BaseStep):
if k8s.status.unavailable_replicas:
self.__logger.warning("a Deployment is in error: {}".format(deployment.name))
- self.failing_deployments.append(deployment)
+ self.failing_resources.append(deployment)
self.failing = True
- self.deployments.append(deployment)
+ self.all_resources.append(deployment)
+
+class CheckK8sResplicaSetsStep(CheckK8sResourcesUsingPodsStep):
- def parse_replicasets(self):
+ __logger = logging.getLogger(__name__)
+
+ def __init__(self, pods, cleanup: bool = False,**kwargs):
+ """Init CheckK8sResplicaSetsStep."""
+ super().__init__("replicaset", pods)
+
+ def _init_resources(self):
+ super()._init_resources()
+ self.k8s_resources = self.app.list_namespaced_replica_set(NAMESPACE).items
+
+ def _parse_resources(self):
"""Parse the replicasets."""
- self.__logger.info("%4s replicasets to parse",
- len(self.k8s_replicasets))
- for i in range(len(self.k8s_replicasets)):
- k8s = self.k8s_replicasets[i]
+ super()._parse_resources()
+ for k8s in self.k8s_resources:
replicaset = ReplicaSet(k8s=k8s)
+ if settings.IGNORE_EMPTY_REPLICAS and k8s.spec.replicas == 0:
+ continue
+
if k8s.spec.selector and k8s.spec.selector.match_labels:
(replicaset.pods,
replicaset.failed_pods) = self._find_child_pods(
@@ -534,19 +595,32 @@ class CheckNamespaceStatusStep(BaseStep):
if (not k8s.status.ready_replicas
or (k8s.status.ready_replicas < k8s.status.replicas)):
self.__logger.warning("a ReplicaSet is in error: {}".format(replicaset.name))
- self.failing_replicasets.append(replicaset)
+ self.failing_resources.append(replicaset)
self.failing = True
- self.replicasets.append(replicaset)
+ self.all_resources.append(replicaset)
+
+class CheckK8sStatefulSetsStep(CheckK8sResourcesUsingPodsStep):
+
+ __logger = logging.getLogger(__name__)
+
+ def __init__(self, pods, cleanup: bool = False,**kwargs):
+ """Init CheckK8sStatefulSetsStep."""
+ super().__init__("statefulset", pods)
- def parse_statefulsets(self):
+ def _init_resources(self):
+ super()._init_resources()
+ self.k8s_resources = self.app.list_namespaced_stateful_set(NAMESPACE).items
+
+ def _parse_resources(self):
"""Parse the statefulsets."""
- self.__logger.info("%4s statefulsets to parse",
- len(self.k8s_statefulsets))
- for i in range(len(self.k8s_statefulsets)):
- k8s = self.k8s_statefulsets[i]
+ super()._parse_resources()
+ for k8s in self.k8s_resources:
statefulset = StatefulSet(k8s=k8s)
+ if settings.IGNORE_EMPTY_REPLICAS and k8s.spec.replicas == 0:
+ continue
+
if k8s.spec.selector and k8s.spec.selector.match_labels:
(statefulset.pods,
statefulset.failed_pods) = self._find_child_pods(
@@ -564,18 +638,32 @@ class CheckNamespaceStatusStep(BaseStep):
if ((not k8s.status.ready_replicas)
or (k8s.status.ready_replicas < k8s.status.replicas)):
self.__logger.warning("a StatefulSet is in error: {}".format(statefulset.name))
- self.failing_statefulsets.append(statefulset)
+ self.failing_resources.append(statefulset)
self.failing = True
- self.statefulsets.append(statefulset)
+ self.all_resources.append(statefulset)
+
+class CheckK8sDaemonSetsStep(CheckK8sResourcesUsingPodsStep):
- def parse_daemonsets(self):
+ __logger = logging.getLogger(__name__)
+
+ def __init__(self, pods, cleanup: bool = False,**kwargs):
+ """Init CheckK8sDaemonSetsStep."""
+ super().__init__("daemonset", pods)
+
+ def _init_resources(self):
+ super()._init_resources()
+ self.k8s_resources = self.app.list_namespaced_daemon_set(NAMESPACE).items
+
+ def _parse_resources(self):
"""Parse the daemonsets."""
- self.__logger.info("%4s daemonsets to parse", len(self.k8s_daemonsets))
- for i in range(len(self.k8s_daemonsets)):
- k8s = self.k8s_daemonsets[i]
+ super()._parse_resources()
+ for k8s in self.k8s_resources:
daemonset = DaemonSet(k8s=k8s)
+ if settings.IGNORE_EMPTY_REPLICAS and k8s.spec.replicas == 0:
+ continue
+
if k8s.spec.selector and k8s.spec.selector.match_labels:
(daemonset.pods,
daemonset.failed_pods) = self._find_child_pods(
@@ -592,160 +680,110 @@ class CheckNamespaceStatusStep(BaseStep):
if (k8s.status.number_ready < k8s.status.desired_number_scheduled):
self.__logger.warning("a DaemonSet is in error: {}".format(daemonset.name))
- self.failing_daemonsets.append(daemonset)
+ self.failing_resources.append(daemonset)
self.failing = True
- self.daemonsets.append(daemonset)
+ self.all_resources.append(daemonset)
- def parse_pvcs(self):
- """Parse the persistent volume claims."""
- self.__logger.info("%4s pvcs to parse", len(self.k8s_pvcs))
- for k8s in self.k8s_pvcs:
- pvc = Pvc(k8s=k8s)
- field_selector = f"involvedObject.name={pvc.name},involvedObject.kind=PersistentVolumeClaim"
- pvc.events = self.core.list_namespaced_event(
- NAMESPACE,
- field_selector=field_selector).items
+class CheckNamespaceStatusStep(CheckK8sResourcesStep):
+ """Check status of all k8s resources in the selected namespace."""
- if k8s.status.phase != "Bound":
- self.__logger.warning("a PVC is in error: {}".format(pvc.name))
- self.failing_pvcs.append(pvc)
- self.failing = True
+ __logger = logging.getLogger(__name__)
- self.pvcs.append(pvc)
-
- def parse_configmaps(self):
- """Parse the config maps."""
- self.__logger.info("%4s config maps to parse",
- len(self.k8s_configmaps))
- for k8s in self.k8s_configmaps:
- configmap = ConfigMap(k8s=k8s)
- self.configmaps.append(configmap)
-
- def parse_secrets(self):
- """Parse the secrets."""
- self.__logger.info("%4s secrets to parse", len(self.k8s_secrets))
- for k8s in self.k8s_secrets:
- secret = Secret(k8s=k8s)
- self.secrets.append(secret)
-
- def parse_ingresses(self):
- """Parse the ingresses."""
- self.__logger.info("%4s ingresses to parse", len(self.k8s_ingresses))
- for k8s in self.k8s_secrets:
- ingress = Ingress(k8s=k8s)
- self.ingresses.append(ingress)
-
- def parse_versions(self):
- """Parse the versions of the pods."""
- self.__logger.info("%4s pods to parse", len(self.k8s_pods))
- pod_versions = []
- containers = {}
- for pod in self.k8s_pods:
- pod_component = pod.metadata.name
- if 'app' in pod.metadata.labels:
- pod_component = pod.metadata.labels['app']
- else:
- if 'app.kubernetes.io/name' in pod.metadata.labels:
- pod_component = pod.metadata.labels[
- 'app.kubernetes.io/name']
- else:
- self.__logger.error("pod %s has no 'app' or 'app.kubernetes.io/name' in metadata: %s", pod_component, pod.metadata.labels)
-
- # looks for docker version
- for container in pod.spec.containers:
- pod_version = {}
- pod_container_version = container.image.rsplit(":", 1)
- pod_container_image = pod_container_version[0]
- pod_container_tag = "latest"
- if len(pod_container_version) > 1:
- pod_container_tag = pod_container_version[1]
-
- pod_version.update({
- 'container': container.name,
- 'component': pod_component,
- 'image': pod_container_image,
- 'version': pod_container_tag
- })
- pod_versions.append(pod_version)
-
- search_rule = "^(?P<source>[^/]*)/*(?P<container>[^:]*):*(?P<version>.*)$"
- search = re.search(search_rule, container.image)
- name = "{}/{}".format(search.group('source'),
- search.group('container'))
- version = search.group('version')
- if name[-1] == '/':
- name = name[0:-1]
- source = "default"
- if search.group('source') in DOCKER_REPOSITORIES:
- source = search.group('source')
- name = search.group('container')
- container_search_rule = "^library/(?P<real_container>[^:]*)$"
- container_search = re.search(container_search_rule, name)
- if container_search:
- name = container_search.group('real_container')
- for common_component in GENERIC_NAMES.keys():
- if name in GENERIC_NAMES[common_component]:
- version = "{}:{}".format(name, version)
- name = common_component
- break
-
- repository = DOCKER_REPOSITORIES_NICKNAMES[source]
- if name in containers:
- if version in containers[name]['versions']:
- if not (pod_component in containers[name]['versions']
- [version]['components']):
- containers[name]['versions'][version][
- 'components'].append(pod_component)
- containers[name]['number_components'] += 1
- if not (repository in containers[name]['versions']
- [version]['repositories']):
- containers[name]['versions'][version][
- 'repositories'].append(repository)
- else:
- containers[name]['versions'][version] = {
- 'repositories': [repository],
- 'components': [pod_component]
- }
- containers[name]['number_components'] += 1
- else:
- containers[name] = {
- 'versions': {
- version: {
- 'repositories': [repository],
- 'components': [pod_component]
- }
- },
- 'number_components': 1
- }
+ def __init__(self, cleanup: bool = False, **kwargs):
+ """Init CheckNamespaceStatusStep."""
+ super().__init__("")
+ self.__logger.debug("%s namespace status test init started", NAMESPACE)
+ if settings.IN_CLUSTER:
+ config.load_incluster_config()
+ else:
+ config.load_kube_config(config_file=settings.K8S_CONFIG)
- self.jinja_env.get_template('version.html.j2').stream(
- pod_versions=pod_versions).dump('{}/versions.html'.format(
- self.res_dir))
- self.jinja_env.get_template('container_versions.html.j2').stream(
- containers=containers).dump('{}/container_versions.html'.format(
+ self.job_list_step = CheckK8sJobsStep()
+ self.pod_list_step = CheckK8sPodsStep(self.job_list_step)
+ self.service_list_step = CheckK8sServicesStep(self.pod_list_step)
+ self.deployment_list_step = CheckK8sDeploymentsStep(self.pod_list_step)
+ self.replicaset_list_step = CheckK8sResplicaSetsStep(self.pod_list_step)
+ self.statefulset_list_step = CheckK8sStatefulSetsStep(self.pod_list_step)
+ self.daemonset_list_step = CheckK8sDaemonSetsStep(self.pod_list_step)
+ self.configmap_list_step = CheckK8sConfigMapsStep()
+ self.secret_list_step = CheckK8sSecretsStep()
+ self.ingress_list_step = CheckK8sIngressesStep()
+ self.pvc_list_step = CheckK8sPvcsStep()
+ self.add_step(self.job_list_step)
+ self.add_step(self.pod_list_step)
+ self.add_step(self.service_list_step)
+ self.add_step(self.deployment_list_step)
+ self.add_step(self.replicaset_list_step)
+ self.add_step(self.statefulset_list_step)
+ self.add_step(self.daemonset_list_step)
+ self.add_step(self.configmap_list_step)
+ self.add_step(self.secret_list_step)
+ self.add_step(self.ingress_list_step)
+ self.add_step(self.pvc_list_step)
+
+ @property
+ def description(self) -> str:
+ """Step description."""
+ return "Check status of all k8s resources in the selected namespace."
+
+ @property
+ def component(self) -> str:
+ """Component name."""
+ return "ALL"
+
+ @BaseStep.store_state
+ def execute(self):
+ """Check status of all k8s resources in the selected namespace.
+
+ Use settings values:
+ - K8S_ONAP_NAMESPACE
+ - STATUS_RESULTS_DIRECTORY
+ - STORE_ARTIFACTS
+ - CHECK_POD_VERSIONS
+ """
+ super().execute()
+
+ self.pods = self.pod_list_step.all_resources
+ self.services = self.service_list_step.all_resources
+ self.jobs = self.job_list_step.all_resources
+ self.deployments = self.deployment_list_step.all_resources
+ self.replicasets = self.replicaset_list_step.all_resources
+ self.statefulsets = self.statefulset_list_step.all_resources
+ self.daemonsets = self.daemonset_list_step.all_resources
+ self.pvcs = self.pvc_list_step.all_resources
+ self.configmaps = self.configmap_list_step.all_resources
+ self.secrets = self.secret_list_step.all_resources
+ self.ingresses = self.ingress_list_step.all_resources
+
+ self.failing_statefulsets = self.statefulset_list_step.failing_resources
+ self.failing_jobs = self.job_list_step.failing_resources
+ self.failing_deployments = self.deployment_list_step.failing_resources
+ self.failing_replicasets = self.replicaset_list_step.failing_resources
+ self.failing_daemonsets = self.daemonset_list_step.failing_resources
+ self.failing_pvcs = self.pvc_list_step.failing_resources
+
+ self.jinja_env.get_template('index.html.j2').stream(
+ ns=self,
+ delta=delta).dump('{}/index.html'.format(self.res_dir))
+ self.jinja_env.get_template('raw_output.txt.j2').stream(
+ ns=self, namespace=NAMESPACE).dump('{}/onap-k8s.log'.format(
self.res_dir))
- # create a json file for version tracking
- with open(self.res_dir + "/onap_versions.json", "w") as write_file:
- json.dump(pod_versions, write_file)
- def _find_child_pods(self, selector):
- pods_list = []
- failed_pods = 0
- if selector:
- raw_selector = ''
- for key, value in selector.items():
- raw_selector += key + '=' + value + ','
- raw_selector = raw_selector[:-1]
- pods = self.core.list_namespaced_pod(
- NAMESPACE, label_selector=raw_selector).items
- for pod in pods:
- for known_pod in self.pods:
- if known_pod.name == pod.metadata.name:
- pods_list.append(known_pod)
- if not known_pod.ready():
- failed_pods += 1
- return (pods_list, failed_pods)
+ details = {}
+ for step in self._steps:
+ if step.failing:
+ self.failing = True
+ self.__logger.info("%s failing: %s", step.resource_type, len(step.failing_resources))
+ details[step.resource_type] = {
+ 'number_all': len(step.all_resources),
+ 'number_failing': len(step.failing_resources),
+ 'failing': self.map_by_name(step.failing_resources)
+ }
+ with (Path(self.res_dir).joinpath(settings.STATUS_DETAILS_JSON)).open('w') as file:
+ json.dump(details, file, indent=4)
+ if self.failing:
+ raise StatusCheckException
def map_by_name(self, resources):
return list(map(lambda resource: resource.name, resources))
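
Besides the HTML report, CheckNamespaceStatusStep.execute() above now dumps per-resource-type failure details to STATUS_DETAILS_JSON (the "new file with details of failed resources" from the commit message). A small sketch of consuming that artifact; the key names follow the details dict built above and the path assumes the default settings (/tmp/status-details.json):

import json
from pathlib import Path

details = json.loads(Path("/tmp/status-details.json").read_text())
for resource_type, summary in details.items():
    if summary["number_failing"]:
        print(f"{resource_type}: {summary['number_failing']}/{summary['number_all']} failing")
        for name in summary["failing"]:
            print(f"  - {name}")
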
diff --git a/src/onaptests/templates/status/index.html.j2 b/src/onaptests/templates/status/index.html.j2
index fe49abf..d85c5bb 100644
--- a/src/onaptests/templates/status/index.html.j2
+++ b/src/onaptests/templates/status/index.html.j2
@@ -297,7 +297,23 @@
{% endif %}
{% if (ns.ingresses | length) > 0 %}
- <div id="ingresses"></div>
+ <!-- Ingresses table -->
+ <div id="ingresses" class="table-container">
+ <table class="table is-fullwidth is-striped is-hoverable">
+ <thead>
+ <tr>
+ <th>Name</th>
+ </tr>
+ </thead>
+ <tbody>
+ {% for ingress in ns.ingresses %}
+ <tr>
+ <td>{{ ingress.name }}</td>
+ </tr>
+ {% endfor %}
+ </tbody>
+ </table>
+ </div>
{% endif %}
</div>
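
With the per-resource-type hierarchy in place, covering an additional basic resource kind is mostly boilerplate. A hypothetical example (not part of this commit) following the ConfigMap/Secret/Ingress pattern above; the NetworkPolicy wrapper class is an assumption and would have to be added next to the other resource wrappers:

from onapsdk.configuration import settings

from onaptests.steps.cloud.check_status import CheckBasicK8sResourcesStep
from onaptests.steps.cloud.resources import NetworkPolicy  # assumed wrapper class


class CheckK8sNetworkPoliciesStep(CheckBasicK8sResourcesStep):
    """Hypothetical check of all network policies in the ONAP namespace."""

    def __init__(self, cleanup: bool = False, **kwargs):
        super().__init__("networkpolicy", NetworkPolicy)

    def _init_resources(self):
        super()._init_resources()
        # NetworkingV1Api is already instantiated by the base CheckK8sResourcesStep
        self.k8s_resources = self.networking.list_namespaced_network_policy(
            settings.K8S_ONAP_NAMESPACE).items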