Diffstat (limited to 'kubernetes/readiness')
-rw-r--r--  kubernetes/readiness/.gitignore                          15
-rw-r--r--  kubernetes/readiness/dep-health-init.yaml                47
-rw-r--r--  kubernetes/readiness/pom.xml                            109
-rw-r--r--  kubernetes/readiness/src/main/docker/Dockerfile          17
-rw-r--r--  kubernetes/readiness/src/main/scripts/job_complete.py   108
-rw-r--r--  kubernetes/readiness/src/main/scripts/ready.py           203
6 files changed, 0 insertions, 499 deletions
diff --git a/kubernetes/readiness/.gitignore b/kubernetes/readiness/.gitignore
deleted file mode 100644
index 90cb66eacd..0000000000
--- a/kubernetes/readiness/.gitignore
+++ /dev/null
@@ -1,15 +0,0 @@
-# Eclipse
-.classpath
-.factorypath
-.project
-.pydevproject
-.settings/
-
-# IntelliJ
-.idea/*
-*.iml
-
-# Mac OS
-*DS_Store*
-
-/target
\ No newline at end of file
diff --git a/kubernetes/readiness/dep-health-init.yaml b/kubernetes/readiness/dep-health-init.yaml
deleted file mode 100644
index 5b97852da0..0000000000
--- a/kubernetes/readiness/dep-health-init.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright © 2017 Amdocs, Bell Canada
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  labels:
-    app: healthy
-    version: 1.0.0
-  name: healthy
-spec:
-  selector:
-    matchLabels:
-      app: healthy
-      version: 1.0.0
-  template:
-    metadata:
-      labels:
-        app: healthy
-        version: 1.0.0
-      name: healthy
-    spec:
-      containers:
-      - args:
-        - --container-name
-        - hbase
-        command:
-        - /root/ready.py
-        image: oomk8s/readiness-check:2.0.0
-        imagePullPolicy: Always
-        name: healthy
-        env:
-        - name: NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
diff --git a/kubernetes/readiness/pom.xml b/kubernetes/readiness/pom.xml
deleted file mode 100644
index af834ff4c2..0000000000
--- a/kubernetes/readiness/pom.xml
+++ /dev/null
@@ -1,109 +0,0 @@
-<!--
-
-  ============LICENSE_START=======================================================
-  org.onap.aai
-  ================================================================================
-  Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
-  ================================================================================
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  ============LICENSE_END=========================================================
-
--->
-<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.onap.oparent</groupId>
-    <artifactId>oparent</artifactId>
-    <version>2.0.0</version>
-  </parent>
-
-  <groupId>org.onap.oom.readiness.check</groupId>
-  <artifactId>readiness-check-docker</artifactId>
-  <version>2.2.1-SNAPSHOT</version>
-  <packaging>pom</packaging>
-  <name>oom-readiness-check-image</name>
-  <description>Contains dockerfiles and scrtipts for readiness-ckeck image.</description>
-
-  <properties>
-    <docker.fabric.version>0.31.0</docker.fabric.version>
-    <oom.docker.namespace>onap</oom.docker.namespace>
-  </properties>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>io.fabric8</groupId>
-        <artifactId>docker-maven-plugin</artifactId>
-        <version>${docker.fabric.version}</version>
-        <configuration>
-          <verbose>true</verbose>
-          <apiVersion>1.23</apiVersion>
-          <images>
-            <image>
-              <name>${docker.push.registry}/${oom.docker.namespace}/readiness-check:%l</name>
-              <build>
-                <filter>@</filter>
-                <assembly>
-                  <mode>dir</mode>
-                  <inline xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2" xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
-                    <fileSets>
-                      <fileSet>
-                        <directory>${project.basedir}/src/main/scripts</directory>
-                        <outputDirectory>/</outputDirectory>
-                        <includes>
-                          <include>**/*</include>
-                        </includes>
-                      </fileSet>
-                    </fileSets>
-                  </inline>
-                </assembly>
-                <tags>
-                  <tag>latest</tag>
-                  <tag>latest-${project.version}</tag>
-                </tags>
-                <cleanup>try</cleanup>
-                <dockerFileDir>${project.basedir}/src/main/docker</dockerFileDir>
-              </build>
-            </image>
-          </images>
-        </configuration>
-        <executions>
-          <execution>
-            <id>clean-images</id>
-            <phase>pre-clean</phase>
-            <goals>
-              <goal>remove</goal>
-            </goals>
-            <configuration>
-              <removeAll>true</removeAll>
-            </configuration>
-          </execution>
-          <execution>
-            <id>generate-images</id>
-            <phase>package</phase>
-            <goals>
-              <goal>build</goal>
-            </goals>
-          </execution>
-          <execution>
-            <id>push-images</id>
-            <phase>deploy</phase>
-            <goals>
-              <goal>push</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-</project>
\ No newline at end of file
diff --git a/kubernetes/readiness/src/main/docker/Dockerfile b/kubernetes/readiness/src/main/docker/Dockerfile
deleted file mode 100644
index 638e8efd67..0000000000
--- a/kubernetes/readiness/src/main/docker/Dockerfile
+++ /dev/null
@@ -1,17 +0,0 @@
-FROM python:3-alpine3.9
-
-ENV no_proxy "localhost,127.0.0.1,.cluster.local,$KUBERNETES_SERVICE_HOST"
-# Setup Corporate proxy
-ENV https_proxy ${HTTPS_PROXY}
-ENV http_proxy ${HTTP_PROXY}
-
-RUN pip install requests pyyaml kubernetes
-
-ENV CERT="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
-ENV TOKEN="/var/run/secrets/kubernetes.io/serviceaccount/token"
-
-COPY maven/ /root/
-RUN chmod -R a+x /root/
-
-ENTRYPOINT ["/root/ready.py"]
-CMD [""]
\ No newline at end of file
diff --git a/kubernetes/readiness/src/main/scripts/job_complete.py b/kubernetes/readiness/src/main/scripts/job_complete.py
deleted file mode 100644
index a9570c5951..0000000000
--- a/kubernetes/readiness/src/main/scripts/job_complete.py
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/env python
-import getopt
-import logging
-import os
-import sys
-import time
-import random
-
-from kubernetes import client
-
-# extract env variables.
-namespace = os.environ['NAMESPACE']
-cert = os.environ['CERT']
-host = os.environ['KUBERNETES_SERVICE_HOST']
-token_path = os.environ['TOKEN']
-
-with open(token_path, 'r') as token_file:
-    token = token_file.read().replace('\n', '')
-
-# setup logging
-log = logging.getLogger(__name__)
-handler = logging.StreamHandler(sys.stdout)
-formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
-handler.setFormatter(formatter)
-handler.setLevel(logging.INFO)
-log.addHandler(handler)
-log.setLevel(logging.INFO)
-
-configuration = client.Configuration()
-configuration.host = "https://" + host
-configuration.ssl_ca_cert = cert
-configuration.api_key['authorization'] = token
-configuration.api_key_prefix['authorization'] = 'Bearer'
-batchV1Api = client.BatchV1Api(client.ApiClient(configuration))
-
-
-def is_job_complete(job_name):
-    complete = False
-    log.info("Checking if " + job_name + " is complete")
-    response = ""
-    try:
-        response = batchV1Api.read_namespaced_job_status(job_name, namespace)
-        if response.status.succeeded == 1:
-            job_status_type = response.status.conditions[0].type
-            if job_status_type == "Complete":
-                complete = True
-            else:
-                log.info(job_name + " is not complete")
-        else:
-            log.info(job_name + " has not succeeded yet")
-        return complete
-    except Exception as e:
-        log.error("Exception when calling read_namespaced_job_status: %s\n" % e)
-
-
-DEF_TIMEOUT = 10
-DESCRIPTION = "Kubernetes container job complete check utility"
-USAGE = "Usage: job_complete.py [-t <timeout>] -j <job_name> " \
-        "[-j <job_name> ...]\n" \
-        "where\n" \
-        "<timeout> - wait for container job complete timeout in min, " \
-        "default is " + str(DEF_TIMEOUT) + "\n" \
-        "<job_name> - name of the job to wait for\n"
-
-
-def main(argv):
-    # args are a list of job names
-    job_names = []
-    timeout = DEF_TIMEOUT
-    try:
-        opts, args = getopt.getopt(argv, "hj:t:", ["job-name=",
-                                                   "timeout=",
-                                                   "help"])
-        for opt, arg in opts:
-            if opt in ("-h", "--help"):
-                print("%s\n\n%s" % (DESCRIPTION, USAGE))
-                sys.exit()
-            elif opt in ("-j", "--job-name"):
-                job_names.append(arg)
-            elif opt in ("-t", "--timeout"):
-                timeout = float(arg)
-    except (getopt.GetoptError, ValueError) as e:
-        print("Error parsing input parameters: %s\n" % e)
-        print(USAGE)
-        sys.exit(2)
-    if job_names.__len__() == 0:
-        print("Missing required input parameter(s)\n")
-        print(USAGE)
-        sys.exit(2)
-
-    for job_name in job_names:
-        timeout = time.time() + timeout * 60
-        while True:
-            complete = is_job_complete(job_name)
-            if complete is True:
-                break
-            elif time.time() > timeout:
-                log.warning("timed out waiting for '" + job_name +
-                            "' to be completed")
-                exit(1)
-            else:
-                # spread in time potentially parallel execution in multiple
-                # containers
-                time.sleep(random.randint(5, 11))
-
-
-if __name__ == "__main__":
-    main(sys.argv[1:])
diff --git a/kubernetes/readiness/src/main/scripts/ready.py b/kubernetes/readiness/src/main/scripts/ready.py
deleted file mode 100644
index b932b04284..0000000000
--- a/kubernetes/readiness/src/main/scripts/ready.py
+++ /dev/null
@@ -1,203 +0,0 @@
-#!/usr/bin/env python
-import getopt
-import logging
-import os
-import sys
-import time
-import random
-
-from kubernetes import client
-
-# extract env variables.
-namespace = os.environ['NAMESPACE']
-cert = os.environ['CERT']
-host = os.environ['KUBERNETES_SERVICE_HOST']
-token_path = os.environ['TOKEN']
-
-with open(token_path, 'r') as token_file:
-    token = token_file.read().replace('\n', '')
-
-# setup logging
-log = logging.getLogger(__name__)
-handler = logging.StreamHandler(sys.stdout)
-formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
-handler.setFormatter(formatter)
-handler.setLevel(logging.INFO)
-log.addHandler(handler)
-log.setLevel(logging.INFO)
-
-configuration = client.Configuration()
-configuration.host = "https://" + host
-configuration.ssl_ca_cert = cert
-configuration.api_key['authorization'] = token
-configuration.api_key_prefix['authorization'] = 'Bearer'
-coreV1Api = client.CoreV1Api(client.ApiClient(configuration))
-api_instance = client.ExtensionsV1beta1Api(client.ApiClient(configuration))
-api = client.AppsV1beta1Api(client.ApiClient(configuration))
-batchV1Api = client.BatchV1Api(client.ApiClient(configuration))
-
-
-def is_job_complete(job_name):
-    complete = False
-    log.info("Checking if " + job_name + " is complete")
-    try:
-        response = batchV1Api.read_namespaced_job_status(job_name, namespace)
-        if response.status.succeeded == 1:
-            job_status_type = response.status.conditions[0].type
-            if job_status_type == "Complete":
-                complete = True
-                log.info(job_name + " is complete")
-            else:
-                log.info(job_name + " is not complete")
-        else:
-            log.info(job_name + " has not succeeded yet")
-        return complete
-    except Exception as e:
-        log.error("Exception when calling read_namespaced_job_status: %s\n" % e)
-
-
-def wait_for_statefulset_complete(statefulset_name):
-    try:
-        response = api.read_namespaced_stateful_set(statefulset_name, namespace)
-        s = response.status
-        if (s.replicas == response.spec.replicas and
-                s.ready_replicas == response.spec.replicas and
-                s.observed_generation == response.metadata.generation):
-            log.info("Statefulset " + statefulset_name + " is ready")
-            return True
-        else:
-            log.info("Statefulset " + statefulset_name + " is not ready")
-            return False
-    except Exception as e:
-        log.error("Exception when waiting for Statefulset status: %s\n" % e)
-
-
-def wait_for_deployment_complete(deployment_name):
-    try:
-        response = api.read_namespaced_deployment(deployment_name, namespace)
-        s = response.status
-        if (s.unavailable_replicas is None and
-                ( s.updated_replicas is None or s.updated_replicas == response.spec.replicas ) and
-                s.replicas == response.spec.replicas and
-                s.ready_replicas == response.spec.replicas and
-                s.observed_generation == response.metadata.generation):
-            log.info("Deployment " + deployment_name + " is ready")
-            return True
-        else:
-            log.info("Deployment " + deployment_name + " is not ready")
-            return False
-    except Exception as e:
-        log.error("Exception when waiting for deployment status: %s\n" % e)
-
-
-def wait_for_daemonset_complete(daemonset_name):
-    try:
-        response = api_instance.read_namespaced_daemon_set(daemonset_name, namespace)
-        s = response.status
-        if s.desired_number_scheduled == s.number_ready:
-            log.info("DaemonSet: " + str(s.number_ready) + "/" + str(s.desired_number_scheduled) + " nodes ready --> " + daemonset_name + " is ready")
-            return True
-        else:
ready") - return False - except Exception as e: - log.error("Exception when waiting for DaemonSet status: %s\n" % e) - - -def is_ready(container_name): - ready = False - log.info("Checking if " + container_name + " is ready") - try: - response = coreV1Api.list_namespaced_pod(namespace=namespace, - watch=False) - for i in response.items: - # container_statuses can be None, which is non-iterable. - if i.status.container_statuses is None: - continue - for s in i.status.container_statuses: - if s.name == container_name: - name = read_name(i) - if i.metadata.owner_references[0].kind == "StatefulSet": - ready = wait_for_statefulset_complete(name) - elif i.metadata.owner_references[0].kind == "ReplicaSet": - deployment_name = get_deployment_name(name) - ready = wait_for_deployment_complete(deployment_name) - elif i.metadata.owner_references[0].kind == "Job": - ready = is_job_complete(name) - elif i.metadata.owner_references[0].kind == "DaemonSet": - ready = wait_for_daemonset_complete(i.metadata.owner_references[0].name) - - return ready - - else: - continue - return ready - except Exception as e: - log.error("Exception when calling list_namespaced_pod: %s\n" % e) - - -def read_name(item): - return item.metadata.owner_references[0].name - - -def get_deployment_name(replicaset): - api_response = api_instance.read_namespaced_replica_set_status(replicaset, - namespace) - deployment_name = read_name(api_response) - return deployment_name - - -DEF_TIMEOUT = 10 -DESCRIPTION = "Kubernetes container readiness check utility" -USAGE = "Usage: ready.py [-t <timeout>] -c <container_name> " \ - "[-c <container_name> ...]\n" \ - "where\n" \ - "<timeout> - wait for container readiness timeout in min, " \ - "default is " + str(DEF_TIMEOUT) + "\n" \ - "<container_name> - name of the container to wait for\n" - - -def main(argv): - # args are a list of container names - container_names = [] - timeout = DEF_TIMEOUT - try: - opts, args = getopt.getopt(argv, "hc:t:", ["container-name=", - "timeout=", - "help"]) - for opt, arg in opts: - if opt in ("-h", "--help"): - print("%s\n\n%s" % (DESCRIPTION, USAGE)) - sys.exit() - elif opt in ("-c", "--container-name"): - container_names.append(arg) - elif opt in ("-t", "--timeout"): - timeout = float(arg) - except (getopt.GetoptError, ValueError) as e: - print("Error parsing input parameters: %s\n" % e) - print(USAGE) - sys.exit(2) - if container_names.__len__() == 0: - print("Missing required input parameter(s)\n") - print(USAGE) - sys.exit(2) - - for container_name in container_names: - timeout = time.time() + timeout * 60 - while True: - ready = is_ready(container_name) - if ready is True: - break - elif time.time() > timeout: - log.warning("timed out waiting for '" + container_name + - "' to be ready") - exit(1) - else: - # spread in time potentially parallel execution in multiple - # containers - time.sleep(random.randint(5, 11)) - - -if __name__ == "__main__": - main(sys.argv[1:]) - |