From ed4a579fcddbb3028f7668923fa83d756bdf2f8f Mon Sep 17 00:00:00 2001 From: Dmitry Puzikov Date: Wed, 25 Sep 2019 14:19:36 +0200 Subject: Mavenized image creation for readiness-check Issue-ID: INT-1045 Change-Id: I9c7b7fd26c8f376600aa7efc9c1d9e2eee001cde Signed-off-by: Dmitry Puzikov --- kubernetes/readiness/.gitignore | 15 ++ kubernetes/readiness/docker/init/Dockerfile | 20 --- kubernetes/readiness/docker/init/job_complete.py | 108 ------------ kubernetes/readiness/docker/init/ready.py | 189 --------------------- kubernetes/readiness/pom.xml | 109 ++++++++++++ kubernetes/readiness/src/main/docker/Dockerfile | 17 ++ .../readiness/src/main/scripts/job_complete.py | 108 ++++++++++++ kubernetes/readiness/src/main/scripts/ready.py | 189 +++++++++++++++++++++ 8 files changed, 438 insertions(+), 317 deletions(-) create mode 100644 kubernetes/readiness/.gitignore delete mode 100644 kubernetes/readiness/docker/init/Dockerfile delete mode 100644 kubernetes/readiness/docker/init/job_complete.py delete mode 100644 kubernetes/readiness/docker/init/ready.py create mode 100644 kubernetes/readiness/pom.xml create mode 100644 kubernetes/readiness/src/main/docker/Dockerfile create mode 100644 kubernetes/readiness/src/main/scripts/job_complete.py create mode 100644 kubernetes/readiness/src/main/scripts/ready.py diff --git a/kubernetes/readiness/.gitignore b/kubernetes/readiness/.gitignore new file mode 100644 index 0000000000..90cb66eacd --- /dev/null +++ b/kubernetes/readiness/.gitignore @@ -0,0 +1,15 @@ +# Eclipse +.classpath +.factorypath +.project +.pydevproject +.settings/ + +# IntelliJ +.idea/* +*.iml + +# Mac OS +*DS_Store* + +/target \ No newline at end of file diff --git a/kubernetes/readiness/docker/init/Dockerfile b/kubernetes/readiness/docker/init/Dockerfile deleted file mode 100644 index b060915354..0000000000 --- a/kubernetes/readiness/docker/init/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -FROM python:2-alpine - -ENV no_proxy "localhost,127.0.0.1,.cluster.local,$KUBERNETES_SERVICE_HOST" -# Setup Corporate proxy -ENV https_proxy ${HTTPS_PROXY} -ENV http_proxy ${HTTP_PROXY} - -RUN pip install requests pyyaml kubernetes - -ENV CERT="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" -ENV TOKEN="/var/run/secrets/kubernetes.io/serviceaccount/token" - -COPY ready.py /root/ready.py -RUN chmod a+x /root/ready.py - -COPY job_complete.py /root/job_complete.py -RUN chmod a+x /root/job_complete.py - -ENTRYPOINT ["/root/ready.py"] -CMD [""] \ No newline at end of file diff --git a/kubernetes/readiness/docker/init/job_complete.py b/kubernetes/readiness/docker/init/job_complete.py deleted file mode 100644 index a9570c5951..0000000000 --- a/kubernetes/readiness/docker/init/job_complete.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python -import getopt -import logging -import os -import sys -import time -import random - -from kubernetes import client - -# extract env variables. 
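The init scripts in this patch (both the deleted copies and the ones re-added under src/main/scripts) build their Kubernetes client by hand from the pod's service-account credentials: the Dockerfiles export CERT and TOKEN pointing at the mounted service-account files, and NAMESPACE is expected to be injected by the pod spec, as the lines that follow show. For reference, the python kubernetes client also provides an in-cluster helper that reads the same files; a minimal sketch of that alternative, not used by this patch (the job and namespace names are hypothetical):

    from kubernetes import client, config

    config.load_incluster_config()        # reads the mounted token and ca.crt automatically
    batch = client.BatchV1Api()
    job = batch.read_namespaced_job_status("some-init-job", "onap")
    print(job.status.succeeded, job.status.conditions)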
-namespace = os.environ['NAMESPACE'] -cert = os.environ['CERT'] -host = os.environ['KUBERNETES_SERVICE_HOST'] -token_path = os.environ['TOKEN'] - -with open(token_path, 'r') as token_file: - token = token_file.read().replace('\n', '') - -# setup logging -log = logging.getLogger(__name__) -handler = logging.StreamHandler(sys.stdout) -formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') -handler.setFormatter(formatter) -handler.setLevel(logging.INFO) -log.addHandler(handler) -log.setLevel(logging.INFO) - -configuration = client.Configuration() -configuration.host = "https://" + host -configuration.ssl_ca_cert = cert -configuration.api_key['authorization'] = token -configuration.api_key_prefix['authorization'] = 'Bearer' -batchV1Api = client.BatchV1Api(client.ApiClient(configuration)) - - -def is_job_complete(job_name): - complete = False - log.info("Checking if " + job_name + " is complete") - response = "" - try: - response = batchV1Api.read_namespaced_job_status(job_name, namespace) - if response.status.succeeded == 1: - job_status_type = response.status.conditions[0].type - if job_status_type == "Complete": - complete = True - else: - log.info(job_name + " is not complete") - else: - log.info(job_name + " has not succeeded yet") - return complete - except Exception as e: - log.error("Exception when calling read_namespaced_job_status: %s\n" % e) - - -DEF_TIMEOUT = 10 -DESCRIPTION = "Kubernetes container job complete check utility" -USAGE = "Usage: job_complete.py [-t ] -j " \ - "[-j ...]\n" \ - "where\n" \ - " - wait for container job complete timeout in min, " \ - "default is " + str(DEF_TIMEOUT) + "\n" \ - " - name of the job to wait for\n" - - -def main(argv): - # args are a list of job names - job_names = [] - timeout = DEF_TIMEOUT - try: - opts, args = getopt.getopt(argv, "hj:t:", ["job-name=", - "timeout=", - "help"]) - for opt, arg in opts: - if opt in ("-h", "--help"): - print("%s\n\n%s" % (DESCRIPTION, USAGE)) - sys.exit() - elif opt in ("-j", "--job-name"): - job_names.append(arg) - elif opt in ("-t", "--timeout"): - timeout = float(arg) - except (getopt.GetoptError, ValueError) as e: - print("Error parsing input parameters: %s\n" % e) - print(USAGE) - sys.exit(2) - if job_names.__len__() == 0: - print("Missing required input parameter(s)\n") - print(USAGE) - sys.exit(2) - - for job_name in job_names: - timeout = time.time() + timeout * 60 - while True: - complete = is_job_complete(job_name) - if complete is True: - break - elif time.time() > timeout: - log.warning("timed out waiting for '" + job_name + - "' to be completed") - exit(1) - else: - # spread in time potentially parallel execution in multiple - # containers - time.sleep(random.randint(5, 11)) - - -if __name__ == "__main__": - main(sys.argv[1:]) diff --git a/kubernetes/readiness/docker/init/ready.py b/kubernetes/readiness/docker/init/ready.py deleted file mode 100644 index 3f76d10395..0000000000 --- a/kubernetes/readiness/docker/init/ready.py +++ /dev/null @@ -1,189 +0,0 @@ -#!/usr/bin/env python -import getopt -import logging -import os -import sys -import time -import random - -from kubernetes import client - -# extract env variables. 
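Both copies of ready.py (this deleted one and the re-added one under src/main/scripts) go on to instantiate ExtensionsV1beta1Api and AppsV1beta1Api alongside CoreV1Api and BatchV1Api. Those beta API groups have since been deprecated and removed in newer Kubernetes releases; if the scripts are ever moved to a current client, the same reads are available on AppsV1Api. A hedged sketch of the apps/v1 equivalents, reusing the configuration object built just below:

    apps = client.AppsV1Api(client.ApiClient(configuration))

    apps.read_namespaced_deployment(deployment_name, namespace)
    apps.read_namespaced_stateful_set(statefulset_name, namespace)
    apps.read_namespaced_replica_set_status(replicaset_name, namespace)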
-namespace = os.environ['NAMESPACE'] -cert = os.environ['CERT'] -host = os.environ['KUBERNETES_SERVICE_HOST'] -token_path = os.environ['TOKEN'] - -with open(token_path, 'r') as token_file: - token = token_file.read().replace('\n', '') - -# setup logging -log = logging.getLogger(__name__) -handler = logging.StreamHandler(sys.stdout) -formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') -handler.setFormatter(formatter) -handler.setLevel(logging.INFO) -log.addHandler(handler) -log.setLevel(logging.INFO) - -configuration = client.Configuration() -configuration.host = "https://" + host -configuration.ssl_ca_cert = cert -configuration.api_key['authorization'] = token -configuration.api_key_prefix['authorization'] = 'Bearer' -coreV1Api = client.CoreV1Api(client.ApiClient(configuration)) -api_instance = client.ExtensionsV1beta1Api(client.ApiClient(configuration)) -api = client.AppsV1beta1Api(client.ApiClient(configuration)) -batchV1Api = client.BatchV1Api(client.ApiClient(configuration)) - - -def is_job_complete(job_name): - complete = False - log.info("Checking if " + job_name + " is complete") - try: - response = batchV1Api.read_namespaced_job_status(job_name, namespace) - if response.status.succeeded == 1: - job_status_type = response.status.conditions[0].type - if job_status_type == "Complete": - complete = True - log.info(job_name + " is complete") - else: - log.info(job_name + " is not complete") - else: - log.info(job_name + " has not succeeded yet") - return complete - except Exception as e: - log.error("Exception when calling read_namespaced_job_status: %s\n" % e) - - -def wait_for_statefulset_complete(statefulset_name): - try: - response = api.read_namespaced_stateful_set(statefulset_name, namespace) - s = response.status - if (s.updated_replicas == response.spec.replicas and - s.replicas == response.spec.replicas and - s.ready_replicas == response.spec.replicas and - s.current_replicas == response.spec.replicas and - s.observed_generation == response.metadata.generation): - log.info("Statefulset " + statefulset_name + " is ready") - return True - else: - log.info("Statefulset " + statefulset_name + " is not ready") - return False - except Exception as e: - log.error("Exception when waiting for Statefulset status: %s\n" % e) - - -def wait_for_deployment_complete(deployment_name): - try: - response = api.read_namespaced_deployment(deployment_name, namespace) - s = response.status - if (s.unavailable_replicas is None and - ( s.updated_replicas is None or s.updated_replicas == response.spec.replicas ) and - s.replicas == response.spec.replicas and - s.ready_replicas == response.spec.replicas and - s.observed_generation == response.metadata.generation): - log.info("Deployment " + deployment_name + " is ready") - return True - else: - log.info("Deployment " + deployment_name + " is not ready") - return False - except Exception as e: - log.error("Exception when waiting for deployment status: %s\n" % e) - - -def is_ready(container_name): - ready = False - log.info("Checking if " + container_name + " is ready") - try: - response = coreV1Api.list_namespaced_pod(namespace=namespace, - watch=False) - for i in response.items: - # container_statuses can be None, which is non-iterable. 
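The block this comment introduces walks each pod's metadata.owner_references to decide which controller to poll: a StatefulSet or Job is checked directly, while a pod owned by a ReplicaSet is checked one level further up against the owning Deployment. A minimal sketch of that chain, assuming the API objects defined above and a hypothetical pod name:

    pod = coreV1Api.read_namespaced_pod("some-pod-6d4f9c9b7d-abcde", namespace)
    owner = pod.metadata.owner_references[0]       # StatefulSet, ReplicaSet or Job
    if owner.kind == "ReplicaSet":
        rs = api_instance.read_namespaced_replica_set_status(owner.name, namespace)
        deployment_name = rs.metadata.owner_references[0].name

One detail worth double-checking: the read_name() helper further down accesses item.metadata.owner_reference[0], while the client model exposes the field as owner_references (plural).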
- if i.status.container_statuses is None: - continue - for s in i.status.container_statuses: - if s.name == container_name: - name = read_name(i) - if i.metadata.owner_references[0].kind == "StatefulSet": - ready = wait_for_statefulset_complete(name) - elif i.metadata.owner_references[0].kind == "ReplicaSet": - deployment_name = get_deployment_name(name) - ready = wait_for_deployment_complete(deployment_name) - elif i.metadata.owner_references[0].kind == "Job": - ready = is_job_complete(name) - - return ready - - else: - continue - return ready - except Exception as e: - log.error("Exception when calling list_namespaced_pod: %s\n" % e) - - -def read_name(item): - return item.metadata.owner_reference[0].name - - -def get_deployment_name(replicaset): - api_response = api_instance.read_namespaced_replica_set_status(replicaset, - namespace) - deployment_name = read_name(api_response) - return deployment_name - - -DEF_TIMEOUT = 10 -DESCRIPTION = "Kubernetes container readiness check utility" -USAGE = "Usage: ready.py [-t ] -c " \ - "[-c ...]\n" \ - "where\n" \ - " - wait for container readiness timeout in min, " \ - "default is " + str(DEF_TIMEOUT) + "\n" \ - " - name of the container to wait for\n" - - -def main(argv): - # args are a list of container names - container_names = [] - timeout = DEF_TIMEOUT - try: - opts, args = getopt.getopt(argv, "hc:t:", ["container-name=", - "timeout=", - "help"]) - for opt, arg in opts: - if opt in ("-h", "--help"): - print("%s\n\n%s" % (DESCRIPTION, USAGE)) - sys.exit() - elif opt in ("-c", "--container-name"): - container_names.append(arg) - elif opt in ("-t", "--timeout"): - timeout = float(arg) - except (getopt.GetoptError, ValueError) as e: - print("Error parsing input parameters: %s\n" % e) - print(USAGE) - sys.exit(2) - if container_names.__len__() == 0: - print("Missing required input parameter(s)\n") - print(USAGE) - sys.exit(2) - - for container_name in container_names: - timeout = time.time() + timeout * 60 - while True: - ready = is_ready(container_name) - if ready is True: - break - elif time.time() > timeout: - log.warning("timed out waiting for '" + container_name + - "' to be ready") - exit(1) - else: - # spread in time potentially parallel execution in multiple - # containers - time.sleep(random.randint(5, 11)) - - -if __name__ == "__main__": - main(sys.argv[1:]) - diff --git a/kubernetes/readiness/pom.xml b/kubernetes/readiness/pom.xml new file mode 100644 index 0000000000..af834ff4c2 --- /dev/null +++ b/kubernetes/readiness/pom.xml @@ -0,0 +1,109 @@ + + + 4.0.0 + + org.onap.oparent + oparent + 2.0.0 + + + org.onap.oom.readiness.check + readiness-check-docker + 2.2.1-SNAPSHOT + pom + oom-readiness-check-image + Contains dockerfiles and scripts for readiness-check image.
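The fabric8 docker-maven-plugin configuration that follows wires the image into the normal Maven lifecycle: the clean-images execution (pre-clean phase) removes previously built readiness-check images, generate-images (package phase) builds the image from src/main/docker, and push-images (deploy phase) pushes it with its latest and latest-${project.version} tags. The inline dir assembly stages everything under src/main/scripts into the build context, which appears to be why the new Dockerfile copies the scripts with a single COPY maven/ /root/ rather than one COPY per file. A hedged usage sketch — the target registry comes from the docker.push.registry property, which is not defined in this file:

    mvn clean package     # remove the old local image, then rebuild it
    mvn clean deploy      # rebuild and push the image to ${docker.push.registry}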
+ + + 0.31.0 + onap + + + + + + io.fabric8 + docker-maven-plugin + ${docker.fabric.version} + + true + 1.23 + + + ${docker.push.registry}/${oom.docker.namespace}/readiness-check:%l + + @ + + dir + + + + ${project.basedir}/src/main/scripts + / + + **/* + + + + + + + latest + latest-${project.version} + + try + ${project.basedir}/src/main/docker + + + + + + + clean-images + pre-clean + + remove + + + true + + + + generate-images + package + + build + + + + push-images + deploy + + push + + + + + + + \ No newline at end of file diff --git a/kubernetes/readiness/src/main/docker/Dockerfile b/kubernetes/readiness/src/main/docker/Dockerfile new file mode 100644 index 0000000000..638e8efd67 --- /dev/null +++ b/kubernetes/readiness/src/main/docker/Dockerfile @@ -0,0 +1,17 @@ +FROM python:3-alpine3.9 + +ENV no_proxy "localhost,127.0.0.1,.cluster.local,$KUBERNETES_SERVICE_HOST" +# Setup Corporate proxy +ENV https_proxy ${HTTPS_PROXY} +ENV http_proxy ${HTTP_PROXY} + +RUN pip install requests pyyaml kubernetes + +ENV CERT="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +ENV TOKEN="/var/run/secrets/kubernetes.io/serviceaccount/token" + +COPY maven/ /root/ +RUN chmod -R a+x /root/ + +ENTRYPOINT ["/root/ready.py"] +CMD [""] \ No newline at end of file diff --git a/kubernetes/readiness/src/main/scripts/job_complete.py b/kubernetes/readiness/src/main/scripts/job_complete.py new file mode 100644 index 0000000000..a9570c5951 --- /dev/null +++ b/kubernetes/readiness/src/main/scripts/job_complete.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python +import getopt +import logging +import os +import sys +import time +import random + +from kubernetes import client + +# extract env variables. +namespace = os.environ['NAMESPACE'] +cert = os.environ['CERT'] +host = os.environ['KUBERNETES_SERVICE_HOST'] +token_path = os.environ['TOKEN'] + +with open(token_path, 'r') as token_file: + token = token_file.read().replace('\n', '') + +# setup logging +log = logging.getLogger(__name__) +handler = logging.StreamHandler(sys.stdout) +formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') +handler.setFormatter(formatter) +handler.setLevel(logging.INFO) +log.addHandler(handler) +log.setLevel(logging.INFO) + +configuration = client.Configuration() +configuration.host = "https://" + host +configuration.ssl_ca_cert = cert +configuration.api_key['authorization'] = token +configuration.api_key_prefix['authorization'] = 'Bearer' +batchV1Api = client.BatchV1Api(client.ApiClient(configuration)) + + +def is_job_complete(job_name): + complete = False + log.info("Checking if " + job_name + " is complete") + response = "" + try: + response = batchV1Api.read_namespaced_job_status(job_name, namespace) + if response.status.succeeded == 1: + job_status_type = response.status.conditions[0].type + if job_status_type == "Complete": + complete = True + else: + log.info(job_name + " is not complete") + else: + log.info(job_name + " has not succeeded yet") + return complete + except Exception as e: + log.error("Exception when calling read_namespaced_job_status: %s\n" % e) + + +DEF_TIMEOUT = 10 +DESCRIPTION = "Kubernetes container job complete check utility" +USAGE = "Usage: job_complete.py [-t ] -j " \ + "[-j ...]\n" \ + "where\n" \ + " - wait for container job complete timeout in min, " \ + "default is " + str(DEF_TIMEOUT) + "\n" \ + " - name of the job to wait for\n" + + +def main(argv): + # args are a list of job names + job_names = [] + timeout = DEF_TIMEOUT + try: + opts, args = getopt.getopt(argv, "hj:t:", ["job-name=", + 
"timeout=", + "help"]) + for opt, arg in opts: + if opt in ("-h", "--help"): + print("%s\n\n%s" % (DESCRIPTION, USAGE)) + sys.exit() + elif opt in ("-j", "--job-name"): + job_names.append(arg) + elif opt in ("-t", "--timeout"): + timeout = float(arg) + except (getopt.GetoptError, ValueError) as e: + print("Error parsing input parameters: %s\n" % e) + print(USAGE) + sys.exit(2) + if job_names.__len__() == 0: + print("Missing required input parameter(s)\n") + print(USAGE) + sys.exit(2) + + for job_name in job_names: + timeout = time.time() + timeout * 60 + while True: + complete = is_job_complete(job_name) + if complete is True: + break + elif time.time() > timeout: + log.warning("timed out waiting for '" + job_name + + "' to be completed") + exit(1) + else: + # spread in time potentially parallel execution in multiple + # containers + time.sleep(random.randint(5, 11)) + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/kubernetes/readiness/src/main/scripts/ready.py b/kubernetes/readiness/src/main/scripts/ready.py new file mode 100644 index 0000000000..3f76d10395 --- /dev/null +++ b/kubernetes/readiness/src/main/scripts/ready.py @@ -0,0 +1,189 @@ +#!/usr/bin/env python +import getopt +import logging +import os +import sys +import time +import random + +from kubernetes import client + +# extract env variables. +namespace = os.environ['NAMESPACE'] +cert = os.environ['CERT'] +host = os.environ['KUBERNETES_SERVICE_HOST'] +token_path = os.environ['TOKEN'] + +with open(token_path, 'r') as token_file: + token = token_file.read().replace('\n', '') + +# setup logging +log = logging.getLogger(__name__) +handler = logging.StreamHandler(sys.stdout) +formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') +handler.setFormatter(formatter) +handler.setLevel(logging.INFO) +log.addHandler(handler) +log.setLevel(logging.INFO) + +configuration = client.Configuration() +configuration.host = "https://" + host +configuration.ssl_ca_cert = cert +configuration.api_key['authorization'] = token +configuration.api_key_prefix['authorization'] = 'Bearer' +coreV1Api = client.CoreV1Api(client.ApiClient(configuration)) +api_instance = client.ExtensionsV1beta1Api(client.ApiClient(configuration)) +api = client.AppsV1beta1Api(client.ApiClient(configuration)) +batchV1Api = client.BatchV1Api(client.ApiClient(configuration)) + + +def is_job_complete(job_name): + complete = False + log.info("Checking if " + job_name + " is complete") + try: + response = batchV1Api.read_namespaced_job_status(job_name, namespace) + if response.status.succeeded == 1: + job_status_type = response.status.conditions[0].type + if job_status_type == "Complete": + complete = True + log.info(job_name + " is complete") + else: + log.info(job_name + " is not complete") + else: + log.info(job_name + " has not succeeded yet") + return complete + except Exception as e: + log.error("Exception when calling read_namespaced_job_status: %s\n" % e) + + +def wait_for_statefulset_complete(statefulset_name): + try: + response = api.read_namespaced_stateful_set(statefulset_name, namespace) + s = response.status + if (s.updated_replicas == response.spec.replicas and + s.replicas == response.spec.replicas and + s.ready_replicas == response.spec.replicas and + s.current_replicas == response.spec.replicas and + s.observed_generation == response.metadata.generation): + log.info("Statefulset " + statefulset_name + " is ready") + return True + else: + log.info("Statefulset " + statefulset_name + " is not ready") + return False + except Exception 
as e: + log.error("Exception when waiting for Statefulset status: %s\n" % e) + + +def wait_for_deployment_complete(deployment_name): + try: + response = api.read_namespaced_deployment(deployment_name, namespace) + s = response.status + if (s.unavailable_replicas is None and + ( s.updated_replicas is None or s.updated_replicas == response.spec.replicas ) and + s.replicas == response.spec.replicas and + s.ready_replicas == response.spec.replicas and + s.observed_generation == response.metadata.generation): + log.info("Deployment " + deployment_name + " is ready") + return True + else: + log.info("Deployment " + deployment_name + " is not ready") + return False + except Exception as e: + log.error("Exception when waiting for deployment status: %s\n" % e) + + +def is_ready(container_name): + ready = False + log.info("Checking if " + container_name + " is ready") + try: + response = coreV1Api.list_namespaced_pod(namespace=namespace, + watch=False) + for i in response.items: + # container_statuses can be None, which is non-iterable. + if i.status.container_statuses is None: + continue + for s in i.status.container_statuses: + if s.name == container_name: + name = read_name(i) + if i.metadata.owner_references[0].kind == "StatefulSet": + ready = wait_for_statefulset_complete(name) + elif i.metadata.owner_references[0].kind == "ReplicaSet": + deployment_name = get_deployment_name(name) + ready = wait_for_deployment_complete(deployment_name) + elif i.metadata.owner_references[0].kind == "Job": + ready = is_job_complete(name) + + return ready + + else: + continue + return ready + except Exception as e: + log.error("Exception when calling list_namespaced_pod: %s\n" % e) + + +def read_name(item): + return item.metadata.owner_reference[0].name + + +def get_deployment_name(replicaset): + api_response = api_instance.read_namespaced_replica_set_status(replicaset, + namespace) + deployment_name = read_name(api_response) + return deployment_name + + +DEF_TIMEOUT = 10 +DESCRIPTION = "Kubernetes container readiness check utility" +USAGE = "Usage: ready.py [-t ] -c " \ + "[-c ...]\n" \ + "where\n" \ + " - wait for container readiness timeout in min, " \ + "default is " + str(DEF_TIMEOUT) + "\n" \ + " - name of the container to wait for\n" + + +def main(argv): + # args are a list of container names + container_names = [] + timeout = DEF_TIMEOUT + try: + opts, args = getopt.getopt(argv, "hc:t:", ["container-name=", + "timeout=", + "help"]) + for opt, arg in opts: + if opt in ("-h", "--help"): + print("%s\n\n%s" % (DESCRIPTION, USAGE)) + sys.exit() + elif opt in ("-c", "--container-name"): + container_names.append(arg) + elif opt in ("-t", "--timeout"): + timeout = float(arg) + except (getopt.GetoptError, ValueError) as e: + print("Error parsing input parameters: %s\n" % e) + print(USAGE) + sys.exit(2) + if container_names.__len__() == 0: + print("Missing required input parameter(s)\n") + print(USAGE) + sys.exit(2) + + for container_name in container_names: + timeout = time.time() + timeout * 60 + while True: + ready = is_ready(container_name) + if ready is True: + break + elif time.time() > timeout: + log.warning("timed out waiting for '" + container_name + + "' to be ready") + exit(1) + else: + # spread in time potentially parallel execution in multiple + # containers + time.sleep(random.randint(5, 11)) + + +if __name__ == "__main__": + main(sys.argv[1:]) + -- cgit 1.2.3-korg
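A closing note on the wait loops shared by job_complete.py and ready.py (both accept repeated -j/-c flags plus an optional -t timeout in minutes): inside the per-name loop, timeout = time.time() + timeout * 60 overwrites the minutes value with an absolute epoch deadline, so for the second and later names the recomputed deadline lands far in the future and the timeout is effectively never hit. A minimal sketch of keeping the two values separate — the deadline name is illustrative, not part of this patch:

    for container_name in container_names:
        deadline = time.time() + timeout * 60    # 'timeout' stays in minutes
        while not is_ready(container_name):
            if time.time() > deadline:
                log.warning("timed out waiting for '%s' to be ready", container_name)
                sys.exit(1)
            # spread in time potentially parallel execution in multiple containers
            time.sleep(random.randint(5, 11))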