5 files changed, 74 insertions, 32 deletions
diff --git a/kubernetes/appc/charts/appc-ansible-server/templates/pv.yaml b/kubernetes/appc/charts/appc-ansible-server/templates/pv.yaml
index 25257eaeaf..6fef70ceb5 100644
--- a/kubernetes/appc/charts/appc-ansible-server/templates/pv.yaml
+++ b/kubernetes/appc/charts/appc-ansible-server/templates/pv.yaml
@@ -19,7 +19,7 @@
 kind: PersistentVolume
 apiVersion: v1
 metadata:
-  name: {{ include "common.fullname" $global }}-data{{$i}}
+  name: {{ include "common.fullname" $global }}-playbook{{$i}}
   namespace: {{ include "common.namespace" $global }}
   labels:
     app: {{ include "common.fullname" $global }}
@@ -32,7 +32,7 @@ spec:
     storage: {{ $global.Values.persistence.size}}
   accessModes:
     - {{ $global.Values.persistence.accessMode }}
-  storageClassName: "{{ include "common.fullname" $global }}-data"
+  storageClassName: "{{ include "common.fullname" $global }}-playbook"
   persistentVolumeReclaimPolicy: {{ $global.Values.persistence.volumeReclaimPolicy }}
   hostPath:
     path: {{ $global.Values.global.persistence.mountPath | default $global.Values.persistence.mountPath }}/{{ $global.Release.Name }}/{{ $global.Values.persistence.mountSubPath }}{{$i}}
diff --git a/kubernetes/appc/charts/appc-ansible-server/templates/statefulset.yaml b/kubernetes/appc/charts/appc-ansible-server/templates/statefulset.yaml
index 1df20c291d..f7fa15ca89 100644
--- a/kubernetes/appc/charts/appc-ansible-server/templates/statefulset.yaml
+++ b/kubernetes/appc/charts/appc-ansible-server/templates/statefulset.yaml
@@ -30,6 +30,13 @@ spec:
         app: {{ include "common.name" . }}
         release: {{ .Release.Name }}
     spec:
+      initContainers:
+      - name: {{ include "common.name" . }}-chown
+        image: "busybox"
+        command: ["sh", "-c", "chown -R {{ .Values.config.ansibleUid }}:{{ .Values.config.ansibleGid}} {{ .Values.persistence.playbookPath }}"]
+        volumeMounts:
+        - mountPath: {{ .Values.persistence.playbookPath }}
+          name: {{ include "common.fullname" . }}-playbook
       containers:
         - name: {{ include "common.name" . }}
           command: ["/bin/bash"]
@@ -60,7 +67,7 @@ spec:
              name: config
              subPath: RestServer_config
            - mountPath: {{ .Values.persistence.playbookPath }}
-             name: {{ include "common.fullname" . }}-data
+             name: {{ include "common.fullname" . }}-playbook
           resources:
{{ include "common.resources" . | indent 12 }}
        {{- if .Values.nodeSelector }}
@@ -80,20 +87,18 @@ spec:
             name: {{ include "common.fullname" . }}
             defaultMode: 0644
 {{ if not .Values.persistence.enabled }}
-        - name: {{ include "common.fullname" . }}-data
+        - name: {{ include "common.fullname" . }}-playbook
           emptyDir: {}
 {{ else }}
   volumeClaimTemplates:
   - metadata:
-      name: {{ include "common.fullname" . }}-data
+      name: {{ include "common.fullname" . }}-playbook
       labels:
         name: {{ include "common.fullname" . }}
     spec:
       accessModes: [ {{ .Values.persistence.accessMode }} ]
-      storageClassName: {{ include "common.fullname" . }}-data
+      storageClassName: {{ include "common.fullname" . }}-playbook
       resources:
         requests:
           storage: {{ .Values.persistence.size }}
 {{ end }}
-      imagePullSecrets:
-      - name: "{{ include "common.namespace" . }}-docker-registry-key"
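The statefulset change above adds a busybox init container that chowns the playbook volume to the new ansibleUid/ansibleGid before the Ansible server starts. As a rough sanity check, the init container's status can be read with the same kubernetes Python client that the readiness scripts further down already use. This is a sketch only, not part of this change; the "onap" namespace and the app label selector are illustrative assumptions.

# Sketch: confirm the "-chown" init container terminated successfully.
# Namespace and label selector below are assumptions for illustration.
from kubernetes import client, config

config.load_kube_config()  # or config.load_incluster_config() when run in-cluster
core = client.CoreV1Api()

pods = core.list_namespaced_pod(namespace="onap",
                                label_selector="app=appc-ansible-server")
for pod in pods.items:
    for status in pod.status.init_container_statuses or []:
        if status.name.endswith("-chown"):
            terminated = status.state.terminated
            ok = terminated is not None and terminated.exit_code == 0
            print(pod.metadata.name, "playbook chown completed:", ok)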
diff --git a/kubernetes/appc/charts/appc-ansible-server/values.yaml b/kubernetes/appc/charts/appc-ansible-server/values.yaml
index 59cf29f70c..f2dd150160 100644
--- a/kubernetes/appc/charts/appc-ansible-server/values.yaml
+++ b/kubernetes/appc/charts/appc-ansible-server/values.yaml
@@ -37,6 +37,8 @@ debugEnabled: false
 
 # application configuration
 config:
+  ansibleUid: 100
+  ansibleGid: 101
   appcChartName: appc
   mysqlServiceName: appc-dbhost
   configDir: /opt/onap/ccsdk
@@ -89,7 +91,7 @@ persistence:
   size: 1Gi
   mountPath: /dockerdata-nfs
   mountSubPath: appc/ansible
-  playbookPath: /var/local
+  playbookPath: /home/ansible
 
 ingress:
   enabled: false
diff --git a/kubernetes/readiness/docker/init/job_complete.py b/kubernetes/readiness/docker/init/job_complete.py
index b20cb5adec..a9570c5951 100644
--- a/kubernetes/readiness/docker/init/job_complete.py
+++ b/kubernetes/readiness/docker/init/job_complete.py
@@ -20,7 +20,8 @@ with open(token_path, 'r') as token_file:
 # setup logging
 log = logging.getLogger(__name__)
 handler = logging.StreamHandler(sys.stdout)
-handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
+formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
+handler.setFormatter(formatter)
 handler.setLevel(logging.INFO)
 log.addHandler(handler)
 log.setLevel(logging.INFO)
@@ -54,17 +55,22 @@ def is_job_complete(job_name):
 
 DEF_TIMEOUT = 10
 DESCRIPTION = "Kubernetes container job complete check utility"
-USAGE = "Usage: job_complete.py [-t <timeout>] -j <job_name> [-j <job_name> ...]\n" \
+USAGE = "Usage: job_complete.py [-t <timeout>] -j <job_name> " \
+        "[-j <job_name> ...]\n" \
         "where\n" \
-        "<timeout> - wait for container job complete timeout in min, default is " + str(DEF_TIMEOUT) + "\n" \
+        "<timeout> - wait for container job complete timeout in min, " \
+        "default is " + str(DEF_TIMEOUT) + "\n" \
         "<job_name> - name of the job to wait for\n"
 
+
 def main(argv):
     # args are a list of job names
     job_names = []
     timeout = DEF_TIMEOUT
     try:
-        opts, args = getopt.getopt(argv, "hj:t:", ["job-name=", "timeout=", "help"])
+        opts, args = getopt.getopt(argv, "hj:t:", ["job-name=",
+                                                   "timeout=",
+                                                   "help"])
         for opt, arg in opts:
             if opt in ("-h", "--help"):
                 print("%s\n\n%s" % (DESCRIPTION, USAGE))
@@ -89,11 +95,14 @@ def main(argv):
             if complete is True:
                 break
             elif time.time() > timeout:
-                log.warning("timed out waiting for '" + job_name + "' to be completed")
+                log.warning("timed out waiting for '" + job_name +
+                            "' to be completed")
                 exit(1)
             else:
-                # spread in time potentially parallel execution in multiple containers
+                # spread in time potentially parallel execution in multiple
+                # containers
                 time.sleep(random.randint(5, 11))
 
+
 if __name__ == "__main__":
-    main(sys.argv[1:])
\ No newline at end of file
+    main(sys.argv[1:])
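For context, the check that job_complete.py performs in its polling loop reduces to asking the BatchV1 API for the Job's status and treating status.succeeded as completion, with a timeout and a jittered sleep. The following is a minimal standalone sketch of that pattern, not the script itself; the job name and namespace are placeholder assumptions.

# Sketch: the completion check job_complete.py wraps, reduced to its core.
# "example-db-init-job" and the "onap" namespace are placeholder assumptions.
import random
import time

from kubernetes import client, config

config.load_incluster_config()  # the readiness image runs inside the cluster
batch = client.BatchV1Api()
namespace = "onap"


def job_succeeded(job_name):
    status = batch.read_namespaced_job_status(job_name, namespace).status
    return status.succeeded is not None and status.succeeded >= 1


deadline = time.time() + 10 * 60  # DEF_TIMEOUT minutes, as in the script
while not job_succeeded("example-db-init-job"):
    if time.time() > deadline:
        raise SystemExit("timed out waiting for job completion")
    # randomized sleep, as in the script, to spread parallel init containers in time
    time.sleep(random.randint(5, 11))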
diff --git a/kubernetes/readiness/docker/init/ready.py b/kubernetes/readiness/docker/init/ready.py
index 0e133eab01..db7105a18d 100644
--- a/kubernetes/readiness/docker/init/ready.py
+++ b/kubernetes/readiness/docker/init/ready.py
@@ -20,7 +20,8 @@ with open(token_path, 'r') as token_file:
 # setup logging
 log = logging.getLogger(__name__)
 handler = logging.StreamHandler(sys.stdout)
-handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
+formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
+handler.setFormatter(formatter)
 handler.setLevel(logging.INFO)
 log.addHandler(handler)
 log.setLevel(logging.INFO)
@@ -31,14 +32,14 @@ configuration.ssl_ca_cert = cert
 configuration.api_key['authorization'] = token
 configuration.api_key_prefix['authorization'] = 'Bearer'
 coreV1Api = client.CoreV1Api(client.ApiClient(configuration))
-api_instance=client.ExtensionsV1beta1Api(client.ApiClient(configuration))
+api_instance = client.ExtensionsV1beta1Api(client.ApiClient(configuration))
 api = client.AppsV1beta1Api(client.ApiClient(configuration))
 batchV1Api = client.BatchV1Api(client.ApiClient(configuration))
 
+
 def is_job_complete(job_name):
     complete = False
     log.info("Checking if " + job_name + " is complete")
-    response = ""
     try:
         response = batchV1Api.read_namespaced_job_status(job_name, namespace)
         if response.status.succeeded == 1:
@@ -54,11 +55,12 @@ def is_job_complete(job_name):
     except Exception as e:
         log.error("Exception when calling read_namespaced_job_status: %s\n" % e)
 
+
 def wait_for_statefulset_complete(statefulset_name):
     try:
         response = api.read_namespaced_stateful_set(statefulset_name, namespace)
         s = response.status
-        if ( s.updated_replicas == response.spec.replicas and
+        if (s.updated_replicas == response.spec.replicas and
             s.replicas == response.spec.replicas and
             s.ready_replicas == response.spec.replicas and
             s.current_replicas == response.spec.replicas and
@@ -71,11 +73,12 @@ def wait_for_statefulset_complete(statefulset_name):
     except Exception as e:
         log.error("Exception when waiting for Statefulset status: %s\n" % e)
 
+
 def wait_for_deployment_complete(deployment_name):
     try:
         response = api.read_namespaced_deployment(deployment_name, namespace)
         s = response.status
-        if ( s.unavailable_replicas == None and
+        if (s.unavailable_replicas is None and
             s.updated_replicas == response.spec.replicas and
             s.replicas == response.spec.replicas and
             s.ready_replicas == response.spec.replicas and
@@ -88,24 +91,27 @@ def wait_for_deployment_complete(deployment_name):
     except Exception as e:
         log.error("Exception when waiting for deployment status: %s\n" % e)
 
+
 def is_ready(container_name):
     ready = False
     log.info("Checking if " + container_name + " is ready")
     try:
-        response = coreV1Api.list_namespaced_pod(namespace=namespace, watch=False)
+        response = coreV1Api.list_namespaced_pod(namespace=namespace,
+                                                 watch=False)
         for i in response.items:
             # container_statuses can be None, which is non-iterable.
             if i.status.container_statuses is None:
                 continue
             for s in i.status.container_statuses:
                 if s.name == container_name:
-                    if i.metadata.owner_references[0].kind == "StatefulSet":
-                        ready = wait_for_statefulset_complete(i.metadata.owner_references[0].name)
+                    name = read_name(i)
+                    if i.metadata.owner_references[0].kind == "StatefulSet":
+                        ready = wait_for_statefulset_complete(name)
                     elif i.metadata.owner_references[0].kind == "ReplicaSet":
-                        api_response = api_instance.read_namespaced_replica_set_status(i.metadata.owner_references[0].name, namespace)
-                        ready = wait_for_deployment_complete(api_response.metadata.owner_references[0].name)
+                        deployment_name = get_deployment_name(name)
+                        ready = wait_for_deployment_complete(deployment_name)
                     elif i.metadata.owner_references[0].kind == "Job":
-                        ready = is_job_complete(i.metadata.owner_references[0].name)
+                        ready = is_job_complete(name)
 
                     return ready
 
@@ -115,19 +121,36 @@ def is_ready(container_name):
     except Exception as e:
         log.error("Exception when calling list_namespaced_pod: %s\n" % e)
 
+
+def read_name(item):
+    return item.metadata.owner_references[0].name
+
+
+def get_deployment_name(replicaset):
+    api_response = api_instance.read_namespaced_replica_set_status(replicaset,
+                                                                   namespace)
+    deployment_name = read_name(api_response)
+    return deployment_name
+
+
 DEF_TIMEOUT = 10
 DESCRIPTION = "Kubernetes container readiness check utility"
-USAGE = "Usage: ready.py [-t <timeout>] -c <container_name> [-c <container_name> ...]\n" \
+USAGE = "Usage: ready.py [-t <timeout>] -c <container_name> " \
+        "[-c <container_name> ...]\n" \
        "where\n" \
-       "<timeout> - wait for container readiness timeout in min, default is " + str(DEF_TIMEOUT) + "\n" \
+       "<timeout> - wait for container readiness timeout in min, " \
+       "default is " + str(DEF_TIMEOUT) + "\n" \
        "<container_name> - name of the container to wait for\n"
 
+
 def main(argv):
     # args are a list of container names
     container_names = []
     timeout = DEF_TIMEOUT
     try:
-        opts, args = getopt.getopt(argv, "hc:t:", ["container-name=", "timeout=", "help"])
+        opts, args = getopt.getopt(argv, "hc:t:", ["container-name=",
+                                                   "timeout=",
+                                                   "help"])
         for opt, arg in opts:
             if opt in ("-h", "--help"):
                 print("%s\n\n%s" % (DESCRIPTION, USAGE))
@@ -152,12 +175,15 @@ def main(argv):
             if ready is True:
                 break
             elif time.time() > timeout:
-                log.warning("timed out waiting for '" + container_name + "' to be ready")
+                log.warning("timed out waiting for '" + container_name +
+                            "' to be ready")
                 exit(1)
             else:
-                # spread in time potentially parallel execution in multiple containers
+                # spread in time potentially parallel execution in multiple
+                # containers
                 time.sleep(random.randint(5, 11))
 
+
 if __name__ == "__main__":
     main(sys.argv[1:])
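The read_name/get_deployment_name refactor in ready.py walks a pod's ownerReferences to find the Deployment behind its ReplicaSet before checking replica counts. Below is a minimal standalone sketch of that walk, not the script itself; the namespace is an assumption, and ExtensionsV1beta1Api is used only because ready.py itself targets that (now legacy) API.

# Sketch: trace a pod back to its Deployment through ownerReferences,
# mirroring read_name()/get_deployment_name(). Namespace is an assumption.
from kubernetes import client, config

config.load_incluster_config()
core = client.CoreV1Api()
ext = client.ExtensionsV1beta1Api()  # same API family ready.py uses

namespace = "onap"
for pod in core.list_namespaced_pod(namespace=namespace).items:
    if not pod.metadata.owner_references:
        continue  # standalone pods have no owner to resolve
    owner = pod.metadata.owner_references[0]
    if owner.kind == "ReplicaSet":
        rs = ext.read_namespaced_replica_set_status(owner.name, namespace)
        deployment = rs.metadata.owner_references[0].name
        print(pod.metadata.name, "-> ReplicaSet", owner.name,
              "-> Deployment", deployment)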