-rw-r--r--  docs/oom_quickstart_guide.rst                                                      |  34
-rw-r--r--  docs/release-notes.rst                                                             |  15
-rw-r--r--  kubernetes/cds/charts/cds-ui/templates/deployment.yaml                             |  24
-rw-r--r--  kubernetes/cds/charts/cds-ui/values.yaml                                           |  18
-rw-r--r--  kubernetes/onap/resources/overrides/environment.yaml                               | 225
-rw-r--r--  kubernetes/onap/resources/overrides/onap-all.yaml                                  |   2
-rw-r--r--  kubernetes/policy/charts/brmsgw/resources/config/pe/brmsgw.conf                    |   4
-rw-r--r--  kubernetes/readiness/docker/init/job_complete.py                                   |  23
-rw-r--r--  kubernetes/readiness/docker/init/ready.py                                          |  58
-rwxr-xr-x  kubernetes/robot/demo-k8s.sh                                                       |   5
-rw-r--r--  kubernetes/robot/resources/config/eteshare/config/integration_robot_properties.py  |   3
-rw-r--r--  kubernetes/robot/resources/config/eteshare/config/vm_properties.py                 |   2
-rw-r--r--  kubernetes/robot/values.yaml                                                       |   4
-rw-r--r--  kubernetes/sdc/charts/sdc-wfd-fe/templates/ingress.yaml                            |  34
-rw-r--r--  kubernetes/sdc/charts/sdc-wfd-fe/values.yaml                                       |   1
-rwxr-xr-x  kubernetes/so/charts/so-bpmn-infra/resources/config/overrides/override.yaml        |   3
16 files changed, 388 insertions, 67 deletions
diff --git a/docs/oom_quickstart_guide.rst b/docs/oom_quickstart_guide.rst
index 3c491b4447..a641fabf60 100644
--- a/docs/oom_quickstart_guide.rst
+++ b/docs/oom_quickstart_guide.rst
@@ -129,32 +129,42 @@ single command
 The --timeout 900 is currently required in Dublin to address long running
 initialization tasks for DMaaP and SO. Without this timeout value both
 applications may fail to deploy.
 
-  a. To deploy all ONAP applications use this command::
+To deploy all ONAP applications use this command::
 
     > cd oom/kubernetes
-    > helm deploy dev local/onap --namespace onap -f onap/resources/overrides/onap-all.yaml -f onap/resources/overrides/openstack.yaml --timeout 900
+    > helm deploy dev local/onap --namespace onap -f onap/resources/overrides/onap-all.yaml -f onap/resources/overrides/environment.yaml -f onap/resources/overrides/openstack.yaml --timeout 900
 
-  b. If you are using a custom override (e.g. integration-override.yaml) use this command::
+All override files may be customized (or replaced by other overrides) as needed.
 
-    > helm deploy dev local/onap -f /root/integration-override.yaml --namespace onap --timeout 900
+onap-all.yaml
 
-  c. If you have a slower cloud environment you may want to use the public-cloud.yaml
-     which has longer delay intervals on database updates.::
+  Enables the modules in the ONAP deployment. As ONAP is very modular, it
+  is possible to customize the deployment and disable some components
+  through this configuration file.
 
-    > helm deploy dev local/onap -f /root/oom/kubernetes/onap/resources/environments/public-cloud.yaml -f /root/integration-override.yaml --namespace onap --timeout 900
+environment.yaml
 
+  Includes configuration values specific to the deployment environment.
+  For example, adapt the readiness and liveness timers to the performance
+  level of your infrastructure.
 
-**Step 9.** Commands to interact with the OOM installation
+openstack.yaml
 
-Use the following to monitor your deployment and determine when ONAP is
-ready for use::
+  Includes all the OpenStack-related information for the default target
+  tenant used to deploy VNFs from ONAP, and/or additional parameters for
+  the embedded tests.
+
+**Step 9.** Verify the ONAP installation
+
+Use the following to monitor your deployment and determine when ONAP is ready for use::
 
     > kubectl get pods -n onap -o=wide
 
-Undeploying onap can be done using the following command::
+.. note::
+  Even when all pods are in a Running state, this does not guarantee that
+  all components are working correctly.
+
+  Launch the healthcheck tests using Robot to verify that the components
+  are healthy::
+
+    > ~/oom/kubernetes/robot/ete-k8s.sh onap health
 
-    > helm undeploy dev --purge
+**Step 10.** Undeploy ONAP::
+
+  > helm undeploy dev --purge
 
 More examples of using the deploy and undeploy plugins can be found here:
 https://wiki.onap.org/display/DW/OOM+Helm+%28un%29Deploy+plugins
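
A custom override can be layered on top of the standard ones with one more ``-f`` argument; later files take precedence. A minimal sketch of such a file (the file name is hypothetical; the keys are taken from the charts changed further down in this commit)::

    # integration-override.yaml (illustrative)
    so:
      liveness:
        # give SO extra time to start on slower infrastructure
        initialDelaySeconds: 180
    robot:
      # test-suite credentials, normally set in kubernetes/robot/values.yaml
      oofOsdfPciOptUsername: "oof@oof.onap.org"
      oofOsdfPciOptPassword: "demo123456!"

It would then be appended to the deploy command, e.g. ``helm deploy dev local/onap ... -f integration-override.yaml --timeout 900``.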
diff --git a/docs/release-notes.rst b/docs/release-notes.rst
index ae22cb25ee..dc10400dfb 100644
--- a/docs/release-notes.rst
+++ b/docs/release-notes.rst
@@ -7,16 +7,16 @@
 .. Links
 .. _release-notes-label:
 
-Release Notes
-=============
+ONAP Operations Manager Release Notes
+=====================================
 
-Version 4.0.0 Dublin Release
-----------------------------
+Version 4.0.0 (Dublin Release)
+------------------------------
 
-:Release Date: 2019-06-10
+:Release Date: 2019-06-26
 
 Summary
-=======
+-------
 
 **Platform Resiliency**
 
@@ -55,6 +55,7 @@ Summary
 
 * In default deployment OOM (consul-server-ui) exposes HTTP port 30270 outside of cluster. [`OJSI-134 <https://jira.onap.org/browse/OJSI-134>`_]
 * Hard coded password used for all oom deployments [`OJSI-188 <https://jira.onap.org/browse/OJSI-188>`_]
+* CVE-2019-12127 - OOM exposes unprotected API/UI on port 30270 [`OJSI-202 <https://jira.onap.org/browse/OJSI-202>`_]
 
 *Known Vulnerabilities in Used Modules*
 
@@ -78,7 +79,7 @@ Version 3.0.0 Casablanca Release
 :Release Date: 2018-11-30
 
 Summary
-=======
+-------
 
 The focus of this release was on incremental improvements in the following
 areas:
diff --git a/kubernetes/cds/charts/cds-ui/templates/deployment.yaml b/kubernetes/cds/charts/cds-ui/templates/deployment.yaml
index ead1b2edbd..021a6a9425 100644
--- a/kubernetes/cds/charts/cds-ui/templates/deployment.yaml
+++ b/kubernetes/cds/charts/cds-ui/templates/deployment.yaml
@@ -50,14 +50,22 @@ spec:
           env:
             - name: HOST
               value: 0.0.0.0
-            - name: API_BLUEPRINT_CONTROLLER_BASE_URL
-              value: {{ .Values.config.api.controller.baseUrl }}
-            - name: API_BLUEPRINT_CONTROLLER_AUTH_TOKEN
-              value: {{ .Values.config.api.controller.authToken }}
-            - name: API_BLUEPRINT_PROCESSOR_BASE_URL
-              value: {{ .Values.config.api.processor.baseUrl }}
-            - name: API_BLUEPRINT_PROCESSOR_AUTH_TOKEN
-              value: {{ .Values.config.api.processor.authToken }}
+            - name: APP_ACTION_DEPLOY_BLUEPRINT_GRPC_ENABLED
+              value: {{ .Values.config.app.action.deployBlueprint.grpcEnabled }}
+            - name: API_BLUEPRINT_CONTROLLER_HTTP_BASE_URL
+              value: {{ .Values.config.api.controller.http.baseUrl }}
+            - name: API_BLUEPRINT_CONTROLLER_HTTP_AUTH_TOKEN
+              value: {{ .Values.config.api.controller.http.authToken }}
+            - name: API_BLUEPRINT_PROCESSOR_HTTP_BASE_URL
+              value: {{ .Values.config.api.processor.http.baseUrl }}
+            - name: API_BLUEPRINT_PROCESSOR_HTTP_AUTH_TOKEN
+              value: {{ .Values.config.api.processor.http.authToken }}
+            - name: API_BLUEPRINT_PROCESSOR_GRPC_HOST
+              value: {{ .Values.config.api.processor.grpc.host }}
+            - name: API_BLUEPRINT_PROCESSOR_GRPC_PORT
+              value: {{ .Values.config.api.processor.grpc.port }}
+            - name: API_BLUEPRINT_PROCESSOR_GRPC_AUTH_TOKEN
+              value: {{ .Values.config.api.processor.grpc.authToken }}
           readinessProbe:
             tcpSocket:
               port: {{ .Values.service.internalPort }}
diff --git a/kubernetes/cds/charts/cds-ui/values.yaml b/kubernetes/cds/charts/cds-ui/values.yaml
index 036b888ea7..1ba8a17049 100644
--- a/kubernetes/cds/charts/cds-ui/values.yaml
+++ b/kubernetes/cds/charts/cds-ui/values.yaml
@@ -33,13 +33,23 @@ pullPolicy: Always
 
 # application configuration
 config:
+  app:
+    action:
+      deployBlueprint:
+        grpcEnabled: true
   api:
     controller:
-      baseUrl: http://cds-controller-blueprints:8080/api/v1
-      authToken: Basic Y2NzZGthcHBzOmNjc2RrYXBwcw==
+      http:
+        baseUrl: http://cds-controller-blueprints:8080/api/v1
+        authToken: Basic Y2NzZGthcHBzOmNjc2RrYXBwcw==
     processor:
-      baseUrl: http://cds-blueprints-processor-http:8080/api/v1
-      authToken: Basic Y2NzZGthcHBzOmNjc2RrYXBwcw==
+      http:
+        baseUrl: http://cds-blueprints-processor-http:8080/api/v1
+        authToken: Basic Y2NzZGthcHBzOmNjc2RrYXBwcw==
+      grpc:
+        host: cds-blueprints-processor-grpc
+        port: 9111
+        authToken: Basic Y2NzZGthcHBzOmNjc2RrYXBwcw==
 
 # default number of instances
 replicaCount: 1
diff --git a/kubernetes/onap/resources/overrides/environment.yaml b/kubernetes/onap/resources/overrides/environment.yaml
new file mode 100644
index 0000000000..75ce8e529b
--- /dev/null
+++ b/kubernetes/onap/resources/overrides/environment.yaml
@@ -0,0 +1,225 @@
+# Copyright © 2017, 2019 Amdocs, AT&T, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+#
+# These overrides will affect all helm charts (i.e. applications)
+# that are listed below and are 'enabled'.
+#
+# This file is specifically for environments in which ONAP takes time to
+# deploy. The increased timeouts prevent spurious restarts of the pods
+# during startup configuration.
+#
+# These timers have been tuned by the ONAP integration team. They have
+# been tested and validated in the ONAP integration lab (Intel/Windriver
+# lab). They are, however, indicative and may be adapted to your
+# environment, as they depend on the performance of the infrastructure
+# you are installing ONAP on.
+#
+# Please note that these timers must remain reasonable; in other words,
+# if your infrastructure is not performant enough, extending the timers
+# to very large values may not fix all installation issues on
+# oversubscribed hardware.
+#
+#################################################################
+aaf:
+  aaf-cs:
+    liveness:
+      initialDelaySeconds: 240
+    readiness:
+      initialDelaySeconds: 240
+  aaf-gui:
+    liveness:
+      initialDelaySeconds: 120
+    readiness:
+      initialDelaySeconds: 120
+  aaf-oauth:
+    liveness:
+      initialDelaySeconds: 300
+    readiness:
+      initialDelaySeconds: 300
+  aaf-service:
+    liveness:
+      initialDelaySeconds: 300
+    readiness:
+      initialDelaySeconds: 300
+aai:
+  liveness:
+    initialDelaySeconds: 120
+  aai-champ:
+    liveness:
+      initialDelaySeconds: 120
+    readiness:
+      initialDelaySeconds: 120
+  aai-data-router:
+    liveness:
+      initialDelaySeconds: 120
+  aai-sparky-be:
+    liveness:
+      initialDelaySeconds: 120
+  aai-spike:
+    liveness:
+      initialDelaySeconds: 120
+  aai-cassandra:
+    liveness:
+      periodSeconds: 120
+    readiness:
+      periodSeconds: 60
+appc:
+  mariadb-galera:
+    liveness:
+      initialDelaySeconds: 180
+      periodSeconds: 60
+cassandra:
+  liveness:
+    initialDelaySeconds: 120
+    periodSeconds: 120
+  readiness:
+    initialDelaySeconds: 120
+    periodSeconds: 60
+clamp:
+  liveness:
+    initialDelaySeconds: 60
+  readiness:
+    initialDelaySeconds: 60
+dcaegen2:
+  dcae-cloudify-manager:
+    liveness:
+      initialDelaySeconds: 120
+    readiness:
+      initialDelaySeconds: 120
+dmaap:
+  dmaap-bus-controller:
+    liveness:
+      initialDelaySeconds: 120
+    readiness:
+      initialDelaySeconds: 120
+  message-router:
+    liveness:
+      initialDelaySeconds: 120
+    readiness:
+      initialDelaySeconds: 120
+  dmaap-dr-prov:
+    liveness:
+      initialDelaySeconds: 120
+    readiness:
+      initialDelaySeconds: 120
+  mariadb:
+    liveness:
+      initialDelaySeconds: 180
+      periodSeconds: 60
+  dmaap-dr-node:
+    liveness:
+      initialDelaySeconds: 120
+    readiness:
+      initialDelaySeconds: 120
+mariadb-galera:
+  liveness:
+    initialDelaySeconds: 180
+    periodSeconds: 60
+  mariadb-galera-server:
+    liveness:
+      initialDelaySeconds: 120
+    readiness:
+      initialDelaySeconds: 120
+modeling:
+  mariadb-galera:
+    liveness:
+      initialDelaySeconds: 180
+      periodSeconds: 60
+oof:
+  oof-has:
+    music:
+      music-cassandra:
+        liveness:
+          periodSeconds: 120
+        readiness:
+          periodSeconds: 60
+portal:
+  portal-app:
+    liveness:
+      initialDelaySeconds: 60
+    readiness:
+      initialDelaySeconds: 60
+  portal-cassandra:
+    liveness:
+      periodSeconds: 120
+    readiness:
+      periodSeconds: 60
+sdc:
+  sdc-be:
+    liveness:
+      initialDelaySeconds: 120
+    readiness:
+      initialDelaySeconds: 120
+  sdc-cs:
+    liveness:
+      initialDelaySeconds: 120
+      periodSeconds: 120
+    readiness:
+      initialDelaySeconds: 120
+      periodSeconds: 60
+  sdc-es:
+    liveness:
+      initialDelaySeconds: 60
+    readiness:
+      initialDelaySeconds: 120
+  sdc-onboarding-be:
+    liveness:
+      initialDelaySeconds: 120
+    readiness:
+      initialDelaySeconds: 120
+sdnc:
+  liveness:
+    initialDelaySeconds: 60
+  readiness:
+    initialDelaySeconds: 60
+  dmaap-listener:
+    liveness:
+      initialDelaySeconds: 120
+    readiness:
+      initialDelaySeconds: 120
+  mariadb-galera:
+    liveness:
+      initialDelaySeconds: 180
+      periodSeconds: 60
+  sdnc-ansible-server:
+    readiness:
+      initialDelaySeconds: 120
+  sdnc-portal:
+    readiness:
+      initialDelaySeconds: 120
+  ueb-listener:
+    liveness:
+      initialDelaySeconds: 60
+    readiness:
+      initialDelaySeconds: 60
+so:
+  liveness:
+    initialDelaySeconds: 120
+  mariadb:
+    liveness:
+      initialDelaySeconds: 900
+    readiness:
+      initialDelaySeconds: 900
+uui:
+  uui-server:
+    liveness:
+      initialDelaySeconds: 120
+    readiness:
+      initialDelaySeconds: 120
+vfc:
+  mariadb-galera:
+    liveness:
+      initialDelaySeconds: 180
+      periodSeconds: 60
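
Each block in environment.yaml overrides the values of the matching chart, which feeds the numbers into its pods' probes. As a sketch, the ``sdc-be`` entry above would render into probes along these lines (the probe type and port come from the chart itself and are illustrative here)::

    livenessProbe:
      tcpSocket:
        port: 8080
      initialDelaySeconds: 120   # from environment.yaml
    readinessProbe:
      tcpSocket:
        port: 8080
      initialDelaySeconds: 120   # from environment.yaml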
diff --git a/kubernetes/onap/resources/overrides/onap-all.yaml b/kubernetes/onap/resources/overrides/onap-all.yaml
index 1716415b3e..04fbc97f54 100644
--- a/kubernetes/onap/resources/overrides/onap-all.yaml
+++ b/kubernetes/onap/resources/overrides/onap-all.yaml
@@ -74,3 +74,5 @@ vid:
   enabled: true
 vnfsdk:
   enabled: true
+modeling:
+  enabled: true
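
Since onap-all.yaml is nothing more than a list of per-chart ``enabled`` flags, the same pattern yields a slimmer deployment; a hypothetical reduced override might look like this (the module selection is illustrative)::

    # onap-reduced.yaml (illustrative)
    vid:
      enabled: true
    vnfsdk:
      enabled: false
    modeling:
      enabled: false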
diff --git a/kubernetes/policy/charts/brmsgw/resources/config/pe/brmsgw.conf b/kubernetes/policy/charts/brmsgw/resources/config/pe/brmsgw.conf
index 7f611a1976..01c79be6e3 100644
--- a/kubernetes/policy/charts/brmsgw/resources/config/pe/brmsgw.conf
+++ b/kubernetes/policy/charts/brmsgw/resources/config/pe/brmsgw.conf
@@ -63,5 +63,5 @@ BRMS_UEB_API_KEY=
 BRMS_UEB_API_SECRET=
 
 #Dependency.json file version
-BRMS_DEPENDENCY_VERSION=1.4.0
-BRMS_MODELS_DEPENDENCY_VERSION=2.0.0
+BRMS_DEPENDENCY_VERSION=1.4.2
+BRMS_MODELS_DEPENDENCY_VERSION=2.0.2
diff --git a/kubernetes/readiness/docker/init/job_complete.py b/kubernetes/readiness/docker/init/job_complete.py
index b20cb5adec..a9570c5951 100644
--- a/kubernetes/readiness/docker/init/job_complete.py
+++ b/kubernetes/readiness/docker/init/job_complete.py
@@ -20,7 +20,8 @@ with open(token_path, 'r') as token_file:
 # setup logging
 log = logging.getLogger(__name__)
 handler = logging.StreamHandler(sys.stdout)
-handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
+formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
+handler.setFormatter(formatter)
 handler.setLevel(logging.INFO)
 log.addHandler(handler)
 log.setLevel(logging.INFO)
@@ -54,17 +55,22 @@ def is_job_complete(job_name):
 
 DEF_TIMEOUT = 10
 DESCRIPTION = "Kubernetes container job complete check utility"
-USAGE = "Usage: job_complete.py [-t <timeout>] -j <job_name> [-j <job_name> ...]\n" \
+USAGE = "Usage: job_complete.py [-t <timeout>] -j <job_name> " \
+        "[-j <job_name> ...]\n" \
         "where\n" \
-        "<timeout> - wait for container job complete timeout in min, default is " + str(DEF_TIMEOUT) + "\n" \
+        "<timeout> - wait for container job complete timeout in min, " \
+        "default is " + str(DEF_TIMEOUT) + "\n" \
         "<job_name> - name of the job to wait for\n"
 
+
 def main(argv):
     # args are a list of job names
     job_names = []
     timeout = DEF_TIMEOUT
     try:
-        opts, args = getopt.getopt(argv, "hj:t:", ["job-name=", "timeout=", "help"])
+        opts, args = getopt.getopt(argv, "hj:t:", ["job-name=",
+                                                   "timeout=",
+                                                   "help"])
         for opt, arg in opts:
             if opt in ("-h", "--help"):
                 print("%s\n\n%s" % (DESCRIPTION, USAGE))
@@ -89,11 +95,14 @@ def main(argv):
             if complete is True:
                 break
             elif time.time() > timeout:
-                log.warning("timed out waiting for '" + job_name + "' to be completed")
+                log.warning("timed out waiting for '" + job_name +
+                            "' to be completed")
                 exit(1)
             else:
-                # spread in time potentially parallel execution in multiple containers
+                # spread in time potentially parallel execution in multiple
+                # containers
                 time.sleep(random.randint(5, 11))
 
+
 if __name__ == "__main__":
-    main(sys.argv[1:])
\ No newline at end of file
+    main(sys.argv[1:])
diff --git a/kubernetes/readiness/docker/init/ready.py b/kubernetes/readiness/docker/init/ready.py
index 0e133eab01..db7105a18d 100644
--- a/kubernetes/readiness/docker/init/ready.py
+++ b/kubernetes/readiness/docker/init/ready.py
@@ -20,7 +20,8 @@ with open(token_path, 'r') as token_file:
 # setup logging
 log = logging.getLogger(__name__)
 handler = logging.StreamHandler(sys.stdout)
-handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
+formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
+handler.setFormatter(formatter)
 handler.setLevel(logging.INFO)
 log.addHandler(handler)
 log.setLevel(logging.INFO)
@@ -31,14 +32,14 @@
 configuration.ssl_ca_cert = cert
 configuration.api_key['authorization'] = token
 configuration.api_key_prefix['authorization'] = 'Bearer'
 coreV1Api = client.CoreV1Api(client.ApiClient(configuration))
-api_instance=client.ExtensionsV1beta1Api(client.ApiClient(configuration))
+api_instance = client.ExtensionsV1beta1Api(client.ApiClient(configuration))
 api = client.AppsV1beta1Api(client.ApiClient(configuration))
 batchV1Api = client.BatchV1Api(client.ApiClient(configuration))
 
+
 def is_job_complete(job_name):
     complete = False
     log.info("Checking if " + job_name + " is complete")
-    response = ""
     try:
         response = batchV1Api.read_namespaced_job_status(job_name, namespace)
         if response.status.succeeded == 1:
@@ -54,11 +55,12 @@
     except Exception as e:
         log.error("Exception when calling read_namespaced_job_status: %s\n" % e)
 
+
 def wait_for_statefulset_complete(statefulset_name):
     try:
         response = api.read_namespaced_stateful_set(statefulset_name, namespace)
         s = response.status
-        if ( s.updated_replicas == response.spec.replicas and
+        if (s.updated_replicas == response.spec.replicas and
                 s.replicas == response.spec.replicas and
                 s.ready_replicas == response.spec.replicas and
                 s.current_replicas == response.spec.replicas and
@@ -71,11 +73,12 @@
     except Exception as e:
         log.error("Exception when waiting for Statefulset status: %s\n" % e)
 
+
 def wait_for_deployment_complete(deployment_name):
     try:
         response = api.read_namespaced_deployment(deployment_name, namespace)
         s = response.status
-        if ( s.unavailable_replicas == None and
+        if (s.unavailable_replicas is None and
                 s.updated_replicas == response.spec.replicas and
                 s.replicas == response.spec.replicas and
                 s.ready_replicas == response.spec.replicas and
@@ -88,24 +91,27 @@
     except Exception as e:
         log.error("Exception when waiting for deployment status: %s\n" % e)
 
+
 def is_ready(container_name):
     ready = False
     log.info("Checking if " + container_name + " is ready")
     try:
-        response = coreV1Api.list_namespaced_pod(namespace=namespace, watch=False)
+        response = coreV1Api.list_namespaced_pod(namespace=namespace,
+                                                 watch=False)
         for i in response.items:
             # container_statuses can be None, which is non-iterable.
             if i.status.container_statuses is None:
                 continue
             for s in i.status.container_statuses:
                 if s.name == container_name:
-                    if i.metadata.owner_references[0].kind == "StatefulSet":
-                        ready = wait_for_statefulset_complete(i.metadata.owner_references[0].name)
+                    name = read_name(i)
+                    if i.metadata.owner_references[0].kind == "StatefulSet":
+                        ready = wait_for_statefulset_complete(name)
                     elif i.metadata.owner_references[0].kind == "ReplicaSet":
-                        api_response = api_instance.read_namespaced_replica_set_status(i.metadata.owner_references[0].name, namespace)
-                        ready = wait_for_deployment_complete(api_response.metadata.owner_references[0].name)
+                        deployment_name = get_deployment_name(name)
+                        ready = wait_for_deployment_complete(deployment_name)
                     elif i.metadata.owner_references[0].kind == "Job":
-                        ready = is_job_complete(i.metadata.owner_references[0].name)
+                        ready = is_job_complete(name)
 
             return ready
 
@@ -115,19 +121,36 @@
     except Exception as e:
         log.error("Exception when calling list_namespaced_pod: %s\n" % e)
 
+
+def read_name(item):
+    return item.metadata.owner_references[0].name
+
+
+def get_deployment_name(replicaset):
+    api_response = api_instance.read_namespaced_replica_set_status(replicaset,
+                                                                   namespace)
+    deployment_name = read_name(api_response)
+    return deployment_name
+
+
 DEF_TIMEOUT = 10
 DESCRIPTION = "Kubernetes container readiness check utility"
-USAGE = "Usage: ready.py [-t <timeout>] -c <container_name> [-c <container_name> ...]\n" \
+USAGE = "Usage: ready.py [-t <timeout>] -c <container_name> " \
+        "[-c <container_name> ...]\n" \
         "where\n" \
-        "<timeout> - wait for container readiness timeout in min, default is " + str(DEF_TIMEOUT) + "\n" \
+        "<timeout> - wait for container readiness timeout in min, " \
+        "default is " + str(DEF_TIMEOUT) + "\n" \
        "<container_name> - name of the container to wait for\n"
 
+
 def main(argv):
     # args are a list of container names
     container_names = []
     timeout = DEF_TIMEOUT
     try:
-        opts, args = getopt.getopt(argv, "hc:t:", ["container-name=", "timeout=", "help"])
+        opts, args = getopt.getopt(argv, "hc:t:", ["container-name=",
+                                                   "timeout=",
+                                                   "help"])
         for opt, arg in opts:
             if opt in ("-h", "--help"):
                 print("%s\n\n%s" % (DESCRIPTION, USAGE))
@@ -152,12 +175,15 @@
             if ready is True:
                 break
             elif time.time() > timeout:
-                log.warning("timed out waiting for '" + container_name + "' to be ready")
+                log.warning("timed out waiting for '" + container_name +
+                            "' to be ready")
                 exit(1)
             else:
-                # spread in time potentially parallel execution in multiple containers
+                # spread in time potentially parallel execution in multiple
+                # containers
                 time.sleep(random.randint(5, 11))
 
+
 if __name__ == "__main__":
     main(sys.argv[1:])
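
Both scripts above ship in the readiness-check image that OOM charts run as init containers to hold a pod back until its dependencies are up. A minimal sketch of the wiring (the image tag, container and dependency names, and the NAMESPACE env convention are assumptions based on the usual OOM chart pattern)::

    initContainers:
      - name: myapp-readiness
        image: oomk8s/readiness-check:2.0.0
        command:
          - /root/ready.py
        args:
          - --container-name
          - mariadb-galera
          - -t
          - "15"
        env:
          - name: NAMESPACE
            valueFrom:
              fieldRef:
                apiVersion: v1
                fieldPath: metadata.namespace

job_complete.py is wired the same way (``/root/job_complete.py -j <job_name>``) to gate pods on one-shot configuration jobs.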
diff --git a/kubernetes/robot/demo-k8s.sh b/kubernetes/robot/demo-k8s.sh
index 0f4578364a..eb6744b1ac 100755
--- a/kubernetes/robot/demo-k8s.sh
+++ b/kubernetes/robot/demo-k8s.sh
@@ -59,7 +59,7 @@ function usage
 
 # Set the defaults
 
-echo "Number of parameters:" 
+echo "Number of parameters:"
 echo $#
 
 if [ $# -lt 2 ];then
@@ -199,7 +199,7 @@ do
     vfwclosedloop)
         TAG="vfwclosedloop"
         shift
-        VARIABLES="$VARIABLES -v PACKET_GENERATOR_HOST:$1"
+        VARIABLES="$VARIABLES -v PACKET_GENERATOR_HOST:$1 -v pkg_host:$1"
         shift
         ;;
     *)
@@ -221,4 +221,3 @@
 DISPLAY_NUM=$(($GLOBAL_BUILD_NUMBER + 90))
 VARIABLEFILES="-V /share/config/vm_properties.py -V /share/config/integration_robot_properties.py -V /share/config/integration_preload_parameters.py"
 kubectl --namespace $NAMESPACE exec ${POD} -- ${ETEHOME}/runTags.sh ${VARIABLEFILES} ${VARIABLES} -d /share/logs/${OUTPUT_FOLDER} -i ${TAG} --display $DISPLAY_NUM 2> ${TAG}.out
-
diff --git a/kubernetes/robot/resources/config/eteshare/config/integration_robot_properties.py b/kubernetes/robot/resources/config/eteshare/config/integration_robot_properties.py
index 69190638a7..1db75b0c18 100644
--- a/kubernetes/robot/resources/config/eteshare/config/integration_robot_properties.py
+++ b/kubernetes/robot/resources/config/eteshare/config/integration_robot_properties.py
@@ -130,6 +130,9 @@ GLOBAL_OOF_SNIRO_SERVER_PORT = "8698"
 #oof user
 GLOBAL_OOF_OSDF_USERNAME="{{ .Values.oofUsername }}"
 GLOBAL_OOF_OSDF_PASSWORD="{{ .Values.oofPassword }}"
+#oof pci user
+GLOBAL_OOF_PCI_USERNAME="{{ .Values.oofOsdfPciOptUsername }}"
+GLOBAL_OOF_PCI_PASSWORD="{{ .Values.oofOsdfPciOptPassword }}"
 # oof cmso global info - everything is from the private oam network (also called onap private network)
 GLOBAL_OOF_CMSO_PROTOCOL = "https"
 GLOBAL_OOF_CMSO_SERVER_PORT = "8080"
diff --git a/kubernetes/robot/resources/config/eteshare/config/vm_properties.py b/kubernetes/robot/resources/config/eteshare/config/vm_properties.py
index 173def582d..9cc30319c7 100644
--- a/kubernetes/robot/resources/config/eteshare/config/vm_properties.py
+++ b/kubernetes/robot/resources/config/eteshare/config/vm_properties.py
@@ -75,6 +75,7 @@ GLOBAL_INJECTED_OPENSTACK_TENANT_ID_REGION_THREE = '{{ .Values.openStackTenantIdRegionThree }}'
 GLOBAL_INJECTED_OPENSTACK_PROJECT_DOMAIN_REGION_THREE = '{{ .Values.openStackProjectNameRegionThree }}'
 GLOBAL_INJECTED_OPENSTACK_USER_DOMAIN_REGION_THREE = '{{ .Values.openStackDomainIdRegionThree }}'
 GLOBAL_INJECTED_OPENSTACK_OAM_NETWORK_CIDR_PREFIX = '{{ .Values.openStackOamNetworkCidrPrefix }}'
+GLOBAL_INJECTED_OPENSTACK_PUBLIC_NETWORK = 'public'
 GLOBAL_INJECTED_POLICY_IP_ADDR = 'pdp.{{include "common.namespace" .}}'
 GLOBAL_INJECTED_POLICY_HEALTHCHECK_IP_ADDR = 'drools.{{include "common.namespace" .}}'
 GLOBAL_INJECTED_PORTAL_IP_ADDR = 'portal-app.{{include "common.namespace" .}}'
@@ -167,6 +168,7 @@ GLOBAL_INJECTED_PROPERTIES = {
     "GLOBAL_INJECTED_OPENSTACK_PROJECT_DOMAIN_REGION_THREE" : '{{ .Values.openStackProjectNameRegionThree }}',
     "GLOBAL_INJECTED_OPENSTACK_USER_DOMAIN_REGION_THREE" : '{{ .Values.openStackDomainIdRegionThree }}',
     "GLOBAL_INJECTED_OPENSTACK_OAM_NETWORK_CIDR_PREFIX" : '{{ .Values.openStackOamNetworkCidrPrefix }}',
+    "GLOBAL_INJECTED_OPENSTACK_PUBLIC_NETWORK" : 'public',
     "GLOBAL_INJECTED_POLICY_IP_ADDR" : 'pdp.{{include "common.namespace" .}}',
     "GLOBAL_INJECTED_POLICY_HEALTHCHECK_IP_ADDR" : 'drools.{{include "common.namespace" .}}',
     "GLOBAL_INJECTED_POLICY_API_IP_ADDR" : 'policy-api.{{include "common.namespace" .}}',
diff --git a/kubernetes/robot/values.yaml b/kubernetes/robot/values.yaml
index 9afb81e4f2..65da947c95 100644
--- a/kubernetes/robot/values.yaml
+++ b/kubernetes/robot/values.yaml
@@ -22,7 +22,7 @@ global: # global defaults
 
 # application image
 repository: nexus3.onap.org:10001
-image: onap/testsuite:1.4.1
+image: onap/testsuite:1.4.2
 pullPolicy: Always
 
 ubuntuInitImage: oomk8s/ubuntu-init:2.0.0
@@ -156,6 +156,8 @@ oofUsername: "oof@oof.onap.org"
 oofPassword: "demo123456!"
 cmsoUsername: "oof@oof.onap.org"
 cmsoPassword: "demo123456!"
+oofOsdfPciOptUsername: "oof@oof.onap.org"
+oofOsdfPciOptPassword: "demo123456!"
 oofHomingUsername: "admin1"
 oofHomingPassword: "plan.15"
diff --git a/kubernetes/sdc/charts/sdc-wfd-fe/templates/ingress.yaml b/kubernetes/sdc/charts/sdc-wfd-fe/templates/ingress.yaml
index d051eafead..e10de6df20 100644
--- a/kubernetes/sdc/charts/sdc-wfd-fe/templates/ingress.yaml
+++ b/kubernetes/sdc/charts/sdc-wfd-fe/templates/ingress.yaml
@@ -12,7 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+{{- define "rules.wf" -}}
+- http:
+    paths:
+    - path: /workflows/
+      backend:
+        serviceName: {{.Values.service.name}}
+        servicePort: {{.Values.service.internalPort}}
+{{- end -}}
+
 {{- if .Values.ingress.enabled }}
+
 apiVersion: extensions/v1beta1
 kind: Ingress
 metadata:
@@ -29,11 +39,21 @@ metadata:
 {{- end }}
 spec:
   rules:
-  - http:
-      paths:
-      - path: /workflows/
-        backend:
-          serviceName: {{.Values.service.name}}
-          servicePort: {{.Values.service.internalPort}}
+{{- if .Values.ingress.hosts}}
+{{- range .Values.ingress.hosts}}
+{{- include "rules.wf" $ | nindent 2}}
+{{- if .}}
+    host: {{. | quote}}
+{{- end}}
+{{- end}}
+  tls:
+  - hosts:
+{{- range .Values.ingress.hosts}}
+{{- if .}}
+    - {{ . | quote }}
+{{- end}}
+{{- end }}
+{{- else}}
+{{- include "rules.wf" . | nindent 2}}
+{{- end}}
 {{ end }}
-
diff --git a/kubernetes/sdc/charts/sdc-wfd-fe/values.yaml b/kubernetes/sdc/charts/sdc-wfd-fe/values.yaml
index d26c9279f0..639c811b08 100644
--- a/kubernetes/sdc/charts/sdc-wfd-fe/values.yaml
+++ b/kubernetes/sdc/charts/sdc-wfd-fe/values.yaml
@@ -80,6 +80,7 @@ service:
 
 ingress:
   enabled: false
+  hosts: ~
   annotations:
     ingress.kubernetes.io/secure-backends: "false"
     nginx.ingress.kubernetes.io/secure-backends: "false"
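
With the new optional ``hosts`` list, the ingress template above emits one host-qualified rule per entry plus a ``tls`` section, and falls back to the single anonymous rule when the list stays at ``~``. A sketch of values enabling host-based routing (the host names are illustrative)::

    ingress:
      enabled: true
      hosts:
        - sdc-wfd-fe.example.com
        - sdc-wfd-fe.backup.example.com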
diff --git a/kubernetes/so/charts/so-bpmn-infra/resources/config/overrides/override.yaml b/kubernetes/so/charts/so-bpmn-infra/resources/config/overrides/override.yaml
index 2c8d8658cc..35b86a33a0 100755
--- a/kubernetes/so/charts/so-bpmn-infra/resources/config/overrides/override.yaml
+++ b/kubernetes/so/charts/so-bpmn-infra/resources/config/overrides/override.yaml
@@ -48,6 +48,9 @@ bpelURL: http://so-bpmn-infra.{{ include "common.namespace" . }}:8081
 msb-ip: msb-iag.{{ include "common.namespace" . }}
 msb-port: 80
 mso:
+  rainyDay:
+    retryDurationMultiplier: 2
+    maxRetries: 5
   msoKey: 07a7159d3bf51a0e53be7a8f89699be7
   correlation:
     timeout: 60