 -rw-r--r--  INFO.yaml | 154
 -rw-r--r--  LICENSE | 201
 -rw-r--r--  README.md | 150
 -rw-r--r--  ansible.cfg | 13
 -rw-r--r--  gitlab-ci/base.yml | 270
 -rw-r--r--  inventory/group_vars/all.yaml | 106
 -rw-r--r--  onap-oom-clean.yaml | 5
 -rw-r--r--  onap-oom-configure.yaml | 25
 -rw-r--r--  onap-oom-deploy.yaml | 5
 -rw-r--r--  onap-oom-postconfigure.yaml | 18
 -rw-r--r--  onap-oom-postinstall.yaml | 25
 -rw-r--r--  onap-oom-prepare-ci.yaml | 5
 -rw-r--r--  onap-oom-prepare.yaml | 16
 -rw-r--r--  onap-oom-wait.yaml | 5
 -rw-r--r--  requirements.yml | 27
 -rw-r--r--  roles/node_prepare/defaults/main.yaml | 1
 -rw-r--r--  roles/node_prepare/handlers/main.yaml | 7
 -rw-r--r--  roles/node_prepare/tasks/main.yaml | 28
 -rw-r--r--  roles/node_prepare/tasks/nfs_client.yaml | 35
 -rw-r--r--  roles/node_prepare/tasks/nfs_client_DEBIAN.yaml | 8
 -rw-r--r--  roles/node_prepare/tasks/nfs_server.yaml | 47
 -rw-r--r--  roles/node_prepare/tasks/nfs_server_COREOS.yaml | 4
 -rw-r--r--  roles/node_prepare/tasks/nfs_server_DEBIAN.yaml | 9
 -rw-r--r--  roles/oom_clean/defaults/main.yaml | 8
 -rw-r--r--  roles/oom_clean/tasks/helm3.yaml | 66
 -rw-r--r--  roles/oom_clean/tasks/main.yaml | 151
 -rw-r--r--  roles/oom_configure/defaults/main.yaml | 35
 -rw-r--r--  roles/oom_configure/tasks/main.yaml | 210
 -rw-r--r--  roles/oom_configure/templates/components-overrides.yaml.j2 | 153
 -rw-r--r--  roles/oom_configure/templates/onap-overrides.yaml.j2 | 202
 -rw-r--r--  roles/oom_configure/templates/so-overrides.yaml.j2 | 63
 -rw-r--r--  roles/oom_generate_artifacts/defaults/main.yaml | 7
 -rw-r--r--  roles/oom_generate_artifacts/tasks/loadbalancer_facts.yaml | 71
 -rw-r--r--  roles/oom_generate_artifacts/tasks/main.yaml | 82
 -rw-r--r--  roles/oom_launch/defaults/main.yaml | 7
 -rw-r--r--  roles/oom_launch/tasks/main.yaml | 199
 -rw-r--r--  roles/oom_postconfigure/defaults/main.yaml | 7
 -rw-r--r--  roles/oom_postconfigure/tasks/main.yaml | 52
 -rw-r--r--  roles/oom_prepare/defaults/main.yaml | 8
 -rw-r--r--  roles/oom_prepare/tasks/main.yaml | 242
 -rw-r--r--  roles/oom_wait/tasks/main.yaml | 40
 -rw-r--r--  roles/prepare_ci/defaults/main.yaml | 6
 -rw-r--r--  roles/prepare_ci/tasks/install_DEBIAN.yaml | 11
 -rw-r--r--  roles/prepare_ci/tasks/main.yaml | 57
 -rw-r--r--  roles/prepare_ci/vars/debian.yaml | 18
 -rwxr-xr-x  run.sh | 152
 -rw-r--r--  scripts/README.md | 76
 -rwxr-xr-x  scripts/chained-ci-init.sh | 102
 -rwxr-xr-x  scripts/clean.sh | 76
 -rw-r--r--  scripts/prepare_ssh.yml | 5
 -rw-r--r--  scripts/rc.sh | 145
 -rw-r--r--  scripts/ssh_prepare/defaults/main.yml | 4
 -rw-r--r--  scripts/ssh_prepare/tasks/main.yml | 40
 -rw-r--r--  scripts/ssh_prepare/templates/config.j2 | 33
 -rw-r--r--  vars/ddf.yml | 18
 -rw-r--r--  vars/vaulted_ssh_credentials.yml | 118
 56 files changed, 3628 insertions(+), 0 deletions(-)
diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644
index 0000000..e57620e
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,154 @@
+---
+project: 'integration/pipelines/oom-automatic-installation'
+project_creation_date: '2022-10-06'
+lifecycle_state: 'Incubation'
+project_category: ''
+project_lead: &onap_releng_ptl
+ name: 'Marek Szwalkiewicz'
+ email: 'marek.szwalkiewicz@external.t-mobile.pl'
+ company: 'T-Mobile'
+ id: 'mszwalkiewicz'
+ timezone: 'Europe/Warsaw'
+primary_contact: *onap_releng_ptl
+issue_tracking:
+ type: 'jira'
+ url: 'https://jira.onap.org/projects/INT'
+ key: 'INT'
+mailing_list:
+ type: 'groups.io'
+ url: 'lists.onap.org'
+ tag: 'integration'
+realtime_discussion: ''
+meetings:
+ - type: 'zoom'
+ agenda: 'https://wiki.onap.org/display/DW/Integration+Meeting+Minutes'
+ url: 'https://wiki.onap.org/pages/viewpage.action?pageId=6593670'
+ server: 'n/a'
+ channel: 'n/a'
+ repeats: 'weekly'
+ time: '13:00 UTC'
+repositories:
+ - 'integration/pipelines/oom-automatic-installation'
+committers:
+ - <<: *onap_releng_ptl
+ - name: 'Catherine Lefevre'
+ email: 'cl664y@att.com'
+ company: 'AT&T'
+ id: 'Katel34'
+ timezone: 'Europe/Belgium'
+ - name: 'Morgan Richomme'
+ email: 'morgan.richomme@orange.com'
+ company: 'orange'
+ id: 'mrichomme'
+ timezone: 'France/Paris'
+ - name: 'Bartek Grzybowski'
+ email: 'b.grzybowski@partner.samsung.com'
+ company: 'samsung'
+ id: 'bgrzybowski'
+ timezone: 'Poland/Warsaw'
+ - name: 'Krzysztof Kuzmicki'
+ email: 'krzysztof.kuzmicki@nokia.com'
+ company: 'nokia'
+ id: 'kkuzmick'
+ timezone: 'Europe/Warsaw'
+ - name: 'Andreas Geissler'
+ email: 'andreas-geissler@telekom.de'
+ company: 'Deutsche Telekom'
+ id: 'andreasgeissler'
+ timezone: 'Europe/Berlin'
+ - name: 'Michal Jaggielo'
+ email: 'Michal.Jagiello@t-mobile.pl'
+ company: 'T-Mobile'
+ id: 'MichalJagielloTMPL'
+ timezone: 'Europe/Warsaw'
+ - name: 'Lukasz Rajewski'
+ email: 'lukasz.rajewski@orange.com'
+ company: 'Orange'
+ id: 'rajewluk'
+ timezone: 'Europe/Warsaw'
+ - name: 'Illia Halych'
+ email: 'illia.halych@t-mobile.pl'
+ company: 'T-Mobile'
+ id: 'elihalych'
+ timezone: 'Europe/Warsaw'
+ - name: 'Alexander Mazuruk'
+ email: 'a.mazuruk@samsung.com'
+ company: 'Samsung'
+ id: 'aalexanderr'
+ timezone: 'Europe/Warsaw'
+ - name: 'Maciej Lisowski'
+ email: 'm.lisowski2@partner.samsung.com'
+ company: 'Samsung'
+ id: 'mlisowski'
+ timezone: 'Europe/Warsaw'
+ - name: 'Marcin Sebastian Krasowski'
+ email: 'm.krasowski@samsung.com'
+ company: 'Samsung'
+ id: 'mkrasowski'
+ timezone: 'Europe/Warsaw'
+ - name: 'Fiachra Corcoran'
+ email: 'fiachra.corcoran@est.tech'
+ company: 'Ericsson'
+ id: 'efiacor'
+ timezone: 'Europe/Dublin'
+tsc:
+ # yamllint disable rule:line-length
+ approval: 'https://lists.onap.org/pipermail/onap-tsc'
+ changes:
+ - type: 'Addition'
+ name: 'Brian Freeman, Mariusz Wagner'
+ link: 'https://wiki.onap.org/display/DW/TSC+2019-02-21'
+ - type: 'Addition'
+ name: 'Morgan Richomme'
+ link: 'https://wiki.onap.org/display/DW/TSC+2019-10-17'
+ - type: 'Addition'
+ name: 'Bartek, Marcin, Eric'
+ link: 'https://lists.onap.org/g/onap-tsc/message/5772'
+ - type: 'Addition'
+ name: 'Krzysztof Kuzmicki'
+ link: 'https://lists.onap.org/g/onap-tsc/topic/onap_integration_committer/73303463'
+ - type: 'Addition'
+ name: 'Pawel Wieczorek'
+ link: 'https://lists.onap.org/g/onap-tsc/topic/onap_integration_committer/73303462'
+ - type: 'Addition'
+ name: 'Andreas Geissler'
+ link: 'https://lists.onap.org/g/onap-tsc/topic/onap_integration_committer/73303461'
+ - type: 'Addition'
+ name: 'Michal Jaggiello'
+ link: 'https://lists.onap.org/g/onap-tsc/message/7102'
+ - type: 'Addition'
+ name: 'Lukasz Rajewski'
+ link: 'https://lists.onap.org/g/onap-tsc/message/7102'
+ - type: 'Addition'
+ name: 'Thierry Hardy'
+ link: 'https://lists.onap.org/g/onap-tsc/message/7102'
+ - type: 'Addition'
+ name: 'Lasse Kaihlavirta'
+ link: 'https://lists.onap.org/g/onap-tsc/message/7280'
+ - type: 'Addition'
+ name: 'Illia Halych'
+ link: 'https://lists.onap.org/g/onap-tsc/message/7696'
+ - type: 'Addition'
+ name: 'Bartosz Gardziejewski, Alexander Mazuruk'
+ link: 'https://lists.onap.org/g/onap-tsc/message/7893'
+ - type: 'Deletion'
+ name: 'Thierry Hardy, Lasse Kaihlavirta'
+ link: 'https://lists.onap.org/g/onap-tsc/message/7893'
+ - type: 'Deletion'
+ name: 'Marcin Przybysz'
+ link: 'https://lists.onap.org/g/onap-tsc/message/7893'
+ - type: 'Deletion'
+ name: 'Bartosz Gardziejewski'
+ link: 'https://lists.onap.org/g/onap-tsc/message/8286'
+ - type: 'Deletion'
+ name: 'Christophe Closset, Brian Freeman'
+ link: 'https://lists.onap.org/g/onap-tsc/message/8449'
+ - type: 'Addition'
+ name: 'Maciej Lisowski, Marcin Sebastian Krasowski'
+ link: 'https://lists.onap.org/g/onap-tsc/message/8449'
+ - type: 'Addition'
+ name: 'Fiachra Corcoran'
+ link: 'https://lists.onap.org/g/onap-tsc/message/8634'
+ - type: 'Addition'
+ name: 'Marek Szwalkiewicz'
+ link: 'https://lists.onap.org/g/onap-tsc/message/9275'
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..5b656e9
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2019 Orange-OpenSource / lfn / onap
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..748cfef
--- /dev/null
+++ b/README.md
@@ -0,0 +1,150 @@
+# ONAP automatic installation via OOM
+
+This project aims to automatically install ONAP. Its configuration comes
+from the config files shared among all OPNFV installers:
+- PDF - Pod Description File: describes the hardware level of the
+ infrastructure hosting the VIM
+- IDF - Installer Description File: a flexible file allowing the installer
+ to set specific parameters close to the infra settings, linked with the
+ install sequence
+- DDF - Datacenter Description File: a flexible file allowing the installer
+ to set specific information about the datacenter where OOM is deployed
+
+## Goal
+
+The goal of this installer is to install ONAP in a repeatable and reliable
+way using the OOM installer.
+
+
+## Input
+
+ - configuration files:
+ - mandatory:
+ - vars/pdf.yml: POD Description File
+ - vars/idf.yml: POD Infrastructure description File
+ - vars/ddf.yml: Datacenter Description File
+ - vars/user_cloud.yml: Credentials to connect to an OpenStack platform (in
+ order to create a first cloud inside ONAP)
+ - inventory/infra: the ansible inventory for the servers
+ - optional:
+ - vars/vaulted_ssh_credentials.yml: Ciphered private/public key pair
+ that allows connecting to the jumphost and the servers
+ - vars/components-overrides.yml: if you want to deploy a specific
+ set of components, set it here.
+ - Environment variables:
+ - mandatory:
+ - PRIVATE_TOKEN: to get the artifact
+ - artifacts_src: the url to get the artifacts
+ - OR artifacts_bin: b64_encoded zipped artifacts (tbd)
+ - ANSIBLE_VAULT_PASSWORD: the vault password needed by ciphered ansible
+ vars
+ - optional:
+ - RUNNER_TAG:
+ - override the default gitlab-runner tag
+ - CLEAN:
+ - role: Do we clean a previous ONAP installation
+ - values type: Boolean
+ - default: False
+ - ANSIBLE_VERBOSE:
+ - role: verbose option for ansible
+ - values: "", "-vvv"
+ - default: ""
+ - GERRIT_REVIEW:
+ - role: gerrit review to use
+ - value type: string
+ - default: ""
+ - GERRIT_PATCHSET:
+ - role: gerrit patchset to use in the gerrit review
+ - value type: string
+ - default: ""
+ - HELM_VERSION:
+ - role: the helm version that should be present
+ - default: "v3.8.2"
+ - USE_JUMPHOST:
+ - role: do we need to connect via a jumphost or not?
+ - value type: boolean
+ - default: "yes"
+ - PROXY_COMMAND:
+ - role: do we need to use a proxy command to reach the jumphost or
+ not?
+ - value: "", "the proxy command (example: connect -S socks:1080 %h
+ %p)"
+ - default: ""
+ - VNFS_TENANT_NAME:
+ - role: the name of the first tenant for VNF
+ - value type: string
+ - default: the value in idf (os_infra.tenant.name).
+ - VNFS_USER_NAME:
+ - role: the name of the first tenant user for VNF
+ - value type: string
+ - default: the value in idf (os_infra.user.name).
+ - ONAP_REPOSITORY:
+ - role: choose the repository where to download ONAP
+ - value type: string
+ - default: nexus.onap.eu
+ - ONAP_NAMESPACE:
+ - role: the namespace deployment in kubernetes
+ - value type: string
+ - default: "onap"
+ - ONAP_CHART_NAME:
+ - role: the name of the deployment in helm
+ - value type: string
+ - default: the value of ONAP_NAMESPACE
+ - OOM_BRANCH:
+ - role: branch/tag of OOM to deploy
+ - value type: string
+ - default: "master"
+ - ONAP_FLAVOR:
+ - role: the size of ONAP Pods limits
+ - values: "small", "large", "unlimited"
+ - default: "unlimited"
+ - POD:
+ - role: name of the pod used when inserting healthcheck results
+ - value type: string
+ - default: empty
+ - DEPLOYMENT:
+ - role: name of the deployment used for proper tagging when inserting
+ healthcheck results
+ - value type: string
+ - default: "rancher"
+ - DEPLOYMENT_TYPE:
+ - role: type of ONAP deployment expected
+ - values: "core", "small", "medium", "full"
+ - default: "full"
+ - ADDITIONAL_COMPONENTS:
+ - role: additional components to install on top of a deployment type
+ - value type: comma-separated list (example: "clamp,policy")
+ - TEST_RESULT_DB_URL:
+ - role: url of test db api
+ - value type: string
+ - default: "http://testresults.opnfv.org/test/api/v1/results"
+ - INGRESS:
+ - role: do we want to use ingress with ONAP or not
+ - value type: boolean
+ - default: False
+ - GATHER_NODE_FACTS:
+ - role: do we need to gather facts from the nodes during postinstallation
+ - value type: boolean
+ - default: true
+ - HELM3_USE_SQL:
+ - role: use the SQL storage backend for helm3
+ - value type: bool
+ - default: False
+
+
+## Output
+ - artifacts:
+ - vars/cluster.yml
+
+## Deployment types
+
+- core: aaf, aai, dmaap, sdc, sdnc, so, strimzi
+- small: core + appc, cli, esr, log, msb, multicloud, nbi, portal, vid
+- medium: small + clamp, contrib, dcaegen2, oof, policy, pomba
+- full: all onap components
+
+## Additional components
+
+List of components available:
+
+- medium components + modeling, vnfsdk, vfc, uui, sniro_emulator
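The phases described above map to `run.sh` sub-commands (as used by the jobs in `gitlab-ci/base.yml`). A minimal local sketch, assuming the configuration files listed under Input are in place; every exported value below is a placeholder:

```sh
# Placeholder values for illustration only.
export ANSIBLE_VAULT_PASSWORD='changeme'
export PRIVATE_TOKEN='glpat-xxxxxxxx'
export artifacts_src='https://example.org/artifacts.zip'
export DEPLOYMENT_TYPE='core'
export CLEAN='true'

./run.sh clean              # only wipes a previous installation when CLEAN is set
./run.sh prepare            # node preparation, NFS setup, OOM checkout
./run.sh configure          # render the override files for DEPLOYMENT_TYPE
./run.sh deploy             # helm deployment of the ONAP umbrella chart
./run.sh wait               # wait for the end of the installation
./run.sh postinstallation   # generate artifacts such as vars/cluster.yml
./run.sh postconfiguration  # post-deployment configuration (uses vars/ddf.yml)
```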
diff --git a/ansible.cfg b/ansible.cfg
new file mode 100644
index 0000000..9538e95
--- /dev/null
+++ b/ansible.cfg
@@ -0,0 +1,13 @@
+[defaults]
+host_key_checking=False
+forks = 20
+stdout_callback = yaml
+inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds
+callback_whitelist = full_skip
+interpreter_python = auto
+[ssh_connection]
+pipelining = True
+retries = 5
+ssh_args = -o ControlMaster=auto -o ControlPersist=60s
+scp_if_ssh = True
+transfer_method = scp
diff --git a/gitlab-ci/base.yml b/gitlab-ci/base.yml
new file mode 100644
index 0000000..54fae67
--- /dev/null
+++ b/gitlab-ci/base.yml
@@ -0,0 +1,270 @@
+---
+stages:
+ - test
+ - clean
+ - prepare
+ - configure_core
+ - deploy_core
+ - wait_core
+ - configure_small
+ - deploy_small
+ - wait_small
+ - configure_medium
+ - deploy_medium
+ - wait_medium
+ - configure_full
+ - deploy_full
+ - wait_full
+ - postinstall
+ - postconfigure
+
+variables:
+ target_folder: /opt/auto_oom
+ use_jumphost: "true"
+ pod_description_name: pod4
+ pod_fqdn: opnfv.fr
+ jumphost_user: opnfv
+ branch: master
+ pod: pod4
+ DEPLOYMENT_REQUESTED: full
+ ANSIBLE_DOCKER_IMAGE:
+ registry.gitlab.com/orange-opensource/lfn/ci_cd/docker_ansible_openstacksdk
+ ANSIBLE_DOCKER_TAG: "2.10"
+ CHAINED_CI_INIT: scripts/chained-ci-init.sh
+ GIT_SUBMODULE_STRATEGY: recursive
+
+.syntax_checking: &syntax_docker
+ stage: test
+ extends: .syntax_checking_tags
+ except:
+ - schedules
+ - triggers
+ - web
+ - pipelines
+ - external
+
+.deployment_and_test: &deployment_and_test
+ image: ${ANSIBLE_DOCKER_IMAGE}:${ANSIBLE_DOCKER_TAG}
+ extends: .ansible_run_tags
+ artifacts:
+ paths:
+ - vars/openstack_infos.yml
+ - vars/cluster.yml
+ - vars/hosts
+ when: always
+ before_script:
+ - curl -s ifconfig.me || true
+ - chmod 700 .
+ - . ./${CHAINED_CI_INIT} -a -i inventory/infra
+ after_script:
+ - ./scripts/clean.sh
+ #retry: 1
+
+yaml_checking:
+ image: docker.nexus.azure.onap.eu/sdesbure/yamllint:latest
+ script:
+ - yamllint .gitlab-ci.yml
+ - yamllint *.y?ml
+ - yamllint inventory/group_vars/all.yaml
+ - yamllint roles/*/tasks/*.y?ml
+ <<: *syntax_docker
+
+ansible_linting:
+ image: docker.nexus.azure.onap.eu/sdesbure/ansible-lint:latest
+ script:
+ - ansible-lint -x ANSIBLE0010,ANSIBLE0013 onap-*.yaml
+ <<: *syntax_docker
+
+# Clean
+clean:
+ stage: clean
+ script:
+ - ./run.sh clean
+ <<: *deployment_and_test
+ only:
+ variables:
+ - $CLEAN == 'True'
+ - $CLEAN == 'true'
+ - $CLEAN == 'Yes'
+ - $CLEAN == 'yes'
+ refs:
+ - schedules
+ - triggers
+ - web
+ retry: 2
+
+# Prepare
+prepare:
+ stage: prepare
+ only:
+ - schedules
+ - triggers
+ - web
+ - external
+ - pipelines
+ script:
+ - ./run.sh prepare
+ <<: *deployment_and_test
+
+# Configure
+.configure: &configure
+ script:
+ - ./run.sh configure
+ <<: *deployment_and_test
+
+.core: &core
+ only:
+ refs:
+ - schedules
+ - triggers
+ - web
+ - external
+ - pipelines
+ variables:
+ - $DEPLOYMENT_REQUESTED == "core"
+
+.small: &small
+ only:
+ refs:
+ - schedules
+ - triggers
+ - web
+ - external
+ - pipelines
+ variables:
+ - $DEPLOYMENT_REQUESTED == "small"
+
+.medium: &medium
+ only:
+ refs:
+ - schedules
+ - triggers
+ - web
+ - external
+ - pipelines
+ variables:
+ - $DEPLOYMENT_REQUESTED == "medium"
+
+.full: &full
+ only:
+ refs:
+ - schedules
+ - triggers
+ - web
+ - external
+ - pipelines
+ variables:
+ - $DEPLOYMENT_REQUESTED == "full"
+
+configure_core:
+ stage: configure_core
+ variables:
+ DEPLOYMENT_TYPE: core
+ <<: *core
+ <<: *configure
+
+configure_small:
+ stage: configure_small
+ variables:
+ DEPLOYMENT_TYPE: small
+ <<: *small
+ <<: *configure
+
+configure_medium:
+ stage: configure_medium
+ variables:
+ DEPLOYMENT_TYPE: medium
+ <<: *medium
+ <<: *configure
+
+configure_full:
+ stage: configure_full
+ variables:
+ DEPLOYMENT_TYPE: full
+ <<: *configure
+ <<: *full
+
+# Deploy
+.deploy: &deploy
+ script:
+ - ./run.sh deploy
+ <<: *deployment_and_test
+
+deploy_core:
+ stage: deploy_core
+ <<: *core
+ <<: *deploy
+
+deploy_small:
+ stage: deploy_small
+ <<: *small
+ <<: *deploy
+
+deploy_medium:
+ stage: deploy_medium
+ <<: *medium
+ <<: *deploy
+
+deploy_full:
+ stage: deploy_full
+ <<: *full
+ <<: *deploy
+
+.wait: &wait
+ allow_failure: true
+ timeout: 1h
+ script:
+ - ./run.sh wait
+ <<: *deployment_and_test
+
+wait_for_end_of_install_core:
+ stage: wait_core
+ <<: *core
+ <<: *wait
+
+wait_for_end_of_install_small:
+ stage: wait_small
+ <<: *small
+ <<: *wait
+
+wait_for_end_of_install_medium:
+ stage: wait_medium
+ <<: *medium
+ <<: *wait
+
+wait_for_end_of_install_full:
+ stage: wait_full
+ <<: *full
+ <<: *wait
+
+# Postconfiguration
+postconfiguration:
+ stage: postconfigure
+ allow_failure: true
+ only:
+ - schedules
+ - triggers
+ - web
+ - external
+ - pipelines
+ script:
+ - ./run.sh postconfiguration
+ <<: *deployment_and_test
+
+# Postinstallation
+postinstallation:
+ stage: postinstall
+ only:
+ - schedules
+ - triggers
+ - web
+ - external
+ - pipelines
+ dependencies:
+ - configure_full
+ - configure_core
+ - configure_small
+ - configure_medium
+ script:
+ - ./run.sh postinstallation
+ <<: *deployment_and_test
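All deployment jobs above are restricted to schedules/triggers/web/pipelines, so a run is normally started by chained-ci or through the standard GitLab pipeline-trigger API. A hedged example, with the project ID and trigger token as placeholders:

```sh
curl --request POST \
  --form "token=${TRIGGER_TOKEN}" \
  --form "ref=master" \
  --form "variables[DEPLOYMENT_REQUESTED]=core" \
  --form "variables[CLEAN]=true" \
  "https://gitlab.com/api/v4/projects/<project-id>/trigger/pipeline"
```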
diff --git a/inventory/group_vars/all.yaml b/inventory/group_vars/all.yaml
new file mode 100644
index 0000000..1da6588
--- /dev/null
+++ b/inventory/group_vars/all.yaml
@@ -0,0 +1,106 @@
+---
+oom_path: /opt/oom
+onap_base_url: https://gerrit.onap.org/r
+oom_url: "{{ onap_base_url }}/oom"
+branch: "{{ lookup('env','OOM_BRANCH')| default('master', true) }}"
+nfs_folder: /dockerdata-nfs
+
+generic_override_path: "{{ oom_path }}/kubernetes/onap/resources/overrides"
+onap_kubernetes_path: "{{ oom_path }}/kubernetes/onap"
+onap_chart_path: "{{ onap_kubernetes_path }}/Chart.yaml"
+contrib_path: "{{ oom_path }}/kubernetes/contrib"
+charts_path: "{{ oom_path }}/kubernetes/dist/packages"
+
+# Openstack needed variables
+openstack_tenant_name: "{{ lookup('env','TENANT_NAME') |
+ default(os_infra.tenant.name, true) }}"
+openstack_user_name: "{{ lookup('env','USER_NAME') |
+ default(os_infra.user.name, true) }}"
+
+base_dir: "{{ lookup('env', 'RUN_ROOT') | default(playbook_dir, true) }}"
+
+# ONAP needed variables
+onap_namespace: "{{ lookup('env', 'ONAP_NAMESPACE') | default('onap', true) }}"
+chart_name: "{{ lookup('env', 'ONAP_CHART_NAME') | default(onap_namespace, true) }}"
+onap_release_name: "{{ lookup('env', 'ONAP_RELEASE_NAME') | default('onap', true) }}"
+
+helm_version: "{{ lookup('env', 'HELM_VERSION') | default('v3.8.2', true) }}"
+use_servicemesh: "{{ lookup('env', 'SERVICEMESH') | default(False, true) }}"
+use_ingress: "{{ lookup('env', 'INGRESS') | default(False, true) }}"
+use_metrics: "{{ lookup('env', 'METRICS') | default(False, true) }}"
+use_custom_resources_metrics:
+ "{{ lookup('env', 'METRICS_CRD') | default(False, true) }}"
+
+# variable needed to access jumphost
+ssh_id_rsa: "{{ vault_ssh_id_rsa }}"
+gather_nodes_fact: "{{ lookup('env','GATHER_NODE_FACTS') |
+ default(true, true) }}"
+
+oom_etc_path: "{{ ansible_user_dir }}/oom/{{ branch }}"
+onap_all_file: "{{ generic_override_path }}/{{ use_ingress|
+ ternary('onap-all-ingress-nginx-vhost', 'onap-all') }}.yaml"
+#onap_all_file: "{{ generic_override_path }}/{{ use_servicemesh|
+# ternary('onap-all-ingress-istio', 'onap-all') }}.yaml"
+override_file: "{{ oom_etc_path }}/onap-overrides.yaml"
+override_components: "{{ oom_etc_path }}/onap-components.yaml"
+override_gating_component: "{{ oom_etc_path }}/gating-component-overrides.yaml"
+deployment_file: "{{ oom_etc_path }}/deployment.yaml"
+repository: "{{ lookup('env', 'ONAP_REPOSITORY') |
+ default('nexus3.onap.org:10001', true) }}"
+proxy_for_dockerhub: "{{ lookup('env', 'DOCKER_HUB_PROXY') |
+ default('', true) }}"
+proxy_for_elastic: "{{ lookup('env', 'ELASTIC_PROXY') |
+ default('', true) }}"
+proxy_for_k8s_gcr: "{{ lookup('env', 'K8S_GCR_PROXY') |
+ default('', true) }}"
+
+deployment_requested: "{{ lookup('env', 'DEPLOYMENT_REQUESTED') | default('full', true) }}"
+
+deployment_type: "{{ lookup('env','DEPLOYMENT_TYPE')| default('core', true) }}"
+
+gerrit_review: "{{ lookup('env', 'GERRIT_REVIEW') | default('', true) }}"
+
+gerrit_patchset: "{{ lookup('env', 'GERRIT_PATCHSET') | default('', true) }}"
+
+project: "{{ lookup('env', 'PROJECT') | default('oom', true) }}"
+
+project_dir_mapping:
+ aai/oom: kubernetes/aai
+ testsuite/oom: kubernetes/robot
+
+# deployment_type variable
+core_onap: "{{ (deployment_type == 'core') }}"
+small_onap: "{{ (deployment_type == 'small') }}"
+medium_onap: "{{ (deployment_type == 'medium') }}"
+full_onap: "{{ (deployment_type == 'full') }}"
+nbi_working_tag: "2.1.1"
+onap_flavor: "{{ lookup('env','ONAP_FLAVOR')| default('small', true) }}"
+additional_components: "{{ lookup('env','ADDITIONAL_COMPONENTS') |
+ default('', true) }}"
+
+portal_enabled: "{{ small_onap or medium_onap or
+ ('portal' in additional_components) }}"
+
+strimzi_version: "{{ lookup('env', 'STRIMZI_VERSION') | default('0.31.1', true) }}"
+onap_version: "{{ onap_versions[branch] | default('11.0.0') }}"
+
+onap_versions:
+ master: 11.0.0
+ kohn: 11.0.0
+ jakarta: 10.0.0
+
+use_global_storage: "{{ os_infra.onap.global_storage.enabled | default(false) }}"
+
+helmv3_use_sql: "{{ lookup('env','HELM3_USE_SQL') |
+ default(False, true) }}"
+
+postgres_namespace: helm
+postgres_svc: postgres
+postgres_secret_name: postgres-postgresql
+postgres_user: helm
+postgres_db: helm
+postgres_port: 30347
+postgres_url: "postgresql://{{
+ postgres_svc }}.{{ postgres_namespace }}:{{ postgres_port }}/{{
+ postgres_db }}?user={{ postgres_user }}&password={{
+ postgres_password }}&sslmode=disable"
diff --git a/onap-oom-clean.yaml b/onap-oom-clean.yaml
new file mode 100644
index 0000000..10a3644
--- /dev/null
+++ b/onap-oom-clean.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: kube-master
+ run_once: "yes"
+ roles:
+ - oom_clean
diff --git a/onap-oom-configure.yaml b/onap-oom-configure.yaml
new file mode 100644
index 0000000..569de7f
--- /dev/null
+++ b/onap-oom-configure.yaml
@@ -0,0 +1,25 @@
+---
+- hosts: kube-node
+ gather_facts: "no"
+ tasks:
+ - name: gather facts
+ setup:
+ when: gather_nodes_fact
+
+- hosts: kube-master
+ run_once: "yes"
+ vars_files:
+ - "vars/idf.yml"
+ - "vars/pdf.yml"
+ pre_tasks:
+ - name: check if openstack_infos exists
+ ansible.builtin.stat:
+ path: "{{ base_dir }}/vars/openstack_infos.yml"
+ delegate_to: localhost
+ register: stat
+
+ - name: include user clouds info
+ include_vars: "{{ base_dir }}/vars/openstack_infos.yml"
+ when: stat.stat.exists
+ roles:
+ - oom_configure
diff --git a/onap-oom-deploy.yaml b/onap-oom-deploy.yaml
new file mode 100644
index 0000000..c9714f6
--- /dev/null
+++ b/onap-oom-deploy.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: kube-master
+ run_once: "yes"
+ roles:
+ - oom_launch
diff --git a/onap-oom-postconfigure.yaml b/onap-oom-postconfigure.yaml
new file mode 100644
index 0000000..416d88c
--- /dev/null
+++ b/onap-oom-postconfigure.yaml
@@ -0,0 +1,18 @@
+---
+- hosts: kube-master
+ run_once: "yes"
+ vars_files:
+ - "vars/ddf.yml"
+ - "vars/pdf.yml"
+ pre_tasks:
+ - name: check if openstack_infos exists
+ ansible.builtin.stat:
+ path: "{{ base_dir }}/vars/openstack_infos.yml"
+ delegate_to: localhost
+ register: stat
+
+ - name: include user clouds info
+ include_vars: "{{ base_dir }}/vars/openstack_infos.yml"
+ when: stat.stat.exists
+ roles:
+ - oom_postconfigure
diff --git a/onap-oom-postinstall.yaml b/onap-oom-postinstall.yaml
new file mode 100644
index 0000000..1f203f3
--- /dev/null
+++ b/onap-oom-postinstall.yaml
@@ -0,0 +1,25 @@
+---
+- hosts: kube-node
+ gather_facts: "no"
+ tasks:
+ - name: gather facts
+ setup:
+ when: gather_nodes_fact
+
+- hosts: kube-master
+ run_once: "yes"
+ vars_files:
+ - "vars/ddf.yml"
+ - "vars/pdf.yml"
+ pre_tasks:
+ - name: check if openstack_infos exists
+ ansible.builtin.stat:
+ path: "{{ base_dir }}/vars/openstack_infos.yml"
+ delegate_to: localhost
+ register: stat
+
+ - name: include user clouds info
+ include_vars: "{{ base_dir }}/vars/openstack_infos.yml"
+ when: stat.stat.exists
+ roles:
+ - oom_generate_artifacts
diff --git a/onap-oom-prepare-ci.yaml b/onap-oom-prepare-ci.yaml
new file mode 100644
index 0000000..396d4a3
--- /dev/null
+++ b/onap-oom-prepare-ci.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: kube-master
+ become: "yes"
+ roles:
+ - prepare_ci
diff --git a/onap-oom-prepare.yaml b/onap-oom-prepare.yaml
new file mode 100644
index 0000000..a7c956b
--- /dev/null
+++ b/onap-oom-prepare.yaml
@@ -0,0 +1,16 @@
+---
+- hosts: k8s-cluster, nfs-server
+ gather_facts: no
+ vars_files:
+ - "vars/pdf.yml"
+ - "vars/idf.yml"
+ roles:
+ - node_prepare
+
+- hosts: kube-master
+ vars_files:
+ - "vars/pdf.yml"
+ - "vars/idf.yml"
+ roles:
+ - orange.os_infra_manager.cloud
+ - oom_prepare
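The CI drives these playbooks through `run.sh`; when running them by hand, an invocation along these lines should work (a sketch assuming `inventory/infra` and the `vars/` files described in the README exist, and the Galaxy requirements are already installed):

```sh
# Run from the repository root so that ansible.cfg is picked up.
ansible-playbook -i inventory/infra onap-oom-prepare.yaml
ansible-playbook -i inventory/infra onap-oom-configure.yaml
ansible-playbook -i inventory/infra onap-oom-deploy.yaml
ansible-playbook -i inventory/infra onap-oom-wait.yaml
# Add --vault-password-file <file> if vars/vaulted_ssh_credentials.yml is used.
```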
diff --git a/onap-oom-wait.yaml b/onap-oom-wait.yaml
new file mode 100644
index 0000000..57eed8e
--- /dev/null
+++ b/onap-oom-wait.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: kube-master
+ run_once: "yes"
+ roles:
+ - oom_wait
diff --git a/requirements.yml b/requirements.yml
new file mode 100644
index 0000000..0b06f8c
--- /dev/null
+++ b/requirements.yml
@@ -0,0 +1,27 @@
+---
+roles:
+ - src: git+https://gitlab.com/Orange-OpenSource/lfn/infra/create_disk_role.git
+ version: master
+ name: create_disk
+ - src: git+https://gitlab.com/Orange-OpenSource/lfn/infra/apt-install-role.git
+ version: master
+ name: apt_install
+collections:
+ - name: ansible.netcommon
+ source: https://galaxy.ansible.com
+ version: 2.5.1
+ - name: community.kubernetes
+ source: https://galaxy.ansible.com
+ version: 1.1.1
+ - name: openstack.cloud
+ source: https://galaxy.ansible.com
+ version: 1.1.0
+ - name: ansible.posix
+ source: https://galaxy.ansible.com
+ version: 1.1.1
+ - name: community.general
+ source: https://galaxy.ansible.com
+ version: 1.2.0
+ - name: https://gitlab.com/Orange-OpenSource/lfn/infra/os_infra_manager_collection.git
+ type: git
+ version: staging
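Roles and collections are declared in the same file, so both installation commands are needed; a usage sketch:

```sh
ansible-galaxy role install -r requirements.yml
ansible-galaxy collection install -r requirements.yml
```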
diff --git a/roles/node_prepare/defaults/main.yaml b/roles/node_prepare/defaults/main.yaml
new file mode 100644
index 0000000..3160f83
--- /dev/null
+++ b/roles/node_prepare/defaults/main.yaml
@@ -0,0 +1 @@
+nfs_daemon: nfs-kernel-server
\ No newline at end of file
diff --git a/roles/node_prepare/handlers/main.yaml b/roles/node_prepare/handlers/main.yaml
new file mode 100644
index 0000000..73aa2ed
--- /dev/null
+++ b/roles/node_prepare/handlers/main.yaml
@@ -0,0 +1,7 @@
+---
+- name: restart nfs server
+ systemd:
+ name: "{{ nfs_daemon }}"
+ state: restarted
+ daemon_reload: "yes"
+ become: "yes"
diff --git a/roles/node_prepare/tasks/main.yaml b/roles/node_prepare/tasks/main.yaml
new file mode 100644
index 0000000..afbaeb4
--- /dev/null
+++ b/roles/node_prepare/tasks/main.yaml
@@ -0,0 +1,28 @@
+---
+# TODO: retrieve facts on Openstack to dynamically find subnet for etc export
+- name: install nfs
+ when: not use_global_storage
+ block:
+ - name: gather facts
+ setup:
+
+ - name: create nfs server on controller
+ import_tasks: nfs_server.yaml
+ when: inventory_hostname in groups['nfs-server']
+
+ - name: flush handlers
+ meta: flush_handlers
+
+ - name: mount dockerdata in nfs
+ import_tasks: nfs_client.yaml
+ when: inventory_hostname in groups['k8s-cluster'] and
+ inventory_hostname not in groups['nfs-server']
+
+ - name: set the required value for vm.max_map_count
+ become: "yes"
+ ansible.posix.sysctl:
+ name: vm.max_map_count
+ value: 1048575
+ sysctl_set: "yes"
+ state: present
+ reload: "yes"
diff --git a/roles/node_prepare/tasks/nfs_client.yaml b/roles/node_prepare/tasks/nfs_client.yaml
new file mode 100644
index 0000000..315fdab
--- /dev/null
+++ b/roles/node_prepare/tasks/nfs_client.yaml
@@ -0,0 +1,35 @@
+---
+- name: install packages
+ import_tasks: nfs_client_DEBIAN.yaml
+ when: ansible_os_family | lower == "debian"
+
+- name: Create mountable dir
+ become: "yes"
+ ansible.builtin.file:
+ path: "{{ nfs_folder }}"
+ state: directory
+ mode: 0777
+ owner: root
+ group: root
+
+- name: set mountpoints
+ become: "yes"
+ ansible.posix.mount:
+ name: "{{ nfs_folder }}"
+ src:
+ "{{ hostvars[groups['nfs-server'][0]].ansible_default_ipv4.address }}\
+ :{{ nfs_folder }}"
+ fstype: nfs4
+ dump: 0
+ passno: 2
+ opts: "nfsvers=4.1,rsize=131072,wsize=131072"
+ state: mounted
+
+- name: Ensure mountable dir is accessible to everyone
+ become: "yes"
+ ansible.builtin.file:
+ path: "{{ nfs_folder }}"
+ state: directory
+ mode: 0777
+ owner: root
+ group: root
diff --git a/roles/node_prepare/tasks/nfs_client_DEBIAN.yaml b/roles/node_prepare/tasks/nfs_client_DEBIAN.yaml
new file mode 100644
index 0000000..f3eb6ee
--- /dev/null
+++ b/roles/node_prepare/tasks/nfs_client_DEBIAN.yaml
@@ -0,0 +1,8 @@
+---
+- name: "[Debian] Ensure NFS utilities are installed."
+ include_role:
+ name: apt_install
+ vars:
+ environment: "{{ proxy_env }}"
+ packages:
+ - nfs-common
diff --git a/roles/node_prepare/tasks/nfs_server.yaml b/roles/node_prepare/tasks/nfs_server.yaml
new file mode 100644
index 0000000..6179172
--- /dev/null
+++ b/roles/node_prepare/tasks/nfs_server.yaml
@@ -0,0 +1,47 @@
+---
+- name: apply distribution specific tasks
+ include_tasks: "nfs_server_{{ ansible_os_family | upper }}.yaml"
+ when: >
+ ansible_os_family | lower == "debian" or
+ ansible_os_family | lower == "coreos"
+
+- name: retrieve server pdf information
+ ansible.builtin.set_fact:
+ server_pdf:
+ "{{ nodes | selectattr('name', 'in', inventory_hostname) | first }}"
+
+- name: create nfs volume
+ include_role:
+ name: create_disk
+ vars:
+ disks: "{{ server_pdf.disks }}"
+ disk_purpose: nfs
+ mount_path: "{{ nfs_folder }}"
+ force_full_erase: False
+ when: (server_pdf.disks | selectattr('name', 'eq', 'disk-nfs') |
+ list | length) > 0
+
+- name: Create mountable dir
+ become: "yes"
+ ansible.builtin.file:
+ path: "{{ nfs_folder }}"
+ state: directory
+ mode: 0777
+ owner: root
+ group: root
+
+- name: create a placeholder file so that the nfs server is happy
+ become: "yes"
+ ansible.builtin.file:
+ path: "{{ nfs_folder }}/do_not_remove"
+ state: touch
+
+- name: configure /etc/exports
+ become: "yes"
+ ansible.builtin.lineinfile:
+ path: /etc/exports
+ owner: root
+ group: root
+ regexp: "^{{ nfs_folder }}"
+ line: "{{ nfs_folder }} *(rw,async,no_root_squash,no_subtree_check)"
+ notify: restart nfs server
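For clarity, the server tasks above plus `nfs_client.yaml` amount to a plain NFSv4 export and mount; a manual equivalent, where 10.0.0.10 stands in for the nfs-server node's address:

```sh
# On the nfs-server node (mirrors the lineinfile task above):
echo '/dockerdata-nfs *(rw,async,no_root_squash,no_subtree_check)' | sudo tee -a /etc/exports
sudo systemctl restart nfs-kernel-server

# On every other k8s-cluster node (mirrors nfs_client.yaml):
sudo mkdir -p /dockerdata-nfs
sudo mount -t nfs4 -o nfsvers=4.1,rsize=131072,wsize=131072 \
  10.0.0.10:/dockerdata-nfs /dockerdata-nfs
```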
diff --git a/roles/node_prepare/tasks/nfs_server_COREOS.yaml b/roles/node_prepare/tasks/nfs_server_COREOS.yaml
new file mode 100644
index 0000000..ad59414
--- /dev/null
+++ b/roles/node_prepare/tasks/nfs_server_COREOS.yaml
@@ -0,0 +1,4 @@
+---
+- name: override default nfs daemon name
+ ansible.builtin.set_fact:
+ nfs_daemon: nfsd
diff --git a/roles/node_prepare/tasks/nfs_server_DEBIAN.yaml b/roles/node_prepare/tasks/nfs_server_DEBIAN.yaml
new file mode 100644
index 0000000..38b4d14
--- /dev/null
+++ b/roles/node_prepare/tasks/nfs_server_DEBIAN.yaml
@@ -0,0 +1,9 @@
+---
+- name: "[Debian] Ensure NFS utilities are installed."
+ include_role:
+ name: apt_install
+ vars:
+ environment: "{{ proxy_env }}"
+ packages:
+ - nfs-common
+ - nfs-kernel-server
diff --git a/roles/oom_clean/defaults/main.yaml b/roles/oom_clean/defaults/main.yaml
new file mode 100644
index 0000000..adf0ae3
--- /dev/null
+++ b/roles/oom_clean/defaults/main.yaml
@@ -0,0 +1,8 @@
+faulty_pods:
+ - ejbca
+
+helm_env: {}
+
+helm_env_postgres:
+ HELM_DRIVER: sql
+ HELM_DRIVER_SQL_CONNECTION_STRING: "{{ postgres_url }}"
\ No newline at end of file
diff --git a/roles/oom_clean/tasks/helm3.yaml b/roles/oom_clean/tasks/helm3.yaml
new file mode 100644
index 0000000..f6eb3a9
--- /dev/null
+++ b/roles/oom_clean/tasks/helm3.yaml
@@ -0,0 +1,66 @@
+---
+- name: "[HELM3] retrieve helm postgres secret"
+ community.kubernetes.k8s_info:
+ api_version: v1
+ kind: Secret
+ name: "{{ postgres_secret_name }}"
+ namespace: "{{ postgres_namespace }}"
+ register: postgres_secrets
+ when: helmv3_use_sql|bool
+
+- name: "[HELM3] retrieve helm postrgres password"
+ set_fact:
+ postgres_password: "{{
+ postgres_secrets.resources[0].data['postgresql-password'] | b64decode }}"
+ when: helmv3_use_sql|bool
+
+- name: set helm environment with postgres
+ set_fact:
+ helm_env: "{{ helm_env_postgres }}"
+ when: helmv3_use_sql|bool
+
+- name: "[HELM3] list previously installed components"
+ shell: |
+ set -o pipefail && helm list -a -n {{ onap_namespace }} |
+ awk '{print $1}' | grep {{ chart_name }}- || true
+ args:
+ executable: /bin/bash
+ environment: "{{ helm_env }}"
+ register: components
+ changed_when: "false"
+
+- name: "[HELM3] remove previously installed components"
+ command:
+ "helm uninstall {{ item }} -n {{ onap_namespace }}"
+ loop: "{{ components.stdout_lines }}"
+ environment: "{{ helm_env }}"
+ register: helm_undeploy
+ async: 900
+ poll: 0
+
+- name: "[HELM3] Wait for component deletion"
+ ansible.builtin.async_status:
+ jid: "{{ item.ansible_job_id }}"
+ register: _jobs
+ until: _jobs.finished
+ delay: 5
+ retries: 300
+ loop: "{{ helm_undeploy.results }}"
+ loop_control:
+ label: "{{ item.item }}"
+
+- name: "[HELM3] check if an onap installation has been launched before"
+ shell: |
+ set -o pipefail && helm list -a -n {{ onap_namespace }} |
+ awk '{print $1}' | grep -c {{ chart_name }} || true
+ args:
+ executable: /bin/bash
+ environment: "{{ helm_env }}"
+ register: launched
+ changed_when: "false"
+
+- name: "[HELM3] remove previous installation"
+ command:
+ "helm uninstall {{ chart_name }} -n {{ onap_namespace }}"
+ environment: "{{ helm_env }}"
+ when: launched.stdout != '0'
\ No newline at end of file
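The list/uninstall pairs above are equivalent to the following manual cleanup, assuming the default release and namespace names (`onap`):

```sh
# Remove per-component releases (onap-aai, onap-so, ...) then the umbrella release.
helm list -a -n onap | awk '{print $1}' | grep '^onap-' \
  | xargs -r -n1 helm uninstall -n onap
helm uninstall onap -n onap
```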
diff --git a/roles/oom_clean/tasks/main.yaml b/roles/oom_clean/tasks/main.yaml
new file mode 100644
index 0000000..4f14200
--- /dev/null
+++ b/roles/oom_clean/tasks/main.yaml
@@ -0,0 +1,151 @@
+---
+- name: check helm version
+ command: "helm version --template {% raw %}'{{.Version}}'{% endraw %}"
+ register: helm_version
+
+# Return of previous command will be "v3.3.4" for v3 and up and "<no value>"
+# for version 2.
+- name: store helm version
+ ansible.builtin.set_fact:
+ helmv3: "{{ ('<' in helm_version.stdout) | ternary(false, true) }}"
+
+- name: "HELM 3 not installed - stop playbook"
+ ansible.builtin.fail:
+ msg: HELM 3 not installed
+ when: not helmv3
+
+- name: "[HELM3] Remove previous installation"
+ include_tasks: helm3.yaml
+ when: helmv3
+
+- name: get number of remaining pods
+ command: >
+ kubectl get pods --namespace {{ onap_namespace }} --no-headers
+ -o custom-columns=NAME:.metadata.name
+ changed_when: False
+ register: pods
+
+- name: delete remaining faulty pods
+ command: >
+ kubectl delete pods --namespace {{ onap_namespace }} --force
+ --grace-period 0 {{ item }}
+ loop: "{{ pods.stdout_lines }}"
+ when: (pods.stdout_lines | length) <= (faulty_pods | length) and
+ ((item | regex_replace('^[a-zA-Z0-9]+-') |
+ regex_replace('-[0-9a-z]+-[0-9a-z]+$')) in faulty_pods)
+ changed_when: True
+
+- name: get number of remaining jobs
+ command: >
+ kubectl get jobs --namespace {{ onap_namespace }} --no-headers
+ -o custom-columns=NAME:.metadata.name
+ changed_when: false
+ register: jobs
+
+- name: delete remaining faulty jobs
+ command: >
+ kubectl delete job --namespace {{ onap_namespace }} --force
+ --grace-period 0 {{ item }}
+ loop: "{{ jobs.stdout_lines }}"
+
+- name: get number of remaining pvcs
+ command: >
+ kubectl get pvc --namespace {{ onap_namespace }} --no-headers
+ -o custom-columns=NAME:.metadata.name
+ changed_when: false
+ register: pvcs
+
+- name: delete remaining faulty pvcs
+ command: >
+ kubectl delete pvc --namespace {{ onap_namespace }} --force
+ --grace-period 0 {{ item }}
+ loop: "{{ pvcs.stdout_lines }}"
+
+- name: check namespace status before full deletion
+ shell: |
+ set -o pipefail && kubectl get namespace {{ onap_namespace }}
+ -o jsonpath="{.status.phase}" || true
+ args:
+ executable: /bin/bash
+ register: ns_status
+ ignore_errors: yes
+ changed_when: False
+
+- name: delete onap namespace
+ community.kubernetes.k8s:
+ state: absent
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: "{{ onap_namespace }}"
+ when: (not ns_status.failed) and ('Terminating' not in ns_status.stdout)
+
+- name: delete onap tests namespace
+ community.kubernetes.k8s:
+ state: absent
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: "{{ onap_namespace }}-tests"
+ when: (not ns_status.failed) and ('Terminating' not in ns_status.stdout)
+
+- name: wait for namespace full deletion
+ shell: |
+ set -o pipefail && kubectl get namespace |
+ grep -c {{ onap_namespace }} || true
+ args:
+ executable: /bin/bash
+ register: kube
+ changed_when:
+ kube.stdout == '0'
+ until:
+ kube.stdout == '0'
+ retries: 600
+ delay: 1
+
+- name: list all remaining persistent volumes
+ shell: |
+ set -o pipefail &&
+ kubectl get pv -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' |
+ grep {{ chart_name }} || true
+ args:
+ executable: /bin/bash
+ register: persistent_volumes
+ changed_when: "false"
+
+- name: remove remaining persistent volumes
+ shell: |
+ set -o pipefail && kubectl delete pv {{ item }} || true
+ args:
+ executable: /bin/bash
+ changed_when: "true"
+ loop: "{{ persistent_volumes.stdout_lines }}"
+
+- name: "list all onap directories in {{ nfs_folder }}"
+ ansible.builtin.find:
+ paths: "{{ nfs_folder }}"
+ recurse: no
+ file_type: directory
+ register: onap_directories
+
+- name: "delete onap directory in {{ nfs_folder }}"
+ become: "yes"
+ ansible.builtin.file:
+ path: "{{ item.path }}"
+ state: absent
+ loop: "{{ onap_directories.files }}"
+ loop_control:
+ label: "{{ item.path }}"
+
+- name: delete component-gating-overrides.yaml if present
+ ansible.builtin.file:
+ path: "{{ override_gating_component }}"
+ state: absent
+
+- name: remove oom directory
+ become: "yes"
+ ansible.builtin.file:
+ path: "{{ oom_path }}"
+ state: absent
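If the namespace deletion above gets stuck, the checks performed by this role can be reproduced manually (namespace and chart name assume the default `onap`):

```sh
kubectl get namespace onap -o jsonpath='{.status.phase}'   # stays "Terminating" while finalizers run
kubectl get pvc -n onap --no-headers                        # remaining claims, if any
kubectl get pv -o name | grep onap                          # leftover persistent volumes
```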
diff --git a/roles/oom_configure/defaults/main.yaml b/roles/oom_configure/defaults/main.yaml
new file mode 100644
index 0000000..461e13b
--- /dev/null
+++ b/roles/oom_configure/defaults/main.yaml
@@ -0,0 +1,35 @@
+---
+openstack_tenant_name:
+ "{{ lookup('env','VNFS_TENANT_NAME') |
+ default(os_infra.tenant.name, true) }}"
+openstack_user_name:
+ "{{ lookup('env','VNFS_USER_NAME') |
+ default(os_infra.user.name, true) }}"
+openstack_service_tenant_name: service
+
+component_enabled:
+ a1policymanagement: "{{ medium_onap or ('a1policymanagement' in additional_components) }}"
+ appc: "{{ small_onap or medium_onap or ('appc' in additional_components) }}"
+ cli: "{{ small_onap or medium_onap or ('cli' in additional_components) }}"
+ consul: "{{ small_onap or medium_onap or ('consul' in additional_components) }}"
+ cps: "{{ small_onap or medium_onap or ('cps' in additional_components) }}"
+ contrib: "{{ medium_onap or ('contrib' in additional_components) }}"
+ dcaegen2: "{{ medium_onap or ('dcaegen2' in additional_components) }}"
+ dcaegen2_services: "{{ medium_onap or ('dcaegen2_services' in additional_components) }}"
+ dcaemod: "{{ 'dcaemod' in additional_components }}"
+ esr: "{{ small_onap or medium_onap or ('esr' in additional_components) }}"
+ holmes: "{{ medium_onap or ('holmes' in additional_components) }}"
+ log: "{{ small_onap or medium_onap or ('log' in additional_components) }}"
+ msb: "{{ small_onap or medium_onap or ('msb' in additional_components) }}"
+ multicloud: "{{ small_onap or medium_onap or ('multicloud' in additional_components) }}"
+ nbi: "{{ small_onap or medium_onap or ('nbi' in additional_components) }}"
+ oof: "{{ medium_onap or ('oof' in additional_components) }}"
+ policy: "{{ medium_onap or ('policy' in additional_components) }}"
+ pomba: "{{ medium_onap or ('pomba' in additional_components) }}"
+ portal: "{{ portal_enabled }}"
+ sniro_emulator: "{{ 'sniro_emulator' in additional_components }}"
+ uui: "{{ 'uui' in additional_components }}"
+ vfc: "{{ 'vfc' in additional_components }}"
+ vid: "{{ small_onap or medium_onap or ('vid' in additional_components) }}"
+ vnfsdk: "{{ 'vnfsdk' in additional_components }}"
+ modeling: "{{ 'modeling' in additional_components }}"
+ cds: "{{ small_onap or medium_onap or ('cds' in additional_components) }}"
diff --git a/roles/oom_configure/tasks/main.yaml b/roles/oom_configure/tasks/main.yaml
new file mode 100644
index 0000000..dc9e100
--- /dev/null
+++ b/roles/oom_configure/tasks/main.yaml
@@ -0,0 +1,210 @@
+---
+- name: fetch cloud config
+ ansible.builtin.fetch:
+ dest: /tmp/clouds.yaml
+ src: "{{ ansible_user_dir }}/.config/openstack/clouds.yaml"
+ flat: "yes"
+
+- name: load cloud config
+ include_vars: /tmp/clouds.yaml
+
+- name: initialize os_auth_url
+ ansible.builtin.set_fact:
+ os_auth_url: "{{ clouds[openstack_user_name].auth.auth_url }}"
+
+- name: add v3 at end of os_auth_url
+ ansible.builtin.set_fact:
+ os_auth_url:
+ "{{ ((os_auth_url[-3:] == 'v3/') or (os_auth_url[-2:] == 'v3')) |
+ ternary(os_auth_url | regex_replace('/$', ''),
+ (os_auth_url[-1:] == '/') | ternary(
+ os_auth_url ~ 'v3',
+ os_auth_url ~ '/v3')) }}"
+
+- name: set tenant id
+ ansible.builtin.set_fact:
+ tenant_id: "{{ clouds[openstack_user_name].auth.project_id }}"
+ when: clouds[openstack_user_name].auth.project_id is defined
+
+- name: retrieve tenant id
+ block:
+ - name: load cloud config
+ openstack.cloud.os_client_config:
+
+ # - name: retrieve info from VNF tenant
+ # os_project_facts:
+ # cloud: "{{ openstack_user_name }}"
+ # name: "{{ openstack_tenant_name }}"
+ # register: tenant
+ # ISSUE with shade: You are not authorized to perform the requested action:
+ # identity:list_projects.
+ #
+ # - name: retrieve tenant ID
+ # set_fact:
+ # tenant_id: "{{ tenant.ansible_facts.openstack_projects.0.id }}"
+
+ - name: retrieve info from VNF tenant -- bash way
+ shell: >-
+ set -o pipefail && \
+ openstack --os-cloud {{ openstack_user_name }} project list -f json |
+ jq -r '[.[]| select(.Name=="{{ openstack_tenant_name }}") | .ID] |
+ first'
+ args:
+ executable: /bin/bash
+ changed_when: False
+ register: tenant
+
+ - name: retrieve tenant ID -- bash way
+ ansible.builtin.set_fact:
+ tenant_id: "{{ tenant.stdout_lines.0 }}"
+ when: clouds[openstack_user_name].auth.project_id is not defined
+
+- name: generate openstack info file
+ ansible.builtin.copy:
+ content: |
+ openstack_user_name: {{ openstack_user_name }}
+ openstack_tenant_name: {{ openstack_tenant_name }}
+ openstack_tenant_id: {{ tenant_id }}
+ dest: "{{ base_dir }}/vars/openstack_infos.yml"
+ delegate_to: localhost
+
+- name: generate encrypted password for robot
+ shell: |
+ set -o pipefail &&\
+ echo -n '{{ clouds[openstack_user_name].auth.password }}' |
+ openssl aes-128-ecb -e -K `cat encryption.key` -nosalt |
+ xxd -c 256 -p
+ args:
+ chdir: "{{ oom_path }}/kubernetes/so/resources/config/mso"
+ executable: /bin/bash
+ changed_when: false
+ register: shell
+
+- name: save robot encrypted password
+ ansible.builtin.set_fact:
+ robot_encrypted_password: "{{ shell.stdout }}"
+
+- name: set so_crypto container name
+ set_fact:
+ so_crypto: "{{ proxy_for_dockerhub }}/sdesbure/so_crypto"
+ when: proxy_for_dockerhub | bool
+
+- name: set so_crypto container name
+ set_fact:
+ so_crypto: "sdesbure/so_crypto"
+ when: not proxy_for_dockerhub | bool
+
+- name: generate encrypted password for so
+ shell: >
+ docker run --rm {{ so_crypto }}
+ {{ clouds[openstack_user_name].auth.password }}
+ `cat encryption.key`
+ args:
+ chdir: "{{ oom_path }}/kubernetes/so/resources/config/mso"
+ changed_when: False
+ register: shell
+
+- name: save so encrypted password
+ ansible.builtin.set_fact:
+ encrypted_password: "{{ shell.stdout }}"
+
+- name: create config override directory
+ ansible.builtin.file:
+ path: "{{ oom_etc_path }}"
+ recurse: "yes"
+ state: directory
+
+- name: check if a deployment has already been done
+ ansible.builtin.stat:
+ path: "{{ deployment_file }}"
+ register: deployment_stat
+
+- name: get deployment.yaml
+ when: deployment_stat.stat.exists
+ block:
+ - name: create temporary local file for deployment.yaml
+ ansible.builtin.tempfile:
+ state: file
+ suffix: temp
+ register: tmp_deployment
+ delegate_to: "127.0.0.1"
+
+ - name: fetch deployment info
+ ansible.builtin.fetch:
+ dest: "{{ tmp_deployment.path }}"
+ src: "{{ deployment_file }}"
+ flat: "yes"
+
+ - name: load deployment info
+ include_vars:
+ file: "{{ tmp_deployment.path }}"
+
+ - name: change deployment type if needed
+ ansible.builtin.set_fact:
+ deployment_type: "{{ deployment }}"
+ when: deployment_type == "micro" or
+ (deployment_type == "small" and deployment != "micro" ) or
+ deployment == "full"
+
+ always:
+ - name: destroy the local tmp_deployment
+ ansible.builtin.file:
+ path: "{{ tmp_deployment.path }}"
+ state: absent
+ delegate_to: "127.0.0.1"
+
+- name: "generate config override template for deployment {{ deployment_type }}"
+ ansible.builtin.template:
+ src: onap-overrides.yaml.j2
+ dest: "{{ override_file }}"
+
+- name: check if pre generated component override file exists
+ ansible.builtin.stat:
+ path: "{{ base_dir }}/vars/components-overrides.yml"
+ delegate_to: localhost
+ register: stat
+
+- name: copy pre generated component override file
+ ansible.builtin.copy:
+ dest: "{{ override_components }}"
+ src: "{{ base_dir }}/vars/components-overrides.yml"
+ when: stat.stat.exists
+
+- name: "generate config override template for deployment {{ deployment_type }}"
+ ansible.builtin.template:
+ src: components-overrides.yaml.j2
+ dest: "{{ override_components }}"
+ when: (not stat.stat.exists) and (core_onap or small_onap or medium_onap)
+
+- name: "generate so override template"
+ ansible.builtin.template:
+ src: so-overrides.yaml.j2
+ dest: "{{ override_gating_component }}"
+ when: project == 'so'
+
+- name: save on which step we are
+ ansible.builtin.copy:
+ content: |
+ ---
+ deployment: {{ deployment_type }}
+ dest: "{{ deployment_file }}"
+
+- name: "[facts retrieved] get first node IP address (case ip not defined)"
+ ansible.builtin.set_fact:
+ first_node_ip: "{{
+ hostvars[groups['kube-node'].0].ansible_default_ipv4.address }}"
+ when: gather_nodes_fact
+
+- name: "[No Facts retrieved] get first node IP address (case ip not defined)"
+ ansible.builtin.set_fact:
+ first_node_ip: "{{ hostvars[groups['kube-node'].0].ip }}"
+ when: not gather_nodes_fact
+
+- name: generate etc/hosts for utilities
+ become: "yes"
+ ansible.builtin.blockinfile:
+ path: /etc/hosts
+ marker: "# {mark} ANSIBLE MANAGED UTILITIES HOSTS"
+ block: |
+ {{ first_node_ip }} minio.minio
+ {{ first_node_ip }} {{ postgres_svc }}.{{ postgres_namespace }}
diff --git a/roles/oom_configure/templates/components-overrides.yaml.j2 b/roles/oom_configure/templates/components-overrides.yaml.j2
new file mode 100644
index 0000000..a58cbac
--- /dev/null
+++ b/roles/oom_configure/templates/components-overrides.yaml.j2
@@ -0,0 +1,153 @@
+---
+aaf:
+ enabled: true
+a1policymanagement:
+{% if component_enabled.a1policymanagement %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+appc:
+{% if component_enabled.appc %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+cli:
+{% if component_enabled.cli %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+consul:
+{% if component_enabled.consul %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+contrib:
+{% if component_enabled.contrib %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+dcaegen2:
+{% if component_enabled.dcaegen2 %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+dcaegen2-services:
+{% if component_enabled.dcaegen2_services %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+dcaemod:
+{% if component_enabled.dcaemod %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+esr:
+{% if component_enabled.esr %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+holmes:
+{% if component_enabled.holmes %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+log:
+{% if component_enabled.log %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+modeling:
+{% if component_enabled.modeling %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+msb:
+{% if component_enabled.msb %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+multicloud:
+{% if component_enabled.multicloud %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+nbi:
+{% if component_enabled.nbi %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+oof:
+{% if component_enabled.oof %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+policy:
+{% if component_enabled.policy %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+pomba:
+{% if component_enabled.pomba %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+portal:
+{% if component_enabled.portal %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+sniro-emulator:
+{% if component_enabled.sniro_emulator %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+uui:
+{% if component_enabled.uui %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+vfc:
+{% if component_enabled.vfc %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+vid:
+{% if component_enabled.vid %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+vnfsdk:
+{% if component_enabled.vnfsdk %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
+cds:
+{% if component_enabled.cds %}
+ enabled: true
+{% else %}
+ enabled: false
+{% endif %}
diff --git a/roles/oom_configure/templates/onap-overrides.yaml.j2 b/roles/oom_configure/templates/onap-overrides.yaml.j2
new file mode 100644
index 0000000..a2bb227
--- /dev/null
+++ b/roles/oom_configure/templates/onap-overrides.yaml.j2
@@ -0,0 +1,202 @@
+---
+global:
+ repository: {{ repository }}
+{% if proxy_for_dockerhub %}
+ dockerHubRepository: "{{ proxy_for_dockerhub }}"
+{% endif %}
+{% if proxy_for_k8s_gcr %}
+ googleK8sRepository: "{{ proxy_for_k8s_gcr }}"
+{% endif %}
+{% if proxy_for_elastic %}
+ elasticRepository: "{{ proxy_for_elastic }}"
+{% endif %}
+ flavor: {{ onap_flavor }}
+ masterPassword: gatingPassword
+{% if use_ingress %}
+ ingress:
+ enabled: true
+{% endif %}
+{% if use_servicemesh %}
+ serviceMesh:
+ enabled: true
+ tls: true
+ aafEnabled: false
+ cmpv2Enabled: false
+ tlsEnabled: false
+ msbEnabled: false
+{% endif %}
+
+{% if use_global_storage %}
+ persistence:
+ storageClass: {{ os_infra.onap.global_storage.class | default('-') }}
+{% endif %}
+{% if use_metrics %}
+ metrics:
+ enabled: true
+{% if use_custom_resources_metrics %}
+ custom_resources: true
+{% endif %}
+{% endif %}
+{% if use_global_storage and os_infra.onap.global_storage.fast_class is defined %}
+aaf:
+ aaf-cass:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+ aaf-sms:
+ aaf-sms-quorumclient:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+{% endif %}
+appc:
+ config:
+ enableClustering: false
+ openStackType: "OpenStackProvider"
+ openStackName: "OpenStack"
+ openStackKeyStoneUrl: {{ os_auth_url }}
+ openStackServiceTenantName: {{ openstack_service_tenant_name }}
+ openStackDomain: {{ clouds[openstack_user_name].auth.user_domain_name | default('Default') }}
+ openStackUserName: {{ openstack_user_name }}
+ openStackEncryptedPassword: "{{ encrypted_password }}"
+{% if use_global_storage and os_infra.onap.global_storage.fast_class is defined %}
+ mariadb-galera:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+{% endif %}
+cassandra:
+ liveness:
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 120
+ readiness:
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
+ startup:
+ initialDelaySeconds: 30
+ periodSeconds: 30
+ timeoutSeconds: 120
+{% if use_global_storage and os_infra.onap.global_storage.fast_class is defined %}
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+{% endif %}
+{% if use_global_storage and os_infra.onap.global_storage.rwx_class is defined %}
+cds:
+ cds-blueprints-processor:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.rwx_class }}"
+contrib:
+ netbox:
+ netbox-app:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.rwx_class }}"
+{% endif %}
+{% if use_global_storage and os_infra.onap.global_storage.fast_class is defined %}
+dcaegen2:
+ dcae-bootstrap:
+ postgres:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+ dcae-dashboard:
+ postgres:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+ dcae-inventory-api:
+ postgres:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+ dcae-redis:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+{% endif %}
+dcaegen2-services:
+ dcae-datafile-collector:
+ enabled: false
+ dcae-pm-mapper:
+ enabled: false
+{% if use_global_storage and os_infra.onap.global_storage.rwx_class is defined %}
+dcaemod:
+ dcaemod-genprocessor:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.rwx_class }}"
+{% endif %}
+{% if use_global_storage and os_infra.onap.global_storage.fast_class is defined %}
+dmaap:
+ dmaap-bc:
+ postgres:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+ dmaap-dr-prov:
+ mariadb-galera:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+ message-router:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+mariadb-galera:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+modeling:
+ mariadb-galera:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+{% endif %}
+nbi:
+ config:
+ openStackRegion: {{ clouds[openstack_user_name].region_name }}
+ openStackVNFTenantId: {{ tenant_id }}
+ cloudOwner: {{ details.pod_owner }}
+{% if use_global_storage and os_infra.onap.global_storage.fast_class is defined %}
+oof:
+ music:
+ music-cassandra:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+ zookeeper:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+{% endif %}
+robot:
+ config:
+ openStackEncryptedPasswordHere: "{{ robot_encrypted_password }}"
+{% if use_ingress %}
+ useIngressHost:
+ enabled: true
+{% endif %}
+{% if use_global_storage and os_infra.onap.global_storage.fast_class is defined %}
+sdc:
+ sdc-es:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+{% endif %}
+so:
+ so-catalog-db-adapter:
+ config:
+ openStackUserName: {{ openstack_user_name }}
+ openStackRegion: {{ clouds[openstack_user_name].region_name }}
+ openStackKeyStoneUrl: {{ os_auth_url }}
+ openStackServiceTenantName: {{ openstack_service_tenant_name }}
+ openStackEncryptedPasswordHere: "{{ encrypted_password }}"
+ openStackTenantId: {{ tenant_id }}
+ openStackKeystoneVersion: "KEYSTONE_V3"
+      openStackProjectDomainName:
+        {{ clouds[openstack_user_name].auth.project_domain_name | default('Default') }}
+      openStackUserDomainName:
+        {{ clouds[openstack_user_name].auth.user_domain_name | default('Default') }}
+ so-mariadb:
+ config:
+ # gerrit branch where the latest heat code is checked in
+ gerritBranch: {{ branch }}
+{% if use_global_storage and os_infra.onap.global_storage.fast_class is defined %}
+vfc:
+ mariadb-galera:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+vid:
+ mariadb-galera:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+vnfsdk:
+ postgres:
+ persistence:
+ storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+{% endif %}
diff --git a/roles/oom_configure/templates/so-overrides.yaml.j2 b/roles/oom_configure/templates/so-overrides.yaml.j2
new file mode 100644
index 0000000..837f04c
--- /dev/null
+++ b/roles/oom_configure/templates/so-overrides.yaml.j2
@@ -0,0 +1,63 @@
+---
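+{# The &gitlabRegistry and &soHelpers anchors defined below are reused by every
+   SO sub-chart: gating images are pulled from the GitLab registry while the
+   helper (certInitializer) images stay on the default repository. #}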
+global:
+ soBaseImage: orange-opensource/lfn/onap/build-so/base-image:1.0
+so:
+ repositoryOverride: &gitlabRegistry registry.gitlab.com
+ image: orange-opensource/lfn/onap/build-so/api-handler-infra:{{ gerrit_review }}-{{ gerrit_patchset }}
+ soHelpers: &soHelpers
+ repositoryOverride: {{ repository }}
+ certInitializer:
+ repositoryOverride: {{ repository }}
+ so-bpmn-infra:
+ repositoryOverride: *gitlabRegistry
+ image: orange-opensource/lfn/onap/build-so/bpmn-infra:{{ gerrit_review }}-{{ gerrit_patchset }}
+ soHelpers: *soHelpers
+ so-catalog-db-adapter:
+ repositoryOverride: *gitlabRegistry
+ image: orange-opensource/lfn/onap/build-so/catalog-db-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+ soHelpers: *soHelpers
+ so-cnf-adapter:
+ repositoryOverride: *gitlabRegistry
+ image: orange-opensource/lfn/onap/build-so/mso-cnf-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+ soHelpers: *soHelpers
+ so-etsi-nfvo-ns-lcm:
+ repositoryOverride: *gitlabRegistry
+ image: orange-opensource/lfn/onap/build-so/so-etsi-nfvo-ns-lcm:{{ gerrit_review }}-{{ gerrit_patchset }}
+ soHelpers: *soHelpers
+ so-monitoring:
+ repositoryOverride: *gitlabRegistry
+ image: orange-opensource/lfn/onap/build-so/so-monitoring:{{ gerrit_review }}-{{ gerrit_patchset }}
+ soHelpers: *soHelpers
+ so-nssmf-adapter:
+ repositoryOverride: *gitlabRegistry
+ image: orange-opensource/lfn/onap/build-so/nssmf-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+ soHelpers: *soHelpers
+ so-oof-adapter:
+ repositoryOverride: *gitlabRegistry
+ image: orange-opensource/lfn/onap/build-so/so-oof-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+ soHelpers: *soHelpers
+ so-openstack-adapter:
+ repositoryOverride: *gitlabRegistry
+ image: orange-opensource/lfn/onap/build-so/openstack-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+ soHelpers: *soHelpers
+ so-request-db-adapter:
+ repositoryOverride: *gitlabRegistry
+ image: orange-opensource/lfn/onap/build-so/request-db-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+ soHelpers: *soHelpers
+ so-sdc-controller:
+ repositoryOverride: *gitlabRegistry
+ image: orange-opensource/lfn/onap/build-so/sdc-controller:{{ gerrit_review }}-{{ gerrit_patchset }}
+ soHelpers: *soHelpers
+ so-sdnc-adapter:
+ repositoryOverride: *gitlabRegistry
+ image: orange-opensource/lfn/onap/build-so/sdnc-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+ soHelpers: *soHelpers
+ so-vfc-adapter:
+    repositoryOverride: *gitlabRegistry
+ image: orange-opensource/lfn/onap/build-so/vfc-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+ soHelpers: *soHelpers
+ so-vnfm-adapter:
+ repositoryOverride: *gitlabRegistry
+ image: orange-opensource/lfn/onap/build-so/vnfm-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+ soHelpers: *soHelpers
diff --git a/roles/oom_generate_artifacts/defaults/main.yaml b/roles/oom_generate_artifacts/defaults/main.yaml
new file mode 100644
index 0000000..2e8b012
--- /dev/null
+++ b/roles/oom_generate_artifacts/defaults/main.yaml
@@ -0,0 +1,7 @@
+---
+aai_server: aai.api.sparky.simpledemo.onap.org
+aai_port: 30233
+aai_user: AAI
+aai_password: AAI
+msb_server: msb.api.simpledemo.onap.org
+msb_port: 30280
diff --git a/roles/oom_generate_artifacts/tasks/loadbalancer_facts.yaml b/roles/oom_generate_artifacts/tasks/loadbalancer_facts.yaml
new file mode 100644
index 0000000..aa2afcf
--- /dev/null
+++ b/roles/oom_generate_artifacts/tasks/loadbalancer_facts.yaml
@@ -0,0 +1,71 @@
+---
+- name: retrieve istio-ingressgateway device information
+ command: "kubectl get svc -o json -n istio-system istio-ingressgateway"
+ register: ingress_gw
+ changed_when: "false"
+ when: use_servicemesh
+
+- name: get IP of portal loadbalancer
+ ansible.builtin.set_fact:
+ first_node_ip: "{{
+ (ingress_gw.stdout|from_json).status.loadBalancer.ingress.0.ip }}"
+ when: use_servicemesh
+
+- name: retrieve portal device information
+ command: "kubectl get svc -o json -n {{ onap_namespace }} portal-app"
+ register: portal
+ changed_when: "false"
+ when: portal_enabled and not use_servicemesh
+
+- name: get IP of portal loadbalancer
+ ansible.builtin.set_fact:
+ portal_lb: "{{
+ (portal.stdout|from_json).status.loadBalancer.ingress.0.ip }}"
+ ignore_errors: yes
+ register: portal_lb_ip
+ when: portal_enabled and not use_servicemesh
+
+- name: get external IP of portal loadbalancer
+ ansible.builtin.set_fact:
+ portal_lb: "{{ (portal.stdout|from_json).spec.externalIPs.0 }}"
+ ignore_errors: "yes"
+ register: portal_external_ip_check
+  when: portal_enabled and not use_servicemesh and
+        ((portal_lb_ip is not defined) or (portal_lb_ip|length == 0))
+
+- name: "[Facts retrieved] get first node IP address (case ip not defined)"
+ ansible.builtin.set_fact:
+ first_node_ip: "{{
+ hostvars[groups['kube-node'].0].ansible_default_ipv4.address }}"
+ when: gather_nodes_fact and not use_servicemesh
+
+- name: "[No Facts retrieved] get first node IP address (case ip not defined)"
+ ansible.builtin.set_fact:
+ first_node_ip: "{{ hostvars[groups['kube-node'].0].ip }}"
+ when: not gather_nodes_fact and not use_servicemesh
+
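+# No LoadBalancer ingress nor external IP was found for portal-app: compute the
+# node subnet, collect the addresses already used by the cluster nodes and pick
+# a random free address for the portal.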
+- block:
+ - name: list all used ips
+ ansible.builtin.set_fact:
+ used_ips: "{{ used_ips|default([]) + [
+ hostvars[item].ansible_default_ipv4.address ~ '/' ~
+ ((hostvars[item].ansible_default_ipv4.network ~ '/' ~
+ hostvars[item].ansible_default_ipv4.netmask) |
+ ipaddr('prefix'))
+ ] }}"
+ loop: "{{ groups['k8s-cluster'] }}"
+ - name: generate network in ipaddr type
+ ansible.builtin.set_fact:
+ network: "{{ (ansible_default_ipv4.network ~ '/' ~
+ ansible_default_ipv4.netmask) | ipaddr('net') }}"
+ - name: generate the list of addresses in network
+ ansible.builtin.set_fact:
+ addresses: "{{ addresses|default([]) + [network | ipaddr(item)] }}"
+ loop: "{{ range(1, network | ipaddr('size') - 1) | list }}"
+
+ - name: pick a random address for portal
+ ansible.builtin.set_fact:
+ portal_lb: "{{ addresses | difference(used_ips) | random }}"
+ when: gather_nodes_fact and
+ portal_enabled and not use_servicemesh and
+ ((portal_lb_ip is not defined) or (portal_lb_ip|length == 0))
diff --git a/roles/oom_generate_artifacts/tasks/main.yaml b/roles/oom_generate_artifacts/tasks/main.yaml
new file mode 100644
index 0000000..55559ed
--- /dev/null
+++ b/roles/oom_generate_artifacts/tasks/main.yaml
@@ -0,0 +1,82 @@
+---
+- name: generate load balancer facts
+ import_tasks: loadbalancer_facts.yaml
+
+- name: update portal app to reflect this choice
+ command: |
+ kubectl patch svc portal-app -p \
+ '{"spec":{"externalIPs":["{{ portal_lb | ipaddr('address') }}"] }}' \
+ -n {{ onap_namespace }}
+ when: gather_nodes_fact and
+ portal_enabled and
+ ((portal_lb_ip is not defined) or (portal_lb_ip|length == 0)) and
+ ((portal_external_ip_check is not defined) or
+ portal_external_ip_check.failed)
+
+- name: generate etc/hosts
+ become: "yes"
+ ansible.builtin.blockinfile:
+ path: /etc/hosts
+ marker: "# {mark} ANSIBLE MANAGED OOM HOSTS"
+ block: |
+ {{ first_node_ip }} portal.api.simpledemo.onap.org
+ {{ first_node_ip }} vid.api.simpledemo.onap.org
+ {{ first_node_ip }} sdc.api.fe.simpledemo.onap.org
+ {{ first_node_ip }} sdc.api.be.simpledemo.onap.org
+ {{ first_node_ip }} portal-sdk.simpledemo.onap.org
+ {{ first_node_ip }} policy.api.simpledemo.onap.org
+ {{ first_node_ip }} aai.api.sparky.simpledemo.onap.org
+ {{ first_node_ip }} cli.api.simpledemo.onap.org
+ {{ first_node_ip }} msb.api.simpledemo.onap.org
+ {{ first_node_ip }} so.api.simpledemo.onap.org
+ {{ first_node_ip }} appc.api.simpledemo.onap.org
+ {{ first_node_ip }} sdnc.api.simpledemo.onap.org
+ {{ first_node_ip }} nbi.api.simpledemo.onap.org
+ {{ first_node_ip }} consul.api.simpledemo.onap.org
+ {{ first_node_ip }} kibana.api.simpledemo.onap.org
+ {{ first_node_ip }} mr.api.simpledemo.onap.org
+ {{ first_node_ip }} uui.api.simpledemo.onap.org
+ {{ first_node_ip }} aaf.api.simpledemo.onap.org
+ {{ first_node_ip }} robot.api.simpledemo.onap.org
+ {{ first_node_ip }} dcae.api.simpledemo.onap.org
+ {{ first_node_ip }} sdc.workflow.plugin.simpledemo.onap.org
+ {{ first_node_ip }} sdc.dcae.plugin.simpledemo.onap.org
+
+- name: generate hosts file for Non-Ingress Setup
+ ansible.builtin.copy:
+ dest: "{{ playbook_dir }}/vars/hosts"
+ content: |
+ {{ first_node_ip }} portal.api.simpledemo.onap.org
+ {{ first_node_ip }} vid.api.simpledemo.onap.org
+ {{ first_node_ip }} sdc.api.fe.simpledemo.onap.org
+ {{ first_node_ip }} sdc.api.be.simpledemo.onap.org
+ {{ first_node_ip }} portal-sdk.simpledemo.onap.org
+ {{ first_node_ip }} policy.api.simpledemo.onap.org
+ {{ first_node_ip }} aai.api.sparky.simpledemo.onap.org
+ {{ first_node_ip }} cli.api.simpledemo.onap.org
+ {{ first_node_ip }} msb.api.simpledemo.onap.org
+ {{ first_node_ip }} so.api.simpledemo.onap.org
+ {{ first_node_ip }} appc.api.simpledemo.onap.org
+ {{ first_node_ip }} sdnc.api.simpledemo.onap.org
+ {{ first_node_ip }} nbi.api.simpledemo.onap.org
+ {{ first_node_ip }} consul.api.simpledemo.onap.org
+ {{ first_node_ip }} kibana.api.simpledemo.onap.org
+ {{ first_node_ip }} mr.api.simpledemo.onap.org
+ {{ first_node_ip }} uui.api.simpledemo.onap.org
+ {{ first_node_ip }} aaf.api.simpledemo.onap.org
+ {{ first_node_ip }} robot.api.simpledemo.onap.org
+ {{ first_node_ip }} dcae.api.simpledemo.onap.org
+ {{ first_node_ip }} sdc.workflow.plugin.simpledemo.onap.org
+ {{ first_node_ip }} sdc.dcae.plugin.simpledemo.onap.org
+ delegate_to: localhost
+
+- name: generate cluster config file
+ ansible.builtin.copy:
+ dest: "{{ playbook_dir }}/vars/cluster.yml"
+ content: |
+ oom_cluster_ip: {{ first_node_ip }}
+ onap_namespace: {{ onap_namespace }}
+ openstack_tenant_id: {{ openstack_tenant_id }}
+ openstack_tenant_name: {{ openstack_tenant_name }}
+ deployment_type: {{ deployment_type }}
+ delegate_to: localhost
diff --git a/roles/oom_launch/defaults/main.yaml b/roles/oom_launch/defaults/main.yaml
new file mode 100644
index 0000000..e64f60f
--- /dev/null
+++ b/roles/oom_launch/defaults/main.yaml
@@ -0,0 +1,7 @@
+---
+onap_timeout: 900
+
+helm_env: {}
+
+helm_env_postgres:
+ HELM_DRIVER: sql
+ HELM_DRIVER_SQL_CONNECTION_STRING: "{{ postgres_url }}"
diff --git a/roles/oom_launch/tasks/main.yaml b/roles/oom_launch/tasks/main.yaml
new file mode 100644
index 0000000..9ed4144
--- /dev/null
+++ b/roles/oom_launch/tasks/main.yaml
@@ -0,0 +1,199 @@
+---
+- name: check if onap/Chart.yaml file exists
+ ansible.builtin.stat:
+ path: "{{ onap_chart_path }}"
+ register: onap_chart_stat
+
+- name: load onap/Chart.yaml
+ ansible.builtin.slurp:
+ src: "{{ onap_chart_path }}"
+ register: onap_chart_content
+ when: onap_chart_stat.stat.exists
+
+- name: set version according to release found in onap chart
+ set_fact:
+ onap_version:
+ "{{ (onap_chart_content['content'] | b64decode | from_yaml).version }}"
+ when: onap_chart_stat.stat.exists
+
+- name: show version that will be used
+ debug:
+ msg: "will deploy onap version {{ onap_version }}"
+
+- name: check if a environment.yaml exists
+ ansible.builtin.stat:
+ path: "{{ generic_override_path }}/environment.yaml"
+ register: environment_stat
+
+- name: set environment.yaml override
+ ansible.builtin.set_fact:
+ environment_override: "--values {{ generic_override_path }}/environment.yaml"
+ when: environment_stat.stat.exists
+
+- name: do not set environment.yaml override
+ ansible.builtin.set_fact:
+ environment_override: ""
+ when: not environment_stat.stat.exists
+
+- name: check if a onap-components.yaml exists
+ ansible.builtin.stat:
+ path: "{{ override_components }}"
+ register: component_stat
+
+- name: set onap-components.yaml override
+ ansible.builtin.set_fact:
+ component_override: "--values {{ override_components }}"
+ when: component_stat.stat.exists
+
+- name: do not set onap-components.yaml override
+ ansible.builtin.set_fact:
+ component_override: ""
+ when: not component_stat.stat.exists
+
+- name: check if a component-gating-overrides.yaml exists
+ ansible.builtin.stat:
+ path: "{{ override_gating_component }}"
+ register: gating_stat
+
+- name: set component-gating-overrides.yaml override
+ ansible.builtin.set_fact:
+ so_override: "--values {{ override_gating_component }}"
+ when: gating_stat.stat.exists and project == "so"
+
+- name: do not set component-gating-overrides.yaml override
+ ansible.builtin.set_fact:
+ so_override: ""
+ when: not gating_stat.stat.exists or project != "so"
+
+- name: check helm version
+ command: "helm version --template {% raw %}'{{.Version}}'{% endraw %}"
+ register: helm_version
+
+# Return of previous command will be "v3.3.4" for v3 and up and "<no value>"
+# for version 2.
+- name: store helm version
+ ansible.builtin.set_fact:
+ helmv3: "{{ ('<' in helm_version.stdout) | ternary(false, true) }}"
+
+- name: "HELM 3 not installed - stop playbook"
+ ansible.builtin.fail:
+ msg: HELM 3 not installed
+ when: not helmv3
+
+- name: set timeout
+ set_fact:
+ onap_timeout: "{{ onap_timeout }}s"
+
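+# When helm3_use_sql is set, helm stores its release data in PostgreSQL: fetch
+# the password from the postgres secret and expose it through helm_env_postgres
+# (HELM_DRIVER=sql, see defaults).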
+- name: retrieve helm postgres secret
+ community.kubernetes.k8s_info:
+ api_version: v1
+ kind: Secret
+ name: "{{ postgres_secret_name }}"
+ namespace: "{{ postgres_namespace }}"
+ register: postgres_secrets
+ when: helmv3_use_sql|bool
+
+- name: retrieve helm postgres password
+ set_fact:
+ postgres_password: "{{
+ postgres_secrets.resources[0].data['postgresql-password'] | b64decode }}"
+ when: helmv3_use_sql|bool
+
+- name: set helm environment with postgres
+ set_fact:
+ helm_env: "{{ helm_env_postgres }}"
+ when: helmv3_use_sql|bool
+
+- name: update helm repo
+ command: "helm repo up"
+
+- name: create ONAP namespace
+ run_once: "yes"
+ community.kubernetes.k8s:
+ state: present
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: "{{ onap_namespace }}"
+ labels:
+ istio-injection: "{{ (os_infra.onap.istioEnabled | default(true)) |
+ ternary ('enabled', 'disabled') }}"
+ name: "{{ onap_namespace }}"
+
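+# 'helm deploy' is provided by the OOM deploy plugin installed during oom_prepare;
+# it splits the ONAP umbrella chart into one helm release per enabled component.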
+- name: generate command line for launch
+ set_fact:
+ helm_launch: >
+ helm deploy {{ chart_name }} local/onap
+ --namespace {{ onap_namespace }}
+ --version {{ onap_version }}
+ --values {{ onap_all_file }}
+ {{ environment_override }}
+ --values {{ override_file }}
+ {{ component_override }}
+ {{ so_override }}
+ --timeout {{ onap_timeout }}
+
+- name: show deploy execution command line
+ debug:
+ var: helm_launch
+
+- name: "[HELMv3] launch installation"
+ command: "{{ helm_launch }}"
+ register: yolo3
+ changed_when: true
+ async: 4800
+ poll: 0
+ when: helmv3
+ environment: "{{ helm_env }}"
+
+- name: "[HELMv3] wait for helm deploy to finish"
+ async_status:
+ jid: "{{ yolo3.ansible_job_id }}"
+ register: job_result3
+ until: job_result3.finished
+ retries: 480
+ delay: 10
+ when: helmv3
+
+- name: "[HELMv3] see output"
+ ansible.builtin.debug:
+ msg: "{{ job_result3.stdout }}"
+ when: helmv3
+
+- name: check if a deployment has already been done
+ ansible.builtin.stat:
+ path: "{{ deployment_file }}"
+ register: deployment_stat
+
+- name: get deployment.yaml
+ when: deployment_stat.stat.exists
+ block:
+ - name: create temporary local file for deployment.yaml
+ ansible.builtin.tempfile:
+ state: file
+ suffix: temp
+ register: tmp_deployment
+ delegate_to: "127.0.0.1"
+
+ - name: fetch deployment info
+ ansible.builtin.fetch:
+ dest: "{{ tmp_deployment.path }}"
+ src: "{{ deployment_file }}"
+ flat: "yes"
+
+ - name: load deployment info
+ include_vars:
+ file: "{{ tmp_deployment.path }}"
+
+ always:
+ - name: destroy the local tmp_deployment
+ ansible.builtin.file:
+ path: "{{ tmp_deployment.path }}"
+ state: absent
+ delegate_to: "127.0.0.1"
+
+- name: grab a beer
+ ansible.builtin.debug:
+ msg: " .:.\n _oOoOo\n \
+ [_|||||\n |||||\n ~~~~~"
diff --git a/roles/oom_postconfigure/defaults/main.yaml b/roles/oom_postconfigure/defaults/main.yaml
new file mode 100644
index 0000000..c164448
--- /dev/null
+++ b/roles/oom_postconfigure/defaults/main.yaml
@@ -0,0 +1,7 @@
+---
+aai_server: aai.api.sparky.simpledemo.onap.org
+aai_port: 30233
+aai_user: AAI
+aai_password: AAI
+msb_server: msb.api.simpledemo.onap.org
+msb_port: 30283
diff --git a/roles/oom_postconfigure/tasks/main.yaml b/roles/oom_postconfigure/tasks/main.yaml
new file mode 100644
index 0000000..b00c6b4
--- /dev/null
+++ b/roles/oom_postconfigure/tasks/main.yaml
@@ -0,0 +1,52 @@
+---
+- name: check if chartmuseum script exists
+ ansible.builtin.stat:
+ path: "{{ contrib_path }}/tools/registry-initialize.sh"
+ register: chartmuseum_script
+
+- name: wait for chartmuseum to be up
+ run_once: true
+ community.kubernetes.k8s_info:
+ kind: Deployment
+ wait: true
+ name: "{{ onap_release_name }}-chartmuseum"
+ namespace: "{{ onap_namespace }}"
+ wait_sleep: 10
+ wait_timeout: 600
+ register: chartmuseum_deployment
+
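+# Push the generated ONAP charts, plus the shared helper charts listed below, to
+# the in-cluster chartmuseum registry with OOM's registry-initialize.sh script.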
+- name: run internal chart museum result push
+ run_once: true
+ ansible.builtin.shell: |
+ {{ contrib_path }}/tools/registry-initialize.sh -d {{ charts_path }}
+ for package in certInitializer repositoryGenerator readinessCheck postgres serviceAccount mongo common
+ do
+ {{ contrib_path }}/tools/registry-initialize.sh -d {{ charts_path }} -p $package
+ done
+ when: chartmuseum_script.stat.exists and
+ chartmuseum_deployment.resources|length > 0 and
+ chartmuseum_deployment.resources[0].status.availableReplicas > 0
+
+
+- name: fetch cloud config
+ ansible.builtin.fetch:
+ dest: /tmp/clouds.yaml
+ src: "{{ ansible_user_dir }}/.config/openstack/clouds.yaml"
+ flat: "yes"
+
+- name: load cloud config
+ include_vars: /tmp/clouds.yaml
+
+- name: initialize os_auth_url
+ ansible.builtin.set_fact:
+ os_auth_url: "{{ clouds[openstack_user_name].auth.auth_url }}"
+
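+# Normalize the Keystone URL so it always ends with '/v3' and has no trailing slash.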
+- name: add v3 at end of os_auth_url
+ ansible.builtin.set_fact:
+ os_auth_url:
+ "{{ ((os_auth_url[-3:] == 'v3/') or (os_auth_url[-2:] == 'v3')) |
+ ternary(os_auth_url | regex_replace('/$', ''),
+ (os_auth_url[-1:] == '/') | ternary(
+ os_auth_url ~ 'v3',
+ os_auth_url ~ '/v3')) }}"
+
diff --git a/roles/oom_prepare/defaults/main.yaml b/roles/oom_prepare/defaults/main.yaml
new file mode 100644
index 0000000..89d5539
--- /dev/null
+++ b/roles/oom_prepare/defaults/main.yaml
@@ -0,0 +1,8 @@
+---
+helm_server_port: 8879
+helm_path: /usr/local/bin/helm
+chartmuseum_path: /usr/local/bin/chartmuseum
+# new values can be found here: https://github.com/fishworks/fish-food/blob/main/Food/chartmuseum.lua
+chartmuseum_version: v0.12.0
+chartmuseum_sha: 53402edf5ac9f736cb6da8f270f6bbf356dcbbe5592d8a09ee6f91a2dc30e4f6
+helm_push_version: v0.10.3
diff --git a/roles/oom_prepare/tasks/main.yaml b/roles/oom_prepare/tasks/main.yaml
new file mode 100644
index 0000000..043ec52
--- /dev/null
+++ b/roles/oom_prepare/tasks/main.yaml
@@ -0,0 +1,242 @@
+---
+- name: remove oom directory
+ ansible.builtin.file:
+ path: "{{ oom_path }}"
+ state: absent
+
+- name: set review_path (oom case)
+ ansible.builtin.set_fact:
+ review_path: "{{ oom_path }}"
+ when: project == 'oom'
+
+- name: "clone oom {{ branch }}"
+ ansible.builtin.git:
+ repo: "{{ oom_url }}"
+ dest: "{{ oom_path }}"
+ version: "{{ branch }}"
+
+- name: "configure git" # noqa 303
+ shell: |
+ git config --global user.email "You@example.com";
+ git config --global user.name "Your Name"
+ changed_when: "false"
+
+- name: override helm path for CoreOS
+ ansible.builtin.set_fact:
+ helm_path: /home/core/bin/helm
+ when: ansible_os_family | lower == "coreos"
+
+- name: retrieve review_path and clone when not in oom case
+ block:
+ - name: set review_path (not oom case)
+ ansible.builtin.set_fact:
+ review_path: "{{ oom_path }}/{{ project_dir_mapping[project] }}"
+
+ - name: ensure review directory is not there
+ ansible.builtin.file:
+ path: "{{ review_path }}"
+ state: absent
+
+ - name: "clone {{ project }} {{ branch }}"
+ ansible.builtin.git:
+ repo: "{{ onap_base_url }}/{{ project }}"
+ dest: "{{ review_path }}"
+ version: "{{ branch }}"
+ when: project != 'oom' and 'oom' in project
+
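+# Gerrit stores reviews under refs/changes/<last two digits of the review>/<review>/<patchset>.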
+- name: generate review end of url
+ ansible.builtin.set_fact:
+ review_end_url: "{{ gerrit_review[-2:] }}/{{ gerrit_review }}/\
+ {{ gerrit_patchset }}"
+ when: gerrit_review and 'oom' in project
+
+- name: "retrieve change branch for project {{ project }}" # noqa 303
+ shell:
+ cmd: >
+ git pull --no-edit {{ onap_base_url }}/{{ project }}
+ refs/changes/{{ review_end_url }}
+ chdir: "{{ review_path }}"
+ when: gerrit_review and 'oom' in project
+
+- name: "retrieve right submodules if needed for oom {{ project }}" # noqa 303
+ shell:
+ cmd: >
+ git submodule update
+ chdir: "{{ review_path }}"
+ when: gerrit_review and project == 'oom'
+
+- name: check helm version
+ command: "helm version --template {% raw %}'{{.Version}}'{% endraw %}"
+ register: helm_version
+
+# Return of previous command will be "v3.3.4" for v3 and up and "<no value>"
+# for version 2.
+- name: store helm version
+ ansible.builtin.set_fact:
+ helmv3: "{{ ('<' in helm_version.stdout) | ternary(false, true) }}"
+
+- name: "HELM 3 not installed - stop playbook"
+ ansible.builtin.fail:
+ msg: HELM 3 not installed
+ when: not helmv3
+
+- name: create .local/helm folder
+ ansible.builtin.file:
+ path: "{{ ansible_user_dir }}/.local/helm"
+ state: directory
+ recurse: "yes"
+
+- name: retrieve chartmuseum
+ become: true
+ ansible.builtin.get_url:
+ dest: "{{ chartmuseum_path }}"
+ url: "https://s3.amazonaws.com/chartmuseum/release/\
+ {{ chartmuseum_version }}/bin/linux/amd64/chartmuseum"
+ checksum: "sha256:{{ chartmuseum_sha }}"
+ mode: 0777
+
+- name: create chartmuseum folder
+ ansible.builtin.file:
+ path: "{{ ansible_user_dir }}/.chartstorage"
+ state: directory
+
+- name: create .local/chartmuseum folder
+ ansible.builtin.file:
+ path: "{{ ansible_user_dir }}/.local/chartmuseum"
+ state: directory
+ recurse: "yes"
+
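+# Start chartmuseum in the background as the current user: it serves the local
+# helm repository on helm_server_port from ~/.chartstorage and logs to
+# ~/.local/chartmuseum/chartmuseum.log.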
+- name: start helm server
+ become: "yes"
+ shell: "start-stop-daemon --start --background --oknodo \
+ --chuid {{ ansible_user_uid }} --group {{ ansible_user_gid }} \
+ --exec {{ chartmuseum_path }} -- --port={{ helm_server_port }} \
+ --storage='local' --allow-overwrite --debug \
+ --storage-local-rootdir='{{ ansible_user_dir }}/.chartstorage' \
+ > {{ ansible_user_dir }}/.local/chartmuseum/chartmuseum.log 2>&1"
+ changed_when: "true"
+
+- name: list all helm repositories
+ command: "helm repo list -o json"
+ register: repos
+
+- name: remove all helm repositories
+ community.kubernetes.helm_repository:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ repos.stdout | from_json | map(attribute='name') | list }}"
+
+- name: add helm local repository
+ community.kubernetes.helm_repository:
+ name: local
+ repo_url: "http://127.0.0.1:{{ helm_server_port }}"
+
+- name: add helm onap repository
+ community.kubernetes.helm_repository:
+ name: onap
+ repo_url: "http://127.0.0.1:{{ helm_server_port }}"
+
+- name: check if Helm cm-push plugin is installed
+ shell: "helm plugin list | grep cm-push | grep 0.10 | wc -l"
+ register: helm_plugin_cm_push
+ changed_when: "false"
+
+- name: Install Helm cm-push plugin
+ command: "helm plugin install --version {{ helm_push_version }} https://github.com/chartmuseum/helm-push.git"
+ changed_when: "true"
+ when: helm_plugin_cm_push.stdout == "0"
+
+- name: Install Helm deploy plugin
+ community.kubernetes.helm_plugin:
+ plugin_path: "{{ oom_path }}/kubernetes/helm/plugins/deploy"
+ namespace: default
+ state: present
+
+- name: Install Helm undeploy plugin
+ community.kubernetes.helm_plugin:
+ plugin_path: "{{ oom_path }}/kubernetes/helm/plugins/undeploy"
+ namespace: default
+ state: present
+
+- name: Add Kafka Strimzi repository
+ community.kubernetes.helm_repository:
+ name: strimzi
+ repo_url: https://strimzi.io/charts/
+
+- name: Install kafka strimzi
+ community.kubernetes.helm:
+ name: strimzi-kafka-operator
+ chart_ref: strimzi/strimzi-kafka-operator
+ release_namespace: strimzi-system
+ create_namespace: true
+ chart_version: "{{ strimzi_version }}"
+ values:
+ watchAnyNamespace: True
+
+- name: compile helm packages
+ command: "make SKIP_LINT=TRUE all"
+ async: 3600
+ poll: 0
+ changed_when: "true"
+ args:
+ chdir: "{{ oom_path }}/kubernetes"
+ register: make_helm
+
+- name: "wait for helm compile to finish"
+ async_status:
+ jid: "{{ make_helm.ansible_job_id }}"
+ register: job_result
+ until: job_result.finished
+ retries: 360
+ delay: 10
+
+- name: "[review case] generate helm make logs filename"
+ set_fact:
+ helm_log: "make-{{ gerrit_review }}-{{ gerrit_patchset }}.log"
+ when: gerrit_review
+
+- name: "[normal case] generate helm make logs filename"
+ set_fact:
+ helm_log: "make-{{ branch }}.log"
+ when: not gerrit_review
+
+- name: save helm package output
+ copy:
+ dest: "{{ ansible_user_dir }}/.local/helm/{{ helm_log }}"
+ content: "{{ job_result.stdout }}"
+
+- name: "[WORKAROUND] readd helm local repository"
+ command: "helm repo add local http://127.0.0.1:{{ helm_server_port }}"
+ when: not helmv3
+
+- name: check if user clouds exists
+ stat:
+ path: "{{ base_dir }}/vars/user_cloud.yml"
+ delegate_to: localhost
+ register: stat
+
+- name: get user clouds
+ block:
+ - name: include user clouds info
+ include_vars:
+ file: "{{ base_dir }}/vars/user_cloud.yml"
+ name: user_cloud
+
+ - name: retrieve OpenStack user name
+ set_fact:
+ openstack_user_name: "{{ user_cloud | list | first }}"
+
+ - name: retrieve OpenStack informations
+ set_fact:
+ openstack_tenant_name:
+ "{{ user_cloud[openstack_user_name].auth.project_name }}"
+ os_auth_url:
+ "{{ user_cloud[openstack_user_name].auth.auth_url }}"
+
+ - name: generate openstack info file
+ copy:
+ content: |
+ openstack_user_name: {{ openstack_user_name }}
+ openstack_tenant_name: {{ openstack_tenant_name }}
+ dest: "{{ base_dir }}/vars/openstack_infos.yml"
+ delegate_to: localhost
diff --git a/roles/oom_wait/tasks/main.yaml b/roles/oom_wait/tasks/main.yaml
new file mode 100644
index 0000000..924e526
--- /dev/null
+++ b/roles/oom_wait/tasks/main.yaml
@@ -0,0 +1,40 @@
+---
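+# Poll the ONAP namespace until no pod is stuck in ContainerCreating, then until
+# no pod (other than errored ones) is still running its init containers.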
+- name: wait for all containers to be started
+ shell:
+ "set -o pipefail && \
+ kubectl get po -n {{ onap_namespace }} | \
+ grep -c ContainerCreating || true"
+ args:
+ executable: /bin/bash
+ register: kube
+ changed_when:
+ kube.stdout == '0'
+ until:
+ kube.stdout == '0'
+ retries: 1000
+ delay: 10
+
+- name: wait for all containers to be initialized
+ shell:
+ "set -o pipefail && \
+ kubectl get po -n {{ onap_namespace }} | \
+ grep Init | grep -cv Error || true"
+ args:
+ executable: /bin/bash
+ register: kube
+ changed_when:
+ kube.stdout == '0'
+ until:
+ kube.stdout == '0'
+ retries: 1000
+ delay: 10
+
+- name: get result
+ shell:
+ "kubectl get po -n {{ onap_namespace }}"
+ changed_when: "false"
+ register: kube
+
+- name: show result
+ ansible.builtin.debug:
+ msg: "{{ kube.stdout }}"
diff --git a/roles/prepare_ci/defaults/main.yaml b/roles/prepare_ci/defaults/main.yaml
new file mode 100644
index 0000000..34cfc01
--- /dev/null
+++ b/roles/prepare_ci/defaults/main.yaml
@@ -0,0 +1,6 @@
+---
+ci_packages: []
+ci_packages_to_be_removed:
+ - python3-yaml
+ci_python3_packages: []
+proxy_env: {}
diff --git a/roles/prepare_ci/tasks/install_DEBIAN.yaml b/roles/prepare_ci/tasks/install_DEBIAN.yaml
new file mode 100644
index 0000000..9537976
--- /dev/null
+++ b/roles/prepare_ci/tasks/install_DEBIAN.yaml
@@ -0,0 +1,11 @@
+---
+- name: load os specific configuration
+ include_vars: "debian.yaml"
+ when: ansible_os_family | lower == "debian"
+
+- name: "[Debian] install needed packages"
+ include_role:
+ name: apt_install
+ vars:
+ environment: "{{ proxy_env }}"
+ packages: "{{ ci_packages }}"
diff --git a/roles/prepare_ci/tasks/main.yaml b/roles/prepare_ci/tasks/main.yaml
new file mode 100644
index 0000000..a008fd1
--- /dev/null
+++ b/roles/prepare_ci/tasks/main.yaml
@@ -0,0 +1,57 @@
+---
+- name: load os specific configuration
+ include_vars: "{{ ansible_os_family | lower }}.yaml"
+ when: ansible_os_family | lower == "debian"
+
+- name: "[Debian] install needed packages"
+ include_role:
+ name: apt_install
+ vars:
+ environment: "{{ proxy_env }}"
+ packages: "{{ ci_packages }}"
+ when: ansible_os_family | lower == "debian"
+
+- name: "[Non Debian] install needed packages"
+ ansible.builtin.package:
+ name: "{{ item }}"
+ state: present
+ loop: "{{ ci_packages }}"
+ when: ansible_os_family | lower != "debian"
+
+# Workaround
+# Conflict between the python3-yaml installed with the package manager
+# preventing the one from pip
+# Observed on daily/weekly on the 26th of June
+# ERROR: Cannot uninstall 'PyYAML'. It is a distutils installed project and
+# thus we cannot accurately determine which files belong to it which would lead
+# to only a partial uninstall.
+# As a workaround, we force the uninstallation of the python3-yaml package
+# before starting the installation
+- name: "[Debian] remove unexpected packages"
+ ansible.builtin.apt:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ ci_packages_to_be_removed }}"
+ when: ansible_os_family | lower == "debian"
+
+- name: "[Non Debian] remove unexpected packages"
+ ansible.builtin.package:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ ci_packages_to_be_removed }}"
+ when: ansible_os_family | lower != "debian"
+# End of Workaround
+
+- name: "[Python 3] install needed python packages"
+ pip:
+ name: "{{ item }}"
+ state: present
+ loop: "{{ ci_python3_packages }}"
+ when: ansible_python_version is version('3', '>=')
+
+
+- name: allow oom_path parent directory to be usable by user
+ ansible.builtin.file:
+ path: "{{ oom_path.split('/')[0:-1] | join('/') }}"
+ state: directory
+ mode: 0777
diff --git a/roles/prepare_ci/vars/debian.yaml b/roles/prepare_ci/vars/debian.yaml
new file mode 100644
index 0000000..5b9029c
--- /dev/null
+++ b/roles/prepare_ci/vars/debian.yaml
@@ -0,0 +1,18 @@
+---
+ci_packages:
+ - jq
+ - build-essential
+ - libffi-dev
+ - git
+ - python3-pip
+ - rsync
+ci_python3_packages:
+ - openshift==0.11.2
+ - stevedore==1.32.0
+ - dogpile.cache==0.6.5
+ - openstacksdk==0.43.0
+ - shade==1.33.0
+ - os-client-config==2.0.0
+ - python-openstackclient==5.2.1
+ - python-heatclient==1.18.0
+ - jsonschema
diff --git a/run.sh b/run.sh
new file mode 100755
index 0000000..55f9fda
--- /dev/null
+++ b/run.sh
@@ -0,0 +1,152 @@
+#!/bin/bash
+
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
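+# Steps to run; each block below is enabled by substring matching on the labels.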
+labels=$*
+
+RUN_SCRIPT=${0}
+RUN_ROOT=$(dirname $(readlink -f ${RUN_SCRIPT}))
+export RUN_ROOT=$RUN_ROOT
+source ${RUN_ROOT}/scripts/rc.sh
+
+# register our handler
+trap submit_bug_report ERR
+
+#-------------------------------------------------------------------------------
+# If no labels are set with args, run all
+#-------------------------------------------------------------------------------
+if [[ $labels = "" ]]; then
+ labels="ci prepare configure deploy wait postconfigure check_containers healtcheck"
+fi
+
+step_banner "Fetch galaxy roles"
+ansible-galaxy install -r requirements.yml
+
+if [[ $labels = *"clean"* ]]; then
+ #-------------------------------------------------------------------------------
+ # Prepare CI
+  # - install needed packages and verify directories are writable
+ #-------------------------------------------------------------------------------
+ step_banner "Prepare CI"
+ ansible-playbook ${ANSIBLE_VERBOSE} \
+ -i ${RUN_ROOT}/inventory/infra \
+ ${RUN_ROOT}/onap-oom-prepare-ci.yaml --vault-id ${RUN_ROOT}/.vault
+
+ step_banner "CI prepared"
+
+ #-------------------------------------------------------------------------------
+  # Clean OOM
+  # - remove the previous ONAP deployment and its leftovers
+ #-------------------------------------------------------------------------------
+ step_banner "Clean OOM"
+ ansible-playbook ${ANSIBLE_VERBOSE} \
+ -i ${RUN_ROOT}/inventory/infra \
+ ${RUN_ROOT}/onap-oom-clean.yaml --vault-id ${RUN_ROOT}/.vault
+
+ step_banner "OOM cleaned"
+fi
+
+if [[ $labels = *"prepare"* ]]; then
+ #-------------------------------------------------------------------------------
+ # Prepare CI
+  # - install needed packages and verify directories are writable
+ #-------------------------------------------------------------------------------
+ step_banner "Prepare CI"
+ ansible-playbook ${ANSIBLE_VERBOSE} \
+ -i ${RUN_ROOT}/inventory/infra \
+ ${RUN_ROOT}/onap-oom-prepare-ci.yaml --vault-id ${RUN_ROOT}/.vault
+
+ step_banner "CI prepared"
+
+ #-------------------------------------------------------------------------------
+ # Prepare OOM
+ # - create helm servers
+ # - compile OOM helm packages and push them to local server
+ #-------------------------------------------------------------------------------
+ step_banner "Prepare OOM"
+ ansible-playbook ${ANSIBLE_VERBOSE} \
+ -i ${RUN_ROOT}/inventory/infra \
+ ${RUN_ROOT}/onap-oom-prepare.yaml --vault-id ${RUN_ROOT}/.vault
+
+ step_banner "OOM prepared"
+fi
+
+#-------------------------------------------------------------------------------
+# Configure OOM
+# - retrieve tenant information
+# - encrypt tenant password
+# - generate OOM configuration
+#-------------------------------------------------------------------------------
+if [[ $labels = *"configure"* ]]; then
+ step_banner "Configure OOM ${DEPLOYMENT_TYPE}"
+ ansible-playbook ${ANSIBLE_VERBOSE} \
+    -i ${RUN_ROOT}/inventory/infra \
+ ${RUN_ROOT}/onap-oom-configure.yaml --vault-id ${RUN_ROOT}/.vault
+
+ step_banner "OOM ${DEPLOYMENT_TYPE} configured"
+fi
+
+#-------------------------------------------------------------------------------
+# Deploy OOM
+# - launch installation via HELM
+#-------------------------------------------------------------------------------
+if [[ $labels = *"deploy"* ]]; then
+ step_banner "Deploy OOM"
+ ansible-playbook ${ANSIBLE_VERBOSE} \
+    -i ${RUN_ROOT}/inventory/infra \
+ ${RUN_ROOT}/onap-oom-deploy.yaml --vault-id ${RUN_ROOT}/.vault
+
+ step_banner "OOM deployed"
+fi
+
+#-------------------------------------------------------------------------------
+# Wait for End of Deployment
+# - Wait that all pods are started
+#-------------------------------------------------------------------------------
+if [[ $labels = *"wait"* ]]; then
+ step_banner "Wait for end of deployment"
+ ansible-playbook ${ANSIBLE_VERBOSE} \
+    -i ${RUN_ROOT}/inventory/infra \
+ ${RUN_ROOT}/onap-oom-wait.yaml --vault-id ${RUN_ROOT}/.vault
+
+ step_banner "End of deployment done"
+fi
+
+#-------------------------------------------------------------------------------
+# Postconfigure OOM
+# - Create VIM in multicloud
+#-------------------------------------------------------------------------------
+if [[ $labels = *"postconfiguration"* ]]; then
+ step_banner "Post configure OOM"
+ ansible-playbook ${ANSIBLE_VERBOSE} \
+    -i ${RUN_ROOT}/inventory/infra \
+ ${RUN_ROOT}/onap-oom-postconfigure.yaml --vault-id ${RUN_ROOT}/.vault
+
+ step_banner "OOM postconfigured"
+fi
+
+#-------------------------------------------------------------------------------
+# Postinstallation OOM
+# - Generate /etc/hosts
+#-------------------------------------------------------------------------------
+if [[ $labels = *"postinstallation"* ]]; then
+ step_banner "Post install OOM"
+ ansible-playbook ${ANSIBLE_VERBOSE} \
+    -i ${RUN_ROOT}/inventory/infra \
+ ${RUN_ROOT}/onap-oom-postinstall.yaml --vault-id ${RUN_ROOT}/.vault
+
+ step_banner "OOM postinstalled"
+fi
diff --git a/scripts/README.md b/scripts/README.md
new file mode 100644
index 0000000..abebcdd
--- /dev/null
+++ b/scripts/README.md
@@ -0,0 +1,76 @@
+# chained-ci-tools
+
+Library to unify the usage of chained-ci
+
+How to run
+----
+
+To prepare the environment just run:
+```./<chained-ci-tools_folder>/chained-ci-init.sh [-a] [-i inventory]```
+
+Depending on your artifacts and environment variables, this will prepare:
+
+- The vault key file
+- The artifacts coming from chained-ci
+- The ssh key and the ssh config
+
+Options are:
+- ```-a```: Read the remote artifact
+- ```-i inventory```: Set the inventory file for ssh config
+
+For security purposes, the environment should ALWAYS be cleaned! The clean
+script described below will:
+- Remove the vault key file
+- Remove the ssh config file with id_rsa key files
+- Vault ALL the artifact files of the current job. To add an exception and keep
+  a file or a folder unvaulted, set the NOVAULT_LIST parameter to a list of
+  paths separated by a newline or a space, like this:
+ ```
+ NOVAULT_LIST="""folder1/file2
+ folder2/file2
+ folder3/"""
+ ```
+ or
+ ```
+ NOVAULT_LIST="folder1/file2 folder2/file2 folder3/"
+ ```
+  Please note the '/' at the end of the folder path; it will work without it,
+  but you may then also filter out any name starting with "folder3".
+
+
+To clean the environment, just run:
+```
+./<chained-ci-tools_folder>/clean.sh
+```
+
+
+Use it as a submodule
+----------
+
+```
+git submodule add https://gitlab.com/Orange-OpenSource/lfn/ci_cd/chained-ci-tools.git scripts/chained-ci-tools
+```
+
+If you use the CI, don't forget to add the following parameter in ```.gitlab-ci.yml```
+```
+variables:
+ GIT_SUBMODULE_STRATEGY: recursive
+```
+
+
+Chained-ci-tools in gitlab-ci.yml
+--------
+
+In your ```.gitlab-ci.yml```, you can add:
+```
+.chained_ci_tools: &chained_ci_tools
+ before_script:
+ - ./scripts/chained-ci-tools/chained-ci-init.sh -a -i inventory
+ after_script:
+ - ./scripts/chained-ci-tools/clean.sh
+```
+
+and add this block when you need to run it
+```
+<<: *chained_ci_tools
+```
diff --git a/scripts/chained-ci-init.sh b/scripts/chained-ci-init.sh
new file mode 100755
index 0000000..1ea78e2
--- /dev/null
+++ b/scripts/chained-ci-init.sh
@@ -0,0 +1,102 @@
+#!/usr/bin/env bash
+
+export RUN_SCRIPT=${BASH_SOURCE[0]}
+
+if [ -r $1 ]; then
+ echo """
+<!> DEPRECATION <!>
+<!> You are using a deprecated call to this script.
+<!> Please use the following options:
+<!> -i inventory : to set the inventory path to generate the ssh config file
+<!> -a : to read the remote artifact
+"""
+ DEPRECATED_WAY="True"
+ INVENTORY=$1
+ REMOTE_ARTIFACT="True"
+else
+ while getopts ai: option
+ do
+ case "${option}"
+ in
+ a) REMOTE_ARTIFACT="True";; # Read the remote artifact
+ i) INVENTORY=${OPTARG};; # Set the inventory file for ssh config
+ esac
+ done
+fi
+
+export TOOLS_FOLDER=$(dirname $(readlink -f ${RUN_SCRIPT}))
+export ROOT_FOLDER=${PWD}
+. ${TOOLS_FOLDER}/rc.sh
+trap submit_bug_report ERR
+
+##############################################
+step_banner "Tasked trigger infos"
+##############################################
+echo "POD: ${pod}"
+echo "Pipeline triggered by: ${source_job_name}"
+
+##############################################
+step_banner "Prepare environment"
+##############################################
+
+# Set Vault password
+VAULT_OPT=''
+if [ -n "${ANSIBLE_VAULT_PASSWORD}" ]; then
+ step_line "ansible vault password file"
+ echo ${ANSIBLE_VAULT_PASSWORD} > ${ROOT_FOLDER}/.vault
+ export VAULT_OPT="--vault-password-file ${ROOT_FOLDER}/.vault"
+else
+ step_line no vault password provided
+fi
+
+##############################################
+step_banner "Get artifacts"
+##############################################
+if [ "${CI_PIPELINE_SOURCE}" == "trigger" ] && [ "${REMOTE_ARTIFACT}" == "True" ]; then
+ if [ -n "${artifacts_src}" ] || [ -n "${artifacts_bin}" ]; then
+ if [ -n "${artifacts_src}" ]; then
+ step_line "getting artifact from source url"
+ step_line "(your may need to set PRIVATE_TOKEN argument to access non public artifact)"
+ curl -L -s -H "PRIVATE-TOKEN: ${PRIVATE_TOKEN}" -o "${ROOT_FOLDER}/artifacts.zip" "${artifacts_src}"
+ elif [ -n "${artifacts_bin}" ]; then
+ step_line "getting artifact from its binary content"
+ echo "${artifacts_bin}" | base64 -d > ${ROOT_FOLDER}/artifacts.zip
+ fi
+ step_line "unzip artifacts"
+ unzip -o ${ROOT_FOLDER}/artifacts.zip -d ${ROOT_FOLDER}
+ rm ${ROOT_FOLDER}/artifacts.zip
+ else
+ step_line "No artifact provided"
+ exit -1
+ fi
+else
+ step_line "Pipeline not triggered (\$CI_PIPELINE_SOURCE=${CI_PIPELINE_SOURCE})"
+ step_line "or remote artifact option '-a' not set"
+fi
+
+##############################################
+step_banner "Set SSH config"
+##############################################
+if [ -e ${ROOT_FOLDER}/vars/vaulted_ssh_credentials.yml ]; then
+ if [ -z "${INVENTORY}" ]; then
+ error_line "No Inventory provided (-i option)"
+ exit -1
+ else
+ check_ci_var ANSIBLE_VAULT_PASSWORD
+ check_ci_var INVENTORY
+ step_line Generate SSH config
+ ansible-playbook ${ansible_verbose} -i ${INVENTORY} ${VAULT_OPT} ${TOOLS_FOLDER}/prepare_ssh.yml
+ export SSH_OPT="-F ${ROOT_FOLDER}/ssh_config"
+ export ANSIBLE_SSH_ARGS="-C -o ControlMaster=auto -o ControlPersist=60s ${SSH_OPT}"
+ if [ "${DEPRECATED_WAY}" == "True" ]; then
+ step_line Add symlink to support DEPRECATED calls of this script
+ ln -s ${ROOT_FOLDER}/ssh_config ${ROOT_FOLDER}/config
+ fi
+ fi
+else
+ step_line "no ssh creds"
+fi
+
+##############################################
+step_banner "End of preparation"
+##############################################
diff --git a/scripts/clean.sh b/scripts/clean.sh
new file mode 100755
index 0000000..7f255fc
--- /dev/null
+++ b/scripts/clean.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env sh
+
+export TOOLS_FOLDER=$(dirname $(readlink -f ${0}))
+export ROOT_FOLDER=${PWD}
+. ${TOOLS_FOLDER}/rc.sh
+
+###############################################################
+step_banner Artifact ciphering
+###############################################################
+
+# Function to check a file is in a list
+file_in_list (){
+  LIST=$(echo "$1" | tr '\n' ' ') # if the list uses newline separators
+ FILE=$2
+ for FILTER in ${LIST}; do
+ if $(echo ${FILE}| grep "^${FILTER}" 2>&1 >/dev/null); then
+ return 0
+ fi
+ done
+ return 1
+}
+
+if [ -e ${ROOT_FOLDER}/.vault ]; then
+ #Ensure we have a NOVAULT_LIST
+ NOVAULT_LIST="fake/file ${NOVAULT_LIST}"
+ #Get artifacts paths
+ INV_PATHS=$(cat .gitlab-ci.yml | yq --arg job ${CI_JOB_NAME} -r '.[$job].artifacts.paths[]')
+ #Read paths
+ for INV_PATH in ${INV_PATHS}; do
+ if [ -e ${INV_PATH} ]; then
+ #If the artifact is a directory, reads files in it
+ if [ -d ${INV_PATH} ]; then
+ FILES=$(find ${INV_PATH} -type f)
+ else
+ FILES=${INV_PATH}
+ fi
+ # For each file, vault or not
+ for FILE in ${FILES}; do
+ if $(file_in_list "${NOVAULT_LIST}" ${FILE}); then
+ echo "${FILE}: Not vaulting"
+ else
+ if $(head -n1 ${FILE} |grep "^\$ANSIBLE_VAULT;" > /dev/null); then
+ echo "${FILE}: Already vaulted"
+ else
+ echo "${FILE}: Vaulting"
+ ansible-vault encrypt --vault-password-file ${ROOT_FOLDER}/.vault ${FILE}
+ fi
+ fi
+ done
+ fi
+ done
+fi
+
+###############################################################
+step_banner Cleaning all files
+###############################################################
+if [ -e ${ROOT_FOLDER}/.vault ]; then
+ step_line remove vault file
+ rm ${ROOT_FOLDER}/.vault
+fi
+if [ -e ${ROOT_FOLDER}/id_rsa ]; then
+ step_line remove ssh certs
+ rm ${ROOT_FOLDER}/id_rsa
+fi
+if [ -e ${ROOT_FOLDER}/id_rsa.pub ]; then
+ step_line remove pub ssh certs
+ rm ${ROOT_FOLDER}/id_rsa.pub
+fi
+if [ -e ${ROOT_FOLDER}/ssh_config ]; then
+ step_line remove ssh config
+ rm ${ROOT_FOLDER}/ssh_config
+fi
+if [ -e ${ROOT_FOLDER}/vars/openstack_openrc ]; then
+ step_line remove openstack admin rc
+ rm ${ROOT_FOLDER}/vars/openstack_openrc
+fi
diff --git a/scripts/prepare_ssh.yml b/scripts/prepare_ssh.yml
new file mode 100644
index 0000000..cc09d05
--- /dev/null
+++ b/scripts/prepare_ssh.yml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ gather_facts: "no"
+ roles:
+ - ssh_prepare
diff --git a/scripts/rc.sh b/scripts/rc.sh
new file mode 100644
index 0000000..4ab435d
--- /dev/null
+++ b/scripts/rc.sh
@@ -0,0 +1,145 @@
+#!/usr/bin/env bash
+
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+##
+# Bug report function
+##
+submit_bug_report() {
+ SHELL_VERBOSE=${SHELL_VERBOSE:-}
+ local lc="$BASH_COMMAND" rc=$?
+ echo ""
+ echo "---------------------------------------------------------------------"
+ step_line "Crash on command"
+ echo
+ echo $lc
+ echo
+ step_line "Exit code"
+ echo
+ echo $rc
+ if [ ! -z ${SHELL_VERBOSE} ]; then
+ echo
+ step_line "Environment variables"
+ echo
+ env | grep -v artifacts_bin \
+ | sort \
+ | sed 's/^/export /' \
+ | sed 's/PASSWORD=.*/PASSWORD=***HIDDEN***/' \
+ | sed 's/artifacts_bin=.*/artifacts_bin=***HIDDEN***/'
+ fi
+ echo "---------------------------------------------------------------------"
+ step_banner "Clean"
+ ${TOOLS_FOLDER}/clean.sh
+ echo "---------------------------------------------------------------------"
+}
+
+
+##
+# Pretty print
+##
+step_banner() {
+ echo ""
+ echo "====================================================================="
+ echo "${RUN_SCRIPT}"
+ date
+ echo "$*"
+ echo "====================================================================="
+ echo ""
+}
+
+step_line() {
+ echo ">>> ${*}"
+}
+
+error_line() {
+ echo "!!! ${*}"
+}
+
+##
+# Test A CI var is required
+##
+check_ci_var() {
+ var_name=$1
+ if [ -z "${!var_name}" ]; then
+ error_line
+ error_line "Variable \$${var_name} must be defined"
+ error_line "Please set it in your gitlab project (Settings / CI-CD / variables page)"
+ error_line
+ exit
+ fi
+}
+
+##
+# Warn if run as root
+##
+no_root_needed() {
+ step_line "Check if we are root"
+ if [[ $(whoami) == "root" ]]; then
+ echo "WARNING: This script should not be run as root!"
+ echo "Elevated privileges are aquired automatically when necessary"
+ echo "Waiting 10s to give you a chance to stop the script (Ctrl-C)"
+ for x in $(seq 10 -1 1); do echo -n "$x..."; sleep 1; done
+ fi
+}
+
+##
+# Ensure root folder is not world readable
+##
+ansible_prepare(){
+ step_line "Set local folder not writable to others"
+  chmod 700 ${ROOT_FOLDER}
+}
+
+##
+# SSH Options
+##
+ssh_opt(){
+ SSH_OPT=''
+ if [ -e ${ROOT_FOLDER}/vars/vaulted_ssh_credentials.yml ]; then
+ SSH_OPT="${SSH_OPT} -F ${ROOT_FOLDER}/ssh_config"
+ fi
+ echo ${SSH_OPT}
+}
+
+##
+# Vault Options
+##
+vault_opt(){
+ VAULT_OPT=''
+ if [ -n "${ANSIBLE_VAULT_PASSWORD}" ]; then
+ VAULT_OPT="--vault-password-file ${ROOT_FOLDER}/.vault"
+ fi
+ echo ${VAULT_OPT}
+}
+
+##
+# Get Ansible SSH Options
+##
+ansible_ssh_opt(){
+ ANSIBLE_SSH_ARGS="-C -o ControlMaster=auto -o ControlPersist=60s"
+ if [ -n "${ANSIBLE_VAULT_PASSWORD}" ]; then
+ ANSIBLE_SSH_ARGS="${ANSIBLE_SSH_ARGS} $(ssh_opt)"
+ fi
+ echo ${ANSIBLE_SSH_ARGS}
+}
+
+##
+# Cat file that may be vaulted
+##
+cat_file(){
+ FILE=$1
+ if [ -e ${ROOT_FOLDER}/.vault ] \
+ && grep -q '^\$ANSIBLE_VAULT;1\..;AES256' ${FILE}; then
+ ansible-vault view --vault-password-file=${ROOT_FOLDER}/.vault ${FILE}
+ else
+ cat ${FILE}
+ fi
+}
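rc.sh is a helper library rather than an entry point: run.sh and the other scripts are expected to source it and call the functions above. The wiring below is an illustrative sketch (not copied from run.sh), assuming the repository root as working directory and playbook names from this repo:

    #!/usr/bin/env bash
    set -e
    # assumptions: ROOT_FOLDER/TOOLS_FOLDER/RUN_SCRIPT are what rc.sh expects to find
    ROOT_FOLDER="$(pwd)"
    TOOLS_FOLDER="${ROOT_FOLDER}/scripts"
    RUN_SCRIPT="$(basename "$0")"
    . "${TOOLS_FOLDER}/rc.sh"

    trap submit_bug_report ERR              # print the failing command and run clean.sh
    check_ci_var ANSIBLE_VAULT_PASSWORD     # fail fast if the CI variable is missing
    no_root_needed
    ansible_prepare

    step_banner "Deploy ONAP"
    ansible-playbook $(vault_opt) \
      --ssh-common-args "$(ansible_ssh_opt)" \
      -i inventory/ onap-oom-deploy.yaml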
diff --git a/scripts/ssh_prepare/defaults/main.yml b/scripts/ssh_prepare/defaults/main.yml
new file mode 100644
index 0000000..f074f01
--- /dev/null
+++ b/scripts/ssh_prepare/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# variable needed to access jumphost
+ssh_id_rsa: "{{ vault_ssh_id_rsa }}"
+ssh_id_rsa_pub: "{{ vault_ssh_id_rsa_pub }}"
diff --git a/scripts/ssh_prepare/tasks/main.yml b/scripts/ssh_prepare/tasks/main.yml
new file mode 100644
index 0000000..e47ab11
--- /dev/null
+++ b/scripts/ssh_prepare/tasks/main.yml
@@ -0,0 +1,40 @@
+---
+- set_fact:
+ base_dir: "{{ lookup('env', 'ROOT_FOLDER') | default(playbook_dir, true) }}"
+
+- name: check if vaulted ssh credentials file exists
+ stat:
+ path: "{{ base_dir }}/vars/vaulted_ssh_credentials.yml"
+ register: creds_stat
+
+- name: include vaulted ssh credentials
+ include_vars: "{{ base_dir }}/vars/vaulted_ssh_credentials.yml"
+ when: creds_stat.stat.exists
+
+- name: check if vaulted ssh_gateways file exists
+ stat:
+ path: "{{ base_dir }}/vars/ssh_gateways.yml"
+ register: gw_stat
+
+- name: include vaulted ssh gateways
+ include_vars: "{{ base_dir }}/vars/ssh_gateways.yml"
+ when: gw_stat.stat.exists
+
+- name: create id_rsa file
+ copy:
+ dest: "{{ base_dir }}/id_rsa"
+ content: "{{ ssh_id_rsa }}"
+ mode: 0600
+ when: creds_stat.stat.exists
+
+- name: create id_rsa.pub file
+ copy:
+ dest: "{{ base_dir }}/id_rsa.pub"
+ content: "{{ ssh_id_rsa_pub }}"
+ mode: 0600
+ when: creds_stat.stat.exists
+
+- name: generate ssh config
+ template:
+ src: config.j2
+ dest: "{{ base_dir }}/ssh_config"
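Once the role has run, the generated material sits in base_dir (ROOT_FOLDER, or the playbook directory as a fallback). A quick manual check, where 'control01' stands in for any host name from your own inventory:

    ls -l id_rsa id_rsa.pub ssh_config       # files created by the tasks above
    ssh -F ssh_config control01 hostname     # hop through the generated config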
diff --git a/scripts/ssh_prepare/templates/config.j2 b/scripts/ssh_prepare/templates/config.j2
new file mode 100644
index 0000000..375efd7
--- /dev/null
+++ b/scripts/ssh_prepare/templates/config.j2
@@ -0,0 +1,33 @@
+Host *
+{% if creds_stat.stat.exists %}
+ IdentityFile {{ base_dir }}/id_rsa
+{% endif %}
+ UserKnownHostsFile=/dev/null
+ StrictHostKeyChecking=no
+
+{% if gw_stat.stat.exists %}
+{% for gw in ssh_gateways | default([]) %}
+host {{ gw.name }}
+ Hostname {{ gw.public_fqdn | default(gw.ansible_host) }}
+ User {{ gw.ansible_user }}
+{% if gw.ansible_port is defined %}
+ Port {{ gw.ansible_port }}
+{% endif %}
+{% if gw.proxy_command is defined %}
+ ProxyCommand {{ gw.proxy_command }}
+{% endif %}
+
+{% endfor %}
+{% endif %}
+
+{% for node in groups.all %}
+{% if hostvars[node].ansible_host is defined %}
+host {{ node }} {{ hostvars[node].public_fqdn | default('') }} {{ hostvars[node].ansible_host }}
+ Hostname {{ hostvars[node].public_fqdn | default(hostvars[node].ansible_host) }}
+ User {{ hostvars[node].ansible_user }}
+{% if gw_stat.stat.exists %}
+ ProxyCommand ssh -F {{ base_dir }}/ssh_config -W %h:%p {{ ssh_gateways[0].name }}
+{% endif %}
+{% endif %}
+
+{% endfor %}
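The gateway block above is only rendered when vars/ssh_gateways.yml exists; from the fields the template reads, that file is expected to look roughly like the sketch below (all values are placeholders, not taken from a real lab):

    ---
    ssh_gateways:
      - name: jumphost
        public_fqdn: jumphost.example.org      # falls back to ansible_host when omitted
        ansible_host: 192.0.2.10
        ansible_user: debian
        ansible_port: 22                       # optional
        # proxy_command: ssh -W %h:%p bastion  # optional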
diff --git a/vars/ddf.yml b/vars/ddf.yml
new file mode 100644
index 0000000..80f908e
--- /dev/null
+++ b/vars/ddf.yml
@@ -0,0 +1,18 @@
+---
+datacenter:
+ id: DL-DELL
+ code: DL-DELL
+ url: http://dl-control.sdp.telekom.de/
+ name: Darmstadt Dell Lab
+ type: datacenter
+ street1: Deutsche-Telekom-Allee 1
+ street2:
+ city: Darmstadt
+ state: Hessen
+ postal_code: 64295
+ country: Germany
+ region: Europe
+ latitude: 49.865940
+ longitude: 8.626236
+ elevation: 92
+ lata: "02"
diff --git a/vars/vaulted_ssh_credentials.yml b/vars/vaulted_ssh_credentials.yml
new file mode 100644
index 0000000..7adc961
--- /dev/null
+++ b/vars/vaulted_ssh_credentials.yml
@@ -0,0 +1,118 @@
+vault_ssh_id_rsa: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 65653766396137306366306161373231333165383632303130656233303061613863343231323537
+ 3230353261326566363830353936373563393036333863380a616564353734306266303232343438
+ 63313437353234386536366430333735353034323234636639363361353739353431623466636330
+ 6134343932643165310a376137376461386663313333366265396338343739353466366637326365
+ 34613565643934633232653966343064323134646361616533633036613635666131636364326233
+ 64343163306534633739326365333339323836643034653830633132623733396531633030323431
+ 35386462656465623231613638376665656332656238623765353064393439353464383163616532
+ 39376662643436613130353732633232316639663932653435653766396564366437323232386264
+ 37323062396336393763333436373932623936663735646333306665333830323032376539613933
+ 34393861643634343038623137306263343437633166613166303437396430396538333138336165
+ 36383930323563386434313633393136633362363938303266623237366666656136376166623833
+ 64336136343463316665623065373263356334346430303362623936613731303336643131313637
+ 64633938313335373961343466346561353833656636623332333361656239393138346264393539
+ 65323765616362666465366261363566316635306631656566633635383537326364653862373330
+ 34366136363532376539313338633561386364336438363632383533623663316564643866633361
+ 64646336396638656339396466373732656531383261336166323161313365633835623032313666
+ 31326535636632653936396361666633373463633037353431323036303036666635393762393538
+ 66336363636639623663643439626363393162303966333636633733373237356562383361363464
+ 37393737316330346265333664626436656638363764393230306236643866386561346138343533
+ 37616232303937303966303662366161343964303430366335333638363162306536346564343165
+ 35313530333531386430353162313530633162633138303231343439626164623238643235333066
+ 62626234633565303535663530656434316339653262306337313164306266613166643136376461
+ 31353438306138313631353866643636313535666164646635643131343861646333633064393864
+ 61353837306430363133633430656534313733313465383964656165616364333831376364326530
+ 38326635326665326637363662346132353766343938666231656432666236396335303635373864
+ 39383032666532323435633038353564633531393233616332366436626233383562333335323936
+ 34336431343731363230383530303162303333633063653362373563353463663866316563656139
+ 63353831313765336538663533333034316234383230323666656430656539613732336635343431
+ 30306565343734313736376265346665636439393734656533343466343833373931333439383633
+ 64323030353238633732323933343235376439633731303266623964663438313861356630663631
+ 30373830623266353733333339353365333730633339373666323131323739343132353233616561
+ 65336631306334623538646638393331356434663637316264376233636539636131353339306637
+ 36373235313231353061313130303466376663336538373133396665633533383535666536306266
+ 64623737663833383766623631343835613935386535353531343136643539343531396665386233
+ 38626632323565303761666333646538376366336636613031306635326433396538366439396563
+ 66613033336539326431376266373965343935653765333665323832613935653330303862323861
+ 63646335613465326364663634373338326239386138396535663633643330353565363438373439
+ 34336163663030383863373339616465636263636532383237666333613564383361396433326630
+ 64376638393236373066323439303361386165326237653761316538383837623338633161323331
+ 61373964373362316639303131613462646433376236353430633130356138333463316664646536
+ 65313030376438613036343930323339366631333037366666613164353336313531376462353734
+ 39343531376263306465363230666634393533363039313065643161396637343131353536616639
+ 33623639386364663433613766626337356237666166613030386662316666613062613464386437
+ 64356336336336326466636131633061613430663732353539653936313366386139656535303631
+ 64316137636562316430393165623866306239353634383930353165313163373731393737303664
+ 30633031386337373330613938396264633264396165633637653937373065393163613566663261
+ 66643263303464663561346166313365613666366264353964343739323965353730656333373937
+ 35386338326134613835653636323265383566616362363135623636363166623734356139613430
+ 30346335653633363561613564653235323666643433623230393934663930616461373539303266
+ 66353563366431306434373861353665356638646130386664626639633230366430346264396531
+ 33633332663336343130336535313539363962383464303837373562613638383965306233336164
+ 38376438366532373766613131663266343439326166393430613433393337613563386637313261
+ 38333338643965363461396266396236666638653134366134356263323630343662323833316462
+ 31643539626331613434366636613037303536396265313664663562613737363339353066653237
+ 65346662356333626139343834353065663465333835306362343932303164373866303562366563
+ 39653938643530366162666465313736383930623863313435373866373266313763343132353438
+ 31363933343433336561363762623031316538323961353937646333343435373865363964616332
+ 63636534306533613732336663633238643461326163333731663632303031323166313930353130
+ 32626137386231306632646233323863393738346231643138376536646162386431323537343330
+ 64383933303463616164366436366662646566643135353361313766316236343863376534653936
+ 66666364353036323035366666346134393037383230643662383333653933336239646232353165
+ 66363239383964376238336339383363303265363466353138656639383934653538383765376339
+ 63356164376532343463316338663061366661623438326266313764336564356234373730313437
+ 31343135656332623537303164303338336663313839393163636331393739666166333330313533
+ 30323863643365663264613961336431623230626439656565616538316231336562633331323237
+ 34623463316165653032396666613966666664376364643832653837353739623531666166623165
+ 33613765623065633964373937666635333334626631383030336234666130313639386631623231
+ 37356132306266336333366464666133613161323336643764633339316561383233643162306135
+ 36653764663566323036343636613837353761643739353239343934626638356438343062373737
+ 30666230353931393935363834646166316333313633666337613534386539623835343731303932
+ 39643233353933643136646162373136623966376236353365396565303833653764623561386634
+ 34616463643035316262616463626466336565656531613135666130616333653564333465343533
+ 36616563666464636636663363343831343166393062316333646231343931323066393264633666
+ 30313439303032393263616531663635333032633962666431646339306438303864373164653435
+ 37333530313066613164323331306564353034613538616136376462373533383338326165663135
+ 35326465626333393065313138643433373264313836373864393538633266353634626665396363
+ 34386661646336636535373662346661616164353538353531363066363766393932653363306536
+ 65343562393239613961653530363735643962633731613537393130383533383763316536383131
+ 31616138633832393131613931613931623338306430613862663863313931633537373561333738
+ 64396533626337663265396530313937643762646136636161303139643635333663646364383635
+ 61626132616531393136373165646233666536316266356634373562346365623461663062623136
+ 63396439653635326634646635373930663361343632663439643732333538303665616266373463
+ 61353838303436343432613465376133646230343133643230353762613438333639646230623832
+ 34643562623238613762373466366361343363633764383232383836663730363532333366393365
+ 61373432363034386336323665396139316363353961393964326436346339353132343266656139
+ 32383936353061376238343062393436666438333637353366623535633563353531306330643630
+ 32323535353162386431653636303765643432653465343761386433616137616136623833646233
+ 34376164353935313035
+vault_ssh_id_rsa_pub: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 61666434333632316631393339323432323032383935333831336137383836663639366634653566
+ 6539343264373630396230663062326662376631623736330a333536643065376338323330616638
+ 64346632326438616566626662326463623539323365353164376566383632656130326534336365
+ 3966373764363862360a643830326137386336303433313931613862393534646436393463383838
+ 35326364316639623235316263643661636635346461336162636231363233386530383736633830
+ 34663966653965323735613531333239663032373337613339656530663932323137386330393632
+ 35653833383761333034633932353134373237313731663936383536633061646134633735366232
+ 62316163396564303334663634383431336466383038626466623162623266643365666131393636
+ 39323533376264336166666564326437633530613034373864383036326134633062616562646533
+ 35306563613037643462346538616536313735333631323132383235393037396430363637383639
+ 64356666666134393639643561373234346665653031656331333738346535333161303436663661
+ 35353064643862313064653635373736303034333430616436373034633064333434646431326362
+ 30633635313936303630616433386137623261643133393961386238303633613465326163336361
+ 61323233393064366566363132393935393463353437646235636466343963383062323862653463
+ 32623835343031653061303262656231623963633366633736666664636131396664666135376637
+ 30373231353135353337326431623537376464666663653664333863333531343232616335343539
+ 66326466666162336435663466363639386136396461326164626233663936363233313830363237
+ 30343164366366616130343537636532333237666135383538363338663830623764656164643863
+ 33313530376663613234306136333836313762633764383336373437633236616538323865373565
+ 63656237356434323333613638663631616163366561623231303634626662663364313130336638
+ 61656332373735396331663764356164663232333161663461386135323863366262326536626361
+ 32333762643764323562643062346234393834343237636132313333336332616339633766353036
+ 35373063373063633639333136653963646439633730363233376637336436303934383732613933
+ 64653162393836613835306166643636623066626135326331623634396162623033666463393966
+ 3438
+
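To point the pipeline at a different jumphost key pair, the two vaulted variables above can be regenerated with the same vault password the CI uses; one possible way (paths are illustrative, the password is assumed to be in .vault):

    cat ~/.ssh/id_rsa     | ansible-vault encrypt_string \
        --vault-password-file .vault --stdin-name vault_ssh_id_rsa
    cat ~/.ssh/id_rsa.pub | ansible-vault encrypt_string \
        --vault-password-file .vault --stdin-name vault_ssh_id_rsa_pub

The output blocks can then be pasted into vars/vaulted_ssh_credentials.yml in place of the values above.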