author    Victor Morales <victor.morales@intel.com>    2018-03-13 12:26:08 -0700
committer Victor Morales <victor.morales@intel.com>    2018-03-13 12:26:08 -0700
commit    4d7590ed7425a94c0f87a8461548c2461d79a710 (patch)
tree      083ffc33a4cd6d8eff42deeea1da0b50c49efdfe
parent    ceb22354fcb078e8991a66dc9bc11dd5f21e77f4 (diff)
Migrate vagrant-onap to devtool repo
This change covers the migration of the vagrant-onap tool's code, which was
located under the integration repo, to the devtool repository. The tool was
renamed to avoid misunderstandings about its goals.

Change-Id: I79df8c35fccaa266a789217d441a6cf1183bd42a
Signed-off-by: Victor Morales <victor.morales@intel.com>
Issue-ID: INT-441
-rw-r--r--  .gitignore  3
-rw-r--r--  .gitreview  4
-rw-r--r--  CONTRIBUTING.md  29
-rw-r--r--  LICENSE  201
-rw-r--r--  README.md  93
-rw-r--r--  Vagrantfile  528
-rw-r--r--  doc/source/conf.py  40
-rw-r--r--  doc/source/features/configure_execution.rst  72
-rw-r--r--  doc/source/features/consuming_scripts.rst  88
-rw-r--r--  doc/source/features/example_usage.rst  151
-rw-r--r--  doc/source/features/features.rst  16
-rw-r--r--  doc/source/features/openstack.rst  89
-rw-r--r--  doc/source/index.rst  23
-rw-r--r--  doc/source/install/index.rst  102
-rw-r--r--  doc/source/install/known_issues.rst  63
-rw-r--r--  etc/settings.yaml.development  6
-rw-r--r--  etc/settings.yaml.testing  6
-rwxr-xr-x  lib/_composed_functions  35
-rwxr-xr-x  lib/_onap_functions  107
-rwxr-xr-x  lib/aai  147
-rwxr-xr-x  lib/appc  43
-rwxr-xr-x  lib/ccsdk  36
-rwxr-xr-x  lib/commons  119
-rwxr-xr-x  lib/config/env-vars  79
-rwxr-xr-x  lib/dcae  92
-rw-r--r--  lib/files/aai.pem  102
-rw-r--r--  lib/files/all-in-one  585
-rw-r--r--  lib/files/globals.yml  2
-rw-r--r--  lib/files/haproxy.cfg  120
-rw-r--r--  lib/files/kolla-build.conf  5
-rw-r--r--  lib/files/kubectl_config_generator.py  40
-rw-r--r--  lib/files/passwords.yml  216
-rw-r--r--  lib/files/settings.xml  369
-rwxr-xr-x  lib/functions  450
-rwxr-xr-x  lib/mr  31
-rwxr-xr-x  lib/msb  50
-rwxr-xr-x  lib/mso  94
-rwxr-xr-x  lib/multicloud  51
-rwxr-xr-x  lib/oom  207
-rwxr-xr-x  lib/openstack  75
-rwxr-xr-x  lib/policy  53
-rwxr-xr-x  lib/portal  98
-rwxr-xr-x  lib/robot  45
-rwxr-xr-x  lib/sdc  88
-rwxr-xr-x  lib/sdnc  64
-rwxr-xr-x  lib/vfc  96
-rwxr-xr-x  lib/vid  49
-rwxr-xr-x  lib/vnfsdk  47
-rwxr-xr-x  lib/vvp  40
-rwxr-xr-x  tests/_test_base  33
-rwxr-xr-x  tests/asserts  94
-rw-r--r--  tests/projects.txt  209
-rwxr-xr-x  tests/test_aai  100
-rwxr-xr-x  tests/test_appc  95
-rwxr-xr-x  tests/test_ccsdk  90
-rwxr-xr-x  tests/test_dcae  111
-rwxr-xr-x  tests/test_functions  191
-rwxr-xr-x  tests/test_mr  38
-rwxr-xr-x  tests/test_msb  61
-rwxr-xr-x  tests/test_mso  67
-rwxr-xr-x  tests/test_multicloud  54
-rwxr-xr-x  tests/test_policy  100
-rwxr-xr-x  tests/test_portal  59
-rwxr-xr-x  tests/test_robot  48
-rwxr-xr-x  tests/test_sdc  69
-rwxr-xr-x  tests/test_sdnc  69
-rwxr-xr-x  tests/test_vfc  68
-rwxr-xr-x  tests/test_vid  51
-rwxr-xr-x  tests/test_vnfsdk  57
-rwxr-xr-x  tests/test_vvp  55
-rw-r--r--  tools/Run.ps1  120
-rwxr-xr-x  tools/get_repos.sh  38
-rwxr-xr-x  tools/run.sh  100
-rwxr-xr-x  tools/setup.sh  133
-rwxr-xr-x  tools/setup_openstack.sh  23
-rwxr-xr-x  tools/update_project_list.sh  5
-rw-r--r--  tox.ini  30
-rwxr-xr-x  vagrant_utils/postinstall.sh  26
-rwxr-xr-x  vagrant_utils/unit_testing.sh  14
79 files changed, 7357 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..49ed25d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+.tox/
+doc/build/
+.vagrant/
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 0000000..3f94445
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=gerrit.onap.org
+port=29418
+project=integration/devtool.git
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..8f84227
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,29 @@
+First off, thank you for considering contributing to the Devtool for ONAP project.
+It's people like you that make it such a great tool.
+
+Devtool is an open source project and we love to receive contributions from our
+community — you! There are many ways to contribute, from writing tutorials or
+blog posts, improving the documentation, and submitting bug reports and feature
+requests, to writing code which can be incorporated into Devtool itself.
+
+Unit Testing
+============
+
+The **_tests_** folder contains _test suites_ that ensure the proper
+implementation of the _functions_ created in the **_lib_** folder. In order to
+display the usage information, execute the script with a question mark as
+an argument, as follows:
+
    $ ./tools/run.sh -?
+
+or using PowerShell
+
+ PS C:\> Set-ExecutionPolicy Bypass -Scope CurrentUser
+ PS C:\> .\tools\Run.ps1 testing -?
+
+Examples
+--------
+
    $ ./tools/run.sh -y testing                             # Executes all the Unit Tests in unattended mode
    $ ./tools/run.sh -s functions testing                   # Executes all the Unit Tests of the Functions Test Suite
    $ ./tools/run.sh -s functions -c install_maven testing  # Executes the install_maven Unit Test of the Functions Test Suite
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..47fed4b
--- /dev/null
+++ b/README.md
@@ -0,0 +1,93 @@
+# Devtool for ONAP
+
+This project collects information about how to develop and build
+services for the [ONAP project](https://www.onap.org/) by providing an
+automated development environment.
+
+### Problem Being Solved
+
+* Reduce the barrier to entry, allowing new ONAP developers to ramp up to
+active development quickly
+* Reduce the cost to the community in responding to simple environment setup
+questions faced by new developers
+
+---
+
+| Component | Requirement |
+|------------------|---------------------------------------|
+| Vagrant | >= 1.8.6 |
+| Provider | VirtualBox, Libvirt or OpenStack |
+| Operating System | Linux, Mac OS or Windows |
+| Hard Disk | > 8 GB of free disk |
+| Memory | > 12 GB |
+
+---
+
+## Execution
+
+#### Deploying a single application
+
+* Windows
+
+ PS C:\> cd devtool
+ PS C:\devtool> Set-ExecutionPolicy Bypass -Scope CurrentUser
+ PS C:\devtool> .\tools\Run.ps1 <app_name>
+
+* Linux or Mac OS
+
+ $ cd devtool
+ $ ./tools/run.sh <app_name>
+
+Current options include:
+
+| app_name | description |
+|:----------:|-------------------------------------|
+| aai | Active and Available Inventory |
+| appc | Application Controller |
+| ccsdk | Common Controller SDK |
+| dcae | Data Collection Analytics & Events |
+| mr | Message Router |
+| mso | Master Service Orchestrator |
+| msb | Microservices Bus Project |
+| multicloud | Multi Cloud |
+| oom | ONAP Operations Manager |
+| policy | Policy |
+| portal | Portal |
+| robot | Robot |
+| sdc | Service Design & Creation |
+| sdnc | Software Defined Network Controller |
+| vfc | Virtual Function Controller |
+| vid | Virtual Infrastructure Deployment |
+| vnfsdk | VNF SDK |
+| vvp | VNF Validation Program |
+
+| app_name | description |
+|:----------:|------------------------------------------|
+| all_in_one | All ONAP services in a VM (experimental) |
+| testing | Unit Test VM |
+
+| app_name | description |
+|:----------:|----------------------|
+| openstack | OpenStack Deployment |
+
+#### Generating documentation
+
+The documentation of this project was written in reStructuredText
+format and is located under the [docs folder](../blob/master/doc/source/index.rst).
+It's possible to render these documents to HTML using the Sphinx
+Python tool.
+
+ $ tox -e docs
+
+This results in the creation of a new *doc/build/html* folder with
+the documentation converted into HTML pages that can be viewed with
+the preferred Web Browser.
+
+## Contributing
+
+Bug reports and patches are most welcome.
+See the [contribution guidelines](CONTRIBUTING.md).
+
+## License
+
+Apache-2.0
diff --git a/Vagrantfile b/Vagrantfile
new file mode 100644
index 0000000..4b10392
--- /dev/null
+++ b/Vagrantfile
@@ -0,0 +1,528 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant::DEFAULT_SERVER_URL.replace('https://vagrantcloud.com')
+
+configuration = {
+ # Generic parameters used across all ONAP components
+ 'key_name' => 'ecomp_key',
+ 'pub_key' => '',
+ 'nexus_repo' => 'https://nexus.onap.org/content/sites/raw',
+ 'nexus_repo_root' => 'https://nexus.onap.org',
+ 'nexus_url_snapshot' => 'https://nexus.onap.org/content/repositories/snapshots',
+ 'nexus_docker_repo' => 'nexus3.onap.org:10001',
+ 'nexus_username' => 'docker',
+ 'nexus_password' => 'docker',
+ 'dmaap_topic' => 'AUTO',
+ 'artifacts_version' => '1.0.0',
+ 'docker_version' => 'latest',
+ # Parameters for DCAE instantiation
+ 'dcae_zone' => 'iad4',
+ 'dcae_state' => 'vi',
+ 'openstack_tenant_id' => '',
+ 'openstack_username' => '',
+ 'openstack_api_key' => '',
+ 'openstack_password' => '',
+ 'odl_version' => '0.5.3-Boron-SR3',
+ # Parameters for enabling features
+ 'debug' => 'True',
+ 'build_image' => 'True',
+ 'clone_repo' => 'True',
+ 'compile_repo' => 'False',
+ 'enable_oparent' => 'True',
+ 'skip_get_images' => 'False',
+ 'skip_install' => 'True'
+}
+
+box = {
+ :virtualbox => 'ubuntu/trusty64',
+ :libvirt => 'elastic/ubuntu-14.04-x86_64',
+ :openstack => nil
+}
+
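+# Each node entry below describes one VM: :name, :ips (two private IPs),
+# :macs, :cpus (vCPU count), :cpu (execution cap), :ram (MB), :groups (used
+# to filter nodes by deploy mode), :args (arguments passed to the
+# provisioning script), plus optional :fwds (forwarded ports), :hd (extra
+# disk) and :flavor (OpenStack flavor).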
+nodes = [
+ {
+ :name => "aai",
+ :ips => ['10.252.0.6', "192.168.50.6"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 8 * 1024,
+ :groups => ["individual"],
+ :args => ["aai"],
+ :fwds => [
+ { :guest => 8446, :host => 8446, :guest_ip => '192.168.50.6' },
+ { :guest => 9446, :host => 9446, :guest_ip => '192.168.50.6' },
+ ]
+ },
+ {
+ :name => "all-in-one",
+ :ips => ['10.252.1.3', "192.168.51.3"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 12 * 1024,
+ :groups => ["all-in-one"],
+ :flavor => 'm1.xlarge',
+ :args => ['mr', 'sdc', 'aai', 'mso', 'robot', 'vid', 'sdnc', 'portal', 'dcae', 'policy', 'appc', 'vfc', 'ccsdk', 'multicloud', 'vnfsdk', 'vvp', 'msb'],
+ },
+ {
+ :name => "appc",
+ :ips => ['10.252.0.14', "192.168.50.14"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["individual"],
+ :args => ["appc"],
+ },
+ {
+ :name => "ccsdk",
+ :ips => ['10.252.0.19', "192.168.50.19"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["individual"],
+ :args => ["ccsdk"],
+ },
+ {
+ :name => "dcae",
+ :ips => ['10.252.0.12', "192.168.50.12"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["individual"],
+ :args => ["dcae"],
+ },
+ {
+ :name => "dns",
+ :ips => ['10.252.0.3', "192.168.50.3"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 1 * 1024,
+ :groups => ["individual"],
+ :flavor => 'm1.small',
+ :args => [" "]
+ },
+ {
+ :name => "message-router",
+ :ips => ['10.252.0.4', "192.168.50.4"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["individual"],
+ :args => ["mr"],
+ },
+ {
+ :name => "mso",
+ :ips => ['10.252.0.20', "192.168.50.20"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["individual"],
+ :args => ["mso"],
+ },
+ {
+ :name => "msb",
+ :ips => ['10.252.0.7', "192.168.50.7"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["individual"],
+ :args => ["msb"],
+ },
+ {
+ :name => "multicloud",
+ :ips => ['10.252.0.16', "192.168.50.16"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["individual"],
+ :args => ["multicloud"],
+ :fwds => [
+ { :guest => 9003, :host => 9003, :guest_ip => '192.168.50.16' },
+ ]
+ },
+ {
+ :name => "oom",
+ :ips => ['10.252.0.21', "192.168.50.21"],
+ :macs => [],
+ :cpus => 16,
+ :cpu => "50",
+ :ram => 64 * 1024,
+ :groups => ["individual"],
+ :args => ["oom"],
+ :hd => { :virtualbox => "61440", :libvirt => "60G", },
+ :fwds => [
+ { :guest => 8880, :host => 8880, :guest_ip => '192.168.50.21' },
+ { :guest => 8989, :host => 8989, :guest_ip => '192.168.50.21' },
+ ]
+ },
+ {
+ :name => "policy",
+ :ips => ['10.252.0.13', "192.168.50.13"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["individual"],
+ :args => ["policy"],
+ },
+ {
+ :name => "portal",
+ :ips => ['10.252.0.11', "192.168.50.11"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["individual"],
+ :args => ["portal"],
+ },
+ {
+ :name => "robot",
+ :ips => ['10.252.0.8', "192.168.50.8"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["individual"],
+ :args => ["robot"],
+ },
+ {
+ :name => "sdc",
+ :ips => ['10.252.0.5', "192.168.50.5"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 6 * 1024,
+ :groups => ["individual"],
+ :args => ["sdc"],
+ :hd => { :virtualbox => "20480", :libvirt => "20G", },
+ :fwds => [
+ { :guest => 8285, :host => 8285, :guest_ip => '192.168.50.5' },
+ ]
+ },
+ {
+ :name => "sdnc",
+ :ips => ['10.252.0.10', "192.168.50.10"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["individual"],
+ :args => ["sdnc"],
+ },
+ {
+ :name => "testing",
+ :ips => ['10.252.2.3', "192.168.52.3"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["testing"],
+ :flavor => 'm1.small',
+ :args => [""],
+ },
+ {
+ :name => "vfc",
+ :ips => ['10.252.0.15', "192.168.50.15"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["individual"],
+ :args => ['vfc'],
+ },
+ {
+ :name => "vid",
+ :ips => ['10.252.0.9', "192.168.50.9"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["individual"],
+ :args => ['vid'],
+ },
+ {
+ :name => "vnfsdk",
+ :ips => ['10.252.0.18', "192.168.50.18"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["individual"],
+ :args => ['vnfsdk'],
+ },
+ {
+ :name => "vvp",
+ :ips => ['10.252.0.17', "192.168.50.17"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["individual"],
+ :args => ['vvp'],
+ },
+ {
+ :name => "openstack",
+ :ips => ['10.252.3.3', "192.168.53.3"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 8 * 1024,
+ :groups => ["individual"],
+ :args => ['openstack'],
+ :fwds => [
+ { :guest => 80, :host => 8888, :guest_ip => '192.168.53.4' },
+ { :guest => 6080, :host => 6080, :guest_ip => '192.168.53.4' },
+ ]
+ }
+]
+
+run_path = 'vagrant_utils/postinstall.sh'
+sdc_volume = 'vol1-sdc-data.vdi'
+
+Vagrant.require_version ">= 1.8.6"
+
+# Determine the provider used
+provider = (ENV['VAGRANT_DEFAULT_PROVIDER'] || :virtualbox).to_sym
+puts "[INFO] Provider: #{provider} "
+
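+# Optional user settings file; e.g. VD_CONF=etc/settings.yaml.testing vagrant up
+# would apply the testing sample settings instead of the default etc/settings.yaml.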
+vd_conf = ENV.fetch('VD_CONF', 'etc/settings.yaml')
+if File.exist?(vd_conf)
+ require 'yaml'
+ user_conf = YAML.load_file(vd_conf)
+ configuration.update(user_conf)
+end
+
+# Set network interface
+net_interface = 'vboxnet0'
+is_windows = Gem.win_platform?
+if is_windows
+ net_interface = 'VirtualBox Host-Only Ethernet Adapter #2'
+end
+puts "[INFO] Net interface: #{net_interface}"
+
+
+# If an argument is given, use it; otherwise use the DEPLOY_MODE environment variable, else the default
+requested_machine = ARGV[1]
+
+deploy_mode = ENV.fetch('DEPLOY_MODE', 'individual')
+if requested_machine != nil
+ if requested_machine.include?("all-in-one") || requested_machine.include?("testing")
+ deploy_mode = requested_machine
+ end
+end
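+# e.g. `vagrant up all-in-one` selects the all-in-one mode, while
+# `DEPLOY_MODE=testing vagrant up` selects the testing mode.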
+
+# Catch the status of all machines
+if ARGV[0] == 'status' || ARGV[0] == 'destroy'
+ deploy_mode = 'NA'
+end
+
+puts "[INFO] Deploy Mode: #{deploy_mode}"
+
+# In case of all-in-one or testing clean the nodes list
+case deploy_mode
+ when 'all-in-one'
+ nodes.select! do |node|
+ if node[:name].include?("all-in-one")
+ true if node[:name]
+ end
+ end
+
+ when 'individual'
+ nodes.select! do |node|
+ if node[:groups][0].include?("individual")
+ true if node[:name]
+ end
+ end
+
+ when 'testing'
+ nodes.select! do |node|
+ if node[:name].include?("testing")
+ true if node[:name]
+ end
+ end
+end
+
+Vagrant.configure("2") do |config|
+
+ # PROXY definitions
+ if ENV['http_proxy'] != nil and ENV['https_proxy'] != nil
+ if not Vagrant.has_plugin?('vagrant-proxyconf')
+ system 'vagrant plugin install vagrant-proxyconf'
+ raise 'vagrant-proxyconf was just installed; please run the command again'
+ end
+ config.proxy.enabled = { docker: false }
+ config.proxy.http = ENV['http_proxy']
+ config.proxy.https = ENV['https_proxy']
+ configuration['socks_proxy'] = ENV['socks_proxy']
+ end
+
+ if Vagrant.has_plugin?('vagrant-vbguest')
+ puts 'vagrant-vbguest auto_update feature will be disabled to avoid sharing conflicts'
+ config.vbguest.auto_update = false
+ end
+
+ sync_type = "virtualbox"
+ if provider == :libvirt
+ if not Vagrant.has_plugin?('vagrant-libvirt')
+ system 'vagrant plugin install vagrant-libvirt'
+ raise 'vagrant-libvirt was just installed; please run the command again'
+ end
+ sync_type = "nfs"
+ end
+
+ if provider == :openstack
+ config.ssh.username = 'ubuntu'
+ if not Vagrant.has_plugin?('vagrant-openstack-provider')
+ system 'vagrant plugin install vagrant-openstack-provider'
+ raise 'vagrant-openstack-provider was just installed; please run the command again'
+ end
+ end
+
+ nodes.each do |node|
+ config.vm.define node[:name] do |nodeconfig|
+
+ # NO_PROXY definitions
+ if ENV['no_proxy'] != nil
+ if not Vagrant.has_plugin?('vagrant-proxyconf')
+ system 'vagrant plugin install vagrant-proxyconf'
+ raise 'vagrant-proxyconf was just installed; please run the command again'
+ end
+ config.proxy.no_proxy = node[:ips].join(",") + "," + ENV['no_proxy']
+ end
+
+ # Common Settings:
+
+ nodeconfig.vm.provider "virtualbox" do |vbox|
+ vbox.customize ['modifyvm', :id, '--nictype1', 'virtio']
+ vbox.customize ['modifyvm', :id, '--audio', 'none']
+ vbox.customize ['modifyvm', :id, '--vram', '1']
+ vbox.customize ['modifyvm', :id, "--cpuhotplug", "off"]
+ vbox.customize ['modifyvm', :id, "--cpuexecutioncap", node[:cpu]]
+ vbox.customize ['modifyvm', :id, "--cpus", node[:cpus]]
+ vbox.customize ["modifyvm", :id, "--memory", node[:ram]]
+
+ # Set Network
+ nodeconfig.vm.network :private_network,
+ :adapter => 2,
+ :name => net_interface,
+ :ip => node[:ips][0]
+
+ nodeconfig.vm.network :private_network,
+ :adapter => 3,
+ :ip => node[:ips][1],
+ :type => :static
+
+ # Set Storage
+ if node.has_key? :hd
+ volume_file = node[:name] + '-vol1-data.vdi'
+ unless File.exist?(volume_file)
+ vbox.customize ['createmedium', 'disk', '--filename', volume_file, '--size', node[:hd][provider]]
+ end
+ vbox.customize ['storageattach', :id, '--storagectl', 'SATAController', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', volume_file]
+ end
+ end
+
+ nodeconfig.vm.provider "libvirt" do |lbox|
+ lbox.memory = node[:ram]
+ lbox.nested = true
+ lbox.cpu_mode = 'host-passthrough'
+ lbox.cpus = node[:cpus]
+
+ # Set Network
+ nodeconfig.vm.network :private_network,
+ :ip => node[:ips][0]
+
+ nodeconfig.vm.network :private_network,
+ :ip => node[:ips][1],
+ :type => :static
+
+ # Set Storage
+ if node.has_key? :hd
+ lbox.storage :file, bus: 'sata', device: 'sda', size: node[:hd][provider]
+ end
+ end
+ if node.has_key? :fwds
+ node[:fwds].each do |fwd|
+ nodeconfig.vm.network :forwarded_port,
+ :guest => fwd[:guest],
+ :guest_ip => fwd[:guest_ip],
+ :host => fwd[:host],
+ :host_ip => "0.0.0.0"
+ end
+ end
+
+ nodeconfig.vm.provider :openstack do |obox|
+ obox.openstack_auth_url = ENV.fetch('OS_AUTH_URL', '')
+ obox.tenant_name = ENV.fetch('OS_TENANT_NAME', '')
+ obox.username = ENV.fetch('OS_USERNAME', '')
+ obox.password = ENV.fetch('OS_PASSWORD', '')
+ obox.region = ENV.fetch('OS_REGION_NAME', '')
+ obox.identity_api_version = ENV.fetch('OS_IDENTITY_API_VERSION', '')
+ obox.domain_name = ENV.fetch('OS_PROJECT_DOMAIN_ID', '')
+ obox.project_name = ENV.fetch('OS_PROJECT_NAME', '')
+ obox.floating_ip_pool = ENV.fetch('OS_FLOATING_IP_POOL', '')
+ obox.floating_ip_pool_always_allocate = (ENV['OS_FLOATING_IP_ALWAYS_ALLOCATE'] == 'true')
+ obox.image = ENV.fetch('OS_IMAGE', '')
+ obox.security_groups = [ENV.fetch('OS_SEC_GROUP', '')]
+ obox.networks = ENV.fetch('OS_NETWORK', '')
+ obox.flavor = node[:flavor]
+ obox.server_name = node[:name]
+ end
+
+ # Set Box type
+ if "openstack" == node[:name]
+ box = {
+ :virtualbox => 'ubuntu/xenial64',
+ :libvirt => 'elastic/ubuntu-16.04-x86_64'
+ }
+ end
+ nodeconfig.vm.box = box[provider]
+
+ # Set Node name
+ nodeconfig.vm.hostname = node[:name]
+
+ # Set Sync Folder
+ nodeconfig.vm.synced_folder ".", "/vagrant", disabled: true
+ nodeconfig.vm.synced_folder './opt', '/opt/onap/', create: true, type: sync_type
+ nodeconfig.vm.synced_folder './lib', '/var/onap/', create: true, type: sync_type
+ if !is_windows
+ nodeconfig.vm.synced_folder '~/.m2', '/root/.m2/', create: true
+ end
+
+ # Specific settings:
+
+ if node[:name].include? "testing"
+ nodeconfig.vm.synced_folder './tests', '/var/onap_tests/', create: true
+ test_suite = ENV.fetch('TEST_SUITE', '*')
+ test_case = ENV.fetch('TEST_CASE', '*')
+ # Override variables
+ run_path = 'vagrant_utils/unit_testing.sh'
+ node[:args] = [test_suite, test_case]
+ else
+ configuration['skip_get_images'] = ENV.fetch('SKIP_GET_IMAGES', configuration['skip_get_images'])
+ configuration['skip_install'] = ENV.fetch('SKIP_INSTALL', configuration['skip_install'])
+ end
+
+ if node[:name].include? "vfc"
+ nodeconfig.vm.provision 'docker'
+ end
+
+ nodeconfig.vm.provision 'shell' do |s|
+ s.path = run_path
+ s.args = node[:args]
+ s.env = configuration
+ end
+
+ end #nodeconfig
+ end #node
+end #config
diff --git a/doc/source/conf.py b/doc/source/conf.py
new file mode 100644
index 0000000..906f249
--- /dev/null
+++ b/doc/source/conf.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
+
+sys.path.insert(0, ROOT)
+sys.path.insert(0, BASE_DIR)
+
+# -- General configuration ----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ #'sphinx.ext.intersphinx'
+]
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'ONAP on Vagrant'
diff --git a/doc/source/features/configure_execution.rst b/doc/source/features/configure_execution.rst
new file mode 100644
index 0000000..df08605
--- /dev/null
+++ b/doc/source/features/configure_execution.rst
@@ -0,0 +1,72 @@
+=======================
+Modify execution values
+=======================
+
+In order to provide a flexible platform that adjusts to different developer
+needs, there are two mechanisms to configure the execution of this project.
+
+Settings configuration file
+---------------------------
+
+The first mechanism refers to the process of replacing default configuration
+values through the settings configuration file. This file needs to be placed in
+the *./etc* folder and named *settings.yaml*. It must contain the key/value
+pairs for the configuration values that will be overridden.
+
+.. note::
+
+ There are sample files (e.g. settings.yaml.development and
+ settings.yaml.testing) placed in the *./etc* folder. Their purpose is to
+ provide a reference for different configurations.
+
+.. end
+
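+For example, a minimal *etc/settings.yaml* that overrides two of the
+defaults could look as follows (the chosen values are only illustrative):
+
+.. code-block:: yaml
+
+ compile_repo: "True"
+ skip_install: "False"
+
+.. end
+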
+Configuration values:
+
++------------------+-------------------+---------------------------------------+
+| Key | Values | Description |
++==================+===================+=======================================+
+| build_image | "True" or "False" | Determines if the Docker image is |
+| | | retrieved from public hub or built |
+| | | from source code. |
++------------------+-------------------+---------------------------------------+
+| clone_repo | "True" or "False" | Determines if all the source code |
+| | | repositories of a given component are |
+| | | cloned locally. |
++------------------+-------------------+---------------------------------------+
+| compile_repo | "True" or "False" | Determines if all the source code |
+| | | repositories of a given component are |
+| | | going to be compiled. |
++------------------+-------------------+---------------------------------------+
+| enable_oparent | "True" or "False" | Determines if the OParent project |
+| | | will be used during the maven |
+| | | compilation. |
++------------------+-------------------+---------------------------------------+
+| skip_get_images | "True" or "False" | Determines if the process to build or |
+| | | retrieve docker images of a given |
+| | | component is going to be skipped. |
++------------------+-------------------+---------------------------------------+
+| skip_install | "True" or "False" | Determines if the process to start |
+| | | the services of a given component |
+| | | will be skipped. |
++------------------+-------------------+---------------------------------------+
+
+Parameters
+----------
+
+The **skip_get_images** and **skip_install** keys are the only two configuration
+values that can be overridden, using *-g* and *-i* respectively, by the run scripts
+(*./tools/run.sh* and *.\\tools\\Run.ps1*).
+
+.. note::
+
+ The script parameters take precedence over the configuration file.
+
+.. end
+
+.. code-block:: console
+
+ $ ./tools/run.sh sdc -g
+
+.. end
+
diff --git a/doc/source/features/consuming_scripts.rst b/doc/source/features/consuming_scripts.rst
new file mode 100644
index 0000000..4290c25
--- /dev/null
+++ b/doc/source/features/consuming_scripts.rst
@@ -0,0 +1,88 @@
+==============================
+Using the provisioning scripts
+==============================
+
+Vagrant is a platform that uses prebaked images called
+*vagrant boxes* to guarantee that running a provisioning
+script multiple times will result in an expected output. This
+mechanism is crucial for reducing the number of external factors
+during the creation, development and testing of provisioning scripts.
+However, it's possible to provide an ONAP development environment
+without having to install the Vagrant tool. This document explains how to
+consume the provisioning scripts located in the **./lib** folder to
+provision a development environment, and the environment variables
+that modify their behavior.
+
+This project was built on an Ubuntu 14.04 ("Trusty") Operating System;
+therefore it's necessary to have a user with *sudo* permissions on the
+target Bare Metal or Virtual Machine.
+
+The following instructions retrieve the provisioning scripts and place
+them into the */var/onap/* folder.
+
+.. code-block:: console
+
+ $ sudo apt-get install git -y
+ $ git clone https://git.onap.org/integration/devtool
+ $ cd /var/
+ $ sudo ln -s ~/devtool/lib/ ./onap
+
+.. end
+
+Which provisioning script to load depends on the desired ONAP
+service; for example, setting up the development environment for the
+Active and Available Inventory (AAI) service requires loading the
+*/var/onap/aai* script.
+
+.. note::
+
+ The **git_src_folder** environment variable specifies the
+ source code destination folder. Its default value is */opt/onap/*,
+ but it can be changed only after the provisioning scripts are
+ loaded.
+
+.. end
+
+.. code-block:: console
+
+ # source /var/onap/aai
+
+.. end
+
+Lastly, every script defines an initialization function with
+*init_* as prefix. This function is the starting point to provision
+the chosen ONAP service. This example uses the *init_aai* function
+to provision an AAI development environment.
+
+.. note::
+
+ The **compile_repo** environment variable defines whether or not
+ the source code located in the repositories of the service is
+ compiled. Enabling this value can impact the provisioning time of
+ the service.
+
+.. end
+.. note::
+
+ **nexus_docker_repo**, **nexus_username** and **nexus_password**
+ environment variables specify the connectivity to a private Docker
+ Hub.
+
+.. end
+.. note::
+
+ The **build_image** environment variable allows building the Docker
+ images from source code. Enabling this value can impact the
+ provisioning time of the service.
+
+.. end
+
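+These variables can be exported in the shell session before calling the
+initialization function, for example (the values are only illustrative):
+
+.. code-block:: console
+
+ # export compile_repo=True
+ # export build_image=False
+
+.. end
+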
+.. code-block:: console
+
+ # init_aai
+
+.. end
+
+As a result, the source code is pulled into the */opt/onap/aai/* folder and the
+AAI services are up and running with the proper connection to the
+Titan Distributed Graph Database.
diff --git a/doc/source/features/example_usage.rst b/doc/source/features/example_usage.rst
new file mode 100644
index 0000000..1aae887
--- /dev/null
+++ b/doc/source/features/example_usage.rst
@@ -0,0 +1,151 @@
+=================================================
+Example usage to bring up a developer environment
+=================================================
+
+In this example, we will bring up a single ONAP component using the Devtool ONAP
+tool.
+
+There are multiple scenarios in which a developer can make use of this
+tool; they usually fall into the following use cases.
+
+Use case 1: Use Devtool ONAP to just clone project related source code
+----------------------------------------------------------------------
+
+In this use case, the developer needs just the source code of the project to work on.
+
+Since the Devtool ONAP project supports building docker containers and compiling
+source files, we first need to edit the settings.yaml file to add key/value pairs
+indicating we only need to clone the repo, and neither build a docker image nor
+compile the sources. By default, Devtool ONAP clones the repo; to skip the build
+process and the retrieval of docker images, the following is required in the settings file.
+
+.. code-block:: console
+
+ skip_get_images: "True"
+
+.. end
+
+The reason this is done is because, as mentioned in the
+`configure execution docs <https://git.onap.org/integration/devtool/tree/doc/source/features/configure_execution.rst>`_,
+the default values taken are:
+
+.. code-block:: console
+
+ 'build_image' => 'True',
+ 'clone_repo' => 'True',
+ 'compile_repo' => 'False',
+ 'enable_oparent' => 'True',
+ 'skip_get_images' => 'False',
+ 'skip_install' => 'True'
+
+.. end
+
+We override them, and skip_get_images takes precedence over build_image.
+
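+With the setting in place, cloning the source code of a component, for
+example *aai* (any app_name from the README works), reduces to:
+
+.. code-block:: console
+
+ $ ./tools/run.sh aai
+
+.. end
+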
+Use case 2: Use Devtool ONAP to clone project related source code and clone Docker Images
+-----------------------------------------------------------------------------------------
+
+In this use case, the developer needs to clone docker images of the project to work on.
+
+For this case, we will edit the settings.yaml file to add key value pairs indicating we
+need to clone repo and clone docker image from Nexus.
+
+.. code-block:: console
+
+ build_image: "False"
+ compile_repo: "True"
+ skip_get_images: "False"
+ skip_install: "True"
+
+.. end
+
+Use case 3: Use Devtool ONAP to clone project related source code and build Docker Images locally
+-------------------------------------------------------------------------------------------------
+
+In this use case, the developer needs to build docker images of the project to work on.
+
+For this case, we will edit the settings.yaml file to add key value pairs indicating we need to
+clone repo and build docker image locally and not fetch them from Nexus.
+
+.. code-block:: console
+
+ build_image: "True"
+ compile_repo: "True"
+ skip_get_images: "False"
+ skip_install: "True"
+
+.. end
+
+Use case 4: Use Devtool ONAP to clone project related source code and build Docker Images and start services
+------------------------------------------------------------------------------------------------------------
+
+In this use case, the developer needs to build docker images of the project he or
+she wants to work on and start the services running inside them.
+
+For this case, we will edit the settings.yaml file to add key value pairs indicating
+we need to clone repo, compile repo, build docker image and run the image.
+
+.. code-block:: console
+
+ build_image: "True"
+ compile_repo: "True"
+ skip_get_images: "False"
+ skip_install: "False"
+
+.. end
+
+Once the required changes to the settings file are added, we can use the run.sh
+script in the tools directory to set up the development environment.
+
+Example steps for setting up a development environment for the VFC project
+----------------------------------------------------------------------------
+
+In this example we will use Devtool ONAP to get all the source code of the VFC
+project; the developer can then point the IDE to the cloned repo in the ./opt
+directory and start the development process.
+
+.. code-block:: console
+
+ $ ./tools/run.sh vfc
+
+.. end
+
+At the end of the setup process, all the VFC related source code will be present
+in the devtool/opt/ directory. The developer can point an IDE to this directory
+and start contributing. When the changes are done, the developer can SSH into the VM
+running VFC, and tests can be executed by running Maven for Java and Tox for Python
+from the /opt/onap/vfc directories.
+
+.. code-block:: console
+
+ $ vagrant ssh vfc
+ $ cd /opt/onap/vfc/<vfc-subrepo>
+ $ tox -e py27
+
+.. end
+
+This way the tool helps the developer clone the repos of a particular project,
+without having to manually search for repos and set up an environment.
+
+Also, if something gets messed up in the VM, the developer can tear down the VM
+and spin up a fresh one without losing the changes made to the source code, since
+the ./opt files are kept in sync between the host and the VM.
+
+.. code-block:: console
+
+ $ vagrant destroy vfc
+
+.. end
+
+Testing
+-------
+
+Use the run.sh script to test whether the provisioning scripts run without errors.
+
+An example test that checks the number of covered repositories with this tool:
+
+.. code-block:: console
+
+ $ ./tools/run.sh testing -y -c coverity_repos -s functions
+
+.. end
diff --git a/doc/source/features/features.rst b/doc/source/features/features.rst
new file mode 100644
index 0000000..9055301
--- /dev/null
+++ b/doc/source/features/features.rst
@@ -0,0 +1,16 @@
+.. _features:
+
+=================
+Advanced features
+=================
+
+.. toctree::
+ :maxdepth: 1
+
+ openstack.rst
+ consuming_scripts.rst
+ configure_execution.rst
+ example_usage.rst
+
+This chapter explains how to use the Devtool ONAP tool with advanced features
+like different providers.
diff --git a/doc/source/features/openstack.rst b/doc/source/features/openstack.rst
new file mode 100644
index 0000000..f44bc62
--- /dev/null
+++ b/doc/source/features/openstack.rst
@@ -0,0 +1,89 @@
+==================
+OpenStack Provider
+==================
+
+It's possible to use Vagrant to provision VMs on OpenStack using the
+`Vagrant OpenStack Cloud Provider <https://github.com/ggiamarchi/vagrant-openstack-provider/>`_.
+The only requirements for the Cloud provider are an Ubuntu Cloud
+image accessible to your tenant and a Security Rule that allows SSH
+access into the instance.
+
+Environment variables
+---------------------
+
+OpenStack command-line clients use environment variables to avoid
+repeating some values. These variables have *OS_* as
+prefix. This provider will use them for authentication against the
+Keystone service.
+
+.. code-block:: console
+
+ export OS_AUTH_URL=http://<keystone_ip>:5000/v3
+ export OS_TENANT_NAME=<project_or_tenant_name>
+ export OS_PROJECT_NAME=<project_or_tenant_name>
+ export OS_USERNAME=<openstack_username>
+ export OS_PASSWORD=<openstack_password>
+ export OS_REGION_NAME=<openstack_region_name>
+ export OS_IDENTITY_API_VERSION=<keystone_version_number>
+ export OS_PROJECT_DOMAIN_ID=<openstack_domain_name>
+
+.. end
+
+The OpenStack Vagrant provider needs additional information about the
+name of the image to be used and the network where the instance
+will be provisioned. That information can be passed using the
+following variables:
+
+.. code-block:: console
+
+ export OS_IMAGE=<ubuntu_cloud_image_name>
+ export OS_NETWORK=<neutron_private_network>
+ export OS_FLOATING_IP_POOL=<neutron_floating_ip_pool>
+ export OS_SEC_GROUP=<onap-ssh-secgroup>
+
+.. end
+
+Tenant setup
+------------
+
+The *tools/setup_openstack.sh* script can be useful to get an idea
+of the process of setting up the OpenStack environment with the
+necessary requirements. This script depends on the environment
+variables explained previously.
+
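+A typical invocation, assuming the *OS_* prefixed variables described
+above are already exported in the current shell, could simply be:
+
+.. code-block:: console
+
+ $ ./tools/setup_openstack.sh
+
+.. end
+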
+----
+
+Devstack
+--------
+
+It's possible to use this plugin to provision instances on
+`Devstack <https://docs.openstack.org/devstack/latest/>`_. This is
+an example of the *local.conf* file that can be used as input
+for Devstack:
+
+.. path local.conf
+.. code-block:: ini
+
+ [[local|localrc]]
+ ADMIN_PASSWORD=<password>
+ DATABASE_PASSWORD=<password>
+ RABBIT_PASSWORD=<password>
+ SERVICE_PASSWORD=<password>
+ SERVICE_TOKEN=<token>
+
+ # Used to only upload the Ubuntu Cloud Image
+ DOWNLOAD_DEFAULT_IMAGES=False
+ IMAGE_URLS+="http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img"
+
+ # (Optional) These values help to improve the experience of deploying and using Devstack
+ GIT_BASE=https://git.openstack.org
+ FORCE_CONFIG_DRIVE="True"
+ disable_service tempest
+
+.. end
+
+.. note::
+
+ There is a validation that checks whether the
+ *vagrant-openstack-provider* plugin is installed, raising an error
+ when it isn't.
diff --git a/doc/source/index.rst b/doc/source/index.rst
new file mode 100644
index 0000000..af5777c
--- /dev/null
+++ b/doc/source/index.rst
@@ -0,0 +1,23 @@
+ONAP on Vagrant tool
+====================
+
+This project collects instructions to set up a development environment
+using different providers like VirtualBox, Libvirt or OpenStack.
+
+.. seealso::
+
+ You may want to read the `Bootstrap documentation`__ to get an idea of the
+ concepts.
+
+ __ https://wiki.onap.org/display/DW/ONAP+on+Vagrant
+
+Table of contents
+-----------------
+
+.. toctree::
+ :maxdepth: 2
+
+ Installation Guide <install/index>
+ Known Issues <install/known_issues>
+ Advanced Features <features/features>
+
diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst
new file mode 100644
index 0000000..62f57e6
--- /dev/null
+++ b/doc/source/install/index.rst
@@ -0,0 +1,102 @@
+==================
+Installation Guide
+==================
+
+This project collects instructions related to the automatic creation
+of a development environment. However, it requires only two
+components prior to its execution: an automation
+building tool (Vagrant) and a provider platform (VirtualBox, Libvirt
+or OpenStack). This section explains how to install the most common
+configuration set (Vagrant/VirtualBox) on different Operating
+Systems.
+
+Ubuntu 14.04 ("Trusty")
+-----------------------
+
+.. code-block:: console
+
+ $ wget -q https://releases.hashicorp.com/vagrant/2.0.1/vagrant_2.0.1_x86_64.deb
+ $ sudo dpkg -i vagrant_2.0.1_x86_64.deb
+
+ $ echo "deb http://download.virtualbox.org/virtualbox/debian trusty contrib" >> /etc/apt/sources.list
+ $ wget -q https://www.virtualbox.org/download/oracle_vbox_2016.asc -O- | sudo apt-key add -
+ $ wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | sudo apt-key add -
+ $ sudo apt-get update -y
+ $ sudo apt-get install -y virtualbox-5.1 dkms
+
+ $ sudo apt install -y nfs-kernel-server
+
+.. end
+
+CentOS
+------
+
+.. code-block:: console
+
+ $ wget -q https://releases.hashicorp.com/vagrant/2.0.1/vagrant_2.0.1_x86_64.rpm
+ $ sudo yum install vagrant_2.0.1_x86_64.rpm
+
+ $ sudo wget -q http://download.virtualbox.org/virtualbox/rpm/rhel/virtualbox.repo -P /etc/yum.repos.d
+ $ sudo yum --enablerepo=epel install dkms
+ $ wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | sudo rpm --import -
+ $ sudo yum install -y VirtualBox-5.1
+
+ $ sudo yum install -y nfs-utils nfs-utils-lib
+
+.. end
+
+Mac OS
+------
+
+.. code-block:: console
+
+ $ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
+ $ brew cask install vagrant
+ $ brew cask install virtualbox
+
+.. end
+
+Windows 7+ (PowerShell v2+)
+---------------------------
+
+.. code-block:: console
+
+ PS C:\> Set-ExecutionPolicy AllSigned
+ PS C:\> iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
+ PS C:\> choco install vagrant
+ PS C:\> choco install virtualbox
+
+.. end
+
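+Regardless of the Operating System, a quick way to verify that both
+tools were installed correctly is to query their versions (the output
+varies with the installed release):
+
+.. code-block:: console
+
+ $ vagrant --version
+ $ VBoxManage --version
+
+.. end
+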
+.. note::
+
+ Some corporations use Proxy Servers to protect their assets
+ from security threats. This project uses the Proxy Environment
+ variables to connect to those servers in order to download the
+ content required during the setup. The method to set up these
+ variables depends on the Operating System that is used.
+
+ * Linux or Mac OS
+
+ .. code-block:: console
+
+ $ export http_proxy=<proxy>
+ $ export https_proxy=<proxy>
+ $ export no_proxy=<no_proxy_urls>
+
+ .. end
+
+ * Windows
+
+ .. code-block:: console
+
+ C:\> setx http_proxy <proxy>
+ C:\> setx https_proxy <proxy>
+ C:\> setx no_proxy <no_proxy_urls>
+
+ .. end
+
+.. note::
+
+ Vagrant can be configured to use a different default provider
+ through the environment variable **VAGRANT_DEFAULT_PROVIDER**.
diff --git a/doc/source/install/known_issues.rst b/doc/source/install/known_issues.rst
new file mode 100644
index 0000000..8db55e7
--- /dev/null
+++ b/doc/source/install/known_issues.rst
@@ -0,0 +1,63 @@
+============
+Known Issues
+============
+
+Virtualbox guest additions conflict with shared directories
+-----------------------------------------------------------
+
+If the **vagrant-vbguest** plugin is installed on the host, then an
+updated version of the Virtualbox guest additions will be installed
+on the guest in the /opt directory. Once this project's Vagrantfile
+mounts the ./opt directory on the host to the /opt directory on the
+guest during the provisioning process, the guest additions on the
+guest are hidden, and subsequent mounts of shared directories with the
+host will fail.
+
+The simplest workaround appears to be uninstalling the
+*vagrant-vbguest* plugin on the host system. This has been observed
+to work on a Windows 10 host using virtualbox 5.1.26.
+
+Check if the vagrant-vbguest plugin is installed:
+
+- Linux or Mac
+
+.. code-block:: console
+
+ $ vagrant plugin list
+.. end
+
+- Windows
+
+.. code-block:: console
+
+ C:\> vagrant plugin list
+.. end
+
+Remove the vagrant-vbguest plugin:
+
+- Linux or Mac
+
+.. code-block:: console
+
+ $ vagrant plugin uninstall vagrant-vbguest
+.. end
+
+- Windows
+
+.. code-block:: console
+
+ C:\> vagrant plugin uninstall vagrant-vbguest
+.. end
+
+
+Network configuration in Windows
+--------------------------------
+
+Some Virtual Machines present a problem in their network configuration, so to
+make sure the installation works as it should, install VirtualBox from the
+cmd window with the following command:
+
+.. code-block:: console
+
+ c:\downloads\VirtualBox-5.1.20-114628-Win.exe -msiparams NETWORKTYPE=NDIS5
+.. end
diff --git a/etc/settings.yaml.development b/etc/settings.yaml.development
new file mode 100644
index 0000000..7e1a1ec
--- /dev/null
+++ b/etc/settings.yaml.development
@@ -0,0 +1,6 @@
+build_image: "True"
+clone_repo: "True"
+compile_repo: "False"
+enable_oparent: "True"
+skip_get_images: "False"
+skip_install: "True"
diff --git a/etc/settings.yaml.testing b/etc/settings.yaml.testing
new file mode 100644
index 0000000..0a81e2d
--- /dev/null
+++ b/etc/settings.yaml.testing
@@ -0,0 +1,6 @@
+build_image: "False"
+clone_repo: "False"
+compile_repo: "False"
+enable_oparent: "False"
+skip_get_images: "False"
+skip_install: "False"
diff --git a/lib/_composed_functions b/lib/_composed_functions
new file mode 100755
index 0000000..9f2d0a1
--- /dev/null
+++ b/lib/_composed_functions
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# build_docker_image() - Build Docker container image from source code
+function build_docker_image {
+ local src_folder=$1
+ local profile=$2
+ install_maven
+ install_docker
+ pushd $src_folder
+
+ # Cleanup external repo
+ sed -i 's|${docker.push.registry}/||g' pom.xml
+ local mvn_docker="mvn clean package docker:build"
+ if [ $profile ]; then
+ mvn_docker+=" -P $profile"
+ fi
+ if [ $http_proxy ]; then
+ if ! grep -ql "docker.buildArg.http_proxy" pom.xml ; then
+ mvn_docker+=" -Ddocker.buildArg.http_proxy=$http_proxy"
+ fi
+ if ! grep -ql "docker.buildArg.HTTP_PROXY" pom.xml ; then
+ mvn_docker+=" -Ddocker.buildArg.HTTP_PROXY=$http_proxy"
+ fi
+ fi
+ if [ $https_proxy ]; then
+ if ! grep -ql "docker.buildArg.https_proxy" pom.xml ; then
+ mvn_docker+=" -Ddocker.buildArg.https_proxy=$https_proxy"
+ fi
+ if ! grep -ql "docker.buildArg.HTTPS_PROXY" pom.xml ; then
+ mvn_docker+=" -Ddocker.buildArg.HTTPS_PROXY=$https_proxy"
+ fi
+ fi
+ eval $mvn_docker
+ popd
+}
diff --git a/lib/_onap_functions b/lib/_onap_functions
new file mode 100755
index 0000000..c65e589
--- /dev/null
+++ b/lib/_onap_functions
@@ -0,0 +1,107 @@
+#!/bin/bash
+
+# create_configuration_files() - Store credentials in files
+function create_configuration_files {
+ local onap_config_folder="/opt/config"
+
+ mkdir -p $onap_config_folder
+ pushd $onap_config_folder
+ echo $nexus_docker_repo > nexus_docker_repo.txt
+ echo $nexus_username > nexus_username.txt
+ echo $nexus_password > nexus_password.txt
+ echo $openstack_username > openstack_username.txt
+ echo $openstack_tenant_id > tenant_id.txt
+ echo $dmaap_topic > dmaap_topic.txt
+ echo $docker_version > docker_version.txt
+ popd
+}
+
+# docker_openecomp_login() - Login to OpenECOMP Docker Hub
+function docker_openecomp_login {
+ install_docker
+ docker login -u ${nexus_username:-docker} -p ${nexus_password:-docker} ${nexus_docker_repo:-nexus3.onap.org:10001}
+}
+
+# pull_openecomp_image() - Pull Docker container image from a Docker Registry Hub
+function pull_openecomp_image {
+ local image=$1
+ local tag=$2
+ docker_openecomp_login
+ pull_docker_image ${nexus_docker_repo:-nexus3.onap.org:10001}/openecomp/${image}:${docker_version:-latest} $tag
+ docker logout
+}
+
+# pull_onap_image() - Pull Docker container image from a Docker Registry Hub
+function pull_onap_image {
+ local image=$1
+ local tag=$2
+ docker_openecomp_login
+ pull_docker_image ${nexus_docker_repo:-nexus3.onap.org:10001}/onap/${image}:${docker_version:-latest} $tag
+ docker logout
+}
+
+# configure_bind() - Configure bind utils
+function configure_bind {
+ _install_bind
+ mkdir /etc/bind/zones
+
+ curl -k $nexus_repo/org.openecomp.demo/boot/$artifacts_version/db_simpledemo_openecomp_org -o /etc/bind/zones/db.simpledemo.openecomp.org
+ curl -k $nexus_repo/org.openecomp.demo/boot/$artifacts_version/named.conf.options -o /etc/bind/named.conf.options
+ curl -k $nexus_repo/org.openecomp.demo/boot/$artifacts_version/named.conf.local -o /etc/bind/named.conf.local
+
+ modprobe ip_gre
+ sed -i "s/OPTIONS=.*/OPTIONS=\"-4 -u bind\"/g" /etc/default/bind9
+ service bind9 restart
+}
+
+# _configure_maven() - Creates a Maven configuration file if one doesn't exist
+function _configure_maven {
+ local proxies_start=" <!--"
+ local proxies=" \|"
+ local proxies_end=" \|-->"
+ local mvn_http=""
+ local mvn_https=""
+
+    if [ $http_proxy ] || [ $https_proxy ]; then
+ proxies_start=" <proxies>"
+ proxies=" "
+ proxies_end=" <\/proxies>"
+ if [ $http_proxy ]; then
+ proxy_domain=`echo $http_proxy | awk -F/ '{print $3}' | awk -F: '{print $1}'`
+ proxy_port=`echo $http_proxy | awk -F/ '{print $3}' | awk -F: '{print $2}'`
+ mvn_http="<proxy>\n <id>http</id>\n <active>true</active>\n <protocol>http</protocol>\n <host>$proxy_domain</host>\n <port>$proxy_port</port>\n <nonProxyHosts>${no_proxy}</nonProxyHosts>\n </proxy>"
+ fi
+ if [ $https_proxy ]; then
+ proxy_domain=`echo $https_proxy | awk -F/ '{print $3}' | awk -F: '{print $1}'`
+ proxy_port=`echo $https_proxy | awk -F/ '{print $3}' | awk -F: '{print $2}'`
+ mvn_https="<proxy>\n <id>https</id>\n <active>true</active>\n <protocol>https</protocol>\n <host>$proxy_domain</host>\n <port>$proxy_port</port>\n <nonProxyHosts>${no_proxy}</nonProxyHosts>\n </proxy>"
+ fi
+ fi
+
+ mkdir -p $(dirname $mvn_conf_file)
+ if [ ! -f $mvn_conf_file ]; then
+ if [[ "$enable_oparent" == "True" ]]; then
+ clone_repo oparent
+ cp $git_src_folder/oparent/settings.xml $mvn_conf_file
+ sed -i "s|<\/profiles>|<\/profiles>\n%PROXIES_START%\n%PROXIES% %HTTP_PROXY%\n%PROXIES% %HTTPS_PROXY%\n%PROXIES_END%|g" $mvn_conf_file
+ else
+ cp /var/onap/files/settings.xml $mvn_conf_file
+ fi
+
+ sed -e "
+ s|%PROXIES_START%|$proxies_start|g;
+ s|%PROXIES%|$proxies|g;
+ s|%HTTP_PROXY%|$mvn_http|g;
+ s|%HTTPS_PROXY%|$mvn_https|g;
+ s|%PROXIES_END%|$proxies_end|g
+ " -i $mvn_conf_file
+ fi
+}
+
+# configure_service() - Download and configure a specific service in upstart
+function configure_service {
+ local service_script=$1
+ curl -k $nexus_repo/org.openecomp.demo/boot/$artifacts_version/$service_script -o /etc/init.d/$service_script
+ chmod +x /etc/init.d/$service_script
+ update-rc.d $service_script defaults
+}
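+
+# Example usage (a sketch; the script name is hypothetical and must exist
+# among the Nexus boot artifacts for the configured $artifacts_version):
+#   configure_service my_service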
diff --git a/lib/aai b/lib/aai
new file mode 100755
index 0000000..1ce3485
--- /dev/null
+++ b/lib/aai
@@ -0,0 +1,147 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+hbase_version=1.2.0
+
+# install_hadoop() - Function that installs the Titan/Hadoop distribution
+function install_hadoop {
+ local release=titan
+ local version=1.0.0
+ local filename=$release-$version-hadoop1
+ local dest_folder=/opt/hadoop/current
+
+ if [ ! -d $dest_folder ]; then
+ curl http://s3.thinkaurelius.com/downloads/$release/$filename.zip -o /tmp/${filename}.zip
+ install_package unzip
+ mkdir -p $dest_folder
+ unzip /tmp/${filename}.zip -d $dest_folder
+ fi
+
+ pushd $dest_folder/${filename}
+ # Change commitlog_directory and data_file_directories values (https://stackoverflow.com/a/26856246/1707651)
+ sed -i "s|db/cassandra/data|/tmp/data|g" conf/cassandra/cassandra.yaml
+ sed -i "s|db/cassandra/commitlog|/tmp/commitlog|g" conf/cassandra/cassandra.yaml
+
+ install_java
+ ./bin/titan.sh start
+ popd
+}
+
+# install_haproxy() - Function that installs HAProxy
+function install_haproxy {
+ if is_package_installed haproxy; then
+ return
+ fi
+ install_package software-properties-common
+ add-apt-repository -y ppa:vbernat/haproxy-1.7
+ update_repos
+ install_package haproxy
+ cp /var/onap/files/haproxy.cfg /etc/haproxy/
+ cp /var/onap/files/aai.pem /etc/ssl/private/
+ chmod 640 /etc/ssl/private/aai.pem
+ chown root:ssl-cert /etc/ssl/private/aai.pem
+ mkdir -p /usr/local/etc/haproxy
+ #echo "127.0.0.1 localhost aai-traversal.api.simpledemo.openecomp.org aai-resources.api.simpledemo.openecomp.org" >> /etc/hosts
+
+ service haproxy restart
+}
+
+# compile_aai_repos() - Function that compiles the AAI source repos.
+function compile_aai_repos {
+ local repos="aai/aai-common aai/resources aai/logging-service aai/traversal"
+ if [[ "$compile_repo" == "True" ]]; then
+ repos="${repos[aai]}"
+ fi
+
+ for repo in ${repos[@]}; do
+ compile_src ${src_folders[aai]}${repo#*aai}
+ done
+}
+
+# setup_titan() - Function that configures AAI services to connect to Hadoop Titan
+function setup_titan {
+ local subdirectory="bundleconfig-local/etc/appprops"
+ install_python_package crudini
+
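+    # crudini --set FILE "" KEY VALUE writes KEY=VALUE into the section-less
+    # top of FILE; here both Titan property files are pointed at the local
+    # Cassandra backend.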
+ for dirc in resources/aai-resources traversal/aai-traversal; do
+ for file in titan-cached.properties titan-realtime.properties; do
+ crudini --set "${src_folders[aai]}/$dirc/$subdirectory/$file" "" "storage.backend" "cassandra"
+ crudini --set "${src_folders[aai]}/$dirc/$subdirectory/$file" "" "storage.hostname" "localhost"
+ done
+ done
+
+ # Add the schema to the local instance
+ compile_src ${src_folders[aai]}/resources/aai-resources/
+ uninstall_packages default-jre openjdk-7-jdk openjdk-7-jre openjdk-7-jre-headless
+ pushd ${src_folders[aai]}
+ java -DAJSC_HOME=${src_folders[aai]}/resources/aai-resources -DBUNDLECONFIG_DIR="bundleconfig-local" -cp aai-common/aai-core/target/aai-core-*.jar:resources/aai-resources/target/aai-resources.jar:resources/aai-resources/target/userjars/* org.onap.aai.dbgen.GenTester
+ popd
+}
+
+# _start_data_managment() - Function that starts a data management service
+function _start_data_managment {
+ local service=$1
+ local debug_port=$2
+
+ install_maven
+ pushd ${src_folders[aai]}/$service
+ export MAVEN_OPTS="-Xms1024m -Xmx5120m -XX:PermSize=2024m -Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,address=$debug_port,server=y,suspend=n"
+ mvn -P runAjsc &
+ popd
+}
+
+# start_aai_microservices() - Function that starts AAI microservices
+function start_aai_microservices {
+ _start_data_managment resources 9446
+ sleep 360
+ _start_data_managment traversal 9447
+}
+
+# install_aai() - Install AAI Services
+function install_aai {
+ install_docker_compose
+ pushd ${src_folders[aai]}/test-config
+ ./deploy_vm2.sh
+ ./deploy_vm1.sh
+ popd
+}
+
+# get_aai_images() - Function that pulls or creates AAI docker images
+function get_aai_images {
+ pull_docker_image elasticsearch:2.4.1
+ docker_openecomp_login
+ pull_docker_image ${nexus_docker_repo:-nexus3.onap.org:10001}/aaionap/hbase:${hbase_version}
+
+ if [[ "$build_image" == "True" ]]; then
+ unset MAVEN_OPTS
+ for project in resources/aai-resources traversal/aai-traversal search-data-service data-router model-loader sparky-be; do
+ build_docker_image ${src_folders[aai]}/$project
+ done
+ else
+ for image in aai-resources aai-traversal search-data-service data-router model-loader sparky-be; do
+ pull_onap_image $image
+ done
+ fi
+}
+
+# init_aai() - Function that initializes AAI services
+function init_aai {
+ install_hadoop
+ install_haproxy
+
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "aai"
+ fi
+ compile_aai_repos
+
+ setup_titan
+ #start_aai_microservices
+
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_aai_images
+ fi
+ if [[ "$skip_install" == "False" ]]; then
+ install_aai
+ fi
+}
diff --git a/lib/appc b/lib/appc
new file mode 100755
index 0000000..95654fc
--- /dev/null
+++ b/lib/appc
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+source /var/onap/sdnc
+source /var/onap/functions
+
+# _build_appc_images() - Function that creates APPC images from source code.
+function _build_appc_images {
+ get_sdnc_images
+ build_docker_image ${src_folders[appc]}/deployment/installation/appc docker
+}
+
+# get_appc_images() - Function that gets or builds APPC docker images
+function get_appc_images {
+ if [[ "$build_image" == "True" ]]; then
+ _build_appc_images
+ else
+ for image in appc-image dgbuilder-sdnc-image; do
+ pull_openecomp_image $image openecomp/$image:latest
+ done
+ fi
+}
+
+# install_appc() - Function that installs the APPC services using docker-compose
+function install_appc {
+ run_docker_compose ${src_folders[appc]}/deployment/docker-compose
+}
+
+# init_appc() - Function that initializes APPC services
+function init_appc {
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "appc"
+ if [[ "$compile_repo" == "True" ]]; then
+ compile_repos "appc"
+ fi
+ fi
+
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_appc_images
+ if [[ "$skip_install" == "False" ]]; then
+ install_appc
+ fi
+ fi
+}
diff --git a/lib/ccsdk b/lib/ccsdk
new file mode 100755
index 0000000..93ee0c8
--- /dev/null
+++ b/lib/ccsdk
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+# _build_ccsdk_images() - Build CCSDK Docker images from source code
+function _build_ccsdk_images {
+ install_package unzip
+ compile_src ${src_folders[ccsdk]}/distribution
+ for image in ubuntu opendaylight odlsli dgbuilder-docker; do
+ build_docker_image ${src_folders[ccsdk]}/distribution/$image docker
+ done
+}
+
+# get_ccsdk_images() - Get CCSDK Docker images
+function get_ccsdk_images {
+ if [[ "$build_image" == "True" ]]; then
+ _build_ccsdk_images
+ else
+ for image in ubuntu odl dgbuilder; do
+ pull_onap_image ccsdk-$image-image
+ done
+ fi
+}
+
+# init_ccsdk() - Function that initializes CCSDK services
+function init_ccsdk {
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "ccsdk"
+ if [[ "$compile_repo" == "True" ]]; then
+ compile_repos "ccsdk"
+ fi
+ fi
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_ccsdk_images
+ fi
+}
diff --git a/lib/commons b/lib/commons
new file mode 100755
index 0000000..90f73d2
--- /dev/null
+++ b/lib/commons
@@ -0,0 +1,119 @@
+#!/bin/bash
+
+# update_repos() - Function that updates Linux package repositories
+function update_repos {
+ echo "Updating repositories list..."
+ if [ -f /var/onap/files/sources.list ]; then
+ cp /var/onap/files/sources.list /etc/apt/sources.list
+ fi
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ zypper -n ref
+ ;;
+ ubuntu|debian)
+ if [[ "$debug" == "False" ]]; then
+ apt-get update > /dev/null
+ else
+ apt-get update
+ fi
+ ;;
+ rhel|centos|fedora)
+ yum updateinfo
+ ;;
+ esac
+}
+
+# is_package_installed() - Function to tell if a package is installed
+function is_package_installed {
+    if [[ -z "$*" ]]; then
+ return 1
+ fi
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ CHECK_CMD="zypper search --match-exact --installed"
+ ;;
+ ubuntu|debian)
+ CHECK_CMD="dpkg -l"
+ ;;
+ rhel|centos|fedora)
+ CHECK_CMD="rpm -q"
+ ;;
+ esac
+ ${CHECK_CMD} "$@" &> /dev/null
+}
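+
+# Example usage (a sketch; the same pattern lib/aai uses for haproxy):
+#   if ! is_package_installed haproxy; then install_package haproxy; fi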
+
+# install_packages() - Install a list of packages
+function install_packages {
+ local package=$@
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ apt-get install -y -qq $package
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
+}
+
+# install_package() - Install a specific package if it isn't already installed
+function install_package {
+ local package=$1
+
+ if ! is_package_installed $package; then
+ echo "Installing $package..."
+
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ zypper install -y $package
+ ;;
+ ubuntu|debian)
+ if [[ "$debug" == "False" ]]; then
+ apt-get install -y -qq -o=Dpkg::Use-Pty=0 $package
+ else
+ apt-get install -y $package
+ fi
+ ;;
+ rhel|centos|fedora)
+ PKG_MANAGER=$(which dnf || which yum)
+ ${PKG_MANAGER} -y install $package
+ ;;
+ esac
+ fi
+}
+
+# uninstall_packages() - Uninstall a list of packages
+function uninstall_packages {
+ local packages=$@
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ apt-get purge -y -qq $packages
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
+}
+
+# uninstall_package() - Uninstall a specific package if it is installed
+function uninstall_package {
+ local package=$1
+ if is_package_installed $package; then
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ apt-get purge -y -qq $package
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
+ fi
+}
diff --git a/lib/config/env-vars b/lib/config/env-vars
new file mode 100755
index 0000000..7712de8
--- /dev/null
+++ b/lib/config/env-vars
@@ -0,0 +1,79 @@
+#!/bin/bash
+
+# Source code destination folder
+git_src_folder=/opt/onap
+
+declare -A src_folders
+src_folders=(
+["aai"]="$git_src_folder/aai"
+["appc"]="$git_src_folder/appc"
+["ccsdk"]="$git_src_folder/ccsdk"
+["dcae"]="$git_src_folder/dcae"
+["mr"]="$git_src_folder/dcae/message-router"
+["msb"]="$git_src_folder/msb"
+["mso"]="$git_src_folder/mso"
+["multicloud"]="$git_src_folder/multicloud"
+["oom"]="$git_src_folder/oom"
+["policy"]="$git_src_folder/policy"
+["portal"]="$git_src_folder/portal"
+["robot"]="$git_src_folder/testsuite"
+["sdc"]="$git_src_folder/sdc"
+["sdnc"]="$git_src_folder/openecomp/sdnc"
+["vfc"]="$git_src_folder/vfc"
+["vid"]="$git_src_folder/vid"
+["vnfsdk"]="$git_src_folder/vnfsdk"
+["vvp"]="$git_src_folder/vvp"
+)
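+
+# Lookup example: ${src_folders[aai]} expands to /opt/onap/aai and is used as
+# the clone and compile destination for the AAI repositories.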
+
+# Repositories list
+declare -A repos
+repos=(
+["aai"]="aai/aai-common aai/aai-config aai/aai-data aai/aai-service \
+aai/babel aai/champ aai/data-router aai/esr-gui aai/esr-server aai/gizmo \
+aai/logging-service aai/model-loader aai/resources aai/rest-client \
+aai/router-core aai/search-data-service aai/test-config aai/traversal \
+aai/sparky-fe aai/sparky-be"
+["appc"]="appc appc/deployment"
+["ccsdk"]="ccsdk ccsdk/dashboard ccsdk/distribution ccsdk/parent \
+ccsdk/platform/blueprints ccsdk/platform/nbapi \
+ccsdk/platform/plugins ccsdk/sli ccsdk/sli/adaptors ccsdk/sli/core \
+ccsdk/sli/northbound ccsdk/sli/plugins ccsdk/storage \
+ccsdk/storage/esaas ccsdk/storage/pgaas ccsdk/utils"
+["dcae"]="dcae dcae/apod dcae/apod/analytics dcae/apod/buildtools \
+dcae/apod/cdap dcae/collectors dcae/collectors/ves dcae/controller \
+dcae/controller/analytics dcae/dcae-inventory dcae/demo \
+dcae/demo/startup dcae/demo/startup/aaf dcae/demo/startup/controller \
+dcae/demo/startup/message-router dcae/dmaapbc dcae/operation \
+dcae/operation/utils dcae/orch-dispatcher dcae/pgaas dcae/utils \
+dcae/utils/buildtools"
+["msb"]="msb/apigateway msb/discovery msb/java-sdk msb/swagger-sdk"
+["mso"]="mso mso/chef-repo mso/docker-config mso/libs mso/mso-config"
+["multicloud"]="multicloud multicloud/framework multicloud/openstack \
+multicloud/openstack/vmware multicloud/openstack/windriver \
+multicloud/azure"
+["oom"]="oom oom/registrator"
+["policy"]="policy/api policy/common policy/docker \
+policy/drools-applications policy/drools-pdp policy/engine \
+policy/gui policy/pap policy/pdp"
+["portal"]="portal portal/sdk ecompsdkos ui/dmaapbc"
+["robot"]="testsuite testsuite/heatbridge testsuite/properties \
+testsuite/python-testing-utils"
+["sdc"]="sdc sdc/jtosca sdc/sdc-distribution-client \
+sdc/sdc-docker-base sdc/sdc-titan-cassandra sdc/sdc-tosca \
+sdc/sdc-vnfdesign sdc/sdc-workflow-designer sdc/sdc_common"
+["sdnc"]="sdnc/adaptors sdnc/architecture sdnc/core sdnc/features \
+sdnc/northbound sdnc/oam sdnc/parent sdnc/plugins"
+["vfc"]="vfc/gvnfm vfc/gvnfm/vnflcm vfc/gvnfm/vnfmgr \
+vfc/gvnfm/vnfres vfc/nfvo vfc/nfvo/catalog vfc/nfvo/driver \
+vfc/nfvo/driver/ems vfc/nfvo/driver/sfc vfc/nfvo/driver/vnfm \
+vfc/nfvo/driver/vnfm/gvnfm vfc/nfvo/driver/vnfm/svnfm vfc/nfvo/lcm \
+vfc/nfvo/resmanagement vfc/nfvo/wfengine"
+["vid"]="vid vid/asdcclient"
+["vnfsdk"]="vnfsdk/compliance vnfsdk/functest vnfsdk/lctest \
+vnfsdk/model vnfsdk/pkgtools vnfsdk/refrepo vnfsdk/validation"
+["vvp"]="vvp/ansible-ice-bootstrap vvp/cms vvp/devkit \
+vvp/documentation vvp/engagementmgr vvp/gitlab vvp/image-scanner \
+vvp/jenkins vvp/portal vvp/postgresql vvp/test-engine \
+vvp/validation-scripts"
+)
+
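+# Iteration example (a sketch): the two arrays above are consumed together,
+# e.g. "for repo in ${repos[aai]}; do clone_repo $repo; done" walks every AAI
+# repository listed here.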
diff --git a/lib/dcae b/lib/dcae
new file mode 100755
index 0000000..25efddd
--- /dev/null
+++ b/lib/dcae
@@ -0,0 +1,92 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+# _create_config_file() - Creates a configuration yaml file for the controller
+function _create_config_file {
+ cat > ${src_folders[dcae]}/controller/config.yaml << EOL
+ZONE: $dcae_zone
+STATE: $dcae_state
+DCAE-VERSION: $artifacts_version
+HORIZON-URL: https://mycloud.rackspace.com/cloud/$tenant_id
+KEYSTONE-URL: https://identity.api.rackspacecloud.com/v2.0
+OPENSTACK-TENANT-ID: $tenant_id
+OPENSTACK-TENANT-NAME: OPEN-ECOMP
+OPENSTACK-REGION: $openstack_region
+OPENSTACK-PRIVATE-NETWORK: $openstack_private_network_name
+OPENSTACK-USER: $openstack_user
+OPENSTACK-PASSWORD: $openstack_password
+OPENSTACK-KEYNAME: ${key_name}${rand_str}_dcae
+OPENSTACK-PUBKEY: $pub_key
+
+NEXUS-URL-ROOT: $nexus_repo_root
+NEXUS-USER: $nexus_username
+NEXUS-PASSWORD: $nexus_password
+NEXUS-URL-SNAPSHOTS: $nexus_url_snapshots
+NEXUS-RAWURL: $nexus_repo
+
+DOCKER-REGISTRY: $nexus_docker_repo
+
+GIT-MR-REPO: http://gerrit.onap.org/r/dcae/demo/startup/message-router.git
+EOL
+}
+
+# _build_dcae_images() - Function that builds DCAE docker images from source code.
+function _build_dcae_images {
+ if [[ "$compile_repo" != "True" ]]; then
+ compile_repos "dcae"
+ fi
+ build_docker_image ${src_folders[dcae]}/dmaapbc openecomp/dcae-dmaapbc
+ build_docker_image ${src_folders[dcae]}/orch-dispatcher dcae/orch-dispatcher
+
+ pushd ${src_folders[dcae]}/demo
+ bash dcae-demo-controller/src/main/docker-build/build.sh
+ popd
+
+ build_docker_image ${src_folders[dcae]}/dcae-inventory
+}
+
+# get_dcae_images() - Function that retrieves or builds DCAE docker images.
+function get_dcae_images {
+ if [[ "$build_image" == "True" ]]; then
+ _build_dcae_images
+ else
+ pull_openecomp_image dcae-dmaapbc openecomp/dcae-dmaapbc
+ pull_openecomp_image dcae-controller
+ fi
+}
+
+# install_dcae() - Function that installs the DCAE controller services
+function install_dcae {
+ pushd ${src_folders[dcae]}/demo/startup/controller
+ if [[ "$build_image" == "True" ]]; then
+ dcae_image=`docker images | grep dcae-controller | awk '{print $1 ":" $2}'`
+ sed -i "s|DOCKER-REGISTRY/openecomp/dcae-controller:DCAE-VERSION|$dcae_image|g" docker-compose.yml
+ sed -i "s|MTU|$MTU|g" docker-compose.yml
+ run_docker_compose .
+ else
+ bash init.sh
+ install_package make
+ make up
+ fi
+ popd
+ # run_docker_image -p 8080:8080 -d -v <some local directory>/config.yml:/opt/config.yml --name dcae-inventory <docker registry>/dcae-inventory:<version>
+}
+
+# init_dcae() - Function that initializes DCAE Controller services
+function init_dcae {
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "dcae"
+ if [[ "$compile_repo" == "True" ]]; then
+ compile_repos "dcae"
+ fi
+ fi
+
+ _create_config_file
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_dcae_images
+ if [[ "$skip_install" == "False" ]]; then
+ install_dcae
+ fi
+ fi
+}
diff --git a/lib/files/aai.pem b/lib/files/aai.pem
new file mode 100644
index 0000000..d446705
--- /dev/null
+++ b/lib/files/aai.pem
@@ -0,0 +1,102 @@
+Bag Attributes
+ friendlyName: aaiopenecomp
+ localKeyID: 54 69 6D 65 20 31 34 39 35 35 31 32 38 30 33 36 34 39
+subject=/C=US/ST=NJ/L=Bedminster/O=OpenECOMP/OU=SimpleDemo/CN=aai.api.simpledemo.openecomp.org/emailAddress=aai-host@api.simpledemo.openecomp.org
+issuer=/C=US/ST=NJ/L=Bedminster/O=OpenECOMP/OU=simpledemo/CN=OpenECOMP simpledemo Server CA X1/emailAddress=simpledemo@openecomp.org
+-----BEGIN CERTIFICATE-----
+MIIEiTCCA3GgAwIBAgIJAIPKfDLcn3MpMA0GCSqGSIb3DQEBCwUAMIGtMQswCQYD
+VQQGEwJVUzELMAkGA1UECAwCTkoxEzARBgNVBAcMCkJlZG1pbnN0ZXIxEjAQBgNV
+BAoMCU9wZW5FQ09NUDETMBEGA1UECwwKc2ltcGxlZGVtbzEqMCgGA1UEAwwhT3Bl
+bkVDT01QIHNpbXBsZWRlbW8gU2VydmVyIENBIFgxMScwJQYJKoZIhvcNAQkBFhhz
+aW1wbGVkZW1vQG9wZW5lY29tcC5vcmcwHhcNMTYxMTMwMTUzODM5WhcNMTcxMTMw
+MTUzODM5WjCBuTELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5KMRMwEQYDVQQHDApC
+ZWRtaW5zdGVyMRIwEAYDVQQKDAlPcGVuRUNPTVAxEzARBgNVBAsMClNpbXBsZURl
+bW8xKTAnBgNVBAMMIGFhaS5hcGkuc2ltcGxlZGVtby5vcGVuZWNvbXAub3JnMTQw
+MgYJKoZIhvcNAQkBFiVhYWktaG9zdEBhcGkuc2ltcGxlZGVtby5vcGVuZWNvbXAu
+b3JnMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwQrQl8A0rT0Jjlos
+Mr/7LEhT5UOif4GGPOk+3NCIxT3lOqAbUf+d9ZXyT2jWFRiKWua03vQ+Dxc8c2h2
+RRuH8LwEiOiWqPjWRxNqsARzZMI3ryHFCFBZh0FcpjH9kEeKVlLDYuV68k+ZucKd
+NiqUNn61lD7kbmEGwvzKwf91FrJ09+CBMx1OnWKm3gCNKDqAEFMZCOdn2MgesJYB
+/03lzPBS1jDfBXImXRcTBzpgA+wdCLn0cIQ1eLWUwS5tUqUJNh36nHdVyJ0P2Yjd
+JLuxhFcmBKOz1ShyyO+BBtKBO8EGbU6qKflOiwOw0Fsn8LjKcrHQ58NPui5y04BU
+Rypf3QIDAQABo4GdMIGaMAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/BAQDAgO4MB0G
+A1UdDgQWBBQyMUOsE2J+CKzK0qd8KFBD2gaWyjBbBgNVHSAEVDBSMFAGBFUdIAAw
+SDBGBggrBgEFBQcCAjA6GjhLZWVwIGF3YXkgZnJvbSBjaGlsZHJlbi4gIFRoaXMg
+Y2VydGlmaWNhdGUgaXMgbm90IGEgdG95LjANBgkqhkiG9w0BAQsFAAOCAQEAnkoy
+2tWJOyyyIQwtVojUxv1GWQPnw3WCUcKpuX4CJhHXLxNErW1fBg7bmo08BNmBPPpq
+WrJsy5lbBgUo9kgpViux5Stfy1rRIRsRLfl/icgCvJmUAxkmRCZL7yUvwG4K7s+8
+DwT+nW/XuWNP6Hd/qHccexB6COJ8KwvTdVoxAkCdX8qw4MCb/f7Kb1yle/vwBM5Q
+UUONCJ4bEns1vnb9DGlNDUJNwCfwORAaVJpVS38Mv4UnSTmb2KMePtCWcx/dNsYR
+2XrSGqLDnTvHwOpyhbfFTmackysGoSuDytORXy8YbwEiF13BwEK8i3rgNN0Z2ojf
+cpmE2xxmaa+A2uuN6g==
+-----END CERTIFICATE-----
+Bag Attributes
+ friendlyName: root
+ 2.16.840.1.113894.746875.1.1: <Unsupported tag 6>
+subject=/C=US/ST=NJ/L=Bedminster/O=OpenECOMP/OU=simpledemo/CN=OpenECOMP simpledemo Server CA X1/emailAddress=simpledemo@openecomp.org
+issuer=/C=US/ST=NJ/L=Bedminster/O=OpenECOMP/OU=simpledemo/CN=OpenECOMP simpledemo Root Certification Authority/emailAddress=simpledemo@openecomp.org
+-----BEGIN CERTIFICATE-----
+MIIFpTCCA42gAwIBAgIJAJqx8dKnCZZoMA0GCSqGSIb3DQEBCwUAMIG9MQswCQYD
+VQQGEwJVUzELMAkGA1UECAwCTkoxEzARBgNVBAcMCkJlZG1pbnN0ZXIxEjAQBgNV
+BAoMCU9wZW5FQ09NUDETMBEGA1UECwwKc2ltcGxlZGVtbzE6MDgGA1UEAwwxT3Bl
+bkVDT01QIHNpbXBsZWRlbW8gUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEn
+MCUGCSqGSIb3DQEJARYYc2ltcGxlZGVtb0BvcGVuZWNvbXAub3JnMB4XDTE2MTEy
+ODIxMTQyNloXDTIxMTEyNzIxMTQyNlowga0xCzAJBgNVBAYTAlVTMQswCQYDVQQI
+DAJOSjETMBEGA1UEBwwKQmVkbWluc3RlcjESMBAGA1UECgwJT3BlbkVDT01QMRMw
+EQYDVQQLDApzaW1wbGVkZW1vMSowKAYDVQQDDCFPcGVuRUNPTVAgc2ltcGxlZGVt
+byBTZXJ2ZXIgQ0EgWDExJzAlBgkqhkiG9w0BCQEWGHNpbXBsZWRlbW9Ab3BlbmVj
+b21wLm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALr4rivKQuRk
+YNf5Ig40e1nqj6s6LB1vgMOYbKfRziOFpPcUpsHPOhusHowiUsrU1vdFSzPz6Ej7
+PjlmNSg2Qka8YCn9kd6QgM7U0KcPJvIucBp+qjifH3EvP0jgDPhDeVRYxzV454dv
+5kQ9uCpswJP7YAnX51dkWeH8nwPUoagt31bOl9LXENSrgxEThxdLYMJnQJWk2CmV
+otXM4tT1dxyJxFUrZ6uJCEAYw5VtlplqihHf8lHy+sWQavtsLz/4dc+sGeXSTfoI
+voKvoh3uZ5gEhGV8yfJxk1veX5y5/AxP80vQ+smWYjTnQL5QQ57y4bciez4XVBmQ
+SWimWtOi4e8CAwEAAaOBtTCBsjAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE
+AwIBhjAdBgNVHQ4EFgQUTqdsYgGNGubdJHq9tsaJhM9HE5wwcAYDVR0gBGkwZzBl
+BgRVHSAAMF0wWwYIKwYBBQUHAgIwTxpNSWYgeW91IHRydXN0IHRoaXMgY2VydCB0
+aGVuIHdlIGhhdmUgYSBicmlkZ2UgdGhhdCB5b3UgbWlnaHQgYmUgaW50ZXJlc3Rl
+ZCBpbi4wDQYJKoZIhvcNAQELBQADggIBAKNNlRqFuE/JgV1BHyYK0xoSXH4aZP/7
+IoHtDVcSaZAOOuFOUrwVMUbzRBebbb6RpFwt/X+NLFUGysd+XNLF7W7lzxKtmFNX
+n4OpNkBe0y5O7yurus8rERHzu3jiOSgVo+WzDlGpYSRnG3hI2qPWqD+Puzx/WwI8
+XUTuzEQQ3gUSyVFfXHpay3VpYmLZiLJ9WKY5SDw7Ie6Sxrju4Qm1HwnFY8wHZGcs
+2KMQzorJ1ZNQf523yUTghbT0rKaSFaD8zugPtI2ONfFG/QgrkQXo78opzPsHnHwa
+SxGSiAgeLbwAUCvPNl27zr6k6+7TcNjV0VUivAs0OG3VEAdgi7UWYB+30KfWwHwE
+zGmvd4IAGqIqlqLcSVArN5z8JK1B5nfjQn5UrclU1vK+dnuiKE2X4rKuBTRYRFR/
+km+mj4koYFPKFHndmJl1uv2OCJK9l5CSIuKWeI1qv8BASKqgNdoT/SKBXqxgYlCb
+o+j4IDjxrxChRO+e5vl9lA7INfRrbljCkUjfLRa+v2q9tWQ3+EQUwwnSrSfihh2T
+j0Tksr6b8dDsvMlCdOKG1B+JPcEXORSFKNXVTEfjqpJG8s16kFAocWt3S6xO0k1t
+qbQp+3tWQgW2TGnX0rMZzB6NGRNfWhlYmq2zHgXkiCIZ26Ztgt/LNbwEvN3+VlLo
+z/Rd+SKtlrfb
+-----END CERTIFICATE-----
+Bag Attributes
+ friendlyName: aaiopenecomp
+ localKeyID: 54 69 6D 65 20 31 34 39 35 35 31 32 38 30 33 36 34 39
+Key Attributes: <No Attributes>
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDBCtCXwDStPQmO
+Wiwyv/ssSFPlQ6J/gYY86T7c0IjFPeU6oBtR/531lfJPaNYVGIpa5rTe9D4PFzxz
+aHZFG4fwvASI6Jao+NZHE2qwBHNkwjevIcUIUFmHQVymMf2QR4pWUsNi5XryT5m5
+wp02KpQ2frWUPuRuYQbC/MrB/3UWsnT34IEzHU6dYqbeAI0oOoAQUxkI52fYyB6w
+lgH/TeXM8FLWMN8FciZdFxMHOmAD7B0IufRwhDV4tZTBLm1SpQk2Hfqcd1XInQ/Z
+iN0ku7GEVyYEo7PVKHLI74EG0oE7wQZtTqop+U6LA7DQWyfwuMpysdDnw0+6LnLT
+gFRHKl/dAgMBAAECggEBAJko2HkeIW01mUhdWOXnFgR7WjzzXZEmlffr41lVBr7f
+rejGsQZs9cms73R7rCdOsi8PDoA6bqaQfADg571K659fvYVWbHqh+3im+iWvUlKm
+GYIVG/vNrEq43CZsUU7Qw/xba/QiOFraNxCATTV1sORPwgddouXEi5XW9ZPX9/FJ
+wORx4L/K0DfHX1rr+rtOoHCJdZYhn3Ij87kmR8Mwg0fNeWhHqtxUEyM/itRjCvOe
+mgt2V8DORhmq12L4+5QJctBrkBVRp9Rh6YSZZBGnKbTSgf4q648BdkJDLSK4cguT
+D6BAw3gxj5V4wt5W0wn2JpjadFwnixrTzvMP/yAqfK0CgYEA93nBAoUPw8nzQkwk
+8iWBjfJ999Rw92hnnvk3xbcQcGfgUYuB4dxwe6FQTmFIVylt81er1YUvMb3ao7fo
+5ZcGnI5p1idjsd27kbZJLxb5Oh919hKu5IfkfYsVgnC0UdKCTgH5CaH0U4ATuXwt
+RL6qm0XcLALs5y2OO6z3s+mYhisCgYEAx7EQ8MA45bkXnRQiHBhGcIIcr2sRDfVJ
+OhHmGxx3EcYgtsIYKTqtQOyIt/nQxo6iyNL9bzfzBTybFJLuj63ZG1Ef4LosJedl
+eAU2NsKv5MlKYDSdNbLAJ0Op9I2Xu/pXQecPwY/3MkIQArdQCLevMLEGywCsuJTn
+BjkJNDkb9hcCgYAhoFiaiAwJVYKJSqFmibQd3opBR4uGApi54DE021gPff3b9rHS
+R8q88cFgtRVISqfW/d2qaKtt/dcckdvCfo/2a99zqux/+ZoIBZXSITQCMs4rfoRn
+JxPj/ycQD1JhH9J22QvGxEvXoLqNZJMeGS5DZO2yyT75dpYyA6Gwv5fq+wKBgQC5
+AhV917lfLELyZurLOLpaFlHZO8rLMcusH1UfHRo7v2IjsGOOHyzRD9QDD1IcA55R
+jRj8Z8uhuGq9fvvC5gBVTK3KGPI6E85wifOWfH1V7CAaTeSxEXDxb8EQL/a6U89v
+4VE5tdYCHC6VNZzS1staw0hV38QmJt57Z3Bdj+OV9QKBgE/b9fxpo+SVQ37BzNNY
+SEKTTijaddz8fdomApg6a2eFJL93Ej/op7N7gnHtPWMivPnRRza9ZjfnG+aZ7n2J
+sWyBiZK9xliS2TsF3l3q9Z0Vaq3i1nOlV7Bd20ZS8KjQjDtKnIRfLkQDkvmXbU5L
+emwkdsQZbpPFJch3mCGtI7JW
+-----END PRIVATE KEY-----
diff --git a/lib/files/all-in-one b/lib/files/all-in-one
new file mode 100644
index 0000000..efdb2bf
--- /dev/null
+++ b/lib/files/all-in-one
@@ -0,0 +1,585 @@
+# These initial groups are the only groups required to be modified. The
+# additional groups are for more control of the environment.
+[control]
+localhost ansible_connection=local
+
+[network]
+localhost ansible_connection=local
+
+[compute]
+localhost ansible_connection=local
+
+[storage]
+localhost ansible_connection=local
+
+[monitoring]
+localhost ansible_connection=local
+
+[deployment]
+localhost ansible_connection=local
+
+# You can explicitly specify which hosts run each project by updating the
+# groups in the sections below. Common services are grouped together.
+[chrony-server:children]
+haproxy
+
+[chrony:children]
+network
+compute
+storage
+monitoring
+
+[collectd:children]
+compute
+
+[baremetal:children]
+control
+
+[grafana:children]
+monitoring
+
+[etcd:children]
+control
+compute
+
+[karbor:children]
+control
+
+[kibana:children]
+control
+
+[telegraf:children]
+compute
+control
+monitoring
+network
+storage
+
+[elasticsearch:children]
+control
+
+[haproxy:children]
+network
+
+[hyperv]
+#hyperv_host
+
+[hyperv:vars]
+#ansible_user=user
+#ansible_password=password
+#ansible_port=5986
+#ansible_connection=winrm
+#ansible_winrm_server_cert_validation=ignore
+
+[mariadb:children]
+control
+
+[rabbitmq:children]
+control
+
+[outward-rabbitmq:children]
+control
+
+[qdrouterd:children]
+control
+
+[mongodb:children]
+control
+
+[keystone:children]
+control
+
+[glance:children]
+control
+
+[nova:children]
+control
+
+[neutron:children]
+network
+
+[openvswitch:children]
+network
+compute
+manila-share
+
+[opendaylight:children]
+network
+
+[cinder:children]
+control
+
+[cloudkitty:children]
+control
+
+[freezer:children]
+control
+
+[memcached:children]
+control
+
+[horizon:children]
+control
+
+[swift:children]
+control
+
+[barbican:children]
+control
+
+[heat:children]
+control
+
+[murano:children]
+control
+
+[ceph:children]
+control
+
+[ironic:children]
+control
+
+[influxdb:children]
+monitoring
+
+[magnum:children]
+control
+
+[sahara:children]
+control
+
+[solum:children]
+control
+
+[mistral:children]
+control
+
+[manila:children]
+control
+
+[panko:children]
+control
+
+[gnocchi:children]
+control
+
+[ceilometer:children]
+control
+
+[aodh:children]
+control
+
+[congress:children]
+control
+
+[tacker:children]
+control
+
+# Tempest
+[tempest:children]
+control
+
+[senlin:children]
+control
+
+[vmtp:children]
+control
+
+[trove:children]
+control
+
+[watcher:children]
+control
+
+[rally:children]
+control
+
+[searchlight:children]
+control
+
+[octavia:children]
+control
+
+[designate:children]
+control
+
+[placement:children]
+control
+
+[bifrost:children]
+deployment
+
+[zun:children]
+control
+
+[skydive:children]
+monitoring
+
+[redis:children]
+control
+
+# Additional control implemented here. These groups allow you to control which
+# services run on which hosts at a per-service level.
+#
+# Word of caution: Some services are required to run on the same host to
+# function appropriately. For example, neutron-metadata-agent must run on the
+# same host as the l3-agent and (depending on configuration) the dhcp-agent.
+
+# Glance
+[glance-api:children]
+glance
+
+[glance-registry:children]
+glance
+
+# Nova
+[nova-api:children]
+nova
+
+[nova-conductor:children]
+nova
+
+[nova-consoleauth:children]
+nova
+
+[nova-novncproxy:children]
+nova
+
+[nova-scheduler:children]
+nova
+
+[nova-spicehtml5proxy:children]
+nova
+
+[nova-compute-ironic:children]
+nova
+
+[nova-serialproxy:children]
+nova
+
+# Neutron
+[neutron-server:children]
+control
+
+[neutron-dhcp-agent:children]
+neutron
+
+[neutron-l3-agent:children]
+neutron
+
+[neutron-lbaas-agent:children]
+neutron
+
+[neutron-metadata-agent:children]
+neutron
+
+[neutron-vpnaas-agent:children]
+neutron
+
+[neutron-bgp-dragent:children]
+neutron
+
+# Ceph
+[ceph-mon:children]
+ceph
+
+[ceph-rgw:children]
+ceph
+
+[ceph-osd:children]
+storage
+
+# Cinder
+[cinder-api:children]
+cinder
+
+[cinder-backup:children]
+storage
+
+[cinder-scheduler:children]
+cinder
+
+[cinder-volume:children]
+storage
+
+# Cloudkitty
+[cloudkitty-api:children]
+cloudkitty
+
+[cloudkitty-processor:children]
+cloudkitty
+
+# Freezer
+[freezer-api:children]
+freezer
+
+# iSCSI
+[iscsid:children]
+compute
+storage
+ironic-conductor
+
+[tgtd:children]
+storage
+
+# Karbor
+[karbor-api:children]
+karbor
+
+[karbor-protection:children]
+karbor
+
+[karbor-operationengine:children]
+karbor
+
+# Manila
+[manila-api:children]
+manila
+
+[manila-scheduler:children]
+manila
+
+[manila-share:children]
+network
+
+[manila-data:children]
+manila
+
+# Swift
+[swift-proxy-server:children]
+swift
+
+[swift-account-server:children]
+storage
+
+[swift-container-server:children]
+storage
+
+[swift-object-server:children]
+storage
+
+# Barbican
+[barbican-api:children]
+barbican
+
+[barbican-keystone-listener:children]
+barbican
+
+[barbican-worker:children]
+barbican
+
+# Trove
+[trove-api:children]
+trove
+
+[trove-conductor:children]
+trove
+
+[trove-taskmanager:children]
+trove
+
+# Heat
+[heat-api:children]
+heat
+
+[heat-api-cfn:children]
+heat
+
+[heat-engine:children]
+heat
+
+# Murano
+[murano-api:children]
+murano
+
+[murano-engine:children]
+murano
+
+# Ironic
+[ironic-api:children]
+ironic
+
+[ironic-conductor:children]
+ironic
+
+[ironic-inspector:children]
+ironic
+
+[ironic-pxe:children]
+ironic
+
+# Magnum
+[magnum-api:children]
+magnum
+
+[magnum-conductor:children]
+magnum
+
+# Solum
+[solum-api:children]
+solum
+
+[solum-worker:children]
+solum
+
+[solum-deployer:children]
+solum
+
+[solum-conductor:children]
+solum
+
+# Mistral
+[mistral-api:children]
+mistral
+
+[mistral-executor:children]
+mistral
+
+[mistral-engine:children]
+mistral
+
+# Aodh
+[aodh-api:children]
+aodh
+
+[aodh-evaluator:children]
+aodh
+
+[aodh-listener:children]
+aodh
+
+[aodh-notifier:children]
+aodh
+
+# Panko
+[panko-api:children]
+panko
+
+# Gnocchi
+[gnocchi-api:children]
+gnocchi
+
+[gnocchi-statsd:children]
+gnocchi
+
+[gnocchi-metricd:children]
+gnocchi
+
+# Sahara
+[sahara-api:children]
+sahara
+
+[sahara-engine:children]
+sahara
+
+# Ceilometer
+[ceilometer-api:children]
+ceilometer
+
+[ceilometer-central:children]
+ceilometer
+
+[ceilometer-notification:children]
+ceilometer
+
+[ceilometer-collector:children]
+ceilometer
+
+[ceilometer-compute:children]
+compute
+
+# Congress
+[congress-api:children]
+congress
+
+[congress-datasource:children]
+congress
+
+[congress-policy-engine:children]
+congress
+
+# Multipathd
+[multipathd:children]
+compute
+
+# Watcher
+[watcher-api:children]
+watcher
+
+[watcher-engine:children]
+watcher
+
+[watcher-applier:children]
+watcher
+
+# Senlin
+[senlin-api:children]
+senlin
+
+[senlin-engine:children]
+senlin
+
+# Searchlight
+[searchlight-api:children]
+searchlight
+
+[searchlight-listener:children]
+searchlight
+
+# Octavia
+[octavia-api:children]
+octavia
+
+[octavia-health-manager:children]
+octavia
+
+[octavia-housekeeping:children]
+octavia
+
+[octavia-worker:children]
+octavia
+
+# Designate
+[designate-api:children]
+designate
+
+[designate-central:children]
+designate
+
+[designate-mdns:children]
+network
+
+[designate-worker:children]
+designate
+
+[designate-sink:children]
+designate
+
+[designate-backend-bind9:children]
+designate
+
+# Placement
+[placement-api:children]
+placement
+
+# Zun
+[zun-api:children]
+zun
+
+[zun-compute:children]
+compute
+
+# Skydive
+[skydive-analyzer:children]
+skydive
+
+[skydive-agent:children]
+compute
+network
+
+# Tacker
+[tacker-server:children]
+tacker
+
+[tacker-conductor:children]
+tacker
diff --git a/lib/files/globals.yml b/lib/files/globals.yml
new file mode 100644
index 0000000..d10cc3d
--- /dev/null
+++ b/lib/files/globals.yml
@@ -0,0 +1,2 @@
+---
+openstack_release: "master"
diff --git a/lib/files/haproxy.cfg b/lib/files/haproxy.cfg
new file mode 100644
index 0000000..ac4b754
--- /dev/null
+++ b/lib/files/haproxy.cfg
@@ -0,0 +1,120 @@
+global
+ log /dev/log local0
+ stats socket /usr/local/etc/haproxy/haproxy.socket mode 660 level admin
+ stats timeout 30s
+ user root
+ group root
+ daemon
+ #################################
+ # Default SSL material locations#
+ #################################
+ ca-base /etc/ssl/certs
+ crt-base /etc/ssl/private
+
+ # Default ciphers to use on SSL-enabled listening sockets.
+ # For more information, see ciphers(1SSL). This list is from:
+ # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+ # An alternative list with additional directives can be obtained from
+ # https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=haproxy
+ tune.ssl.default-dh-param 2048
+
+defaults
+ log global
+ mode http
+ option httplog
+# option dontlognull
+# errorfile 400 /etc/haproxy/errors/400.http
+# errorfile 403 /etc/haproxy/errors/403.http
+# errorfile 408 /etc/haproxy/errors/408.http
+# errorfile 500 /etc/haproxy/errors/500.http
+# errorfile 502 /etc/haproxy/errors/502.http
+# errorfile 503 /etc/haproxy/errors/503.http
+# errorfile 504 /etc/haproxy/errors/504.http
+
+ option http-server-close
+ option forwardfor except 127.0.0.1
+ retries 6
+ option redispatch
+ maxconn 50000
+ timeout connect 50000
+ timeout client 480000
+ timeout server 480000
+ timeout http-keep-alive 30000
+
+
+frontend IST_8443
+ mode http
+ bind 0.0.0.0:8443 name https ssl crt /etc/ssl/private/aai.pem
+# log-format %ci:%cp\ [%t]\ %ft\ %b/%s\ %Tq/%Tw/%Tc/%Tr/%Tt\ %ST\ %B\ %CC\ %CS\ %tsc\ %ac/%fc/%bc/%sc/%rc\ %sq/%bq\ %hr\ %hs\ {%[ssl_c_verify],%{+Q}[ssl_c_s_dn],%{+Q}[ssl_c_i_dn]}\ %{+Q}r
+ log-format "%ci:%cp [%tr] %ft %b/%s %TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC \ %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+ option httplog
+ log global
+ option logasap
+ option forwardfor
+ capture request header Host len 100
+ capture response header Host len 100
+ option log-separate-errors
+ option forwardfor
+ http-request set-header X-Forwarded-Proto https if { ssl_fc }
+ http-request set-header X-AAI-Client-SSL TRUE if { ssl_c_used }
+ http-request set-header X-AAI-SSL %[ssl_fc]
+ http-request set-header X-AAI-SSL-Client-Verify %[ssl_c_verify]
+ http-request set-header X-AAI-SSL-Client-DN %{+Q}[ssl_c_s_dn]
+ http-request set-header X-AAI-SSL-Client-CN %{+Q}[ssl_c_s_dn(cn)]
+ http-request set-header X-AAI-SSL-Issuer %{+Q}[ssl_c_i_dn]
+ http-request set-header X-AAI-SSL-Client-NotBefore %{+Q}[ssl_c_notbefore]
+ http-request set-header X-AAI-SSL-Client-NotAfter %{+Q}[ssl_c_notafter]
+ http-request set-header X-AAI-SSL-ClientCert-Base64 %{+Q}[ssl_c_der,base64]
+ http-request set-header X-AAI-SSL-Client-OU %{+Q}[ssl_c_s_dn(OU)]
+ http-request set-header X-AAI-SSL-Client-L %{+Q}[ssl_c_s_dn(L)]
+ http-request set-header X-AAI-SSL-Client-ST %{+Q}[ssl_c_s_dn(ST)]
+ http-request set-header X-AAI-SSL-Client-C %{+Q}[ssl_c_s_dn(C)]
+ http-request set-header X-AAI-SSL-Client-O %{+Q}[ssl_c_s_dn(O)]
+ reqadd X-Forwarded-Proto:\ https
+ reqadd X-Forwarded-Port:\ 8443
+
+#######################
+#ACLS FOR PORT 8446####
+#######################
+
+ acl is_Port_8446_generic path_reg -i ^/aai/v[0-9]+/search/generic-query$
+ acl is_Port_8446_nodes path_reg -i ^/aai/v[0-9]+/search/nodes-query$
+ acl is_Port_8446_version path_reg -i ^/aai/v[0-9]+/query$
+ acl is_named-query path_beg -i /aai/search/named-query
+ acl is_search-model path_beg -i /aai/search/model
+ use_backend IST_AAI_8446 if is_Port_8446_generic or is_Port_8446_nodes or is_Port_8446_version or is_named-query or is_search-model
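+    # e.g. GET /aai/v11/search/generic-query matches is_Port_8446_generic and
+    # is routed to IST_AAI_8446; everything else falls through to
+    # IST_Default_8447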
+
+ default_backend IST_Default_8447
+
+
+#######################
+#DEFAULT BACKEND 8447##
+#######################
+
+backend IST_Default_8447
+ balance roundrobin
+ http-request set-header X-Forwarded-Port %[src_port]
+ http-response set-header Strict-Transport-Security max-age=16000000;\ includeSubDomains;\ preload;
+ server aai aai:8447 port 8447 ssl verify none
+
+#######################
+# BACKEND 8446#########
+#######################
+
+backend IST_AAI_8446
+ balance roundrobin
+ http-request set-header X-Forwarded-Port %[src_port]
+ http-response set-header Strict-Transport-Security max-age=16000000;\ includeSubDomains;\ preload;
+ server aai aai:8446 port 8446 ssl verify none
+
+listen IST_AAI_STATS
+ mode http
+ bind *:8080
+ stats uri /stats
+ stats enable
+ stats refresh 30s
+ stats hide-version
+ stats auth admin:admin
+ stats show-legends
+ stats show-desc IST AAI APPLICATION NODES
+ stats admin if TRUE
diff --git a/lib/files/kolla-build.conf b/lib/files/kolla-build.conf
new file mode 100644
index 0000000..8dd14e6
--- /dev/null
+++ b/lib/files/kolla-build.conf
@@ -0,0 +1,5 @@
+[DEFAULT]
+base = ubuntu
+profile = main
+
+[profiles]
diff --git a/lib/files/kubectl_config_generator.py b/lib/files/kubectl_config_generator.py
new file mode 100644
index 0000000..6b5a6e9
--- /dev/null
+++ b/lib/files/kubectl_config_generator.py
@@ -0,0 +1,40 @@
+import requests
+import os
+import base64
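+
+# NOTE: written for Python 2 -- base64.b64encode is called on str values
+# below, which would raise a TypeError under Python 3.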
+
+RANCHER_URL = str(os.environ['RANCHER_URL'])
+RANCHER_ENVIRONMENT_ID = str(os.environ['RANCHER_ENVIRONMENT'])
+data = requests.post(RANCHER_URL + '/v1/projects/' + RANCHER_ENVIRONMENT_ID + '/apikeys',
+ {"accountId": RANCHER_ENVIRONMENT_ID,
+ "description": "ONAP on Kubernetes",
+ "name": "ONAP on Kubernetes",
+ "publicValue": "string",
+ "secretValue": "password"})
+json_dct = data.json()
+access_key = json_dct['publicValue']
+secret_key = json_dct['secretValue']
+auth_header = 'Basic ' + base64.b64encode(access_key + ':' + secret_key)
+token = "\"" + str(base64.b64encode(auth_header)) + "\""
+dct = \
+"""
+apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+ api-version: v1
+ insecure-skip-tls-verify: true
+ server: "{}/r/projects/{}/kubernetes:6443"
+ name: "onap_on_kubernetes"
+contexts:
+- context:
+ cluster: "onap_on_kubernetes"
+ user: "onap_on_kubernetes"
+ name: "onap_on_kubernetes"
+current-context: "onap_on_kubernetes"
+users:
+- name: "onap_on_kubernetes"
+ user:
+ token: {}
+""".format(RANCHER_URL, RANCHER_ENVIRONMENT_ID, token)
+with open("config", "w") as file:
+ file.write(dct)
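+
+# Assumed invocation (values are illustrative):
+#   RANCHER_URL=http://rancher:8080 RANCHER_ENVIRONMENT=1a7 python kubectl_config_generator.py
+# The resulting kubectl configuration is written to ./config.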
diff --git a/lib/files/passwords.yml b/lib/files/passwords.yml
new file mode 100644
index 0000000..f376e31
--- /dev/null
+++ b/lib/files/passwords.yml
@@ -0,0 +1,216 @@
+---
+###################
+# Ceph options
+####################
+# These options must be UUID4 values in string format
+# XXXXXXXX-XXXX-4XXX-XXXX-XXXXXXXXXXXX
+ceph_cluster_fsid:
+ceph_rgw_keystone_password:
+# For backward compatibility, rbd_secret_uuid is only used for nova and
+# cinder_rbd_secret_uuid is used for cinder
+rbd_secret_uuid:
+cinder_rbd_secret_uuid:
+
+###################
+# Database options
+####################
+database_password:
+
+####################
+# Docker options
+####################
+# This should only be set if you require a password for your Docker registry
+docker_registry_password:
+
+######################
+# OpenDaylight options
+######################
+opendaylight_password:
+
+####################
+# OpenStack options
+####################
+aodh_database_password:
+aodh_keystone_password:
+
+barbican_database_password:
+barbican_keystone_password:
+barbican_p11_password:
+barbican_crypto_key:
+
+keystone_admin_password:
+keystone_database_password:
+
+grafana_database_password:
+grafana_admin_password:
+
+glance_database_password:
+glance_keystone_password:
+
+gnocchi_database_password:
+gnocchi_keystone_password:
+
+karbor_database_password:
+karbor_keystone_password:
+karbor_openstack_infra_id:
+
+kuryr_keystone_password:
+
+nova_database_password:
+nova_api_database_password:
+nova_keystone_password:
+
+placement_keystone_password:
+
+neutron_database_password:
+neutron_keystone_password:
+metadata_secret:
+
+cinder_database_password:
+cinder_keystone_password:
+
+cloudkitty_database_password:
+cloudkitty_keystone_password:
+
+panko_database_password:
+panko_keystone_password:
+
+freezer_database_password:
+freezer_keystone_password:
+
+sahara_database_password:
+sahara_keystone_password:
+
+designate_database_password:
+designate_pool_manager_database_password:
+designate_keystone_password:
+# This option must be a UUID4 value in string format
+designate_pool_id:
+# This option must be an HMAC-MD5 value in string format
+designate_rndc_key:
+
+swift_keystone_password:
+swift_hash_path_suffix:
+swift_hash_path_prefix:
+
+heat_database_password:
+heat_keystone_password:
+heat_domain_admin_password:
+
+murano_database_password:
+murano_keystone_password:
+murano_agent_rabbitmq_password:
+
+ironic_database_password:
+ironic_keystone_password:
+
+ironic_inspector_database_password:
+ironic_inspector_keystone_password:
+
+magnum_database_password:
+magnum_keystone_password:
+
+mistral_database_password:
+mistral_keystone_password:
+
+trove_database_password:
+trove_keystone_password:
+
+ceilometer_database_password:
+ceilometer_keystone_password:
+
+watcher_database_password:
+watcher_keystone_password:
+
+congress_database_password:
+congress_keystone_password:
+
+rally_database_password:
+
+senlin_database_password:
+senlin_keystone_password:
+
+solum_database_password:
+solum_keystone_password:
+
+horizon_secret_key:
+horizon_database_password:
+
+telemetry_secret_key:
+
+manila_database_password:
+manila_keystone_password:
+
+octavia_database_password:
+octavia_keystone_password:
+octavia_ca_password:
+
+searchlight_keystone_password:
+
+tacker_database_password:
+tacker_keystone_password:
+
+zun_database_password:
+zun_keystone_password:
+
+memcache_secret_key:
+
+#HMAC secret key
+osprofiler_secret:
+
+nova_ssh_key:
+ private_key:
+ public_key:
+
+kolla_ssh_key:
+ private_key:
+ public_key:
+
+keystone_ssh_key:
+ private_key:
+ public_key:
+
+bifrost_ssh_key:
+ private_key:
+ public_key:
+
+####################
+# Gnocchi options
+####################
+gnocchi_project_id:
+gnocchi_resource_id:
+gnocchi_user_id:
+
+####################
+# Qdrouterd options
+####################
+qdrouterd_password:
+
+####################
+# RabbitMQ options
+####################
+rabbitmq_password:
+rabbitmq_cluster_cookie:
+outward_rabbitmq_password:
+outward_rabbitmq_cluster_cookie:
+
+####################
+# HAProxy options
+####################
+haproxy_password:
+keepalived_password:
+
+####################
+# Kibana options
+####################
+kibana_password:
+
+####################
+# etcd options
+####################
+etcd_cluster_token:
+
+####################
+# redis options
+####################
+redis_master_password:
diff --git a/lib/files/settings.xml b/lib/files/settings.xml
new file mode 100644
index 0000000..862a3e8
--- /dev/null
+++ b/lib/files/settings.xml
@@ -0,0 +1,369 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
+ <!-- offline | Determines whether maven should attempt to connect to the
+ network when executing a build. | This will have an effect on artifact downloads,
+ artifact deployment, and others. | | Default: false <offline>false</offline> -->
+ <!-- proxies | This is a list of proxies which can be used on this machine
+ to connect to the network. | Unless otherwise specified (by system property
+ or command-line switch), the first proxy | specification in this list marked
+ as active will be used. | -->
+%PROXIES_START%
+%PROXIES% %HTTP_PROXY%
+%PROXIES% %HTTPS_PROXY%
+%PROXIES_END%
+ <!-- mirrors | This is a list of mirrors to be used in downloading artifacts
+ from remote repositories. | | It works like this: a POM may declare a repository
+ to use in resolving certain artifacts. | However, this repository may have
+ problems with heavy traffic at times, so people have mirrored | it to several
+ places. | | That repository definition will have a unique id, so we can create
+ a mirror reference for that | repository, to be used as an alternate download
+ site. The mirror site will be the preferred | server for that repository.
+ | -->
+ <!-- profiles | This is a list of profiles which can be activated in a variety
+ of ways, and which can modify | the build process. Profiles provided in the
+ settings.xml are intended to provide local machine- | specific paths and
+ repository locations which allow the build to work in the local environment.
+ | | For example, if you have an integration testing plugin - like cactus
+ - that needs to know where | your Tomcat instance is installed, you can provide
+ a variable here such that the variable is | dereferenced during the build
+ process to configure the cactus plugin. | | As noted above, profiles can
+ be activated in a variety of ways. One way - the activeProfiles | section
+ of this document (settings.xml) - will be discussed later. Another way essentially
+ | relies on the detection of a system property, either matching a particular
+ value for the property, | or merely testing its existence. Profiles can also
+ be activated by JDK version prefix, where a | value of '1.4' might activate
+ a profile when the build is executed on a JDK version of '1.4.2_07'. | Finally,
+ the list of active profiles can be specified directly from the command line.
+ | | NOTE: For profiles defined in the settings.xml, you are restricted to
+ specifying only artifact | repositories, plugin repositories, and free-form
+ properties to be used as configuration | variables for plugins in the POM.
+ | | -->
+ <profiles>
+ <profile>
+ <id>00_maven</id>
+ <repositories>
+ <repository>
+ <id>00_maven</id>
+ <url>https://maven.restlet.com</url>
+ </repository>
+ </repositories>
+ </profile>
+ <profile>
+ <id>10_nexus</id>
+ <repositories>
+ <repository>
+ <id>10_nexus</id>
+ <url>http://repo.maven.apache.org/maven2/</url>
+ <releases>
+ <enabled>true</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>10_nexus</id>
+ <url>http://repo.maven.apache.org/maven2/</url>
+ <releases>
+ <enabled>true</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+ <profile>
+ <id>20_openecomp-public</id>
+ <repositories>
+ <repository>
+ <id>20_openecomp-public</id>
+ <name>20_openecomp-public</name>
+ <url>https://nexus.onap.org/content/repositories/public/</url>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>daily</updatePolicy>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>20_openecomp-public</id>
+ <name>20_openecomp-public</name>
+ <url>https://nexus.onap.org/content/repositories/public/</url>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>daily</updatePolicy>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+ <profile>
+ <id>30_openecomp-staging</id>
+ <repositories>
+ <repository>
+ <id>30_openecomp-staging</id>
+ <name>30_openecomp-staging</name>
+ <url>https://nexus.onap.org/content/repositories/staging/</url>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>daily</updatePolicy>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>30_openecomp-staging</id>
+ <name>30_openecomp-staging</name>
+ <url>https://nexus.onap.org/content/repositories/staging/</url>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>daily</updatePolicy>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+ <profile>
+ <id>40_openecomp-release</id>
+ <repositories>
+ <repository>
+ <id>40_openecomp-release</id>
+ <name>40_openecomp-release</name>
+ <url>https://nexus.onap.org/content/repositories/releases/</url>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>daily</updatePolicy>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>40_openecomp-release</id>
+ <name>40_openecomp-release</name>
+ <url>https://nexus.onap.org/content/repositories/releases/</url>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>daily</updatePolicy>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+ <profile>
+ <id>50_openecomp-snapshots</id>
+ <repositories>
+ <repository>
+ <id>50_openecomp-snapshot</id>
+ <name>50_openecomp-snapshot</name>
+ <url>https://nexus.onap.org/content/repositories/snapshots/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>50_openecomp-snapshot</id>
+ <name>50_openecomp-snapshot</name>
+ <url>https://nexus.onap.org/content/repositories/snapshots/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+ <profile>
+ <id>60_opendaylight-release</id>
+ <repositories>
+ <repository>
+ <id>60_opendaylight-mirror</id>
+ <name>60_opendaylight-mirror</name>
+ <url>https://nexus.opendaylight.org/content/repositories/public/</url>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>daily</updatePolicy>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>60_opendaylight-mirror</id>
+ <name>60_opendaylight-mirror</name>
+ <url>https://nexus.opendaylight.org/content/repositories/public/</url>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>daily</updatePolicy>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+ <profile>
+ <id>70_opendaylight-snapshots</id>
+ <repositories>
+ <repository>
+ <id>70_opendaylight-snapshot</id>
+ <name>70_opendaylight-snapshot</name>
+ <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>70_opendaylight-snapshot</id>
+ <name>70_opendaylight-snapshot</name>
+ <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+ <profile>
+ <id>80_onap</id>
+ <repositories>
+ <repository>
+ <id>onap-snapshots</id>
+ <name>onap-snapshots</name>
+ <url>https://nexus.onap.org/content/repositories/snapshots/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </repository>
+ <repository>
+ <id>onap-staging</id>
+ <name>onap-staging</name>
+ <url>https://nexus.onap.org/content/repositories/staging/</url>
+ <releases>
+ <enabled>true</enabled>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </repository>
+ <repository>
+ <id>onap-releases</id>
+ <name>onap-releases</name>
+ <url>https://nexus.onap.org/content/repositories/releases/</url>
+ <releases>
+ <enabled>true</enabled>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </repository>
+ <repository>
+ <id>onap-public</id>
+ <name>onap-public</name>
+ <url>https://nexus.onap.org/content/repositories/public/</url>
+ <releases>
+ <enabled>true</enabled>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>onap-snapshots</id>
+ <name>onap-snapshots</name>
+ <url>https://nexus.onap.org/content/repositories/snapshots/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </pluginRepository>
+ <pluginRepository>
+ <id>onap-staging</id>
+ <name>onap-staging</name>
+ <url>https://nexus.onap.org/content/repositories/staging/</url>
+ <releases>
+ <enabled>true</enabled>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </pluginRepository>
+ <pluginRepository>
+ <id>onap-releases</id>
+ <name>onap-releases</name>
+ <url>https://nexus.onap.org/content/repositories/releases/</url>
+ <releases>
+ <enabled>true</enabled>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </pluginRepository>
+ <pluginRepository>
+ <id>onap-public</id>
+ <name>onap-public</name>
+ <url>https://nexus.onap.org/content/repositories/public/</url>
+ <releases>
+ <enabled>true</enabled>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+ </profiles>
+ <activeProfiles>
+ <activeProfile>00_maven</activeProfile>
+ <activeProfile>10_nexus</activeProfile>
+ <activeProfile>20_openecomp-public</activeProfile>
+ <activeProfile>30_openecomp-staging</activeProfile>
+ <activeProfile>40_openecomp-release</activeProfile>
+ <activeProfile>50_openecomp-snapshots</activeProfile>
+ <activeProfile>60_opendaylight-release</activeProfile>
+ <activeProfile>70_opendaylight-snapshots</activeProfile>
+ <activeProfile>80_onap</activeProfile>
+ </activeProfiles>
+</settings>
diff --git a/lib/functions b/lib/functions
new file mode 100755
index 0000000..f40761f
--- /dev/null
+++ b/lib/functions
@@ -0,0 +1,450 @@
+#!/bin/bash
+
+source /var/onap/commons
+source /var/onap/config/env-vars
+source /var/onap/_composed_functions
+source /var/onap/_onap_functions
+
+export MTU=$(/sbin/ifconfig | grep MTU | sed 's/.*MTU://' | sed 's/ .*//' |sort -n | head -1)
+export NIC=$(ip route get 8.8.8.8 | awk '{ print $5; exit }')
+export IP_ADDRESS=$(ifconfig $NIC | grep "inet addr" | tr -s ' ' | cut -d' ' -f3 | cut -d':' -f2)
+
+mvn_conf_file=/root/.m2/settings.xml
+
+# configure_dns() - DNS/GW IP address configuration
+function configure_dns {
+ echo "nameserver 10.0.0.1" >> /etc/resolvconf/resolv.conf.d/head
+ resolvconf -u
+}
+
+# get_next_ip() - Function that provides the next IP address
+function get_next_ip {
+ local ip=${1:-$IP_ADDRESS}
+ ip_hex=$(printf '%.2X%.2X%.2X%.2X\n' `echo $ip | sed -e 's/\./ /g'`)
+ next_ip_hex=$(printf %.8X `echo $(( 0x$ip_hex + 1 ))`)
+ echo $(printf '%d.%d.%d.%d\n' `echo $next_ip_hex | sed -r 's/(..)/0x\1 /g'`)
+}
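+
+# Example: get_next_ip 10.0.0.1 prints 10.0.0.2.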
+
+# _git_timed() - git can sometimes get itself infinitely stuck with transient network
+# errors or other issues with the remote end. This wraps git in a
+# timeout/retry loop and is intended to watch over non-local git
+# processes that might hang.
+function _git_timed {
+ local count=0
+ local timeout=0
+
+ install_package git
+ until timeout -s SIGINT ${timeout} git "$@"; do
+ # 124 is timeout(1)'s special return code when it reached the
+ # timeout; otherwise assume fatal failure
+ if [[ $? -ne 124 ]]; then
+ exit 1
+ fi
+
+ count=$(($count + 1))
+ if [ $count -eq 3 ]; then
+ exit 1
+ fi
+ sleep 5
+ done
+}
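+# Example (illustrative arguments): "_git_timed clone
+# https://git.onap.org/demo /opt/onap/demo". Note that $timeout is 0 here,
+# which makes timeout(1) impose no time limit; a plain git failure (exit
+# code other than 124) therefore aborts on the first attempt, and the
+# three-retry loop only engages when git is killed by the timeout signal.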
+
+# clone_repo() - Clone a Git repository into a specific folder
+function clone_repo {
+ local repo_url=${3:-"https://git.onap.org/"}
+ local repo=$1
+ local dest_folder=${2:-$git_src_folder/$repo}
+ if [ ! -d $dest_folder ]; then
+ if [[ "$debug" == "False" ]]; then
+ _git_timed clone --quiet ${repo_url}${repo} $dest_folder
+ else
+ _git_timed clone ${repo_url}${repo} $dest_folder
+ fi
+ fi
+}
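+# Example: "clone_repo demo" clones https://git.onap.org/demo into
+# $git_src_folder/demo, while "clone_repo demo /tmp/demo" picks an explicit
+# destination; an already existing destination folder is left untouched.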
+
+# clone_repos() - Function that clones source repositories for a given project
+function clone_repos {
+ local project=$1
+ local repo_name=${2:-$project}
+
+ for repo in ${repos[$project]}; do
+ clone_repo $repo ${src_folders[$project]}${repo#*$repo_name}
+ done
+}
+
+# _install_bind() - Install bind utils
+function _install_bind {
+ install_packages bind9 bind9utils
+}
+
+# install_java() - Install java binaries
+function install_java {
+ if is_package_installed openjdk-8-jdk; then
+ return
+ fi
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ install_package software-properties-common
+ add-apt-repository -y ppa:openjdk-r/ppa
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
+ update_repos
+
+ # Remove Java 7
+ uninstall_packages default-jre openjdk-7-jdk openjdk-7-jre openjdk-7-jre-headless
+
+ install_package openjdk-8-jdk
+ # ca-certificates-java is not a dependency in the Oracle JDK/JRE so this must be explicitly installed.
+ /var/lib/dpkg/info/ca-certificates-java.postinst configure
+}
+
+# install_maven() - Install maven binaries
+function install_maven {
+ if is_package_installed maven3; then
+ return
+ fi
+ install_java
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ install_package software-properties-common
+ add-apt-repository -y ppa:andrei-pozolotin/maven3
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
+ update_repos
+ install_package maven3
+
+ # Remove Java 7
+ uninstall_package openjdk-7-jdk
+
+ _configure_maven
+}
+
+# _configure_docker_settings() - Configures Docker settings
+function _configure_docker_settings {
+ local docker_conf_backup=/tmp/docker.backup
+ local docker_conf=/etc/default/docker
+ local chameleonsocks_filename=chameleonsocks.sh
+ local max_concurrent_downloads=${1:-3}
+
+ cp ${docker_conf} ${docker_conf_backup}
+ if [ $http_proxy ]; then
+ echo "export http_proxy=$http_proxy" >> $docker_conf
+ fi
+ if [ $https_proxy ]; then
+ echo "export https_proxy=$https_proxy" >> $docker_conf
+ #If you have a socks proxy, then use that to connect to the nexus repo
+ #via a redsocks container
+ if [ $socks_proxy ]; then
+ wget https://raw.githubusercontent.com/crops/chameleonsocks/master/$chameleonsocks_filename
+ chmod 755 $chameleonsocks_filename
+ socks=$(echo $socks_proxy | sed -e "s/^.*\///" | sed -e "s/:.*$//")
+ port=$(echo $socks_proxy | sed -e "s/^.*://")
+ PROXY=$socks PORT=$port ./$chameleonsocks_filename --install
+ rm $chameleonsocks_filename
+ cp ${docker_conf_backup} ${docker_conf}
+ fi
+ fi
+ rm ${docker_conf_backup}
+
+ echo "DOCKER_OPTS=\"-H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --max-concurrent-downloads $max_concurrent_downloads \"" >> $docker_conf
+ usermod -aG docker $USER
+
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ service docker restart
+ sleep 10
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
+}
+
+# install_nodejs() - Download and install NodeJS
+function install_nodejs {
+ if is_package_installed nodejs; then
+ return
+ fi
+ curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash -
+ install_package nodejs
+
+ # Update NPM to latest version
+ npm install npm -g
+}
+
+# install_python() - Install Python 2.7 and other tools necessary for development.
+function install_python {
+ install_packages python2.7 python-dev
+}
+
+# _install_pip() - Install Python Package Manager
+function _install_pip {
+ install_python
+ if ! which pip; then
+ curl -sL https://bootstrap.pypa.io/get-pip.py | python
+ fi
+}
+
+# install_python_package() - Install python modules
+function install_python_package {
+ local python_packages=$@
+
+ _install_pip
+ pip install $python_packages
+}
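+# Example: "install_python_package tox ansible" bootstraps pip if needed
+# and installs both modules in a single pip invocation.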
+
+# install_python_requirements() - Install the python modules listed in a project's requirements.txt file
+function install_python_requirements {
+ local python_project_path=$1
+
+ _install_pip
+ pushd $python_project_path
+ pip install -r requirements.txt
+ popd
+}
+
+# install_docker() - Download and install docker-engine
+function install_docker {
+ if $(docker version &>/dev/null); then
+ return
+ fi
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ install_packages software-properties-common linux-image-extra-$(uname -r) linux-image-extra-virtual apt-transport-https ca-certificates curl
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+ add-apt-repository \
+ "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+ $(lsb_release -cs) stable"
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
+ update_repos
+
+ install_package docker-ce
+ _configure_docker_settings
+}
+
+# pull_docker_image() - Pull a Docker container image from the public Docker Hub registry
+function pull_docker_image {
+ install_docker
+ local image=$1
+ local tag=$2
+ docker pull ${image}
+ if [ ${tag} ]; then
+ docker tag ${image} $tag
+ fi
+}
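+# Example: "pull_docker_image mariadb:10" simply pulls the image, while an
+# optional second argument such as local/mariadb:10 (illustrative name)
+# retags it after the pull.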
+
+# wait_docker_pull() - Function that waits for all docker pull processes
+function wait_docker_pull {
+ local counter=60
+ local delay=${1:-60}
+
+ sleep $delay
+ while [ $(ps -ef | grep "docker pull" | wc -l) -gt 1 ]; do
+ sleep $delay
+ counter=$((counter - 1))
+ if [ "$counter" -eq 0 ]; then
+ break
+ fi
+ done
+}
+
+# run_docker_image() - Starts a Docker container
+function run_docker_image {
+ install_docker
+ docker run $@
+}
+
+# run_docker_compose() - Ensures that docker-compose is installed and runs it in the background
+function run_docker_compose {
+ local folder=$1
+
+ install_docker_compose
+ pushd $folder
+ /opt/docker/docker-compose up -d
+ popd
+}
+
+# install_docker_compose() - Download and install docker-compose
+function install_docker_compose {
+ local docker_compose_version=${1:-1.12.0}
+ if [ ! -d /opt/docker ]; then
+ mkdir /opt/docker
+ curl -L https://github.com/docker/compose/releases/download/$docker_compose_version/docker-compose-`uname -s`-`uname -m` > /opt/docker/docker-compose
+ chmod +x /opt/docker/docker-compose
+ fi
+}
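+# Example: "install_docker_compose 1.17.0" (any published docker-compose
+# release tag) pins the binary installed at /opt/docker/docker-compose;
+# 1.12.0 is used when no version is given, and an existing /opt/docker
+# folder skips the installation entirely.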
+
+# install_chefdk() - Install ChefDK package
+function install_chefdk {
+ local chefdk_version="2.4.17"
+
+ if is_package_installed chefdk; then
+ return
+ fi
+ pushd $(mktemp -d)
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ chefdk_pkg="chefdk_$chefdk_version-1_amd64.deb"
+ chefdk_url="https://packages.chef.io/files/stable/chefdk/$chefdk_version/ubuntu/$VERSION_ID/$chefdk_pkg"
+
+ wget $chefdk_url
+ dpkg -i $chefdk_pkg
+ apt-get install -f -y
+ ;;
+ rhel|centos|fedora)
+ rpm -Uvh "https://packages.chef.io/files/stable/chefdk/$chefdk_version/el/7/chefdk-$chefdk_version-1.el7.x86_64.rpm"
+ ;;
+ esac
+ popd
+}
+
+# _install_ODL() - Download and install the OpenDaylight SDN controller
+function _install_ODL {
+ if [ ! -d /opt/opendaylight/current ]; then
+ mkdir -p /opt/opendaylight/
+ wget "https://nexus.opendaylight.org/content/repositories/public/org/opendaylight/integration/distribution-karaf/"$odl_version"/distribution-karaf-"$odl_version".tar.gz" -P /opt/
+ tar xvf "/opt/distribution-karaf-"$odl_version".tar.gz" -C /tmp/
+ mv "/tmp/distribution-karaf-"$odl_version /opt/opendaylight/current
+ rm -rf "/opt/distribution-karaf-"$odl_version".tar.gz"
+ fi
+}
+
+# start_ODL() - Start the OpenDaylight SDN controller
+function start_ODL {
+ _install_ODL
+ if [ -d /opt/opendaylight ]; then
+ export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/jre
+ /opt/opendaylight/current/bin/start
+ sleep 180
+ /opt/opendaylight/current/bin/client feature:install odl-dlux-all
+ fi
+}
+
+# compile_src() - Function that compiles the Java source code through Maven
+function compile_src {
+ local src_folder=$1
+ pushd $src_folder
+ local mvn_build='mvn clean install -DskipTests=true -Dmaven.test.skip=true -Dmaven.javadoc.skip=true -Dadditionalparam=-Xdoclint:none'
+ if [[ "$debug" == "False" ]]; then
+ mvn_build+=" -q"
+ fi
+ if [ -f pom.xml ]; then
+ install_maven
+ echo "Compiling $src_folder folder..."
+ eval $mvn_build
+ fi
+ popd
+}
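+# Example: "compile_src ${src_folders[aai]}/aai-common" runs the maven
+# build quietly when $debug is False; folders without a pom.xml are
+# visited but not compiled.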
+
+# compile_repos() - Function that compiles source repositories for a given project
+function compile_repos {
+ local project=$1
+
+ for repo in ${repos[$project]}; do
+ compile_src ${src_folders[$project]}${repo#*$project}
+ done
+}
+
+# build_docker_image() - Build Docker container image from source code
+function build_docker_image {
+ local src_folder=$1
+ local profile=$2
+ install_docker
+ pushd $src_folder
+
+ if [ -f pom.xml ]; then
+ install_maven
+ # Cleanup external repo
+ sed -i 's|${docker.push.registry}/||g' pom.xml
+ local docker_build="mvn clean package docker:build -DskipTests=true -Dmaven.test.skip=true -Dmaven.javadoc.skip=true"
+ if [ $profile ]; then
+ docker_build+=" -P $profile"
+ fi
+ if [[ "$debug" == "False" ]]; then
+ docker_build+=" -q"
+ fi
+ if [ $http_proxy ]; then
+ if ! grep -ql "docker.buildArg.http_proxy" pom.xml ; then
+ docker_build+=" -Ddocker.buildArg.http_proxy=$http_proxy"
+ fi
+ if ! grep -ql "docker.buildArg.HTTP_PROXY" pom.xml ; then
+ docker_build+=" -Ddocker.buildArg.HTTP_PROXY=$http_proxy"
+ fi
+ fi
+ if [ $https_proxy ]; then
+ if ! grep -ql "docker.buildArg.https_proxy" pom.xml ; then
+ docker_build+=" -Ddocker.buildArg.https_proxy=$https_proxy"
+ fi
+ if ! grep -ql "docker.buildArg.HTTPS_PROXY" pom.xml ; then
+ docker_build+=" -Ddocker.buildArg.HTTPS_PROXY=$https_proxy"
+ fi
+ fi
+ elif [ -f Dockerfile ]; then
+ # NOTE: Workaround for dmaapbc images
+ sed -i '/LocalKey/d' Dockerfile
+ sed -i "s/nexus3.onap.org\:10003\///g" Dockerfile
+ local docker_build="docker build -t $profile -f ./Dockerfile ."
+ if [ $http_proxy ]; then
+ docker_build+=" --build-arg http_proxy=$http_proxy"
+ docker_build+=" --build-arg HTTP_PROXY=$http_proxy"
+ fi
+ if [ $https_proxy ]; then
+ docker_build+=" --build-arg https_proxy=$https_proxy"
+ docker_build+=" --build-arg HTTPS_PROXY=$https_proxy"
+ fi
+ fi
+ echo $docker_build
+ eval $docker_build
+ popd
+}
+
+# mount_external_partition() - Create partition and mount the external volume
+function mount_external_partition {
+ local dev_name="/dev/$1"
+ local mount_dir=$2
+
+ sfdisk $dev_name << EOF
+;
+EOF
+ mkfs -t ext4 ${dev_name}1
+ mkdir -p $mount_dir
+ mount ${dev_name}1 $mount_dir
+ echo "${dev_name}1 $mount_dir ext4 errors=remount-ro,noatime,barrier=0 0 1" >> /etc/fstab
+}
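+# Example: "mount_external_partition sdb /data/" creates one partition
+# spanning /dev/sdb (the lone ";" line fed to sfdisk accepts all
+# defaults), formats /dev/sdb1 as ext4 and persists the mount through
+# /etc/fstab.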
+
+# add_no_proxy_value() - Add no_proxy values into environment file, used for internal IPs generated at deploy time
+function add_no_proxy_value {
+ if [[ `grep "no_proxy" /etc/environment` ]]; then
+ sed -i.bak "s/^no_proxy.*$/&,$1/" /etc/environment
+ else
+ echo "no_proxy=$1" >> /etc/environment
+ fi
+ if [[ `grep "NO_PROXY" /etc/environment` ]]; then
+ sed -i.bak "s/^NO_PROXY.*$/&,$1/" /etc/environment
+ else
+ echo "NO_PROXY=$1" >> /etc/environment
+ fi
+}
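+# Example: "add_no_proxy_value 192.168.53.1" appends the address to both
+# the no_proxy and NO_PROXY entries of /etc/environment, creating either
+# entry when it is missing.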
+
diff --git a/lib/mr b/lib/mr
new file mode 100755
index 0000000..bba7486
--- /dev/null
+++ b/lib/mr
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+# get_mr_images() - Function that retrieves the Message Router Docker images
+function get_mr_images {
+ pull_docker_image attos/dmaap
+ pull_docker_image wurstmeister/zookeeper
+}
+
+# install_message_router() - Downloads and configures Message Router source code
+function install_message_router {
+ install_docker_compose
+
+ pushd ${src_folders[mr]}
+ bash deploy.sh
+ popd
+}
+
+# init_mr() - Function that initializes Message Router services
+function init_mr {
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repo dcae/demo/startup/message-router ${src_folders[mr]}
+ fi
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_mr_images
+ if [[ "$skip_install" == "False" ]]; then
+ install_message_router
+ fi
+ fi
+}
diff --git a/lib/msb b/lib/msb
new file mode 100755
index 0000000..bcf27fe
--- /dev/null
+++ b/lib/msb
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+# _build_msb_images() - Function that creates Microservices Bus Docker images from source code
+function _build_msb_images {
+ if [[ "$compile_repo" != "True" ]]; then
+ compile_repos "msb"
+ fi
+
+ build_docker_image ${src_folders[msb]}/apigateway/distributions/msb-apigateway/src/main/basedocker onap/msb/msb_base
+ build_docker_image ${src_folders[msb]}/apigateway/distributions/msb-apigateway/src/main/docker onap/msb/msb_apigateway
+ build_docker_image ${src_folders[msb]}/discovery/distributions/msb-discovery/src/main/docker onap/msb/msb_discovery
+}
+
+# get_msb_images() - Function that retrieves the Microservices Bus images
+function get_msb_images {
+ pull_docker_image "consul:0.9.3"
+ if [[ "$build_image" == "True" ]]; then
+ _build_msb_images
+ else
+ unset docker_version
+ for image in base apigateway discovery; do
+ pull_onap_image msb/msb_$image
+ done
+ fi
+}
+
+# install_msb() - Downloads and configures Microservices Bus source code
+function install_msb {
+ run_docker_image -d --net=host --name msb_consul consul:0.9.3
+ run_docker_image -d --net=host --name msb_discovery nexus3.onap.org:10001/onap/msb/msb_discovery
+ run_docker_image -d --net=host -e "ROUTE_LABELS=visualRange:1" --name msb_internal_apigateway nexus3.onap.org:10001/onap/msb/msb_apigateway
+}
+
+# init_msb() - Function that initializes Microservices Bus services
+function init_msb {
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "msb"
+ if [[ "$compile_repo" == "True" ]]; then
+ compile_repos "msb"
+ fi
+ fi
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_msb_images
+ if [[ "$skip_install" == "False" ]]; then
+ install_msb
+ fi
+ fi
+}
diff --git a/lib/mso b/lib/mso
new file mode 100755
index 0000000..6dd0676
--- /dev/null
+++ b/lib/mso
@@ -0,0 +1,94 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+# get_mso_images() - Function that retrieves or creates MSO Docker images
+function get_mso_images {
+ if [[ "$build_image" == "True" ]]; then
+ export GIT_NO_PROJECT=/opt/
+ compile_src ${src_folders[mso]}
+ build_docker_image ${src_folders[mso]}/packages/docker docker
+ fi
+}
+
+# install_mso() - Install MSO Docker configuration project
+function install_mso {
+ MSO_ENCRYPTION_KEY=$(cat /opt/mso/docker-config/encryption.key)
+ echo -n "$openstack_api_key" | openssl aes-128-ecb -e -K $MSO_ENCRYPTION_KEY -nosalt | xxd -c 256 -p > /opt/config/api_key.txt
+
+ # Deployments in OpenStack require a keystone file
+ if [ -e /opt/config/keystone.txt ]; then
+ KEYSTONE_URL=$(cat /opt/config/keystone.txt)
+ DCP_CLLI="DEFAULT_KEYSTONE"
+ AUTH_TYPE="USERNAME_PASSWORD"
+ else
+ KEYSTONE_URL="https://identity.api.rackspacecloud.com/v2.0"
+ DCP_CLLI="RAX_KEYSTONE"
+ AUTH_TYPE="RACKSPACE_APIKEY"
+ fi
+
+ # Update the MSO configuration file.
+ read -d '' MSO_CONFIG_UPDATES <<-EOF
+{
+"default_attributes":
+ {
+ "asdc-connections":
+ {
+ "asdc-controller1":
+ {
+ "environmentName": "$dmaap_topic"
+ }
+ },
+ "mso-po-adapter-config":
+ {
+ "identity_services":
+ [
+ {
+ "dcp_clli": "$DCP_CLLI",
+ "identity_url": "$KEYSTONE_URL",
+ "mso_id": "$openstack_username",
+ "mso_pass": "$openstack_password",
+ "admin_tenant": "service",
+ "member_role": "admin",
+ "tenant_metadata": "true",
+ "identity_server_type": "KEYSTONE",
+ "identity_authentication_type": "$AUTH_TYPE"
+ }
+ ]
+ }
+ }
+}
+EOF
+ export MSO_CONFIG_UPDATES
+ export MSO_DOCKER_IMAGE_VERSION=$docker_version
+
+ install_docker
+ install_docker_compose
+ # Deploy the environment
+ pushd ${src_folders[mso]}/docker-config
+ chmod +x deploy.sh
+ if [[ "$build_image" == "True" ]]; then
+ bash deploy.sh
+ else
+ # This script takes two Nexus repos as input (the first for the MSO image, the second for MariaDB)
+ bash deploy.sh $nexus_docker_repo $nexus_username $nexus_password $nexus_docker_repo $nexus_username $nexus_password
+ fi
+ popd
+}
+
+# init_mso() - Function that initializes MSO services
+function init_mso {
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "mso"
+ if [[ "$compile_repo" == "True" ]]; then
+ compile_repos "mso"
+ fi
+ fi
+
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_mso_images
+ if [[ "$skip_install" == "False" ]]; then
+ install_mso
+ fi
+ fi
+}
diff --git a/lib/multicloud b/lib/multicloud
new file mode 100755
index 0000000..ff6f970
--- /dev/null
+++ b/lib/multicloud
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+openstack_release="newton"
+
+# _build_multicloud_images() - Function that builds docker images from source code
+function _build_multicloud_images {
+ install_docker
+ pushd ${src_folders[multicloud]}/openstack/$openstack_release
+ install_python_requirements .
+ python setup.py develop
+ #bash build_image.sh
+ popd
+}
+
+# get_multicloud_images() - Function that builds or retrieves MultiCloud Docker images
+function get_multicloud_images {
+ if [[ "$build_image" == "True" ]]; then
+ _build_multicloud_images
+ else
+ pull_onap_image multicloud/openstack-$openstack_release
+ fi
+}
+
+# install_multicloud() - Function that starts MultiCloud services
+function install_multicloud {
+ #run_docker_compose ${src_folders[multicloud]}/openstack/$openstack_release
+ if [[ "$build_image" == "True" ]]; then
+ multicloud-api --port 9003 --host 0.0.0.0 &
+ else
+ docker_id=`docker images | grep onap/multicloud/openstack-$openstack_release | grep latest | awk '{print $3; exit}'`
+ docker run -d -p 0.0.0.0:9003:9003 $docker_id
+ fi
+}
+
+# init_multicloud() - Function that initializes Multi Cloud services
+function init_multicloud {
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "multicloud"
+ if [[ "$compile_repo" == "True" ]]; then
+ compile_repos "multicloud"
+ fi
+ fi
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_multicloud_images
+ if [[ "$skip_install" == "False" ]]; then
+ install_multicloud
+ fi
+ fi
+}
diff --git a/lib/oom b/lib/oom
new file mode 100755
index 0000000..d52c029
--- /dev/null
+++ b/lib/oom
@@ -0,0 +1,207 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+RANCHER_PORT=8880
+oom_delay=30
+export RANCHER_URL=http://localhost:$RANCHER_PORT
+export RANCHER_ACCESS_KEY='access_key'
+export RANCHER_SECRET_KEY='secret_key'
+
+# _install_docker() - Function that installs the Docker version required by Rancher
+function _install_docker {
+ if ! $(docker version &>/dev/null); then
+ curl https://releases.rancher.com/install-docker/1.12.sh | sh
+ _configure_docker_settings 15
+ fi
+}
+
+# _pull_rancher_images() - Function that retrieves Rancher images required for k8s
+function _pull_rancher_images {
+ for image in "net:v0.13.5" "k8s:v1.8.5-rancher3" \
+"lb-service-rancher:v0.7.17" "network-manager:v0.7.18" "metadata:v0.9.5" \
+"kubectld:v0.8.5" "kubernetes-agent:v0.6.6" "dns:v0.15.3" \
+"kubernetes-auth:v0.0.8" "healthcheck:v0.3.3" "etcd:v2.3.7-13" \
+"etc-host-updater:v0.0.3" "net:holder"; do
+ pull_docker_image rancher/$image &
+ done
+}
+
+# _pull_k8s_images() - Function that retrieves Google k8s images
+function _pull_k8s_images {
+ for image in "kubernetes-dashboard-amd64:v1.7.1" \
+"k8s-dns-sidecar-amd64:1.14.5" "k8s-dns-kube-dns-amd64:1.14.5" \
+"k8s-dns-dnsmasq-nanny-amd64:1.14.5" "heapster-influxdb-amd64:v1.3.3" \
+"heapster-grafana-amd64:v4.4.3" "heapster-amd64:v1.4.0" "pause-amd64:3.0"; do
+ pull_docker_image gcr.io/google_containers/$image &
+ done
+}
+
+# _install_rancher() - Function that installs Rancher CLI and container
+function _install_rancher {
+ local rancher_version=v0.6.5
+ local rancher_server_version=v1.6.10
+ local rancher_server=rancher/server:$rancher_server_version
+
+ if [ ! -d /opt/rancher/current ]; then
+ mkdir -p /opt/rancher/current
+ wget https://github.com/rancher/cli/releases/download/$rancher_version/rancher-linux-amd64-$rancher_version.tar.gz
+ tar -xzf rancher-linux-amd64-$rancher_version.tar.gz -C /tmp
+ mv /tmp/rancher-$rancher_version/rancher /opt/rancher/current/
+ fi
+
+ _install_docker
+ pull_docker_image $rancher_server
+ run_docker_image -d --restart=unless-stopped -p $RANCHER_PORT:8080 $rancher_server
+ while true; do
+ if curl --fail -X GET $RANCHER_URL; then
+ break
+ fi
+ echo "waiting for racher"
+ sleep $oom_delay
+ done
+}
+
+# _install_kubernetes() - Function that deploys Kubernetes via Rancher host registration
+function _install_kubernetes {
+ local rancher_agent_version=v1.2.7
+ local rancher_agent=rancher/agent:$rancher_agent_version
+
+ _install_rancher
+
+ _pull_rancher_images
+ _pull_k8s_images
+ pull_docker_image $rancher_agent
+ wait_docker_pull
+
+ pushd /opt/rancher/current/
+ export RANCHER_ENVIRONMENT=`./rancher env create -t kubernetes onap_on_kubernetes`
+ popd
+
+ install_python_package rancher-agent-registration
+ export no_proxy=$no_proxy,$IP_ADDRESS
+ rancher-agent-registration --host-ip $IP_ADDRESS --url http://$IP_ADDRESS:$RANCHER_PORT --environment $RANCHER_ENVIRONMENT --key $RANCHER_ACCESS_KEY --secret $RANCHER_SECRET_KEY
+}
+
+# _install_kubectl() - Function that installs kubectl as client for kubernetes
+function _install_kubectl {
+ if ! $(kubectl version &>/dev/null); then
+ rm -rf ~/.kube
+ curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
+ chmod +x ./kubectl
+ mv ./kubectl /usr/local/bin/kubectl
+ mkdir ~/.kube
+ pushd ~/.kube
+ python /var/onap/files/kubectl_config_generator.py
+ popd
+ fi
+}
+
+# _install_helm() - Function that installs Helm, the Kubernetes package manager
+function _install_helm {
+ local helm_version=v2.3.0
+
+ if ! $(helm version &>/dev/null); then
+ wget http://storage.googleapis.com/kubernetes-helm/helm-${helm_version}-linux-amd64.tar.gz
+ tar -zxvf helm-${helm_version}-linux-amd64.tar.gz -C /tmp
+ mv /tmp/linux-amd64/helm /usr/local/bin/helm
+ helm init
+ fi
+}
+
+# _pull_images_from_yaml_file() - Function that parses a YAML file and pulls the images referenced in it
+function _pull_images_from_yaml_file {
+ local values_file=$1
+ local prefix=$2
+ local s='[[:space:]]*'
+ local w='[a-zA-Z0-9_]*'
+ fs=`echo @|tr @ '\034'`
+
+ for line in $(sed -ne "s|^\($s\):|\1|" \
+-e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
+-e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $values_file |
+awk -F$fs '{
+indent = length($1)/2;
+vname[indent] = $2;
+for (i in vname) {
+ if (i > indent) {
+ delete vname[i]}
+ }
+ if (length($3) > 0) {
+ vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])(".")}
+ printf("%s%s%s=%s\n", "'$prefix'",vn, $2, $3);
+ }
+}' | grep image); do
+ echo $line
+ if echo $line | grep -q Version ; then
+ pull_docker_image "$image_name:$(echo $line | awk -F "=" '{print $2}')" &
+ else
+ image_name=`echo ${line#*=}`
+ if [[ ${image_name#*${nexus_docker_repo:-nexus3.onap.org:10001}} == *:* ]]; then
+ pull_docker_image $image_name &
+ else
+ pull_docker_image $image_name:latest
+ fi
+ fi
+ done
+}
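+# The sed/awk pipeline above flattens the YAML into dotted key=value
+# pairs, e.g. (illustrative keys) an "image: onap/foo" entry nested under
+# "backend:" is emitted as "backend.image=onap/foo"; only lines matching
+# "image" are kept, and a *Version line reuses the image name captured
+# from the preceding image line.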
+
+# get_oom_images() - Function that retrieves ONAP images from official hub
+function get_oom_images {
+ if [[ "$build_image" == "True" ]]; then
+ # TODO(electrocucaracha): Create a function for calling the build docker function of every ONAP project
+ echo "Not Implemented"
+ else
+ if [[ "$clone_repo" != "True" ]]; then
+ clone_repos "oom"
+ fi
+
+ docker_openecomp_login
+ for values_file in `find ${src_folders[oom]}/kubernetes -name values.yaml -type f`; do
+ _pull_images_from_yaml_file $values_file
+ done
+ docker logout
+ wait_docker_pull
+ fi
+}
+
+# install_oom() - Function that clones OOM and deploys ONAP
+function install_oom {
+ if [[ "$clone_repo" != "True" ]]; then
+ clone_repos "oom"
+ fi
+ pushd ${src_folders[oom]}/kubernetes/oneclick
+ source setenv.bash
+
+ pushd ${src_folders[oom]}/kubernetes/config
+ cp onap-parameters-sample.yaml onap-parameters.yaml
+ ./createConfig.sh -n onap
+ popd
+
+ for app in consul msb mso message-router sdnc vid robot portal policy appc aai sdc dcaegen2 log cli multicloud clamp vnfsdk uui aaf vfc kube2msb; do
+ ./createAll.bash -n onap -a $app
+ done
+ popd
+}
+
+# init_oom() - Function that deploys ONAP using OOM
+function init_oom {
+ mount_external_partition sda /var/lib/docker/
+ _install_kubernetes
+ _install_kubectl
+ _install_helm
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "oom"
+ fi
+
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_oom_images
+ if [[ "$skip_install" == "False" ]]; then
+ until kubectl cluster-info; do
+ echo "waiting for kubernetes host"
+ sleep $oom_delay
+ done
+ install_oom
+ fi
+ fi
+}
diff --git a/lib/openstack b/lib/openstack
new file mode 100755
index 0000000..5e51890
--- /dev/null
+++ b/lib/openstack
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+kolla_config=/etc/kolla
+kolla_build=$kolla_config/kolla-build.conf
+kolla_passwords=$kolla_config/passwords.yml
+kolla_globals=$kolla_config/globals.yml
+kolla_inventory=/var/onap/files/all-in-one
+
+# install_dependencies() - Function that installs Kolla-Ansible requirements
+function install_dependencies {
+ install_docker
+
+ mkdir -p /etc/systemd/system/docker.service.d
+ tee /etc/systemd/system/docker.service.d/kolla.conf <<-'EOF'
+[Service]
+MountFlags=shared
+EOF
+ systemctl daemon-reload
+ systemctl restart docker
+
+ install_python_package ansible docker kolla-ansible python-openstackclient
+}
+
+# configure_deploy() - Function that modifies configuration files
+function configure_deploy {
+ local network_id=$1
+ local enable_opendaylight=${2-False}
+ local openstack_services="main = ceilometer,cinder,glance,heat,horizon,iscsi,keystone,neutron,nova-,swift"
+ nic=$(ip route get $network_id | awk '{ print $4; exit }')
+ ip_address=$(ip route get $network_id | awk '{ print $6; exit }')
+ internal_vip_address=$(get_next_ip $ip_address)
+
+ if [[ `env | grep -i "proxy"` ]]; then
+ add_no_proxy_value $internal_vip_address
+ fi
+
+ mkdir -p $kolla_config
+ cp /var/onap/files/globals.yml $kolla_globals
+ cp /var/onap/files/passwords.yml $kolla_passwords
+ cp /var/onap/files/kolla-build.conf $kolla_build
+ kolla-genpwd
+ echo "network_interface: \"$nic\"" >> $kolla_globals
+ echo "kolla_internal_vip_address: \"$internal_vip_address\"" >> $kolla_globals
+ echo "api_interface: \"{{ network_interface }}\"" >> $kolla_globals
+ if [[ $enable_opendaylight == True ]]; then
+ echo "enable_opendaylight: \"yes\"" >> $kolla_globals
+ openstack_services+=",opendaylight"
+ fi
+ echo $openstack_services >> $kolla_build
+
+ echo "$ip_address $(hostname)" >> /etc/hosts
+}
+
+# get_openstack_images() - Function that retrieves or builds docker images
+function get_openstack_images {
+ if [[ "$build_image" == "True" ]]; then
+ install_python_package kolla
+ kolla-build --config-file $kolla_build
+ else
+ kolla-ansible pull -i $kolla_inventory
+ fi
+}
+
+# deploy_openstack() - Function that provisions an OpenStack deployment
+function deploy_openstack {
+ install_dependencies
+ configure_deploy ${1:-"192.168.53.0"} "True"
+
+ get_openstack_images
+ kolla-ansible deploy -i $kolla_inventory
+ kolla-ansible post-deploy
+ echo "source /etc/kolla/admin-openrc.sh" >> ${HOME}/.bashrc
+}
diff --git a/lib/policy b/lib/policy
new file mode 100755
index 0000000..1e633be
--- /dev/null
+++ b/lib/policy
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+# _build_policy_images() - Function that builds Policy docker images from source code
+function _build_policy_images {
+ compile_src ${src_folders[policy]}/docker
+ pushd ${src_folders[policy]}/docker
+ install_maven
+ mvn prepare-package
+ cp -r target/policy-pe/* policy-pe/
+ cp -r target/policy-drools/* policy-drools
+ install_docker
+ bash docker_verify.sh
+ popd
+}
+
+# get_policy_images() - Function that retrieves Policy docker images
+function get_policy_images {
+ if [[ "$build_image" == "True" ]]; then
+ _build_policy_images
+ else
+ for image in db pe drools nexus; do
+ pull_onap_image policy/policy-$image onap/policy/policy-$image:latest
+ done
+ fi
+}
+
+# install_policy() - Function that installs the Policy services from source code
+function install_policy {
+ pushd ${src_folders[policy]}/docker
+ chmod +x config/drools/drools-tweaks.sh
+ echo $IP_ADDRESS > config/pe/ip_addr.txt
+ run_docker_compose .
+ popd
+}
+
+# init_policy() - Function that initializes Policy services
+function init_policy {
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "policy"
+ if [[ "$compile_repo" == "True" ]]; then
+ compile_repos "policy"
+ fi
+ fi
+
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_policy_images
+ if [[ "$skip_install" == "False" ]]; then
+ install_policy
+ fi
+ fi
+}
diff --git a/lib/portal b/lib/portal
new file mode 100755
index 0000000..fe54698
--- /dev/null
+++ b/lib/portal
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+# clone_all_portal_repos() - Function that clones all Portal source repos.
+function clone_all_portal_repos {
+ for repo in ${repos[portal]}; do
+ if [[ "$repo" == "ui/dmaapbc" ]];then
+ prefix="ui"
+ else
+ prefix="portal"
+ fi
+ clone_repo $repo ${src_folders[portal]}/${repo#*$prefix}
+ done
+}
+
+# compile_all_portal_repos() - Function that compiles all Portal source repos.
+function compile_all_portal_repos {
+ for repo in ${repos[portal]}; do
+ if [[ "$repo" == "ui/dmaapbc" ]];then
+ prefix="ui"
+ else
+ prefix="portal"
+ fi
+ compile_src ${src_folders[portal]}/${repo#*$prefix}
+ done
+}
+
+# _build_portal_images() - Function that builds Portal Docker images from source code
+function _build_portal_images {
+ install_maven
+
+ pushd ${src_folders[portal]}/deliveries
+ chmod +x *.sh
+ export MVN=$(which mvn)
+ export GLOBAL_SETTINGS_FILE=/usr/share/maven3/conf/settings.xml
+ export SETTINGS_FILE=$HOME/.m2/settings.xml
+ bash build_portalapps_dockers.sh
+ popd
+}
+
+# get_portal_images() - Function to get Portal images.
+function get_portal_images {
+ if [[ "$build_image" == "True" ]]; then
+ _build_portal_images
+ else
+ pull_openecomp_image portaldb ecompdb:portal
+ pull_openecomp_image portalapps ep:1610-1
+ fi
+ pull_docker_image mariadb
+}
+
+# _install_mariadb() - Create a MariaDB data volume container
+function _install_mariadb {
+ docker create --name data_vol_portal -v /var/lib/mysql mariadb
+}
+
+# install_portal() - Function that installs the source code of Portal
+function install_portal {
+ install_docker
+ docker rm -f ecompdb_portal
+ docker rm -f 1610-1
+
+ pushd ${src_folders[portal]}/deliveries
+ mkdir -p /PROJECT/OpenSource/UbuntuEP/logs
+ install_package unzip
+ unzip -o etc.zip -d /PROJECT/OpenSource/UbuntuEP/
+
+ _install_mariadb
+ install_docker_compose
+ bash portal_vm_init.sh
+
+ sleep 180
+
+ if [ ! -e /opt/config/boot.txt ]; then
+ install_package mysql-client
+ mysql -u root -p'Aa123456' -h $IP_ADDRESS < Apps_Users_OnBoarding_Script.sql
+ echo "yes" > /opt/config/boot.txt
+ fi
+ popd
+}
+
+# init_portal() - Function that initializes Portal services
+function init_portal {
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_all_portal_repos
+ if [[ "$compile_repo" == "True" ]]; then
+ compile_all_portal_repos
+ fi
+ fi
+
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_portal_images
+ if [[ "$skip_install" == "False" ]]; then
+ install_portal
+ fi
+ fi
+}
diff --git a/lib/robot b/lib/robot
new file mode 100755
index 0000000..70f8cf7
--- /dev/null
+++ b/lib/robot
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+# _setup_ete_folder() - Create and copy ete folder structure
+function _setup_ete_folder {
+ mkdir -p /opt/eteshare/config
+
+ cp ${src_folders[robot]}/integration_* /opt/eteshare/config
+ cp ${src_folders[robot]}/vm_config2robot.sh /opt/eteshare/config
+ cp ${src_folders[robot]}/ete.sh /opt
+ cp ${src_folders[robot]}/demo.sh /opt
+
+ chmod +x /opt/ete.sh
+ chmod +x /opt/demo.sh
+}
+
+# get_robot_images() - Pull or build the Robot Docker images
+function get_robot_images {
+ pull_openecomp_image testsuite
+}
+
+# install_robot() - Run Robot services
+function install_robot {
+ docker rm -f openecompete_container
+ run_docker_image -d --name openecompete_container -v /opt/eteshare:/share -p 88:88 $nexus_docker_repo/openecomp/testsuite:$docker_version
+}
+
+# init_robot() - Function that initializes Robot services
+function init_robot {
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "robot" "testsuite"
+ _setup_ete_folder
+ if [[ "$compile_repo" == "True" ]]; then
+ compile_repos "robot"
+ fi
+ fi
+
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_robot_images
+ if [[ "$skip_install" == "False" ]]; then
+ install_robot
+ fi
+ fi
+}
diff --git a/lib/sdc b/lib/sdc
new file mode 100755
index 0000000..71a5fea
--- /dev/null
+++ b/lib/sdc
@@ -0,0 +1,88 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+# _init_data_folders() - Function that initializes the data folders
+function _init_data_folders {
+ mkdir -p /data/environments
+ mkdir -p /data/scripts
+ mkdir -p /data/logs/BE
+ mkdir -p /data/logs/FE
+ chmod 777 /data
+ chmod 777 /data/logs
+}
+
+# _setup_docker_aliases() - Function that sets up the aliases required by sdc scripts
+function _setup_docker_aliases {
+ cat <<EOL > /root/.bash_aliases
+alias dcls='/data/scripts/docker_clean.sh \$1'
+alias dlog='/data/scripts/docker_login.sh \$1'
+alias drun='/data/scripts/docker_run.sh'
+alias health='/data/scripts/docker_health.sh'
+EOL
+}
+
+# get_sdc_images() - Function that retrieves the SDC docker images
+function get_sdc_images {
+ build_docker_image ${src_folders[sdc]}/sdc-docker-base
+ build_docker_image ${src_folders[sdc]}/utils/webseal-simulator docker
+ if [[ "$build_image" == "True" ]]; then
+ compile_src ${src_folders[sdc]}
+ for project in catalog-fe test-apis-ci; do
+ compile_src ${src_folders[sdc]}/$project
+ done
+ build_docker_image ${src_folders[sdc]}/sdc-os-chef docker
+ else
+ for image in elasticsearch init-elasticsearch cassandra kibana backend frontend sanity; do
+ pull_onap_image sdc-$image &
+ done
+ wait_docker_pull
+ fi
+}
+
+# install_sdc() - Function that renders the environment template and executes the SDC docker scripts
+function install_sdc {
+ local ENV_NAME=$dmaap_topic
+ local MR_IP_ADDR='10.0.11.1'
+
+ pushd ${src_folders[sdc]}/utils/webseal-simulator
+ bash scripts/simulator_docker_run.sh
+ popd
+
+ _init_data_folders
+
+ cp ${src_folders[sdc]}/sdc-os-chef/scripts/{docker_run.sh,docker_health.sh,docker_login.sh,docker_clean.sh,simulator_docker_run.sh} /data/scripts
+ chmod +x /data/scripts/*.sh
+
+ cat ${src_folders[sdc]}/sdc-os-chef/environments/Template.json | sed "s/yyy/"$IP_ADDRESS"/g" > /data/environments/$ENV_NAME.json
+ sed -i "s/xxx/"$ENV_NAME"/g" /data/environments/$ENV_NAME.json
+ sed -i "s/\"ueb_url_list\":.*/\"ueb_url_list\": \""$MR_IP_ADDR","$MR_IP_ADDR"\",/g" /data/environments/$ENV_NAME.json
+ sed -i "s/\"fqdn\":.*/\"fqdn\": [\""$MR_IP_ADDR"\", \""$MR_IP_ADDR"\"]/g" /data/environments/$ENV_NAME.json
+
+ install_docker
+ if [[ "$skip_get_images" == "False" ]]; then
+ bash /data/scripts/docker_run.sh -e $ENV_NAME -l
+ else
+ bash /data/scripts/docker_run.sh -e $ENV_NAME -r $docker_version -p $(echo $nexus_docker_repo | cut -d':' -f2)
+ fi
+ install_chefdk
+}
+
+# init_sdc() - Function that initializes SDC services
+function init_sdc {
+ mount_external_partition sdb /data/
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "sdc"
+ if [[ "$compile_repo" == "True" ]]; then
+ compile_repos "sdc"
+ fi
+ fi
+
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_sdc_images
+ fi
+ if [[ "$skip_install" == "False" ]]; then
+ install_sdc
+ fi
+ _setup_docker_aliases
+}
diff --git a/lib/sdnc b/lib/sdnc
new file mode 100755
index 0000000..8dacf9e
--- /dev/null
+++ b/lib/sdnc
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+source /var/onap/functions
+source /var/onap/ccsdk
+
+# compile_all_sdnc_repos() - Function that compiles all SDNC source repos.
+function compile_all_sdnc_repos {
+ for repo in ${repos[sdnc]}; do
+ if [[ "$repo" == "sdnc/core" ]]; then
+ compile_src ${src_folders[sdnc]}/core/rootpom
+ fi
+ compile_src ${src_folders[sdnc]}${repo#*sdnc}
+ done
+}
+
+# _build_sdnc_images() - Builds SDNC images from source code
+function _build_sdnc_images {
+ local folder=${src_folders[sdnc]}/oam
+
+ get_ccsdk_images
+ install_package unzip
+ # The OAM code depends on all the SDNC repos which should be downloaded and compiled first
+ if [[ "$compile_repo" != "True" ]]; then
+ compile_src $folder
+ fi
+ for dirc in ubuntu sdnc admportal dgbuilder; do
+ build_docker_image $folder/installation/$dirc
+ done
+}
+
+# get_sdnc_images() - Build or retrieve necessary images
+function get_sdnc_images {
+ if [[ "$build_image" == "True" ]]; then
+ _build_sdnc_images
+ else
+ for image in sdnc-image admportal-sdnc-image dgbuilder-sdnc-image; do
+ pull_openecomp_image $image openecomp/$image:latest
+ done
+ fi
+ pull_docker_image mysql/mysql-server:5.6
+}
+
+# install_sdnc() - Download and install SDNC services from source code
+function install_sdnc {
+ run_docker_compose ${src_folders[sdnc]}/oam/installation/src/main/yaml
+}
+
+# init_sdnc() - Function that initializes SDNC services
+function init_sdnc {
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "sdnc"
+ if [[ "$compile_repo" == "True" ]]; then
+ compile_all_sdnc_repos
+ fi
+ fi
+
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_sdnc_images
+ if [[ "$skip_install" == "False" ]]; then
+ start_ODL
+ install_sdnc
+ fi
+ fi
+}
diff --git a/lib/vfc b/lib/vfc
new file mode 100755
index 0000000..64f7df0
--- /dev/null
+++ b/lib/vfc
@@ -0,0 +1,96 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+# compile_all_vfc_repos() - Function that compiles all VF-C source repos.
+function compile_all_vfc_repos {
+ install_python_package tox
+
+ tox_repos=("gvnfm/vnflcm/lcm" "gvnfm/vnfmgr/mgr" "gvnfm/vnfres/res" "nfvo/lcm" \
+ "nfvo/driver/vnfm/gvnfm/gvnfmadapter" "nfvo/driver/vnfm/svnfm/zte/vmanager")
+ for dirc in ${tox_repos[@]}; do
+ pushd ${src_folders[vfc]}/$dirc
+ tox -e py27
+ popd
+ done
+
+ # TODO(sshank): Add compile for other vfc_repos. (Java based.)
+
+ # Java based:
+ # nfvo/catalog
+ # nfvo/driver/ems/ems/sems/boco/ems-driver
+ # nfvo/driver/sfc/zte/sfc-driver
+ # nfvo/driver/vnfm/gvnfm/juju/juju-vnfmadapter
+ # nfvo/driver/vnfm/svnfm/huawei/vnfmadapter
+ # nfvo/resmanagement
+ # nfvo/wfengine
+}
+
+# _build_vfc_image() - Build VFC docker image
+function _build_vfc_image {
+ pushd ${src_folders[vfc]}/$1/docker
+ sed -i "s/^push_image/#push_image/g" build_image.sh
+ sed -i 's|IMAGE_NAME="${DOCKER_REPOSITORY}/${ORG}/${PROJECT}/${IMAGE}"|IMAGE_NAME=${ORG}/${IMAGE}|g' build_image.sh
+ ./build_image.sh
+ popd
+}
+
+# get_vfc_images() - Build or retrieve VF-C docker images
+function get_vfc_images {
+ if [[ "$build_image" == "True" ]]; then
+ install_docker
+ # Separate methods are required since the image build process will change.
+ _build_vfc_image gvnfm/vnflcm/lcm onap/nslcm
+ _build_vfc_image gvnfm/vnfmgr/mgr onap/gvnfmdriver
+ _build_vfc_image gvnfm/vnfres/res onap/vnfres
+ _build_vfc_image nfvo/lcm onap/vnflcm
+ _build_vfc_image nfvo/driver/vnfm/gvnfm/gvnfmadapter
+
+ build_gvnfm_lcm_image
+ build_gvnfm_vnfmgr_image
+ build_gvnfm_vnfres_image
+ build_nfvo_lcm_image
+ build_nfvo_vnfm_gvnfmadapter_image
+ # TODO(sshank): Add other VFC component docker image builds.
+ else
+ for image in gvnfm/vnflcm/lcm gvnfm/vnfmgr/mgr gvnfm/vnfres/res nfvo/lcm nfvo/driver/vnfm/gvnfm/gvnfmadapter; do
+ pull_onap_image vfc/$image
+ done
+ fi
+}
+
+# install_vfc() - Download and install vfc service from source code
+function install_vfc {
+ nslcm_image=`docker images | grep nslcm | grep latest| awk '{print $1 ":" $2}'`
+ vnflcm_image=`docker images | grep vnflcm | grep latest| awk '{print $1 ":" $2}'`
+ vnfmgr_image=`docker images | grep vnfmgr | grep latest| awk '{print $1 ":" $2}'`
+ vnfres_image=`docker images | grep vnfres | grep latest| awk '{print $1 ":" $2}'`
+ gvnfmdriver_image=`docker images | grep gvnfmdriver | grep latest| awk '{print $1 ":" $2}'`
+
+ run_docker_image -d --name vfc-nslcm -p 8403:8403 -e MSB_ADDR=127.0.0.1 $nslcm_image
+ run_docker_image -d --name vfc-vnflcm -p 8801:8801 -e MSB_ADDR=127.0.0.1 $vnflcm_image
+ run_docker_image -d --name vfc-vnfmgr -p 8803:8803 -e MSB_ADDR=127.0.0.1 $vnfmgr_image
+ run_docker_image -d --name vfc-vnfres -p 8802:8802 -e MSB_ADDR=127.0.0.1 $vnfres_image
+ run_docker_image -d --name vfc-gvnfmdriver -p 8484:8484 -e MSB_ADDR=127.0.0.1 $gvnfmdriver_image
+
+ # TODO(sshank): Run other VFC component docker images.
+}
+
+# init_vfc() - Function that initializes VF-C services
+function init_vfc {
+ install_package libmysqlclient-dev
+
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "vfc"
+ if [[ "$compile_repo" == "True" ]]; then
+ compile_all_vfc_repos
+ fi
+ fi
+
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_vfc_images
+ if [[ "$skip_install" == "False" ]]; then
+ install_vfc
+ fi
+ fi
+}
diff --git a/lib/vid b/lib/vid
new file mode 100755
index 0000000..0c7ad85
--- /dev/null
+++ b/lib/vid
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+# _build_vid_images() - Function that builds VID docker images
+function _build_vid_images {
+ if [[ "$compile_repo" != "True" ]]; then
+ compile_src ${src_folders[vid]}
+ fi
+ build_docker_image ${src_folders[vid]}/deliveries
+}
+
+# get_vid_images() - Function that retrieves VID docker images
+function get_vid_images {
+ if [[ "$build_image" == "True" ]]; then
+ _build_vid_images
+ else
+ pull_openecomp_image vid
+ fi
+ pull_docker_image mariadb:10
+}
+
+# install_vid() - Download and configure VID source code
+function install_vid {
+ vid_image=`docker images | grep vid | grep latest| awk '{print $1 ":" $2}'`
+
+ docker rm -f vid-mariadb
+ docker rm -f vid-server
+
+ run_docker_image --name vid-mariadb -e MYSQL_DATABASE=vid_openecomp -e MYSQL_USER=vidadmin -e MYSQL_PASSWORD=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U -e MYSQL_ROOT_PASSWORD=LF+tp_1WqgSY -v /opt/vid/lf_config/vid-my.cnf:/etc/mysql/my.cnf -v /opt/vid/lf_config/vid-pre-init.sql:/docker-entrypoint-initdb.d/vid-pre-init.sql -v /var/lib/mysql -d mariadb:10
+ run_docker_image -e VID_MYSQL_DBNAME=vid_openecomp -e VID_MYSQL_PASS=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U --name vid-server -p 8080:8080 --link vid-mariadb:vid-mariadb-docker-instance -d $vid_image
+}
+
+# init_vid() - Function that initializes VID services
+function init_vid {
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "vid"
+ if [[ "$compile_repo" == "True" ]]; then
+ compile_repos "vid"
+ fi
+ fi
+
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_vid_images
+ if [[ "$skip_install" == "False" ]]; then
+ install_vid
+ fi
+ fi
+}
diff --git a/lib/vnfsdk b/lib/vnfsdk
new file mode 100755
index 0000000..ea7fa33
--- /dev/null
+++ b/lib/vnfsdk
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+# _build_vnfsdk_images() - Builds VNFSDK images from source code
+function _build_vnfsdk_images {
+ install_package unzip
+ pushd ${src_folders[vnfsdk]}/refrepo/vnfmarket-be/deployment/docker/docker-refrepo
+ build_docker_image .
+ popd
+}
+
+# get_vnfsdk_images() - Function that builds or pulls VNFSDK Docker images
+function get_vnfsdk_images {
+ if [[ "$build_image" == "True" ]]; then
+ # TODO(sshank): Has errors building.
+ _build_vnfsdk_images
+ else
+ pull_docker_image refrepo:1.0-STAGING-latest
+ pull_docker_image refrepo:latest
+ fi
+}
+
+# install_vnfsdk() - Function that starts the VNFSDK containers via docker-compose
+function install_vnfsdk {
+ install_docker_compose
+ pushd ${src_folders[vnfsdk]}/refrepo/vnfmarket-be/deployment/install
+ /opt/docker/docker-compose up -d
+ popd
+}
+
+# init_vnfsdk() - Init VNFSDK services
+function init_vnfsdk {
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "vnfsdk"
+ if [[ "$compile_repo" == "True" ]]; then
+ compile_repos "vnfsdk"
+ fi
+ fi
+
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_vnfsdk_images
+ if [[ "$skip_install" == "False" ]]; then
+ install_vnfsdk
+ fi
+ fi
+}
diff --git a/lib/vvp b/lib/vvp
new file mode 100755
index 0000000..f24431e
--- /dev/null
+++ b/lib/vvp
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+# _build_vvp_images() - Builds VVP images from source code (currently a placeholder)
+function _build_vvp_images {
+ echo "pass"
+}
+
+# get_vvp_images() - Function that builds or pulls VVP Docker images
+function get_vvp_images {
+ if [[ "$build_image" == "True" ]]; then
+ _build_vvp_images
+ else
+ pull_docker_image refrepo:1.0-STAGING-latest
+ pull_docker_image refrepo:latest
+ fi
+}
+
+# install_vvp() - Function that installs VVP services (currently a placeholder)
+function install_vvp {
+ echo "pass"
+}
+
+# init_vvp() - Init VVP services
+function init_vvp {
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "vvp"
+ if [[ "$compile_repo" == "True" ]]; then
+ compile_repos "vvp"
+ fi
+ fi
+
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_vvp_images
+ if [[ "$skip_install" == "False" ]]; then
+ install_vvp
+ fi
+ fi
+}
diff --git a/tests/_test_base b/tests/_test_base
new file mode 100755
index 0000000..b30632d
--- /dev/null
+++ b/tests/_test_base
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+if [[ "$debug" == "True" ]]; then
+ set -o xtrace
+fi
+
+source /var/onap_tests/asserts
+source /var/onap/commons
+
+# main() - Starting point for Unit Tests
+function main {
+ local covered_functions=("$@")
+
+ update_repos
+ len=${#covered_functions[@]}
+ echo "1..$len"
+ for i in ${!covered_functions[@]}; do
+ dpkg --get-selections > installed-software
+ sort -o installed-software installed-software
+ test_${covered_functions[$i]}
+ echo "ok $((i+1)) - test_${covered_functions[$i]}"
+
+ # Teardown process
+ if is_package_installed docker-ce; then
+ docker images -q | xargs docker rmi -f
+ fi
+ dpkg --get-selections > installed-software_new
+ sort -o installed-software_new installed-software_new
+ apt-get purge -y -qq $(comm -3 installed-software installed-software_new | awk '{print $1}')
+ #rm -rf $git_src_folder
+ #rm -rf ~/.m2/
+ done
+}
diff --git a/tests/asserts b/tests/asserts
new file mode 100755
index 0000000..441b9f0
--- /dev/null
+++ b/tests/asserts
@@ -0,0 +1,94 @@
+#!/bin/bash
+
+source /var/onap/commons
+
+# asserts_http_status_code() - Function that verifies that a URL returns the expected HTTP status code
+function asserts_http_status_code {
+ local url=$1
+ local expected_code=${2:-"200"}
+
+ code=$(curl -I $url | head -n 1 | cut -d$' ' -f2)
+ local error_msg=${3:-"The URL $url responded with $code status code"}
+ if [[ "$code" != "$expected_code" ]]; then
+ raise_error $error_msg
+ fi
+}
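+# Example: "asserts_http_status_code http://localhost:8880" fails the
+# test unless the URL (here the Rancher port used by lib/oom) answers
+# with the default expected code 200.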
+
+# asserts_process() - Function that verifies if a specific process is running
+function asserts_process {
+ local process=$1
+ local error_msg=${2:-"There is no $process running process"}
+
+ if [[ "ps -ef | grep $process" == "" ]]; then
+ raise_error $error_msg
+ fi
+}
+
+# asserts_java_process() - Function that verifies if a specific java process is running
+function asserts_java_process {
+ local process=$1
+ local error_msg=${2:-"There is no $process java running process"}
+
+ install_java
+ if [[ "jps | grep $process" == "" ]]; then
+ raise_error $error_msg
+ fi
+}
+
+# asserts_image_running() - Function that verifies if a specific image is running
+function asserts_image_running {
+ local image=$1
+ local error_msg=${2:-"There is no process with $image image running"}
+
+ asserts_image $image
+ if [[ "$(docker ps -q --filter=ancestor=$image 2> /dev/null)" == "" ]]; then
+ raise_error $error_msg
+ fi
+}
+
+# asserts_image() - Function that verifies if a specific image was created
+function asserts_image {
+ local image=$1
+ local error_msg=${2:-"There is no $image image"}
+
+ install_docker
+ if [[ "$(docker images -q $image 2> /dev/null)" == "" ]]; then
+ raise_error $error_msg
+ fi
+}
+
+# asserts_installed_package() - Function that verifies if a specific package was installed.
+function asserts_installed_package {
+ local package=$1
+ local error_msg=${2:-"$package wasn't installed"}
+
+ if ! is_package_installed $package; then
+ raise_error $error_msg
+ fi
+}
+
+# asserts_file_exist() - Function that verifies if a specific file exists
+function asserts_file_exist {
+ local file=$1
+ local error_msg=${2:-"$file doesn't exist"}
+
+ if [ ! -f $file ]; then
+ raise_error $error_msg
+ fi
+}
+
+# asserts_env_set() - Function that verifies that an environment variable is set
+function asserts_env_set {
+ local variable=$1
+ local error_msg=${2:-"$variable wasn't set"}
+
+ if [ -z "${!variable+x}" ]; then
+ raise_error $error_msg
+ fi
+}
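+# Example: "asserts_env_set RANCHER_URL" receives the *name* of the
+# variable; the indirect ${!variable+x} expansion above then checks
+# whether a variable with that name is set in the current environment.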
+
+# raise_error() - Function that prints and exits the execution
+function raise_error {
+ echo $@
+ exit 1
+}
diff --git a/tests/projects.txt b/tests/projects.txt
new file mode 100644
index 0000000..0aba508
--- /dev/null
+++ b/tests/projects.txt
@@ -0,0 +1,209 @@
+aaf/authz
+aaf/cadi
+aaf/inno
+aaf/luaplugin
+aai/aai-common
+aai/aai-config
+aai/aai-data
+aai/aai-service
+aai/babel
+aai/champ
+aai/data-router
+aai/esr-gui
+aai/esr-server
+aai/gizmo
+aai/logging-service
+aai/model-loader
+aai/resources
+aai/rest-client
+aai/router-core
+aai/search-data-service
+aai/sparky-be
+aai/sparky-fe
+aai/test-config
+aai/traversal
+appc
+appc/deployment
+ccsdk/dashboard
+ccsdk/distribution
+ccsdk/parent
+ccsdk/platform/blueprints
+ccsdk/platform/nbapi
+ccsdk/platform/plugins
+ccsdk/sli/adaptors
+ccsdk/sli/core
+ccsdk/sli/northbound
+ccsdk/sli/plugins
+ccsdk/storage/esaas
+ccsdk/storage/pgaas
+ccsdk/utils
+ci-management
+clamp
+cli
+dcae
+dcae/apod
+dcae/apod/analytics
+dcae/apod/buildtools
+dcae/apod/cdap
+dcae/collectors
+dcae/collectors/ves
+dcae/controller
+dcae/controller/analytics
+dcae/dcae-inventory
+dcae/demo
+dcae/demo/startup
+dcae/demo/startup/aaf
+dcae/demo/startup/controller
+dcae/demo/startup/message-router
+dcae/dmaapbc
+dcae/operation
+dcae/operation/utils
+dcae/orch-dispatcher
+dcae/pgaas
+dcae/utils
+dcae/utils/buildtools
+dcaegen2
+dcaegen2/analytics
+dcaegen2/analytics/tca
+dcaegen2/collectors
+dcaegen2/collectors/snmptrap
+dcaegen2/collectors/ves
+dcaegen2/deployments
+dcaegen2/platform
+dcaegen2/platform/blueprints
+dcaegen2/platform/cdapbroker
+dcaegen2/platform/cli
+dcaegen2/platform/configbinding
+dcaegen2/platform/deployment-handler
+dcaegen2/platform/inventory-api
+dcaegen2/platform/plugins
+dcaegen2/platform/policy-handler
+dcaegen2/platform/registrator
+dcaegen2/platform/servicechange-handler
+dcaegen2/utils
+demo
+dmaap/buscontroller
+dmaap/datarouter
+dmaap/dbcapi
+dmaap/messagerouter/dmaapclient
+dmaap/messagerouter/messageservice
+dmaap/messagerouter/mirroragent
+dmaap/messagerouter/msgrtr
+doc
+doc/tools
+ecompsdkos
+externalapi/nbi
+holmes/common
+holmes/dsa
+holmes/engine-management
+holmes/rule-management
+integration
+logging-analytics
+modeling/modelspec
+modeling/toscaparsers
+msb/apigateway
+msb/discovery
+msb/java-sdk
+msb/swagger-sdk
+mso
+mso/chef-repo
+mso/docker-config
+mso/libs
+mso/mso-config
+multicloud/azure
+multicloud/framework
+multicloud/openstack
+multicloud/openstack/vmware
+multicloud/openstack/windriver
+ncomp
+ncomp/cdap
+ncomp/core
+ncomp/docker
+ncomp/maven
+ncomp/openstack
+ncomp/sirius
+ncomp/sirius/manager
+ncomp/utils
+oom
+oom/registrator
+oparent
+optf/cmso
+optf/has
+optf/osdf
+policy/api
+policy/common
+policy/docker
+policy/drools-applications
+policy/drools-pdp
+policy/engine
+policy/gui
+policy/pap
+policy/pdp
+portal
+portal/sdk
+sdc
+sdc/jtosca
+sdc/sdc-distribution-client
+sdc/sdc-docker-base
+sdc/sdc-titan-cassandra
+sdc/sdc-tosca
+sdc/sdc-workflow-designer
+sdnc/adaptors
+sdnc/architecture
+sdnc/core
+sdnc/features
+sdnc/northbound
+sdnc/oam
+sdnc/parent
+sdnc/plugins
+so
+so/chef-repo
+so/docker-config
+so/libs
+so/so-config
+testsuite
+testsuite/heatbridge
+testsuite/properties
+testsuite/python-testing-utils
+ui
+ui/dmaapbc
+university
+usecase-ui
+usecase-ui/server
+vfc/gvnfm/vnflcm
+vfc/gvnfm/vnfmgr
+vfc/gvnfm/vnfres
+vfc/nfvo/catalog
+vfc/nfvo/driver/ems
+vfc/nfvo/driver/sfc
+vfc/nfvo/driver/vnfm/gvnfm
+vfc/nfvo/driver/vnfm/svnfm
+vfc/nfvo/lcm
+vfc/nfvo/resmanagement
+vfc/nfvo/wfengine
+vid
+vid/asdcclient
+vnfrqts/epics
+vnfrqts/guidelines
+vnfrqts/requirements
+vnfrqts/testcases
+vnfrqts/usecases
+vnfsdk/compliance
+vnfsdk/functest
+vnfsdk/lctest
+vnfsdk/model
+vnfsdk/pkgtools
+vnfsdk/refrepo
+vnfsdk/validation
+vvp/ansible-ice-bootstrap
+vvp/cms
+vvp/devkit
+vvp/documentation
+vvp/engagementmgr
+vvp/gitlab
+vvp/image-scanner
+vvp/jenkins
+vvp/portal
+vvp/postgresql
+vvp/test-engine
+vvp/validation-scripts
diff --git a/tests/test_aai b/tests/test_aai
new file mode 100755
index 0000000..dd027d2
--- /dev/null
+++ b/tests/test_aai
@@ -0,0 +1,100 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/aai
+
+covered_functions=(
+#"install_hadoop" "install_haproxy" "clone_all_aai_repos" "compile_aai_repos" "setup_titan" "start_aai_microservices" "install_hbase" "install_ajsc_aai" "install_model_loader"
+"install_hadoop" "install_haproxy" "clone_all_aai_repos" "compile_aai_repos" "setup_titan" "install_hbase"
+)
+
+# test_install_hadoop() - Verify that Hadoop is downloaded and started properly
+function test_install_hadoop {
+ install_hadoop
+
+ asserts_file_exist /opt/hadoop/current/titan-1.0.0-hadoop1/bin/titan.sh
+ asserts_java_process Elasticsearch
+ asserts_java_process GremlinServer
+ asserts_java_process CassandraDaemon
+}
+
+# test_install_haproxy() - Verify that HAProxy is installed properly
+function test_install_haproxy {
+ install_haproxy
+
+ asserts_installed_package haproxy
+ asserts_process haproxy
+}
+
+# test_clone_all_aai_repos() - Verify that all the AAI Repos are cloned
+function test_clone_all_aai_repos {
+ clone_repos "aai"
+
+ asserts_file_exist ${src_folders[aai]}/aai-common/pom.xml
+ asserts_file_exist ${src_folders[aai]}/aai-config/cookbooks/aai-resources/runlist-aai-resources.json
+ asserts_file_exist ${src_folders[aai]}/aai-data/environments/solo.json
+ asserts_file_exist ${src_folders[aai]}/aai-service/pom.xml
+ asserts_file_exist ${src_folders[aai]}/babel/README.md
+ asserts_file_exist ${src_folders[aai]}/champ/pom.xml
+ asserts_file_exist ${src_folders[aai]}/data-router/pom.xml
+ asserts_file_exist ${src_folders[aai]}/esr-gui/pom.xml
+ asserts_file_exist ${src_folders[aai]}/esr-server/pom.xml
+ asserts_file_exist ${src_folders[aai]}/gizmo/pom.xml
+ asserts_file_exist ${src_folders[aai]}/logging-service/pom.xml
+ asserts_file_exist ${src_folders[aai]}/model-loader/pom.xml
+ asserts_file_exist ${src_folders[aai]}/resources/pom.xml
+ asserts_file_exist ${src_folders[aai]}/rest-client/pom.xml
+ asserts_file_exist ${src_folders[aai]}/router-core/pom.xml
+ asserts_file_exist ${src_folders[aai]}/search-data-service/pom.xml
+ asserts_file_exist ${src_folders[aai]}/sparky-be/pom.xml
+ asserts_file_exist ${src_folders[aai]}/sparky-fe/pom.xml
+ asserts_file_exist ${src_folders[aai]}/test-config/docker-compose-app.yml
+ asserts_file_exist ${src_folders[aai]}/traversal/pom.xml
+}
+
+# test_compile_aai_repos() - Verify that all the AAI Repositories compile properly
+function test_compile_aai_repos {
+ clone_repos "aai"
+ compile_aai_repos
+
+ for common in annotations auth core schema utils; do
+ asserts_file_exist ${src_folders[aai]}/aai-common/aai-$common/target/aai-$common-1.1.0-SNAPSHOT.jar
+ done
+
+ for service in common-logging eelf-logging logging-api; do
+ asserts_file_exist ${src_folders[aai]}/logging-service/$service/target/$service-1.1.0-SNAPSHOT.jar
+ done
+
+ asserts_file_exist ${src_folders[aai]}/resources/aai-resources/target/aai-resources.jar
+ asserts_file_exist ${src_folders[aai]}/traversal/aai-traversal/target/traversal.jar
+}
+
+# test_setup_titan() - Verify that Titan Cassandra DB is up and running
+function test_setup_titan {
+ clone_repos "aai"
+ install_hadoop
+ setup_titan
+
+ # TODO(electrocucaracha): Validate the DB creation
+}
+
+# test_start_aai_microservices() - Verify that AAI Resources and Traversal images works
+function test_start_aai_microservices {
+ clone_repos "aai"
+ start_aai_microservices
+
+ # TODO(electrocucaracha): Investigate how to run AAI microservices in background
+}
+
+# test_install_hbase() - Verify that AAI HBase service is up and running properly
+function test_install_hbase {
+ install_hbase
+
+ asserts_image_running aai-hbase-${hbase_version}
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tests/test_appc b/tests/test_appc
new file mode 100755
index 0000000..f567d7f
--- /dev/null
+++ b/tests/test_appc
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/appc
+
+covered_functions=(
+"clone_all_appc_repos" "compile_all_appc_repos" "get_appc_images" "install_appc"
+)
+
+# test_clone_all_appc_repos() - Verify that the APPC source code is cloned
+function test_clone_all_appc_repos {
+ clone_repos "appc"
+
+ asserts_file_exist ${src_folders[appc]}/pom.xml
+ asserts_file_exist ${src_folders[appc]}/deployment/pom.xml
+}
+
+# test_compile_all_appc_repos() - Verify that the APPC source code is compiled properly
+function test_compile_all_appc_repos {
+ clone_repos "appc"
+ compile_repos "appc"
+
+ for adapter in appc-ansible-adapter appc-chef-adapter appc-dmaap-adapter appc-iaas-adapter appc-netconf-adapter appc-rest-adapter appc-rest-healthcheck-adapter; do
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/$adapter/$adapter-bundle/target/$adapter-bundle-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/$adapter/$adapter-features/target/$adapter-features-*-SNAPSHOT.jar
+ if [[ "$adapter" == "appc-netconf-adapter" ]]; then
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/$adapter/appc-netconf-installer/target/$adapter-*-SNAPSHOT.zip
+ else
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/$adapter/$adapter-installer/target/$adapter-*-SNAPSHOT.zip
+ fi
+ done
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/appc-dmaap-adapter/appc-message-adapter-api/target/appc-message-adapter-api-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/appc-dmaap-adapter/appc-message-adapter-factory/target/appc-message-adapter-factory-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/appc-ssh-adapter/appc-ssh-adapter-api/target/appc-ssh-adapter-api-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/appc-ssh-adapter/appc-ssh-adapter-features/target/appc-ssh-adapter-features-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/appc-ssh-adapter/appc-ssh-adapter-sshd/target/appc-ssh-adapter-sshd-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/appc-ssh-adapter/appc-ssh-adapter-tests/target/appc-ssh-adapter-tests-*-SNAPSHOT.jar
+
+ #for component in appc-event-listener appc-oam appc-provider; do
+ for component in appc-event-listener appc-provider; do
+ asserts_file_exist ${src_folders[appc]}/$component/$component-bundle/target/$component-bundle-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/$component/$component-features/target/$component-features-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/$component/$component-installer/target/$component-*-SNAPSHOT.zip
+ done
+ #for component in appc-oam appc-provider; do
+ for component in appc-provider; do
+ asserts_file_exist ${src_folders[appc]}/$component/$component-model/target/$component-model-*-SNAPSHOT.jar
+ done
+
+ asserts_file_exist ${src_folders[appc]}/appc-common/target/appc-common-*-SNAPSHOT.jar
+
+ asserts_file_exist ${src_folders[appc]}/appc-dg/appc-dg-shared/appc-dg-dependency-model/target/appc-dg-dependency-model-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-dg/appc-dg-shared/appc-dg-domain-model-lib/target/appc-dg-domain-model-lib-*-SNAPSHOT.jar
+
+ asserts_file_exist ${src_folders[appc]}/appc-dispatcher/appc-command-executor/appc-command-executor-api/target/appc-command-executor-api-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-dispatcher/appc-dispatcher-common/appc-data-access-lib/target/appc-data-access-lib-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-dispatcher/appc-dispatcher-common/domain-model-lib/target/domain-model-lib-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-dispatcher/appc-dispatcher-common/execution-queue-management-lib/target/execution-queue-management-lib-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-dispatcher/appc-license-manager/appc-license-manager-api/target/appc-license-manager-api-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-dispatcher/appc-request-handler/appc-request-handler-api/target/appc-request-handler-api-*-SNAPSHOT.jar
+
+
+ #asserts_file_exist ${src_folders[appc]}/appc-lifecycle-management/appc-lifecycle-management-api/target/appc-lifecycle-management-api-*-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[appc]}/appc-lifecycle-management/appc-lifecycle-management-core/target/appc-lifecycle-management-core-*-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[appc]}/appc-lifecycle-management/appc-lifecycle-management-features/target/appc-lifecycle-management-features-*-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[appc]}/appc-lifecycle-management/appc-lifecycle-management-installer/target/appc-lifecycle-management-*-SNAPSHOT.zip
+ #asserts_file_exist ${src_folders[appc]}/appc-lifecycle-management/state-machine-lib/target/state-machine-lib-*-SNAPSHOT.jar
+
+ asserts_file_exist ${src_folders[appc]}/appc-metric/appc-metric-bundle/target/appc-metric-bundle-*-SNAPSHOT.jar
+
+ asserts_file_exist ${src_folders[appc]}/deployment/platform-logic/installer/target/platform-logic-installer-*-SNAPSHOT.zip
+}
+
+# test_get_appc_images() - Verify that APPC Docker images can be retrieved
+function test_get_appc_images {
+ clone_repos "appc"
+ get_appc_images
+
+ asserts_image openecomp/appc-image
+}
+
+# test_install_appc() - Verify that the APPC Docker images are up and running
+function test_install_appc {
+ clone_repos "appc"
+ get_appc_images
+ install_appc
+
+ asserts_image_running openecomp/appc-image
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tests/test_ccsdk b/tests/test_ccsdk
new file mode 100755
index 0000000..28de270
--- /dev/null
+++ b/tests/test_ccsdk
@@ -0,0 +1,90 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/ccsdk
+
+covered_functions=(
+"clone_ccsdk_repos" "compile_ccsdk_repos" "get_ccsdk_images"
+)
+
+# test_clone_ccsdk_repos() - Verify that CCSDK repositories are retrieved properly
+function test_clone_ccsdk_repos {
+ clone_repos "ccsdk"
+
+ asserts_file_exist ${src_folders[ccsdk]}/dashboard/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/distribution/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/parent/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/platform/blueprints/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/platform/nbapi/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/platform/plugins/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/sli/adaptors/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/sli/core/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/sli/northbound/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/sli/plugins/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/storage/esaas/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/storage/pgaas/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/utils/pom.xml
+}
+
+# test_compile_ccsdk_repos() - Verify the compilation of CCSDK repositories
+function test_compile_ccsdk_repos {
+ clone_repos "ccsdk"
+ compile_repos "ccsdk"
+
+ asserts_file_exist ${src_folders[ccsdk]}/dashboard/ccsdk-app-common/target/ccsdk-app-common-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[ccsdk]}/dashboard/ccsdk-app-os/target/ccsdk-app-os-1.1.0-SNAPSHOT.war
+ asserts_file_exist ${src_folders[ccsdk]}/dashboard/ccsdk-app-overlay/target/ccsdk-app-overlay-1.1.0-SNAPSHOT.war
+ asserts_file_exist ${src_folders[ccsdk]}/distribution/dgbuilder/target/dgbuilder.0.1.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[ccsdk]}/distribution/platform-logic/installer/target/platform-logic-installer-0.1.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[ccsdk]}/platform/nbapi/target/commonnbapi-*.war
+
+ #for adaptor in aai-service mdsal-resource resource-assignment sql-resource; do
+ #for component in features provider; do
+ #asserts_file_exist ${src_folders[ccsdk]}/sli/adaptors/$adaptor/$component/target/$adaptor-$component-0.1.0-SNAPSHOT.jar
+ #done
+ #asserts_file_exist ${src_folders[ccsdk]}/sli/adaptors/$adaptor/installer/target/sdnc-$adaptor-0.1.0-SNAPSHOT-installer.zip
+ #done
+
+ #for core in dblib filters sli sliapi sliPluginUtils; do
+ for core in dblib; do
+ for component in features provider; do
+ asserts_file_exist ${src_folders[ccsdk]}/sli/core/$core/$component/target/$core-$component-*-SNAPSHOT.jar
+ done
+ asserts_file_exist ${src_folders[ccsdk]}/sli/core/$core/installer/target/sdnc-$core-*-SNAPSHOT-installer.zip
+ done
+ #asserts_file_exist ${src_folders[ccsdk]}/sli/core/sli/common/target/sli-common-*-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[ccsdk]}/sli/core/sli/recording/target/sli-recording-*-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[ccsdk]}/sli/core/sliapi/model/target/sliapi-model-*-SNAPSHOT.jar
+
+ for northbound in asdcApi dataChange; do
+ for component in features model provider; do
+ asserts_file_exist ${src_folders[ccsdk]}/sli/northbound/$northbound/$component/target/$northbound-$component-*-SNAPSHOT.jar
+ done
+ asserts_file_exist ${src_folders[ccsdk]}/sli/northbound/$northbound/installer/target/sdnc-$northbound-*-SNAPSHOT-installer.zip
+ done
+ asserts_file_exist ${src_folders[ccsdk]}/sli/northbound/dmaap-listener/target/dmaap-listener-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[ccsdk]}/sli/northbound/ueb-listener/target/ueb-listener-*-SNAPSHOT.jar
+
+ for plugin in properties-node restapi-call-node; do
+ for component in features provider; do
+ asserts_file_exist ${src_folders[ccsdk]}/sli/plugins/$plugin/$component/target/$plugin-$component-*-SNAPSHOT.jar
+ done
+ asserts_file_exist ${src_folders[ccsdk]}/sli/plugins/$plugin/installer/target/sdnc-$plugin-*-SNAPSHOT-installer.zip
+ done
+}
+
+# test_get_ccsdk_images() - Verify that CCSDK images are retrieved or built properly
+function test_get_ccsdk_images {
+ clone_repos "ccsdk"
+ get_ccsdk_images
+
+ for image in dgbuilder odlsli odl ubuntu; do
+ asserts_image onap/ccsdk-$image-image
+ done
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tests/test_dcae b/tests/test_dcae
new file mode 100755
index 0000000..3c5400f
--- /dev/null
+++ b/tests/test_dcae
@@ -0,0 +1,111 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/dcae
+
+covered_functions=(
+"clone_all_dcae_repos" "compile_all_dcae_repos" "get_dcae_images" "install_dcae"
+)
+
+# test_clone_all_dcae_repos() - Verifies that DCAE repositories can be retrieved properly
+function test_clone_all_dcae_repos {
+ clone_repos "dcae"
+
+ asserts_file_exist ${src_folders[dcae]}/apod/README.md
+ asserts_file_exist ${src_folders[dcae]}/apod/analytics/pom.xml
+ asserts_file_exist ${src_folders[dcae]}/apod/buildtools/LICENSE.txt
+ asserts_file_exist ${src_folders[dcae]}/apod/cdap/LICENSE.txt
+ asserts_file_exist ${src_folders[dcae]}/collectors/README.md
+ asserts_file_exist ${src_folders[dcae]}/collectors/ves/pom.xml
+ asserts_file_exist ${src_folders[dcae]}/controller/pom.xml
+ asserts_file_exist ${src_folders[dcae]}/controller/analytics/pom.xml
+ asserts_file_exist ${src_folders[dcae]}/dcae-inventory/pom.xml
+ asserts_file_exist ${src_folders[dcae]}/demo/pom.xml
+ asserts_file_exist ${src_folders[dcae]}/demo/startup/README.md
+ asserts_file_exist ${src_folders[dcae]}/demo/startup/aaf/LICENSE.txt
+ asserts_file_exist ${src_folders[dcae]}/demo/startup/controller/LICENSE.txt
+ asserts_file_exist ${src_folders[dcae]}/demo/startup/message-router/LICENSE.txt
+ asserts_file_exist ${src_folders[dcae]}/dmaapbc/pom.xml
+ asserts_file_exist ${src_folders[dcae]}/operation/README.md
+ asserts_file_exist ${src_folders[dcae]}/operation/utils/pom.xml
+ asserts_file_exist ${src_folders[dcae]}/orch-dispatcher/LICENSE.txt
+ asserts_file_exist ${src_folders[dcae]}/pgaas/LICENSE.txt
+ asserts_file_exist ${src_folders[dcae]}/utils/README.md
+ asserts_file_exist ${src_folders[dcae]}/utils/buildtools/LICENSE.txt
+}
+
+# test_compile_all_dcae_repos() - Verify that the DCAE source code is compiled properly
+function test_compile_all_dcae_repos {
+ clone_repos "dcae"
+ compile_repos "dcae"
+
+ asserts_file_exist ${src_folders[dcae]}/collectors/ves/target/VESCollector-1.1.0-SNAPSHOT.jar
+
+ asserts_file_exist ${src_folders[dcae]}/controller/analytics/dcae-analytics-cdap-common-model/target/dcae-analytics-cdap-common-model-1.1.0-SNAPSHOT.jar
+
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-core/dcae-controller-core-model/target/dcae-controller-core-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-core/dcae-controller-core-utils/target/dcae-controller-core-utils-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-core/dcae-controller-operation-utils/target/dcae-controller-operation-utils-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-core/dcae-controller-platform-model/target/dcae-controller-platform-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-core/dcae-controller-platform-server/target/dcae-controller-platform-server-1.1.0-SNAPSHOT.jar
+
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service/dcae-controller-service-dmaap-drsub/dcae-controller-service-dmaap-drsub-manager/target/dcae-controller-service-dmaap-drsub-manager-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service/dcae-controller-service-dmaap-drsub/dcae-controller-service-dmaap-drsub-model/target/dcae-controller-service-dmaap-drsub-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service/dcae-controller-service-standardeventcollector/dcae-controller-service-standardeventcollector-manager/target/dcae-controller-service-standardeventcollector-manager-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service/dcae-controller-service-standardeventcollector/dcae-controller-service-standardeventcollector-model/target/dcae-controller-service-standardeventcollector-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service/dcae-controller-service-storage-postgres-model/target/dcae-controller-service-storage-postgres-model-1.1.0-SNAPSHOT.jar
+
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-cdap/dcae-controller-service-cdap-adaptor/target/dcae-controller-service-cdap-adaptor-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-cdap/dcae-controller-service-cdap-cluster/dcae-controller-service-cdap-cluster-manager/target/dcae-controller-service-cdap-cluster-manager-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-cdap/dcae-controller-service-cdap-cluster/dcae-controller-service-cdap-cluster-model/target/dcae-controller-service-cdap-cluster-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-cdap/dcae-controller-service-cdap-model/target/dcae-controller-service-cdap-model-1.1.0-SNAPSHOT.jar
+
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-docker/dcae-controller-service-common-docker/dcae-controller-service-common-docker-manager/target/dcae-controller-service-common-docker-manager-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-docker/dcae-controller-service-common-docker/dcae-controller-service-common-docker-model/target/dcae-controller-service-common-docker-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-docker/dcae-controller-service-docker-adaptor/target/dcae-controller-service-docker-adaptor-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-docker/dcae-controller-service-docker-host/dcae-controller-service-docker-host-manager/target/dcae-controller-service-docker-host-manager-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-docker/dcae-controller-service-docker-host/dcae-controller-service-docker-host-model/target/dcae-controller-service-docker-host-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-docker/dcae-controller-service-docker-model/target/dcae-controller-service-docker-model-1.1.0-SNAPSHOT.jar
+
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-vm/dcae-controller-service-common-vm/dcae-controller-service-common-vm-manager/target/dcae-controller-service-common-vm-manager-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-vm/dcae-controller-service-common-vm/dcae-controller-service-common-vm-model/target/dcae-controller-service-common-vm-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-vm/dcae-controller-service-vm-adaptor/target/dcae-controller-service-vm-adaptor-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-vm/dcae-controller-service-vm-model/target/dcae-controller-service-vm-model-1.1.0-SNAPSHOT.jar
+
+ asserts_file_exist ${src_folders[dcae]}/dcae-inventory/target/dcae-inventory-1.1.0.jar
+
+ asserts_file_exist ${src_folders[dcae]}/demo/dcae-demo-controller/target/dcae-demo-controller-1.1.0-SNAPSHOT-runtime.zip
+
+ asserts_file_exist ${src_folders[dcae]}/dmaapbc/target/dcae_dmaapbc.jar
+
+ asserts_file_exist ${src_folders[dcae]}/operation/utils/operation-utils/target/operation-utils-1.1.0-SNAPSHOT.jar
+}
+
+# test_get_dcae_images() - Function that verifies DCAE images are retrieved properly
+function test_get_dcae_images {
+ clone_repos "dcae"
+ compile_repos "dcae"
+ get_dcae_images
+
+ asserts_image openecomp/dcae-dmaapbc
+ #asserts_image dcae/orch-dispatcher
+ asserts_image dcae-controller
+ asserts_image dcae-inventory
+}
+
+# test_install_dcae() - Function that verifies that DCAE services are up and running
+function test_install_dcae {
+ clone_repos "dcae"
+ compile_all_dcae_repos
+ get_dcae_images
+ install_dcae
+
+ dcae_image=$(docker images | grep dcae-controller | awk '{print $1 ":" $2}')
+ asserts_image_running $dcae_image
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tests/test_functions b/tests/test_functions
new file mode 100755
index 0000000..dd48215
--- /dev/null
+++ b/tests/test_functions
@@ -0,0 +1,191 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/functions
+
+covered_functions=(
+"create_configuration_files" "clone_repo"
+"configure_bind" "install_java" "install_maven" "install_nodejs" "install_python"
+"install_docker" "pull_docker_image" "install_docker_compose" "configure_service"
+"start_ODL" "compile_src" "build_docker_image" "docker_openecomp_login"
+"pull_openecomp_image" "pull_onap_image" "coverity_repos" "add_no_proxy_value"
+)
+
+# test_create_configuration_files() - Verify the creation of the configuration files
+function test_create_configuration_files {
+ create_configuration_files
+
+ asserts_file_exist /opt/config/nexus_docker_repo.txt
+ asserts_file_exist /opt/config/nexus_username.txt
+ asserts_file_exist /opt/config/nexus_password.txt
+ asserts_file_exist /opt/config/openstack_username.txt
+ asserts_file_exist /opt/config/tenant_id.txt
+ asserts_file_exist /opt/config/dmaap_topic.txt
+ asserts_file_exist /opt/config/docker_version.txt
+}
+
+# test_docker_openecomp_login() - Verify the proper login to OpenECOMP Docker Hub
+function test_docker_openecomp_login {
+ docker_openecomp_login
+}
+
+# test_pull_openecomp_image() - Verify the OpenECOMP container image pulling process
+function test_pull_openecomp_image {
+ local image_name=portal-apps
+ unset docker_version
+ pull_openecomp_image $image_name
+
+ asserts_image $nexus_docker_repo/openecomp/$image_name
+}
+
+# test_pull_onap_image() - Verify the ONAP container image pulling process
+function test_pull_onap_image {
+ local image_name=portal-apps
+ unset docker_version
+ pull_onap_image $image_name
+
+ asserts_image $nexus_docker_repo/onap/$image_name
+}
+
+# test_clone_repo() - Verify cloning and pulling source code from repositories
+function test_clone_repo {
+ clone_repo demo
+
+ asserts_installed_package git
+ asserts_file_exist $git_src_folder/demo/LICENSE.TXT
+}
+
+# test_configure_bind() - Verify the correct installation and configuration of bind
+function test_configure_bind {
+ configure_bind
+
+ asserts_installed_package bind9
+ asserts_installed_package bind9utils
+ asserts_file_exist /etc/bind/zones/db.simpledemo.openecomp.org
+ asserts_file_exist /etc/bind/named.conf.options
+ asserts_file_exist /etc/bind/named.conf.local
+
+ rm -rf /etc/bind/
+}
+
+# test_install_java() - Verify the correct installation of java
+function test_install_java {
+ install_java
+
+ asserts_installed_package openjdk-8-jdk
+}
+
+# test_install_maven() - Verify the correct installation and configuration of maven
+function test_install_maven {
+ install_maven
+
+ asserts_installed_package maven3
+ asserts_installed_package openjdk-8-jdk
+ asserts_file_exist $mvn_conf_file
+}
+
+# test_install_nodejs() - Verify the correct installation of NodeJS tools
+function test_install_nodejs {
+ install_nodejs
+
+ asserts_installed_package nodejs
+ asserts_file_exist /usr/bin/npm
+}
+
+# test_install_python() - Verify the correct installation of Python
+function test_install_python {
+ install_python
+ asserts_installed_package python2.7
+ asserts_installed_package python-dev
+}
+
+# test_install_docker() - Verify the correct installation of Docker
+function test_install_docker {
+ install_docker
+
+ asserts_installed_package docker-ce
+}
+
+# test_pull_docker_image() - Verify the correct retrieval of a specific Docker image
+function test_pull_docker_image {
+ local image=attos/dmaap
+ pull_docker_image $image
+
+ asserts_image $image
+}
+
+# test_install_docker_compose() - Verify the correct installation of Docker Compose tool
+function test_install_docker_compose {
+ install_docker_compose
+
+ asserts_file_exist /opt/docker/docker-compose
+}
+
+# test_configure_service() - Verify the correct configuration of a specific init service
+function test_configure_service {
+ local service=mso
+
+ configure_service $service
+
+ asserts_file_exist /etc/init.d/$service
+
+ rm -rf /etc/init.d/$service
+}
+
+# test_start_ODL() - Verify the installation and configuration of the OpenDaylight controller
+function test_start_ODL {
+ start_ODL
+
+ asserts_file_exist /opt/opendaylight/current/bin/start
+}
+
+# test_compile_src() - Verify the compilation of java code using maven tools
+function test_compile_src {
+ local repo=vid/asdcclient
+ clone_repo $repo
+ compile_src $git_src_folder/$repo
+
+ asserts_file_exist $git_src_folder/$repo/target/asdcclient-1.0.2-SNAPSHOT.jar
+}
+
+# test_build_docker_image() - Verify that a docker image is created from source code
+function test_build_docker_image {
+ clone_repo ccsdk/distribution
+ build_docker_image $git_src_folder/ccsdk/distribution/ubuntu docker
+
+ asserts_image onap/ccsdk-ubuntu-image
+}
+
+# test_coverity_repos() - Verify that all the repos are covered by scripts
+function test_coverity_repos {
+ pushd /var/onap_tests/
+ cp projects.txt remaining_projects.txt
+ for project in "${repos[@]}"; do
+ for covered_repo in $project; do
+ sed -i '/^'${covered_repo//\//\\/}'$/d' remaining_projects.txt
+ done
+ done
+
+ threshold=75
+ num_projects=$(wc -l < projects.txt)
+ num_remaining_projects=$(wc -l < remaining_projects.txt)
+ coverage=$(echo "scale=2; 100-($num_remaining_projects/$num_projects*100)" | bc | cut -d . -f 1)
+ if [ $coverage -lt $threshold ]; then
+ raise_error "There are repositories that are not covered by scripts"
+ fi
+ popd
+}
+
+# test_add_no_proxy_value() - Verify that the no_proxy value is correctly set
+function test_add_no_proxy_value {
+ local ip="172.16.0.3"
+ add_no_proxy_value $ip
+
+ asserts_env_set no_proxy
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tests/test_mr b/tests/test_mr
new file mode 100755
index 0000000..ad1bcd2
--- /dev/null
+++ b/tests/test_mr
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/mr
+
+covered_functions=(
+"clone_mr_repos" "get_mr_images" "install_message_router"
+)
+
+# test_clone_mr_repos() - Verify that Message Router repositories are cloned properly
+function test_clone_mr_repos {
+ clone_repo dcae/demo/startup/message-router ${src_folders[mr]}
+
+ asserts_file_exist ${src_folders[mr]}/deploy.sh
+}
+
+# test_get_mr_images() - Verify that Message Router Docker images are retrieved
+function test_get_mr_images {
+ get_mr_images
+
+ asserts_image attos/dmaap
+ asserts_image wurstmeister/zookeeper
+}
+
+# test_install_message_router() - Verify the build and start-up of Message Router services
+function test_install_message_router {
+ clone_repo dcae/demo/startup/message-router ${src_folders[mr]}
+ get_mr_images
+ install_message_router
+
+ asserts_image_running dockerfiles_kafka
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tests/test_msb b/tests/test_msb
new file mode 100755
index 0000000..0848d33
--- /dev/null
+++ b/tests/test_msb
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/msb
+
+covered_functions=(
+"clone_all_msb_repos" "compile_all_msb_repos" "get_msb_images" "install_msb"
+)
+
+# test_clone_all_msb_repos() - Verify the source code retrieval of the Microservice Bus project
+function test_clone_all_msb_repos {
+ clone_repos "msb"
+
+ asserts_file_exist ${src_folders[msb]}/apigateway/pom.xml
+ asserts_file_exist ${src_folders[msb]}/discovery/pom.xml
+ asserts_file_exist ${src_folders[msb]}/java-sdk/pom.xml
+ asserts_file_exist ${src_folders[msb]}/swagger-sdk/pom.xml
+}
+
+# test_compile_all_msb_repos() - Verify the correct compilation of the MSB project
+function test_compile_all_msb_repos {
+ clone_repos "msb"
+ compile_repos "msb"
+
+ asserts_file_exist ${src_folders[msb]}/apigateway/apiroute/apiroute-service/target/original-apiroute-service-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[msb]}/apigateway/apiroute/apiroute-standalone/target/apiroute-1.1.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[msb]}/discovery/discovery-ui/target/discovery-ui-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[msb]}/discovery/sdclient/discovery-service/target/original-discovery-service-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[msb]}/java-sdk/target/msb-java-sdk-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[msb]}/swagger-sdk/target/swagger-sdk-1.1.0-SNAPSHOT.jar
+}
+
+# test_get_msb_images() - Verify the creation of MSB Docker images
+function test_get_msb_images {
+ clone_repos "msb"
+ get_msb_images
+
+ for image in base apigateway discovery; do
+ asserts_image $nexus_docker_repo/onap/msb/msb_$image
+ done
+}
+
+# test_install_msb() - Verify the execution of MSB Docker images
+function test_install_msb {
+ clone_repos "msb"
+ get_msb_images
+ install_msb
+
+ for image in apigateway discovery; do
+ asserts_image_running $nexus_docker_repo/onap/msb/msb_$image
+ done
+
+ asserts_http_status_code "http://127.0.0.1:10081/api/microservices/v1/services"
+ asserts_http_status_code "http://127.0.0.1/api/aai/v8/cloud-infrastructure/cloud-regions"
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tests/test_mso b/tests/test_mso
new file mode 100755
index 0000000..6a6bef7
--- /dev/null
+++ b/tests/test_mso
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/mso
+
+covered_functions=(
+"clone_all_mso_repos" "compile_all_mso_repos" "get_mso_images" "install_mso"
+)
+
+# test_clone_all_mso_repos() - Verify the source code retrieval of the MSO project
+function test_clone_all_mso_repos {
+ clone_repos "mso"
+
+ asserts_file_exist ${src_folders[mso]}/pom.xml
+ asserts_file_exist ${src_folders[mso]}/chef-repo/LICENSE.txt
+ asserts_file_exist ${src_folders[mso]}/docker-config/LICENSE.txt
+ asserts_file_exist ${src_folders[mso]}/libs/pom.xml
+ asserts_file_exist ${src_folders[mso]}/mso-config/LICENSE.txt
+}
+
+# test_compile_all_mso_repos() - Verify the correct compilation of MSO projects
+function test_compile_all_mso_repos {
+ clone_repos "mso"
+ compile_repos "mso"
+
+ asserts_file_exist ${src_folders[mso]}/libs/ceilometer-client/target/ceilometer-client-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/ceilometer-model/target/ceilometer-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/glance-client/target/glance-client-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/glance-model/target/glance-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/heat-client/target/heat-client-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/heat-model/target/heat-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/keystone-client/target/keystone-client-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/keystone-model/target/keystone-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/nova-client/target/nova-client-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/nova-model/target/nova-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/openstack-client/target/openstack-client-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/quantum-client/target/quantum-client-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/quantum-model/target/quantum-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/swift-client/target/swift-client-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/swift-model/target/swift-model-1.1.0-SNAPSHOT.jar
+}
+
+# test_get_mso_images() - Verify the creation of MSO Docker images
+function test_get_mso_images {
+ clone_repos "mso"
+ install_mso
+
+ for image in mso mso-arquillian wildfly ubuntu-update jacoco; do
+ asserts_image openecomp/$image
+ done
+}
+
+# test_install_mso() - Verify the execution of MSO Docker images
+function test_install_mso {
+ clone_repos "mso"
+ install_mso
+ install_mso_docker_config
+
+ asserts_image_running openecomp/mso
+ asserts_image_running mariadb:10.1.11
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tests/test_multicloud b/tests/test_multicloud
new file mode 100755
index 0000000..1b5b85d
--- /dev/null
+++ b/tests/test_multicloud
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/multicloud
+
+covered_functions=(
+"clone_multicloud_repos" "compile_multicloud_repos" "get_multicloud_images" "install_multicloud"
+)
+
+# test_clone_multicloud_repos() - Verify that MultiCloud repositories are cloned properly
+function test_clone_multicloud_repos {
+ clone_repos "multicloud"
+
+ #asserts_file_exist ${src_folders[multicloud]}/
+ asserts_file_exist ${src_folders[multicloud]}/framework/pom.xml
+ asserts_file_exist ${src_folders[multicloud]}/openstack/pom.xml
+ asserts_file_exist ${src_folders[multicloud]}/openstack/vmware/pom.xml
+ asserts_file_exist ${src_folders[multicloud]}/openstack/windriver/pom.xml
+ #asserts_file_exist ${src_folders[multicloud]}/azure/
+}
+
+# test_compile_multicloud_repos() - Verify that MultiCloud repositories are compiled properly
+function test_compile_multicloud_repos {
+ clone_repos "multicloud"
+ compile_repos "multicloud"
+
+ asserts_file_exist ${src_folders[multicloud]}/openstack/newton/target/multicloud-openstack-newton-1.0.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[multicloud]}/openstack/ocata/target/multicloud-openstack-ocata-1.0.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[multicloud]}/openstack/windriver/target/multicloud-openstack-windriver-1.0.0-SNAPSHOT.zip
+}
+
+# test_get_multicloud_images() - Verify that MultiCloud Docker images are retrieved or built properly
+function test_get_multicloud_images {
+ clone_repos "multicloud"
+ get_multicloud_images
+
+ asserts_image onap/multicloud/openstack-$openstack_release
+}
+
+# test_install_multicloud() - Verify the build and start-up of MultiCloud services
+function test_install_multicloud {
+ clone_repos "multicloud"
+ get_multicloud_images
+ install_multicloud
+
+ # NOTE(electrocucaracha): Depends on https://gerrit.onap.org/r/#/c/23631/
+ asserts_http_status_code http://127.0.0.1:9003/api/multicloud-$openstack_release/v0/swagger.json
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tests/test_policy b/tests/test_policy
new file mode 100755
index 0000000..b666cf9
--- /dev/null
+++ b/tests/test_policy
@@ -0,0 +1,100 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/policy
+
+covered_functions=(
+"clone_all_policy_repos"
+"compile_all_policy_repos"
+"get_policy_images"
+"install_policy"
+)
+
+# test_clone_all_policy_repos() - Verify cloning of Policy source code
+function test_clone_all_policy_repos {
+ clone_repos "policy"
+
+ asserts_file_exist ${src_folders[policy]}/api/pom.xml
+ asserts_file_exist ${src_folders[policy]}/common/pom.xml
+ asserts_file_exist ${src_folders[policy]}/docker/pom.xml
+ asserts_file_exist ${src_folders[policy]}/drools-applications/pom.xml
+ asserts_file_exist ${src_folders[policy]}/drools-pdp/pom.xml
+ asserts_file_exist ${src_folders[policy]}/engine/pom.xml
+ asserts_file_exist ${src_folders[policy]}/gui/pom.xml
+ asserts_file_exist ${src_folders[policy]}/pap/pom.xml
+ asserts_file_exist ${src_folders[policy]}/pdp/pom.xml
+}
+
+# test_compile_all_policy_repos() - Verify compiling of Policy source code
+function test_compile_all_policy_repos {
+ clone_repos "policy"
+ compile_repos "policy"
+
+ asserts_file_exist ${src_folders[policy]}/common/common-logging/target/ONAP-Logging-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/common/integrity-audit/target/integrity-audit-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/common/integrity-monitor/target/integrity-monitor-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/common/site-manager/target/site-manager-*-SNAPSHOT.jar
+ for actor in appc appclcm so test vfc; do
+ asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/common/actors/actor.$actor/target/actor.$actor-*-SNAPSHOT.jar
+ done
+ asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/common/actors/actorServiceProvider/target/actorServiceProvider-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/common/eventmanager/target/eventmanager-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/common/guard/target/guard-*-SNAPSHOT.jar
+ for module in aai appc appclcm events rest sdc so trafficgenerator vfc; do
+ asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/common/model-impl/$module/target/$module-*-SNAPSHOT.jar
+ done
+ asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/common/policy-yaml/target/policy-yaml-*-SNAPSHOT.jar
+ for package in apps artifacts; do
+ asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/packages/$package/target/$package-*-SNAPSHOT.zip
+ done
+ #asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/packages/basex/target/basex-*-SNAPSHOT.tar.gz
+ asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/templates/template.demo/target/template.demo-*-SNAPSHOT.jar
+ for feature in eelf healthcheck session-persistence; do
+ asserts_file_exist ${src_folders[policy]}/drools-pdp/feature-$feature/target/feature-$feature-*-SNAPSHOT.jar
+ done
+ #asserts_file_exist ${src_folders[policy]}/drools-pdp/packages/base/target/base-*-SNAPSHOT.tar.gz
+ #asserts_file_exist ${src_folders[policy]}/drools-pdp/packages/install/target/install-drools-*-SNAPSHOT.zip
+ for policy in core endpoints management utils; do
+ asserts_file_exist ${src_folders[policy]}/drools-pdp/policy-$policy/target/policy-$policy-*-SNAPSHOT.jar
+ done
+ for engine in BRMSGateway LogParser ONAP-PDP ONAP-XACML ONAP-REST; do
+ asserts_file_exist ${src_folders[policy]}/engine/$engine/target/$engine-*-SNAPSHOT.jar
+ done
+ for engine in ONAP-PAP-REST ONAP-PDP-REST ONAP-SDK-APP; do
+ asserts_file_exist ${src_folders[policy]}/engine/$engine/target/$engine-*-SNAPSHOT.war
+ done
+ asserts_file_exist ${src_folders[policy]}/engine/packages/base/target/base-*-SNAPSHOT.tar.gz
+ asserts_file_exist ${src_folders[policy]}/engine/packages/install/target/install-*-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[policy]}/engine/POLICY-SDK-APP/target/POLICY-SDK-APP-*-SNAPSHOT.war
+ asserts_file_exist ${src_folders[policy]}/engine/PolicyEngineAPI/target/PolicyEngineAPI-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/engine/PolicyEngineClient/target/PolicyEngineClient-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/engine/PolicyEngineUtils/target/PolicyEngineUtils-*-SNAPSHOT.jar
+}
+
+# test_get_policy_images() - Verify that Policy Docker images are retrieved properly
+function test_get_policy_images {
+ clone_repos "policy"
+ get_policy_images
+
+ for image in os nexus db base drools pe; do
+ asserts_image onap/policy/policy-$image
+ done
+}
+
+# test_install_policy() - Verify that Policy services are started properly
+function test_install_policy {
+ clone_repos "policy"
+ get_policy_images
+ install_policy
+
+ for image in pe drools db nexus; do
+ asserts_image_running onap/policy/policy-$image
+ done
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tests/test_portal b/tests/test_portal
new file mode 100755
index 0000000..d20f173
--- /dev/null
+++ b/tests/test_portal
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/portal
+
+covered_functions=(
+"clone_all_portal_repos"
+"compile_all_portal_repos"
+"get_portal_images"
+"install_portal"
+)
+
+# test_clone_all_portal_repos() - Verify cloning of Portal source code
+function test_clone_all_portal_repos {
+ clone_all_portal_repos
+
+ asserts_file_exist ${src_folders[portal]}/pom.xml
+ asserts_file_exist ${src_folders[portal]}/ecompsdkos/LICENSE.txt
+ asserts_file_exist ${src_folders[portal]}/dmaapbc/pom.xml
+ asserts_file_exist ${src_folders[portal]}/sdk/LICENSE.txt
+}
+
+# test_compile_all_portal_repos() - Verify compiling of Portal source code
+function test_compile_all_portal_repos {
+ clone_all_portal_repos
+ compile_all_portal_repos
+
+ asserts_file_exist ${src_folders[portal]}/ecomp-portal-BE-common/target/ecompportal-be-common.war
+ asserts_file_exist ${src_folders[portal]}/ecomp-portal-BE-common-test/target/ecomp-portal-BE-common-test.jar
+ asserts_file_exist ${src_folders[portal]}/ecomp-portal-BE-os/target/ecompportal-be-os.war
+}
+
+# test_get_portal_images() - Verify that Portal Docker images are retrieved or built properly
+function test_get_portal_images {
+ clone_all_portal_repos
+ get_portal_images
+
+ asserts_image portal-db
+ asserts_image portal-wms
+ asserts_image portal-apps
+ asserts_image mariadb
+}
+
+# test_install_portal() - Verify installation of Portal services
+function test_install_portal {
+ clone_all_portal_repos
+ install_mariadb
+ install_portal
+
+ asserts_image_running portal-db
+ asserts_image_running portal-wms
+ asserts_image_running portal-apps
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tests/test_robot b/tests/test_robot
new file mode 100755
index 0000000..b96a088
--- /dev/null
+++ b/tests/test_robot
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/robot
+
+covered_functions=(
+"clone_robot_repos" "compile_robot_repos" "get_robot_images" "install_robot"
+)
+
+# test_clone_robot_repos() - Verify that Robot repositories are cloned properly
+function test_clone_robot_repos {
+ clone_repos "robot" "testsuite"
+
+ asserts_file_exist ${src_folders[robot]}/LICENSE.TXT
+ asserts_file_exist ${src_folders[robot]}/heatbridge/pom.xml
+ asserts_file_exist ${src_folders[robot]}/properties/LICENSE.TXT
+ asserts_file_exist ${src_folders[robot]}/python-testing-utils/LICENSE.TXT
+}
+
+# test_compile_robot_repos() - Verify that Robot source code can be compiled properly
+function test_compile_robot_repos {
+ clone_repos "robot" "testsuite"
+ compile_repos "robot"
+
+ #asserts_file_exist $testsuite_src_folder/heatbridge/target/maven-python/dist/heatbridge-0.3.0.dev0-py2-none-any.whl
+}
+
+# test_get_robot_images() - Verify that Robot Docker images are retrieved
+function test_get_robot_images {
+ get_robot_images
+
+ asserts_image $nexus_docker_repo/openecomp/testsuite
+}
+
+# test_install_robot() - Verify the build and start-up of Robot services
+function test_install_robot {
+ clone_repos "robot" "testsuite"
+ get_robot_images
+ install_robot
+
+ asserts_image_running $nexus_docker_repo/openecomp/testsuite
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tests/test_sdc b/tests/test_sdc
new file mode 100755
index 0000000..6765780
--- /dev/null
+++ b/tests/test_sdc
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/sdc
+
+covered_functions=(
+"clone_all_sdc_repos" "compile_all_sdc_repos" "get_sdc_images" "install_sdc"
+)
+
+# test_clone_all_sdc_repos() - Verifies the retrieval of SDC source code repos
+function test_clone_all_sdc_repos {
+ clone_repos "sdc"
+
+ asserts_file_exist ${src_folders[sdc]}/pom.xml
+ asserts_file_exist ${src_folders[sdc]}/sdc-os-chef/pom.xml
+ asserts_file_exist ${src_folders[sdc]}/jtosca/pom.xml
+ asserts_file_exist ${src_folders[sdc]}/sdc-distribution-client/pom.xml
+ asserts_file_exist ${src_folders[sdc]}/sdc-titan-cassandra/pom.xml
+ asserts_file_exist ${src_folders[sdc]}/sdc-tosca/pom.xml
+ asserts_file_exist ${src_folders[sdc]}/sdc_common/pom.xml
+}
+
+# test_compile_all_sdc_repos() - Verifies the correct compilation of SDC repositories
+function test_compile_all_sdc_repos {
+ clone_repos "sdc"
+ compile_repos "sdc"
+
+ #asserts_file_exist ${src_folders[sdc]}/jtosca/target/jtosca-1.1.10-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[sdc]}/sdc-distribution-client/sdc-distribution-ci/target/sdc-distribution-ci-1.1.*-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[sdc]}/sdc-distribution-client/sdc-distribution-client/target/sdc-distribution-client-1.1.*-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[sdc]}/sdc-titan-cassandra/target/jamm-0.3.0.jar
+ #asserts_file_exist ${src_folders[sdc]}/sdc-tosca/target/sdc-tosca-1.1.*-SNAPSHOT.jar
+
+ #for dirc in logging sdc-artifact-generator; do
+ for dirc in logging; do
+ name="openecomp-$dirc"
+ for module in api core; do
+ fullname="$name-$module"
+ asserts_file_exist ${src_folders[sdc]}/sdc_common/$name-lib/$fullname/target/$fullname-1.1.0-SNAPSHOT.jar
+ done
+ done
+}
+
+# test_get_sdc_images() - Verifies the correct retrieval of SDC Docker images
+function test_get_sdc_images {
+ clone_repos "sdc"
+ get_sdc_images
+
+ for image in sanity elasticsearch cassandra kibana frontend backend; do
+ asserts_image openecomp/base_sdc-$image
+ done
+}
+
+# test_install_sdc() - Verifies that SDC services are up and running
+function test_install_sdc {
+ clone_repos "sdc"
+ get_sdc_images
+ install_sdc
+
+ for image in elasticsearch cassandra kibana frontend backend; do
+ asserts_image_running openecomp/base_sdc-$image
+ done
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tests/test_sdnc b/tests/test_sdnc
new file mode 100755
index 0000000..c4ddc56
--- /dev/null
+++ b/tests/test_sdnc
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/sdnc
+
+covered_functions=(
+"clone_all_sdnc_repos" "compile_all_sdnc_repos" "get_sdnc_images" "install_sdnc"
+)
+
+# test_clone_all_sdnc_repos() - Verify the source code retrieval of the SDNC project
+function test_clone_all_sdnc_repos {
+ clone_repos "sdnc"
+
+ asserts_file_exist ${src_folders[sdnc]}/adaptors/pom.xml
+ #asserts_file_exist ${src_folders[sdnc]}/architecture/docs/index.rst
+ asserts_file_exist ${src_folders[sdnc]}/core/pom.xml
+ #asserts_file_exist ${src_folders[sdnc]}/features/docs/index.rst
+ asserts_file_exist ${src_folders[sdnc]}/northbound/pom.xml
+ asserts_file_exist ${src_folders[sdnc]}/oam/pom.xml
+ #asserts_file_exist ${src_folders[sdnc]}/parent/docs/index.rst
+ asserts_file_exist ${src_folders[sdnc]}/plugins/pom.xml
+}
+
+# test_compile_all_sdnc_repos() - Verify the correct compilation of SDNC projects
+function test_compile_all_sdnc_repos {
+ clone_repos "sdnc"
+ compile_all_sdnc_repos
+
+ for component in generic-resource-api vnfapi vnftools; do
+ if [[ "$component" == "vnfapi" ]]; then
+ asserts_file_exist ${src_folders[sdnc]}/northbound/vnfapi/model/target/vnfapi-model-1.2.0-SNAPSHOT.jar
+ fi
+ asserts_file_exist ${src_folders[sdnc]}/northbound/$component/installer/target/sdnc-$component-1.2.0-SNAPSHOT-installer.zip
+ asserts_file_exist ${src_folders[sdnc]}/northbound/$component/features/target/$component-features-1.2.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[sdnc]}/northbound/$component/provider/target/$component-provider-1.2.0-SNAPSHOT.jar
+ done
+ asserts_file_exist ${src_folders[sdnc]}/oam/admportal/target/admportal.*-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[sdnc]}/oam/dgbuilder/target/dgbuilder.*-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[sdnc]}/oam/platform-logic/installer/target/platform-logic-installer-*-SNAPSHOT.zip
+}
+
+# test_get_sdnc_images() - Verify that the SDNC images are created or retrieved
+function test_get_sdnc_images {
+ clone_repos "sdnc"
+ get_sdnc_images
+
+ asserts_image onap/sdnc-image
+ asserts_image onap/admportal-sdnc-image
+ asserts_image onap/dgbuilder-sdnc-image
+ asserts_image mysql/mysql-server:5.6
+}
+
+# test_install_sdnc() - Verify that the SDNC Docker containers are up and running
+function test_install_sdnc {
+ clone_repos "sdnc"
+ get_sdnc_images
+ install_sdnc
+
+ asserts_image_running onap/ccsdk-dgbuilder-image
+ asserts_image_running onap/admportal-sdnc-image
+ asserts_image_running onap/sdnc-image
+ asserts_image_running mysql/mysql-server:5.6
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tests/test_vfc b/tests/test_vfc
new file mode 100755
index 0000000..3fadeaf
--- /dev/null
+++ b/tests/test_vfc
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/vfc
+
+covered_functions=(
+"clone_all_vfc_repos" "compile_all_vfc_repos" "install_vfc" "get_vfc_images"
+)
+
+# test_clone_all_vfc_repos() - Verify cloning and pulling source code from repositories
+function test_clone_all_vfc_repos {
+ clone_repos "vfc"
+
+ #asserts_file_exist ${src_folders[vfc]}/pom.xml
+ #asserts_file_exist ${src_folders[vfc]}/gvnfm
+ asserts_file_exist ${src_folders[vfc]}/gvnfm/vnflcm/pom.xml
+ asserts_file_exist ${src_folders[vfc]}/gvnfm/vnfmgr/pom.xml
+ asserts_file_exist ${src_folders[vfc]}/gvnfm/vnfres/pom.xml
+ #asserts_file_exist ${src_folders[vfc]}/nfvo
+ asserts_file_exist ${src_folders[vfc]}/nfvo/catalog/run.sh
+ #asserts_file_exist ${src_folders[vfc]}/nfvo/driver
+ asserts_file_exist ${src_folders[vfc]}/nfvo/driver/ems/pom.xml
+ asserts_file_exist ${src_folders[vfc]}/nfvo/driver/sfc/pom.xml
+ #asserts_file_exist ${src_folders[vfc]}/nfvo/driver/vnfm
+ asserts_file_exist ${src_folders[vfc]}/nfvo/driver/vnfm/gvnfm/pom.xml
+ asserts_file_exist ${src_folders[vfc]}/nfvo/driver/vnfm/svnfm/pom.xml
+ asserts_file_exist ${src_folders[vfc]}/nfvo/lcm/pom.xml
+ asserts_file_exist ${src_folders[vfc]}/nfvo/wfengine/wso2/pom.xml
+}
+
+# test_compile_all_vfc_repos() - Verify that all the VFC modules are compiled properly
+function test_compile_all_vfc_repos {
+ clone_repos "vfc"
+ compile_all_vfc_repos
+
+ # TODO(electrocucaracha): Add asserts_file_exist
+}
+
+# test_get_vfc_images() - Verify all VFC images are built correctly.
+function test_get_vfc_images {
+ clone_repos "vfc"
+ get_vfc_images
+
+ asserts_image onap/gvnfmdriver
+ asserts_image onap/nslcm
+ asserts_image onap/vnfres
+ asserts_image onap/vnfmgr
+ asserts_image onap/vnflcm
+}
+
+# test_install_vfc() - Verify that the VFC are up and running
+function test_install_vfc {
+ clone_repos "vfc"
+ get_vfc_images
+ install_vfc
+
+ asserts_image_running onap/nslcm
+ asserts_image_running onap/gvnfmdriver
+ asserts_image_running onap/vnfres
+ asserts_image_running onap/vnfmgr
+ asserts_image_running onap/vnflcm
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tests/test_vid b/tests/test_vid
new file mode 100755
index 0000000..79a4152
--- /dev/null
+++ b/tests/test_vid
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/vid
+
+covered_functions=(
+"clone_all_vid_repos" "compile_all_vid_repos" "get_vid_images" "install_vid"
+)
+
+# test_clone_all_vid_repos() - Verifies that VID repositories are retrieved properly
+function test_clone_all_vid_repos {
+ clone_repos "vid"
+
+ asserts_file_exist ${src_folders[vid]}/pom.xml
+ asserts_file_exist ${src_folders[vid]}/asdcclient/pom.xml
+}
+
+# test_compile_all_vid_repos() - Verifies that VID source code is compiled properly
+function test_compile_all_vid_repos {
+ clone_repos "vid"
+ compile_repos "vid"
+
+ asserts_file_exist ${src_folders[vid]}/asdcclient/target/asdcclient-1.0.2-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[vid]}/epsdk-app-onap/target/vid.war
+ asserts_file_exist ${src_folders[vid]}/vid-app-common/target/vid-common.war
+}
+
+# test_get_vid_images() - Verifies that VID Docker images are built properly
+function test_get_vid_images {
+ clone_repos "vid"
+ get_vid_images
+
+ asserts_image openecomp/vid
+ asserts_image mariadb:10
+}
+
+# test_install_vid() - Verifies that VID services are up and running
+function test_install_vid {
+ clone_repos "vid"
+ get_vid_images
+ install_vid
+
+ vid_image=$(docker images | grep vid | grep latest | awk '{print $1 ":" $2}')
+ asserts_image_running $vid_image
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tests/test_vnfsdk b/tests/test_vnfsdk
new file mode 100755
index 0000000..cd29a97
--- /dev/null
+++ b/tests/test_vnfsdk
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/vnfsdk
+
+covered_functions=(
+"clone_all_vnfsdk_repos" "compile_all_vnfsdk_repos" "get_vnfsdk_images" "install_vnfsdk"
+)
+
+# test_clone_all_vnfsdk_repos() - Verify the cloning of the VNFSDK source repositories.
+function test_clone_all_vnfsdk_repos {
+ clone_repos "vnfsdk"
+
+ asserts_file_exist ${src_folders[vnfsdk]}/compliance/veslibrary/README
+ asserts_file_exist ${src_folders[vnfsdk]}/functest/pom.xml
+ asserts_file_exist ${src_folders[vnfsdk]}/lctest/pom.xml
+ asserts_file_exist ${src_folders[vnfsdk]}/model/docs/index.rst
+ asserts_file_exist ${src_folders[vnfsdk]}/pkgtools/pom.xml
+ asserts_file_exist ${src_folders[vnfsdk]}/refrepo/pom.xml
+ asserts_file_exist ${src_folders[vnfsdk]}/validation/pom.xml
+}
+
+# test_compile_all_vnfsdk_repos() - Verify that the VNFSDK source repositories compile correctly.
+function test_compile_all_vnfsdk_repos {
+ clone_repos "vnfsdk"
+ compile_repos "vnfsdk"
+
+ asserts_file_exist ${src_folders[vnfsdk]}/lctest/lifecycle-test/target/lifecycle-test-service-1.0.0-SNAPSHOT.war
+ asserts_file_exist ${src_folders[vnfsdk]}/pkgtools/target/vnf-sdk-pkgtools-1.0.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[vnfsdk]}/refrepo/vnfmarket-be/vnf-sdk-marketplace/target/ROOT.war
+ asserts_file_exist ${src_folders[vnfsdk]}/validation/csarvalidation/target/validation-csar-1.0.0-SNAPSHOT.jar
+}
+
+# test_get_vnfsdk_images() - Verify that the VNFSDK images are created or retrieved
+function test_get_vnfsdk_images {
+ clone_repos "vnfsdk"
+ get_vnfsdk_images
+
+ asserts_image refrepo:1.0-STAGING-latest
+ asserts_image refrepo:latest
+}
+
+# test_install_vnfsdk() - Verify that VNFSDK Docker images are running.
+function test_install_vnfsdk {
+ clone_repos "vnfsdk"
+ get_vnfsdk_images
+ install_vnfsdk
+
+ asserts_image_running refrepo:1.0-STAGING-latest
+ asserts_image refrepo:latest
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tests/test_vvp b/tests/test_vvp
new file mode 100755
index 0000000..8e9594d
--- /dev/null
+++ b/tests/test_vvp
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/vvp
+
+covered_functions=(
+"clone_all_vvp_repos" "compile_all_vvp_repos" "get_vvp_images" "install_vvp"
+)
+
+# test_clone_all_vvp_repos() - Verify the cloning of the VVP source repositories.
+function test_clone_all_vvp_repos {
+ clone_repos "vvp"
+
+ asserts_file_exist ${src_folders[vvp]}/cms/pom.xml
+ asserts_file_exist ${src_folders[vvp]}/devkit/LICENSE.TXT
+ asserts_file_exist ${src_folders[vvp]}/engagementmgr/pom.xml
+ asserts_file_exist ${src_folders[vvp]}/gitlab/pom.xml
+ asserts_file_exist ${src_folders[vvp]}/image-scanner/pom.xml
+ asserts_file_exist ${src_folders[vvp]}/jenkins/pom.xml
+ asserts_file_exist ${src_folders[vvp]}/portal/pom.xml
+ asserts_file_exist ${src_folders[vvp]}/postgresql/pom.xml
+ asserts_file_exist ${src_folders[vvp]}/test-engine/pom.xml
+ asserts_file_exist ${src_folders[vvp]}/validation-scripts/LICENSE.txt
+}
+
+# test_compile_all_vvp_repos() - Verify that the VVP source repositories compile correctly.
+function test_compile_all_vvp_repos {
+ clone_repos "vvp"
+ compile_repos "vvp"
+
+ # asserts_file_exist ${src_folders[vvp]}/
+}
+
+# test_get_vvp_images() - Verify that the VVP images are created or retrieved
+function test_get_vvp_images {
+ clone_repos "vvp"
+ get_vvp_images
+
+ #asserts_image refrepo:latest
+}
+
+# test_install_vvp() - Verify that VVP Docker images are running.
+function test_install_vvp {
+ clone_repos "vvp"
+ get_vvp_images
+ install_vvp
+
+ #asserts_image_running refrepo:1.0-STAGING-latest
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/tools/Run.ps1 b/tools/Run.ps1
new file mode 100644
index 0000000..de57a15
--- /dev/null
+++ b/tools/Run.ps1
@@ -0,0 +1,120 @@
+<#
+.SYNOPSIS
+This script configures the environment variables needed by the selected component and then recreates its Vagrant machine.
+
+.EXAMPLE
+.\tools\Run.ps1 testing -s functions -c install_maven -y
+
+.EXAMPLE
+.\tools\Run.ps1 all_in_one
+
+.EXAMPLE
+.\tools\Run.ps1 aai
+
+.PARAMETER s
+Test suite to use in testing mode.
+
+.PARAMETER c
+Test case to use in testing mode.
+
+.PARAMETER y
+Skips warning prompt.
+
+.PARAMETER skip_get_images
+Skips the image creation or retrieval process.
+
+.PARAMETER skip_install
+Skips the service installation process.
+
+.LINK
+https://wiki.onap.org/display/DW/ONAP+on+Vagrant
+#>
+
+Param(
+ [ValidateSet("all_in_one","dns", "mr", "sdc", "aai", "mso", "robot", "vid", "sdnc", "portal", "dcae", "policy", "appc", "vfc", "multicloud", "ccsdk", "vnfsdk", "vvp", "openstack", "msb", "oom", "testing")]
+
+ [Parameter(Mandatory=$True,Position=0)]
+ [ValidateNotNullOrEmpty()]
+ [String]
+ $Command
+,
+ [Parameter(Mandatory=$False,HelpMessage="Test suite to use in testing mode.")]
+ [Alias("suite")]
+ [String]
+ $s = "*"
+,
+ [Parameter(Mandatory=$False,HelpMessage="Test case to sue in testing mode.")]
+ [Alias("case")]
+ [String]
+ $c = "*"
+,
+ [Parameter(Mandatory=$False,HelpMessage="Skips warning prompt.")]
+ [AllowNull()]
+ [Switch]
+ $y = $True
+,
+ [Parameter(Mandatory=$False,HelpMessage="Skips creation or retrieve image process.")]
+ [AllowNull()]
+ [Switch]
+ $skip_get_images = $True
+,
+ [Parameter(Mandatory=$False,HelpMessage="Skips warning prompt.")]
+ [AllowNull()]
+ [Switch]
+ $skip_install = $True
+)
+
+if ( -Not "testing".Equals($Command) )
+ {
+ if($PsBoundParameters.ContainsKey('s'))
+ {
+ Write-Host "Test suite should only be specified in testing mode."
+ Write-Host ".\tools\Run.ps1 -?"
+ exit 1
+ }
+ if($PsBoundParameters.ContainsKey('c'))
+ {
+ Write-Host "Test case should only be specified in testing mode."
+ Write-Host ".\tools\Run.ps1 -?"
+ exit 1
+ }
+ }
+
+$env:SKIP_GET_IMAGES=$skip_get_images
+$env:SKIP_INSTALL=$skip_install
+
+switch ($Command)
+ {
+ "all_in_one" { $env:DEPLOY_MODE="all-in-one" }
+ { @("dns", "mr", "sdc", "aai", "mso", "robot", "vid", "sdnc", "portal", "dcae", "policy", "appc", "vfc", "multicloud", "ccsdk", "vnfsdk", "vvp", "openstack", "msb", "oom") -contains $_ } { $env:DEPLOY_MODE="individual" }
+ "testing"
+ {
+ $env:DEPLOY_MODE="testing"
+ If(-Not $y)
+ {
+ Write-Host "Warning: This test script will delete the contents of ../opt/ and ~/.m2."
+ $yn = Read-Host "Would you like to continue? [y]es/[n]o: "
+ switch ($yn)
+ {
+ { @("n", "N") -contains $_ }
+ {
+ Write-Host "Exiting."
+ exit 0
+ }
+ }
+ }
+ $env:TEST_SUITE=$s
+ $env:TEST_CASE=$c
+
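+        # cmd.exe's rd is shelled out here, presumably because Remove-Item can
+        # fail on the very deep directory trees that Maven produces.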
+ &cmd.exe /c rd /s /q .\opt\
+ &cmd.exe /c rd /s /q $HOME\.m2\
+ }
+ default
+ {
+        Write-Output "Usage: .\tools\Run.ps1 {all_in_one|dns|mr|sdc|aai|mso|robot|vid|sdnc|portal|dcae|policy|appc|vfc|multicloud|ccsdk|vnfsdk|vvp|openstack|msb|oom|testing}"
+ exit 1
+ }
+ }
+
+vagrant destroy -f $Command
+vagrant up $Command
diff --git a/tools/get_repos.sh b/tools/get_repos.sh
new file mode 100755
index 0000000..ae5d4e0
--- /dev/null
+++ b/tools/get_repos.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+repos=(
+"aai/aai-data" "aai/aai-config" "aai/aai-service" "aai/data-router" "aai/logging-service" "aai/model-loader" "aai/resources" "aai/rest-client" "aai/router-core" "aai/search-data-service" "aai/sparky-be" "aai/sparky-fe" "aai/test-config" "aai/traversal"
+"appc" "appc/deployment"
+"ci-management"
+"dcae" "dcae/apod" "dcae/apod/analytics" "dcae/apod/buildtools" "dcae/apod/cdap" "dcae/collectors" "dcae/collectors/ves" "dcae/controller" "dcae/controller/analytics" "dcae/dcae-inventory" "dcae/demo" "dcae/demo/startup" "dcae/demo/startup/aaf" "dcae/demo/startup/controller" "dcae/demo/startup/message-router" "dcae/dmaapbc" "dcae/operation" "dcae/operation/utils" "dcae/orch-dispatcher" "dcae/pgaas" "dcae/utils" "dcae/utils/buildtools"
+"demo"
+"ecompsdkos"
+"mso" "mso/chef-repo" "mso/docker-config" "mso/libs" "mso/mso-config"
+"ncomp" "ncomp/cdap" "ncomp/core" "ncomp/docker" "ncomp/maven" "ncomp/openstack" "ncomp/sirius" "ncomp/sirius/manager" "ncomp/utils"
+"policy/common" "policy/docker" "policy/drools-applications" "policy/drools-pdp" "policy/engine"
+"portal"
+"sdc" "sdc/sdc-distribution-client" "sdc/sdc-titan-cassandra" "sdc/sdc_common"
+"sdnc/adaptors" "sdnc/core" "sdnc/northbound" "sdnc/oam" "sdnc/plugins"
+"testsuite" "testsuite/heatbridge" "testsuite/properties" "testsuite/python-testing-utils"
+"ui" "ui/dmaapbc"
+"vid" "vid/asdcclient")
+
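+# git_clone_or_pull() - Clone an ONAP repository under ./opt/, or refresh an
+# existing clone with a pull, setting up git-review where configured.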
+function git_clone_or_pull {
+ local repo=$1
+    local folder="./opt/$repo"
+    if [ ! -d "$folder" ]; then
+        git clone https://git.onap.org/$repo "$folder"
+    fi
+    pushd "$folder" > /dev/null
+ git pull -q
+ if [ -f .gitreview ]; then
+ git review -s
+ fi
+ popd > /dev/null
+}
+
+for repo in "${repos[@]}"; do
+    echo "Working on $repo repository..."
+    git_clone_or_pull "$repo"
+done
diff --git a/tools/run.sh b/tools/run.sh
new file mode 100755
index 0000000..27e0aa3
--- /dev/null
+++ b/tools/run.sh
@@ -0,0 +1,100 @@
+#!/bin/bash
+
+function usage {
+ cat <<EOF
+Usage: run.sh <command> [-y] [-g] [-i] [-s <suite>] [-c <case>] [-?]
+Optional arguments:
+ -y
+ Skips warning prompt.
+ -g
+ Skips creation or retrieve image process.
+ -i
+ Skips installation service process.
+ -s <suite>
+ Test suite to use in testing mode.
+ -c <case>
+ Test case to use in testing mode.
+Commands:
+ all_in_one Deploy in all-in-one mode.
+ dns|mr|sdc|aai|mso|robot|vid|sdnc|portal|dcae|policy|appc|vfc|vnfsdk|multicloud|ccsdk|vvp|openstack|msb|oom Deploy chosen service.
+ testing Deploy in testing mode.
+EOF
+}
+
+run=false
+test_suite="*"
+test_case="*"
+
+COMMAND=$1
+
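+# Flags are parsed starting from the second argument; $1 is the command itself.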
+while getopts "ygis:c:" OPTION "${@:2}"; do
+ case "$OPTION" in
+ y)
+ run=true
+ ;;
+ g)
+ export SKIP_GET_IMAGES="True"
+ ;;
+ i)
+ export SKIP_INSTALL="True"
+ ;;
+ s)
+ if [ "$COMMAND" != "testing" ] ; then
+ echo "Test suite should only be specified in testing mode."
+ echo "./tools/run.sh -? for usage."
+ exit 1
+ fi
+ test_suite=$OPTARG
+ ;;
+ c)
+ if [ "$COMMAND" != "testing" ] ; then
+ echo "Test case should only be specified in testing mode."
+ echo "./tools/run.sh -? for usage."
+ exit 1
+ fi
+ test_case=$OPTARG
+ ;;
+ \?)
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+case $COMMAND in
+ "all_in_one" )
+ export DEPLOY_MODE='all-in-one'
+ ;;
+ "dns" | "mr" | "sdc" | "aai" | "mso" | "robot" | "vid" | "sdnc" | "portal" | "dcae" | "policy" | "appc" | "vfc" | "vnfsdk"| "multicloud" | "ccsdk" | "vvp" | "openstack" | "msb" | "oom" )
+ export DEPLOY_MODE='individual'
+ ;;
+ "testing" )
+ export DEPLOY_MODE='testing'
+ if [ "$run" == false ] ; then
+ while true ; do
+                echo "Warning: This test script will delete the contents of ./opt/ and ~/.m2."
+ read -p "Would you like to continue? [y]es/[n]o: " yn
+ case $yn in
+ [Yy]*)
+ break
+ ;;
+ [Nn]*)
+ echo "Exiting."
+ exit 0
+ ;;
+ esac
+ done
+ fi
+
+ export TEST_SUITE=$test_suite
+ export TEST_CASE=$test_case
+ rm -rf ./opt/
+ rm -rf ~/.m2/
+ ;;
+ * )
+ usage
+ exit 1
+esac
+
+vagrant destroy -f $COMMAND
+vagrant up $COMMAND
diff --git a/tools/setup.sh b/tools/setup.sh
new file mode 100755
index 0000000..fe3a3fc
--- /dev/null
+++ b/tools/setup.sh
@@ -0,0 +1,133 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017-2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
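+# Pinned Vagrant release installed on every supported distribution below.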
+vagrant_version=2.0.2
+
+function usage {
+ cat <<EOF
+usage: setup.sh -p <PROVIDER>
+Argument:
+ -p Vagrant provider
+EOF
+}
+
+while getopts ":p:" OPTION; do
+ case $OPTION in
+ p)
+ provider=$OPTARG
+ ;;
+ \?)
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+case $provider in
+ "virtualbox" | "libvirt" )
+ export VAGRANT_DEFAULT_PROVIDER=${provider}
+ ;;
+ * )
+ usage
+ exit 1
+esac
+source /etc/os-release || source /usr/lib/os-release
+
+packages=()
+case ${ID,,} in
+ *suse)
+ INSTALLER_CMD="sudo -H -E zypper -q install -y --no-recommends"
+
+ # Vagrant installation
+ vagrant_pgp="pgp_keys.asc"
+ wget -q https://keybase.io/hashicorp/$vagrant_pgp
+ wget -q https://releases.hashicorp.com/vagrant/$vagrant_version/vagrant_${vagrant_version}_x86_64.rpm
+ gpg --quiet --with-fingerprint $vagrant_pgp
+ sudo rpm --import $vagrant_pgp
+ sudo rpm --checksig vagrant_${vagrant_version}_x86_64.rpm
+ sudo rpm --install vagrant_${vagrant_version}_x86_64.rpm
+ rm vagrant_${vagrant_version}_x86_64.rpm
+ rm $vagrant_pgp
+
+ case $VAGRANT_DEFAULT_PROVIDER in
+ virtualbox)
+            sudo wget -q http://download.virtualbox.org/virtualbox/rpm/opensuse/$VERSION/virtualbox.repo -P /etc/zypp/repos.d/
+            $INSTALLER_CMD dkms
+            wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | sudo rpm --import -
+ packages+=(VirtualBox-5.1)
+ ;;
+ libvirt)
+ # vagrant-libvirt dependencies
+ packages+=(qemu libvirt libvirt-devel ruby-devel gcc qemu-kvm zlib-devel libxml2-devel libxslt-devel make)
+ # NFS
+ packages+=(nfs-kernel-server)
+ ;;
+ esac
+ sudo zypper -n ref
+ ;;
+
+ ubuntu|debian)
+ INSTALLER_CMD="sudo -H -E apt-get -y -q=3 install"
+
+ # Vagrant installation
+ wget -q https://releases.hashicorp.com/vagrant/$vagrant_version/vagrant_${vagrant_version}_x86_64.deb
+ sudo dpkg -i vagrant_${vagrant_version}_x86_64.deb
+ rm vagrant_${vagrant_version}_x86_64.deb
+
+ case $VAGRANT_DEFAULT_PROVIDER in
+ virtualbox)
+            echo "deb http://download.virtualbox.org/virtualbox/debian trusty contrib" | sudo tee -a /etc/apt/sources.list
+ wget -q https://www.virtualbox.org/download/oracle_vbox_2016.asc -O- | sudo apt-key add -
+ wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | sudo apt-key add -
+ packages+=(virtualbox-5.1 dkms)
+ ;;
+ libvirt)
+ # vagrant-libvirt dependencies
+ packages+=(qemu libvirt-bin ebtables dnsmasq libxslt-dev libxml2-dev libvirt-dev zlib1g-dev ruby-dev)
+ # NFS
+ packages+=(nfs-kernel-server)
+ ;;
+ esac
+ sudo apt-get update
+ ;;
+
+ rhel|centos|fedora)
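+        # Prefer dnf when available (e.g. Fedora); otherwise fall back to yum.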
+ PKG_MANAGER=$(which dnf || which yum)
+ sudo $PKG_MANAGER updateinfo
+ INSTALLER_CMD="sudo -H -E ${PKG_MANAGER} -q -y install"
+
+ # Vagrant installation
+ wget -q https://releases.hashicorp.com/vagrant/$vagrant_version/vagrant_${vagrant_version}_x86_64.rpm
+ $INSTALLER_CMD vagrant_${vagrant_version}_x86_64.rpm
+ rm vagrant_${vagrant_version}_x86_64.rpm
+
+ case $VAGRANT_DEFAULT_PROVIDER in
+ virtualbox)
+            sudo wget -q http://download.virtualbox.org/virtualbox/rpm/rhel/virtualbox.repo -P /etc/yum.repos.d
+            $INSTALLER_CMD --enablerepo=epel dkms
+            wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | sudo rpm --import -
+ packages+=(VirtualBox-5.1)
+ ;;
+ libvirt)
+ # vagrant-libvirt dependencies
+ packages+=(qemu libvirt libvirt-devel ruby-devel gcc qemu-kvm)
+ # NFS
+ packages+=(nfs-utils nfs-utils-lib)
+ ;;
+ esac
+ ;;
+
+esac
+
+${INSTALLER_CMD} "${packages[@]}"
+if [ "$VAGRANT_DEFAULT_PROVIDER" == "libvirt" ]; then
+ vagrant plugin install vagrant-libvirt
+ sudo usermod -a -G libvirt $USER
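+    # The new group membership only takes effect once the user logs in again.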
+fi
diff --git a/tools/setup_openstack.sh b/tools/setup_openstack.sh
new file mode 100755
index 0000000..765ad94
--- /dev/null
+++ b/tools/setup_openstack.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
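+# Idempotently seeds the target OpenStack cloud: uploads the Ubuntu image and
+# creates an SSH security group unless they already exist.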
+ubuntu_name=${OS_IMAGE:-"trusty-server-cloudimg-amd64-disk1"}
+export OS_IMAGE=$ubuntu_name
+ubuntu_glance=$(openstack image list -c Name -f value | grep "$ubuntu_name")
+ubuntu_file=/tmp/ubuntu.img
+
+sec_group_name=${OS_SEC_GROUP:-"onap-ssh-secgroup"}
+export OS_SEC_GROUP=$sec_group_name
+sec_group_list=$(openstack security group list -c Name -f value | grep "$sec_group_name")
+
+if [[ -z $ubuntu_glance ]]; then
+ if [ ! -f $ubuntu_file ]; then
+ curl http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img -o "$ubuntu_file"
+ fi
+
+ openstack image create --disk-format raw --container-format bare --public --file $ubuntu_file "$ubuntu_name"
+fi
+
+if [[ -z $sec_group_list ]]; then
+ openstack security group create "$sec_group_name"
+ openstack security group rule create --protocol tcp --remote-ip 0.0.0.0/0 --dst-port 22:22 "$sec_group_name"
+fi
diff --git a/tools/update_project_list.sh b/tools/update_project_list.sh
new file mode 100755
index 0000000..2f0ebb9
--- /dev/null
+++ b/tools/update_project_list.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
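+# Usage: ./tools/update_project_list.sh <gerrit_user>
+# Regenerates tests/projects.txt from Gerrit; tail drops the first line of the
+# ls-projects output (assumed to be a header/meta entry).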
+ssh $1@gerrit.onap.org -p 29418 gerrit ls-projects > projects.tmp
+tail -n +2 projects.tmp > tests/projects.txt
+rm projects.tmp
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..6ab309f
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,30 @@
+[tox]
+minversion = 1.6
+skipsdist = True
+envlist = bashate
+
+[testenv]
+passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
+usedevelop = False
+install_command = pip install {opts} {packages}
+
+[testenv:bashate]
+deps =
+ {env:BASHATE_INSTALL_PATH:bashate}
+whitelist_externals = bash
+commands = bash -c "find {toxinidir}/{tests,lib,tools} \
+ -not \( -type d -path *files* -prune \) \
+ -not \( -type f -name .*.swp* -prune \) \
+ -not \( -type f -name *.ps1 -prune \) \
+ -not \( -type f -name installed-software* -prune \) \
+ -not \( -type f -name *projects.txt -prune \) \
+ -type f \
+# E005 file does not begin with #! or have a .sh prefix
+# E006 check for lines longer than 79 columns
+# E042 local declaration hides errors
+# E043 Arithmetic compound has inconsistent return semantics
+ -print0 | xargs -0 bashate -v -iE006 -eE005,E042,E043"
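+# The bashate check can be run locally with: tox -e bashate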
+
+[testenv:docs]
+deps = sphinx
+commands = sphinx-build -W -b html doc/source doc/build/html
diff --git a/vagrant_utils/postinstall.sh b/vagrant_utils/postinstall.sh
new file mode 100755
index 0000000..3b5017a
--- /dev/null
+++ b/vagrant_utils/postinstall.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+if [[ "$debug" == "True" ]]; then
+ set -o xtrace
+fi
+
+if [[ "$1" == "openstack" ]]; then
+ source /var/onap/openstack
+ deploy_openstack
+ exit
+fi
+
+source /var/onap/functions
+
+update_repos
+create_configuration_files
+configure_bind
+
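+# For each requested service: load its library, stage its init script via
+# configure_service, run its init_<service> entry point, and make its helper
+# functions available in future shells.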
+for serv in "$@"; do
+ source /var/onap/${serv}
+ configure_service ${serv}_serv.sh
+ init_${serv}
+ echo "source /var/onap/${serv}" >> ~/.bashrc
+done
+
+echo "source /var/onap/functions" >> ~/.bashrc
diff --git a/vagrant_utils/unit_testing.sh b/vagrant_utils/unit_testing.sh
new file mode 100755
index 0000000..3a97ad9
--- /dev/null
+++ b/vagrant_utils/unit_testing.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
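+# Usage: unit_testing.sh [test_suite] [test_case]
+# Runs every matching /var/onap_tests/test_<suite> script against <case>.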
+if [[ "$debug" == "True" ]]; then
+ set -o xtrace
+fi
+
+set -o errexit
+
+TEST_SUITE=${1:-*}
+TEST_CASE=${2:-*}
+
+for file in /var/onap_tests/test_${TEST_SUITE}; do
+ bash ${file} $TEST_CASE
+done