-rwxr-xr-x  ansible/group_vars/infrastructure.yml | 3
-rw-r--r--  ansible/roles/application/.gitignore | 1
-rw-r--r--  ansible/roles/application/defaults/main.yml | 2
l---------  ansible/roles/application/molecule/custom_role/Dockerfile.j2 | 1
-rw-r--r--  ansible/roles/application/molecule/custom_role/molecule.yml | 55
l---------  ansible/roles/application/molecule/custom_role/tests | 1
-rw-r--r--  ansible/roles/application/molecule/default/cleanup.yml | 6
-rw-r--r--  ansible/roles/application/molecule/default/molecule.yml | 2
-rw-r--r--  ansible/roles/application/tasks/install.yml | 2
-rw-r--r--  ansible/roles/application/tasks/transfer-helm-charts.yml | 2
-rw-r--r--  ansible/roles/chrony/molecule/default/playbook.yml | 4
-rw-r--r--  ansible/roles/chrony/molecule/default/tests/test_default.py | 42
-rw-r--r--  ansible/roles/chrony/molecule/external_time_source/molecule.yml | 49
-rw-r--r--  ansible/roles/chrony/molecule/external_time_source/playbook.yml | 10
-rw-r--r--  ansible/roles/chrony/molecule/external_time_source_ubuntu/molecule.yml | 50
-rw-r--r--  ansible/roles/chrony/molecule/ubuntu/molecule.yml | 3
-rw-r--r--  ansible/roles/dns/handlers/main.yml | 1
-rw-r--r--  ansible/roles/helm/.yamllint | 11
-rw-r--r--  ansible/roles/helm/molecule/default/molecule.yml | 32
-rw-r--r--  ansible/roles/helm/molecule/default/playbook.yml | 5
-rw-r--r--  ansible/roles/helm/molecule/default/prepare.yml | 5
-rw-r--r--  ansible/roles/helm/molecule/default/tests/test_default.py | 11
-rw-r--r--  ansible/roles/kubectl/.yamllint | 11
-rw-r--r--  ansible/roles/kubectl/molecule/default/molecule.yml | 31
-rw-r--r--  ansible/roles/kubectl/molecule/default/playbook.yml | 5
-rw-r--r--  ansible/roles/kubectl/molecule/default/prepare.yml | 5
-rw-r--r--  ansible/roles/kubectl/molecule/default/tests/test_default.py | 11
-rw-r--r--  ansible/roles/nexus/.yamllint | 11
-rw-r--r--  ansible/roles/nexus/defaults/main.yml | 2
-rw-r--r--  ansible/roles/nexus/molecule/default/molecule.yml | 30
-rw-r--r--  ansible/roles/nexus/molecule/default/playbook.yml | 11
-rw-r--r--  ansible/roles/nexus/molecule/default/prepare.yml | 8
-rw-r--r--  ansible/roles/nexus/tasks/insert-images.yml | 2
-rw-r--r--  ansible/roles/nexus/tasks/install.yml | 1
-rw-r--r--  ansible/roles/nexus/tasks/runtime-populate.yml | 2
-rw-r--r--  ansible/roles/nexus/vars/main.yml | 2
-rw-r--r--  ansible/roles/nfs/molecule/default/molecule.yml | 2
-rw-r--r--  ansible/roles/nginx/.yamllint | 11
-rw-r--r--  ansible/roles/nginx/defaults/main.yml | 2
-rw-r--r--  ansible/roles/nginx/molecule/default/cleanup.yml | 6
-rw-r--r--  ansible/roles/nginx/molecule/default/molecule.yml | 30
-rw-r--r--  ansible/roles/nginx/molecule/default/playbook.yml | 5
-rw-r--r--  ansible/roles/nginx/molecule/default/prepare.yml | 8
-rw-r--r--  ansible/roles/resource-data/tasks/unarchive-resource.yml | 11
-rw-r--r--  ansible/roles/vncserver/.yamllint | 11
-rw-r--r--  ansible/roles/vncserver/molecule/default/cleanup.yml | 6
-rw-r--r--  ansible/roles/vncserver/molecule/default/molecule.yml | 32
-rw-r--r--  ansible/roles/vncserver/molecule/default/playbook.yml | 5
-rw-r--r--  ansible/roles/vncserver/molecule/default/prepare.yml | 8
-rw-r--r--  ansible/roles/vncserver/molecule/default/tests/test_default.py | 10
-rw-r--r--  ansible/test/play-infrastructure/molecule/default/cleanup.yml | 7
-rw-r--r--  ansible/test/play-infrastructure/molecule/default/vars.yml | 4
-rw-r--r--  ansible/test/play-resources/molecule/default/cleanup.yml | 23
-rw-r--r--  ansible/test/play-resources/molecule/default/playbook.yml | 10
-rw-r--r--  ansible/test/play-resources/molecule/default/prepare.yml | 3
-rw-r--r--  ansible/test/play-resources/molecule/default/vars.yml (renamed from ansible/test/play-resources/molecule/default/group_vars/all.yml) | 2
-rw-r--r--  ansible/test/play-resources/molecule/nfs/molecule.yml | 7
-rw-r--r--  ansible/test/play-resources/molecule/nfs/playbook.yml | 12
-rw-r--r--  ansible/test/play-resources/molecule/nfs/prepare.yml | 8
-rw-r--r--  ansible/test/play-resources/molecule/nfs/vars.yml | 7
-rw-r--r--  ansible/test/roles/cleanup-application/tasks/main.yml | 9
-rw-r--r--  ansible/test/roles/cleanup-containers/tasks/main.yml | 6
-rw-r--r--  ansible/test/roles/cleanup-directories/tasks/main.yml | 7
-rw-r--r--  ansible/test/roles/cleanup-nginx/tasks/main.yml | 6
-rw-r--r--  ansible/test/roles/cleanup-rancher/tasks/main.yml | 18
-rw-r--r--  ansible/test/roles/cleanup-vncserver/tasks/main.yml | 6
-rw-r--r--  ansible/test/roles/prepare-application/defaults/main.yml | 4
-rw-r--r--  ansible/test/roles/prepare-application/tasks/main.yml | 25
-rw-r--r--  ansible/test/roles/prepare-docker/tasks/docker-packages.yml | 10
-rw-r--r--  ansible/test/roles/prepare-docker/tasks/docker-socket-override.yml | 13
-rw-r--r--  ansible/test/roles/prepare-docker/tasks/enable-repos.yml | 13
-rw-r--r--  ansible/test/roles/prepare-docker/tasks/main.yml | 21
-rw-r--r--  ansible/test/roles/prepare-docker/tasks/prepare-docker-repos.yml | 20
-rw-r--r--  ansible/test/roles/prepare-docker/vars/main.yml | 7
-rw-r--r--  ansible/test/roles/prepare-helm/defaults/main.yml | 3
-rw-r--r--  ansible/test/roles/prepare-helm/tasks/main.yml | 18
-rw-r--r--  ansible/test/roles/prepare-kubectl/defaults/main.yml | 7
-rw-r--r--  ansible/test/roles/prepare-kubectl/tasks/main.yml | 14
-rw-r--r--  ansible/test/roles/prepare-nexus/tasks/main.yml | 8
-rwxr-xr-x  build/build_nexus_blob.sh | 26
-rwxr-xr-x  build/creating_data/create-rhel-repo.sh | 45
-rwxr-xr-x  build/creating_data/create-ubuntu-repo.sh | 33
-rwxr-xr-x  build/creating_data/docker-images-collector.sh | 44
-rwxr-xr-x  build/creating_data/download-bin-tools.sh | 60
-rwxr-xr-x  build/creating_data/download-docker-images.sh | 39
-rwxr-xr-x  build/creating_data/download-files.sh | 50
-rwxr-xr-x  build/creating_data/download-git-repos.sh | 56
-rwxr-xr-x  build/creating_data/download-http-files.sh | 51
-rwxr-xr-x  build/creating_data/download-npm-pkgs.sh | 42
-rwxr-xr-x  build/creating_data/download-pip.sh | 48
-rwxr-xr-x  build/creating_data/save-docker-images.sh | 59
-rw-r--r--  build/data_lists/infra_bin_utils.list | 3
-rw-r--r--  build/data_lists/onap_docker_images.list | 96
-rw-r--r--  build/data_lists/onap_pip_packages.list | 3
-rw-r--r--  build/data_lists/onap_rpm.list | 21
-rw-r--r--  build/download/base.py | 3
-rwxr-xr-x  build/download/docker_images.py | 22
-rwxr-xr-x  build/download/download.py | 173
-rwxr-xr-x  build/download/git_repos.py | 22
-rwxr-xr-x  build/download/http_files.py | 12
-rwxr-xr-x  build/download/npm_packages.py | 8
-rwxr-xr-x  build/download/pypi_packages.py | 88
-rwxr-xr-x  build/download/rpm_packages.py | 15
-rwxr-xr-x  build/download_offline_data_by_lists.sh | 96
-rwxr-xr-x  build/fetch_and_patch_charts.sh | 2
-rw-r--r--  docs/BuildGuide.rst | 295
-rwxr-xr-x  helm_deployment_status.py | 8
-rw-r--r--  patches/onap-patch-role/tasks/main.yml | 14
-rw-r--r--  patches/onap.patch | 27
109 files changed, 1272 insertions, 1064 deletions
diff --git a/ansible/group_vars/infrastructure.yml b/ansible/group_vars/infrastructure.yml
index 66a00b5f..08a25919 100755
--- a/ansible/group_vars/infrastructure.yml
+++ b/ansible/group_vars/infrastructure.yml
@@ -3,8 +3,6 @@ vnc_passwd: samsung
simulated_hosts:
git:
- gerrit.onap.org
- - git.rancher.io
- - github.com
http:
- git.onap.org
- nexus.onap.org
@@ -16,6 +14,7 @@ simulated_hosts:
nexus:
- docker.elastic.co
- docker.io
+ - index.docker.io
- gcr.io
- k8s.gcr.io
- nexus.{{ ansible_nodename }}
diff --git a/ansible/roles/application/.gitignore b/ansible/roles/application/.gitignore
deleted file mode 100644
index 155cbb20..00000000
--- a/ansible/roles/application/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-application/
diff --git a/ansible/roles/application/defaults/main.yml b/ansible/roles/application/defaults/main.yml
index 84fffeca..2ae668ac 100644
--- a/ansible/roles/application/defaults/main.yml
+++ b/ansible/roles/application/defaults/main.yml
@@ -11,3 +11,5 @@ helm_extra_install_options:
app_skip_helm_override: false
app_helm_override_role: application-override
app_helm_override_file: "{{ app_data_path }}/override.yaml"
+helm_overide_files:
+ - "{{ app_helm_override_file }}"
diff --git a/ansible/roles/application/molecule/custom_role/Dockerfile.j2 b/ansible/roles/application/molecule/custom_role/Dockerfile.j2
new file mode 120000
index 00000000..867ec5c3
--- /dev/null
+++ b/ansible/roles/application/molecule/custom_role/Dockerfile.j2
@@ -0,0 +1 @@
+../default/Dockerfile.j2
\ No newline at end of file
diff --git a/ansible/roles/application/molecule/custom_role/molecule.yml b/ansible/roles/application/molecule/custom_role/molecule.yml
new file mode 100644
index 00000000..f9b29d92
--- /dev/null
+++ b/ansible/roles/application/molecule/custom_role/molecule.yml
@@ -0,0 +1,55 @@
+---
+dependency:
+ name: galaxy
+driver:
+ name: docker
+lint:
+ name: yamllint
+platforms:
+ - name: instance
+ image: centos:7
+provisioner:
+ name: ansible
+ env:
+ ANSIBLE_ROLES_PATH: ../../../../test/roles
+ inventory:
+ group_vars:
+ all:
+ app_name: moleculetestapp
+ app_data_path: "/opt/{{ app_name }}"
+ app_helm_release_name: "{{ app_name }}"
+ app_kubernetes_namespace: "{{ app_name }}"
+ app_helm_charts_install_directory: application/helm_charts
+ app_helm_plugins_directory: "{{ app_helm_charts_install_directory}}/helm/plugins/"
+ app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts"
+ helm_bin_dir: /usr/local/bin
+ app_helm_build_targets:
+ - all
+ - onap
+ app_helm_chart_name: "{{ app_name }}"
+ application_pre_install_role: application/test-patch-role
+ application_post_install_role: application/test-patch-role
+ lint:
+ name: ansible-lint
+ playbooks:
+ prepare: ../default/prepare.yml
+ converge: ../default/playbook.yml
+ cleanup: ../default/cleanup.yml
+scenario:
+ name: custom_role
+ test_sequence:
+ - lint
+ - cleanup
+ - destroy
+ - dependency
+ - syntax
+ - create
+ - prepare
+ - converge
+ - verify
+ - cleanup
+ - destroy
+verifier:
+ name: testinfra
+ lint:
+ name: flake8
diff --git a/ansible/roles/application/molecule/custom_role/tests b/ansible/roles/application/molecule/custom_role/tests
new file mode 120000
index 00000000..b8ac4407
--- /dev/null
+++ b/ansible/roles/application/molecule/custom_role/tests
@@ -0,0 +1 @@
+../default/tests/
\ No newline at end of file
diff --git a/ansible/roles/application/molecule/default/cleanup.yml b/ansible/roles/application/molecule/default/cleanup.yml
new file mode 100644
index 00000000..996acaf1
--- /dev/null
+++ b/ansible/roles/application/molecule/default/cleanup.yml
@@ -0,0 +1,6 @@
+---
+- name: Cleanup infra
+ hosts: all
+ ignore_unreachable: true
+ roles:
+ - cleanup-application
diff --git a/ansible/roles/application/molecule/default/molecule.yml b/ansible/roles/application/molecule/default/molecule.yml
index 8f19d7ff..30c752e2 100644
--- a/ansible/roles/application/molecule/default/molecule.yml
+++ b/ansible/roles/application/molecule/default/molecule.yml
@@ -27,8 +27,6 @@ provisioner:
- all
- onap
app_helm_chart_name: "{{ app_name }}"
- application_pre_install_role:
- application_post_install_role:
lint:
name: ansible-lint
scenario:
diff --git a/ansible/roles/application/tasks/install.yml b/ansible/roles/application/tasks/install.yml
index bdf6e511..003631d7 100644
--- a/ansible/roles/application/tasks/install.yml
+++ b/ansible/roles/application/tasks/install.yml
@@ -71,7 +71,7 @@
{{ app_helm_release_name }}
{{ helm_repository_name }}/{{ app_helm_chart_name }}
--namespace {{ app_kubernetes_namespace }}
- {{ '' if app_skip_helm_override else '-f ' + app_helm_override_file }}
+ {% if not app_skip_helm_override %} {% for arg in helm_overide_files %} {{ '-f ' + arg }} {% endfor %} {% endif %}
{% for arg in helm_extra_install_options %} {{ arg.opt }} {% endfor %}
changed_when: true # when executed its a changed type of action
register: helm_install
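
When app_skip_helm_override is false, the loop above expands every entry of helm_overide_files into its own -f flag, so two entries would render roughly as (illustrative paths):

    -f /opt/onap/override.yaml -f /opt/onap/extra-override.yaml

With app_skip_helm_override set to true the whole block renders empty, matching the previous behaviour.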
diff --git a/ansible/roles/application/tasks/transfer-helm-charts.yml b/ansible/roles/application/tasks/transfer-helm-charts.yml
index 0cd7c02f..5e4240b6 100644
--- a/ansible/roles/application/tasks/transfer-helm-charts.yml
+++ b/ansible/roles/application/tasks/transfer-helm-charts.yml
@@ -40,5 +40,5 @@
dest: "{{ helm_home_dir.stdout }}/plugins"
directory_mode: true
mode: 0755
- with_items: "{{ list_of_plugins.files }}"
+ loop: "{{ list_of_plugins.files }}"
when: app_helm_plugins_directory is defined and app_helm_plugins_directory is not none
diff --git a/ansible/roles/chrony/molecule/default/playbook.yml b/ansible/roles/chrony/molecule/default/playbook.yml
index 7dccfc35..717d0f39 100644
--- a/ansible/roles/chrony/molecule/default/playbook.yml
+++ b/ansible/roles/chrony/molecule/default/playbook.yml
@@ -1,6 +1,10 @@
---
- name: Converge infrastructure hosts
hosts: infrastructure
+ tasks:
+ - name: Set cluster_ip fact
+ set_fact:
+ cluster_ip: "{{ ansible_default_ipv4.address }}"
roles:
- chrony
diff --git a/ansible/roles/chrony/molecule/default/tests/test_default.py b/ansible/roles/chrony/molecule/default/tests/test_default.py
new file mode 100644
index 00000000..08f85d37
--- /dev/null
+++ b/ansible/roles/chrony/molecule/default/tests/test_default.py
@@ -0,0 +1,42 @@
+import os
+import pytest
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+ os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
+
+
+@pytest.fixture
+def chrony_conf_file(host, os_family):
+ conf = host.ansible('include_vars', 'file=../../defaults/main.yml')[
+ 'ansible_facts']['chrony']['conf'][os_family]['config_file']
+ return conf
+
+
+@pytest.fixture
+def os_family(host):
+ osf = host.ansible("setup")['ansible_facts']['ansible_os_family']
+ return osf
+
+
+def test_chrony_conf_file_exists(host, chrony_conf_file):
+ assert host.file(chrony_conf_file).exists, 'Config file not found!'
+
+
+def test_chrony_service_running_enabled(host):
+ assert host.service('chronyd').is_running, \
+ 'Chronyd service is not running!'
+ assert host.service('chronyd').is_enabled, \
+ 'Chronyd service is not enabled!'
+
+
+def test_ntp_synchronized(host, chrony_conf_file):
+ assert host.file(chrony_conf_file).exists, 'Config file not found!'
+ if host.file(chrony_conf_file).contains("server "):
+ out = host.check_output('systemctl status chronyd')
+ assert 'Selected source' in out, \
+ 'Chronyd did not synchronize with NTP server.'
+ else:
+ # Host acts as a time source
+ pass
diff --git a/ansible/roles/chrony/molecule/external_time_source/molecule.yml b/ansible/roles/chrony/molecule/external_time_source/molecule.yml
new file mode 100644
index 00000000..e38f4295
--- /dev/null
+++ b/ansible/roles/chrony/molecule/external_time_source/molecule.yml
@@ -0,0 +1,49 @@
+---
+dependency:
+ name: galaxy
+driver:
+ name: docker
+lint:
+ name: yamllint
+platforms:
+ - name: infra_host
+ image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+ pre_build_image: True
+ privileged: true
+ volume_mounts:
+ - "/sys/fs/cgroup:/sys/fs/cgroup:ro"
+ override_command: False
+ groups:
+ - infrastructure
+ - name: node0
+ image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+ pre_build_image: True
+ privileged: true
+ volume_mounts:
+ - "/sys/fs/cgroup:/sys/fs/cgroup:ro"
+ override_command: False
+ groups:
+ - kubernetes
+provisioner:
+ name: ansible
+ lint:
+ name: ansible-lint
+ env:
+ ANSIBLE_ROLES_PATH: ../../../../test/roles
+ inventory:
+ group_vars:
+ all:
+ timesync:
+ servers:
+ - 0.pool.ntp.org
+ - 1.pool.ntp.org
+ timezone: Europe/Warsaw
+ playbooks:
+ prepare: ../default/prepare.yml
+verifier:
+ name: testinfra
+ lint:
+ name: flake8
+ options:
+ v: 1
+ directory: ../default/tests/
diff --git a/ansible/roles/chrony/molecule/external_time_source/playbook.yml b/ansible/roles/chrony/molecule/external_time_source/playbook.yml
new file mode 100644
index 00000000..7dccfc35
--- /dev/null
+++ b/ansible/roles/chrony/molecule/external_time_source/playbook.yml
@@ -0,0 +1,10 @@
+---
+- name: Converge infrastructure hosts
+ hosts: infrastructure
+ roles:
+ - chrony
+
+- name: Converge kubernetes hosts
+ hosts: kubernetes
+ roles:
+ - chrony
diff --git a/ansible/roles/chrony/molecule/external_time_source_ubuntu/molecule.yml b/ansible/roles/chrony/molecule/external_time_source_ubuntu/molecule.yml
new file mode 100644
index 00000000..6cc2854a
--- /dev/null
+++ b/ansible/roles/chrony/molecule/external_time_source_ubuntu/molecule.yml
@@ -0,0 +1,50 @@
+---
+dependency:
+ name: galaxy
+driver:
+ name: docker
+lint:
+ name: yamllint
+platforms:
+ - name: infra_host-ubuntu
+ image: molecule-${PREBUILD_PLATFORM_DISTRO:-ubuntu}:${PREBUILD_DISTRO_VERSION:-18.04}
+ pre_build_image: True
+ privileged: true
+ volume_mounts:
+ - "/sys/fs/cgroup:/sys/fs/cgroup:ro"
+ override_command: False
+ groups:
+ - infrastructure
+ - name: node0-ubuntu
+ image: molecule-${PREBUILD_PLATFORM_DISTRO:-ubuntu}:${PREBUILD_DISTRO_VERSION:-18.04}
+ pre_build_image: True
+ privileged: true
+ volume_mounts:
+ - "/sys/fs/cgroup:/sys/fs/cgroup:ro"
+ override_command: False
+ groups:
+ - kubernetes
+provisioner:
+ name: ansible
+ lint:
+ name: ansible-lint
+ env:
+ ANSIBLE_ROLES_PATH: ../../../../test/roles
+ inventory:
+ group_vars:
+ all:
+ timesync:
+ servers:
+ - 0.pool.ntp.org
+ - 1.pool.ntp.org
+ timezone: Europe/Warsaw
+ playbooks:
+ prepare: ../default/prepare.yml
+ converge: ../external_time_source/playbook.yml
+verifier:
+ name: testinfra
+ lint:
+ name: flake8
+ options:
+ v: 1
+ directory: ../default/tests/
diff --git a/ansible/roles/chrony/molecule/ubuntu/molecule.yml b/ansible/roles/chrony/molecule/ubuntu/molecule.yml
index dd2b9655..e7ede8c7 100644
--- a/ansible/roles/chrony/molecule/ubuntu/molecule.yml
+++ b/ansible/roles/chrony/molecule/ubuntu/molecule.yml
@@ -37,3 +37,6 @@ verifier:
name: testinfra
lint:
name: flake8
+ options:
+ v: 1
+ directory: ../default/tests/
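
The chrony scenarios can be run individually with molecule's scenario selector; typical invocations, assuming molecule with the docker driver and the pre-built test images referenced by the PREBUILD_* variables are available locally:

    molecule test -s external_time_source
    molecule test -s external_time_source_ubuntu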
diff --git a/ansible/roles/dns/handlers/main.yml b/ansible/roles/dns/handlers/main.yml
index cd1e4b47..3d7570f5 100644
--- a/ansible/roles/dns/handlers/main.yml
+++ b/ansible/roles/dns/handlers/main.yml
@@ -2,6 +2,7 @@
- name: Run dns server container
docker_container:
name: dns-server
+ network_mode: host
image: "{{ dns_server_image }}"
command: -H /simulated_hosts --log-facility=- --dns-loop-detect
capabilities: NET_ADMIN
diff --git a/ansible/roles/helm/.yamllint b/ansible/roles/helm/.yamllint
new file mode 100644
index 00000000..ad0be760
--- /dev/null
+++ b/ansible/roles/helm/.yamllint
@@ -0,0 +1,11 @@
+extends: default
+
+rules:
+ braces:
+ max-spaces-inside: 1
+ level: error
+ brackets:
+ max-spaces-inside: 1
+ level: error
+ line-length: disable
+ truthy: disable
diff --git a/ansible/roles/helm/molecule/default/molecule.yml b/ansible/roles/helm/molecule/default/molecule.yml
new file mode 100644
index 00000000..869f87f6
--- /dev/null
+++ b/ansible/roles/helm/molecule/default/molecule.yml
@@ -0,0 +1,32 @@
+---
+dependency:
+ name: galaxy
+driver:
+ name: docker
+lint:
+ name: yamllint
+platforms:
+ - name: infrastructure-server
+ image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+ pre_build_image: True
+ privileged: true
+ override_command: False
+ groups:
+ - infrastructure
+provisioner:
+ name: ansible
+ lint:
+ name: ansible-lint
+ env:
+ ANSIBLE_ROLES_PATH: ../../../../test/roles
+ ANSIBLE_LIBRARY: ../../../../library
+ inventory:
+ group_vars:
+ all:
+ app_name: onap
+ app_data_path: "/opt/{{ app_name }}"
+ helm_bin_dir: /usr/local/bin
+verifier:
+ name: testinfra
+ lint:
+ name: flake8
diff --git a/ansible/roles/helm/molecule/default/playbook.yml b/ansible/roles/helm/molecule/default/playbook.yml
new file mode 100644
index 00000000..2705b165
--- /dev/null
+++ b/ansible/roles/helm/molecule/default/playbook.yml
@@ -0,0 +1,5 @@
+---
+- name: Converge
+ hosts: all
+ roles:
+ - helm
diff --git a/ansible/roles/helm/molecule/default/prepare.yml b/ansible/roles/helm/molecule/default/prepare.yml
new file mode 100644
index 00000000..8a149b89
--- /dev/null
+++ b/ansible/roles/helm/molecule/default/prepare.yml
@@ -0,0 +1,5 @@
+---
+- name: Prepare for helm tests
+ hosts: all
+ roles:
+ - prepare-helm
diff --git a/ansible/roles/helm/molecule/default/tests/test_default.py b/ansible/roles/helm/molecule/default/tests/test_default.py
new file mode 100644
index 00000000..2395183b
--- /dev/null
+++ b/ansible/roles/helm/molecule/default/tests/test_default.py
@@ -0,0 +1,11 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+ os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
+
+
+def test_helm(host):
+ assert host.file('/usr/local/bin/helm').exists
+ assert host.run('helm').rc != 127
diff --git a/ansible/roles/kubectl/.yamllint b/ansible/roles/kubectl/.yamllint
new file mode 100644
index 00000000..ad0be760
--- /dev/null
+++ b/ansible/roles/kubectl/.yamllint
@@ -0,0 +1,11 @@
+extends: default
+
+rules:
+ braces:
+ max-spaces-inside: 1
+ level: error
+ brackets:
+ max-spaces-inside: 1
+ level: error
+ line-length: disable
+ truthy: disable
diff --git a/ansible/roles/kubectl/molecule/default/molecule.yml b/ansible/roles/kubectl/molecule/default/molecule.yml
new file mode 100644
index 00000000..bffb29e6
--- /dev/null
+++ b/ansible/roles/kubectl/molecule/default/molecule.yml
@@ -0,0 +1,31 @@
+---
+dependency:
+ name: galaxy
+driver:
+ name: docker
+lint:
+ name: yamllint
+platforms:
+ - name: infrastructure-server
+ image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+ pre_build_image: True
+ privileged: true
+ override_command: False
+ groups:
+ - infrastructure
+provisioner:
+ name: ansible
+ lint:
+ name: ansible-lint
+ env:
+ ANSIBLE_ROLES_PATH: ../../../../test/roles
+ ANSIBLE_LIBRARY: ../../../../library
+ inventory:
+ group_vars:
+ all:
+ app_name: onap
+ app_data_path: "/opt/{{ app_name }}"
+verifier:
+ name: testinfra
+ lint:
+ name: flake8
diff --git a/ansible/roles/kubectl/molecule/default/playbook.yml b/ansible/roles/kubectl/molecule/default/playbook.yml
new file mode 100644
index 00000000..ab9c08a8
--- /dev/null
+++ b/ansible/roles/kubectl/molecule/default/playbook.yml
@@ -0,0 +1,5 @@
+---
+- name: Converge
+ hosts: all
+ roles:
+ - kubectl
diff --git a/ansible/roles/kubectl/molecule/default/prepare.yml b/ansible/roles/kubectl/molecule/default/prepare.yml
new file mode 100644
index 00000000..ec17626a
--- /dev/null
+++ b/ansible/roles/kubectl/molecule/default/prepare.yml
@@ -0,0 +1,5 @@
+---
+- name: Prepare for kubectl tests
+ hosts: all
+ roles:
+ - prepare-kubectl
diff --git a/ansible/roles/kubectl/molecule/default/tests/test_default.py b/ansible/roles/kubectl/molecule/default/tests/test_default.py
new file mode 100644
index 00000000..4f799b95
--- /dev/null
+++ b/ansible/roles/kubectl/molecule/default/tests/test_default.py
@@ -0,0 +1,11 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+ os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
+
+
+def test_kubectl(host):
+ assert host.file('/usr/local/bin/kubectl').exists
+ assert host.run('kubectl').rc != 127
diff --git a/ansible/roles/nexus/.yamllint b/ansible/roles/nexus/.yamllint
new file mode 100644
index 00000000..ad0be760
--- /dev/null
+++ b/ansible/roles/nexus/.yamllint
@@ -0,0 +1,11 @@
+extends: default
+
+rules:
+ braces:
+ max-spaces-inside: 1
+ level: error
+ brackets:
+ max-spaces-inside: 1
+ level: error
+ line-length: disable
+ truthy: disable
diff --git a/ansible/roles/nexus/defaults/main.yml b/ansible/roles/nexus/defaults/main.yml
index 8f636979..194f88bf 100644
--- a/ansible/roles/nexus/defaults/main.yml
+++ b/ansible/roles/nexus/defaults/main.yml
@@ -6,4 +6,6 @@ populate_nexus: false
# but all images are pre-populated either at buildtime or at install time (populate_nexus).
runtime_images: {}
# Default rule for tarball naming translation
+nexus_url: "{{ nexus_url_scheme | default('https') }}://nexus.{{ hostvars[groups.infrastructure[0]].ansible_nodename }}{{ nexus_port | default('') }}"
nexus3_image_tar: "{{ nexus3_image | regex_replace('(\\/|\\:)', '_') }}.tar"
+nexus3_published_ports: []
diff --git a/ansible/roles/nexus/molecule/default/molecule.yml b/ansible/roles/nexus/molecule/default/molecule.yml
new file mode 100644
index 00000000..63c47724
--- /dev/null
+++ b/ansible/roles/nexus/molecule/default/molecule.yml
@@ -0,0 +1,30 @@
+---
+dependency:
+ name: galaxy
+driver:
+ name: docker
+lint:
+ name: yamllint
+platforms:
+ - name: infrastructure-server
+ image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+ pre_build_image: true
+ privileged: true
+ override_command: false
+ volumes:
+ - /var/lib/docker
+ groups:
+ - infrastructure
+provisioner:
+ name: ansible
+ env:
+ ANSIBLE_ROLES_PATH: ../../../../test/roles
+ inventory:
+ links:
+ group_vars: ../../../../group_vars
+ lint:
+ name: ansible-lint
+verifier:
+ name: testinfra
+ lint:
+ name: flake8
diff --git a/ansible/roles/nexus/molecule/default/playbook.yml b/ansible/roles/nexus/molecule/default/playbook.yml
new file mode 100644
index 00000000..e10ea55b
--- /dev/null
+++ b/ansible/roles/nexus/molecule/default/playbook.yml
@@ -0,0 +1,11 @@
+---
+- name: Converge
+ hosts: all
+ vars:
+ nexus_port: ":8081"
+ nexus3_published_ports:
+ - "{{ '8081' + nexus_port }}"
+ nexus_url_scheme: http
+ populate_nexus: true
+ roles:
+ - nexus
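
With these scenario variables the new defaults resolve to plain-HTTP access on the published port, roughly as follows (the hostname part comes from the infrastructure host's ansible_nodename):

    nexus_url: "http://nexus.<infrastructure-nodename>:8081"
    nexus3_published_ports: ["8081:8081"]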
diff --git a/ansible/roles/nexus/molecule/default/prepare.yml b/ansible/roles/nexus/molecule/default/prepare.yml
new file mode 100644
index 00000000..f7290bd5
--- /dev/null
+++ b/ansible/roles/nexus/molecule/default/prepare.yml
@@ -0,0 +1,8 @@
+---
+- name: Prepare
+ hosts: all
+ vars_files:
+ - ../../defaults/main.yml
+ roles:
+ - prepare-docker-dind
+ - prepare-nexus
diff --git a/ansible/roles/nexus/tasks/insert-images.yml b/ansible/roles/nexus/tasks/insert-images.yml
index 6c283330..404889f4 100644
--- a/ansible/roles/nexus/tasks/insert-images.yml
+++ b/ansible/roles/nexus/tasks/insert-images.yml
@@ -18,4 +18,4 @@
push: true
load_path: "{{ item.path }}"
timeout: 120
- changed_when: false # for idenpotence
+ changed_when: false # for idempotence
diff --git a/ansible/roles/nexus/tasks/install.yml b/ansible/roles/nexus/tasks/install.yml
index c88e5855..6ac2b131 100644
--- a/ansible/roles/nexus/tasks/install.yml
+++ b/ansible/roles/nexus/tasks/install.yml
@@ -24,6 +24,7 @@
image: "{{ nexus3_image }}"
networks:
- name: nexus_network
+ published_ports: "{{ nexus3_published_ports }}"
volumes:
- "{{ app_data_path }}/nexus_data:/nexus-data:rw"
state: started
diff --git a/ansible/roles/nexus/tasks/runtime-populate.yml b/ansible/roles/nexus/tasks/runtime-populate.yml
index ac947ec7..2d90bf22 100644
--- a/ansible/roles/nexus/tasks/runtime-populate.yml
+++ b/ansible/roles/nexus/tasks/runtime-populate.yml
@@ -8,4 +8,4 @@
# WA: block of tasks cant be executed in iterations
# need to iterate over those tasks in include
- include: "insert-images.yml"
- with_items: "{{ tar_images.files }}"
+ loop: "{{ tar_images.files }}"
diff --git a/ansible/roles/nexus/vars/main.yml b/ansible/roles/nexus/vars/main.yml
deleted file mode 100644
index 5ec51869..00000000
--- a/ansible/roles/nexus/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-nexus_url: "https://nexus.{{ hostvars[groups.infrastructure[0]].ansible_nodename }}"
diff --git a/ansible/roles/nfs/molecule/default/molecule.yml b/ansible/roles/nfs/molecule/default/molecule.yml
index 7bacf3c4..a8ca6a30 100644
--- a/ansible/roles/nfs/molecule/default/molecule.yml
+++ b/ansible/roles/nfs/molecule/default/molecule.yml
@@ -19,7 +19,7 @@ platforms:
- name: nfs-net
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
- - ${HOME}/data:/dockerdata-nfs:rw
+ - /dockerdata-nfs
- name: kubernetes-node-2
image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
diff --git a/ansible/roles/nginx/.yamllint b/ansible/roles/nginx/.yamllint
new file mode 100644
index 00000000..ad0be760
--- /dev/null
+++ b/ansible/roles/nginx/.yamllint
@@ -0,0 +1,11 @@
+extends: default
+
+rules:
+ braces:
+ max-spaces-inside: 1
+ level: error
+ brackets:
+ max-spaces-inside: 1
+ level: error
+ line-length: disable
+ truthy: disable
diff --git a/ansible/roles/nginx/defaults/main.yml b/ansible/roles/nginx/defaults/main.yml
index 260de9bf..8bf44197 100644
--- a/ansible/roles/nginx/defaults/main.yml
+++ b/ansible/roles/nginx/defaults/main.yml
@@ -4,7 +4,7 @@ simulated_hosts:
http:
nexus:
all_simulated_hosts:
- "{{ simulated_hosts.git + simulated_hosts.http + simulated_hosts.nexus }}"
+ "{{ simulated_hosts.git + simulated_hosts.http + simulated_hosts.nexus }}"
nginx:
ports:
diff --git a/ansible/roles/nginx/molecule/default/cleanup.yml b/ansible/roles/nginx/molecule/default/cleanup.yml
new file mode 100644
index 00000000..9870a553
--- /dev/null
+++ b/ansible/roles/nginx/molecule/default/cleanup.yml
@@ -0,0 +1,6 @@
+---
+- name: Cleanup
+ hosts: all
+ ignore_unreachable: true
+ roles:
+ - cleanup-nginx
diff --git a/ansible/roles/nginx/molecule/default/molecule.yml b/ansible/roles/nginx/molecule/default/molecule.yml
new file mode 100644
index 00000000..63c47724
--- /dev/null
+++ b/ansible/roles/nginx/molecule/default/molecule.yml
@@ -0,0 +1,30 @@
+---
+dependency:
+ name: galaxy
+driver:
+ name: docker
+lint:
+ name: yamllint
+platforms:
+ - name: infrastructure-server
+ image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+ pre_build_image: true
+ privileged: true
+ override_command: false
+ volumes:
+ - /var/lib/docker
+ groups:
+ - infrastructure
+provisioner:
+ name: ansible
+ env:
+ ANSIBLE_ROLES_PATH: ../../../../test/roles
+ inventory:
+ links:
+ group_vars: ../../../../group_vars
+ lint:
+ name: ansible-lint
+verifier:
+ name: testinfra
+ lint:
+ name: flake8
diff --git a/ansible/roles/nginx/molecule/default/playbook.yml b/ansible/roles/nginx/molecule/default/playbook.yml
new file mode 100644
index 00000000..431bb666
--- /dev/null
+++ b/ansible/roles/nginx/molecule/default/playbook.yml
@@ -0,0 +1,5 @@
+---
+- name: Converge
+ hosts: all
+ roles:
+ - nginx
diff --git a/ansible/roles/nginx/molecule/default/prepare.yml b/ansible/roles/nginx/molecule/default/prepare.yml
new file mode 100644
index 00000000..5c6ff5d8
--- /dev/null
+++ b/ansible/roles/nginx/molecule/default/prepare.yml
@@ -0,0 +1,8 @@
+---
+- name: Prepare
+ hosts: all
+ vars_files:
+ - ../../../../roles/nginx/defaults/main.yml
+ roles:
+ - prepare-docker-dind
+ - prepare-nginx
diff --git a/ansible/roles/resource-data/tasks/unarchive-resource.yml b/ansible/roles/resource-data/tasks/unarchive-resource.yml
index 9097ddc8..879a9dfe 100644
--- a/ansible/roles/resource-data/tasks/unarchive-resource.yml
+++ b/ansible/roles/resource-data/tasks/unarchive-resource.yml
@@ -1,11 +1,11 @@
---
#
-# Wrapper to pass through following variables
+# Wrapper to pass through following variables:
# resources_source_host
# resources_dir
# resource_source_filename
# resource_destination_directory
-# And handling target directory creation and possible removal on failure.
+# and handle target directory creation and eventual removal on failure.
# Idempotence is also handled here as nothing is done if resource_destination_directory
# was already created.
#
@@ -48,9 +48,12 @@
- name: "Cleanup the destination directory {{ resource_destination_directory }} on error"
file:
- path: "{{ item.path }}"
+ path: "{{ files_item.path }}"
state: absent
- with_items: "{{ files_after_fail.files | difference(original_files.files) }}"
+ loop: "{{ files_after_fail.files | difference(original_files.files) }}"
+ loop_control:
+ label: "{{ files_item.path }}"
+ loop_var: files_item
when: files_after_fail is defined
- name: "Report failure of upload operation"
diff --git a/ansible/roles/vncserver/.yamllint b/ansible/roles/vncserver/.yamllint
new file mode 100644
index 00000000..ad0be760
--- /dev/null
+++ b/ansible/roles/vncserver/.yamllint
@@ -0,0 +1,11 @@
+extends: default
+
+rules:
+ braces:
+ max-spaces-inside: 1
+ level: error
+ brackets:
+ max-spaces-inside: 1
+ level: error
+ line-length: disable
+ truthy: disable
diff --git a/ansible/roles/vncserver/molecule/default/cleanup.yml b/ansible/roles/vncserver/molecule/default/cleanup.yml
new file mode 100644
index 00000000..16be86ef
--- /dev/null
+++ b/ansible/roles/vncserver/molecule/default/cleanup.yml
@@ -0,0 +1,6 @@
+---
+- name: Cleanup
+ hosts: all
+ ignore_unreachable: true
+ roles:
+ - cleanup-vncserver
diff --git a/ansible/roles/vncserver/molecule/default/molecule.yml b/ansible/roles/vncserver/molecule/default/molecule.yml
new file mode 100644
index 00000000..5f7e4062
--- /dev/null
+++ b/ansible/roles/vncserver/molecule/default/molecule.yml
@@ -0,0 +1,32 @@
+---
+dependency:
+ name: galaxy
+driver:
+ name: docker
+lint:
+ name: yamllint
+platforms:
+ - name: infrastructure-server
+ image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+ pre_build_image: True
+ privileged: true
+ volumes:
+ - /var/lib/docker
+ override_command: False
+ groups:
+ - infrastructure
+provisioner:
+ name: ansible
+ lint:
+ name: ansible-lint
+ env:
+ ANSIBLE_ROLES_PATH: ../../../../test/roles
+ inventory:
+ links:
+ group_vars: ../../../../group_vars
+verifier:
+ name: testinfra
+ lint:
+ name: flake8
+ options:
+ v: 1
diff --git a/ansible/roles/vncserver/molecule/default/playbook.yml b/ansible/roles/vncserver/molecule/default/playbook.yml
new file mode 100644
index 00000000..1b253495
--- /dev/null
+++ b/ansible/roles/vncserver/molecule/default/playbook.yml
@@ -0,0 +1,5 @@
+---
+- name: Converge
+ hosts: infrastructure
+ roles:
+ - vncserver
diff --git a/ansible/roles/vncserver/molecule/default/prepare.yml b/ansible/roles/vncserver/molecule/default/prepare.yml
new file mode 100644
index 00000000..55edb8f8
--- /dev/null
+++ b/ansible/roles/vncserver/molecule/default/prepare.yml
@@ -0,0 +1,8 @@
+---
+- name: Prepare
+ hosts: all
+ vars_files:
+ - ../../defaults/main.yml
+ roles:
+ - prepare-docker-dind
+ - prepare-vncserver
diff --git a/ansible/roles/vncserver/molecule/default/tests/test_default.py b/ansible/roles/vncserver/molecule/default/tests/test_default.py
new file mode 100644
index 00000000..0164a3ee
--- /dev/null
+++ b/ansible/roles/vncserver/molecule/default/tests/test_default.py
@@ -0,0 +1,10 @@
+import os
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+ os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('infrastructure')
+
+
+def test_container_running(host):
+ assert host.docker('vnc-server').is_running
diff --git a/ansible/test/play-infrastructure/molecule/default/cleanup.yml b/ansible/test/play-infrastructure/molecule/default/cleanup.yml
new file mode 100644
index 00000000..107ec8ca
--- /dev/null
+++ b/ansible/test/play-infrastructure/molecule/default/cleanup.yml
@@ -0,0 +1,7 @@
+---
+- name: Cleanup
+ hosts: all
+ ignore_unreachable: true
+ roles:
+ - cleanup-nginx
+ - cleanup-vncserver
diff --git a/ansible/test/play-infrastructure/molecule/default/vars.yml b/ansible/test/play-infrastructure/molecule/default/vars.yml
index e91eadb2..6ca5a1ad 100644
--- a/ansible/test/play-infrastructure/molecule/default/vars.yml
+++ b/ansible/test/play-infrastructure/molecule/default/vars.yml
@@ -16,9 +16,9 @@ molecule_test_registry: nexus3.onap.org:10001
runtime_images:
busybox:
registry: "{{ molecule_test_registry }}"
- path: "/onap/components/busybox"
+ path: "/busybox"
tag: "latest"
aaionap-haproxy:
registry: "{{ molecule_test_registry }}"
- path: "/onap/components/aaionap/haproxy"
+ path: "/aaionap/haproxy"
tag: "1.2.4"
diff --git a/ansible/test/play-resources/molecule/default/cleanup.yml b/ansible/test/play-resources/molecule/default/cleanup.yml
deleted file mode 100644
index e0c0b624..00000000
--- a/ansible/test/play-resources/molecule/default/cleanup.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Cleanup data from instance (doing it from hosts requires root access).
- hosts: resources
- gather_facts: false
- ignore_unreachable: true
- pre_tasks:
- - name: Find files and dirs to delete
- find:
- paths: /data/ # Only deleting content not dir itself as we get "Device or resource busy" error as it's mounted to container doing the deletion
- patterns: "*"
- recurse: true
- file_type: any
- register: files_to_delete
- - name: Make file/dir path list
- set_fact:
- to_delete_paths: "{{ to_delete_paths | default([]) + [item.path] }}"
- loop: "{{ files_to_delete.files }}"
- when: files_to_delete.files is defined
- roles:
- - role: cleanup-directories
- vars:
- directories_files_list_to_remove: "{{ to_delete_paths }}"
- when: to_delete_paths is defined
diff --git a/ansible/test/play-resources/molecule/default/playbook.yml b/ansible/test/play-resources/molecule/default/playbook.yml
index 50ba233a..096077a2 100644
--- a/ansible/test/play-resources/molecule/default/playbook.yml
+++ b/ansible/test/play-resources/molecule/default/playbook.yml
@@ -1,6 +1,8 @@
---
-- name: Test resource transfer with ssh
+- name: Initialize
hosts: all
- roles:
- - setup
- - resource-data
+ tasks:
+ - name: Include variables
+ include_vars: vars.yml
+
+- import_playbook: ../../../../resources.yml
diff --git a/ansible/test/play-resources/molecule/default/prepare.yml b/ansible/test/play-resources/molecule/default/prepare.yml
index a4436001..5c8ae654 100644
--- a/ansible/test/play-resources/molecule/default/prepare.yml
+++ b/ansible/test/play-resources/molecule/default/prepare.yml
@@ -2,4 +2,7 @@
- name: Prepare resource-data
hosts: all
roles:
+ - setup
- prepare-resource-data
+ vars_files:
+ - vars.yml
diff --git a/ansible/test/play-resources/molecule/default/group_vars/all.yml b/ansible/test/play-resources/molecule/default/vars.yml
index 558eacb2..46ab3e04 100644
--- a/ansible/test/play-resources/molecule/default/group_vars/all.yml
+++ b/ansible/test/play-resources/molecule/default/vars.yml
@@ -1,5 +1,5 @@
---
-app_data_path: /opt/myleculeapp
+app_data_path: /opt/moleculeapp
aux_data_path: "{{ app_data_path }}/runtime_images_source_dir"
resources_dir: /data
resources_filename: resources_package.tar
diff --git a/ansible/test/play-resources/molecule/nfs/molecule.yml b/ansible/test/play-resources/molecule/nfs/molecule.yml
index ffaabb07..11726396 100644
--- a/ansible/test/play-resources/molecule/nfs/molecule.yml
+++ b/ansible/test/play-resources/molecule/nfs/molecule.yml
@@ -13,7 +13,7 @@ platforms:
override_command: false
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
- - ${HOME}/data:/data:rw # mount fs from host to get nfs exportfs task working
+ - /data
groups:
- resources
networks:
@@ -32,13 +32,8 @@ platforms:
- name: resource-data
provisioner:
name: ansible
- playbooks:
- cleanup: ../default/cleanup.yml
env:
ANSIBLE_ROLES_PATH: ../../../../roles:../../../roles
- inventory:
- links:
- group_vars: ../default/group_vars
lint:
name: ansible-lint
verifier:
diff --git a/ansible/test/play-resources/molecule/nfs/playbook.yml b/ansible/test/play-resources/molecule/nfs/playbook.yml
index 88440116..096077a2 100644
--- a/ansible/test/play-resources/molecule/nfs/playbook.yml
+++ b/ansible/test/play-resources/molecule/nfs/playbook.yml
@@ -1,8 +1,8 @@
---
-- name: Test resource transfer with nfs
+- name: Initialize
hosts: all
- roles:
- - setup
- - role: resource-data
- vars:
- resources_on_nfs: true
+ tasks:
+ - name: Include variables
+ include_vars: vars.yml
+
+- import_playbook: ../../../../resources.yml
diff --git a/ansible/test/play-resources/molecule/nfs/prepare.yml b/ansible/test/play-resources/molecule/nfs/prepare.yml
index d381ba77..2276f2e8 100644
--- a/ansible/test/play-resources/molecule/nfs/prepare.yml
+++ b/ansible/test/play-resources/molecule/nfs/prepare.yml
@@ -1,7 +1,7 @@
---
-- name: Prepare resource-data
+- name: Prepare
hosts: all
roles:
- - role: prepare-resource-data
- vars:
- resources_on_nfs: true
+ - prepare-resource-data
+ vars_files:
+ - vars.yml
diff --git a/ansible/test/play-resources/molecule/nfs/vars.yml b/ansible/test/play-resources/molecule/nfs/vars.yml
new file mode 100644
index 00000000..2cc4d258
--- /dev/null
+++ b/ansible/test/play-resources/molecule/nfs/vars.yml
@@ -0,0 +1,7 @@
+---
+app_data_path: /opt/moleculeapp
+aux_data_path: "{{ app_data_path }}/runtime_images_source_dir"
+resources_on_nfs: true
+resources_dir: /data
+resources_filename: resources_package.tar
+aux_resources_filename: aux_resources_package.tar
diff --git a/ansible/test/roles/cleanup-application/tasks/main.yml b/ansible/test/roles/cleanup-application/tasks/main.yml
new file mode 100644
index 00000000..cbb8d521
--- /dev/null
+++ b/ansible/test/roles/cleanup-application/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: Clean application role mocked artifacts directories
+ file:
+ path: "{{ item }}"
+ state: absent
+ delegate_to: localhost
+ loop:
+ - certs
+ - application
diff --git a/ansible/test/roles/cleanup-containers/tasks/main.yml b/ansible/test/roles/cleanup-containers/tasks/main.yml
deleted file mode 100644
index 3a800c9e..00000000
--- a/ansible/test/roles/cleanup-containers/tasks/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Remove containers
- docker_container:
- name: "{{ item }}"
- state: absent
- loop: "{{ container_list }}"
diff --git a/ansible/test/roles/cleanup-directories/tasks/main.yml b/ansible/test/roles/cleanup-directories/tasks/main.yml
deleted file mode 100644
index 8e79ea0c..00000000
--- a/ansible/test/roles/cleanup-directories/tasks/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Remove directories/files
- file:
- path: "{{ item }}"
- state: absent
- become: true
- loop: "{{ directories_files_list_to_remove }}"
diff --git a/ansible/test/roles/cleanup-nginx/tasks/main.yml b/ansible/test/roles/cleanup-nginx/tasks/main.yml
new file mode 100644
index 00000000..94517519
--- /dev/null
+++ b/ansible/test/roles/cleanup-nginx/tasks/main.yml
@@ -0,0 +1,6 @@
+---
+- name: Remove saved nginx docker image
+ delegate_to: localhost
+ file:
+ path: /tmp/nginx.tar
+ state: absent
diff --git a/ansible/test/roles/cleanup-rancher/tasks/main.yml b/ansible/test/roles/cleanup-rancher/tasks/main.yml
deleted file mode 100644
index 21b02988..00000000
--- a/ansible/test/roles/cleanup-rancher/tasks/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-# Remove containers spawned by Rancher Agent
-- name: Get list of containers spawned by Rancher Agent
- docker_list_containers:
- label_name: "{{ item.label }}"
- label_value: "{{ item.value }}"
- loop: "{{ container_list_by_label }}"
- register: docker_list_containers_var
-
-- name: set fact # save a list of containers found by previous task to orphaned_containers var
- set_fact: orphaned_containers="{{ orphaned_containers|default([]) + item.containers }}"
- loop: "{{ docker_list_containers_var.results }}"
-
-- name: Remove orphaned containers
- docker_container:
- name: "{{ item }}"
- state: absent
- loop: "{{ orphaned_containers }}"
diff --git a/ansible/test/roles/cleanup-vncserver/tasks/main.yml b/ansible/test/roles/cleanup-vncserver/tasks/main.yml
new file mode 100644
index 00000000..1fcb37d8
--- /dev/null
+++ b/ansible/test/roles/cleanup-vncserver/tasks/main.yml
@@ -0,0 +1,6 @@
+---
+- name: Remove saved vncserver docker image
+ delegate_to: localhost
+ file:
+ path: /tmp/vncserver.tar
+ state: absent
diff --git a/ansible/test/roles/prepare-application/defaults/main.yml b/ansible/test/roles/prepare-application/defaults/main.yml
index 227bd4f0..c3883c65 100644
--- a/ansible/test/roles/prepare-application/defaults/main.yml
+++ b/ansible/test/roles/prepare-application/defaults/main.yml
@@ -1,4 +1,6 @@
---
simulate_helm: true
app_helm_charts_install_directory: application/helm_charts
-helm_simulation_output_file: /tmp/helm_simu_output
\ No newline at end of file
+helm_simulation_output_file: /tmp/helm_simu_output
+application_pre_install_role: application/test-patch-role
+application_post_install_role: application/test-patch-role
diff --git a/ansible/test/roles/prepare-application/tasks/main.yml b/ansible/test/roles/prepare-application/tasks/main.yml
index 2f143a07..75abb802 100644
--- a/ansible/test/roles/prepare-application/tasks/main.yml
+++ b/ansible/test/roles/prepare-application/tasks/main.yml
@@ -1,9 +1,14 @@
---
-- name: Create Application helm charts directory
+- name: Create application role mocked artifacts directories
file:
- path: "{{ app_helm_charts_install_directory }}"
+ path: "{{ item }}"
state: directory
delegate_to: localhost
+ loop:
+ - "{{ app_helm_charts_install_directory }}"
+ - certs
+ - "{{ application_pre_install_role + '/tasks/' }}"
+ - "{{ application_post_install_role + '/tasks/' }}"
- name: Create Makefile to simulate helm charts dir and make building
copy:
@@ -33,15 +38,19 @@
name: make
state: present
-- name: Create local certs dir for dummy certs
- file:
- path: certs
- state: directory
- delegate_to: localhost
-
- name: Create dummy cert file to simulate offline server certificates in helm install with override.yml file
copy:
content: |
this is dummy server certificate value
dest: certs/rootCA.crt
delegate_to: localhost
+
+- name: Create test patch role files
+ copy:
+ content: |
+ ---
+ - name: Mocked up patch role
+ debug:
+ msg: "Noop task to mock up patch role"
+ dest: application/test-patch-role/tasks/main.yml
+ delegate_to: localhost
diff --git a/ansible/test/roles/prepare-docker/tasks/docker-packages.yml b/ansible/test/roles/prepare-docker/tasks/docker-packages.yml
deleted file mode 100644
index 8f55c5ce..00000000
--- a/ansible/test/roles/prepare-docker/tasks/docker-packages.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-
-- name: Download docker related packages
- command: yumdownloader --destdir="{{ rpm_dir }}" "{{ docker_ce_rpm }}"
- args:
- creates: "{{ rpm_dir }}/{{ docker_ce_rpm }}"
-
-- name: Install docker related packages
- yum:
- name: "{{ rpm_dir + '/' + docker_ce_rpm + '.rpm'}}"
diff --git a/ansible/test/roles/prepare-docker/tasks/docker-socket-override.yml b/ansible/test/roles/prepare-docker/tasks/docker-socket-override.yml
deleted file mode 100644
index 0cabadf4..00000000
--- a/ansible/test/roles/prepare-docker/tasks/docker-socket-override.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Create docker.socket systemd dir for override
- file:
- path: /etc/systemd/system/docker.socket.d
- state: directory
-
-- name: Fake dockerd dependent docker.socket service not to actually listen the docker socket as dockerd is not used in container only docker client
- copy:
- content: |
- [Socket]
- ListenStream=
- ListenStream=/tmp/fake
- dest: /etc/systemd/system/docker.socket.d/override.conf
diff --git a/ansible/test/roles/prepare-docker/tasks/enable-repos.yml b/ansible/test/roles/prepare-docker/tasks/enable-repos.yml
deleted file mode 100644
index 204bf03d..00000000
--- a/ansible/test/roles/prepare-docker/tasks/enable-repos.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Enable docker repos back for Molecule testing purposes
- copy:
- remote_src: yes
- src: "{{ item }}"
- dest: "{{ (item | splitext)[0] }}"
- loop: "{{ docker_needed_repos }}"
-
-- name: Disable offline repo for molecule testing purposes
- lineinfile:
- path: "{{ offline_repo_file }}"
- regexp: 'enabled = 1'
- line: 'enabled = 0'
diff --git a/ansible/test/roles/prepare-docker/tasks/main.yml b/ansible/test/roles/prepare-docker/tasks/main.yml
deleted file mode 100644
index e7d87061..00000000
--- a/ansible/test/roles/prepare-docker/tasks/main.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Check repositories
- stat:
- path: "{{ item }}"
- loop: "{{ docker_needed_repos + [offline_repo_file] }}"
- register: repos
-
-- name: Set fact for offline repos created
- set_fact:
- offline_repo_created: "{{ (repos.results | map(attribute='stat.exists') | list) is all }}"
-
-- debug:
- var: offline_repo_created
-
-- name: Enable repos back (if package-repository role already setup offline onap repo) for molecule testing purposes to install docker
- include_tasks: enable-repos.yml
- when: offline_repo_created
-
-- name: Prepare docker repos normally
- include_tasks: prepare-docker-repos.yml
- when: not offline_repo_created
diff --git a/ansible/test/roles/prepare-docker/tasks/prepare-docker-repos.yml b/ansible/test/roles/prepare-docker/tasks/prepare-docker-repos.yml
deleted file mode 100644
index 01ea72c3..00000000
--- a/ansible/test/roles/prepare-docker/tasks/prepare-docker-repos.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Install Docker-CE repo
- yum_repository:
- name: docker-ce
- description: Docker-ce YUM repo
- baseurl: https://download.docker.com/linux/centos/7/x86_64/stable/
- gpgcheck: true
- gpgkey: https://download.docker.com/linux/centos/gpg
-
-- name: Create rpm dir
- file:
- path: "{{ rpm_dir }}"
- state: directory
-
-- name: Handle docker-ce packages
- import_tasks: docker-packages.yml
-
-- name: Fake dockerd on container
- import_tasks: docker-socket-override.yml
- when: ansible_env.container == 'docker'
diff --git a/ansible/test/roles/prepare-docker/vars/main.yml b/ansible/test/roles/prepare-docker/vars/main.yml
deleted file mode 100644
index bcd7f365..00000000
--- a/ansible/test/roles/prepare-docker/vars/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-docker_needed_repos:
- - /etc/yum.repos.d/CentOS-Base.repo.disabled
- - /etc/yum.repos.d/docker-ce.repo.disabled
-offline_repo_file: /etc/yum.repos.d/onap.repo
-rpm_dir: /root/rpm
-docker_ce_rpm: docker-ce-18.09.5-3.el7.x86_64
diff --git a/ansible/test/roles/prepare-helm/defaults/main.yml b/ansible/test/roles/prepare-helm/defaults/main.yml
new file mode 100644
index 00000000..8ab9ed3a
--- /dev/null
+++ b/ansible/test/roles/prepare-helm/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# Helm version to download.
+helm_version: 2.12.3
diff --git a/ansible/test/roles/prepare-helm/tasks/main.yml b/ansible/test/roles/prepare-helm/tasks/main.yml
new file mode 100644
index 00000000..aa01e281
--- /dev/null
+++ b/ansible/test/roles/prepare-helm/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+- name: "Ensure {{ app_data_path }}/downloads directory exists"
+ file:
+ path: "{{ app_data_path }}/downloads"
+ recurse: true
+ state: directory
+
+- name: "Download and unarchive helm-{{ helm_version }}"
+ unarchive:
+ src: "https://get.helm.sh/helm-v{{ helm_version }}-linux-amd64.tar.gz"
+ dest: "/tmp"
+ remote_src: true
+
+- name: "Copy helm binary"
+ copy:
+ src: /tmp/linux-amd64/helm
+ dest: "{{ app_data_path }}/downloads/helm"
+ remote_src: true
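
With the default helm_version of 2.12.3, the unarchive source above resolves to the upstream release tarball:

    https://get.helm.sh/helm-v2.12.3-linux-amd64.tar.gz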
diff --git a/ansible/test/roles/prepare-kubectl/defaults/main.yml b/ansible/test/roles/prepare-kubectl/defaults/main.yml
new file mode 100644
index 00000000..d4e8ef94
--- /dev/null
+++ b/ansible/test/roles/prepare-kubectl/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# Set to false to download kubectl in preparation for kubectl role that
+# needs to install it, set to true to immediately install (needed for
+# cases where it is used by verification tests of other roles).
+kubectl_install: false
+# Kubectl version.
+kubectl_version: 1.13.5
diff --git a/ansible/test/roles/prepare-kubectl/tasks/main.yml b/ansible/test/roles/prepare-kubectl/tasks/main.yml
new file mode 100644
index 00000000..b5631850
--- /dev/null
+++ b/ansible/test/roles/prepare-kubectl/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+- name: "Ensure {{ app_data_path }} exists"
+ file:
+ path: "{{ app_data_path }}/downloads"
+ state: directory
+ when: not kubectl_install
+
+- name: "Install kubectl-{{ kubectl_version }}"
+ get_url:
+ url: "https://storage.googleapis.com/kubernetes-release/release/v{{ kubectl_version }}/bin/linux/amd64/kubectl"
+ dest: "{{ '/usr/local/bin/kubectl' if kubectl_install else app_data_path+'/downloads/kubectl' }}"
+ # This mode conditional allows checking if kubectl role, when installing kubectl, correctly
+ # adds executable bit (bonus).
+ mode: "{{ 0755 if kubectl_install else omit }}"
diff --git a/ansible/test/roles/prepare-nexus/tasks/main.yml b/ansible/test/roles/prepare-nexus/tasks/main.yml
index 4ef40f5e..5eb1b7d3 100644
--- a/ansible/test/roles/prepare-nexus/tasks/main.yml
+++ b/ansible/test/roles/prepare-nexus/tasks/main.yml
@@ -29,12 +29,12 @@
docker_image:
name: busybox
tag: latest
- repository: nexus3.onap.org:10001/onap/components/busybox
+ repository: nexus3.onap.org:10001/busybox
- name: Save busybox image
delegate_to: localhost
docker_image:
- name: nexus3.onap.org:10001/onap/components/busybox
+ name: nexus3.onap.org:10001/busybox
tag: latest
pull: false
archive_path: /tmp/busybox.tar
@@ -44,12 +44,12 @@
docker_image:
name: aaionap/haproxy
tag: 1.2.4
- repository: nexus3.onap.org:10001/onap/components/aaionap/haproxy
+ repository: nexus3.onap.org:10001/aaionap/haproxy
- name: Save haproxy image
delegate_to: localhost
docker_image:
- name: nexus3.onap.org:10001/onap/components/aaionap/haproxy
+ name: nexus3.onap.org:10001/aaionap/haproxy
tag: 1.2.4
pull: false
archive_path: /tmp/haproxy.tar
diff --git a/build/build_nexus_blob.sh b/build/build_nexus_blob.sh
index 09ed8969..f3edb482 100755
--- a/build/build_nexus_blob.sh
+++ b/build/build_nexus_blob.sh
@@ -20,7 +20,7 @@
### This script prepares Nexus repositories data blobs for ONAP
-## The script requires following dependencies are installed: nodejs, jq, docker
+## The script requires following dependencies are installed: nodejs, jq, docker, twine
## All required resources are expected in the upper directory created during
## download procedure as DATA_DIR or in the directory given as --input-directory
## All lists used must be in project data_lists directory or in the directory given
@@ -58,22 +58,18 @@ NEXUS_DATA_DIR="${DATA_DIR}/nexus_data"
LISTS_DIR="${LOCAL_PATH}/data_lists"
usage () {
- echo " Example usage: build_nexus_blob.sh -t <tag> --input-directory </path/to/downloaded/files/dir> --output-directory
+ echo " Example usage: build_nexus_blob.sh --input-directory </path/to/downloaded/files/dir> --output-directory
</path/to/output/dir> --resource-list-directory </path/to/dir/with/resource/list>
- -t | --tag release tag, taken from available on git or placed by data generating script (mandatory) must fallow scheme onap_<semver>
-i | --input-directory directory containing file needed to create nexus blob. The structure of this directory must organized as described in build guide
-o | --output-directory
- -rl | --resource-list-directory directory with files containing docker, pypi and rpm lists
+ -rl | --resource-list-directory directory with files containing docker, pypi and npm lists
"
exit 1
}
while [ "$1" != "" ]; do
case $1 in
- -t | --tag ) shift
- TAG=$1
- ;;
-i | --input-directory ) shift
DATA_DIR=$1
;;
@@ -90,22 +86,15 @@ while [ "$1" != "" ]; do
shift
done
-
-# exit if no tag given
-if [ -z ${TAG} ]; then
- usage
- exit 1
-fi
-
# Setup directories with resources for docker, npm and pypi
NXS_SRC_DOCKER_IMG_DIR="${DATA_DIR}/offline_data/docker_images_for_nexus"
NXS_SRC_NPM_DIR="${DATA_DIR}/offline_data/npm_tar"
NXS_SRC_PYPI_DIR="${DATA_DIR}/offline_data/pypi"
-# Setup specific resources list based on the tag provided
-NXS_DOCKER_IMG_LIST="${LISTS_DIR}/${TAG}-docker_images.list"
-NXS_NPM_LIST="${LISTS_DIR}/$(sed 's/.$/x/' <<< ${TAG})-npm.list"
-NXS_PYPI_LIST="${LISTS_DIR}/$(sed 's/.$/x/' <<< ${TAG})-pip_packages.list"
+# Setup specific resources lists
+NXS_DOCKER_IMG_LIST="${LISTS_DIR}/onap_docker_images.list"
+NXS_NPM_LIST="${LISTS_DIR}/onap_npm.list"
+NXS_PYPI_LIST="${LISTS_DIR}/onap_pip_packages.list"
# Setup Nexus image used for build and install infra
INFRA_LIST="${LISTS_DIR}/infra_docker_images.list"
@@ -340,4 +329,3 @@ npm config set registry "https://registry.npmjs.org"
echo "Nexus blob is built"
exit 0
-
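
With the -t/--tag option gone, a typical invocation of the script only needs the directory arguments; an illustrative call with hypothetical paths:

    ./build/build_nexus_blob.sh --input-directory /opt/offline-data \
                                --output-directory /opt/nexus_blob \
                                --resource-list-directory ./build/data_lists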
diff --git a/build/creating_data/create-rhel-repo.sh b/build/creating_data/create-rhel-repo.sh
deleted file mode 100755
index 43709a7e..00000000
--- a/build/creating_data/create-rhel-repo.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-# COPYRIGHT NOTICE STARTS HERE
-#
-# Copyright 2018-2019 © Samsung Electronics Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# COPYRIGHT NOTICE ENDS HERE
-
-OUTDIR="${1}"
-if [[ -z "${OUTDIR}" ]]; then
- echo "Missing output dir"
- exit 1
-fi
-
-# if onap.repo does not exists create it
-mkdir -p "${OUTDIR}"
-if [ ! -f "${OUTDIR}/onap.repo" ]; then
- cat > "${OUTDIR}/onap.repo" <<EOF
-[ONAP]
-name=Offline ONAP repository
-baseurl=PATH
-enabled=1
-gpgcheck=0
-EOF
-fi
-
-# this exact docker version is required by ONAP/beijing
-# it should be available in centos docker repo
-yumdownloader --resolve --destdir="${OUTDIR}" docker-ce-18.09.5 container-selinux docker-ce-cli \
-containerd.io nfs-utils python-jsonpointer python-docker-py python-docker-pycreds python-ipaddress \
-python-websocket-client
-
-createrepo "${OUTDIR}"
-
-exit 0
\ No newline at end of file
diff --git a/build/creating_data/create-ubuntu-repo.sh b/build/creating_data/create-ubuntu-repo.sh
deleted file mode 100755
index ac7de65c..00000000
--- a/build/creating_data/create-ubuntu-repo.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-# COPYRIGHT NOTICE STARTS HERE
-#
-# Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# COPYRIGHT NOTICE ENDS HERE
-
-OUTDIR="${1}"
-if [[ -z "${OUTDIR}" ]]; then
- echo "Missing output dir"
- exit 1
-fi
-
-
-# create the package index
-dpkg-scanpackages -m "${OUTDIR}" > "${OUTDIR}/Packages"
-cat "${OUTDIR}/Packages" | gzip -9c > "${OUTDIR}/Packages.gz"
-
-# create the Release file
-echo 'deb [trusted=yes] http://repo.infra-server/ubuntu/xenial /' > "${OUTDIR}/onap.list"
-
-exit 0
diff --git a/build/creating_data/docker-images-collector.sh b/build/creating_data/docker-images-collector.sh
index e13b9150..6761c328 100755
--- a/build/creating_data/docker-images-collector.sh
+++ b/build/creating_data/docker-images-collector.sh
@@ -30,9 +30,9 @@ usage () {
echo " "
echo " This script is preparing docker images list based on kubernetes project"
echo " Usage:"
- echo " ./$(basename $0) <project version> <path to project> [<output list file>]"
+ echo " ./$(basename $0) <path to project> [<output list file>]"
echo " "
- echo " Example: ./$(basename $0) onap_3.0.2 /root/oom/kubernetes/onap"
+ echo " Example: ./$(basename $0) /root/oom/kubernetes/onap"
echo " "
echo " Dependencies: helm, python-yaml, make"
echo " "
@@ -47,7 +47,7 @@ import yaml
import sys
with open("${1}", 'r') as f:
- values = yaml.load(f)
+ values = yaml.load(f, Loader=yaml.SafeLoader)
enabled = filter(lambda x: values[x].get('enabled', False) == True, values)
print(' '.join(enabled))
@@ -55,26 +55,35 @@ PYP
}
create_list() {
- helm template "${PROJECT_DIR}/../${1}" | grep 'image:\ \|tag_version:\ \|h._image' |
+ if [ -d "${PROJECT_DIR}/../${1}" ]; then
+ SUBSYS_DIR="${PROJECT_DIR}/../${1}"
+ elif [ -d "${PROJECT_DIR}/../common/${1}" ]; then
+ SUBSYS_DIR="${PROJECT_DIR}/../common/${1}"
+ else
+ >&2 echo -e "\n !!! ${1} subsystem does not exist !!!\n"
+ fi
+ helm template "${SUBSYS_DIR}" | grep 'image:\ \|tag_version:\ \|h._image' |
sed -e 's/^.*\"h._image\"\ :\ //; s/^.*\"\(.*\)\".*$/\1/' \
-e 's/\x27\|,//g; s/^.*\(image\|tag_version\):\ //' | tr -d '\r'
}
# Configuration
-TAG="${1}"
-PROJECT_DIR="${2}"
-LIST="${3}"
+if [ "${1}" == "-h" ] || [ "${1}" == "--help" ] || [ $# -lt 1 ]; then
+ usage
+fi
+
+PROJECT_DIR="${1}"
+LIST="${2}"
LISTS_DIR="$(readlink -f $(dirname ${0}))/../data_lists"
HELM_REPO="local http://127.0.0.1:8879"
+PROJECT="$(basename ${1})"
-if [ "${1}" == "-h" ] || [ "${1}" == "--help" ] || [ $# -lt 2 ]; then
- usage
-elif [ ! -f "${PROJECT_DIR}/../Makefile" ]; then
+if [ ! -f "${PROJECT_DIR}/../Makefile" ]; then
echo "Wrong path to project directory entered"
exit 1
elif [ -z "${LIST}" ]; then
mkdir -p ${LISTS_DIR}
- LIST="${LISTS_DIR}/${TAG}-docker_images.list"
+ LIST="${LISTS_DIR}/${PROJECT}_docker_images.list"
fi
if [ -e "${LIST}" ]; then
@@ -82,8 +91,6 @@ if [ -e "${LIST}" ]; then
MSG="$(realpath ${LIST}) already existed\nCreated backup $(realpath ${LIST}).bk\n"
fi
-PROJECT="$(basename ${2})"
-
# Setup helm
if pgrep -x "helm" > /dev/null; then
echo "helm is already running"
@@ -106,12 +113,17 @@ popd
# Create the list from all enabled subsystems
echo "Creating the list..."
if [ "${PROJECT}" == "onap" ]; then
+ COMMENT="OOM commit $(git --git-dir="${PROJECT_DIR}/../../.git" rev-parse HEAD)"
for subsystem in `parse_yaml "${PROJECT_DIR}/values.yaml"`; do
create_list ${subsystem}
- done
+ done | sort -u > ${LIST}
else
- create_list ${PROJECT}
-fi | sort -u > ${LIST}
+ COMMENT="${PROJECT}"
+ create_list ${PROJECT} | sort -u > ${LIST}
+fi
+
+# Add comment referring to the project
+sed -i "1i# generated from ${COMMENT}" "${LIST}"
echo -e ${MSG}
echo -e 'The list has been created:\n '"${LIST}"
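Since the version argument is gone, the collector is now invoked with the project path only; a sketch based on the usage text above, with the output file optional::

    # default output: ./build/data_lists/onap_docker_images.list
    ./build/creating_data/docker-images-collector.sh /root/oom/kubernetes/onap

    # explicit output file
    ./build/creating_data/docker-images-collector.sh /root/oom/kubernetes/onap /tmp/onap_docker_images.list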
diff --git a/build/creating_data/download-bin-tools.sh b/build/creating_data/download-bin-tools.sh
deleted file mode 100755
index 327e210f..00000000
--- a/build/creating_data/download-bin-tools.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-# COPYRIGHT NOTICE STARTS HERE
-#
-# Copyright 2018-2019 © Samsung Electronics Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# COPYRIGHT NOTICE ENDS HERE
-
-usage () {
- echo "Usage:"
- echo -e "./$(basename $0) [destination directory]\n"
- echo "Examples:"
- echo " ./$(basename $0) ./downloads"
-}
-
-if [ "${1}" == "-h" ] || [ -z "${1}" ] ; then
- usage
- exit 1
-else
- OUTDIR="${1}"
-fi
-
-# we are keeping just dublin support in dublin branch
-KUBECTL_VERSION=${KUBECTL_VERSION:-1.13.5}
-HELM_VERSION=${HELM_VERSION:-2.12.3}
-RKE_VERSION=${RKE_VERSION:-0.2.1}
-
-mkdir -p "$OUTDIR"
-cd "$OUTDIR"
-
-download() {
- url="$1"
- url_file="${url%%\?*}"
- file=$(basename "$url_file")
- echo "Downloading $url"
- curl -s --retry 5 -y 10 -Y 10 --location "$url" -o "$file"
-}
-
-download "https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl"
-
-download "https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz"
-tar -xf ./helm-v${HELM_VERSION}-linux-amd64.tar.gz linux-amd64/helm -O > helm
-rm -f ./helm-v${HELM_VERSION}-linux-amd64.tar.gz
-
-download "https://github.com/rancher/rke/releases/download/v${RKE_VERSION}/rke_linux-amd64"
-mv rke_linux-amd64 rke
-
-chmod a+x ./helm ./kubectl ./rke
-
-exit 0
diff --git a/build/creating_data/download-docker-images.sh b/build/creating_data/download-docker-images.sh
deleted file mode 100755
index c0a0bed1..00000000
--- a/build/creating_data/download-docker-images.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#! /usr/bin/env bash
-
-# COPYRIGHT NOTICE STARTS HERE
-#
-# Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# COPYRIGHT NOTICE ENDS HERE
-
-
-# Load common-functions library
-. $(dirname ${0})/../common-functions.sh
-
-LIST_FILE="${1}"
-if [[ -z "$LIST_FILE" ]]; then
- LIST_FILE="docker_image_list.txt"
-fi
-
-echo "Download all images"
-
-lines=$(clean_list "$LIST_FILE" | wc -l)
-line=1
-for image in $(clean_list "$LIST_FILE"); do
- echo "== pkg #$line of $lines =="
- echo "$image"
- retry docker -l error pull "$image"
- line=$((line+1))
-done
diff --git a/build/creating_data/download-files.sh b/build/creating_data/download-files.sh
deleted file mode 100755
index f687fda1..00000000
--- a/build/creating_data/download-files.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-# COPYRIGHT NOTICE STARTS HERE
-#
-# Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# COPYRIGHT NOTICE ENDS HERE
-
-
-# Load common-functions library
-. $(dirname ${0})/../common-functions.sh
-
-LIST_FILE="${1}"
-if [[ -z "$LIST_FILE" ]]; then
- echo "Missing list file"
- exit 1
-fi
-
-outdir="$2"
-if [[ -z "$outdir" ]]; then
- echo "Missing output directory"
- exit 1
-fi
-
-lines=$(clean_list "$LIST_FILE" | wc -l)
-cnt=1
-
-# create output dir if not exists
-mkdir -p "$outdir"
-
-for line in $(clean_list "$LIST_FILE"); do
- # www.springframework.org/schema/tool/spring-tool-4.3.xsd
- file="${line%%\?*}"
- filename=$(basename "$file")
- echo "Downloading $cnt / $lines: $file"
- # following curl params are ensurring 5 reties and cut-off if connectivity will
- # drop below 10b/10s
- curl --retry 5 -y 10 -Y 10 --location "$line" -o "$outdir/$filename" &>/dev/null
- cnt=$((cnt+1))
-done
\ No newline at end of file
diff --git a/build/creating_data/download-git-repos.sh b/build/creating_data/download-git-repos.sh
deleted file mode 100755
index 7853a141..00000000
--- a/build/creating_data/download-git-repos.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#! /usr/bin/env bash
-# COPYRIGHT NOTICE STARTS HERE
-#
-# Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# COPYRIGHT NOTICE ENDS HERE
-
-# fail fast
-set -e
-
-usage () {
- echo "Usage:"
- echo -e "./$(basename $0) <repository list> [destination directory]\n"
- echo "Examples:"
- echo " ./$(basename $0) onap_3.0.x-git_repos.list ./git-repo"
-}
-
-LIST="${1}"
-
-if [[ -z "${LIST}" ]]; then
- echo "Missing argument for repository list"
- exit 1
-fi
-
-OUTDIR="${2}"
-if [[ -z "${OUTDIR}" ]]; then
- OUTDIR="./git-repo"
-fi
-
-mkdir -p "${OUTDIR}"
-cd "${OUTDIR}"
-
-
-while IFS=" " read -r REPO BRANCH remainder
-do
- if [[ -z "${BRANCH}" ]]; then
- git clone https://${REPO} --bare ${REPO}
- else
- git clone -b ${BRANCH} --single-branch https://${REPO} --bare ${REPO}
- fi
-done < <(awk '$1 ~ /^[^;#]/' ${LIST})
-
-
-exit 0
diff --git a/build/creating_data/download-http-files.sh b/build/creating_data/download-http-files.sh
deleted file mode 100755
index 1144c66e..00000000
--- a/build/creating_data/download-http-files.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-# COPYRIGHT NOTICE STARTS HERE
-#
-# Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# COPYRIGHT NOTICE ENDS HERE
-
-
-# Load common-functions library
-. $(dirname ${0})/../common-functions.sh
-
-LIST_FILE="${1}"
-if [[ -z "$LIST_FILE" ]]; then
- echo "Missing list file"
- exit 1
-fi
-
-outdir="$2"
-if [[ -z "$outdir" ]]; then
- echo "Missing output directory"
- exit 1
-fi
-
-lines=$(clean_list "$LIST_FILE" | wc -l)
-cnt=1
-
-# create output dir if not exists
-mkdir -p "$outdir"
-
-for line in $(clean_list "$LIST_FILE"); do
- # www.springframework.org/schema/tool/spring-tool-4.3.xsd
- file="${line%%\?*}"
- echo "Downloading $cnt / $lines: $file"
- fdir=$(dirname "$file")
- mkdir -p $outdir/$fdir
- # following curl params are ensurring 5 reties and cut-off if connectivity will
- # drop below 10b/10s
- curl --retry 5 -y 10 -Y 10 --location "$line" -o "$outdir/$file" &>/dev/null
- cnt=$((cnt+1))
-done
\ No newline at end of file
diff --git a/build/creating_data/download-npm-pkgs.sh b/build/creating_data/download-npm-pkgs.sh
deleted file mode 100755
index 191dd5df..00000000
--- a/build/creating_data/download-npm-pkgs.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-# COPYRIGHT NOTICE STARTS HERE
-#
-# Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# COPYRIGHT NOTICE ENDS HERE
-
-# Load common-functions library
-. $(dirname ${0})/../common-functions.sh
-
-LIST_FILE="${1}"
-
-if [[ -z "$LIST_FILE" ]]; then
- LIST_FILE="all_npm_list.txt"
-fi
-
-outdir="$2"
-if [[ -z "$outdir" ]]; then
- echo "Missing arg outdir"
- exit 1
-fi
-
-mkdir -p "$outdir"
-cd "$outdir"
-lines=$(clean_list "$LIST_FILE" | wc -l)
-cnt=1
-for line in $(clean_list "$LIST_FILE"); do
- echo "== pkg #$cnt of $lines =="
- npm pack $line
- cnt=$((cnt+1))
-done
\ No newline at end of file
diff --git a/build/creating_data/download-pip.sh b/build/creating_data/download-pip.sh
deleted file mode 100755
index 7ab4b0c8..00000000
--- a/build/creating_data/download-pip.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-# COPYRIGHT NOTICE STARTS HERE
-#
-# Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# COPYRIGHT NOTICE ENDS HERE
-
-
-# Load common-functions library
-. $(dirname ${0})/../common-functions.sh
-
-LIST_FILE="$1"
-if [[ -z "$LIST_FILE" ]]; then
- echo "Missing list file"
- exit 1
-fi
-LIST_FILE=$(readlink -f "$LIST_FILE")
-
-
-outdir="$2"
-if [[ -z "$outdir" ]]; then
- echo "Missing output directory"
- exit 1
-fi
-
-lines=$(clean_list "$LIST_FILE" | wc -l)
-cnt=1
-
-# create output dir if not exists
-mkdir -p "$outdir"
-
-cd "$outdir"
-for line in $(clean_list "$LIST_FILE"); do
- echo "Downloading $cnt / $lines: $line"
- pip download $line
- cnt=$((cnt+1))
-done
diff --git a/build/creating_data/save-docker-images.sh b/build/creating_data/save-docker-images.sh
deleted file mode 100755
index 0a72d15b..00000000
--- a/build/creating_data/save-docker-images.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#! /usr/bin/env bash
-
-# COPYRIGHT NOTICE STARTS HERE
-#
-# Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# COPYRIGHT NOTICE ENDS HERE
-
-
-# Load common-functions library
-. $(dirname ${0})/../common-functions.sh
-
-LIST_FILE="${1}"
-IMG_DIR="${2}"
-
-if [[ -z "$IMG_DIR" ]]; then
- IMG_DIR="./images"
-fi
-
-echo "Creating ${IMG_DIR}"
-if [[ ! -d "${IMG_DIR}" ]]; then
- mkdir -p "${IMG_DIR}"
-fi
-
-save_image() {
- local name_tag=$1
- echo "$name_tag"
- local img_name=$(echo "${name_tag}" | tr /: __)
- local img_path="${IMG_DIR}/${img_name}.tar"
-
- if [[ ! -f "${img_path}" ]] ; then
- echo "[DEBUG] save ${name_tag} to ${img_path}"
- echo "${name_tag}" >> $IMG_DIR/_image_list.txt
- retry docker -l error save -o "${img_path}" ${name_tag}
- else
- echo "[DEBUG] ${name_tag} already saved"
- fi
-}
-
-echo "Save all images"
-line=1
-lines=$(clean_list "$LIST_FILE" | wc -l)
-for image in $(clean_list "$LIST_FILE"); do
- echo "== pkg #$line of $lines =="
- save_image "${image}"
- line=$((line+1))
-done
\ No newline at end of file
diff --git a/build/data_lists/infra_bin_utils.list b/build/data_lists/infra_bin_utils.list
new file mode 100644
index 00000000..8de616e9
--- /dev/null
+++ b/build/data_lists/infra_bin_utils.list
@@ -0,0 +1,3 @@
+https://storage.googleapis.com/kubernetes-release/release/v1.13.5/bin/linux/amd64/kubectl
+https://storage.googleapis.com/kubernetes-helm/helm-v2.12.3-linux-amd64.tar.gz
+https://github.com/rancher/rke/releases/download/v0.2.1/rke_linux-amd64
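This new list replaces the URLs that were hardcoded in the removed download-bin-tools.sh; it is meant to be fed to the http downloader, e.g.::

    ./build/download/download.py --http ./build/data_lists/infra_bin_utils.list ../resources/downloads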
diff --git a/build/data_lists/onap_docker_images.list b/build/data_lists/onap_docker_images.list
index 3ac67ed6..451f6172 100644
--- a/build/data_lists/onap_docker_images.list
+++ b/build/data_lists/onap_docker_images.list
@@ -1,13 +1,13 @@
-# generated from OOM commit 94664fb4457c61076cc7e65ed40dda5cf696bcbe
+# generated from OOM commit 0b904977dde761d189874d6dc6c527cd45928d92
alpine:3.6
busybox
crunchydata/crunchy-pgpool:centos7-10.4-2.0.0
crunchydata/crunchy-postgres:centos7-10.3-1.8.2
crunchydata/crunchy-postgres:centos7-10.4-2.0.0
docker.elastic.co/beats/filebeat:5.5.0
+docker.elastic.co/elasticsearch/elasticsearch-oss:6.6.2
docker.elastic.co/elasticsearch/elasticsearch:5.5.0
docker.elastic.co/elasticsearch/elasticsearch:6.6.2
-docker.elastic.co/elasticsearch/elasticsearch-oss:6.6.2
docker.elastic.co/kibana/kibana:5.5.0
docker.elastic.co/kibana/kibana:6.6.2
docker.elastic.co/logstash/logstash:5.4.3
@@ -45,8 +45,8 @@ nexus3.onap.org:10001/onap/aaf/aaf_locate:2.1.13
nexus3.onap.org:10001/onap/aaf/aaf_oauth:2.1.13
nexus3.onap.org:10001/onap/aaf/aaf_service:2.1.13
nexus3.onap.org:10001/onap/aaf/distcenter:4.0.0
-nexus3.onap.org:10001/onap/aaf/sms:4.0.0
nexus3.onap.org:10001/onap/aaf/smsquorumclient:4.0.0
+nexus3.onap.org:10001/onap/aaf/sms:4.0.0
nexus3.onap.org:10001/onap/aaf/testcaservice:4.0.0
nexus3.onap.org:10001/onap/aai/esr-gui:1.4.0
nexus3.onap.org:10001/onap/aai/esr-server:1.4.0
@@ -54,23 +54,22 @@ nexus3.onap.org:10001/onap/aai-graphadmin:1.1.0
nexus3.onap.org:10001/onap/aai-resources:1.4.0
nexus3.onap.org:10001/onap/aai-schema-service:1.0.6
nexus3.onap.org:10001/onap/aai-traversal:1.4.1
-nexus3.onap.org:10001/onap/admportal-sdnc-image:1.5.1
-nexus3.onap.org:10001/onap/appc-cdt-image:1.5.0
-nexus3.onap.org:10001/onap/appc-image:1.5.0
+nexus3.onap.org:10001/onap/admportal-sdnc-image:1.5.2
+nexus3.onap.org:10001/onap/appc-cdt-image:1.5.1
+nexus3.onap.org:10001/onap/appc-image:1.5.1
nexus3.onap.org:10001/onap/babel:1.4.2
nexus3.onap.org:10001/onap/ccsdk-ansible-server-image:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-apps-ms-neng:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-blueprintsprocessor:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-cds-ui-server:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-commandexecutor:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-controllerblueprints:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-dgbuilder-image:0.4.2
-nexus3.onap.org:10001/onap/ccsdk-sdclistener:0.4.2
-nexus3.onap.org:10001/onap/champ:1.4.0
-nexus3.onap.org:10001/onap/clamp:4.0.2
+nexus3.onap.org:10001/onap/ccsdk-apps-ms-neng:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-blueprintsprocessor:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-cds-ui-server:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-commandexecutor:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-controllerblueprints:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-dgbuilder-image:0.4.3
+nexus3.onap.org:10001/onap/ccsdk-sdclistener:0.4.3
nexus3.onap.org:10001/onap/clamp-dashboard-kibana:4.0.1
nexus3.onap.org:10001/onap/clamp-dashboard-logstash:4.0.1
-nexus3.onap.org:10001/onap/cli:2.0.4
+nexus3.onap.org:10001/onap/clamp:4.0.2
+nexus3.onap.org:10001/onap/cli:3.0.0
nexus3.onap.org:10001/onap/data-router:1.3.3
nexus3.onap.org:10001/onap/data-router:1.4.0
nexus3.onap.org:10001/onap/dcae-be:1.3.0
@@ -90,24 +89,25 @@ nexus3.onap.org:10001/onap/externalapi/nbi:4.0.0
nexus3.onap.org:10001/onap/gizmo:1.4.0
nexus3.onap.org:10001/onap/holmes/engine-management:1.2.5
nexus3.onap.org:10001/onap/holmes/rule-management:1.2.6
+nexus3.onap.org:10001/onap/champ:1.4.0
nexus3.onap.org:10001/onap/modeling/genericparser:1.0.2
nexus3.onap.org:10001/onap/model-loader:1.4.0
nexus3.onap.org:10001/onap/msb/msb_apigateway:1.2.4
nexus3.onap.org:10001/onap/msb/msb_discovery:1.2.3
-nexus3.onap.org:10001/onap/multicloud/azure:1.2.1
-nexus3.onap.org:10001/onap/multicloud/framework:1.3.1
+nexus3.onap.org:10001/onap/multicloud/azure:1.2.2
nexus3.onap.org:10001/onap/multicloud/framework-artifactbroker:1.3.3
-nexus3.onap.org:10001/onap/multicloud/k8s:0.2.0
-nexus3.onap.org:10001/onap/multicloud/openstack-fcaps:1.3.2
-nexus3.onap.org:10001/onap/multicloud/openstack-lenovo:1.3.1
-nexus3.onap.org:10001/onap/multicloud/openstack-ocata:1.3.2
-nexus3.onap.org:10001/onap/multicloud/openstack-pike:1.3.2
-nexus3.onap.org:10001/onap/multicloud/openstack-starlingx:1.3.2
-nexus3.onap.org:10001/onap/multicloud/openstack-windriver:1.3.2
+nexus3.onap.org:10001/onap/multicloud/framework:1.3.3
+nexus3.onap.org:10001/onap/multicloud/k8s:0.4.0
+nexus3.onap.org:10001/onap/multicloud/openstack-fcaps:1.3.4
+nexus3.onap.org:10001/onap/multicloud/openstack-lenovo:1.3.4
+nexus3.onap.org:10001/onap/multicloud/openstack-ocata:1.3.4
+nexus3.onap.org:10001/onap/multicloud/openstack-pike:1.3.4
+nexus3.onap.org:10001/onap/multicloud/openstack-starlingx:1.3.4
+nexus3.onap.org:10001/onap/multicloud/openstack-windriver:1.3.4
nexus3.onap.org:10001/onap/multicloud/vio:1.3.1
-nexus3.onap.org:10001/onap/music/cassandra_3_11:3.0.24
nexus3.onap.org:10001/onap/music/cassandra_job:3.0.24
nexus3.onap.org:10001/onap/music/cassandra_music:3.0.0
+nexus3.onap.org:10001/onap/music/cassandra_3_11:3.0.24
nexus3.onap.org:10001/onap/music/music:3.0.24
nexus3.onap.org:10001/onap/network-discovery:1.5.1
nexus3.onap.org:10001/onap/oom/kube2msb:1.1.0
@@ -118,17 +118,17 @@ nexus3.onap.org:10001/onap/optf-cmso-ticketmgt:2.0.0
nexus3.onap.org:10001/onap/optf-cmso-topology:2.0.0
nexus3.onap.org:10001/onap/optf-has:1.3.0
nexus3.onap.org:10001/onap/optf-osdf:1.3.0
-nexus3.onap.org:10001/onap/org.onap.ccsdk.dashboard.ccsdk-app-os:1.1.0-SNAPSHOT-latest
+nexus3.onap.org:10001/onap/org.onap.ccsdk.dashboard.ccsdk-app-os:1.1.0
nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main:1.1.0
nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.snmptrap:1.4.0
nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.ves.vescollector:1.4.4
nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.cm-container:1.6.2
nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.consul-loader-container:1.0.0
nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.healthcheck-container:1.2.4
-nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container:1.4.17
+nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container:1.4.18
nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.multisite-init-container:1.0.0
nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.redis-cluster-container:1.0.0
-nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.tca-cdap-container:1.1.1
+nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.tca-cdap-container:1.1.2
nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.tls-init-container:1.0.3
nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.configbinding.app-app:2.3.0
nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.deployment-handler:4.0.1
@@ -152,19 +152,19 @@ nexus3.onap.org:10001/onap/portal-app:2.5.0
nexus3.onap.org:10001/onap/portal-db:2.5.0
nexus3.onap.org:10001/onap/portal-sdk:2.5.0
nexus3.onap.org:10001/onap/portal-wms:2.5.0
-nexus3.onap.org:10001/onap/sdc-backend:1.4.0
-nexus3.onap.org:10001/onap/sdc-backend-init:1.4.0
-nexus3.onap.org:10001/onap/sdc-cassandra-init:1.4.0
-nexus3.onap.org:10001/onap/sdc-elasticsearch:1.4.0
-nexus3.onap.org:10001/onap/sdc-frontend:1.4.0
-nexus3.onap.org:10001/onap/sdc-init-elasticsearch:1.4.0
-nexus3.onap.org:10001/onap/sdc-kibana:1.4.0
-nexus3.onap.org:10001/onap/sdc-onboard-backend:1.4.0
-nexus3.onap.org:10001/onap/sdc-onboard-cassandra-init:1.4.0
-nexus3.onap.org:10001/onap/sdnc-ansible-server-image:1.5.1
-nexus3.onap.org:10001/onap/sdnc-dmaap-listener-image:1.5.1
-nexus3.onap.org:10001/onap/sdnc-image:1.5.1
-nexus3.onap.org:10001/onap/sdnc-ueb-listener-image:1.5.1
+nexus3.onap.org:10001/onap/sdc-backend-init:1.4.1
+nexus3.onap.org:10001/onap/sdc-backend:1.4.1
+nexus3.onap.org:10001/onap/sdc-cassandra-init:1.4.1
+nexus3.onap.org:10001/onap/sdc-elasticsearch:1.4.1
+nexus3.onap.org:10001/onap/sdc-frontend:1.4.1
+nexus3.onap.org:10001/onap/sdc-init-elasticsearch:1.4.1
+nexus3.onap.org:10001/onap/sdc-kibana:1.4.1
+nexus3.onap.org:10001/onap/sdc-onboard-backend:1.4.1
+nexus3.onap.org:10001/onap/sdc-onboard-cassandra-init:1.4.1
+nexus3.onap.org:10001/onap/sdnc-ansible-server-image:1.5.2
+nexus3.onap.org:10001/onap/sdnc-dmaap-listener-image:1.5.2
+nexus3.onap.org:10001/onap/sdnc-image:1.5.2
+nexus3.onap.org:10001/onap/sdnc-ueb-listener-image:1.5.2
nexus3.onap.org:10001/onap/search-data-service:1.3.1
nexus3.onap.org:10001/onap/search-data-service:1.4.3
nexus3.onap.org:10001/onap/service-decomposition:1.5.1
@@ -182,8 +182,8 @@ nexus3.onap.org:10001/onap/so/vnfm-adapter:1.4.1
nexus3.onap.org:10001/onap/sparky-be:1.4.0
nexus3.onap.org:10001/onap/spike:1.4.0
nexus3.onap.org:10001/onap/testsuite:1.4.0
-nexus3.onap.org:10001/onap/usecase-ui:1.2.2
nexus3.onap.org:10001/onap/usecase-ui-server:1.2.1
+nexus3.onap.org:10001/onap/usecase-ui:1.2.2
nexus3.onap.org:10001/onap/validation:1.3.1
nexus3.onap.org:10001/onap/vfc/catalog:1.3.1
nexus3.onap.org:10001/onap/vfc/db:1.3.0
@@ -202,11 +202,11 @@ nexus3.onap.org:10001/onap/vfc/wfengine-activiti:1.3.0
nexus3.onap.org:10001/onap/vfc/wfengine-mgrservice:1.3.0
nexus3.onap.org:10001/onap/vfc/ztesdncdriver:1.3.0
nexus3.onap.org:10001/onap/vfc/ztevnfmdriver:1.3.1
-nexus3.onap.org:10001/onap/vid:4.0.0
-nexus3.onap.org:10001/onap/vnfsdk/refrepo:1.2.0
-nexus3.onap.org:10001/onap/workflow-backend:1.4.0
-nexus3.onap.org:10001/onap/workflow-frontend:1.4.0
-nexus3.onap.org:10001/onap/workflow-init:1.4.0
+nexus3.onap.org:10001/onap/vid:4.2.0
+nexus3.onap.org:10001/onap/vnfsdk/refrepo:1.3.0
+nexus3.onap.org:10001/onap/workflow-backend:1.4.1
+nexus3.onap.org:10001/onap/workflow-frontend:1.4.1
+nexus3.onap.org:10001/onap/workflow-init:1.4.1
nexus3.onap.org:10001/sonatype/nexus:2.14.8-01
nexus3.onap.org:10001/zookeeper:3.4
oomk8s/mariadb-client-init:3.0.0
diff --git a/build/data_lists/onap_pip_packages.list b/build/data_lists/onap_pip_packages.list
index ba5cdf7e..48029bc2 100644
--- a/build/data_lists/onap_pip_packages.list
+++ b/build/data_lists/onap_pip_packages.list
@@ -5,8 +5,7 @@ Flask==1.0.3
idna==2.8
itsdangerous==1.1.0
Jinja2==2.10.1
-MarkupSafe==1.1.1
+MarkupSafe==0.23
requests==2.22.0
-setuptools==40.7.1
urllib3==1.25.3
Werkzeug==0.15.4
diff --git a/build/data_lists/onap_rpm.list b/build/data_lists/onap_rpm.list
new file mode 100644
index 00000000..4595d4b1
--- /dev/null
+++ b/build/data_lists/onap_rpm.list
@@ -0,0 +1,21 @@
+containerd.io-1.2.5-3.1.el7.x86_64
+container-selinux-1.12.5-14.el7.x86_64
+container-selinux-2.95-2.el7_6.noarch
+docker-ce-18.09.5-3.el7.x86_64
+docker-ce-cli-18.09.6-3.el7.x86_64
+gssproxy-0.7.0-21.el7.x86_64
+keyutils-1.5.8-3.el7.x86_64
+libbasicobjects-0.1.1-32.el7.x86_64
+libcollection-0.7.0-32.el7.x86_64
+libevent-2.0.21-4.el7.x86_64
+libini_config-1.3.1-32.el7.x86_64
+libnfsidmap-0.25-19.el7.x86_64
+libpath_utils-0.2.1-32.el7.x86_64
+libref_array-0.1.5-32.el7.x86_64
+libverto-libevent-0.2.5-4.el7.x86_64
+nfs-utils-1.3.0-0.61.el7.x86_64
+python-docker-py-1.10.6-9.el7_6.noarch
+python-docker-pycreds-0.3.0-9.el7_6.noarch
+python-ipaddress-1.0.16-2.el7.noarch
+python-jsonpointer-1.9-2.el7.noarch
+python-websocket-client-0.32.0-116.el7.noarch
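The rpm list replaces the package set that create-rhel-repo.sh used to resolve via yumdownloader. A sketch of consuming it with the new tooling and rebuilding the repository afterwards (yumdownloader and createrepo must be present on the build host)::

    ./build/download/download.py --rpm ./build/data_lists/onap_rpm.list ../resources/pkg/rhel
    createrepo ../resources/pkg/rhel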
diff --git a/build/download/base.py b/build/download/base.py
index 5bcd0ef6..d8b44839 100644
--- a/build/download/base.py
+++ b/build/download/base.py
@@ -38,7 +38,8 @@ def load_list(item_list):
:return: set of items from file
"""
with open(item_list, 'r') as f:
- return {item for item in (line.strip() for line in f) if item}
+ return {item for item in (line.strip() for line in f)
+ if item and not item.startswith('#')}
def init_progress(items_name):
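With comment filtering moved into load_list(), annotated data lists can be passed to any of the downloaders unchanged; a hypothetical list illustrating what is now skipped::

    # generated from OOM commit <sha>   <- header line, ignored by load_list()
    alpine:3.6
    busybox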
diff --git a/build/download/docker_images.py b/build/download/docker_images.py
index e4e742b3..d8138dd6 100755
--- a/build/download/docker_images.py
+++ b/build/download/docker_images.py
@@ -180,7 +180,7 @@ def download_docker_image(image, save, output_dir, docker_client):
if save:
save_image(image, pulled_image, output_dir)
except Exception as err:
- log.error('Error downloading {}: {}'.format(image, err))
+ log.exception('Error downloading {}: {}'.format(image, err))
raise err
@@ -195,10 +195,10 @@ def download(image_list, save, output_dir, check_mode, progress, workers=3):
:return: None
"""
try:
- docker_client = docker.client.DockerClient(version='auto')
+ # big timeout in case of massive images like pnda-mirror-container:5.0.0 (11.4GB)
+ docker_client = docker.client.DockerClient(version='auto', timeout=300)
except docker.errors.DockerException as err:
- log.error(err)
- log.error('Error creating docker client. Check if is docker installed and running'
+ log.exception('Error creating docker client. Check if docker is installed and running'
' or if you have right permissions.')
raise err
@@ -221,14 +221,12 @@ def download(image_list, save, output_dir, check_mode, progress, workers=3):
missing_images['not_saved'] - missing_images['not_pulled'],
None, output_dir, docker_client)
+ base.finish_progress(progress, error_count, log)
if error_count > 0:
log.error('{} images were not downloaded'.format(error_count))
missing_images = missing(docker_client, target_images, save, output_dir)
log.info(check_table(merge_dict_sets(missing_images), missing_images, save))
-
- base.finish_progress(progress, error_count, log)
-
- return error_count
+ raise RuntimeError()
def run_cli():
@@ -256,11 +254,13 @@ def run_cli():
progress = base.init_progress('Docker images') if not args.check else None
try:
- sys.exit(download(args.image_list, args.save, args.output_dir, args.check,
- progress, args.workers))
+ download(args.image_list, args.save, args.output_dir, args.check,
+ progress, args.workers)
except docker.errors.DockerException:
- log.error('Irrecoverable error detected.')
+ log.exception('Irrecoverable error detected.')
sys.exit(1)
+ except RuntimeError as err:
+ log.exception(err)
if __name__ == '__main__':
diff --git a/build/download/download.py b/build/download/download.py
new file mode 100755
index 00000000..0d8912ee
--- /dev/null
+++ b/build/download/download.py
@@ -0,0 +1,173 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# COPYRIGHT NOTICE STARTS HERE
+
+# Copyright 2019 © Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# COPYRIGHT NOTICE ENDS HERE
+
+import argparse
+import logging
+import sys
+import datetime
+import timeit
+
+import base
+import docker_images
+import git_repos
+import http_files
+import npm_packages
+import pypi_packages
+import rpm_packages
+
+log = logging.getLogger(name=__name__)
+
+def parse_args():
+ parser=argparse.ArgumentParser(description='Download data from lists')
+ list_group = parser.add_argument_group()
+ list_group.add_argument('--docker', action='append', nargs='+', default=[],
+ metavar=('list', 'dir-name'),
+ help='Docker type list. If second argument is specified '
+ 'it is treated as the directory where images will be saved, '
+ 'otherwise only the pull operation is executed')
+ list_group.add_argument('--http', action='append', nargs=2, default=[],
+ metavar=('list', 'dir-name'),
+ help='Http type list and directory to save downloaded files')
+ list_group.add_argument('--npm', action='append', nargs=2, default=[],
+ metavar=('list', 'dir-name'),
+ help='npm type list and directory to save downloaded files')
+ list_group.add_argument('--rpm', action='append', nargs=2, default=[],
+ metavar=('list', 'dir-name'),
+ help='rpm type list and directory to save downloaded files')
+ list_group.add_argument('--git', action='append', nargs=2, default=[],
+ metavar=('list', 'dir-name'),
+ help='git repo type list and directory to save downloaded files')
+ list_group.add_argument('--pypi', action='append', nargs=2, default=[],
+ metavar=('list', 'dir-name'),
+ help='pypi packages type list and directory to save downloaded files')
+ parser.add_argument('--npm-registry', default='https://registry.npmjs.org',
+ help='npm registry to use (default: https://registry.npmjs.org)')
+ parser.add_argument('--check', '-c', action='store_true', default=False,
+ help='Check what is missing. No download.')
+ parser.add_argument('--debug', action='store_true', default=False,
+ help='Turn on debug output')
+
+ args = parser.parse_args()
+
+ for arg in ('docker', 'npm', 'http', 'rpm', 'git', 'pypi'):
+ if getattr(args, arg):
+ return args
+
+ parser.error('One of --docker, --npm, --http, --rpm, --git, --pypi must be specified')
+
+
+def run_cli():
+ args = parse_args()
+
+ console_handler = logging.StreamHandler(sys.stdout)
+ console_formatter = logging.Formatter('%(message)s')
+ console_handler.setFormatter(console_formatter)
+ now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+ log_file = 'download_data-{}.log'.format(now)
+ file_format = "%(asctime)s: %(filename)s: %(levelname)s: %(message)s"
+
+ if args.debug:
+ logging.basicConfig(level=logging.DEBUG, filename=log_file, format=file_format)
+ else:
+ logging.basicConfig(level=logging.INFO, filename=log_file, format=file_format)
+ root_logger = logging.getLogger()
+ root_logger.addHandler(console_handler)
+
+ list_with_errors = []
+ timer_start = timeit.default_timer()
+
+ for docker_list in args.docker:
+ log.info('Processing {}.'.format(docker_list[0]))
+ progress = None if args.check else base.init_progress('docker images')
+ save = False
+ if len(docker_list) > 1:
+ save = True
+ else:
+ docker_list.append(None)
+ try:
+ docker_images.download(docker_list[0], save,
+ docker_list[1], args.check, progress)
+ except RuntimeError:
+ list_with_errors.append(docker_list[0])
+
+ for http_list in args.http:
+ progress = None if args.check else base.init_progress('http files')
+ log.info('Processing {}.'.format(http_list[0]))
+ try:
+ http_files.download(http_list[0], http_list[1], args.check,
+ progress)
+ except RuntimeError:
+ list_with_errors.append(http_list[0])
+
+ for npm_list in args.npm:
+ progress = None if args.check else base.init_progress('npm packages')
+ log.info('Processing {}.'.format(npm_list[0]))
+ try:
+ npm_packages.download(npm_list[0], args.npm_registry, npm_list[1],
+ args.check, progress)
+ except RuntimeError:
+ list_with_errors.append(npm_list[0])
+
+ for rpm_list in args.rpm:
+ if args.check:
+ log.info('Check mode for rpm packages is not implemented')
+ break
+ log.info('Processing {}.'.format(rpm_list[0]))
+ try:
+ rpm_packages.download(rpm_list[0], rpm_list[1])
+ except RuntimeError:
+ list_with_errors.append(rpm_list[0])
+
+ for git_list in args.git:
+ if args.check:
+ log.info('Check mode for git repositories is not implemented')
+ break
+ progress = None if args.check else base.init_progress('git repositories')
+ log.info('Processing {}.'.format(git_list[0]))
+ try:
+ git_repos.download(git_list[0], git_list[1], progress)
+ except RuntimeError:
+ list_with_errors.append(git_list[0])
+
+ for pypi_list in args.pypi:
+ if args.check:
+ log.info('Check mode for pypi packages is not implemented')
+ break
+ progress = None if args.check else base.init_progress('pypi packages')
+ log.info('Processing {}.'.format(pypi_list[0]))
+ try:
+ pypi_packages.download(pypi_list[0], pypi_list[1], progress)
+ except RuntimeError:
+ list_with_errors.append(pypi_list[0])
+
+ e_time = datetime.timedelta(seconds=timeit.default_timer() - timer_start)
+ log.info(timeit.default_timer() - timer_start)
+ log.info('Execution ended. Total elapsed time {}'.format(e_time))
+
+ if list_with_errors:
+ log.error('Errors encountered while processing these lists:'
+ '\n{}'.format('\n'.join(list_with_errors)))
+ sys.exit(1)
+
+
+
+if __name__ == '__main__':
+ run_cli()
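A shortened usage sketch of the new wrapper (the full invocation covering every list type is shown in the Build Guide change below); --check only reports what is missing without downloading::

    ./build/download/download.py \
        --docker ./build/data_lists/onap_docker_images.list ../resources/offline_data/docker_images_for_nexus \
        --npm ./build/data_lists/onap_npm.list ../resources/offline_data/npm_tar \
        --check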
diff --git a/build/download/git_repos.py b/build/download/git_repos.py
index e388e94c..1d8c2979 100755
--- a/build/download/git_repos.py
+++ b/build/download/git_repos.py
@@ -21,6 +21,7 @@
import argparse
import subprocess
+import shutil
import logging
import sys
import os
@@ -45,10 +46,9 @@ def download(git_list, dst_dir, progress):
if not base.check_tool('git'):
log.error('ERROR: git is not installed')
progress.finish(dirty=True)
- return 1
+ raise RuntimeError('git missing')
- git_set = {tuple(item.split()) for item in base.load_list(git_list)
- if not item.startswith('#')}
+ git_set = {tuple(item.split()) for item in base.load_list(git_list)}
error_count = 0
@@ -64,19 +64,20 @@ def download(git_list, dst_dir, progress):
clone_repo(dst, *repo)
progress.update(progress.value + 1)
except subprocess.CalledProcessError as err:
- log.error(err.output.decode())
+ if os.path.isdir(dst):
+ shutil.rmtree(dst)
+ log.exception(err.output.decode())
error_count += 1
base.finish_progress(progress, error_count, log)
if error_count > 0:
log.error('{} were not downloaded. Check logs for details'.format(error_count))
- return error_count
-
+ raise RuntimeError('Download unsuccessful')
def run_cli():
parser = argparse.ArgumentParser(description='Download git repositories from list')
parser.add_argument('git_list', metavar='git-list',
- help='File with list of npm packages to download.')
+ help='File with list of git repos to download.')
parser.add_argument('--output-dir', '-o', default=os.getcwd(),
help='Download destination')
@@ -85,8 +86,11 @@ def run_cli():
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(message)s')
progress = base.init_progress('git repositories')
-
- sys.exit(download(args.git_list, args.output_dir, progress))
+ try:
+ download(args.git_list, args.output_dir, progress)
+ except RuntimeError as err:
+ log.exception(err)
+ sys.exit(1)
if __name__ == '__main__':
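The list format consumed here is whitespace-split per line (repository, optional branch), with '#' comment lines now filtered by base.load_list. A hypothetical onap_git_repos.list entry, with the layout inferred from the retired download-git-repos.sh::

    gerrit.onap.org/r/testsuite/properties master
    gerrit.onap.org/r/demo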
diff --git a/build/download/http_files.py b/build/download/http_files.py
index f5b1e59a..c83158d6 100755
--- a/build/download/http_files.py
+++ b/build/download/http_files.py
@@ -83,7 +83,7 @@ def download(data_list, dst_dir, check, progress, workers=None):
if check:
log.info(base.simple_check_table(file_set, missing_files))
- return 0
+ return
skipping = file_set - missing_files
@@ -91,12 +91,11 @@ def download(data_list, dst_dir, check, progress, workers=None):
error_count = base.run_concurrent(workers, progress, download_file, missing_files, dst_dir)
+ base.finish_progress(progress, error_count, log)
if error_count > 0:
log.error('{} files were not downloaded. Check log for specific failures.'.format(error_count))
+ raise RuntimeError()
- base.finish_progress(progress, error_count, log)
-
- return error_count
def run_cli():
"""
@@ -123,7 +122,10 @@ def run_cli():
progress = base.init_progress('http files') if not args.check else None
- sys.exit(download(args.file_list, args.output_dir, args.check, progress, args.workers))
+ try:
+ download(args.file_list, args.output_dir, args.check, progress, args.workers)
+ except RuntimeError:
+ sys.exit(1)
if __name__ == '__main__':
diff --git a/build/download/npm_packages.py b/build/download/npm_packages.py
index c174e2c1..70c03ad8 100755
--- a/build/download/npm_packages.py
+++ b/build/download/npm_packages.py
@@ -57,7 +57,7 @@ def download_npm(npm, registry, dst_dir):
except Exception as err:
if os.path.isfile(dst_path):
os.remove(dst_path)
- log.error('Failed: {}: {}'.format(npm, err))
+ log.exception('Failed: {}'.format(npm))
raise err
log.info('Downloaded: {}'.format(npm))
@@ -81,12 +81,10 @@ def download(npm_list, registry, dst_dir, check_mode, progress=None, workers=Non
base.start_progress(progress, len(npm_set), skipping, log)
error_count = base.run_concurrent(workers, progress, download_npm, missing_npms, registry, dst_dir)
+ base.finish_progress(progress, error_count, log)
if error_count > 0:
log.error('{} packages were not downloaded. Check log for specific failures.'.format(error_count))
-
- base.finish_progress(progress, error_count, log)
-
- return error_count
+ raise RuntimeError()
def run_cli():
diff --git a/build/download/pypi_packages.py b/build/download/pypi_packages.py
new file mode 100755
index 00000000..951003c5
--- /dev/null
+++ b/build/download/pypi_packages.py
@@ -0,0 +1,88 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# COPYRIGHT NOTICE STARTS HERE
+
+# Copyright 2019 © Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# COPYRIGHT NOTICE ENDS HERE
+
+import argparse
+import logging
+import sys
+import subprocess
+import os
+from retrying import retry
+
+import base
+
+log = logging.getLogger(name=__name__)
+
+@retry(stop_max_attempt_number=5, wait_fixed=5000)
+def download_package(package_name, dst_dir):
+ command = 'pip download --dest {} {}'.format(dst_dir, package_name)
+ log.info('Running: {}'.format(command))
+ log.info(subprocess.check_output(command.split(), stderr=subprocess.STDOUT).decode())
+ log.info('Downloaded: {}'.format(package_name))
+
+
+def download(pypi_list, dst_dir, progress):
+ if not base.check_tool('pip'):
+ log.error('ERROR: pip is not installed')
+ progress.finish(dirty=True)
+ raise RuntimeError('pip missing')
+
+ pypi_set = base.load_list(pypi_list)
+
+ error_count = 0
+
+ base.start_progress(progress, len(pypi_set), [], log)
+
+ for package in pypi_set:
+ try:
+ download_package(package, dst_dir)
+ except subprocess.CalledProcessError as err:
+ log.exception(err.output.decode())
+ error_count += 1
+
+ progress.update(progress.value + 1)
+
+ base.finish_progress(progress, error_count, log)
+ if error_count > 0:
+ log.error('{} packages were not downloaded. Check logs for details'.format(error_count))
+ raise RuntimeError('Download unsuccessful')
+
+
+def run_cli():
+ parser = argparse.ArgumentParser(description='Download pypi packages from list')
+ parser.add_argument('pypi_list', metavar='pypi-list',
+ help='File with list of pypi packages to download.')
+ parser.add_argument('--output-dir', '-o', default=os.getcwd(),
+ help='Download destination')
+
+ args = parser.parse_args()
+
+ logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(message)s')
+
+ progress = base.init_progress('pypi packages')
+ try:
+ download(args.pypi_list, args.output_dir, progress)
+ except RuntimeError as err:
+ log.exception(err)
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ run_cli()
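Besides being driven by download.py, the module can be run standalone; a minimal sketch matching the argparse definition above (pip and the python 'retrying' package must be installed)::

    ./build/download/pypi_packages.py ./build/data_lists/onap_pip_packages.list \
        --output-dir ../resources/offline_data/pypi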
diff --git a/build/download/rpm_packages.py b/build/download/rpm_packages.py
index 7f9700a3..732af0ea 100755
--- a/build/download/rpm_packages.py
+++ b/build/download/rpm_packages.py
@@ -33,7 +33,7 @@ log = logging.getLogger(name=__name__)
def download(rpm_list, dst_dir):
if not base.check_tool('yumdownloader'):
log.error('ERROR: yumdownloader is not installed')
- return 1
+ raise RuntimeError('yumdownloader missing')
rpm_set = base.load_list(rpm_list)
@@ -41,11 +41,10 @@ def download(rpm_list, dst_dir):
log.info('Running command: {}'.format(command))
try:
subprocess.check_call(command.split())
- log.info('Downloaded')
except subprocess.CalledProcessError as err:
- log.error(err.output)
- return err.returncode
-
+ log.exception(err.output)
+ raise err
+ log.info('Downloaded')
def run_cli():
@@ -59,7 +58,11 @@ def run_cli():
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(message)s')
- sys.exit(download(args.rpm_list, args.output_dir))
+ try:
+ download(args.rpm_list, args.output_dir)
+ except (subprocess.CalledProcessError, RuntimeError):
+ sys.exit(1)
+
if __name__ == '__main__':
diff --git a/build/download_offline_data_by_lists.sh b/build/download_offline_data_by_lists.sh
deleted file mode 100755
index b2afd172..00000000
--- a/build/download_offline_data_by_lists.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#! /usr/bin/env bash
-
-# COPYRIGHT NOTICE STARTS HERE
-#
-# Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# COPYRIGHT NOTICE ENDS HERE
-
-
-# fail fast
-set -e
-
-usage () {
- echo "Usage:"
- echo -e "./$(basename $0) <project version>\n"
- echo "onap_3.0.0 for casablanca (sign-off 30/11/2018)"
- echo "onap_3.0.1 for casablanca maintenance release (sign-off 10/12/2018)"
- echo "onap_3.0.2 for latest casablanca with fixed certificates (sign-off 25/04/2019)"
- echo ""
- echo "Example:"
- echo " ./$(basename $0) onap_3.0.2"
-}
-
-# boilerplate
-RELATIVE_PATH=./ # relative path from this script to 'common-functions.sh'
-if [ "$IS_COMMON_FUNCTIONS_SOURCED" != YES ] ; then
- SCRIPT_DIR=$(dirname "${0}")
- LOCAL_PATH=$(readlink -f "$SCRIPT_DIR")
- . "${LOCAL_PATH}"/"${RELATIVE_PATH}"/common-functions.sh
-fi
-
-if [ "${1}" == "-h" ] || [ -z "${1}" ]; then
- usage
- exit 0
-else
- TAG="${1}"
-fi
-
-CTOOLS="${LOCAL_PATH}/creating_data"
-LISTS_DIR="${LOCAL_PATH}/data_lists"
-DATA_DIR="${LOCAL_PATH}/../../resources"
-TOTAL=12
-CURR=1
-
-message info "Downloading started: $(date)"
-
-echo "[Step $((CURR++))/$TOTAL Download collected docker images]"
-$CTOOLS/download-docker-images.sh "${LISTS_DIR}/${TAG}-docker_images.list"
-
-echo "[Step $((CURR++))/$TOTAL Download docker images for infra-server]"
-$CTOOLS/download-docker-images.sh "${LISTS_DIR}/infra_docker_images.list"
-
-echo "[Step $((CURR++))/$TOTAL Build own nginx image]"
-$CTOOLS/create_nginx_image/01create-image.sh "${DATA_DIR}/offline_data/docker_images_infra"
-
-echo "[Step $((CURR++))/$TOTAL Save docker images from docker cache to tarfiles]"
-$CTOOLS/save-docker-images.sh "${LISTS_DIR}/${TAG}-docker_images.list" "${DATA_DIR}/offline_data/docker_images_for_nexus"
-
-echo "[Step $((CURR++))/$TOTAL Prepare infra related images to infra folder]"
-$CTOOLS/save-docker-images.sh "${LISTS_DIR}/infra_docker_images.list" "${DATA_DIR}/offline_data/docker_images_infra"
-
-echo "[Step $((CURR++))/$TOTAL Download git repos]"
-$CTOOLS/download-git-repos.sh "${LISTS_DIR}/onap_3.0.x-git_repos.list" "${DATA_DIR}/git-repo"
-
-echo "[Step $((CURR++))/$TOTAL Download http files]"
-$CTOOLS/download-http-files.sh "${LISTS_DIR}/onap_3.0.x-http_files.list" "${DATA_DIR}/http"
-
-echo "[Step $((CURR++))/$TOTAL Download npm pkgs]"
-$CTOOLS/download-npm-pkgs.sh "${LISTS_DIR}/onap_3.0.x-npm.list" "${DATA_DIR}/offline_data/npm_tar"
-
-echo "[Step $((CURR++))/$TOTAL Download bin tools]"
-$CTOOLS/download-bin-tools.sh "${DATA_DIR}/downloads"
-
-echo "[Step $((CURR++))/$TOTAL Create RHEL repository]"
-$CTOOLS/create-rhel-repo.sh "${DATA_DIR}/pkg/rhel"
-
-echo "[Step $((CURR++))/$TOTAL Download sdnc-ansible-server packages]"
-$CTOOLS/download-pip.sh "${LISTS_DIR}/onap_3.0.x-pip_packages.list" "${DATA_DIR}/offline_data/pypi"
-$CTOOLS/download-files.sh "${LISTS_DIR}/deb_packages.list" "${DATA_DIR}/pkg/ubuntu/xenial"
-
-echo "[Step $((CURR++))/$TOTAL Create APT repository]"
-$CTOOLS/create-ubuntu-repo.sh "${DATA_DIR}/pkg/ubuntu/xenial"
-
-message info "Downloading finished: $(date)"
diff --git a/build/fetch_and_patch_charts.sh b/build/fetch_and_patch_charts.sh
index 79d7a01e..22d45e66 100755
--- a/build/fetch_and_patch_charts.sh
+++ b/build/fetch_and_patch_charts.sh
@@ -54,7 +54,7 @@ TOTAL=5
PATCH_FILE=$(realpath "${3}")
echo -e "${_G}[Step $((CURR++))/${TOTAL} cloning repo with charts to be patched]${C_}"
-git clone "${1}" "${4}"
+git clone --recurse-submodules "${1}" "${4}"
echo -e "${_G}[Step $((CURR++))/${TOTAL} setting working dir to ${4}]${C_}"
pushd "${4}"
diff --git a/docs/BuildGuide.rst b/docs/BuildGuide.rst
index bb0e4cca..cfddcc99 100644
--- a/docs/BuildGuide.rst
+++ b/docs/BuildGuide.rst
@@ -15,29 +15,9 @@ Part 1. Preparations
We assume that procedure is executed on RHEL 7.6 server with \~300G disc space, 16G+ RAM and internet connectivity
-More-over following sw packages has to be installed:
+Some additional sw packages are required by the ONAP offline platform build tooling. In order to install them,
+the following repos have to be configured for the RHEL 7.6 platform.
-* for the Preparation (Part 1), the Download artifacts for offline installer (Part 2) and the application helm charts preparation and patching (Part 4)
- - git
- - wget
-
-* for the Download artifacts for offline installer (Part 2) only
- - createrepo
- - dpkg-dev
- - python2-pip
-
-* for the Download artifacts for offline installer (Part 2) and the Populate local nexus (Part 3)
- - nodejs
- - jq
- - docker (exact version docker-ce-18.09.5)
-
-* for the Download artifacts for offline installer (Part 2) and for the Application helm charts preparation and patching (Part 4)
- - patch
-
-* for the Populate local nexus (Part 3)
- - twine
-
-Configure repos for downloading all needed rpms for download/packaging tooling:
::
@@ -49,19 +29,28 @@ Configure repos for downloading all needed rpms for download/packaging tooling:
# Register server
subscription-manager register --username <rhel licence name> --password <password> --auto-attach
- # enable epel for npm and jq
- rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+ # required by special centos docker recommended by ONAP
+ yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
- # enable rhel-7-server-e4s-optional-rpms in /etc/yum.repos.d/redhat.repo
+ # required by docker dependencies i.e. docker-selinux
+ subscription-manager repos --enable=rhel-7-server-extras-rpms
+
+ # epel is required by npm within blob build
+ rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
Alternatively
::
+ ToDo: newer download scripts need to be verified on CentOS with ONAP Dublin
+
##############
# Centos 7.6 #
##############
+ # required by special centos docker recommended by ONAP
+ yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+
# enable epel repo for npm and jq
yum install -y epel-release
@@ -70,12 +59,13 @@ Subsequent steps are the same on both platforms:
::
# install following packages
- yum install -y expect nodejs git wget createrepo python2-pip jq patch dpkg-dev
+ yum install -y docker-ce-18.09.5 python-pip git createrepo expect nodejs npm jq
+ # twine package is needed by nexus blob build script
pip install twine
- # install docker
- curl https://releases.rancher.com/install-docker/18.09.sh | sh
+ # docker daemon must be running on host
+ service docker start
Then it is necessary to clone all installer and build related repositories and prepare the directory structure.
@@ -86,142 +76,73 @@ Then it is necessary to clone all installer and build related repositories and p
git clone https://gerrit.onap.org/r/oom/offline-installer onap-offline
cd onap-offline
+ # install required pip packages for download scripts
+ pip install -r ./build/download/requirements.txt
+
Part 2. Download artifacts for offline installer
------------------------------------------------
.. note:: Skip this step if you have already all necessary resources and continue with Part 3. Populate local nexus
-All artifacts should be downloaded by running the download script as follows:
-
-./build/download_offline_data_by_lists.sh <project>
+It's possible to download all artifacts in a single ./download.py execution. Recently we improved the reliability of the download scripts,
+so one might try the following command to download most of the required artifacts in a single shot.
-For example:
+**Step 1 - download wrapper script execution**
::
- # onap_3.0.0 for casablanca (sign-off 30/11/2018)
- # onap_3.0.1 for casablanca maintenance release (sign-off 10/12/2018)
- # onap_3.0.2 for latest casablanca with fixed certificates (sign-off 25/04/2019)
-
- $ ./build/download_offline_data_by_lists.sh onap_3.0.2
-
-Download is as reliable as network connectivity to internet, it is highly recommended to run it in screen and save log file from this script execution for checking if all artifacts were successfully collected. Each start and end of script call should contain timestamp in console output. Downloading consists of 10 steps, which should be checked at the end one-by-one.
-
-**Verify:** *Please take a look on following comments to respective
-parts of download script*
+ # the following arguments are provided
+ # all data lists are taken from the ./build/data_lists/ folder
+ # all resources will be stored in the expected folder structure within the ../resources folder
+ # for more details refer to Appendix 1.
-[Step 1/10 Download collected docker images]
+ ./build/download/download.py --docker ./build/data_lists/infra_docker_images.list ../resources/offline_data/docker_images_infra \
+ --docker ./build/data_lists/rke_docker_images.list ../resources/offline_data/docker_images_for_nexus \
+ --docker ./build/data_lists/onap_docker_images.list ../resources/offline_data/docker_images_for_nexus \
+ --git ./build/data_lists/onap_git_repos.list ../resources/git-repo \
+ --npm ./build/data_lists/onap_npm.list ../resources/offline_data/npm_tar \
+ --rpm ./build/data_lists/onap_rpm.list ../resources/pkg/rhel \
+ --pypi ./build/data_lists/onap_pip_packages.list ../resources/offline_data/pypi \
+ --http ./build/data_lists/infra_bin_utils.list ../resources/downloads
-=> image download step is quite reliable and contain retry logic
-E.g
+Alternatively, a step-by-step procedure is described in Appendix 1.
-::
+The following steps are still required and are not supported by the current version of the download.py script.
- == pkg #143 of 163 ==
- rancher/etc-host-updater:v0.0.3
- digest:sha256:bc156a5ae480d6d6d536aa454a9cc2a88385988617a388808b271e06dc309ce8
- Error response from daemon: Get https://registry-1.docker.io/v2/rancher/etc-host-updater/manifests/v0.0.3: Get
- https://auth.docker.io/token?scope=repository%3Arancher%2Fetc-host-updater%3Apull&service=registry.docker.io: net/http: TLS handshake timeout
- WARNING [!]: warning Command docker -l error pull rancher/etc-host-updater:v0.0.3 failed.
- Attempt: 2/5
- INFO: info waiting 10s for another try...
- v0.0.3: Pulling from rancher/etc-host-updater
- b3e1c725a85f: Already exists
- 6a710864a9fc: Already exists
- d0ac3b234321: Already exists
- 87f567b5cf58: Already exists
- 16914729cfd3: Already exists
- 83c2da5790af: Pulling fs layer
- 83c2da5790af: Verifying Checksum
- 83c2da5790af: Download complete
- 83c2da5790af: Pull complete
-
-[Step 2/10 Build own nginx image]
-
-=> there is no hardening in this step, if it fails it needs to be
-retriggered. It should end with
+**Step 2 - Building own dns image**
::
- Successfully built <id>
-
-[Step 3/10 Save docker images from docker cache to tarfiles]
+ # We are building our own dns image within our offline infrastructure
+ ./build/creating_data/create_nginx_image/01create-image.sh /tmp/resources/offline_data/docker_images_infra
-=> quite reliable, retry logic in place
-[Step 4/10 move infra related images to infra folder]
+**Step 3 - Http files**
-=> should be safe, precondition is not failing step(3)
+ToDo: a complete and verified list of http files will be available during/after the vFWCL testcase
-[Step 5/10 Download git repos]
-=> potentially unsafe, no hardening in place. If it not download all git repos. It has to be executed again. Easiest way is probably to comment-out other steps in load script and run it again.
-
-E.g.
+**Step 4 - Binaries**
::
- Cloning into bare repository
- 'github.com/rancher/community-catalog.git'...
- error: RPC failed; result=28, HTTP code = 0
- fatal: The remote end hung up unexpectedly
- Cloning into bare repository 'git.rancher.io/rancher-catalog.git'...
- Cloning into bare repository
- 'gerrit.onap.org/r/testsuite/properties.git'...
- Cloning into bare repository 'gerrit.onap.org/r/portal.git'...
- Cloning into bare repository 'gerrit.onap.org/r/aaf/authz.git'...
- Cloning into bare repository 'gerrit.onap.org/r/demo.git'...
- Cloning into bare repository
- 'gerrit.onap.org/r/dmaap/messagerouter/messageservice.git'...
- Cloning into bare repository 'gerrit.onap.org/r/so/docker-config.git'...
+ # Binaries are downloaded in step one, but some post-processing is still needed.
+ # This will be improved in the installer itself in the future.
-[Step 6/10 Download http files]
+ tar -xf ../resources/downloads/helm-v2.12.3-linux-amd64.tar.gz linux-amd64/helm -O > ../resources/downloads/helm
+ rm -f ../resources/downloads/helm-v2.12.3-linux-amd64.tar.gz
+ mv ../resources/downloads/rke_linux-amd64 rke
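+
+ # Optional, illustrative sanity check (not part of the original procedure): the helm binary
+ # should now be present in ../resources/downloads and the rke binary in the current directory.
+ ls -l ../resources/downloads/helm rke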
-[Step 7/10 Download npm pkgs]
-[Step 8/10 Download bin tools]
+**Step 5 - Create repo**
-=> work quite reliably, If it not download all artifacts. Easiest way is probably to comment-out other steps in load script and run it again.
-
-[Step 9/10 Download rhel pkgs]
-
-=> this is the step which will work on rhel only, for other platform different packages has to be downloaded.
+::
-Following is considered as sucessfull run of this part:
+ createrepo ../resources/pkg/rhel
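+
+ # Optional, illustrative check: createrepo generates its metadata under repodata/
+ ls ../resources/pkg/rhel/repodata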
-::
- Available: 1:net-snmp-devel-5.7.2-32.el7.i686 (rhel-7-server-rpms)
- net-snmp-devel = 1:5.7.2-32.el7
- Available: 1:net-snmp-devel-5.7.2-33.el7_5.2.i686 (rhel-7-server-rpms)
- net-snmp-devel = 1:5.7.2-33.el7_5.2
- Dependency resolution failed, some packages will not be downloaded.
- No Presto metadata available for rhel-7-server-rpms
- https://ftp.icm.edu.pl/pub/Linux/fedora/linux/epel/7/x86_64/Packages/p/perl-CDB_File-0.98-9.el7.x86_64.rpm:
- [Errno 12\] Timeout on
- https://ftp.icm.edu.pl/pub/Linux/fedora/linux/epel/7/x86_64/Packages/p/perl-CDB_File-0.98-9.el7.x86_64.rpm:
- (28, 'Operation timed out after 30001 milliseconds with 0 out of 0 bytes
- received')
- Trying other mirror.
- Spawning worker 0 with 230 pkgs
- Spawning worker 1 with 230 pkgs
- Spawning worker 2 with 230 pkgs
- Spawning worker 3 with 230 pkgs
- Spawning worker 4 with 229 pkgs
- Spawning worker 5 with 229 pkgs
- Spawning worker 6 with 229 pkgs
- Spawning worker 7 with 229 pkgs
- Workers Finished
- Saving Primary metadata
- Saving file lists metadata
- Saving other metadata
- Generating sqlite DBs
- Sqlite DBs complete
-
-[Step 10/10 Download sdnc-ansible-server packages]
-
-=> there is again no retry logic in this part, it is collecting packages for sdnc-ansible-server in the exactly same way how that container is doing it, however there is a bug in upstream that image in place will not work with those packages as old ones are not available and newer are not compatible with other stuff inside that image
+This concludes the SW download part required for creating the ONAP offline platform.
Part 3. Populate local nexus
----------------------------
@@ -234,18 +155,24 @@ Prerequisites:
.. note:: In case you skipped the Part 2 for the artifacts download, please ensure that the copy of resources data are untarred in *./onap-offline/../resources/*
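+
+A hedged example of restoring the resources from a previously created package; the tarball name follows the offline-<PROJECT_NAME>-<PROJECT_VERSION>-resources.tar convention from Part 5 and the archive layout is assumed to map directly onto the resources directory:
+
+::
+
+  mkdir -p ../resources
+  tar -xf offline-onap-4.0.0-resources.tar -C ../resources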
-Whole nexus blob data will be created by running script build\_nexus\_blob.sh.
+The whole nexus blob data will be created by running the build_nexus_blob.sh script.
It will load the listed docker images, run the Nexus, configure it as npm, pypi
and docker repositories. Then it will push all listed npm and pypi packages and
docker images to the repositories. After all is done the repository container
is stopped.
+.. note:: The build_nexus_blob.sh script uses the docker, npm and pip data lists to build the nexus blob. Unfortunately there are currently two different docker data lists (RKE & ONAP), so they need to be merged first, as shown in the following snippet. This problem will be fixed in OOM-1890.
+
You can run the script as following example:
-``$ ./install/onap-offline/build_nexus_blob.sh onap_3.0.2``
+::
+
+ # merge RKE and ONAP app data lists
+ cat ./build/data_lists/rke_docker_images.list >> ./build/data_lists/onap_docker_images.list
+
+ ./build/build_nexus_blob.sh
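+
+Appending with cat will create duplicate entries if the merge is executed more than once. A hedged, idempotent alternative (not part of the original procedure) is to merge the lists with sort -u, which is safe because sort reads all input before writing its output file:
+
+::
+
+  # merge the RKE and ONAP docker data lists without creating duplicate entries
+  sort -u ./build/data_lists/rke_docker_images.list ./build/data_lists/onap_docker_images.list \
+    -o ./build/data_lists/onap_docker_images.list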
-Where the onap_3.0.2 is the tag to specify which lists will be used for the
-resources
+.. note:: Within the scope of the current release we aim to maintain just a single example set of data lists; the tags used in previous releases are not needed anymore. The data lists also cover the latest versions verified by us, although users are free to build their own data lists.
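+
+For illustration, a docker data list is assumed here to be a plain text file with one image reference per line, so a custom list can be assembled with any text editor; the entries below are placeholders only:
+
+::
+
+  # hypothetical excerpt of a custom docker data list
+  nginx:1.15.6
+  library/busybox:1.30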
Once the Nexus data blob is created, the docker images and npm and pypi
packages can be deleted to reduce the package size as they won't be needed in
@@ -255,9 +182,9 @@ E.g.
::
- rm -f /tmp/onap-offline/resources/offline_data/docker_images_for_nexus/*
- rm -rf /tmp/onap-offline/resources/offline_data/npm_tar
- rm -rf /tmp/onap-offline/resources/offline_data/pypi
+ rm -f /tmp/resources/offline_data/docker_images_for_nexus/*
+ rm -rf /tmp/resources/offline_data/npm_tar
+ rm -rf /tmp/resources/offline_data/pypi
Part 4. Application helm charts preparation and patching
--------------------------------------------------------
@@ -267,13 +194,13 @@ offline. Use the following command:
::
- ./build/fetch\_and\_patch\_charts.sh <helm charts repo> <commit/tag/branch> <patchfile> <target\_dir>
+ ./build/fetch_and_patch_charts.sh <helm charts repo> <commit/tag/branch> <patchfile> <target_dir>
For example:
::
- ./build/fetch_and_patch_charts.sh https://gerrit.onap.org/r/oom master /tmp/onap-offline/patches/onap.patch /tmp/oom-clone
+ ./build/fetch_and_patch_charts.sh https://gerrit.onap.org/r/oom 0b904977dde761d189874d6dc6c527cd45928 /tmp/onap-offline/patches/onap.patch /tmp/oom-clone
Part 5. Creating offline installation package
---------------------------------------------
@@ -288,11 +215,11 @@ Example values below are setup according to steps done in this guide to package
+---------------------------------------+------------------------------------------------------------------------------+
| Parameter | Description |
+=======================================+==============================================================================+
-| HELM\_CHARTS\_DIR | directory with Helm charts for the application |
+| HELM_CHARTS_DIR | directory with Helm charts for the application |
| | |
| | Example: /tmp/oom-clone/kubernetes |
+---------------------------------------+------------------------------------------------------------------------------+
-| APP\_CONFIGURATION | application install configuration (application_configuration.yml) for |
+| APP_CONFIGURATION | application install configuration (application_configuration.yml) for |
| | ansible installer and custom ansible role code directories if any. |
| | |
| | Example:: |
@@ -303,11 +230,11 @@ Example values below are setup according to steps done in this guide to package
| | ) |
| | |
+---------------------------------------+------------------------------------------------------------------------------+
-| APP\_BINARY\_RESOURCES\_DIR | directory with all (binary) resources for offline infra and application |
+| APP_BINARY_RESOURCES_DIR | directory with all (binary) resources for offline infra and application |
| | |
-| | Example: /tmp/onap-offline/resources |
+| | Example: /tmp/resources |
+---------------------------------------+------------------------------------------------------------------------------+
-| APP\_AUX\_BINARIES | additional binaries such as docker images loaded during runtime [optional] |
+| APP_AUX_BINARIES | additional binaries such as docker images loaded during runtime [optional] |
+---------------------------------------+------------------------------------------------------------------------------+
Offline installer packages are created with prepopulated data via
@@ -321,13 +248,81 @@ E.g.
::
- ./build/package.sh onap 3.0.2 /tmp/package
+ ./build/package.sh onap 4.0.0 /tmp/package
So in the target directory you should find tar files with
::
- offline-<PROJECT\_NAME>-<PROJECT\_VERSION>-sw.tar
- offline-<PROJECT\_NAME>-<PROJECT\_VERSION>-resources.tar
- offline-<PROJECT\_NAME>-<PROJECT\_VERSION>-aux-resources.tar
+ offline-<PROJECT_NAME>-<PROJECT_VERSION>-sw.tar
+ offline-<PROJECT_NAME>-<PROJECT_VERSION>-resources.tar
+ offline-<PROJECT_NAME>-<PROJECT_VERSION>-aux-resources.tar
+
+
+Appendix 1. Step-by-step download procedure
+-------------------------------------------
+
+**Step 1 - docker images**
+
+::
+
+ # This step parses all 3 docker data lists (offline infrastructure images, rke k8s images & onap images)
+ # and starts building the onap offline platform in the /tmp/resources folder
+
+ ./build/download/download.py --docker ./build/data_lists/infra_docker_images.list ../resources/offline_data/docker_images_infra \
+ --docker ./build/data_lists/rke_docker_images.list ../resources/offline_data/docker_images_for_nexus \
+ --docker ./build/data_lists/onap_docker_images.list ../resources/offline_data/docker_images_for_nexus
+
+
+**Step 2 - building own dns image**
+
+::
+
+ # We are building our own dns image within our offline infrastructure
+ ./build/creating_data/create_nginx_image/01create-image.sh /tmp/resources/offline_data/docker_images_infra
+
+**Step 3 - git repos**
+
+::
+
+ # The following step will download all git repos
+ ./build/download/download.py --git ./build/data_lists/onap_git_repos.list ../resources/git-repo
+
+**Step 4 - http files**
+
+ToDo: a complete and verified list of http files will be available during/after the vFWCL testcase
+
+**Step 5 - npm packages**
+
+::
+
+ # The following step will download all npm packages
+ ./build/download/download.py --npm ./build/data_lists/onap_npm.list ../resources/offline_data/npm_tar
+
+**Step 6 - binaries**
+
+::
+
+ # The following step will download and prepare the rke, kubectl and helm binaries
+ ./build/download/download.py --http ./build/data_lists/infra_bin_utils.list ../resources/downloads
+ tar -xf ../resources/downloads/helm-v2.12.3-linux-amd64.tar.gz linux-amd64/helm -O > ../resources/downloads/helm
+ rm -f ../resources/downloads/helm-v2.12.3-linux-amd64.tar.gz
+ mv ../resources/downloads/rke_linux-amd64 rke
+
+**Step 7 - rpms**
+
+::
+
+ # The following step will download all rpms and create the repo
+ ./build/download/download.py --rpm ./build/data_lists/onap_rpm.list ../resources/pkg/rhel
+
+ createrepo ../resources/pkg/rhel
+
+**Step 8 - pip packages**
+
+::
+
+ # The following step will download all pip packages
+ ./build/download/download.py --pypi ./build/data_lists/onap_pip_packages.list ../resources/offline_data/pypi
+
diff --git a/helm_deployment_status.py b/helm_deployment_status.py
index 8f9a931d..8917e992 100755
--- a/helm_deployment_status.py
+++ b/helm_deployment_status.py
@@ -25,7 +25,7 @@ import sys
import argparse
import yaml
import requests
-from subprocess import Popen,STDOUT,PIPE
+from subprocess import Popen,STDOUT,PIPE,check_output
import datetime
from time import sleep
from os.path import expanduser
@@ -137,7 +137,7 @@ def check_in_loop(k8s, max_time, sleep_time, verbosity):
return ready
def check_helm_releases():
- helm = subprocess.check_output(['helm', 'ls'])
+ helm = check_output(['helm', 'ls'])
if helm == '':
sys.exit('No Helm releases detected.')
helm_releases = csv.DictReader(
@@ -180,9 +180,7 @@ def parse_args():
default=expanduser('~') + '/.kube/config',
help='path to .kube/config file')
parser.add_argument('--health-path', '-hp', help='path to ONAP robot ete-k8s.sh')
- parser.add_argument('--health-mode', default='health', help='healthcheck mode',
- choices=('health','healthdist','distribute','instantiate','instantiateVFWCL',
- 'instantiateDemoVFWCL','portal'))
+ parser.add_argument('--health-mode', '-hm', default='health', help='healthcheck mode')
parser.add_argument('--no-helm', action='store_true', help='Do not check Helm')
parser.add_argument('--check-frequency', '-w', default=300, type=int,
help='time between readiness checks in seconds')
diff --git a/patches/onap-patch-role/tasks/main.yml b/patches/onap-patch-role/tasks/main.yml
index fb81f200..474b8ed4 100644
--- a/patches/onap-patch-role/tasks/main.yml
+++ b/patches/onap-patch-role/tasks/main.yml
@@ -4,15 +4,15 @@
- name: Check presence of files for NPM patching
stat:
path: "{{ app_helm_charts_infra_directory }}/{{ item }}"
- with_items:
+ loop:
- common/dgbuilder/templates/deployment.yaml
register: npm_files_check
- name: Check presence of files for nexus domain resolving
stat:
path: "{{ app_helm_charts_infra_directory }}/{{ item }}"
- with_items:
- - oof/charts/oof-cmso/charts/oof-cmso-service/resources/config/msosimulator.sh
+ loop:
+ - oof/charts/oof-cmso/charts/oof-cmso-service/templates/deployment.yaml
register: hosts_files_check
- name: Patch OOM - set npm registry
@@ -22,15 +22,15 @@
line: '\g<1>npm set registry "http://nexus.{{ ansible_nodename }}/repository/npm-private/";'
backrefs: yes
state: present
- with_items: "{{ npm_files_check.results }}"
+ loop: "{{ npm_files_check.results }}"
when: item.stat.exists
- name: Patch OOM - nexus domain resolving
lineinfile:
path: "{{ item.stat.path }}"
- regexp: '^(.*)HOSTS_FILE_RECORD'
- line: '\g<1>{{ cluster_ip }} {{ simulated_hosts.nexus | join(" ") }} >> /etc/hosts;'
+ regexp: '^(.*)INFRA_CLUSTER_IP'
+ line: '\g<1>{{ cluster_ip }}'
backrefs: yes
state: present
- with_items: "{{ hosts_files_check.results }}"
+ loop: "{{ hosts_files_check.results }}"
when: item.stat.exists
diff --git a/patches/onap.patch b/patches/onap.patch
index 05e217e7..a135ad38 100644
--- a/patches/onap.patch
+++ b/patches/onap.patch
@@ -1,20 +1,29 @@
diff --git a/kubernetes/oof/charts/oof-cmso/charts/oof-cmso-service/resources/config/msosimulator.sh b/kubernetes/oof/charts/oof-cmso/charts/oof-cmso-service/resources/config/msosimulator.sh
-index c87a26b..79ad921 100644
+index c87a26b..4212c96 100644
--- a/kubernetes/oof/charts/oof-cmso/charts/oof-cmso-service/resources/config/msosimulator.sh
+++ b/kubernetes/oof/charts/oof-cmso/charts/oof-cmso-service/resources/config/msosimulator.sh
-@@ -1,4 +1,10 @@
+@@ -1,4 +1,4 @@
#!/bin/sh
-pip install flask
-pip install requests
-+
-+# for some reason DNS is not working properly on this pod
-+# therefore we need to explicitly add record just for
-+# the purpose of this script
-+HOSTS_FILE_RECORD >> /etc/hosts
-+
+pip install -i https://nexus3.onap.org/repository/pypi-private/simple/ --trusted-host nexus3.onap.org flask
+pip install -i https://nexus3.onap.org/repository/pypi-private/simple/ --trusted-host nexus3.onap.org requests
python /share/etc/config/mock.py
+diff --git a/kubernetes/oof/charts/oof-cmso/charts/oof-cmso-service/templates/deployment.yaml b/kubernetes/oof/charts/oof-cmso/charts/oof-cmso-service/templates/deployment.yaml
+index b26f6dd..2d31aad 100644
+--- a/kubernetes/oof/charts/oof-cmso/charts/oof-cmso-service/templates/deployment.yaml
++++ b/kubernetes/oof/charts/oof-cmso/charts/oof-cmso-service/templates/deployment.yaml
+@@ -30,6 +30,10 @@ spec:
+ app: {{ include "common.name" . }}
+ release: {{ .Release.Name }}
+ spec:
++ hostAliases:
++ - ip: INFRA_CLUSTER_IP
++ hostnames:
++ - nexus3.onap.org
+ initContainers:
+ - command:
+ - /root/ready.py
--
diff --git a/kubernetes/common/dgbuilder/templates/deployment.yaml b/kubernetes/common/dgbuilder/templates/deployment.yaml
@@ -36,6 +45,6 @@ index 353c231..cf38409 100644
ports:
- containerPort: {{ .Values.service.internalPort }}
readinessProbe:
---
+--
1.8.3.1