author     Tomáš Levora <t.levora@partner.samsung.com>  2019-11-12 14:28:27 +0000
committer  Tomáš Levora <t.levora@partner.samsung.com>  2019-11-12 14:28:27 +0000
commit     1ed2b6fce2c08867c55786fc4aeebe983f312b4c
tree       26d0cdd4fc4a57fd4468426db00a129387683f87
parent     92477974b68c7638a43ffc869e3ea9fb854b3534
Revert "Fix packaging offline-installer"
This reverts commit 92477974b68c7638a43ffc869e3ea9fb854b3534.

Reason for revert: the handling of application_configuration.yml in package.py is not solved yet.

Change-Id: I814c01dc1f7334a961e991c42fd485d9af4675a1
Signed-off-by: Tomas Levora <t.levora@partner.samsung.com>
Issue-ID: OOM-2201
-rwxr-xr-x  build/package.py                                          33
-rwxr-xr-x  config/application_configuration.yml (renamed from ansible/config/application_configuration.yml)   0
-rw-r--r--  docs/BuildGuide.rst                                       16
-rw-r--r--  docs/InstallGuide.rst                                      6
4 files changed, 20 insertions(+), 35 deletions(-)
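
For orientation before the full diff: the behaviour this revert restores in build/package.py resolves the offline-installer root from the script location inside the build function, and leaves --application-configuration with an empty default so the caller passes the helm override file explicitly. A minimal sketch of that restored flow, with names taken from the hunks below and the surrounding logic heavily simplified:

    import os

    # restored: locate the directory that contains package.py
    script_location = os.path.dirname(os.path.realpath(__file__))

    def build_offline_deliverables(application_configuration, output_dir):
        # restored: derive the offline-installer root from the script location at build time
        offline_repository_dir = os.path.join(script_location, '..')
        # --application-configuration defaults to '' again, so the caller supplies the
        # helm override file, e.g. <repo>/config/application_configuration.yml
        sw_content = {
            os.path.join(offline_repository_dir, 'ansible'): 'ansible',
            application_configuration: 'ansible/application/application_configuration.yml',
        }
        return sw_content
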
diff --git a/build/package.py b/build/package.py
index f3b42eb0..ad921ed3 100755
--- a/build/package.py
+++ b/build/package.py
@@ -33,8 +33,7 @@ import tarfile
import git
log = logging.getLogger(__name__)
-script_location = os.path.abspath(os.path.join(__file__, '..'))
-offline_repository_dir = os.path.abspath(os.path.join(script_location, '..'))
+script_location = os.path.dirname(os.path.realpath(__file__))
def prepare_application_repository(directory, url, refspec, patch_path):
@@ -74,7 +73,6 @@ def create_package_info_file(output_file, repository_list, tag):
Generates text file in json format containing basic information about the build
:param output_file:
:param repository_list: list of repositories to be included in package info
- :param tag: build version of packages
:return:
"""
log.info('Generating package.info file')
@@ -102,7 +100,7 @@ def create_package(tar_content, file_name):
with tarfile.open(file_name, 'w') as output_tar_file:
for src, dst in tar_content.items():
if src != '':
- output_tar_file.add(src, dst)
+ output_tar_file.add(src, dst)
def build_offline_deliverables(build_version,
@@ -112,7 +110,6 @@ def build_offline_deliverables(build_version,
application_charts_dir,
application_configuration,
application_patch_role,
- inventory_file,
output_dir,
resources_directory,
aux_directory,
@@ -129,7 +126,6 @@ def build_offline_deliverables(build_version,
:param application_charts_dir: path to directory under application repository containing helm charts
:param application_configuration: path to application configuration file (helm override configuration)
:param application_patch_role: path to application patch role (executed just before helm deploy)
- :param inventory_file: path to ansible inventory file
:param output_dir: Destination directory for saving packages
:param resources_directory: Path to resource directory
:param aux_directory: Path to aux binary directory
@@ -143,10 +139,11 @@ def build_offline_deliverables(build_version,
if os.path.exists(output_dir) and os.listdir(output_dir):
if not overwrite:
log.error('Output directory is not empty, use overwrite to force build')
- raise FileExistsError(output_dir)
+ raise FileExistsError
shutil.rmtree(output_dir)
# Git
+ offline_repository_dir = os.path.join(script_location, '..')
offline_repository = git.Repo(offline_repository_dir)
application_dir = os.path.join(output_dir, 'application_repository')
@@ -164,7 +161,6 @@ def build_offline_deliverables(build_version,
os.path.join(offline_repository_dir, 'ansible'): 'ansible',
application_configuration: 'ansible/application/application_configuration.yml',
application_patch_role: 'ansible/application/onap-patch-role',
- inventory_file: 'ansible/application/hosts.yml',
os.path.join(application_dir, application_charts_dir): 'ansible/application/helm_charts',
info_file: 'package.info'
}
@@ -177,9 +173,6 @@ def build_offline_deliverables(build_version,
info_file: 'package.info'
}
- # add separator if build version not empty
- build_version = "-" + build_version if build_version != "" else ""
-
if not skip_sw:
log.info('Building offline installer')
os.chdir(os.path.join(offline_repository_dir, 'ansible', 'docker'))
@@ -201,7 +194,7 @@ def build_offline_deliverables(build_version,
log.info('Binaries - workaround')
download_dir_path = os.path.join(resources_directory, 'downloads')
os.chdir(download_dir_path)
- for file in os.listdir(download_dir_path):
+ for file in os.listdir():
if os.path.islink(file):
os.unlink(file)
@@ -221,7 +214,7 @@ def build_offline_deliverables(build_version,
create_package(resources_content, resources_package_tar_path)
if not skip_aux:
- aux_package_tar_path = os.path.join(output_dir, 'aux_package' + build_version + '.tar')
+ aux_package_tar_path = os.path.join(output_dir, 'aux_package'+ build_version + '.tar')
create_package(aux_content, aux_package_tar_path)
shutil.rmtree(application_dir)
@@ -233,7 +226,7 @@ def run_cli():
"""
parser = argparse.ArgumentParser(description='Create Package For Offline Installer')
parser.add_argument('--build-version',
- help='version of the build', default='')
+ help='version of the build', default='custom')
parser.add_argument('application_repository_url', metavar='application-repository-url',
help='git repository hosting application helm charts')
parser.add_argument('--application-repository_reference', default='master',
@@ -241,19 +234,16 @@ def run_cli():
parser.add_argument('--application-patch_file',
help='git patch file to be applied over application repository', default='')
parser.add_argument('--application-charts_dir',
- help='path to directory under application repository containing helm charts ',
- default='kubernetes')
+ help='path to directory under application repository containing helm charts ', default='kubernetes')
parser.add_argument('--application-configuration',
help='path to application configuration file (helm override configuration)',
- default=os.path.join(offline_repository_dir, 'ansible/config/application_configuration.yml'))
+ default='')
parser.add_argument('--application-patch-role',
help='path to application patch role file (ansible role) to be executed right before installation',
default='')
- parser.add_argument('--inventory-file', help="path to ansible inventory file",
- default=os.path.join(offline_repository_dir, 'ansible/inventory/hosts.yml'))
- parser.add_argument('--output-dir', '-o', default=os.path.join(offline_repository_dir, '../packages'),
+ parser.add_argument('--output-dir', '-o', default=os.path.join(script_location, '..', '..'),
help='Destination directory for saving packages')
- parser.add_argument('--resources-directory', default=os.path.join(offline_repository_dir, '../resources'),
+ parser.add_argument('--resources-directory', default='',
help='Path to resource directory')
parser.add_argument('--aux-directory',
help='Path to aux binary directory', default='')
@@ -281,7 +271,6 @@ def run_cli():
args.application_charts_dir,
args.application_configuration,
args.application_patch_role,
- args.inventory_file,
args.output_dir,
args.resources_directory,
args.aux_directory,
diff --git a/ansible/config/application_configuration.yml b/config/application_configuration.yml
index 0c082867..0c082867 100755
--- a/ansible/config/application_configuration.yml
+++ b/config/application_configuration.yml
diff --git a/docs/BuildGuide.rst b/docs/BuildGuide.rst
index d0a558ba..27c0835e 100644
--- a/docs/BuildGuide.rst
+++ b/docs/BuildGuide.rst
@@ -128,18 +128,14 @@ so one might try following command to download most of the required artifacts in
::
# following arguments are provided
- # all data lists are taken from ./build/data_lists/ folder
+ # all data lists are taken in ./build/data_lists/ folder
# all resources will be stored in expected folder structure within ../resources folder
./build/download/download.py --docker ./build/data_lists/infra_docker_images.list ../resources/offline_data/docker_images_infra \
- --http ./build/data_lists/infra_bin_utils.list ../resources/downloads
-
- # following docker images does not neccessary need to be stored under resources as they load into repository in next part
- # if second argument for --docker is not present, images are just pulled and cached.
- # Warning: script must be run twice separately, for more details run download.py --help
- ./build/download/download.py --docker ./build/data_lists/rke_docker_images.list \
+ --docker ./build/data_lists/rke_docker_images.list \
--docker ./build/data_lists/k8s_docker_images.list \
--docker ./build/data_lists/onap_docker_images.list \
+ --http ./build/data_lists/infra_bin_utils.list ../resources/downloads
Alternatively, step-by-step procedure is described in Appendix 1.
@@ -152,7 +148,7 @@ Part 3. Populate local nexus
Prerequisites:
- All data lists and resources which are pushed to local nexus repository are available
-- Following ports are not occupied by another service: 80, 8081, 8082, 10001
+- Following ports are not occupied buy another service: 80, 8081, 8082, 10001
- There's no docker container called "nexus"
.. note:: In case you skipped the Part 2 for the artifacts download, please ensure that the onap docker images are cached and copy of resources data are untarred in *./onap-offline/../resources/*
@@ -189,13 +185,13 @@ From onap-offline directory run:
::
- ./build/package.py <helm charts repo> --build-version <version> --application-repository_reference <commit/tag/branch> --output-dir <target\_dir> --resources-directory <target\_dir>
+ ./build/package.py <helm charts repo> --build_version "" --application-repository_reference <commit/tag/branch> --output-dir <target\_dir> --resources-directory <target\_dir>
For example:
::
- ./build/package.py https://gerrit.onap.org/r/oom --application-repository_reference master --output-dir /tmp/packages --resources-directory /tmp/resources
+ ./build/package.py https://gerrit.onap.org/r/oom --build_version "" --application-repository_reference master --output-dir /tmp/packages --resources-directory /tmp/resources
In the target directory you should find tar files:
diff --git a/docs/InstallGuide.rst b/docs/InstallGuide.rst
index 947cd727..9239cad9 100644
--- a/docs/InstallGuide.rst
+++ b/docs/InstallGuide.rst
@@ -233,7 +233,7 @@ After all the changes, the ``'hosts.yml'`` should look similar to this::
infrastructure:
hosts:
infrastructure-server:
- ansible_host: 10.8.8.100
+ ansible_host: 10.8.8.13
#IP used for communication between infra and kubernetes nodes, must be specified.
cluster_ip: 10.8.8.100
@@ -326,7 +326,7 @@ Second one controls time zone setting on host. It's value should be time zone na
Final configuration can resemble the following::
resources_dir: /data
- resources_filename: resources_package.tar
+ resources_filename: resources-package.tar
app_data_path: /opt/onap
app_name: onap
timesync:
@@ -432,7 +432,7 @@ Part 4. Post-installation and troubleshooting
After all of the playbooks are run successfully, it will still take a lot of time until all pods are up and running. You can monitor your newly created kubernetes cluster for example like this::
- $ ssh -i ~/.ssh/offline_ssh_key root@10.8.8.100 # tailor this command to connect to your infra-node
+ $ ssh -i ~/.ssh/offline_ssh_key root@10.8.8.4 # tailor this command to connect to your infra-node
$ watch -d -n 5 'kubectl get pods --all-namespaces'
Alternatively you can monitor progress with ``helm_deployment_status.py`` script located in offline-installer directory. Transfer it to infra-node and run::