-rw-r--r--  ansible/docker/Dockerfile                                      |  2
-rwxr-xr-x  ansible/group_vars/all.yml                                     |  3
-rw-r--r--  ansible/resources.yml                                          |  6
-rw-r--r--  ansible/roles/nginx/defaults/main.yml                          | 21
-rw-r--r--  ansible/roles/nginx/tasks/main.yml                             | 19
-rw-r--r--  ansible/roles/package-repository/defaults/main.yml             |  8
-rw-r--r--  ansible/roles/package-repository/tasks/main.yml                | 42
-rw-r--r--  ansible/roles/resource-data/defaults/main.yml                  |  2
-rw-r--r--  ansible/roles/resource-data/tasks/main.yml                     | 32
-rw-r--r--  ansible/roles/resource-data/tasks/nfs-upload.yml               | 53
-rw-r--r--  ansible/roles/resource-data/tasks/ssh-upload.yml               | 60
-rw-r--r--  ansible/roles/resource-data/tasks/unarchive-nfs-resource.yml   | 30
-rw-r--r--  ansible/roles/resource-data/tasks/unarchive-resource.yml       | 55
-rw-r--r--  ansible/roles/resource-data/tasks/unarchive-ssh-resource.yml   | 45
-rw-r--r--  ansible/roles/resource-data/tasks/upload_resources.yml         | 15
-rw-r--r--  ansible/site.yml                                               |  2
-rw-r--r--  ansible/upload_resources.yml                                   | 49
-rw-r--r--  build/data_lists/onap_3.0.1-git_repos.list                     | 12
-rw-r--r--  docs/InstallGuide.rst                                          | 30
19 files changed, 292 insertions(+), 194 deletions(-)
diff --git a/ansible/docker/Dockerfile b/ansible/docker/Dockerfile
index f249d3b6..8056b9fc 100644
--- a/ansible/docker/Dockerfile
+++ b/ansible/docker/Dockerfile
@@ -1,6 +1,6 @@
FROM alpine:3.8
-ARG ansible_version=2.6.3
+ARG ansible_version=2.7.8
LABEL ansible_version=$ansible_version vendor=Samsung
# Install Ansible build dependencies
diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
index 85363951..0d22ac5d 100755
--- a/ansible/group_vars/all.yml
+++ b/ansible/group_vars/all.yml
@@ -59,9 +59,6 @@ certificates:
# Default value is to allow redeploy
redeploy_k8s_env: yes
-# Distribute offline software package (rpm,apt) repository
-deploy_package_repository: yes
-
# Offline solution is deploying app specific rpm repository and requires some name
# also for k8s cluster
# e.g. app_name: onap
diff --git a/ansible/resources.yml b/ansible/resources.yml
new file mode 100644
index 00000000..8e779567
--- /dev/null
+++ b/ansible/resources.yml
@@ -0,0 +1,6 @@
+---
+- name: Transfer needed resources from resource to infra servers
+ hosts: resources[0], infrastructure
+ serial: 1
+ roles:
+ - resource-data
diff --git a/ansible/roles/nginx/defaults/main.yml b/ansible/roles/nginx/defaults/main.yml
new file mode 100644
index 00000000..7c9fa0be
--- /dev/null
+++ b/ansible/roles/nginx/defaults/main.yml
@@ -0,0 +1,21 @@
+---
+simulated_hosts:
+ git:
+ http:
+ nexus:
+all_simulated_hosts:
+ "{{ simulated_hosts.git + simulated_hosts.http + simulated_hosts.nexus }}"
+
+nginx:
+ ports:
+ - "80:80"
+ - "443:443"
+ - "10001:443"
+ volumes:
+ - "{{ app_data_path }}/cfg/nginx.conf:/etc/nginx/nginx.conf:ro"
+ - "{{ app_data_path }}/certs:/etc/nginx/certs:ro"
+ - "{{ app_data_path }}/git-repo:/srv/git:rw"
+ - "{{ app_data_path }}/http:/srv/http:rw"
+ - "{{ app_data_path }}/pkg/rhel:/srv/http/repo.infra-server/rhel:rw"
+ - "{{ app_data_path }}/pkg/ubuntu/xenial:/srv/http/repo.infra-server/ubuntu/xenial:rw"
+ - /var/log/nginx:/var/log/nginx:rw
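
Editor's note (not part of the commit): with ports and volumes moved into role
defaults, a deployment can now override them without touching the role. A minimal
sketch, assuming standard Ansible variable precedence (role defaults are the
lowest-priority source); the file placement and values are hypothetical, and since
Ansible replaces the whole "nginx" dict rather than merging it, an override must
restate every key it still needs:

    # group_vars/infrastructure.yml (hypothetical override file)
    nginx:
      ports:
        - "80:80"
        - "443:443"
      volumes:
        - "{{ app_data_path }}/cfg/nginx.conf:/etc/nginx/nginx.conf:ro"
        - "{{ app_data_path }}/certs:/etc/nginx/certs:ro"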
diff --git a/ansible/roles/nginx/tasks/main.yml b/ansible/roles/nginx/tasks/main.yml
index c59be75f..1ec0adbc 100644
--- a/ansible/roles/nginx/tasks/main.yml
+++ b/ansible/roles/nginx/tasks/main.yml
@@ -16,23 +16,18 @@
state: present
timeout: 120
+- name: Create nexus network
+ docker_network:
+ name: nexus_network
+ state: present
+
- name: Start nginx
docker_container:
name: own_nginx
image: own_nginx
networks:
- name: nexus_network
- ports:
- - "80:80"
- - "443:443"
- - "10001:443"
- volumes:
- - "{{ app_data_path }}/cfg/nginx.conf:/etc/nginx/nginx.conf:ro"
- - "{{ app_data_path }}/certs:/etc/nginx/certs:ro"
- - "{{ app_data_path }}/git-repo:/srv/git:rw"
- - "{{ app_data_path }}/http:/srv/http:rw"
- - "{{ app_data_path }}/pkg/rhel:/srv/http/repo.infra-server/rhel:rw"
- - "{{ app_data_path }}/pkg/ubuntu/xenial:/srv/http/repo.infra-server/ubuntu/xenial:rw"
- - /var/log/nginx:/var/log/nginx:rw
+ ports: "{{ nginx.ports }}"
+ volumes: "{{ nginx.volumes }}"
state: started
restart_policy: unless-stopped
diff --git a/ansible/roles/package-repository/defaults/main.yml b/ansible/roles/package-repository/defaults/main.yml
index ea5796df..678b6bd4 100644
--- a/ansible/roles/package-repository/defaults/main.yml
+++ b/ansible/roles/package-repository/defaults/main.yml
@@ -1,2 +1,8 @@
---
-deploy_package_repository: true
+package_repositories:
+ - name: "{{ app_name }}"
+ file: "{{ app_name | lower }}"
+ description: "{{ app_name | upper }} offline repository"
+ baseurl: "{{ 'http://repo.infra-server/rhel' if 'infrastructure' not in group_names else 'file://' + app_data_path + '/pkg/rhel' }}"
+ gpgcheck: false
+ enabled: true
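
Editor's note (not part of the commit): the repository definition is now data
driven, so additional repositories can be added by appending entries to
package_repositories instead of writing new tasks. A hedged sketch with a
hypothetical second entry; keys omitted there fall back to the defaults used by
the loop task below (gpgcheck: true, enabled: false, state: present):

    package_repositories:
      - name: "{{ app_name }}"        # entry introduced by this commit
        file: "{{ app_name | lower }}"
        description: "{{ app_name | upper }} offline repository"
        baseurl: "http://repo.infra-server/rhel"
        gpgcheck: false
        enabled: true
      - name: extras                  # hypothetical additional repository
        file: extras
        description: "Extra offline packages"
        baseurl: "http://repo.infra-server/extras"
        enabled: true                 # gpgcheck and state use the task defaults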
diff --git a/ansible/roles/package-repository/tasks/main.yml b/ansible/roles/package-repository/tasks/main.yml
index 686310e4..4949345a 100644
--- a/ansible/roles/package-repository/tasks/main.yml
+++ b/ansible/roles/package-repository/tasks/main.yml
@@ -7,13 +7,41 @@
insertbefore: BOF
become: true
+- name: Disable all OS default repositories
+ block:
+ - name: Find repo files names
+ find:
+ paths: /etc/yum.repos.d
+ pattern: '*.repo'
+ register: repo_files
+
+ - name: Get all defined offline repo names
+ set_fact: package_repositories_names="{{ package_repositories | selectattr('name', 'defined') | map(attribute='name') | list }}"
+
+ - name: Backup repo files
+ copy:
+ remote_src: yes
+ src: "{{ item.path }}"
+ dest: "{{ item.path }}.disabled"
+ loop: "{{ repo_files.files }}"
+ when: "(item.path | basename | splitext)[0] not in package_repositories_names"
+
+ - name: Remove disabled repo files
+ file:
+ path: "{{ item.path }}"
+ state: absent
+ loop: "{{ repo_files.files }}"
+ when: "(item.path | basename | splitext)[0] not in package_repositories_names"
+ become: yes
+
- name: Add application offline package repository
yum_repository:
- name: "{{ app_name }}"
- file: "{{ app_name | lower }}"
- description: "{{ app_name | upper }} offline repository"
- baseurl: "{{ 'http://repo.infra-server/rhel' if 'infrastructure' not in group_names else 'file://' + app_data_path + '/pkg/rhel' }}"
- gpgcheck: false
- enabled: true
- when: deploy_package_repository
+ name: "{{ item.name }}"
+ file: "{{ item.file }}"
+ description: "{{ item.description | default('') }}"
+ baseurl: "{{ item.baseurl | default('') }}"
+ gpgcheck: "{{ item.gpgcheck | default(true) }}"
+ enabled: "{{ item.enabled | default(false) }}"
+ state: "{{ item.state | default('present') }}"
+ loop: "{{ package_repositories }}"
become: true
diff --git a/ansible/roles/resource-data/defaults/main.yml b/ansible/roles/resource-data/defaults/main.yml
new file mode 100644
index 00000000..8a1adfa1
--- /dev/null
+++ b/ansible/roles/resource-data/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+resources_on_nfs: no
diff --git a/ansible/roles/resource-data/tasks/main.yml b/ansible/roles/resource-data/tasks/main.yml
index 51127226..41046d81 100644
--- a/ansible/roles/resource-data/tasks/main.yml
+++ b/ansible/roles/resource-data/tasks/main.yml
@@ -1,2 +1,32 @@
---
-- include_tasks: "{{ transport }}-upload.yml"
+- name: Collect source resources
+ block:
+ - name: Check if source dir and files are present
+ stat:
+ path: "{{ item.source }}"
+ get_checksum: false
+ loop:
+ - { source: "{{ resources_dir }}/{{ resources_filename | default('thisdoesnotexists', true) }}",
+ target: "{{ app_data_path }}/{{ resources_filename | default('thisdoesnotexists', true) }}" }
+ - { source: "{{ resources_dir }}/{{ aux_resources_filename | default('thisdoesnotexists', true) }}",
+ target: "{{ aux_data_path }}/{{ aux_resources_filename | default('thisdoesnotexists', true) }}" }
+ register: source_path
+
+ - name: Create initial resources list of dicts
+ set_fact:
+ to_be_uploaded_resources_list: "{{
+ to_be_uploaded_resources_list | default([]) + [
+ {'file': item.item.target | basename,
+ 'destination_dir': item.item.target | dirname } ] }}"
+ loop: "{{ source_path.results }}"
+ when: item.stat.exists
+ when: inventory_hostname in groups.resources
+
+- name: "Upload resource files {{ hostvars[groups.resources.0].to_be_uploaded_resources_list }} to infrastructure"
+ include_tasks: upload_resources.yml
+ vars:
+ resources_source_host: "{{ hostvars[groups.resources.0].ansible_host | default(hostvars[groups.resources.0].inventory_hostname) }}"
+ resources_list_of_dicts: "{{ hostvars[groups.resources.0].to_be_uploaded_resources_list }}"
+ when:
+ - inventory_hostname in groups.infrastructure
+ - hostvars[groups.resources.0].to_be_uploaded_resources_list is defined
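
Editor's note (not part of the commit): to make the set_fact loop above easier to
trace, assume hypothetical values resources_dir: /data, resources_filename:
resources.tar, app_data_path: /opt/onap and no aux_resources_filename. The stat
result for the aux entry then has stat.exists false, so only the main archive is
kept and the fact built on the resource host is:

    to_be_uploaded_resources_list:
      - file: resources.tar          # item.item.target | basename
        destination_dir: /opt/onap   # item.item.target | dirname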
diff --git a/ansible/roles/resource-data/tasks/nfs-upload.yml b/ansible/roles/resource-data/tasks/nfs-upload.yml
deleted file mode 100644
index 4b5c18d1..00000000
--- a/ansible/roles/resource-data/tasks/nfs-upload.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-- name: Upload resources to infrastructure servers over nfs
- block:
- - name: Mount resources
- mount:
- path: /tmp/resource_data
- src: "{{ hostvars[groups.resources.0].ansible_host }}:{{ hostvars[groups.resources.0].resources_dir }}"
- fstype: nfs
- state: mounted
-
- - name: Unarchive resources
- unarchive:
- src: "/tmp/resource_data/{{ hostvars[groups.resources.0].resources_filename }}"
- remote_src: yes
- dest: "{{ app_data_path }}"
- when: not resources_data_check.stat.exists
-
- - name: Unarchive auxiliary resources
- unarchive:
- src: "/tmp/resource_data/{{ hostvars[groups.resources.0].aux_resources_filename }}"
- remote_src: yes
- dest: "{{ aux_data_path }}"
- when: >
- hostvars[groups.resources.0].aux_resources_filename is defined
- and hostvars[groups.resources.0].aux_resources_filename is not none
- and aux_data_path is defined and aux_data_path is not none
- and hostvars[groups.resources.0].aux_file_presence.stat.exists
- and not aux_resources_data_check.stat.exists
-
- rescue:
- - name: Removing the resources data due to an error - so the next run can try again
- command: /bin/false
- register: upload_failed
-
- always:
- - name: unmount resource dir
- mount:
- path: /tmp/resource_data
- src: "{{ hostvars[groups.resources.0].ansible_host }}:{{hostvars[groups.resources.0].resources_dir }}"
- fstype: nfs
- state: absent
-
- - name: Remove the resource data on error
- file:
- path: "{{ app_data_path }}"
- state: absent
- when: upload_failed is defined
-
- - name: Remove the auxilliary resource data on error
- file:
- path: "{{ aux_data_path }}"
- state: absent
- when: upload_failed is defined
diff --git a/ansible/roles/resource-data/tasks/ssh-upload.yml b/ansible/roles/resource-data/tasks/ssh-upload.yml
deleted file mode 100644
index bc7df37f..00000000
--- a/ansible/roles/resource-data/tasks/ssh-upload.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-- name: Upload resources to infrastructure servers over ssh
- block:
- - name: Upload ssh private key
- copy:
- src: "{{ ansible_ssh_private_key_file }}"
- dest: /root/.ssh/infra_to_resource.privkey
- mode: 0600
- owner: root
- group: root
- remote_src: no
-
- - name: Unarchive resources
- shell: >
- ssh -o StrictHostKeyChecking=no -o BatchMode=yes
- -i /root/.ssh/infra_to_resource.privkey
- {{ hostvars[groups.resources.0].ansible_host }}
- 'cat "{{ hostvars[groups.resources.0].resources_dir }}/{{ hostvars[groups.resources.0].resources_filename }}"'
- | tar -C "{{ app_data_path }}" -xf -
- args:
- warn: False
- when: not resources_data_check.stat.exists
-
- - name: Unarchive auxiliary resources
- shell: >
- ssh -i /root/.ssh/infra_to_resource.privkey
- {{ hostvars[groups.resources.0].ansible_host }}
- 'cat "{{ hostvars[groups.resources.0].resources_dir }}/{{ hostvars[groups.resources.0].aux_resources_filename }}"'
- | tar -C "{{ aux_data_path }}" -xf -
- when: >
- hostvars[groups.resources.0].aux_resources_filename is defined
- and hostvars[groups.resources.0].aux_resources_filename is not none
- and aux_data_path is defined and aux_data_path is not none
- and hostvars[groups.resources.0].aux_file_presence.stat.exists
- and not aux_resources_data_check.stat.exists
- args:
- warn: False
-
- rescue:
- - name: Removing the resources data due to an error - so the next run can try again
- command: /bin/false
- register: upload_failed
-
- always:
- - name: Remove the ssh private key
- file:
- path: /root/.ssh/infra_to_resource.privkey
- state: absent
-
- - name: Remove the resource data on error
- file:
- path: "{{ app_data_path }}"
- state: absent
- when: upload_failed is defined
-
- - name: Remove the auxilliary resource data on error
- file:
- path: "{{ aux_data_path }}"
- state: absent
- when: upload_failed is defined
diff --git a/ansible/roles/resource-data/tasks/unarchive-nfs-resource.yml b/ansible/roles/resource-data/tasks/unarchive-nfs-resource.yml
new file mode 100644
index 00000000..9f9d92d0
--- /dev/null
+++ b/ansible/roles/resource-data/tasks/unarchive-nfs-resource.yml
@@ -0,0 +1,30 @@
+---
+#
+# Expected variables
+# resources_source_host
+# resources_dir
+# resource_source_filename
+# resource_destination_directory
+# Output is upload_failed true/false
+#
+- name: "Upload resource {{ resources_dir }}/{{ resource_source_filename }} to infrastructure servers over nfs"
+ block:
+ - name: Mount resource dir
+ mount:
+ path: /tmp/resource_data
+ src: "{{ resources_source_host }}:{{ resources_dir }}"
+ fstype: nfs
+ state: mounted
+
+ - name: "Unarchive resource {{ resources_dir }}/{{ resource_source_filename }} to {{ resource_destination_directory }} dir on infrastructure servers over nfs"
+ unarchive:
+ src: "/tmp/resource_data/{{ resource_source_filename }}"
+ dest: "{{ resource_destination_directory }}"
+ remote_src: yes
+ always:
+ - name: Unmount resource dir
+ mount:
+ path: /tmp/resource_data
+ src: "{{ resources_source_host }}:{{ resources_dir }}"
+ fstype: nfs
+ state: absent
diff --git a/ansible/roles/resource-data/tasks/unarchive-resource.yml b/ansible/roles/resource-data/tasks/unarchive-resource.yml
new file mode 100644
index 00000000..79fdbfce
--- /dev/null
+++ b/ansible/roles/resource-data/tasks/unarchive-resource.yml
@@ -0,0 +1,55 @@
+---
+#
+# Wrapper to pass through the following variables:
+# resources_source_host
+# resources_dir
+# resource_source_filename
+# resource_destination_directory
+# It also handles target directory creation and cleanup on failure.
+# Idempotence is handled here as well: nothing is done if the
+# "<resource_source_filename>-uploaded" marker file already exists.
+#
+# Logically the transport method selection also belongs here, but it is left
+# to the caller, because this file is included in a loop and that would run
+# "package_facts" many times (not sure if it would matter).
+#
+- name: "Create {{ resource_destination_directory }} directory"
+ file:
+ path: "{{ resource_destination_directory }}"
+ state: directory
+
+- name: Check if resources are uploaded
+ stat:
+ path: "{{ resource_destination_directory }}/{{ resource_source_filename }}-uploaded"
+ register: uploaded
+
+- name: "Handle transport of one archive file"
+ when: not uploaded.stat.exists
+ block:
+ - name: "Get list of destination directory files"
+ find:
+ path: "{{ resource_destination_directory }}"
+ file_type: any
+ register: original_files
+
+ - name: "Unarchive resource {{ resource_source_filename }} from host {{ resources_source_host }}, transport is {{ transport }}"
+ include_tasks: "unarchive-{{ transport }}-resource.yml"
+ - file:
+ path: "{{ resource_destination_directory }}/{{ resource_source_filename }}-uploaded"
+ state: touch
+ rescue:
+ - name: "Get list of destination directory files"
+ find:
+ path: "{{ resource_destination_directory }}"
+ file_type: any
+ register: files_after_fail
+
+ - name: "Cleanup the destination directory {{ resource_destination_directory }} on error"
+ file:
+ path: "{{ item.path }}"
+ state: absent
+ with_items: "{{ files_after_fail.files | difference(original_files.files) }}"
+ when: files_after_fail is defined
+
+ - fail:
+ msg: "Upload of {{ resource_source_filename }} failed"
diff --git a/ansible/roles/resource-data/tasks/unarchive-ssh-resource.yml b/ansible/roles/resource-data/tasks/unarchive-ssh-resource.yml
new file mode 100644
index 00000000..1385ba55
--- /dev/null
+++ b/ansible/roles/resource-data/tasks/unarchive-ssh-resource.yml
@@ -0,0 +1,45 @@
+---
+#
+# Expected variables
+# resources_source_host
+# resources_dir
+# resource_source_filename
+# resource_destination_directory
+# Output is upload_failed true/false
+#
+- name: "Upload resource {{ resources_dir }}/{{ resource_source_filename }} to infrastructure servers over ssh"
+ block:
+ - name: Upload ssh private key
+ copy:
+ src: "{{ ansible_ssh_private_key_file }}"
+ dest: /root/.ssh/infra_to_resource.privkey
+ mode: 0600
+ owner: root
+ group: root
+
+ - name: Detect if archive is compressed
+ shell: >
+ file "{{ resources_dir }}/{{ resource_source_filename }}"
+ | grep "compressed"
+ register: compressed
+ failed_when: compressed.rc > 1
+ delegate_to: "{{ resources_source_host }}"
+
+ - name: Set tar extract options
+ set_fact:
+ tar_extract_options: "{{ '-xzf' if compressed.rc == 0 else '-xf' }}"
+
+ - name: "Unarchive resource {{ resources_dir }}/{{ resource_source_filename }} to {{ resource_destination_directory }} dir on infrastructure servers over ssh"
+ shell: >
+ ssh -o StrictHostKeyChecking=no -o BatchMode=yes
+ -i /root/.ssh/infra_to_resource.privkey
+ {{ resources_source_host }}
+ 'cat "{{ resources_dir }}/{{ resource_source_filename }}"'
+ | tar -C "{{ resource_destination_directory }}" "{{ tar_extract_options }}" -
+ args:
+ warn: false
+ always:
+ - name: Remove the ssh private key
+ file:
+ path: /root/.ssh/infra_to_resource.privkey
+ state: absent
diff --git a/ansible/roles/resource-data/tasks/upload_resources.yml b/ansible/roles/resource-data/tasks/upload_resources.yml
new file mode 100644
index 00000000..571bc7d6
--- /dev/null
+++ b/ansible/roles/resource-data/tasks/upload_resources.yml
@@ -0,0 +1,15 @@
+---
+- name: Query package facts to check nfs-utils existence
+ package_facts:
+ manager: auto
+
+- name: Set transport fact to nfs or ssh
+ set_fact:
+ transport: "{{ 'nfs' if resources_on_nfs and 'nfs-utils' in ansible_facts.packages else 'ssh' }}"
+
+- name: "Upload resources to infrastructure servers over {{ transport }}"
+ include_tasks: unarchive-resource.yml
+ vars:
+ resource_source_filename: "{{ item.file }}"
+ resource_destination_directory: "{{ item.destination_dir }}"
+ loop: "{{ resources_list_of_dicts }}"
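
Editor's note (not part of the commit): transport selection now degrades to ssh
automatically; nfs is chosen only when the flag is set and nfs-utils is already
present in ansible_facts.packages (the role looks the package up, it does not
install it). A hypothetical inventory snippet to opt in:

    # e.g. in inventory group_vars (hypothetical; the role default is 'no')
    resources_on_nfs: yes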
diff --git a/ansible/site.yml b/ansible/site.yml
index 0df534d0..fbf2c389 100644
--- a/ansible/site.yml
+++ b/ansible/site.yml
@@ -13,7 +13,7 @@
# 3. playbooks here are more or less batch jobs and the ssh authentication
# is a precondition, which should be done during configuration of the
# installer
-- import_playbook: upload_resources.yml
+- import_playbook: resources.yml
- import_playbook: infrastructure.yml
- import_playbook: rancher_kubernetes.yml
- import_playbook: application.yml
diff --git a/ansible/upload_resources.yml b/ansible/upload_resources.yml
deleted file mode 100644
index 68010eb1..00000000
--- a/ansible/upload_resources.yml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-- name: Check for presence of auxiliary resources tar file
- hosts: resources[0]
- tasks:
- - name: Store auxiliary resources tar file info into variable
- stat:
- path: "{{ hostvars[groups.resources.0].resources_dir }}/{{ hostvars[groups.resources.0].aux_resources_filename }}"
- register: aux_file_presence
-
-- name: Check infrastructure server for presence of resources and requirements
- hosts: infrastructure
- tasks:
- - name: Check if nfs-utils is installed
- yum:
- list: nfs-utils
- register: nfs_utils_check
-
- - name: Check if the resources are already unpacked
- stat:
- path: "{{ app_data_path }}"
- register: resources_data_check
-
- - name: Check if the auxilliary resources are already unpacked
- stat:
- path: "{{ aux_data_path }}"
- register: aux_resources_data_check
- when: aux_data_path is defined and aux_data_path is not none
-
-- name: Ensure the existence of data directory/ies on infrastructure server
- hosts: infrastructure
- tasks:
- - name: Create data directory
- file:
- path: "{{ app_data_path }}"
- state: directory
-
- - name: Create auxiliary data directory
- file:
- path: "{{ aux_data_path }}"
- state: directory
- when: aux_data_path is defined and aux_data_path is not none
-
-- name: Upload resources to infrastructure server
- hosts: infrastructure
- roles:
- # use nfs or ssh and unpack resources into data directory/ies
- - role: resource-data
- vars:
- transport: "{{ 'nfs' if resources_on_nfs and (nfs_utils_check.results|selectattr('yumstate', 'match', 'installed')|list|length != 0) else 'ssh' }}"
diff --git a/build/data_lists/onap_3.0.1-git_repos.list b/build/data_lists/onap_3.0.1-git_repos.list
index 27ecd57f..5f602daa 100644
--- a/build/data_lists/onap_3.0.1-git_repos.list
+++ b/build/data_lists/onap_3.0.1-git_repos.list
@@ -1,9 +1,9 @@
#repo branch
-gerrit.onap.org/r/aaf/authz.git casablanca
-gerrit.onap.org/r/demo.git casablanca
-gerrit.onap.org/r/dmaap/messagerouter/messageservice.git casablanca
-gerrit.onap.org/r/so/docker-config.git casablanca
+gerrit.onap.org/r/aaf/authz.git 3.0.1-ONAP
+gerrit.onap.org/r/demo.git 3.0.1-ONAP
+gerrit.onap.org/r/dmaap/messagerouter/messageservice.git 3.0.1-ONAP
+gerrit.onap.org/r/so/docker-config.git 3.0.1-ONAP
github.com/rancher/community-catalog.git
git.rancher.io/rancher-catalog.git
-gerrit.onap.org/r/testsuite/properties.git casablanca
-gerrit.onap.org/r/portal.git casablanca
+gerrit.onap.org/r/testsuite/properties.git 3.0.1-ONAP
+gerrit.onap.org/r/portal.git 3.0.1-ONAP
diff --git a/docs/InstallGuide.rst b/docs/InstallGuide.rst
index f34ee03e..e91c7bd7 100644
--- a/docs/InstallGuide.rst
+++ b/docs/InstallGuide.rst
@@ -333,11 +333,41 @@ This will take a while so be patient.
- ``rancher_kubernetes.yml``
- ``application.yml``
+----
+
+.. _oooi_installguide_postinstall:
+
+Part 4. Postinstallation and troubleshooting
+--------------------------------------------
+
After all the playbooks are finished, it will still take a lot of time until all pods will be up and running. You can monitor your newly created kubernetes cluster for example like this::
$ ssh -i ~/.ssh/offline_ssh_key root@10.8.8.4 # tailor this command to connect to your infra-node
$ watch -d -n 5 'kubectl get pods --all-namespaces'
+
+The final result of the installation varies based on the number of k8s nodes used and on the distribution of pods. In some dev environments we quite frequently hit problems where not all pods get deployed properly. In a successful deployment all jobs should be in a successful state.
+This can be verified using ::
+
+ $ kubectl get jobs -n <namespace>
+
+If some job is hanging in a wrong end-state like ``'BackoffLimitExceeded'``, manual intervention is required to heal it and to make the dependent jobs pass as well. More details about a particular job state can be obtained using ::
+
+ $ kubectl describe job -n <namespace> <job_name>
+
+If manual intervention is required, one can remove the failing job and retry the helm install command directly. This will not launch a full deployment, but rather check the current state of the system and rebuild the parts which are not up and running. The exact commands are as follows ::
+
+ $ kubectl delete job -n <namespace> <job_name>
+ $ helm deploy <env_name> <helm_chart_name> --namespace <namespace_name>
+
+E.g. ``helm deploy dev local/onap --namespace onap``.
+
+Once all pods are properly deployed and in running state, one can verify functionality, e.g. by running ONAP healthchecks ::
+
+ $ cd <app_data_path>/<app_name>/helm_charts/robot
+ $ ./ete-k8s.sh onap health
+
+
-----
.. _oooi_installguide_appendix1: