From 0fb3b8f4d48a066259b8a9ea2a18d68d7644f8e5 Mon Sep 17 00:00:00 2001 From: efiacor Date: Fri, 28 Oct 2022 15:29:26 +0100 Subject: [RDT] Refactoring RTD Major refactor Moved some old docs etc to archived dir Added sub section for guides Added section for deployment options etc Signed-off-by: efiacor Change-Id: I5832c7a94d58c3110655f0c676a5f2a19172dc68 Issue-ID: OOM-3028 --- .readthedocs.yaml | 17 +- docs/_static/css/ribbon.css | 2 +- docs/archived/certs/hardcoded_certificates.csv | 18 + docs/archived/images/consul/consulHealth.png | Bin 0 -> 301016 bytes docs/archived/images/cp_vms/control_plane_1.png | Bin 0 -> 318506 bytes docs/archived/images/cp_vms/control_plane_2.png | Bin 0 -> 247673 bytes docs/archived/images/cp_vms/control_plane_3.png | Bin 0 -> 246800 bytes docs/archived/images/cp_vms/control_plane_4.png | Bin 0 -> 91840 bytes docs/archived/images/cp_vms/control_plane_5.png | Bin 0 -> 206942 bytes docs/archived/images/cp_vms/control_plane_6.png | Bin 0 -> 246780 bytes docs/archived/images/cp_vms/control_plane_7.png | Bin 0 -> 145992 bytes docs/archived/images/cp_vms/control_plane_8.png | Bin 0 -> 120550 bytes docs/archived/images/floating_ips/floating_1.png | Bin 0 -> 30028 bytes docs/archived/images/floating_ips/floating_2.png | Bin 0 -> 85017 bytes docs/archived/images/k8s/kubernetes_objects.png | Bin 0 -> 41593 bytes docs/archived/images/keys/key_pair_1.png | Bin 0 -> 296083 bytes docs/archived/images/keys/key_pair_2.png | Bin 0 -> 310423 bytes docs/archived/images/keys/key_pair_3.png | Bin 0 -> 271397 bytes docs/archived/images/keys/key_pair_4.png | Bin 0 -> 155219 bytes docs/archived/images/msb/MSB-OOM-Diagram.png | Bin 0 -> 77338 bytes docs/archived/images/network/network_1.png | Bin 0 -> 127009 bytes docs/archived/images/network/network_2.png | Bin 0 -> 133842 bytes docs/archived/images/network/network_3.png | Bin 0 -> 105085 bytes docs/archived/images/network/network_4.png | Bin 0 -> 97405 bytes docs/archived/images/nfs_server/nfs_server_1.png | Bin 0 -> 164108 bytes docs/archived/images/nfs_server/nfs_server_10.png | Bin 0 -> 20699 bytes docs/archived/images/nfs_server/nfs_server_2.png | Bin 0 -> 318506 bytes docs/archived/images/nfs_server/nfs_server_3.png | Bin 0 -> 247673 bytes docs/archived/images/nfs_server/nfs_server_4.png | Bin 0 -> 246800 bytes docs/archived/images/nfs_server/nfs_server_5.png | Bin 0 -> 91840 bytes docs/archived/images/nfs_server/nfs_server_6.png | Bin 0 -> 206942 bytes docs/archived/images/nfs_server/nfs_server_7.png | Bin 0 -> 246780 bytes docs/archived/images/nfs_server/nfs_server_8.png | Bin 0 -> 74954 bytes docs/archived/images/nfs_server/nfs_server_9.png | Bin 0 -> 26251 bytes .../images/oom_logo/oomLogoV2-Configure.png | Bin 0 -> 50668 bytes docs/archived/images/oom_logo/oomLogoV2-Delete.png | Bin 0 -> 48038 bytes docs/archived/images/oom_logo/oomLogoV2-Deploy.png | Bin 0 -> 48500 bytes docs/archived/images/oom_logo/oomLogoV2-Heal.png | Bin 0 -> 48092 bytes .../archived/images/oom_logo/oomLogoV2-Monitor.png | Bin 0 -> 50051 bytes docs/archived/images/oom_logo/oomLogoV2-Scale.png | Bin 0 -> 49430 bytes .../archived/images/oom_logo/oomLogoV2-Upgrade.png | Bin 0 -> 49259 bytes docs/archived/images/oom_logo/oomLogoV2-medium.png | Bin 0 -> 31090 bytes docs/archived/images/rke/rke_1.png | Bin 0 -> 143906 bytes docs/archived/images/sg/sg_1.png | Bin 0 -> 108229 bytes docs/archived/images/sg/sg_2.png | Bin 0 -> 153078 bytes docs/archived/images/wk_vms/worker_1.png | Bin 0 -> 126955 bytes docs/archived/images/wk_vms/worker_2.png | Bin 0 -> 247673 
bytes docs/archived/images/wk_vms/worker_3.png | Bin 0 -> 96987 bytes docs/archived/images/wk_vms/worker_4.png | Bin 0 -> 91840 bytes docs/archived/images/wk_vms/worker_5.png | Bin 0 -> 206942 bytes docs/archived/images/wk_vms/worker_6.png | Bin 0 -> 143838 bytes docs/archived/images/wk_vms/worker_7.png | Bin 0 -> 222941 bytes docs/archived/oom_developer_guide.rst | 1149 ++++++++++++++++++++ docs/archived/oom_hardcoded_certificates.rst | 18 + docs/archived/oom_quickstart_guide.rst | 284 +++++ docs/archived/oom_setup_kubernetes_rancher.rst | 531 +++++++++ docs/archived/oom_setup_paas.rst | 144 +++ docs/archived/oom_user_guide.rst | 798 ++++++++++++++ docs/archived/shell/master_nfs_node.sh | 32 + docs/archived/shell/openstack-k8s-controlnode.sh | 36 + docs/archived/shell/openstack-k8s-workernode.sh | 34 + docs/archived/shell/openstack-nfs-server.sh | 18 + docs/archived/shell/slave_nfs_node.sh | 25 + docs/archived/yaml/cluster.yml | 156 +++ .../yaml/example-integration-override-v3.yaml | 69 ++ .../yaml/example-integration-override.yaml | 56 + docs/certs/hardcoded_certificates.csv | 18 - docs/conf.py | 55 +- docs/helm/helm-search.txt | 42 - docs/images/consul/consulHealth.png | Bin 301016 -> 0 bytes docs/images/cp_vms/control_plane_1.png | Bin 318506 -> 0 bytes docs/images/cp_vms/control_plane_2.png | Bin 247673 -> 0 bytes docs/images/cp_vms/control_plane_3.png | Bin 246800 -> 0 bytes docs/images/cp_vms/control_plane_4.png | Bin 91840 -> 0 bytes docs/images/cp_vms/control_plane_5.png | Bin 206942 -> 0 bytes docs/images/cp_vms/control_plane_6.png | Bin 246780 -> 0 bytes docs/images/cp_vms/control_plane_7.png | Bin 145992 -> 0 bytes docs/images/cp_vms/control_plane_8.png | Bin 120550 -> 0 bytes docs/images/floating_ips/floating_1.png | Bin 30028 -> 0 bytes docs/images/floating_ips/floating_2.png | Bin 85017 -> 0 bytes docs/images/k8s/kubernetes_objects.png | Bin 41593 -> 0 bytes docs/images/keys/key_pair_1.png | Bin 296083 -> 0 bytes docs/images/keys/key_pair_2.png | Bin 310423 -> 0 bytes docs/images/keys/key_pair_3.png | Bin 271397 -> 0 bytes docs/images/keys/key_pair_4.png | Bin 155219 -> 0 bytes docs/images/msb/MSB-OOM-Diagram.png | Bin 77338 -> 0 bytes docs/images/network/network_1.png | Bin 127009 -> 0 bytes docs/images/network/network_2.png | Bin 133842 -> 0 bytes docs/images/network/network_3.png | Bin 105085 -> 0 bytes docs/images/network/network_4.png | Bin 97405 -> 0 bytes docs/images/nfs_server/nfs_server_1.png | Bin 164108 -> 0 bytes docs/images/nfs_server/nfs_server_10.png | Bin 20699 -> 0 bytes docs/images/nfs_server/nfs_server_2.png | Bin 318506 -> 0 bytes docs/images/nfs_server/nfs_server_3.png | Bin 247673 -> 0 bytes docs/images/nfs_server/nfs_server_4.png | Bin 246800 -> 0 bytes docs/images/nfs_server/nfs_server_5.png | Bin 91840 -> 0 bytes docs/images/nfs_server/nfs_server_6.png | Bin 206942 -> 0 bytes docs/images/nfs_server/nfs_server_7.png | Bin 246780 -> 0 bytes docs/images/nfs_server/nfs_server_8.png | Bin 74954 -> 0 bytes docs/images/nfs_server/nfs_server_9.png | Bin 26251 -> 0 bytes docs/images/oom_logo/oomLogoV2-Configure.png | Bin 50668 -> 0 bytes docs/images/oom_logo/oomLogoV2-Delete.png | Bin 48038 -> 0 bytes docs/images/oom_logo/oomLogoV2-Deploy.png | Bin 48500 -> 0 bytes docs/images/oom_logo/oomLogoV2-Heal.png | Bin 48092 -> 0 bytes docs/images/oom_logo/oomLogoV2-Monitor.png | Bin 50051 -> 0 bytes docs/images/oom_logo/oomLogoV2-Scale.png | Bin 49430 -> 0 bytes docs/images/oom_logo/oomLogoV2-Upgrade.png | Bin 49259 -> 0 bytes 
docs/images/oom_logo/oomLogoV2-medium.png | Bin 31090 -> 0 bytes docs/images/rke/rke_1.png | Bin 143906 -> 0 bytes docs/images/sg/sg_1.png | Bin 108229 -> 0 bytes docs/images/sg/sg_2.png | Bin 153078 -> 0 bytes docs/images/wk_vms/worker_1.png | Bin 126955 -> 0 bytes docs/images/wk_vms/worker_2.png | Bin 247673 -> 0 bytes docs/images/wk_vms/worker_3.png | Bin 96987 -> 0 bytes docs/images/wk_vms/worker_4.png | Bin 91840 -> 0 bytes docs/images/wk_vms/worker_5.png | Bin 206942 -> 0 bytes docs/images/wk_vms/worker_6.png | Bin 143838 -> 0 bytes docs/images/wk_vms/worker_7.png | Bin 222941 -> 0 bytes docs/index.rst | 20 +- docs/oom_cloud_setup_guide.rst | 113 -- docs/oom_developer_guide.rst | 1148 ------------------- docs/oom_hardcoded_certificates.rst | 18 - docs/oom_project_description.rst | 100 -- docs/oom_quickstart_guide.rst | 284 ----- docs/oom_setup_ingress_controller.rst | 181 --- docs/oom_setup_kubernetes_rancher.rst | 531 --------- docs/oom_setup_paas.rst | 144 --- docs/oom_user_guide.rst | 798 -------------- docs/release_notes/release-notes-amsterdam.rst | 75 -- docs/release_notes/release-notes-beijing.rst | 427 -------- docs/release_notes/release-notes-casablanca.rst | 78 -- docs/release_notes/release-notes-dublin.rst | 83 -- docs/release_notes/release-notes-elalto.rst | 84 -- docs/release_notes/release-notes-frankfurt.rst | 166 --- docs/release_notes/release-notes-guilin.rst | 150 --- docs/release_notes/release-notes-honolulu.rst | 162 --- docs/release_notes/release-notes-isntanbul.rst | 132 --- docs/release_notes/release-notes.rst | 131 --- docs/requirements-docs.txt | 8 + .../guides/access_guides/oom_access_info.rst | 21 + .../guides/access_guides/oom_ingress_access.rst | 18 + .../deployment_guides/oom_customize_overrides.rst | 48 + .../guides/deployment_guides/oom_deployment.rst | 42 + .../oom_dev_testing_local_deploy.rst | 87 ++ .../oom_helm_release_repo_deploy.rst | 44 + .../oom_helm_testing_repo_deploy.rst | 46 + .../oom_dev_config_management.rst | 444 ++++++++ .../oom_dev_container_orchestration.rst | 366 +++++++ .../development_guides/oom_dev_helm_chart_info.rst | 172 +++ .../guides/development_guides/oom_development.rst | 64 ++ .../guides/infra_guides/oom_base_config_setup.rst | 187 ++++ .../infra_guides/oom_base_optional_addons.rst | 41 + .../guides/infra_guides/oom_infra_setup.rst | 72 ++ .../infra_guides/oom_setup_ingress_controller.rst | 176 +++ .../sections/guides/user_guides/oom_user_guide.rst | 651 +++++++++++ docs/sections/oom_project_description.rst | 106 ++ .../release_notes/release-notes-amsterdam.rst | 75 ++ .../release_notes/release-notes-beijing.rst | 427 ++++++++ .../release_notes/release-notes-casablanca.rst | 78 ++ .../release_notes/release-notes-dublin.rst | 83 ++ .../release_notes/release-notes-elalto.rst | 84 ++ .../release_notes/release-notes-frankfurt.rst | 152 +++ .../release_notes/release-notes-guilin.rst | 142 +++ .../release_notes/release-notes-honolulu.rst | 155 +++ .../release_notes/release-notes-isntanbul.rst | 124 +++ .../release_notes/release-notes-jakarta.rst | 124 +++ docs/sections/release_notes/release-notes.rst | 130 +++ docs/sections/resources/helm/helm-search.txt | 42 + .../resources/images/consul/consulHealth.png | Bin 0 -> 301016 bytes .../resources/images/k8s/kubernetes_objects.png | Bin 0 -> 41593 bytes .../resources/images/msb/MSB-OOM-Diagram.png | Bin 0 -> 77338 bytes .../images/oom_logo/oomLogoV2-Configure.png | Bin 0 -> 50668 bytes .../resources/images/oom_logo/oomLogoV2-Delete.png | Bin 0 -> 48038 bytes 
.../resources/images/oom_logo/oomLogoV2-Deploy.png | Bin 0 -> 48500 bytes .../resources/images/oom_logo/oomLogoV2-Heal.png | Bin 0 -> 48092 bytes .../images/oom_logo/oomLogoV2-Monitor.png | Bin 0 -> 50051 bytes .../resources/images/oom_logo/oomLogoV2-Scale.png | Bin 0 -> 49430 bytes .../images/oom_logo/oomLogoV2-Upgrade.png | Bin 0 -> 49259 bytes .../resources/images/oom_logo/oomLogoV2-medium.png | Bin 0 -> 31090 bytes .../resources/yaml/environments_onap_demo.yaml | 103 ++ docs/shell/master_nfs_node.sh | 32 - docs/shell/openstack-k8s-controlnode.sh | 36 - docs/shell/openstack-k8s-workernode.sh | 34 - docs/shell/openstack-nfs-server.sh | 18 - docs/shell/slave_nfs_node.sh | 25 - docs/tox.ini | 32 + docs/yaml/cluster.yml | 156 --- docs/yaml/environments_onap_demo.yaml | 103 -- docs/yaml/example-integration-override-v3.yaml | 69 -- docs/yaml/example-integration-override.yaml | 56 - requirements.txt | 6 +- 191 files changed, 7786 insertions(+), 5420 deletions(-) create mode 100644 docs/archived/certs/hardcoded_certificates.csv create mode 100644 docs/archived/images/consul/consulHealth.png create mode 100644 docs/archived/images/cp_vms/control_plane_1.png create mode 100644 docs/archived/images/cp_vms/control_plane_2.png create mode 100644 docs/archived/images/cp_vms/control_plane_3.png create mode 100644 docs/archived/images/cp_vms/control_plane_4.png create mode 100644 docs/archived/images/cp_vms/control_plane_5.png create mode 100644 docs/archived/images/cp_vms/control_plane_6.png create mode 100644 docs/archived/images/cp_vms/control_plane_7.png create mode 100644 docs/archived/images/cp_vms/control_plane_8.png create mode 100644 docs/archived/images/floating_ips/floating_1.png create mode 100644 docs/archived/images/floating_ips/floating_2.png create mode 100644 docs/archived/images/k8s/kubernetes_objects.png create mode 100644 docs/archived/images/keys/key_pair_1.png create mode 100644 docs/archived/images/keys/key_pair_2.png create mode 100644 docs/archived/images/keys/key_pair_3.png create mode 100644 docs/archived/images/keys/key_pair_4.png create mode 100644 docs/archived/images/msb/MSB-OOM-Diagram.png create mode 100644 docs/archived/images/network/network_1.png create mode 100644 docs/archived/images/network/network_2.png create mode 100644 docs/archived/images/network/network_3.png create mode 100644 docs/archived/images/network/network_4.png create mode 100644 docs/archived/images/nfs_server/nfs_server_1.png create mode 100644 docs/archived/images/nfs_server/nfs_server_10.png create mode 100644 docs/archived/images/nfs_server/nfs_server_2.png create mode 100644 docs/archived/images/nfs_server/nfs_server_3.png create mode 100644 docs/archived/images/nfs_server/nfs_server_4.png create mode 100644 docs/archived/images/nfs_server/nfs_server_5.png create mode 100644 docs/archived/images/nfs_server/nfs_server_6.png create mode 100644 docs/archived/images/nfs_server/nfs_server_7.png create mode 100644 docs/archived/images/nfs_server/nfs_server_8.png create mode 100644 docs/archived/images/nfs_server/nfs_server_9.png create mode 100644 docs/archived/images/oom_logo/oomLogoV2-Configure.png create mode 100644 docs/archived/images/oom_logo/oomLogoV2-Delete.png create mode 100644 docs/archived/images/oom_logo/oomLogoV2-Deploy.png create mode 100644 docs/archived/images/oom_logo/oomLogoV2-Heal.png create mode 100644 docs/archived/images/oom_logo/oomLogoV2-Monitor.png create mode 100644 docs/archived/images/oom_logo/oomLogoV2-Scale.png create mode 100644 
docs/archived/images/oom_logo/oomLogoV2-Upgrade.png create mode 100644 docs/archived/images/oom_logo/oomLogoV2-medium.png create mode 100644 docs/archived/images/rke/rke_1.png create mode 100644 docs/archived/images/sg/sg_1.png create mode 100644 docs/archived/images/sg/sg_2.png create mode 100644 docs/archived/images/wk_vms/worker_1.png create mode 100644 docs/archived/images/wk_vms/worker_2.png create mode 100644 docs/archived/images/wk_vms/worker_3.png create mode 100644 docs/archived/images/wk_vms/worker_4.png create mode 100644 docs/archived/images/wk_vms/worker_5.png create mode 100644 docs/archived/images/wk_vms/worker_6.png create mode 100644 docs/archived/images/wk_vms/worker_7.png create mode 100644 docs/archived/oom_developer_guide.rst create mode 100644 docs/archived/oom_hardcoded_certificates.rst create mode 100644 docs/archived/oom_quickstart_guide.rst create mode 100644 docs/archived/oom_setup_kubernetes_rancher.rst create mode 100644 docs/archived/oom_setup_paas.rst create mode 100644 docs/archived/oom_user_guide.rst create mode 100644 docs/archived/shell/master_nfs_node.sh create mode 100644 docs/archived/shell/openstack-k8s-controlnode.sh create mode 100644 docs/archived/shell/openstack-k8s-workernode.sh create mode 100644 docs/archived/shell/openstack-nfs-server.sh create mode 100644 docs/archived/shell/slave_nfs_node.sh create mode 100644 docs/archived/yaml/cluster.yml create mode 100644 docs/archived/yaml/example-integration-override-v3.yaml create mode 100644 docs/archived/yaml/example-integration-override.yaml delete mode 100644 docs/certs/hardcoded_certificates.csv delete mode 100644 docs/helm/helm-search.txt delete mode 100644 docs/images/consul/consulHealth.png delete mode 100644 docs/images/cp_vms/control_plane_1.png delete mode 100644 docs/images/cp_vms/control_plane_2.png delete mode 100644 docs/images/cp_vms/control_plane_3.png delete mode 100644 docs/images/cp_vms/control_plane_4.png delete mode 100644 docs/images/cp_vms/control_plane_5.png delete mode 100644 docs/images/cp_vms/control_plane_6.png delete mode 100644 docs/images/cp_vms/control_plane_7.png delete mode 100644 docs/images/cp_vms/control_plane_8.png delete mode 100644 docs/images/floating_ips/floating_1.png delete mode 100644 docs/images/floating_ips/floating_2.png delete mode 100644 docs/images/k8s/kubernetes_objects.png delete mode 100644 docs/images/keys/key_pair_1.png delete mode 100644 docs/images/keys/key_pair_2.png delete mode 100644 docs/images/keys/key_pair_3.png delete mode 100644 docs/images/keys/key_pair_4.png delete mode 100644 docs/images/msb/MSB-OOM-Diagram.png delete mode 100644 docs/images/network/network_1.png delete mode 100644 docs/images/network/network_2.png delete mode 100644 docs/images/network/network_3.png delete mode 100644 docs/images/network/network_4.png delete mode 100644 docs/images/nfs_server/nfs_server_1.png delete mode 100644 docs/images/nfs_server/nfs_server_10.png delete mode 100644 docs/images/nfs_server/nfs_server_2.png delete mode 100644 docs/images/nfs_server/nfs_server_3.png delete mode 100644 docs/images/nfs_server/nfs_server_4.png delete mode 100644 docs/images/nfs_server/nfs_server_5.png delete mode 100644 docs/images/nfs_server/nfs_server_6.png delete mode 100644 docs/images/nfs_server/nfs_server_7.png delete mode 100644 docs/images/nfs_server/nfs_server_8.png delete mode 100644 docs/images/nfs_server/nfs_server_9.png delete mode 100644 docs/images/oom_logo/oomLogoV2-Configure.png delete mode 100644 docs/images/oom_logo/oomLogoV2-Delete.png delete mode 
100644 docs/images/oom_logo/oomLogoV2-Deploy.png delete mode 100644 docs/images/oom_logo/oomLogoV2-Heal.png delete mode 100644 docs/images/oom_logo/oomLogoV2-Monitor.png delete mode 100644 docs/images/oom_logo/oomLogoV2-Scale.png delete mode 100644 docs/images/oom_logo/oomLogoV2-Upgrade.png delete mode 100644 docs/images/oom_logo/oomLogoV2-medium.png delete mode 100644 docs/images/rke/rke_1.png delete mode 100644 docs/images/sg/sg_1.png delete mode 100644 docs/images/sg/sg_2.png delete mode 100644 docs/images/wk_vms/worker_1.png delete mode 100644 docs/images/wk_vms/worker_2.png delete mode 100644 docs/images/wk_vms/worker_3.png delete mode 100644 docs/images/wk_vms/worker_4.png delete mode 100644 docs/images/wk_vms/worker_5.png delete mode 100644 docs/images/wk_vms/worker_6.png delete mode 100644 docs/images/wk_vms/worker_7.png delete mode 100644 docs/oom_cloud_setup_guide.rst delete mode 100644 docs/oom_developer_guide.rst delete mode 100644 docs/oom_hardcoded_certificates.rst delete mode 100644 docs/oom_project_description.rst delete mode 100644 docs/oom_quickstart_guide.rst delete mode 100644 docs/oom_setup_ingress_controller.rst delete mode 100644 docs/oom_setup_kubernetes_rancher.rst delete mode 100644 docs/oom_setup_paas.rst delete mode 100644 docs/oom_user_guide.rst delete mode 100644 docs/release_notes/release-notes-amsterdam.rst delete mode 100644 docs/release_notes/release-notes-beijing.rst delete mode 100644 docs/release_notes/release-notes-casablanca.rst delete mode 100644 docs/release_notes/release-notes-dublin.rst delete mode 100644 docs/release_notes/release-notes-elalto.rst delete mode 100644 docs/release_notes/release-notes-frankfurt.rst delete mode 100644 docs/release_notes/release-notes-guilin.rst delete mode 100644 docs/release_notes/release-notes-honolulu.rst delete mode 100644 docs/release_notes/release-notes-isntanbul.rst delete mode 100644 docs/release_notes/release-notes.rst create mode 100644 docs/requirements-docs.txt create mode 100644 docs/sections/guides/access_guides/oom_access_info.rst create mode 100644 docs/sections/guides/access_guides/oom_ingress_access.rst create mode 100644 docs/sections/guides/deployment_guides/oom_customize_overrides.rst create mode 100644 docs/sections/guides/deployment_guides/oom_deployment.rst create mode 100644 docs/sections/guides/deployment_guides/oom_dev_testing_local_deploy.rst create mode 100644 docs/sections/guides/deployment_guides/oom_helm_release_repo_deploy.rst create mode 100644 docs/sections/guides/deployment_guides/oom_helm_testing_repo_deploy.rst create mode 100644 docs/sections/guides/development_guides/oom_dev_config_management.rst create mode 100644 docs/sections/guides/development_guides/oom_dev_container_orchestration.rst create mode 100644 docs/sections/guides/development_guides/oom_dev_helm_chart_info.rst create mode 100644 docs/sections/guides/development_guides/oom_development.rst create mode 100644 docs/sections/guides/infra_guides/oom_base_config_setup.rst create mode 100644 docs/sections/guides/infra_guides/oom_base_optional_addons.rst create mode 100644 docs/sections/guides/infra_guides/oom_infra_setup.rst create mode 100644 docs/sections/guides/infra_guides/oom_setup_ingress_controller.rst create mode 100644 docs/sections/guides/user_guides/oom_user_guide.rst create mode 100644 docs/sections/oom_project_description.rst create mode 100644 docs/sections/release_notes/release-notes-amsterdam.rst create mode 100644 docs/sections/release_notes/release-notes-beijing.rst create mode 100644 
docs/sections/release_notes/release-notes-casablanca.rst create mode 100644 docs/sections/release_notes/release-notes-dublin.rst create mode 100644 docs/sections/release_notes/release-notes-elalto.rst create mode 100644 docs/sections/release_notes/release-notes-frankfurt.rst create mode 100644 docs/sections/release_notes/release-notes-guilin.rst create mode 100644 docs/sections/release_notes/release-notes-honolulu.rst create mode 100644 docs/sections/release_notes/release-notes-isntanbul.rst create mode 100644 docs/sections/release_notes/release-notes-jakarta.rst create mode 100644 docs/sections/release_notes/release-notes.rst create mode 100644 docs/sections/resources/helm/helm-search.txt create mode 100644 docs/sections/resources/images/consul/consulHealth.png create mode 100644 docs/sections/resources/images/k8s/kubernetes_objects.png create mode 100644 docs/sections/resources/images/msb/MSB-OOM-Diagram.png create mode 100644 docs/sections/resources/images/oom_logo/oomLogoV2-Configure.png create mode 100644 docs/sections/resources/images/oom_logo/oomLogoV2-Delete.png create mode 100644 docs/sections/resources/images/oom_logo/oomLogoV2-Deploy.png create mode 100644 docs/sections/resources/images/oom_logo/oomLogoV2-Heal.png create mode 100644 docs/sections/resources/images/oom_logo/oomLogoV2-Monitor.png create mode 100644 docs/sections/resources/images/oom_logo/oomLogoV2-Scale.png create mode 100644 docs/sections/resources/images/oom_logo/oomLogoV2-Upgrade.png create mode 100644 docs/sections/resources/images/oom_logo/oomLogoV2-medium.png create mode 100644 docs/sections/resources/yaml/environments_onap_demo.yaml delete mode 100644 docs/shell/master_nfs_node.sh delete mode 100644 docs/shell/openstack-k8s-controlnode.sh delete mode 100644 docs/shell/openstack-k8s-workernode.sh delete mode 100644 docs/shell/openstack-nfs-server.sh delete mode 100644 docs/shell/slave_nfs_node.sh create mode 100644 docs/tox.ini delete mode 100644 docs/yaml/cluster.yml delete mode 100644 docs/yaml/environments_onap_demo.yaml delete mode 100644 docs/yaml/example-integration-override-v3.yaml delete mode 100644 docs/yaml/example-integration-override.yaml diff --git a/.readthedocs.yaml b/.readthedocs.yaml index f724264ff4..40e9e991d2 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -3,18 +3,19 @@ # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # Required +# version: 2 -formats: - - htmlzip - build: - image: latest + image: latest python: - version: 3.7 - install: - - requirements: requirements.txt + version: 3.7 + install: + - requirements: docs/requirements-docs.txt + +submodules: + include: all sphinx: - configuration: docs/conf.py + configuration: docs/conf.py \ No newline at end of file diff --git a/docs/_static/css/ribbon.css b/docs/_static/css/ribbon.css index 6008cb1a08..afb9480d67 100644 --- a/docs/_static/css/ribbon.css +++ b/docs/_static/css/ribbon.css @@ -59,5 +59,5 @@ /* fix width of the screen */ .wy-nav-content { - max-width: none; + max-width: 800px; } diff --git a/docs/archived/certs/hardcoded_certificates.csv b/docs/archived/certs/hardcoded_certificates.csv new file mode 100644 index 0000000000..fbc7db3e11 --- /dev/null +++ b/docs/archived/certs/hardcoded_certificates.csv @@ -0,0 +1,18 @@ +Project,ONAP Certificate,Own Certificate,MSB Certificate,Path +AAF,No,Yes,No,aaf/charts/aaf-cert-service/resources/ +AAF,Yes,No,No,aaf/components/aaf-sms/resources/certs/intermediate_root_ca.pem 
+AAI,Yes,No,No,aai/oom/resources/config/aai/aai_keystore +AAI/SEARCH-DATA,Yes,No,No,aai/oom/components/aai-search-data/resources/config/auth/tomcat_keystore +AAI/SPARKY-BE,Yes,No,No,aai/oom/components/aai-spary-be/resources/config/auth/org.onap.aai.p12 +AAI/BABEL,No,Yes,No,aai/oom/components/aai-babel/resources/config/auth/tomcat_keystore +AAI/MODEL-LOADER,Yes,Yes,No,aai/oom/components/aai-model-loaderresources/config/auth/tomcat_keystore +APPC,Yes,No,No,kubernetes/appc/resources/config/certs/org.onap.appc.keyfile +APPC,Yes,No,No,kubernetes/appc/resources/config/certs/org.onap.appc.p12 +certInitializer,Yes,No,No,kubernetes/common/certInitializer/resources +DMaaP/MR,Yes,No,No,Hardcoded in container +HOLMES,No,Yes,No,Hardcoded in container +MULTICLOUD,No,Yes,No,Hardcoded in container +Robot,Yes,No,No,kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.* +SDC,Yes,No?,No?,kubernetes/sdc/resources/cert +VID,Yes,No,No,Hardcoded in container +UUI,No,Yes,No,Hardcoded in container diff --git a/docs/archived/images/consul/consulHealth.png b/docs/archived/images/consul/consulHealth.png new file mode 100644 index 0000000000..cd7e730c39 Binary files /dev/null and b/docs/archived/images/consul/consulHealth.png differ diff --git a/docs/archived/images/cp_vms/control_plane_1.png b/docs/archived/images/cp_vms/control_plane_1.png new file mode 100644 index 0000000000..d59b9863b7 Binary files /dev/null and b/docs/archived/images/cp_vms/control_plane_1.png differ diff --git a/docs/archived/images/cp_vms/control_plane_2.png b/docs/archived/images/cp_vms/control_plane_2.png new file mode 100644 index 0000000000..9a7d72f8a5 Binary files /dev/null and b/docs/archived/images/cp_vms/control_plane_2.png differ diff --git a/docs/archived/images/cp_vms/control_plane_3.png b/docs/archived/images/cp_vms/control_plane_3.png new file mode 100644 index 0000000000..da329f20b5 Binary files /dev/null and b/docs/archived/images/cp_vms/control_plane_3.png differ diff --git a/docs/archived/images/cp_vms/control_plane_4.png b/docs/archived/images/cp_vms/control_plane_4.png new file mode 100644 index 0000000000..817355a99e Binary files /dev/null and b/docs/archived/images/cp_vms/control_plane_4.png differ diff --git a/docs/archived/images/cp_vms/control_plane_5.png b/docs/archived/images/cp_vms/control_plane_5.png new file mode 100644 index 0000000000..33805c50dd Binary files /dev/null and b/docs/archived/images/cp_vms/control_plane_5.png differ diff --git a/docs/archived/images/cp_vms/control_plane_6.png b/docs/archived/images/cp_vms/control_plane_6.png new file mode 100644 index 0000000000..9e8ab638bc Binary files /dev/null and b/docs/archived/images/cp_vms/control_plane_6.png differ diff --git a/docs/archived/images/cp_vms/control_plane_7.png b/docs/archived/images/cp_vms/control_plane_7.png new file mode 100644 index 0000000000..f0db6d3f3f Binary files /dev/null and b/docs/archived/images/cp_vms/control_plane_7.png differ diff --git a/docs/archived/images/cp_vms/control_plane_8.png b/docs/archived/images/cp_vms/control_plane_8.png new file mode 100644 index 0000000000..e20f631e60 Binary files /dev/null and b/docs/archived/images/cp_vms/control_plane_8.png differ diff --git a/docs/archived/images/floating_ips/floating_1.png b/docs/archived/images/floating_ips/floating_1.png new file mode 100644 index 0000000000..9f413164ab Binary files /dev/null and b/docs/archived/images/floating_ips/floating_1.png differ diff --git a/docs/archived/images/floating_ips/floating_2.png b/docs/archived/images/floating_ips/floating_2.png 
new file mode 100644 index 0000000000..0001ef068c Binary files /dev/null and b/docs/archived/images/floating_ips/floating_2.png differ diff --git a/docs/archived/images/k8s/kubernetes_objects.png b/docs/archived/images/k8s/kubernetes_objects.png new file mode 100644 index 0000000000..768a3adb99 Binary files /dev/null and b/docs/archived/images/k8s/kubernetes_objects.png differ diff --git a/docs/archived/images/keys/key_pair_1.png b/docs/archived/images/keys/key_pair_1.png new file mode 100644 index 0000000000..1135c93320 Binary files /dev/null and b/docs/archived/images/keys/key_pair_1.png differ diff --git a/docs/archived/images/keys/key_pair_2.png b/docs/archived/images/keys/key_pair_2.png new file mode 100644 index 0000000000..ac3bfc5ca2 Binary files /dev/null and b/docs/archived/images/keys/key_pair_2.png differ diff --git a/docs/archived/images/keys/key_pair_3.png b/docs/archived/images/keys/key_pair_3.png new file mode 100644 index 0000000000..1e0c0200f8 Binary files /dev/null and b/docs/archived/images/keys/key_pair_3.png differ diff --git a/docs/archived/images/keys/key_pair_4.png b/docs/archived/images/keys/key_pair_4.png new file mode 100644 index 0000000000..031a9ba785 Binary files /dev/null and b/docs/archived/images/keys/key_pair_4.png differ diff --git a/docs/archived/images/msb/MSB-OOM-Diagram.png b/docs/archived/images/msb/MSB-OOM-Diagram.png new file mode 100644 index 0000000000..4ee878d833 Binary files /dev/null and b/docs/archived/images/msb/MSB-OOM-Diagram.png differ diff --git a/docs/archived/images/network/network_1.png b/docs/archived/images/network/network_1.png new file mode 100644 index 0000000000..d51cb1280b Binary files /dev/null and b/docs/archived/images/network/network_1.png differ diff --git a/docs/archived/images/network/network_2.png b/docs/archived/images/network/network_2.png new file mode 100644 index 0000000000..9498a460d3 Binary files /dev/null and b/docs/archived/images/network/network_2.png differ diff --git a/docs/archived/images/network/network_3.png b/docs/archived/images/network/network_3.png new file mode 100644 index 0000000000..c729405aef Binary files /dev/null and b/docs/archived/images/network/network_3.png differ diff --git a/docs/archived/images/network/network_4.png b/docs/archived/images/network/network_4.png new file mode 100644 index 0000000000..cc8f96fac0 Binary files /dev/null and b/docs/archived/images/network/network_4.png differ diff --git a/docs/archived/images/nfs_server/nfs_server_1.png b/docs/archived/images/nfs_server/nfs_server_1.png new file mode 100644 index 0000000000..912a10f055 Binary files /dev/null and b/docs/archived/images/nfs_server/nfs_server_1.png differ diff --git a/docs/archived/images/nfs_server/nfs_server_10.png b/docs/archived/images/nfs_server/nfs_server_10.png new file mode 100644 index 0000000000..7d87d1ca56 Binary files /dev/null and b/docs/archived/images/nfs_server/nfs_server_10.png differ diff --git a/docs/archived/images/nfs_server/nfs_server_2.png b/docs/archived/images/nfs_server/nfs_server_2.png new file mode 100644 index 0000000000..d59b9863b7 Binary files /dev/null and b/docs/archived/images/nfs_server/nfs_server_2.png differ diff --git a/docs/archived/images/nfs_server/nfs_server_3.png b/docs/archived/images/nfs_server/nfs_server_3.png new file mode 100644 index 0000000000..9a7d72f8a5 Binary files /dev/null and b/docs/archived/images/nfs_server/nfs_server_3.png differ diff --git a/docs/archived/images/nfs_server/nfs_server_4.png b/docs/archived/images/nfs_server/nfs_server_4.png new file mode 
100644 index 0000000000..da329f20b5 Binary files /dev/null and b/docs/archived/images/nfs_server/nfs_server_4.png differ diff --git a/docs/archived/images/nfs_server/nfs_server_5.png b/docs/archived/images/nfs_server/nfs_server_5.png new file mode 100644 index 0000000000..817355a99e Binary files /dev/null and b/docs/archived/images/nfs_server/nfs_server_5.png differ diff --git a/docs/archived/images/nfs_server/nfs_server_6.png b/docs/archived/images/nfs_server/nfs_server_6.png new file mode 100644 index 0000000000..33805c50dd Binary files /dev/null and b/docs/archived/images/nfs_server/nfs_server_6.png differ diff --git a/docs/archived/images/nfs_server/nfs_server_7.png b/docs/archived/images/nfs_server/nfs_server_7.png new file mode 100644 index 0000000000..9e8ab638bc Binary files /dev/null and b/docs/archived/images/nfs_server/nfs_server_7.png differ diff --git a/docs/archived/images/nfs_server/nfs_server_8.png b/docs/archived/images/nfs_server/nfs_server_8.png new file mode 100644 index 0000000000..14103fb9c3 Binary files /dev/null and b/docs/archived/images/nfs_server/nfs_server_8.png differ diff --git a/docs/archived/images/nfs_server/nfs_server_9.png b/docs/archived/images/nfs_server/nfs_server_9.png new file mode 100644 index 0000000000..aa8bc140e1 Binary files /dev/null and b/docs/archived/images/nfs_server/nfs_server_9.png differ diff --git a/docs/archived/images/oom_logo/oomLogoV2-Configure.png b/docs/archived/images/oom_logo/oomLogoV2-Configure.png new file mode 100644 index 0000000000..bdb1ece10c Binary files /dev/null and b/docs/archived/images/oom_logo/oomLogoV2-Configure.png differ diff --git a/docs/archived/images/oom_logo/oomLogoV2-Delete.png b/docs/archived/images/oom_logo/oomLogoV2-Delete.png new file mode 100644 index 0000000000..10c43d2fb3 Binary files /dev/null and b/docs/archived/images/oom_logo/oomLogoV2-Delete.png differ diff --git a/docs/archived/images/oom_logo/oomLogoV2-Deploy.png b/docs/archived/images/oom_logo/oomLogoV2-Deploy.png new file mode 100644 index 0000000000..706097cd6c Binary files /dev/null and b/docs/archived/images/oom_logo/oomLogoV2-Deploy.png differ diff --git a/docs/archived/images/oom_logo/oomLogoV2-Heal.png b/docs/archived/images/oom_logo/oomLogoV2-Heal.png new file mode 100644 index 0000000000..97ac58e9ec Binary files /dev/null and b/docs/archived/images/oom_logo/oomLogoV2-Heal.png differ diff --git a/docs/archived/images/oom_logo/oomLogoV2-Monitor.png b/docs/archived/images/oom_logo/oomLogoV2-Monitor.png new file mode 100644 index 0000000000..c9a184ac37 Binary files /dev/null and b/docs/archived/images/oom_logo/oomLogoV2-Monitor.png differ diff --git a/docs/archived/images/oom_logo/oomLogoV2-Scale.png b/docs/archived/images/oom_logo/oomLogoV2-Scale.png new file mode 100644 index 0000000000..140e5ca54f Binary files /dev/null and b/docs/archived/images/oom_logo/oomLogoV2-Scale.png differ diff --git a/docs/archived/images/oom_logo/oomLogoV2-Upgrade.png b/docs/archived/images/oom_logo/oomLogoV2-Upgrade.png new file mode 100644 index 0000000000..d51f6cfcde Binary files /dev/null and b/docs/archived/images/oom_logo/oomLogoV2-Upgrade.png differ diff --git a/docs/archived/images/oom_logo/oomLogoV2-medium.png b/docs/archived/images/oom_logo/oomLogoV2-medium.png new file mode 100644 index 0000000000..20aea693e0 Binary files /dev/null and b/docs/archived/images/oom_logo/oomLogoV2-medium.png differ diff --git a/docs/archived/images/rke/rke_1.png b/docs/archived/images/rke/rke_1.png new file mode 100644 index 0000000000..b27fc517df Binary files 
/dev/null and b/docs/archived/images/rke/rke_1.png differ diff --git a/docs/archived/images/sg/sg_1.png b/docs/archived/images/sg/sg_1.png new file mode 100644 index 0000000000..ff5264d3c4 Binary files /dev/null and b/docs/archived/images/sg/sg_1.png differ diff --git a/docs/archived/images/sg/sg_2.png b/docs/archived/images/sg/sg_2.png new file mode 100644 index 0000000000..395057fc97 Binary files /dev/null and b/docs/archived/images/sg/sg_2.png differ diff --git a/docs/archived/images/wk_vms/worker_1.png b/docs/archived/images/wk_vms/worker_1.png new file mode 100644 index 0000000000..01314d1557 Binary files /dev/null and b/docs/archived/images/wk_vms/worker_1.png differ diff --git a/docs/archived/images/wk_vms/worker_2.png b/docs/archived/images/wk_vms/worker_2.png new file mode 100644 index 0000000000..9a7d72f8a5 Binary files /dev/null and b/docs/archived/images/wk_vms/worker_2.png differ diff --git a/docs/archived/images/wk_vms/worker_3.png b/docs/archived/images/wk_vms/worker_3.png new file mode 100644 index 0000000000..93d5e28cf2 Binary files /dev/null and b/docs/archived/images/wk_vms/worker_3.png differ diff --git a/docs/archived/images/wk_vms/worker_4.png b/docs/archived/images/wk_vms/worker_4.png new file mode 100644 index 0000000000..817355a99e Binary files /dev/null and b/docs/archived/images/wk_vms/worker_4.png differ diff --git a/docs/archived/images/wk_vms/worker_5.png b/docs/archived/images/wk_vms/worker_5.png new file mode 100644 index 0000000000..33805c50dd Binary files /dev/null and b/docs/archived/images/wk_vms/worker_5.png differ diff --git a/docs/archived/images/wk_vms/worker_6.png b/docs/archived/images/wk_vms/worker_6.png new file mode 100644 index 0000000000..c71c122217 Binary files /dev/null and b/docs/archived/images/wk_vms/worker_6.png differ diff --git a/docs/archived/images/wk_vms/worker_7.png b/docs/archived/images/wk_vms/worker_7.png new file mode 100644 index 0000000000..ecb13c1809 Binary files /dev/null and b/docs/archived/images/wk_vms/worker_7.png differ diff --git a/docs/archived/oom_developer_guide.rst b/docs/archived/oom_developer_guide.rst new file mode 100644 index 0000000000..f6513bdf83 --- /dev/null +++ b/docs/archived/oom_developer_guide.rst @@ -0,0 +1,1149 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright 2018-2020 Amdocs, Bell Canada, Orange, Samsung +.. Modification copyright (C) 2022 Nordix Foundation + +.. Links +.. _Helm: https://docs.helm.sh/ +.. _Helm Charts: https://github.com/kubernetes/charts +.. _Kubernetes: https://Kubernetes.io/ +.. _Docker: https://www.docker.com/ +.. _Nexus: https://nexus.onap.org/ +.. _AWS Elastic Block Store: https://aws.amazon.com/ebs/ +.. _Azure File: https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction +.. _GCE Persistent Disk: https://cloud.google.com/compute/docs/disks/ +.. _Gluster FS: https://www.gluster.org/ +.. _Kubernetes Storage Class: https://Kubernetes.io/docs/concepts/storage/storage-classes/ +.. _Assigning Pods to Nodes: https://Kubernetes.io/docs/concepts/configuration/assign-pod-node/ + + +.. _developer-guide-label: + +OOM Developer Guide +################### + +.. figure:: ../../resources/images/oom_logo/oomLogoV2-medium.png + :align: right + +ONAP consists of a large number of components, each of which are substantial +projects within themselves, which results in a high degree of complexity in +deployment and management. 
To cope with this complexity the ONAP Operations +Manager (OOM) uses a Helm_ model of ONAP - Helm being the primary management +system for Kubernetes_ container systems - to drive all user-driven life-cycle +management operations. The Helm model of ONAP is composed of a set of +hierarchical Helm charts that define the structure of the ONAP components and +the configuration of these components. These charts are fully parameterized +such that a single environment file defines all of the parameters needed to +deploy ONAP. A user of ONAP may maintain several such environment files to +control the deployment of ONAP in multiple environments such as development, +pre-production, and production. + +The following sections describe how the ONAP Helm charts are constructed. + +.. contents:: + :depth: 3 + :local: +.. + +Container Background +==================== +Linux containers allow for an application and all of its operating system +dependencies to be packaged and deployed as a single unit without including a +guest operating system as done with virtual machines. The most popular +container solution is Docker_ which provides tools for container management +like the Docker Host (dockerd) which can create, run, stop, move, or delete a +container. Docker has a very popular registry of container images that can be +used by any Docker system; however, in the ONAP context, Docker images are +built by the standard CI/CD flow and stored in Nexus_ repositories. OOM uses +the "standard" ONAP docker containers and three new ones specifically created +for OOM. + +Containers are isolated from each other primarily via name spaces within the +Linux kernel without the need for multiple guest operating systems. As such, +multiple containers can be deployed with so little overhead that all of ONAP +can be deployed on a single host. With some optimization of the ONAP components +(e.g. elimination of redundant database instances) it may be possible to deploy +ONAP on a single laptop computer. + +Helm Charts +=========== +A Helm chart is a collection of files that describe a related set of Kubernetes +resources. A simple chart might be used to deploy something simple, like a +memcached pod, while a complex chart might contain many micro-services arranged +in a hierarchy as found in the `aai` ONAP component. + +Charts are created as files laid out in a particular directory tree, then they +can be packaged into versioned archives to be deployed. There is a public +archive of `Helm Charts`_ on GitHub that includes many technologies applicable +to ONAP. Some of these charts have been used in ONAP and all of the ONAP charts +have been created following the guidelines provided. + +The top level of the ONAP charts is shown below: + +.. 
code-block:: bash + + common + ├── cassandra + │   ├── Chart.yaml + │   ├── resources + │   │   ├── config + │   │   │   └── docker-entrypoint.sh + │   │   ├── exec.py + │   │   └── restore.sh + │   ├── templates + │   │   ├── backup + │   │   │   ├── configmap.yaml + │   │   │   ├── cronjob.yaml + │   │   │   ├── pv.yaml + │   │   │   └── pvc.yaml + │   │   ├── configmap.yaml + │   │   ├── pv.yaml + │   │   ├── service.yaml + │   │   └── statefulset.yaml + │   └── values.yaml + ├── common + │   ├── Chart.yaml + │   ├── templates + │   │   ├── _createPassword.tpl + │   │   ├── _ingress.tpl + │   │   ├── _labels.tpl + │   │   ├── _mariadb.tpl + │   │   ├── _name.tpl + │   │   ├── _namespace.tpl + │   │   ├── _repository.tpl + │   │   ├── _resources.tpl + │   │   ├── _secret.yaml + │   │   ├── _service.tpl + │   │   ├── _storage.tpl + │   │   └── _tplValue.tpl + │   └── values.yaml + ├── ... + └── postgres-legacy +    ├── Chart.yaml + ├── charts + └── configs + +The common section of charts consists of a set of templates that assist with +parameter substitution (`_name.tpl`, `_namespace.tpl` and others) and a set of +charts for components used throughout ONAP. When the common components are used +by other charts they are instantiated each time, or a shared instance can be +deployed for several components. + +All of the ONAP components have charts that follow the pattern shown below: + +.. code-block:: bash + + name-of-my-component + ├── Chart.yaml + ├── component + │   └── subcomponent-folder + ├── charts + │   └── subchart-folder + ├── resources + │   ├── folder1 + │   │   ├── file1 + │   │   └── file2 + │   └── folder2 + │   ├── file3 + │   └── folder3 + │      └── file4 + ├── templates + │   ├── NOTES.txt + │   ├── configmap.yaml + │   ├── deployment.yaml + │   ├── ingress.yaml + │   ├── job.yaml + │   ├── secrets.yaml + │   └── service.yaml + └── values.yaml + +Note that the component charts / components may include a hierarchy of sub +components and in themselves can be quite complex. + +You can use either the `charts` or the `components` folder for your +subcomponents. +The `charts` folder means that the subcomponent will always be deployed. + +The `components` folder means we can choose whether to deploy the +subcomponent. + +This choice is done in the root `values.yaml`: + +.. code-block:: yaml + + --- + global: + key: value + + component1: + enabled: true + component2: + enabled: true + +Then, in the `Chart.yaml` dependencies section, you'll use these values: + +.. code-block:: yaml + + --- + dependencies: + - name: common + version: ~x.y-0 + repository: '@local' + - name: component1 + version: ~x.y-0 + repository: 'file://components/component1' + condition: component1.enabled + - name: component2 + version: ~x.y-0 + repository: 'file://components/component2' + condition: component2.enabled + +Configuration of the components varies somewhat from component to component but +generally follows the pattern of one or more `configmap.yaml` files which can +directly provide configuration to the containers in addition to processing +configuration files stored in the `config` directory (a minimal `configmap.yaml` +sketch is shown below). It is the responsibility +of each ONAP component team to update these configuration files when changes +are made to the project containers that impact configuration. + +The following section describes how the hierarchical ONAP configuration system +is key to management of such a large system. 
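+
+To make the `configmap.yaml` pattern above concrete, here is a minimal,
+hypothetical sketch of such a template (the resource name suffix and the glob
+path are illustrative, not taken from a specific ONAP chart). It simply
+publishes every file found under the chart's `resources/config` directory as
+ConfigMap data:
+
+.. code-block:: yaml
+
+  apiVersion: v1
+  kind: ConfigMap
+  metadata:
+    name: {{ include "common.fullname" . }}-configmap
+    namespace: {{ include "common.namespace" . }}
+    labels: {{- include "common.labels" . | nindent 4 }}
+  data:
+    # every file under resources/config is rendered into the ConfigMap
+  {{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}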
+ +Configuration Management +======================== + +ONAP is a large system composed of many components - each of which is a complex +system in itself - that needs to be deployed in a number of different +ways. For example, within a single operator's network there may be R&D +deployments under active development, pre-production versions undergoing system +testing and production systems that are operating live networks. Each of these +deployments will differ in significant ways, such as the version of the +software images deployed. In addition, there may be a number of application +specific configuration differences, such as operating system environment +variables. The following describes how the Helm configuration management +system is used within the OOM project to manage both ONAP infrastructure +configuration as well as ONAP component configuration. + +One of the artifacts that OOM/Kubernetes uses to deploy ONAP components is the +deployment specification, yet another yaml file. Within these deployment specs +are a number of parameters as shown in the following example: + +.. code-block:: yaml + + apiVersion: apps/v1 + kind: StatefulSet + metadata: + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper + app.kubernetes.io/component: server + app.kubernetes.io/managed-by: Tiller + app.kubernetes.io/instance: onap-oof + name: onap-oof-zookeeper + namespace: onap + spec: + <...> + replicas: 3 + selector: + matchLabels: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/component: server + app.kubernetes.io/instance: onap-oof + serviceName: onap-oof-zookeeper-headless + template: + metadata: + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper + app.kubernetes.io/component: server + app.kubernetes.io/managed-by: Tiller + app.kubernetes.io/instance: onap-oof + spec: + <...> + affinity: + containers: + - name: zookeeper + <...> + image: gcr.io/google_samples/k8szk:v3 + imagePullPolicy: Always + <...> + ports: + - containerPort: 2181 + name: client + protocol: TCP + - containerPort: 3888 + name: election + protocol: TCP + - containerPort: 2888 + name: server + protocol: TCP + <...> + +Note that within the statefulset specification, one of the container arguments +is the key/value pair image: gcr.io/google_samples/k8szk:v3 which +specifies the version of the zookeeper software to deploy. Although the +statefulset specifications greatly simplify deployment, maintenance of the +statefulset specifications themselves becomes problematic as software versions +change over time or as different versions are required for different +deployments. For example, if the R&D team needs to deploy a newer version of +mariadb than what is currently used in the production environment, they would +need to clone the statefulset specification and change this value. Fortunately, +this problem has been solved with the templating capabilities of Helm. + +The following example shows how the statefulset specifications are modified to +incorporate Helm templates such that key/value pairs can be defined outside of +the statefulset specifications and passed during instantiation of the component. + +.. code-block:: yaml + + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . }} + labels: {{- include "common.labels" . | nindent 4 }} + spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{- include "common.matchLabels" . 
| nindent 6 }} + # serviceName is only needed for StatefulSet + # put the postfix part only if you have added a postfix on the service name + serviceName: {{ include "common.servicename" . }}-{{ .Values.service.postfix }} + <...> + template: + metadata: + labels: {{- include "common.labels" . | nindent 8 }} + annotations: {{- include "common.tplValue" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + name: {{ include "common.name" . }} + spec: + <...> + containers: + - name: {{ include "common.name" . }} + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }} + ports: + {{- range $index, $port := .Values.service.ports }} + - containerPort: {{ $port.port }} + name: {{ $port.name }} + {{- end }} + {{- range $index, $port := .Values.service.headlessPorts }} + - containerPort: {{ $port.port }} + name: {{ $port.name }} + {{- end }} + <...> + +This version of the statefulset specification has gone through the process of +templating values that are likely to change between statefulsets. Note that the +image is now specified as: image: {{ .Values.image }} instead of a +string used previously. During the deployment phase, Helm (actually the Helm +sub-component Tiller) substitutes the {{ .. }} entries with the values defined +in a values.yaml file. The content of this file is as follows: + +.. code-block:: yaml + + <...> + image: gcr.io/google_samples/k8szk:v3 + replicaCount: 3 + <...> + + +Within the values.yaml file there is an image key with the value +`gcr.io/google_samples/k8szk:v3` which is the same value used in +the non-templated version. Once all of the substitutions are complete, the +resulting statefulset specification is ready to be used by Kubernetes. + +When creating a template, consider the use of default values if appropriate. +Helm templating has built-in support for default values; here is +an example: + +.. code-block:: yaml + + imagePullSecrets: + - name: "{{ .Values.nsPrefix | default "onap" }}-docker-registry-key" + +The pipeline operator ("|") used here hints at the power of Helm templates in +that, much like an operating system command line, the pipeline operator allows +over 60 Helm functions to be embedded directly into the template (note that the +Helm template language is a superset of the Go template language). These +functions include simple string operations like upper and more complex flow +control operations like if/else. + +OOM is mainly Helm templating. In order to have consistent deployment of the +different components of ONAP, some rules must be followed. + +Templates are provided in order to create Kubernetes resources (Secrets, +Ingress, Services, ...) or part of Kubernetes resources (names, labels, +resources requests and limits, ...). + +A full list and simple description is provided in +`kubernetes/common/common/documentation.rst`. + +Service template +---------------- + +In order to create a Service for a component, you have to create a file (with +`service` in the name). +For a normal service, just put the following line: + +.. code-block:: yaml + + {{ include "common.service" . }} + +For a headless service, the line to put is the following: + +.. code-block:: yaml + + {{ include "common.headlessService" . }} + +The configuration of the service is done in the component `values.yaml`: + +.. 
code-block:: yaml + + service: + name: NAME-OF-THE-SERVICE + postfix: MY-POSTFIX + type: NodePort + annotations: + someAnnotationsKey: value + ports: + - name: tcp-MyPort + port: 5432 + nodePort: 88 + - name: http-api + port: 8080 + nodePort: 89 + - name: https-api + port: 9443 + nodePort: 90 + +`annotations` and `postfix` keys are optional. +If `service.type` is `NodePort`, then you have to give a `nodePort` value for your +service ports (which is the end of the computed nodePort; see the example). + +It would render the following Service Resource (for a component named +`name-of-my-component`, with version `x.y.z`, helm deployment name +`my-deployment` and `global.nodePortPrefix` `302`): + +.. code-block:: yaml + + apiVersion: v1 + kind: Service + metadata: + annotations: + someAnnotationsKey: value + name: NAME-OF-THE-SERVICE-MY-POSTFIX + labels: + app.kubernetes.io/name: name-of-my-component + helm.sh/chart: name-of-my-component-x.y.z + app.kubernetes.io/instance: my-deployment-name-of-my-component + app.kubernetes.io/managed-by: Tiller + spec: + ports: + - port: 5432 + targetPort: tcp-MyPort + nodePort: 30288 + - port: 8080 + targetPort: http-api + nodePort: 30289 + - port: 9443 + targetPort: https-api + nodePort: 30290 + selector: + app.kubernetes.io/name: name-of-my-component + app.kubernetes.io/instance: my-deployment-name-of-my-component + type: NodePort + +In the deployment or statefulSet file, you need to set the correct labels in +order for the service to match the pods. + +Here's an example to be sure it matches (for a statefulSet): + +.. code-block:: yaml + + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . }} + labels: {{- include "common.labels" . | nindent 4 }} + spec: + selector: + matchLabels: {{- include "common.matchLabels" . | nindent 6 }} + # serviceName is only needed for StatefulSet + # put the postfix part only if you have added a postfix on the service name + serviceName: {{ include "common.servicename" . }}-{{ .Values.service.postfix }} + <...> + template: + metadata: + labels: {{- include "common.labels" . | nindent 8 }} + annotations: {{- include "common.tplValue" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + name: {{ include "common.name" . }} + spec: + <...> + containers: + - name: {{ include "common.name" . }} + ports: + {{- range $index, $port := .Values.service.ports }} + - containerPort: {{ $port.port }} + name: {{ $port.name }} + {{- end }} + {{- range $index, $port := .Values.service.headlessPorts }} + - containerPort: {{ $port.port }} + name: {{ $port.name }} + {{- end }} + <...> + +The configuration of the headless service is done in the component `values.yaml`: + +.. code-block:: yaml + + service: + name: NAME-OF-THE-SERVICE + headless: + postfix: NONE + annotations: + anotherAnnotationsKey: value + publishNotReadyAddresses: true + headlessPorts: + - name: tcp-MyPort + port: 5432 + - name: http-api + port: 8080 + - name: https-api + port: 9443 + +`headless.annotations`, `headless.postfix` and +`headless.publishNotReadyAddresses` keys are optional. + +If `headless.postfix` is not set, then we'll add `-headless` at the end of the +service name. + +If it is set to `NONE`, there will be no postfix. + +And if it is set to something, it will add `-something` at the end of the service +name. 
+ +It would render the following Service Resource (for a component named +`name-of-my-component`, with version `x.y.z` and helm deployment name +`my-deployment`): + +.. code-block:: yaml + + apiVersion: v1 + kind: Service + metadata: + annotations: + anotherAnnotationsKey: value + name: NAME-OF-THE-SERVICE + labels: + app.kubernetes.io/name: name-of-my-component + helm.sh/chart: name-of-my-component-x.y.z + app.kubernetes.io/instance: my-deployment-name-of-my-component + app.kubernetes.io/managed-by: Tiller + spec: + clusterIP: None + ports: + - port: 5432 + targetPort: tcp-MyPort + - port: 8080 + targetPort: http-api + - port: 9443 + targetPort: https-api + publishNotReadyAddresses: true + selector: + app.kubernetes.io/name: name-of-my-component + app.kubernetes.io/instance: my-deployment-name-of-my-component + type: ClusterIP + +The previous StatefulSet example would also match (except for the `postfix` part, +obviously). + +Creating Deployment or StatefulSet +---------------------------------- + +Deployments and StatefulSets should use the `apps/v1` apiVersion (which appeared +in Kubernetes v1.9). +As seen in the service part, the following parts are mandatory: + +.. code-block:: yaml + + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . }} + labels: {{- include "common.labels" . | nindent 4 }} + spec: + selector: + matchLabels: {{- include "common.matchLabels" . | nindent 6 }} + # serviceName is only needed for StatefulSet + # put the postfix part only if you have added a postfix on the service name + serviceName: {{ include "common.servicename" . }}-{{ .Values.service.postfix }} + <...> + template: + metadata: + labels: {{- include "common.labels" . | nindent 8 }} + annotations: {{- include "common.tplValue" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + name: {{ include "common.name" . }} + spec: + <...> + containers: + - name: {{ include "common.name" . }} + +ONAP Application Configuration +------------------------------ + +Dependency Management +--------------------- +These Helm charts describe the desired state +of an ONAP deployment and instruct the Kubernetes container manager as to how +to maintain the deployment in this state. The dependencies between components dictate the order +in which the containers are started for the first time such that these +dependencies are always met without arbitrary sleep times between container +startups. For example, the SDC back-end container requires the Elastic-Search, +Cassandra and Kibana containers within SDC to be ready and is also dependent on +DMaaP (or the message-router) to be ready - where ready implies the built-in +"readiness" probes succeeded - before becoming fully operational. When an +initial deployment of ONAP is requested the current state of the system is NULL +so ONAP is deployed by the Kubernetes manager as a set of Docker containers on +one or more predetermined hosts. The hosts could be physical machines or +virtual machines. When deploying on virtual machines the resulting system will +be very similar to "Heat" based deployments, i.e. Docker containers running +within a set of VMs, the primary difference being that the allocation of +containers to VMs is done dynamically with OOM and statically with "Heat". +The example SO deployment descriptor file below shows SO's dependency on its +mariadb database component: + +SO deployment specification excerpt: + +.. 
code-block:: yaml + + apiVersion: apps/v1 + kind: Deployment + metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . }} + labels: {{- include "common.labels" . | nindent 4 }} + spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{- include "common.matchLabels" . | nindent 6 }} + template: + metadata: + labels: + app: {{ include "common.name" . }} + release: {{ .Release.Name }} + spec: + initContainers: + - command: + - /app/ready.py + args: + - --container-name + - so-mariadb + env: + ... + +Kubernetes Container Orchestration +================================== +The ONAP components are managed by the Kubernetes_ container management system +which maintains the desired state of the container system as described by one +or more deployment descriptors - similar in concept to OpenStack HEAT +Orchestration Templates. The following sections describe the fundamental +objects managed by Kubernetes, the network these components use to communicate +with each other and other entities outside of ONAP and the templates that +describe the configuration and desired state of the ONAP components. + +Name Spaces +----------- +Within the namespaces are Kubernetes services that provide external +connectivity to pods that host Docker containers. + +ONAP Components to Kubernetes Object Relationships +-------------------------------------------------- +Kubernetes deployments consist of multiple objects: + +- **nodes** - a worker machine - either physical or virtual - that hosts + multiple containers managed by Kubernetes. +- **services** - an abstraction of a logical set of pods that provide a + micro-service. +- **pods** - one or more (but typically one) container(s) that provide specific + application functionality. +- **persistent volumes** - One or more permanent volumes need to be established + to hold non-ephemeral configuration and state data. + +The relationship between these objects is shown in the following figure: + +.. .. uml:: +.. +.. @startuml +.. node PH { +.. component Service { +.. component Pod0 +.. component Pod1 +.. } +.. } +.. +.. database PV +.. @enduml + +.. figure:: ../../resources/images/k8s/kubernetes_objects.png + +OOM uses these Kubernetes objects as described in the following sections. + +Nodes +~~~~~ +OOM works with both physical and virtual worker machines. + +* Virtual Machine Deployments - If ONAP is to be deployed onto a set of virtual + machines, the creation of the VMs is outside of the scope of OOM and could be + done in many ways, such as + + * manually, for example by a user using the OpenStack Horizon dashboard or + AWS EC2, or + * automatically, for example with the use of a OpenStack Heat Orchestration + Template which builds an ONAP stack, Azure ARM template, AWS CloudFormation + Template, or + * orchestrated, for example with Cloudify creating the VMs from a TOSCA + template and controlling their life cycle for the life of the ONAP + deployment. + +* Physical Machine Deployments - If ONAP is to be deployed onto physical + machines there are several options but the recommendation is to use Rancher + along with Helm to associate hosts with a Kubernetes cluster. + +Pods +~~~~ +A group of containers with shared storage and networking can be grouped +together into a Kubernetes pod. All of the containers within a pod are +co-located and co-scheduled so they operate as a single unit. Within ONAP +Amsterdam release, pods are mapped one-to-one to docker containers although +this may change in the future. 
As explained in the Services section below the +use of Pods within each ONAP component is abstracted from other ONAP +components. + +Services +~~~~~~~~ +OOM uses the Kubernetes service abstraction to provide a consistent access +point for each of the ONAP components independent of the pod or container +architecture of that component. For example, the SDNC component may introduce +OpenDaylight clustering as some point and change the number of pods in this +component to three or more but this change will be isolated from the other ONAP +components by the service abstraction. A service can include a load balancer +on its ingress to distribute traffic between the pods and even react to dynamic +changes in the number of pods if they are part of a replica set. + +Persistent Volumes +~~~~~~~~~~~~~~~~~~ +To enable ONAP to be deployed into a wide variety of cloud infrastructures a +flexible persistent storage architecture, built on Kubernetes persistent +volumes, provides the ability to define the physical storage in a central +location and have all ONAP components securely store their data. + +When deploying ONAP into a public cloud, available storage services such as +`AWS Elastic Block Store`_, `Azure File`_, or `GCE Persistent Disk`_ are +options. Alternatively, when deploying into a private cloud the storage +architecture might consist of Fiber Channel, `Gluster FS`_, or iSCSI. Many +other storage options existing, refer to the `Kubernetes Storage Class`_ +documentation for a full list of the options. The storage architecture may vary +from deployment to deployment but in all cases a reliable, redundant storage +system must be provided to ONAP with which the state information of all ONAP +components will be securely stored. The Storage Class for a given deployment is +a single parameter listed in the ONAP values.yaml file and therefore is easily +customized. Operation of this storage system is outside the scope of the OOM. + +.. code-block:: yaml + + Insert values.yaml code block with storage block here + +Once the storage class is selected and the physical storage is provided, the +ONAP deployment step creates a pool of persistent volumes within the given +physical storage that is used by all of the ONAP components. ONAP components +simply make a claim on these persistent volumes (PV), with a persistent volume +claim (PVC), to gain access to their storage. + +The following figure illustrates the relationships between the persistent +volume claims, the persistent volumes, the storage class, and the physical +storage. + +.. 
graphviz:: + + digraph PV { + label = "Persistance Volume Claim to Physical Storage Mapping" + { + node [shape=cylinder] + D0 [label="Drive0"] + D1 [label="Drive1"] + Dx [label="Drivex"] + } + { + node [shape=Mrecord label="StorageClass:ceph"] + sc + } + { + node [shape=point] + p0 p1 p2 + p3 p4 p5 + } + subgraph clusterSDC { + label="SDC" + PVC0 + PVC1 + } + subgraph clusterSDNC { + label="SDNC" + PVC2 + } + subgraph clusterSO { + label="SO" + PVCn + } + PV0 -> sc + PV1 -> sc + PV2 -> sc + PVn -> sc + + sc -> {D0 D1 Dx} + PVC0 -> PV0 + PVC1 -> PV1 + PVC2 -> PV2 + PVCn -> PVn + + # force all of these nodes to the same line in the given order + subgraph { + rank = same; PV0;PV1;PV2;PVn;p0;p1;p2 + PV0->PV1->PV2->p0->p1->p2->PVn [style=invis] + } + + subgraph { + rank = same; D0;D1;Dx;p3;p4;p5 + D0->D1->p3->p4->p5->Dx [style=invis] + } + + } + +In-order for an ONAP component to use a persistent volume it must make a claim +against a specific persistent volume defined in the ONAP common charts. Note +that there is a one-to-one relationship between a PVC and PV. The following is +an excerpt from a component chart that defines a PVC: + +.. code-block:: yaml + + Insert PVC example here + +OOM Networking with Kubernetes +------------------------------ + +- DNS +- Ports - Flattening the containers also expose port conflicts between the + containers which need to be resolved. + +Node Ports +~~~~~~~~~~ + +Pod Placement Rules +------------------- +OOM will use the rich set of Kubernetes node and pod affinity / +anti-affinity rules to minimize the chance of a single failure resulting in a +loss of ONAP service. Node affinity / anti-affinity is used to guide the +Kubernetes orchestrator in the placement of pods on nodes (physical or virtual +machines). For example: + +- if a container used Intel DPDK technology the pod may state that it as + affinity to an Intel processor based node, or +- geographical based node labels (such as the Kubernetes standard zone or + region labels) may be used to ensure placement of a DCAE complex close to the + VNFs generating high volumes of traffic thus minimizing networking cost. + Specifically, if nodes were pre-assigned labels East and West, the pod + deployment spec to distribute pods to these nodes would be: + +.. code-block:: yaml + + nodeSelector: + failure-domain.beta.Kubernetes.io/region: {{ .Values.location }} + +- "location: West" is specified in the `values.yaml` file used to deploy + one DCAE cluster and "location: East" is specified in a second `values.yaml` + file (see OOM Configuration Management for more information about + configuration files like the `values.yaml` file). + +Node affinity can also be used to achieve geographic redundancy if pods are +assigned to multiple failure domains. For more information refer to `Assigning +Pods to Nodes`_. + +.. note:: + One could use Pod to Node assignment to totally constrain Kubernetes when + doing initial container assignment to replicate the Amsterdam release + OpenStack Heat based deployment. Should one wish to do this, each VM would + need a unique node name which would be used to specify a node constaint + for every component. These assignment could be specified in an environment + specific values.yaml file. Constraining Kubernetes in this way is not + recommended. 
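+
+As an illustration of the note above only (this is not part of the OOM charts
+and, as stated, not recommended), such a hard pod-to-node constraint could be
+expressed with a `nodeSelector` on the built-in hostname label, assuming a
+hypothetical node named `onap-k8s-1`:
+
+.. code-block:: yaml
+
+  # environment specific values.yaml entry (hypothetical example)
+  nodeSelector:
+    kubernetes.io/hostname: onap-k8s-1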
+
+Kubernetes has a comprehensive system called Taints and Tolerations that can be
+used to force the container orchestrator to repel pods from nodes based on
+static events (an administrator assigning a taint to a node) or dynamic events
+(such as a node becoming unreachable or running out of disk space). There are
+no plans to use taints or tolerations in the ONAP Beijing release. Pod
+affinity / anti-affinity is the concept of creating a spatial relationship
+between pods when the Kubernetes orchestrator does assignment (both initially
+and in operation) to nodes as explained in Inter-pod affinity and
+anti-affinity. For example, one might choose to co-locate all of the ONAP SDC
+containers on a single node as they are not critical runtime components and
+co-location minimizes overhead. On the other hand, one might choose to ensure
+that all of the containers in an ODL cluster (SDNC and APPC) are placed on
+separate nodes such that a node failure has minimal impact on the operation of
+the cluster. An example of how pod affinity / anti-affinity can be used is
+shown below:
+
+Pod Affinity / Anti-Affinity
+
+.. code-block:: yaml
+
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: with-pod-affinity
+  spec:
+    affinity:
+      podAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+        - labelSelector:
+            matchExpressions:
+            - key: security
+              operator: In
+              values:
+              - S1
+          topologyKey: failure-domain.beta.kubernetes.io/zone
+      podAntiAffinity:
+        preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 100
+          podAffinityTerm:
+            labelSelector:
+              matchExpressions:
+              - key: security
+                operator: In
+                values:
+                - S2
+            topologyKey: kubernetes.io/hostname
+    containers:
+    - name: with-pod-affinity
+      image: gcr.io/google_containers/pause:2.0
+
+This example contains both podAffinity and podAntiAffinity rules: the first
+rule is a must (requiredDuringSchedulingIgnoredDuringExecution) while the
+second will be met pending other considerations
+(preferredDuringSchedulingIgnoredDuringExecution).
+
+Preemption is another feature that may assist in achieving a repeatable
+deployment in the presence of faults that may have reduced the capacity of
+the cloud: assigning priority to the containers such that mission critical
+components have the ability to evict less critical components. Kubernetes
+provides this capability with Pod Priority and Preemption. Prior to having
+more advanced production grade features available, the ability to at least be
+able to re-deploy ONAP (or a subset of it) reliably provides a level of
+confidence that, should an outage occur, the system can be brought back
+on-line predictably.
+
+Health Checks
+-------------
+
+Monitoring of ONAP components is configured in the agents within JSON files
+and stored in gerrit under the consul-agent-config; here is an example from
+the AAI model loader (aai-model-loader-health.json):
+
+.. code-block:: json
+
+  {
+    "service": {
+      "name": "A&AI Model Loader",
+      "checks": [
+        {
+          "id": "model-loader-process",
+          "name": "Model Loader Presence",
+          "script": "/consul/config/scripts/model-loader-script.sh",
+          "interval": "15s",
+          "timeout": "1s"
+        }
+      ]
+    }
+  }
+
+Liveness Probes
+---------------
+
+These liveness probes can simply check that a port is available, that a
+built-in health check is reporting good health, or that the Consul health
+check is positive. For example, the following liveness probe, used to monitor
+the SDNC component, can be found in the SDNC DB deployment specification:
+
+.. 
code-block:: yaml + + sdnc db liveness probe + + livenessProbe: + exec: + command: ["mysqladmin", "ping"] + initialDelaySeconds: 30 periodSeconds: 10 + timeoutSeconds: 5 + +The 'initialDelaySeconds' control the period of time between the readiness +probe succeeding and the liveness probe starting. 'periodSeconds' and +'timeoutSeconds' control the actual operation of the probe. Note that +containers are inherently ephemeral so the healing action destroys failed +containers and any state information within it. To avoid a loss of state, a +persistent volume should be used to store all data that needs to be persisted +over the re-creation of a container. Persistent volumes have been created for +the database components of each of the projects and the same technique can be +used for all persistent state information. + + + +Environment Files +~~~~~~~~~~~~~~~~~ + +MSB Integration +=============== + +The \ `Microservices Bus +Project `__ provides +facilities to integrate micro-services into ONAP and therefore needs to +integrate into OOM - primarily through Consul which is the backend of +MSB service discovery. The following is a brief description of how this +integration will be done: + +A registrator to push the service endpoint info to MSB service +discovery. + +- The needed service endpoint info is put into the kubernetes yaml file + as annotation, including service name, Protocol,version, visual + range,LB method, IP, Port,etc. + +- OOM deploy/start/restart/scale in/scale out/upgrade ONAP components + +- Registrator watch the kubernetes event + +- When an ONAP component instance has been started/destroyed by OOM, + Registrator get the notification from kubernetes + +- Registrator parse the service endpoint info from annotation and + register/update/unregister it to MSB service discovery + +- MSB API Gateway uses the service endpoint info for service routing + and load balancing. + +Details of the registration service API can be found at \ `Microservice +Bus API +Documentation `__. + +ONAP Component Registration to MSB +---------------------------------- +The charts of all ONAP components intending to register against MSB must have +an annotation in their service(s) template. A `sdc` example follows: + +.. code-block:: yaml + + apiVersion: v1 + kind: Service + metadata: + labels: + app: sdc-be + name: sdc-be + namespace: "{{ .Values.nsPrefix }}" + annotations: + msb.onap.org/service-info: '[ + { + "serviceName": "sdc", + "version": "v1", + "url": "/sdc/v1", + "protocol": "REST", + "port": "8080", + "visualRange":"1" + }, + { + "serviceName": "sdc-deprecated", + "version": "v1", + "url": "/sdc/v1", + "protocol": "REST", + "port": "8080", + "visualRange":"1", + "path":"/sdc/v1" + } + ]' + ... + + +MSB Integration with OOM +------------------------ +A preliminary view of the OOM-MSB integration is as follows: + +.. figure:: ../../resources/images/msb/MSB-OOM-Diagram.png + +A message sequence chart of the registration process: + +.. 
uml:: + + participant "OOM" as oom + participant "ONAP Component" as onap + participant "Service Discovery" as sd + participant "External API Gateway" as eagw + participant "Router (Internal API Gateway)" as iagw + + box "MSB" #LightBlue + participant sd + participant eagw + participant iagw + end box + + == Deploy Servcie == + + oom -> onap: Deploy + oom -> sd: Register service endpoints + sd -> eagw: Services exposed to external system + sd -> iagw: Services for internal use + + == Component Life-cycle Management == + + oom -> onap: Start/Stop/Scale/Migrate/Upgrade + oom -> sd: Update service info + sd -> eagw: Update service info + sd -> iagw: Update service info + + == Service Health Check == + + sd -> onap: Check the health of service + sd -> eagw: Update service status + sd -> iagw: Update service status + + +MSB Deployment Instructions +--------------------------- +MSB is helm installable ONAP component which is often automatically deployed. +To install it individually enter:: + + > helm install /msb + +.. note:: + TBD: Vaidate if the following procedure is still required. + +Please note that Kubernetes authentication token must be set at +*kubernetes/kube2msb/values.yaml* so the kube2msb registrator can get the +access to watch the kubernetes events and get service annotation by +Kubernetes APIs. The token can be found in the kubectl configuration file +*~/.kube/config* + +More details can be found here `MSB installation `_. + +.. MISC +.. ==== +.. Note that although OOM uses Kubernetes facilities to minimize the effort +.. required of the ONAP component owners to implement a successful rolling +.. upgrade strategy there are other considerations that must be taken into +.. consideration. +.. For example, external APIs - both internal and external to ONAP - should be +.. designed to gracefully accept transactions from a peer at a different +.. software version to avoid deadlock situations. Embedded version codes in +.. messages may facilitate such capabilities. +.. +.. Within each of the projects a new configuration repository contains all of +.. the project specific configuration artifacts. As changes are made within +.. the project, it's the responsibility of the project team to make appropriate +.. changes to the configuration data. diff --git a/docs/archived/oom_hardcoded_certificates.rst b/docs/archived/oom_hardcoded_certificates.rst new file mode 100644 index 0000000000..326cd3980f --- /dev/null +++ b/docs/archived/oom_hardcoded_certificates.rst @@ -0,0 +1,18 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright 2018 Amdocs, Bell Canada, 2020 Nokia Solutions and Networks + +:orphan: + +.. Links +.. _hardcoded-certificates-label: + +ONAP Hardcoded certificates +########################### + +ONAP current installation have hardcoded certificates. +Here's the list of these certificates: + +.. csv-table:: + :file: certs/hardcoded_certificates.csv diff --git a/docs/archived/oom_quickstart_guide.rst b/docs/archived/oom_quickstart_guide.rst new file mode 100644 index 0000000000..b7749b1056 --- /dev/null +++ b/docs/archived/oom_quickstart_guide.rst @@ -0,0 +1,284 @@ +.. This work is licensed under a +.. Creative Commons Attribution 4.0 International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright 2019-2020 Amdocs, Bell Canada, Orange, Samsung +.. _oom_quickstart_guide: +.. _quick-start-label: + +OOM Quick Start Guide +##################### + +.. 
figure:: images/oom_logo/oomLogoV2-medium.png + :align: right + +Once a Kubernetes environment is available (follow the instructions in +:ref:`cloud-setup-guide-label` if you don't have a cloud environment +available), follow the following instructions to deploy ONAP. + +**Step 1.** Clone the OOM repository from ONAP gerrit:: + + > git clone -b http://gerrit.onap.org/r/oom --recurse-submodules + > cd oom/kubernetes + +where can be an official release tag, such as + +* 4.0.0-ONAP for Dublin +* 5.0.1-ONAP for El Alto +* 6.0.0 for Frankfurt +* 7.0.0 for Guilin +* 8.0.0 for Honolulu +* 9.0.0 for Istanbul +* 10.0.0 for Jakarta +* 11.0.0 for Kohn + +**Step 2.** Install Helm Plugins required to deploy ONAP:: + + > cp -R ~/oom/kubernetes/helm/plugins/ ~/.local/share/helm/plugins + > helm plugin install https://github.com/chartmuseum/helm-push.git \ + --version 0.9.0 + +.. note:: + The ``--version 0.9.0`` is required as new version of helm (3.7.0 and up) is + now using ``push`` directly and helm-push is using ``cm-push`` starting + version ``0.10.0`` and up. + +**Step 3.** Install Chartmuseum:: + + > curl -LO https://s3.amazonaws.com/chartmuseum/release/latest/bin/linux/amd64/chartmuseum + > chmod +x ./chartmuseum + > mv ./chartmuseum /usr/local/bin + +**Step 4.** Install Cert-Manager:: + + > kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.2.0/cert-manager.yaml + +More details can be found :doc:`here `. + +**Step 4.1** Install Strimzi Kafka Operator: + +- Add the helm repo:: + + > helm repo add strimzi https://strimzi.io/charts/ + +- Install the operator:: + + > helm install strimzi-kafka-operator strimzi/strimzi-kafka-operator --namespace strimzi-system --version 0.28.0 --set watchAnyNamespace=true --create-namespace + +More details can be found :doc:`here `. + +**Step 5.** Customize the Helm charts like `oom/kubernetes/onap/values.yaml` or +an override file like `onap-all.yaml`, `onap-vfw.yaml` or `openstack.yaml` file +to suit your deployment with items like the OpenStack tenant information. + +.. note:: + Standard and example override files (e.g. `onap-all.yaml`, `openstack.yaml`) + can be found in the `oom/kubernetes/onap/resources/overrides/` directory. + + + a. You may want to selectively enable or disable ONAP components by changing + the ``enabled: true/false`` flags. + + + b. Encrypt the OpenStack password using the shell tool for Robot and put it in + the Robot Helm charts or Robot section of `openstack.yaml` + + + c. Encrypt the OpenStack password using the java based script for SO Helm + charts or SO section of `openstack.yaml`. + + + d. Update the OpenStack parameters that will be used by Robot, SO and APPC Helm + charts or use an override file to replace them. + + e. Add in the command line a value for the global master password + (global.masterPassword). + + + +a. Enabling/Disabling Components: +Here is an example of the nominal entries that need to be provided. +We have different values file available for different contexts. + +.. literalinclude:: ../kubernetes/onap/values.yaml + :language: yaml + + +b. Generating ROBOT Encrypted Password: +The Robot encrypted Password uses the same encryption.key as SO but an +openssl algorithm that works with the python based Robot Framework. + +.. note:: + To generate Robot ``openStackEncryptedPasswordHere``:: + + cd so/resources/config/mso/ + /oom/kubernetes/so/resources/config/mso# echo -n "" | openssl aes-128-ecb -e -K `cat encryption.key` -nosalt | xxd -c 256 -p`` + +c. 
Generating SO Encrypted Password: +The SO Encrypted Password uses a java based encryption utility since the +Java encryption library is not easy to integrate with openssl/python that +Robot uses in Dublin and upper versions. + +.. note:: + To generate SO ``openStackEncryptedPasswordHere`` and ``openStackSoEncryptedPassword`` + ensure `default-jdk` is installed:: + + apt-get update; apt-get install default-jdk + + Then execute:: + + SO_ENCRYPTION_KEY=`cat ~/oom/kubernetes/so/resources/config/mso/encryption.key` + OS_PASSWORD=XXXX_OS_CLEARTESTPASSWORD_XXXX + + git clone http://gerrit.onap.org/r/integration + cd integration/deployment/heat/onap-rke/scripts + + javac Crypto.java + java Crypto "$OS_PASSWORD" "$SO_ENCRYPTION_KEY" + +d. Update the OpenStack parameters: + +There are assumptions in the demonstration VNF Heat templates about the +networking available in the environment. To get the most value out of these +templates and the automation that can help confirm the setup is correct, please +observe the following constraints. + + +``openStackPublicNetId:`` + This network should allow Heat templates to add interfaces. + This need not be an external network, floating IPs can be assigned to the + ports on the VMs that are created by the heat template but its important that + neutron allow ports to be created on them. + +``openStackPrivateNetCidr: "10.0.0.0/16"`` + This ip address block is used to assign OA&M addresses on VNFs to allow ONAP + connectivity. The demonstration Heat templates assume that 10.0 prefix can be + used by the VNFs and the demonstration ip addressing plan embodied in the + preload template prevent conflicts when instantiating the various VNFs. If + you need to change this, you will need to modify the preload data in the + Robot Helm chart like integration_preload_parameters.py and the + demo/heat/preload_data in the Robot container. The size of the CIDR should + be sufficient for ONAP and the VMs you expect to create. + +``openStackOamNetworkCidrPrefix: "10.0"`` + This ip prefix mush match the openStackPrivateNetCidr and is a helper + variable to some of the Robot scripts for demonstration. A production + deployment need not worry about this setting but for the demonstration VNFs + the ip asssignment strategy assumes 10.0 ip prefix. + +Example Keystone v2.0 + +.. literalinclude:: yaml/example-integration-override.yaml + :language: yaml + +Example Keystone v3 (required for Rocky and later releases) + +.. literalinclude:: yaml/example-integration-override-v3.yaml + :language: yaml + + +**Step 6.** To setup a local Helm server to server up the ONAP charts:: + + > chartmuseum --storage local --storage-local-rootdir ~/helm3-storage -port 8879 & + +Note the port number that is listed and use it in the Helm repo add as +follows:: + + > helm repo add local http://127.0.0.1:8879 + +**Step 7.** Verify your Helm repository setup with:: + + > helm repo list + NAME URL + local http://127.0.0.1:8879 + +**Step 8.** Build a local Helm repository (from the kubernetes directory):: + + > make SKIP_LINT=TRUE [HELM_BIN=] all ; make SKIP_LINT=TRUE [HELM_BIN=] onap + +`HELM_BIN` + Sets the helm binary to be used. The default value use helm from PATH + + +**Step 9.** Display the onap charts that available to be deployed:: + + > helm repo update + > helm search repo onap + +.. literalinclude:: helm/helm-search.txt + +.. note:: + The setup of the Helm repository is a one time activity. 
If you make changes + to your deployment charts or values be sure to use ``make`` to update your + local Helm repository. + +**Step 10.** Once the repo is setup, installation of ONAP can be done with a +single command + +.. note:: + The ``--timeout 900s`` is currently required in Dublin and later + versions up to address long running initialization tasks for DMaaP + and SO. Without this timeout value both applications may fail to + deploy. + +.. danger:: + We've added the master password on the command line. + You shouldn't put it in a file for safety reason + please don't forget to change the value to something random + + A space is also added in front of the command so "history" doesn't catch it. + This masterPassword is very sensitive, please be careful! + + +To deploy all ONAP applications use this command:: + + > cd oom/kubernetes + > helm deploy dev local/onap --namespace onap --create-namespace --set global.masterPassword=myAwesomePasswordThatINeedToChange -f onap/resources/overrides/onap-all.yaml -f onap/resources/overrides/environment.yaml -f onap/resources/overrides/openstack.yaml --timeout 900s + +All override files may be customized (or replaced by other overrides) as per +needs. + +`onap-all.yaml` + Enables the modules in the ONAP deployment. As ONAP is very modular, it is + possible to customize ONAP and disable some components through this + configuration file. + +`onap-all-ingress-nginx-vhost.yaml` + Alternative version of the `onap-all.yaml` but with global ingress controller + enabled. It requires the cluster configured with the nginx ingress controller + and load balancer. Please use this file instead `onap-all.yaml` if you want + to use experimental ingress controller feature. + +`environment.yaml` + Includes configuration values specific to the deployment environment. + + Example: adapt readiness and liveness timers to the level of performance of + your infrastructure + +`openstack.yaml` + Includes all the OpenStack related information for the default target tenant + you want to use to deploy VNFs from ONAP and/or additional parameters for the + embedded tests. + +**Step 11.** Verify ONAP installation + +Use the following to monitor your deployment and determine when ONAP is ready +for use:: + + > kubectl get pods -n onap -o=wide + +.. note:: + While all pods may be in a Running state, it is not a guarantee that all + components are running fine. + + Launch the healthcheck tests using Robot to verify that the components are + healthy:: + + > ~/oom/kubernetes/robot/ete-k8s.sh onap health + +**Step 12.** Undeploy ONAP +:: + + > helm undeploy dev + +More examples of using the deploy and undeploy plugins can be found here: +https://wiki.onap.org/display/DW/OOM+Helm+%28un%29Deploy+plugins diff --git a/docs/archived/oom_setup_kubernetes_rancher.rst b/docs/archived/oom_setup_kubernetes_rancher.rst new file mode 100644 index 0000000000..767b93925e --- /dev/null +++ b/docs/archived/oom_setup_kubernetes_rancher.rst @@ -0,0 +1,531 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright 2018-2020 Amdocs, Bell Canada, Orange, Samsung + +.. Links +.. _HELM Best Practices Guide: https://docs.helm.sh/chart_best_practices/#requirements +.. _kubectl Cheat Sheet: https://kubernetes.io/docs/reference/kubectl/cheatsheet/ +.. _Kubernetes documentation for emptyDir: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir +.. 
_Docker DevOps: https://wiki.onap.org/display/DW/Docker+DevOps#DockerDevOps-DockerBuild +.. _http://cd.onap.info:30223/mso/logging/debug: http://cd.onap.info:30223/mso/logging/debug +.. _Onboarding and Distributing a Vendor Software Product: https://wiki.onap.org/pages/viewpage.action?pageId=1018474 +.. _README.md: https://gerrit.onap.org/r/gitweb?p=oom.git;a=blob;f=kubernetes/README.md + +.. figure:: images/oom_logo/oomLogoV2-medium.png + :align: right + +.. _onap-on-kubernetes-with-rancher: + +ONAP on HA Kubernetes Cluster +############################# + +This guide provides instructions on how to setup a Highly-Available Kubernetes +Cluster. For this, we are hosting our cluster on OpenStack VMs and using the +Rancher Kubernetes Engine (RKE) to deploy and manage our Kubernetes Cluster. + +.. contents:: + :depth: 1 + :local: +.. + +The result at the end of this tutorial will be: + +#. Creation of a Key Pair to use with Open Stack and RKE + +#. Creation of OpenStack VMs to host Kubernetes Control Plane + +#. Creation of OpenStack VMs to host Kubernetes Workers + +#. Installation and configuration of RKE to setup an HA Kubernetes + +#. Installation and configuration of kubectl + +#. Installation and configuration of Helm + +#. Creation of an NFS Server to be used by ONAP as shared persistance + +There are many ways one can execute the above steps. Including automation +through the use of HEAT to setup the OpenStack VMs. To better illustrate the +steps involved, we have captured the manual creation of such an environment +using the ONAP Wind River Open Lab. + +Create Key Pair +=============== +A Key Pair is required to access the created OpenStack VMs and will be used by +RKE to configure the VMs for Kubernetes. + +Use an existing key pair, import one or create a new one to assign. + +.. image:: images/keys/key_pair_1.png + +.. Note:: + If you're creating a new Key Pair, ensure to create a local copy of the + Private Key through the use of "Copy Private Key to Clipboard". + +For the purpose of this guide, we will assume a new local key called "onap-key" +has been downloaded and is copied into **~/.ssh/**, from which it can be +referenced. + +Example:: + + > mv onap-key ~/.ssh + + > chmod 600 ~/.ssh/onap-key + + +Create Network +============== + +An internal network is required in order to deploy our VMs that will host +Kubernetes. + +.. image:: images/network/network_1.png + +.. image:: images/network/network_2.png + +.. image:: images/network/network_3.png + +.. Note:: + It's better to have one network per deployment and obviously the name of this + network should be unique. + +Now we need to create a router to attach this network to outside: + +.. image:: images/network/network_4.png + +Create Security Group +===================== + +A specific security group is also required + +.. image:: images/sg/sg_1.png + +then click on `manage rules` of the newly created security group. +And finally click on `Add Rule` and create the following one: + +.. image:: images/sg/sg_2.png + +.. Note:: + the security is clearly not good here and the right SG will be proposed in a + future version + +Create Kubernetes Control Plane VMs +=================================== + +The following instructions describe how to create 3 OpenStack VMs to host the +Highly-Available Kubernetes Control Plane. +ONAP workloads will not be scheduled on these Control Plane nodes. + +Launch new VM instances +----------------------- + +.. 
image:: images/cp_vms/control_plane_1.png + +Select Ubuntu 18.04 as base image +--------------------------------- +Select "No" for "Create New Volume" + +.. image:: images/cp_vms/control_plane_2.png + +Select Flavor +------------- +The recommended flavor is at least 4 vCPU and 8GB ram. + +.. image:: images/cp_vms/control_plane_3.png + +Networking +---------- + +Use the created network: + +.. image:: images/cp_vms/control_plane_4.png + +Security Groups +--------------- + +Use the created security group: + +.. image:: images/cp_vms/control_plane_5.png + +Key Pair +-------- +Assign the key pair that was created/selected previously (e.g. onap_key). + +.. image:: images/cp_vms/control_plane_6.png + +Apply customization script for Control Plane VMs +------------------------------------------------ + +Click :download:`openstack-k8s-controlnode.sh ` +to download the script. + +.. literalinclude:: shell/openstack-k8s-controlnode.sh + :language: bash + +This customization script will: + +* update ubuntu +* install docker + +.. image:: images/cp_vms/control_plane_7.png + +Launch Instance +--------------- + +.. image:: images/cp_vms/control_plane_8.png + + + +Create Kubernetes Worker VMs +============================ +The following instructions describe how to create OpenStack VMs to host the +Highly-Available Kubernetes Workers. ONAP workloads will only be scheduled on +these nodes. + +Launch new VM instances +----------------------- + +The number and size of Worker VMs is dependent on the size of the ONAP +deployment. By default, all ONAP applications are deployed. It's possible to +customize the deployment and enable a subset of the ONAP applications. For the +purpose of this guide, however, we will deploy 12 Kubernetes Workers that have +been sized to handle the entire ONAP application workload. + +.. image:: images/wk_vms/worker_1.png + +Select Ubuntu 18.04 as base image +--------------------------------- +Select "No" on "Create New Volume" + +.. image:: images/wk_vms/worker_2.png + +Select Flavor +------------- +The size of Kubernetes hosts depend on the size of the ONAP deployment +being installed. + +If a small subset of ONAP applications are being deployed +(i.e. for testing purposes), then 16GB or 32GB may be sufficient. + +.. image:: images/wk_vms/worker_3.png + +Networking +----------- + +.. image:: images/wk_vms/worker_4.png + +Security Group +--------------- + +.. image:: images/wk_vms/worker_5.png + +Key Pair +-------- +Assign the key pair that was created/selected previously (e.g. onap_key). + +.. image:: images/wk_vms/worker_6.png + +Apply customization script for Kubernetes VM(s) +----------------------------------------------- + +Click :download:`openstack-k8s-workernode.sh ` to +download the script. + +.. literalinclude:: shell/openstack-k8s-workernode.sh + :language: bash + +This customization script will: + +* update ubuntu +* install docker +* install nfs common + + +Launch Instance +--------------- + +.. image:: images/wk_vms/worker_7.png + + + + +Assign Floating IP addresses +---------------------------- +Assign Floating IPs to all Control Plane and Worker VMs. +These addresses provide external access to the VMs and will be used by RKE +to configure kubernetes on to the VMs. + +Repeat the following for each VM previously created: + +.. image:: images/floating_ips/floating_1.png + +Resulting floating IP assignments in this example. + +.. 
image:: images/floating_ips/floating_2.png + + + + +Configure Rancher Kubernetes Engine (RKE) +========================================= + +Install RKE +----------- +Download and install RKE on a VM, desktop or laptop. +Binaries can be found here for Linux and Mac: https://github.com/rancher/rke/releases/tag/v1.0.6 + +.. note:: + There are several ways to install RKE. Further parts of this documentation + assumes that you have rke command available. + If you don't know how to install RKE you may follow the below steps: + + * chmod +x ./rke_linux-amd64 + * sudo mv ./rke_linux-amd64 /user/local/bin/rke + +RKE requires a *cluster.yml* as input. An example file is show below that +describes a Kubernetes cluster that will be mapped onto the OpenStack VMs +created earlier in this guide. + +Click :download:`cluster.yml ` to download the +configuration file. + +.. literalinclude:: yaml/cluster.yml + :language: yaml + +Prepare cluster.yml +------------------- +Before this configuration file can be used the external **address** +and the **internal_address** must be mapped for each control and worker node +in this file. + +Run RKE +------- +From within the same directory as the cluster.yml file, simply execute:: + + > rke up + +The output will look something like:: + + INFO[0000] Initiating Kubernetes cluster + INFO[0000] [certificates] Generating admin certificates and kubeconfig + INFO[0000] Successfully Deployed state file at [./cluster.rkestate] + INFO[0000] Building Kubernetes cluster + INFO[0000] [dialer] Setup tunnel for host [10.12.6.82] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.249] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.74] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.85] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.238] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.89] + INFO[0000] [dialer] Setup tunnel for host [10.12.5.11] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.90] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.244] + INFO[0000] [dialer] Setup tunnel for host [10.12.5.165] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.126] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.111] + INFO[0000] [dialer] Setup tunnel for host [10.12.5.160] + INFO[0000] [dialer] Setup tunnel for host [10.12.5.191] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.195] + INFO[0002] [network] Deploying port listener containers + INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.85] + INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.89] + INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.90] + INFO[0011] [network] Successfully pulled image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.89] + . . . . 
+ INFO[0309] [addons] Setting up Metrics Server + INFO[0309] [addons] Saving ConfigMap for addon rke-metrics-addon to Kubernetes + INFO[0309] [addons] Successfully saved ConfigMap for addon rke-metrics-addon to Kubernetes + INFO[0309] [addons] Executing deploy job rke-metrics-addon + INFO[0315] [addons] Metrics Server deployed successfully + INFO[0315] [ingress] Setting up nginx ingress controller + INFO[0315] [addons] Saving ConfigMap for addon rke-ingress-controller to Kubernetes + INFO[0316] [addons] Successfully saved ConfigMap for addon rke-ingress-controller to Kubernetes + INFO[0316] [addons] Executing deploy job rke-ingress-controller + INFO[0322] [ingress] ingress controller nginx deployed successfully + INFO[0322] [addons] Setting up user addons + INFO[0322] [addons] no user addons defined + INFO[0322] Finished building Kubernetes cluster successfully + +Install Kubectl +=============== + +Download and install kubectl. Binaries can be found here for Linux and Mac: + +https://storage.googleapis.com/kubernetes-release/release/v1.15.11/bin/linux/amd64/kubectl +https://storage.googleapis.com/kubernetes-release/release/v1.15.11/bin/darwin/amd64/kubectl + +You only need to install kubectl where you'll launch Kubernetes command. This +can be any machines of the Kubernetes cluster or a machine that has IP access +to the APIs. +Usually, we use the first controller as it has also access to internal +Kubernetes services, which can be convenient. + +Validate deployment +------------------- + +:: + + > mkdir -p ~/.kube + + > cp kube_config_cluster.yml ~/.kube/config.onap + + > export KUBECONFIG=~/.kube/config.onap + + > kubectl config use-context onap + + > kubectl get nodes -o=wide + +:: + + NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME + onap-control-1 Ready controlplane,etcd 3h53m v1.15.2 10.0.0.8 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-control-2 Ready controlplane,etcd 3h53m v1.15.2 10.0.0.11 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-control-3 Ready controlplane,etcd 3h53m v1.15.2 10.0.0.12 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-1 Ready worker 3h53m v1.15.2 10.0.0.14 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-10 Ready worker 3h53m v1.15.2 10.0.0.16 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-11 Ready worker 3h53m v1.15.2 10.0.0.18 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-12 Ready worker 3h53m v1.15.2 10.0.0.7 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-2 Ready worker 3h53m v1.15.2 10.0.0.26 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-3 Ready worker 3h53m v1.15.2 10.0.0.5 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-4 Ready worker 3h53m v1.15.2 10.0.0.6 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-5 Ready worker 3h53m v1.15.2 10.0.0.9 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-6 Ready worker 3h53m v1.15.2 10.0.0.17 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-7 Ready worker 3h53m v1.15.2 10.0.0.20 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-8 Ready worker 3h53m v1.15.2 10.0.0.10 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-9 Ready worker 3h53m v1.15.2 10.0.0.4 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + + +Install Helm +============ + +Example Helm client install on Linux:: + + > wget https://get.helm.sh/helm-v2.16.6-linux-amd64.tar.gz + + > tar -zxvf 
helm-v2.16.6-linux-amd64.tar.gz + + > sudo mv linux-amd64/helm /usr/local/bin/helm + +Initialize Kubernetes Cluster for use by Helm +--------------------------------------------- + +:: + + > kubectl -n kube-system create serviceaccount tiller + + > kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller + + > helm init --service-account tiller + + > kubectl -n kube-system  rollout status deploy/tiller-deploy + + + +Setting up an NFS share for Multinode Kubernetes Clusters +========================================================= +Deploying applications to a Kubernetes cluster requires Kubernetes nodes to +share a common, distributed filesystem. In this tutorial, we will setup an +NFS Master, and configure all Worker nodes a Kubernetes cluster to play +the role of NFS slaves. + +It is recommended that a separate VM, outside of the kubernetes +cluster, be used. This is to ensure that the NFS Master does not compete for +resources with Kubernetes Control Plane or Worker Nodes. + + +Launch new NFS Server VM instance +--------------------------------- +.. image:: images/nfs_server/nfs_server_1.png + +Select Ubuntu 18.04 as base image +--------------------------------- +Select "No" on "Create New Volume" + +.. image:: images/nfs_server/nfs_server_2.png + +Select Flavor +------------- + +.. image:: images/nfs_server/nfs_server_3.png + +Networking +----------- + +.. image:: images/nfs_server/nfs_server_4.png + +Security Group +--------------- + +.. image:: images/nfs_server/nfs_server_5.png + +Key Pair +-------- +Assign the key pair that was created/selected previously (e.g. onap_key). + +.. image:: images/nfs_server/nfs_server_6.png + +Apply customization script for NFS Server VM +-------------------------------------------- + +Click :download:`openstack-nfs-server.sh ` to download +the script. + +.. literalinclude:: shell/openstack-nfs-server.sh + :language: bash + +This customization script will: + +* update ubuntu +* install nfs server + + +Launch Instance +--------------- + +.. image:: images/nfs_server/nfs_server_7.png + + + +Assign Floating IP addresses +---------------------------- + +.. image:: images/nfs_server/nfs_server_8.png + +Resulting floating IP assignments in this example. + +.. image:: images/nfs_server/nfs_server_9.png + + +To properly set up an NFS share on Master and Slave nodes, the user can run the +scripts below. + +Click :download:`master_nfs_node.sh ` to download the +script. + +.. literalinclude:: shell/master_nfs_node.sh + :language: bash + +Click :download:`slave_nfs_node.sh ` to download the script. + +.. literalinclude:: shell/slave_nfs_node.sh + :language: bash + +The master_nfs_node.sh script runs in the NFS Master node and needs the list of +NFS Slave nodes as input, e.g.:: + + > sudo ./master_nfs_node.sh node1_ip node2_ip ... nodeN_ip + +The slave_nfs_node.sh script runs in each NFS Slave node and needs the IP of +the NFS Master node as input, e.g.:: + + > sudo ./slave_nfs_node.sh master_node_ip + + +ONAP Deployment via OOM +======================= +Now that Kubernetes and Helm are installed and configured you can prepare to +deploy ONAP. 
Follow the instructions in the README.md_ or look at the official +documentation to get started: + +- :ref:`quick-start-label` - deploy ONAP on an existing cloud +- :ref:`user-guide-label` - a guide for operators of an ONAP instance diff --git a/docs/archived/oom_setup_paas.rst b/docs/archived/oom_setup_paas.rst new file mode 100644 index 0000000000..2dabcb1aea --- /dev/null +++ b/docs/archived/oom_setup_paas.rst @@ -0,0 +1,144 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright 2021 Nokia + +.. Links +.. _Cert-Manager Installation documentation: https://cert-manager.io/docs/installation/kubernetes/ +.. _Cert-Manager kubectl plugin documentation: https://cert-manager.io/docs/usage/kubectl-plugin/ +.. _Strimzi Apache Kafka Operator helm Installation documentation: https://strimzi.io/docs/operators/in-development/deploying.html#deploying-cluster-operator-helm-chart-str + +.. _oom_setup_paas: + +ONAP PaaS set-up +################ + +Starting from Honolulu release, Cert-Manager and Prometheus Stack are a part +of k8s PaaS for ONAP operations and can be installed to provide +additional functionality for ONAP engineers. +Starting from Jakarta release, Strimzi Apache Kafka is deployed to provide +Apache kafka as the default messaging bus for ONAP. + +The versions of PaaS components that are supported by OOM are as follows: + +.. table:: ONAP PaaS components + + ============== ============= ================= ======= + Release Cert-Manager Prometheus Stack Strimzi + ============== ============= ================= ======= + honolulu 1.2.0 13.x + istanbul 1.5.4 19.x + jakarta 0.28.0 + ============== ============= ================= ======= + +This guide provides instructions on how to install the PaaS +components for ONAP. + +.. contents:: + :depth: 1 + :local: +.. + +Strimzi Apache Kafka Operator +============================= + +Strimzi provides a way to run an Apache Kafka cluster on Kubernetes +in various deployment configurations by using kubernetes operators. +Operators are a method of packaging, deploying, and managing a +Kubernetes application. +Strimzi Operators extend Kubernetes functionality, automating common +and complex tasks related to a Kafka deployment. By implementing +knowledge of Kafka operations in code, Kafka administration +tasks are simplified and require less manual intervention. + +Installation steps +------------------ + +The recommended version of Strimzi for Kubernetes 1.19 is v0.28.0. +The Strimzi cluster operator is deployed using helm to install the parent chart +containing all of the required custom resource definitions. This should be done +by a kubernetes administrator to allow for deployment of custom resources in to +any kubernetes namespace within the cluster. + +Full installation instructions can be found in the +`Strimzi Apache Kafka Operator helm Installation documentation`_. + +Installation can be as simple as: + +- Add the helm repo:: + + > helm repo add strimzi https://strimzi.io/charts/ + +- Install the operator:: + + > helm install strimzi-kafka-operator strimzi/strimzi-kafka-operator --namespace strimzi-system --version 0.28.0 --set watchAnyNamespace=true --create-namespace + +Cert-Manager +============ + +Cert-Manager is a native Kubernetes certificate management controller. +It can help with issuing certificates from a variety of sources, such as +Let’s Encrypt, HashiCorp Vault, Venafi, a simple signing key pair, self +signed or external issuers. 
It ensures certificates are valid and up to +date, and attempt to renew certificates at a configured time before expiry. + +Installation steps +------------------ + +The recommended version of Cert-Manager for Kubernetes 1.19 is v1.5.4. +Cert-Manager is deployed using regular YAML manifests which include all +the needed resources (the CustomResourceDefinitions, cert-manager, +namespace, and the webhook component). + +Full installation instructions, including details on how to configure extra +functionality in Cert-Manager can be found in the +`Cert-Manager Installation documentation`_. + +There is also a kubectl plugin (kubectl cert-manager) that can help you +to manage cert-manager resources inside your cluster. For installation +steps, please refer to `Cert-Manager kubectl plugin documentation`_. + +Installation can be as simple as:: + + > kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml + +Prometheus Stack (optional) +=========================== + +Prometheus is an open-source systems monitoring and alerting toolkit with +an active ecosystem. + +Kube Prometheus Stack is a collection of Kubernetes manifests, Grafana +dashboards, and Prometheus rules combined with documentation and scripts to +provide easy to operate end-to-end Kubernetes cluster monitoring with +Prometheus using the Prometheus Operator. As it includes both Prometheus +Operator and Grafana dashboards, there is no need to set up them separately. + +Installation steps +------------------ + +The recommended version of kube-prometheus-stack chart for +Kubernetes 1.19 is 19.x (which is currently the latest major chart version), +for example 19.0.2. + +In order to install Prometheus Stack, you must follow these steps: + +- Create the namespace for Prometheus Stack:: + + > kubectl create namespace prometheus + +- Add the prometheus-community Helm repository:: + + > helm repo add prometheus-community https://prometheus-community.github.io/helm-charts + +- Update your local Helm chart repository cache:: + + > helm repo update + +- To install the kube-prometheus-stack Helm chart in latest version:: + + > helm install prometheus prometheus-community/kube-prometheus-stack --namespace=prometheus + + To install the kube-prometheus-stack Helm chart in specific version, for example 19.0.2:: + + > helm install prometheus prometheus-community/kube-prometheus-stack --namespace=prometheus --version=19.0.2 diff --git a/docs/archived/oom_user_guide.rst b/docs/archived/oom_user_guide.rst new file mode 100644 index 0000000000..2ff74b5898 --- /dev/null +++ b/docs/archived/oom_user_guide.rst @@ -0,0 +1,798 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright 2018-2021 Amdocs, Bell Canada, Orange, Samsung, Nordix Foundation +.. _oom_user_guide: + +.. Links +.. _Curated applications for Kubernetes: https://github.com/kubernetes/charts +.. _Services: https://kubernetes.io/docs/concepts/services-networking/service/ +.. _ReplicaSet: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/ +.. _StatefulSet: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ +.. _Helm Documentation: https://docs.helm.sh/helm/ +.. _Helm: https://docs.helm.sh/ +.. _Kubernetes: https://Kubernetes.io/ +.. _Kubernetes LoadBalancer: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer +.. 
_user-guide-label: + +OOM User Guide +############## + +The ONAP Operations Manager (OOM) provide the ability to manage the entire +life-cycle of an ONAP installation, from the initial deployment to final +decommissioning. This guide provides instructions for users of ONAP to +use the Kubernetes_/Helm_ system as a complete ONAP management system. + +This guide provides many examples of Helm command line operations. For a +complete description of these commands please refer to the `Helm +Documentation`_. + +.. figure:: images/oom_logo/oomLogoV2-medium.png + :align: right + +The following sections describe the life-cycle operations: + +- Deploy_ - with built-in component dependency management +- Configure_ - unified configuration across all ONAP components +- Monitor_ - real-time health monitoring feeding to a Consul UI and Kubernetes +- Heal_- failed ONAP containers are recreated automatically +- Scale_ - cluster ONAP services to enable seamless scaling +- Upgrade_ - change-out containers or configuration with little or no service + impact +- Delete_ - cleanup individual containers or entire deployments + +.. figure:: images/oom_logo/oomLogoV2-Deploy.png + :align: right + +Deploy +====== + +The OOM team with assistance from the ONAP project teams, have built a +comprehensive set of Helm charts, yaml files very similar to TOSCA files, that +describe the composition of each of the ONAP components and the relationship +within and between components. Using this model Helm is able to deploy all of +ONAP with a few simple commands. + +Pre-requisites +-------------- +Your environment must have the Kubernetes `kubectl` with Strimzi Apache Kafka, Cert-Manager +and Helm setup as a one time activity. + +Install Kubectl +~~~~~~~~~~~~~~~ +Enter the following to install kubectl (on Ubuntu, there are slight differences +on other O/Ss), the Kubernetes command line interface used to manage a +Kubernetes cluster:: + + > curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.19.11/bin/linux/amd64/kubectl + > chmod +x ./kubectl + > sudo mv ./kubectl /usr/local/bin/kubectl + > mkdir ~/.kube + +Paste kubectl config from Rancher (see the :ref:`cloud-setup-guide-label` for +alternative Kubernetes environment setups) into the `~/.kube/config` file. + +Verify that the Kubernetes config is correct:: + + > kubectl get pods --all-namespaces + +At this point you should see Kubernetes pods running. + +Install Helm +~~~~~~~~~~~~ +Helm is used by OOM for package and configuration management. To install Helm, +enter the following:: + + > wget https://get.helm.sh/helm-v3.6.3-linux-amd64.tar.gz + > tar -zxvf helm-v3.6.3-linux-amd64.tar.gz + > sudo mv linux-amd64/helm /usr/local/bin/helm + +Verify the Helm version with:: + + > helm version + +Install Strimzi Apache Kafka Operator +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Details on how to install Strimzi Apache Kafka can be found +:doc:`here `. + +Install Cert-Manager +~~~~~~~~~~~~~~~~~~~~ +Details on how to install Cert-Manager can be found +:doc:`here `. + +Install the Helm Repo +--------------------- +Once kubectl and Helm are setup, one needs to setup a local Helm server to +server up the ONAP charts:: + + > helm install osn/onap + +.. note:: + The osn repo is not currently available so creation of a local repository is + required. 
+ +Helm is able to use charts served up from a repository and comes setup with a +default CNCF provided `Curated applications for Kubernetes`_ repository called +stable which should be removed to avoid confusion:: + + > helm repo remove stable + +.. To setup the Open Source Networking Nexus repository for helm enter:: +.. > helm repo add osn 'https://nexus3.onap.org:10001/helm/helm-repo-in-nexus/master/' + +To prepare your system for an installation of ONAP, you'll need to:: + + > git clone -b kohn --recurse-submodules -j2 http://gerrit.onap.org/r/oom + > cd oom/kubernetes + + +To install a local Helm server:: + + > curl -LO https://s3.amazonaws.com/chartmuseum/release/latest/bin/linux/amd64/chartmuseum + > chmod +x ./chartmuseum + > mv ./chartmuseum /usr/local/bin + +To setup a local Helm server to server up the ONAP charts:: + + > mkdir -p ~/helm3-storage + > chartmuseum --storage local --storage-local-rootdir ~/helm3-storage -port 8879 & + +Note the port number that is listed and use it in the Helm repo add as +follows:: + + > helm repo add local http://127.0.0.1:8879 + +To get a list of all of the available Helm chart repositories:: + + > helm repo list + NAME URL + local http://127.0.0.1:8879 + +Then build your local Helm repository:: + + > make SKIP_LINT=TRUE [HELM_BIN=] all + +`HELM_BIN` + Sets the helm binary to be used. The default value use helm from PATH + +The Helm search command reads through all of the repositories configured on the +system, and looks for matches:: + + > helm search repo local + NAME VERSION DESCRIPTION + local/appc 11.0.0 Application Controller + local/clamp 11.0.0 ONAP Clamp + local/common 11.0.0 Common templates for inclusion in other charts + local/onap 11.0.0 Open Network Automation Platform (ONAP) + local/robot 11.0.0 A helm Chart for kubernetes-ONAP Robot + local/so 11.0.0 ONAP Service Orchestrator + +In any case, setup of the Helm repository is a one time activity. + +Next, install Helm Plugins required to deploy the ONAP release:: + + > cp -R ~/oom/kubernetes/helm/plugins/ ~/.local/share/helm/plugins + +Once the repo is setup, installation of ONAP can be done with a single +command:: + + > helm deploy development local/onap --namespace onap --set global.masterPassword=password + +This will install ONAP from a local repository in a 'development' Helm release. +As described below, to override the default configuration values provided by +OOM, an environment file can be provided on the command line as follows:: + + + + > helm deploy development local/onap --namespace onap -f overrides.yaml --set global.masterPassword=password + +.. note:: + Refer the Configure_ section on how to update overrides.yaml and values.yaml + +To get a summary of the status of all of the pods (containers) running in your +deployment:: + + > kubectl get pods --namespace onap -o=wide + +.. note:: + The Kubernetes namespace concept allows for multiple instances of a component + (such as all of ONAP) to co-exist with other components in the same + Kubernetes cluster by isolating them entirely. Namespaces share only the + hosts that form the cluster thus providing isolation between production and + development systems as an example. + +.. note:: + The Helm `--name` option refers to a release name and not a Kubernetes namespace. + + +To install a specific version of a single ONAP component (`so` in this example) +with the given release name enter:: + + > helm deploy so onap/so --version 11.0.0 --set global.masterPassword=password --set global.flavor=unlimited --namespace onap + +.. 
note:: + The dependent components should be installed for component being installed + + +To display details of a specific resource or group of resources type:: + + > kubectl describe pod so-1071802958-6twbl + +where the pod identifier refers to the auto-generated pod identifier. + +.. figure:: images/oom_logo/oomLogoV2-Configure.png + :align: right + +Configure +========= + +Each project within ONAP has its own configuration data generally consisting +of: environment variables, configuration files, and database initial values. +Many technologies are used across the projects resulting in significant +operational complexity and an inability to apply global parameters across the +entire ONAP deployment. OOM solves this problem by introducing a common +configuration technology, Helm charts, that provide a hierarchical +configuration with the ability to override values with higher +level charts or command line options. + +The structure of the configuration of ONAP is shown in the following diagram. +Note that key/value pairs of a parent will always take precedence over those +of a child. Also note that values set on the command line have the highest +precedence of all. + +.. graphviz:: + + digraph config { + { + node [shape=folder] + oValues [label="values.yaml"] + demo [label="onap-demo.yaml"] + prod [label="onap-production.yaml"] + oReq [label="Chart.yaml"] + soValues [label="values.yaml"] + soReq [label="Chart.yaml"] + mdValues [label="values.yaml"] + } + { + oResources [label="resources"] + } + onap -> oResources + onap -> oValues + oResources -> environments + oResources -> oReq + oReq -> so + environments -> demo + environments -> prod + so -> soValues + so -> soReq + so -> charts + charts -> mariadb + mariadb -> mdValues + + } + +The top level onap/values.yaml file contains the values required to be set +before deploying ONAP. Here is the contents of this file: + +.. include:: ../kubernetes/onap/values.yaml + :code: yaml + +One may wish to create a value file that is specific to a given deployment such +that it can be differentiated from other deployments. For example, a +onap-development.yaml file may create a minimal environment for development +while onap-production.yaml might describe a production deployment that operates +independently of the developer version. + +For example, if the production OpenStack instance was different from a +developer's instance, the onap-production.yaml file may contain a different +value for the vnfDeployment/openstack/oam_network_cidr key as shown below. + +.. code-block:: yaml + + nsPrefix: onap + nodePortPrefix: 302 + apps: consul msb mso message-router sdnc vid robot portal policy appc aai + sdc dcaegen2 log cli multicloud clamp vnfsdk aaf kube2msb + dataRootDir: /dockerdata-nfs + + # docker repositories + repository: + onap: nexus3.onap.org:10001 + oom: oomk8s + aai: aaionap + filebeat: docker.elastic.co + + image: + pullPolicy: Never + + # vnf deployment environment + vnfDeployment: + openstack: + ubuntu_14_image: "Ubuntu_14.04.5_LTS" + public_net_id: "e8f51956-00dd-4425-af36-045716781ffc" + oam_network_id: "d4769dfb-c9e4-4f72-b3d6-1d18f4ac4ee6" + oam_subnet_id: "191f7580-acf6-4c2b-8ec0-ba7d99b3bc4e" + oam_network_cidr: "192.168.30.0/24" + <...> + + +To deploy ONAP with this environment file, enter:: + + > helm deploy local/onap -n onap -f onap/resources/environments/onap-production.yaml --set global.masterPassword=password + +.. 
include:: yaml/environments_onap_demo.yaml
+  :code: yaml
+
+When deploying all of ONAP, the dependencies section of the Chart.yaml file
+controls which ONAP components are included and at what version.
+Here is an excerpt of this file:
+
+.. code-block:: yaml
+
+  dependencies:
+  <...>
+    - name: so
+      version: ~11.0.0
+      repository: '@local'
+      condition: so.enabled
+  <...>
+
+The ~ operator in the `so` version value indicates that the latest "11.X.X"
+version of `so` shall be used, thus allowing for minor upgrades that don't
+impact the so API; hence, version 11.0.1 would be installed in this case.
+
+The onap/resources/environments/dev.yaml file (see the excerpt below) enables
+fine-grained control over which components are included as part of this
+deployment. By changing this `so` line to `enabled: false` the `so` component
+will not be deployed. If this change is part of an upgrade the existing `so`
+component will be shut down. Other `so` parameters and even `so` child values
+can be modified, for example the `so`'s `liveness` probe could be disabled
+(which is not recommended as this change would disable auto-healing of `so`).
+
+.. code-block:: yaml
+
+  #################################################################
+  # Global configuration overrides.
+  #
+  # These overrides will affect all helm charts (ie. applications)
+  # that are listed below and are 'enabled'.
+  #################################################################
+  global:
+    <...>
+
+  #################################################################
+  # Enable/disable and configure helm charts (ie. applications)
+  # to customize the ONAP deployment.
+  #################################################################
+  aaf:
+    enabled: false
+  <...>
+  so: # Service Orchestrator
+    enabled: true
+
+    replicaCount: 1
+
+    liveness:
+      # necessary to disable liveness probe when setting breakpoints
+      # in debugger so K8s doesn't restart unresponsive container
+      enabled: true
+
+    <...>
+
+Accessing the ONAP Portal using OOM and a Kubernetes Cluster
+------------------------------------------------------------
+
+The ONAP deployment created by OOM operates in a private IP network that isn't
+publicly accessible (i.e. OpenStack VMs with a private internal network), which
+blocks access to the ONAP Portal. To enable direct access to this Portal from a
+user's own environment (a laptop etc.) the portal application's port 8989 is
+exposed through a `Kubernetes LoadBalancer`_ object.
+
+Typically, to be able to access the Kubernetes nodes publicly a public address
+is assigned. In OpenStack this is a floating IP address.
+
+When the `portal-app` chart is deployed a Kubernetes service is created that
+instantiates a load balancer. The LB chooses the private interface of one of
+the nodes as in the example below (10.0.0.4 is private to the K8s cluster only).
+Then to be able to access the portal on port 8989 from outside the K8s &
+OpenStack environment, the user needs to assign/get the floating IP address that
+corresponds to the private IP as follows::
+
+  > kubectl -n onap get services|grep "portal-app"
+  portal-app  LoadBalancer  10.43.142.201  10.0.0.4  8989:30215/TCP,8006:30213/TCP,8010:30214/TCP  1d  app=portal-app,release=dev
+
+In this example, use the 10.0.0.4 private address as a key to find the
+corresponding public address, which in this example is 10.12.6.155. If you're
+using OpenStack you'll do the lookup with the horizon GUI or the OpenStack CLI
+for your tenant (openstack server list).
That IP is then used in your +`/etc/hosts` to map the fixed DNS aliases required by the ONAP Portal as shown +below:: + + 10.12.6.155 portal.api.simpledemo.onap.org + 10.12.6.155 vid.api.simpledemo.onap.org + 10.12.6.155 sdc.api.fe.simpledemo.onap.org + 10.12.6.155 sdc.workflow.plugin.simpledemo.onap.org + 10.12.6.155 sdc.dcae.plugin.simpledemo.onap.org + 10.12.6.155 portal-sdk.simpledemo.onap.org + 10.12.6.155 policy.api.simpledemo.onap.org + 10.12.6.155 aai.api.sparky.simpledemo.onap.org + 10.12.6.155 cli.api.simpledemo.onap.org + 10.12.6.155 msb.api.discovery.simpledemo.onap.org + 10.12.6.155 msb.api.simpledemo.onap.org + 10.12.6.155 clamp.api.simpledemo.onap.org + 10.12.6.155 so.api.simpledemo.onap.org + 10.12.6.155 sdc.workflow.plugin.simpledemo.onap.org + +Ensure you've disabled any proxy settings the browser you are using to access +the portal and then simply access now the new ssl-encrypted URL: +``https://portal.api.simpledemo.onap.org:30225/ONAPPORTAL/login.htm`` + +.. note:: + Using the HTTPS based Portal URL the Browser needs to be configured to accept + unsecure credentials. + Additionally when opening an Application inside the Portal, the Browser + might block the content, which requires to disable the blocking and reloading + of the page + +.. note:: + Besides the ONAP Portal the Components can deliver additional user interfaces, + please check the Component specific documentation. + +.. note:: + + | Alternatives Considered: + + - Kubernetes port forwarding was considered but discarded as it would + require the end user to run a script that opens up port forwarding tunnels + to each of the pods that provides a portal application widget. + + - Reverting to a VNC server similar to what was deployed in the Amsterdam + release was also considered but there were many issues with resolution, + lack of volume mount, /etc/hosts dynamic update, file upload that were + a tall order to solve in time for the Beijing release. + + Observations: + + - If you are not using floating IPs in your Kubernetes deployment and + directly attaching a public IP address (i.e. by using your public provider + network) to your K8S Node VMs' network interface, then the output of + 'kubectl -n onap get services | grep "portal-app"' + will show your public IP instead of the private network's IP. Therefore, + you can grab this public IP directly (as compared to trying to find the + floating IP first) and map this IP in /etc/hosts. + +.. figure:: images/oom_logo/oomLogoV2-Monitor.png + :align: right + +Monitor +======= + +All highly available systems include at least one facility to monitor the +health of components within the system. Such health monitors are often used as +inputs to distributed coordination systems (such as etcd, Zookeeper, or Consul) +and monitoring systems (such as Nagios or Zabbix). OOM provides two mechanisms +to monitor the real-time health of an ONAP deployment: + +- a Consul GUI for a human operator or downstream monitoring systems and + Kubernetes liveness probes that enable automatic healing of failed + containers, and +- a set of liveness probes which feed into the Kubernetes manager which + are described in the Heal section. + +Within ONAP, Consul is the monitoring system of choice and deployed by OOM in +two parts: + +- a three-way, centralized Consul server cluster is deployed as a highly + available monitor of all of the ONAP components, and +- a number of Consul agents. 
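+
+A quick way to confirm that these Consul servers and agents are running is to
+list the Consul pods and services in the deployment namespace. This is a
+minimal sketch that assumes the default ``onap`` namespace and the standard
+Consul chart naming; adjust the filter to match your deployment::
+
+  > kubectl get pods -n onap | grep consul
+  > kubectl get svc -n onap | grep consul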
+ +The Consul server provides a user interface that allows a user to graphically +view the current health status of all of the ONAP components for which agents +have been created - a sample from the ONAP Integration labs follows: + +.. figure:: images/consul/consulHealth.png + :align: center + +To see the real-time health of a deployment go to: ``http://:30270/ui/`` +where a GUI much like the following will be found: + +.. note:: + If Consul GUI is not accessible, you can refer this + `kubectl port-forward `_ method to access an application + +.. figure:: images/oom_logo/oomLogoV2-Heal.png + :align: right + +Heal +==== + +The ONAP deployment is defined by Helm charts as mentioned earlier. These Helm +charts are also used to implement automatic recoverability of ONAP components +when individual components fail. Once ONAP is deployed, a "liveness" probe +starts checking the health of the components after a specified startup time. + +Should a liveness probe indicate a failed container it will be terminated and a +replacement will be started in its place - containers are ephemeral. Should the +deployment specification indicate that there are one or more dependencies to +this container or component (for example a dependency on a database) the +dependency will be satisfied before the replacement container/component is +started. This mechanism ensures that, after a failure, all of the ONAP +components restart successfully. + +To test healing, the following command can be used to delete a pod:: + + > kubectl delete pod [pod name] -n [pod namespace] + +One could then use the following command to monitor the pods and observe the +pod being terminated and the service being automatically healed with the +creation of a replacement pod:: + + > kubectl get pods --all-namespaces -o=wide + +.. figure:: images/oom_logo/oomLogoV2-Scale.png + :align: right + +Scale +===== + +Many of the ONAP components are horizontally scalable which allows them to +adapt to expected offered load. During the Beijing release scaling is static, +that is during deployment or upgrade a cluster size is defined and this cluster +will be maintained even in the presence of faults. The parameter that controls +the cluster size of a given component is found in the values.yaml file for that +component. Here is an excerpt that shows this parameter: + +.. code-block:: yaml + + # default number of instances + replicaCount: 1 + +In order to change the size of a cluster, an operator could use a helm upgrade +(described in detail in the next section) as follows:: + + > helm upgrade [RELEASE] [CHART] [flags] + +The RELEASE argument can be obtained from the following command:: + + > helm list + +Below is the example for the same:: + + > helm list + NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE + dev 1 Wed Oct 14 13:49:52 2020 DEPLOYED onap-11.0.0 Kohn onap + dev-cassandra 5 Thu Oct 15 14:45:34 2020 DEPLOYED cassandra-11.0.0 onap + dev-contrib 1 Wed Oct 14 13:52:53 2020 DEPLOYED contrib-11.0.0 onap + dev-mariadb-galera 1 Wed Oct 14 13:55:56 2020 DEPLOYED mariadb-galera-11.0.0 onap + +Here the Name column shows the RELEASE NAME, In our case we want to try the +scale operation on cassandra, thus the RELEASE NAME would be dev-cassandra. + +Now we need to obtain the chart name for cassandra. 
Use the below
+command to get the chart name::
+
+  > helm search repo cassandra
+
+Below is an example of the output::
+
+  > helm search repo cassandra
+  NAME                    CHART VERSION  APP VERSION  DESCRIPTION
+  local/cassandra         11.0.0                      ONAP cassandra
+  local/portal-cassandra  11.0.0                      Portal cassandra
+  local/aaf-cass          11.0.0                      ONAP AAF cassandra
+  local/sdc-cs            11.0.0                      ONAP Service Design and Creation Cassandra
+
+Here the NAME column shows the chart name. As we want to perform the scale
+operation on cassandra, the corresponding chart name is local/cassandra.
+
+Now that we have both of the command's arguments, we can perform the
+scale operation for cassandra as follows::
+
+  > helm upgrade dev-cassandra local/cassandra --set replicaCount=3
+
+Using this command we can scale up or scale down the cassandra db instances.
+
+The ONAP components use Kubernetes provided facilities to build clustered,
+highly available systems including: Services_ with load-balancers, ReplicaSet_,
+and StatefulSet_. Some of the open-source projects used by the ONAP components
+directly support clustered configurations, for example ODL and MariaDB Galera.
+
+The Kubernetes Services_ abstraction provides a consistent access point for
+each of the ONAP components, independent of the pod or container architecture
+of that component. For example, SDN-C uses OpenDaylight clustering with a
+default cluster size of three, but uses a Kubernetes service to abstract this
+cluster from the other ONAP components, such that the number of pods in the
+cluster can change while this change is isolated from the other ONAP components
+by the load-balancer implemented in the ODL service abstraction.
+
+A ReplicaSet_ is a construct that is used to describe the desired state of the
+cluster. For example 'replicas: 3' indicates to Kubernetes that a cluster of 3
+instances is the desired state. Should one of the members of the cluster fail,
+a new member will be automatically started to replace it.
+
+Some of the ONAP components may need a more deterministic deployment; for
+example to enable intra-cluster communication. For these applications the
+component can be deployed as a Kubernetes StatefulSet_ which will maintain a
+persistent identifier for the pods and thus a stable network id for the pods.
+For example: the pod names might be web-0, web-1, web-{N-1} for N 'web' pods
+with corresponding DNS entries such that intra-service communication is simple
+even if the pods are physically distributed across multiple nodes. An example
+of how these capabilities can be used is described in the Running Consul on
+Kubernetes tutorial.
+
+.. figure:: images/oom_logo/oomLogoV2-Upgrade.png
+   :align: right
+
+Upgrade
+=======
+
+Helm has built-in capabilities to enable the upgrade of pods without causing a
+loss of the service being provided by that pod or pods (if configured as a
+cluster). As described in the OOM Developer's Guide, ONAP components provide
+an abstracted 'service' end point with the pods or containers providing this
+service hidden from other ONAP components by a load balancer. This capability
+is used during upgrades to allow a pod with a new image to be added to the
+service before removing the pod with the old image. This 'make before break'
+capability ensures minimal downtime.
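+
+Before initiating an actual upgrade, the effect of a chart or configuration
+change can be previewed without modifying the running release. The sketch
+below uses standard Helm 3 flags; the release name `so` and the chart/version
+are illustrative and should be replaced with those of your own deployment::
+
+  > helm get values so
+  > helm upgrade so onap/so --version 11.0.1 --dry-run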
+
+Prior to doing an upgrade, determine the status of the deployed charts::
+
+  > helm list
+  NAME  REVISION  UPDATED                  STATUS    CHART      NAMESPACE
+  so    1         Mon Feb 5 10:05:22 2020  DEPLOYED  so-11.0.0  onap
+
+When upgrading a cluster a parameter controls the minimum size of the cluster
+during the upgrade while another parameter controls the maximum number of nodes
+in the cluster. For example, SDNC configured as a 3-way ODL cluster might
+require that during the upgrade no fewer than 2 pods are available at all times
+to provide service while no more than 5 pods are ever deployed across the two
+versions at any one time to avoid depleting the cluster of resources. In this
+scenario, the SDNC cluster would start with 3 old pods then Kubernetes may add
+a new pod (3 old, 1 new), delete one old (2 old, 1 new), add two new pods (2
+old, 3 new) and finally delete the 2 old pods (3 new). During this sequence
+the constraints of the minimum of two pods and maximum of five would be
+maintained while providing service the whole time.
+
+Initiation of an upgrade is triggered by changes in the Helm charts. For
+example, if the image specified for one of the pods in the SDNC deployment
+specification were to change (i.e. point to a new Docker image in the nexus3
+repository - commonly through the change of a deployment variable), the
+sequence of events described in the previous paragraph would be initiated.
+
+For example, to upgrade a container by changing configuration, specifically an
+environment value::
+
+  > helm upgrade so onap/so --version 11.0.1 --set enableDebug=true
+
+Issuing this command will result in the appropriate container being stopped by
+Kubernetes and replaced with a new container with the new environment value.
+
+To upgrade a component to a new version with a new configuration file enter::
+
+  > helm upgrade so onap/so --version 11.0.1 -f environments/demo.yaml
+
+To fetch release history enter::
+
+  > helm history so
+  REVISION  UPDATED                  STATUS      CHART      DESCRIPTION
+  1         Mon Jul 5 10:05:22 2022  SUPERSEDED  so-11.0.0  Install complete
+  2         Mon Jul 5 10:10:55 2022  DEPLOYED    so-11.0.1  Upgrade complete
+
+Unfortunately, not all upgrades are successful. In recognition of this the
+lineup of pods within an ONAP deployment is tagged such that an administrator
+may force the ONAP deployment back to the previously tagged configuration or to
+a specific configuration, say to jump back two steps if an incompatibility
+between two ONAP components is discovered after the two individual upgrades
+succeeded.
+
+This rollback functionality gives the administrator confidence that in the
+unfortunate circumstance of a failed upgrade the system can be rapidly brought
+back to a known good state. This process of rolling upgrades while under
+service is illustrated in this short YouTube video showing a Zero Downtime
+Upgrade of a web application while under a 10 million transaction per second
+load.
+
+For example, to roll back to the previous system revision enter::
+
+  > helm rollback so 1
+
+  > helm history so
+  REVISION  UPDATED                  STATUS      CHART      DESCRIPTION
+  1         Mon Jul 5 10:05:22 2022  SUPERSEDED  so-11.0.0  Install complete
+  2         Mon Jul 5 10:10:55 2022  SUPERSEDED  so-11.0.1  Upgrade complete
+  3         Mon Jul 5 10:14:32 2022  DEPLOYED    so-11.0.0  Rollback to 1
+
+.. note::
+
+  The description field can be overridden to document actions taken or include
+  tracking numbers.
+
+Many of the ONAP components contain their own databases which are used to
+record configuration or state information.
The schemas of these databases may +change from version to version in such a way that data stored within the +database needs to be migrated between versions. If such a migration script is +available it can be invoked during the upgrade (or rollback) by Container +Lifecycle Hooks. Two such hooks are available, PostStart and PreStop, which +containers can access by registering a handler against one or both. Note that +it is the responsibility of the ONAP component owners to implement the hook +handlers - which could be a shell script or a call to a specific container HTTP +endpoint - following the guidelines listed on the Kubernetes site. Lifecycle +hooks are not restricted to database migration or even upgrades but can be used +anywhere specific operations need to be taken during lifecycle operations. + +OOM uses Helm K8S package manager to deploy ONAP components. Each component is +arranged in a packaging format called a chart - a collection of files that +describe a set of k8s resources. Helm allows for rolling upgrades of the ONAP +component deployed. To upgrade a component Helm release you will need an +updated Helm chart. The chart might have modified, deleted or added values, +deployment yamls, and more. To get the release name use:: + + > helm ls + +To easily upgrade the release use:: + + > helm upgrade [RELEASE] [CHART] + +To roll back to a previous release version use:: + + > helm rollback [flags] [RELEASE] [REVISION] + +For example, to upgrade the onap-so helm release to the latest SO container +release v1.1.2: + +- Edit so values.yaml which is part of the chart +- Change "so: nexus3.onap.org:10001/openecomp/so:v1.1.1" to + "so: nexus3.onap.org:10001/openecomp/so:v1.1.2" +- From the chart location run:: + + > helm upgrade onap-so + +The previous so pod will be terminated and a new so pod with an updated so +container will be created. + +.. figure:: images/oom_logo/oomLogoV2-Delete.png + :align: right + +Delete +====== + +Existing deployments can be partially or fully removed once they are no longer +needed. To minimize errors it is recommended that before deleting components +from a running deployment the operator perform a 'dry-run' to display exactly +what will happen with a given command prior to actually deleting anything. +For example:: + + > helm undeploy onap --dry-run + +will display the outcome of deleting the 'onap' release from the +deployment. +To completely delete a release and remove it from the internal store enter:: + + > helm undeploy onap + +Once complete undeploy is done then delete the namespace as well +using following command:: + + > kubectl delete namespace + +.. note:: + You need to provide the namespace name which you used during deployment, + below is the example:: + + > kubectl delete namespace onap + +One can also remove individual components from a deployment by changing the +ONAP configuration values. For example, to remove `so` from a running +deployment enter:: + + > helm undeploy onap-so + +will remove `so` as the configuration indicates it's no longer part of the +deployment. This might be useful if a one wanted to replace just `so` by +installing a custom version. diff --git a/docs/archived/shell/master_nfs_node.sh b/docs/archived/shell/master_nfs_node.sh new file mode 100644 index 0000000000..32574c9f29 --- /dev/null +++ b/docs/archived/shell/master_nfs_node.sh @@ -0,0 +1,32 @@ +#!/bin/sh + +usage () { + echo "Usage:" + echo " ./$(basename $0) node1_ip node2_ip ... 
nodeN_ip" + exit 1 +} + +if [ "$#" -lt 1 ]; then + echo "Missing NFS slave nodes" + usage +fi + +#Install NFS kernel +sudo apt-get update +sudo apt-get install -y nfs-kernel-server + +#Create /dockerdata-nfs and set permissions +sudo mkdir -p /dockerdata-nfs +sudo chmod 777 -R /dockerdata-nfs +sudo chown nobody:nogroup /dockerdata-nfs/ + +#Update the /etc/exports +NFS_EXP="" +for i in $@; do + NFS_EXP="${NFS_EXP}$i(rw,sync,no_root_squash,no_subtree_check) " +done +echo "/dockerdata-nfs "$NFS_EXP | sudo tee -a /etc/exports + +#Restart the NFS service +sudo exportfs -a +sudo systemctl restart nfs-kernel-server diff --git a/docs/archived/shell/openstack-k8s-controlnode.sh b/docs/archived/shell/openstack-k8s-controlnode.sh new file mode 100644 index 0000000000..d1515a7e5f --- /dev/null +++ b/docs/archived/shell/openstack-k8s-controlnode.sh @@ -0,0 +1,36 @@ +#!/bin/sh + +DOCKER_VERSION=18.09.5 + +apt-get update + +curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh +mkdir -p /etc/systemd/system/docker.service.d/ +cat > /etc/systemd/system/docker.service.d/docker.conf << EOF +[Service] +ExecStart= +ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001 +EOF + +sudo usermod -aG docker ubuntu + +systemctl daemon-reload +systemctl restart docker +apt-mark hold docker-ce + +IP_ADDR=$(ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}') +HOST_NAME=$(hostname) + +echo "$IP_ADDR $HOST_NAME" >> /etc/hosts + +docker login -u docker -p docker nexus3.onap.org:10001 + +sudo apt-get install make -y + +#nfs server +sudo apt-get install nfs-kernel-server -y +sudo mkdir -p /dockerdata-nfs +sudo chown nobody:nogroup /dockerdata-nfs/ + + +exit 0 diff --git a/docs/archived/shell/openstack-k8s-workernode.sh b/docs/archived/shell/openstack-k8s-workernode.sh new file mode 100644 index 0000000000..8b1b9e41ee --- /dev/null +++ b/docs/archived/shell/openstack-k8s-workernode.sh @@ -0,0 +1,34 @@ +#!/bin/sh + +DOCKER_VERSION=18.09.5 + +apt-get update + +curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh +mkdir -p /etc/systemd/system/docker.service.d/ +cat > /etc/systemd/system/docker.service.d/docker.conf << EOF +[Service] +ExecStart= +ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001 +EOF + +sudo usermod -aG docker ubuntu + +systemctl daemon-reload +systemctl restart docker +apt-mark hold docker-ce + +IP_ADDR=$(ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}') +HOST_NAME=$(hostname) + +echo "$IP_ADDR $HOST_NAME" >> /etc/hosts + +docker login -u docker -p docker nexus3.onap.org:10001 + +sudo apt-get install make -y + +# install nfs +sudo apt-get install nfs-common -y + + +exit 0 diff --git a/docs/archived/shell/openstack-nfs-server.sh b/docs/archived/shell/openstack-nfs-server.sh new file mode 100644 index 0000000000..395d04f27c --- /dev/null +++ b/docs/archived/shell/openstack-nfs-server.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +apt-get update + +IP_ADDR=$(ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}') +HOST_NAME=$(hostname) + +echo "$IP_ADDR $HOST_NAME" >> /etc/hosts + +sudo apt-get install make -y + +# nfs server +sudo apt-get install nfs-kernel-server -y + +sudo mkdir -p /nfs_share +sudo chown nobody:nogroup /nfs_share/ + +exit 0 diff --git a/docs/archived/shell/slave_nfs_node.sh b/docs/archived/shell/slave_nfs_node.sh new file mode 100644 index 0000000000..1035ff5ad6 --- /dev/null +++ b/docs/archived/shell/slave_nfs_node.sh @@ -0,0 +1,25 @@ +#!/bin/sh + +usage () { + 
echo "Usage:" + echo " ./$(basename $0) nfs_master_ip" + exit 1 +} + +if [ "$#" -ne 1 ]; then + echo "Missing NFS mater node" + usage +fi + +MASTER_IP=$1 + +#Install NFS common +sudo apt-get update +sudo apt-get install -y nfs-common + +#Create NFS directory +sudo mkdir -p /dockerdata-nfs + +#Mount the remote NFS directory to the local one +sudo mount $MASTER_IP:/dockerdata-nfs /dockerdata-nfs/ +echo "$MASTER_IP:/dockerdata-nfs /dockerdata-nfs nfs auto,nofail,noatime,nolock,intr,tcp,actimeo=1800 0 0" | sudo tee -a /etc/fstab diff --git a/docs/archived/yaml/cluster.yml b/docs/archived/yaml/cluster.yml new file mode 100644 index 0000000000..0757e15a28 --- /dev/null +++ b/docs/archived/yaml/cluster.yml @@ -0,0 +1,156 @@ +# An example of an HA Kubernetes cluster for ONAP +nodes: +- address: 10.12.6.85 + port: "22" + internal_address: 10.0.0.8 + role: + - controlplane + - etcd + hostname_override: "onap-control-1" + user: ubuntu + ssh_key_path: "~/.ssh/onap-key" +- address: 10.12.6.90 + port: "22" + internal_address: 10.0.0.11 + role: + - controlplane + - etcd + hostname_override: "onap-control-2" + user: ubuntu + ssh_key_path: "~/.ssh/onap-key" +- address: 10.12.6.89 + port: "22" + internal_address: 10.0.0.12 + role: + - controlplane + - etcd + hostname_override: "onap-control-3" + user: ubuntu + ssh_key_path: "~/.ssh/onap-key" +- address: 10.12.5.165 + port: "22" + internal_address: 10.0.0.14 + role: + - worker + hostname_override: "onap-k8s-1" + user: ubuntu + ssh_key_path: "~/.ssh/onap-key" +- address: 10.12.6.238 + port: "22" + internal_address: 10.0.0.26 + role: + - worker + hostname_override: "onap-k8s-2" + user: ubuntu + ssh_key_path: "~/.ssh/onap-key" +- address: 10.12.6.126 + port: "22" + internal_address: 10.0.0.5 + role: + - worker + hostname_override: "onap-k8s-3" + user: ubuntu + ssh_key_path: "~/.ssh/onap-key" +- address: 10.12.5.11 + port: "22" + internal_address: 10.0.0.6 + role: + - worker + hostname_override: "onap-k8s-4" + user: ubuntu + ssh_key_path: "~/.ssh/onap-key" +- address: 10.12.6.244 + port: "22" + internal_address: 10.0.0.9 + role: + - worker + hostname_override: "onap-k8s-5" + user: ubuntu + ssh_key_path: "~/.ssh/onap-key" +- address: 10.12.6.249 + port: "22" + internal_address: 10.0.0.17 + role: + - worker + hostname_override: "onap-k8s-6" + user: ubuntu + ssh_key_path: "~/.ssh/onap-key" +- address: 10.12.5.191 + port: "22" + internal_address: 10.0.0.20 + role: + - worker + hostname_override: "onap-k8s-7" + user: ubuntu + ssh_key_path: "~/.ssh/onap-key" +- address: 10.12.6.111 + port: "22" + internal_address: 10.0.0.10 + role: + - worker + hostname_override: "onap-k8s-8" + user: ubuntu + ssh_key_path: "~/.ssh/onap-key" +- address: 10.12.6.195 + port: "22" + internal_address: 10.0.0.4 + role: + - worker + hostname_override: "onap-k8s-9" + user: ubuntu + ssh_key_path: "~/.ssh/onap-key" +- address: 10.12.5.160 + port: "22" + internal_address: 10.0.0.16 + role: + - worker + hostname_override: "onap-k8s-10" + user: ubuntu + ssh_key_path: "~/.ssh/onap-key" +- address: 10.12.6.74 + port: "22" + internal_address: 10.0.0.18 + role: + - worker + hostname_override: "onap-k8s-11" + user: ubuntu + ssh_key_path: "~/.ssh/onap-key" +- address: 10.12.6.82 + port: "22" + internal_address: 10.0.0.7 + role: + - worker + hostname_override: "onap-k8s-12" + user: ubuntu + ssh_key_path: "~/.ssh/onap-key" +services: + kube-api: + service_cluster_ip_range: 10.43.0.0/16 + pod_security_policy: false + always_pull_images: false + kube-controller: + cluster_cidr: 10.42.0.0/16 + 
service_cluster_ip_range: 10.43.0.0/16 + kubelet: + cluster_domain: cluster.local + cluster_dns_server: 10.43.0.10 + fail_swap_on: false +network: + plugin: canal +authentication: + strategy: x509 +ssh_key_path: "~/.ssh/onap-key" +ssh_agent_auth: false +authorization: + mode: rbac +ignore_docker_version: false +kubernetes_version: "v1.15.11-rancher1-2" +private_registries: +- url: nexus3.onap.org:10001 + user: docker + password: docker + is_default: true +cluster_name: "onap" +restore: + restore: false + snapshot_name: "" diff --git a/docs/archived/yaml/example-integration-override-v3.yaml b/docs/archived/yaml/example-integration-override-v3.yaml new file mode 100644 index 0000000000..a55b1c08fc --- /dev/null +++ b/docs/archived/yaml/example-integration-override-v3.yaml @@ -0,0 +1,69 @@ +################################################################# +# This override file configures openstack parameters for ONAP +################################################################# +robot: + enabled: true + flavor: large + appcUsername: "appc@appc.onap.org" + appcPassword: "demo123456!" + # KEYSTONE Version 3 Required for Rocky and beyond + openStackKeystoneAPIVersion: "v3" + # OS_AUTH_URL without the /v3 from the openstack .RC file + openStackKeyStoneUrl: "http://10.12.25.2:5000" + # tenantID=`openstack project show $tenantName | grep -w id | awk '{print $4}'` + # where "tenantName" is OS_PROJECT_NAME from openstack .RC file + openStackTenantId: "09d8566ea45e43aa974cf447ed591d77" + # OS_USERNAME from the openstack .RC file + openStackUserName: "OS_USERNAME_HERE" + # OS_PROJECT_DOMAIN_ID from the openstack .RC file + # in some environments it is a string but in other environmens it may be a numeric + openStackDomainId: "default" + # OS_USER_DOMAIN_NAME from the openstack .RC file + openStackUserDomain: "Default" + openStackProjectName: "OPENSTACK_PROJECT_NAME_HERE" + ubuntu14Image: "ubuntu-14-04-cloud-amd64" + ubuntu16Image: "ubuntu-16-04-cloud-amd64" + # From openstack network list output + openStackPublicNetId: "971040b2-7059-49dc-b220-4fab50cb2ad4" + # From openstack network list output + openStackPrivateNetId: "83c84b68-80be-4990-8d7f-0220e3c6e5c8" + # From openstack network list output + openStackPrivateSubnetId: "e571c1d1-8ac0-4744-9b40-c3218d0a53a0" + openStackPrivateNetCidr: "10.0.0.0/16" + openStackOamNetworkCidrPrefix: "10.0" + # From openstack security group list output + openStackSecurityGroup: "bbe028dc-b64f-4f11-a10f-5c6d8d26dc89" + dcaeCollectorIp: "10.12.6.109" + # SSH public key + vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh" + demoArtifactsVersion: "1.4.0" + demoArtifactsRepoUrl: "https://nexus.onap.org/content/repositories/releases" + scriptVersion: "1.4.0" + # rancher node IP where RKE configired + rancherIpAddress: "10.12.6.160" + config: + # use the python utility to encrypt the OS_PASSWORD for the OS_USERNAME + openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_PYTHON_PASSWORD_HERE_XXXXXXXXXXXXXXXX" + openStackSoEncryptedPassword: "YYYYYYYYYYYYYYYYYYYYYYYY_OPENSTACK_JAVA_PASSWORD_HERE_YYYYYYYYYYYYYYYY" +so: + enabled: true + so-catalog-db-adapter: + config: + openStackUserName: "OS_USERNAME_HERE" + # 
OS_AUTH_URL (keep the /v3) from the openstack .RC file + openStackKeyStoneUrl: "http://10.12.25.2:5000/v3" + # use the SO Java utility to encrypt the OS_PASSWORD for the OS_USERNAME + openStackEncryptedPasswordHere: "YYYYYYYYYYYYYYYYYYYYYYYY_OPENSTACK_JAVA_PASSWORD_HERE_YYYYYYYYYYYYYYYY" +appc: + enabled: true + replicaCount: 3 + config: + enableClustering: true + openStackType: "OpenStackProvider" + openStackName: "OpenStack" + # OS_AUTH_URL from the openstack .RC file + openStackKeyStoneUrl: "http://10.12.25.2:5000/v3" + openStackServiceTenantName: "OPENSTACK_PROJECT_NAME_HERE" + openStackDomain: "OPEN_STACK_DOMAIN_NAME_HERE" + openStackUserName: "OS_USER_NAME_HERE" + openStackEncryptedPassword: "OPENSTACK_CLEAR_TEXT_PASSWORD_HERE" diff --git a/docs/archived/yaml/example-integration-override.yaml b/docs/archived/yaml/example-integration-override.yaml new file mode 100644 index 0000000000..5eeee5e2f5 --- /dev/null +++ b/docs/archived/yaml/example-integration-override.yaml @@ -0,0 +1,56 @@ +################################################################# +# This override file configures openstack parameters for ONAP +################################################################# +appc: + config: + enableClustering: false + openStackType: "OpenStackProvider" + openStackName: "OpenStack" + # OS_AUTH_URL from the openstack .RC file + openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0" + openStackServiceTenantName: "OPENSTACK_TENANTNAME_HERE" + # OS_USER_DOMAIN_NAME from the openstack .RC file + openStackDomain: "Default" + openStackUserName: "OPENSTACK_USERNAME_HERE" + openStackEncryptedPassword: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_PASSWORD_HERE_XXXXXXXXXXXXXXXX" +robot: + appcUsername: "appc@appc.onap.org" + appcPassword: "demo123456!" + # OS_AUTH_URL without the /v2.0 from the openstack .RC file + openStackKeyStoneUrl: "http://10.12.25.2:5000" + # From openstack network list output + openStackPublicNetId: "971040b2-7059-49dc-b220-4fab50cb2ad4" + # tenantID=`openstack project show $tenantName | grep -w id | awk '{print $4}'` + # where "tenantName" is OS_PROJECT_NAME from openstack .RC file + openStackTenantId: "09d8566ea45e43aa974cf447ed591d77" + openStackUserName: "OPENSTACK_USERNAME_HERE" + ubuntu14Image: "ubuntu-14-04-cloud-amd64" + ubuntu16Image: "ubuntu-16-04-cloud-amd64" + # From openstack network list output + openStackPrivateNetId: "c7824f00-bef7-4864-81b9-f6c3afabd313" + # From openstack network list output + openStackPrivateSubnetId: "2a0e8888-f93e-4615-8d28-fc3d4d087fc3" + openStackPrivateNetCidr: "10.0.0.0/16" + # From openstack security group list output + openStackSecurityGroup: "3a7a1e7e-6d15-4264-835d-fab1ae81e8b0" + openStackOamNetworkCidrPrefix: "10.0" + # Control node IP + dcaeCollectorIp: "10.12.6.88" + # SSH public key + vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh" + demoArtifactsVersion: "1.4.0-SNAPSHOT" + demoArtifactsRepoUrl: "https://nexus.onap.org/content/repositories/releases" + scriptVersion: "1.4.0-SNAPSHOT" + # rancher node IP where RKE configired + rancherIpAddress: "10.12.5.127" + config: + # openStackEncryptedPasswordHere should match the encrypted string used in SO and APPC and overridden per environment + 
openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HERE_XXXXXXXXXXXXXXXX" +so: + # so server configuration + so-catalog-db-adapter: + config: + openStackUserName: "OPENSTACK_USERNAME_HERE" + # OS_AUTH_URL from the openstack .RC file + openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0" + openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HERE_XXXXXXXXXXXXXXXX" diff --git a/docs/certs/hardcoded_certificates.csv b/docs/certs/hardcoded_certificates.csv deleted file mode 100644 index fbc7db3e11..0000000000 --- a/docs/certs/hardcoded_certificates.csv +++ /dev/null @@ -1,18 +0,0 @@ -Project,ONAP Certificate,Own Certificate,MSB Certificate,Path -AAF,No,Yes,No,aaf/charts/aaf-cert-service/resources/ -AAF,Yes,No,No,aaf/components/aaf-sms/resources/certs/intermediate_root_ca.pem -AAI,Yes,No,No,aai/oom/resources/config/aai/aai_keystore -AAI/SEARCH-DATA,Yes,No,No,aai/oom/components/aai-search-data/resources/config/auth/tomcat_keystore -AAI/SPARKY-BE,Yes,No,No,aai/oom/components/aai-spary-be/resources/config/auth/org.onap.aai.p12 -AAI/BABEL,No,Yes,No,aai/oom/components/aai-babel/resources/config/auth/tomcat_keystore -AAI/MODEL-LOADER,Yes,Yes,No,aai/oom/components/aai-model-loaderresources/config/auth/tomcat_keystore -APPC,Yes,No,No,kubernetes/appc/resources/config/certs/org.onap.appc.keyfile -APPC,Yes,No,No,kubernetes/appc/resources/config/certs/org.onap.appc.p12 -certInitializer,Yes,No,No,kubernetes/common/certInitializer/resources -DMaaP/MR,Yes,No,No,Hardcoded in container -HOLMES,No,Yes,No,Hardcoded in container -MULTICLOUD,No,Yes,No,Hardcoded in container -Robot,Yes,No,No,kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.* -SDC,Yes,No?,No?,kubernetes/sdc/resources/cert -VID,Yes,No,No,Hardcoded in container -UUI,No,Yes,No,Hardcoded in container diff --git a/docs/conf.py b/docs/conf.py index 3b28eb74a8..6e3da64513 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,15 +1,58 @@ -from docs_conf.conf import * +project = "onap" +release = "master" +version = "master" -branch = 'latest' -master_doc = 'index' +author = "Open Network Automation Platform" +# yamllint disable-line rule:line-length +copyright = "ONAP. Licensed under Creative Commons Attribution 4.0 International License" -linkcheck_ignore = [ - 'http://localhost', +pygments_style = "sphinx" +html_theme = "sphinx_rtd_theme" +html_theme_options = { + "style_nav_header_background": "white", + "sticky_navigation": "False" } +html_logo = "_static/logo_onap_2017.png" +html_favicon = "_static/favicon.ico" +html_static_path = ["_static"] +html_show_sphinx = False + +extensions = [ + 'sphinx.ext.intersphinx', + 'sphinx.ext.graphviz', + 'sphinxcontrib.blockdiag', + 'sphinxcontrib.seqdiag', + 'sphinxcontrib.swaggerdoc', + 'sphinxcontrib.plantuml', + 'sphinx_toolbox.collapse' ] +exclude_patterns = ['archived/*.rst', '.tox'] +# +# Map to 'latest' if this file is used in 'latest' (master) 'doc' branch. +# Change to {releasename} after you have created the new 'doc' branch. 
+# + +branch = 'latest' + intersphinx_mapping = {} +doc_url = 'https://docs.onap.org/projects' +master_doc = 'index' + +spelling_word_list_filename='spelling_wordlist.txt' +spelling_lang = "en_GB" + +# +# Example: +# intersphinx_mapping['onap-aai-aai-common'] = ('{}/onap-aai-aai-common/en/%s'.format(doc_url) % branch, None) +# html_last_updated_fmt = '%d-%b-%y %H:%M' + def setup(app): - app.add_css_file("css/ribbon_onap.css") + app.add_css_file("css/ribbon.css") + + +linkcheck_ignore = [ + r'http://localhost:\d+/' +] \ No newline at end of file diff --git a/docs/helm/helm-search.txt b/docs/helm/helm-search.txt deleted file mode 100644 index 75c274957b..0000000000 --- a/docs/helm/helm-search.txt +++ /dev/null @@ -1,42 +0,0 @@ -NAME CHART VERSION APP VERSION DESCRIPTION -local/onap 11.0.0 Kohn Open Network Automation Platform (ONAP) -local/aaf 11.0.0 ONAP Application Authorization Framework -local/aai 11.0.0 ONAP Active and Available Inventory -local/appc 11.0.0 Application Controller -local/cassandra 11.0.0 ONAP cassandra -local/cds 11.0.0 ONAP Controller Design Studio (CDS) -local/clamp 11.0.0 ONAP Clamp -local/cli 11.0.0 ONAP Command Line Interface -local/common 11.0.0 Common templates for inclusion in other charts -local/consul 11.0.0 ONAP Consul Agent -local/contrib 11.0.0 ONAP optional tools -local/cps 11.0.0 ONAP Configuration Persistene Service (CPS) -local/dcaegen2 11.0.0 ONAP DCAE Gen2 -local/dgbuilder 11.0.0 D.G. Builder application -local/dmaap 11.0.0 ONAP DMaaP components -local/log 11.0.0 ONAP Logging ElasticStack -local/mariadb-galera 11.0.0 Chart for MariaDB Galera cluster -local/mongo 11.0.0 MongoDB Server -local/msb 11.0.0 ONAP MicroServices Bus -local/multicloud 11.0.0 ONAP multicloud broker -local/music 11.0.0 MUSIC - Multi-site State Coordination Service -local/mysql 11.0.0 MySQL Server -local/nbi 11.0.0 ONAP Northbound Interface -local/network-name-gen 11.0.0 Name Generation Micro Service -local/nfs-provisioner 11.0.0 NFS provisioner -local/oof 11.0.0 ONAP Optimization Framework -local/policy 11.0.0 ONAP Policy Administration Point -local/pomba 11.0.0 ONAP Post Orchestration Model Based Audit -local/portal 11.0.0 ONAP Web Portal -local/postgres 11.0.0 ONAP Postgres Server -local/robot 11.0.0 A helm Chart for kubernetes-ONAP Robot -local/sdc 11.0.0 Service Design and Creation Umbrella Helm charts -local/sdnc 11.0.0 SDN Controller -local/sdnc-prom 11.0.0 ONAP SDNC Policy Driven Ownership Management -local/sniro-emulator 11.0.0 ONAP Mock Sniro Emulator -local/so 11.0.0 ONAP Service Orchestrator -local/strimzi 11.0.0 ONAP Strimzi Apache Kafka -local/uui 11.0.0 ONAP uui -local/vfc 11.0.0 ONAP Virtual Function Controller (VF-C) -local/vid 11.0.0 ONAP Virtual Infrastructure Deployment -local/vnfsdk 11.0.0 ONAP VNF SDK diff --git a/docs/images/consul/consulHealth.png b/docs/images/consul/consulHealth.png deleted file mode 100644 index cd7e730c39..0000000000 Binary files a/docs/images/consul/consulHealth.png and /dev/null differ diff --git a/docs/images/cp_vms/control_plane_1.png b/docs/images/cp_vms/control_plane_1.png deleted file mode 100644 index d59b9863b7..0000000000 Binary files a/docs/images/cp_vms/control_plane_1.png and /dev/null differ diff --git a/docs/images/cp_vms/control_plane_2.png b/docs/images/cp_vms/control_plane_2.png deleted file mode 100644 index 9a7d72f8a5..0000000000 Binary files a/docs/images/cp_vms/control_plane_2.png and /dev/null differ diff --git a/docs/images/cp_vms/control_plane_3.png b/docs/images/cp_vms/control_plane_3.png deleted file mode 
100644 index da329f20b5..0000000000 Binary files a/docs/images/cp_vms/control_plane_3.png and /dev/null differ diff --git a/docs/images/cp_vms/control_plane_4.png b/docs/images/cp_vms/control_plane_4.png deleted file mode 100644 index 817355a99e..0000000000 Binary files a/docs/images/cp_vms/control_plane_4.png and /dev/null differ diff --git a/docs/images/cp_vms/control_plane_5.png b/docs/images/cp_vms/control_plane_5.png deleted file mode 100644 index 33805c50dd..0000000000 Binary files a/docs/images/cp_vms/control_plane_5.png and /dev/null differ diff --git a/docs/images/cp_vms/control_plane_6.png b/docs/images/cp_vms/control_plane_6.png deleted file mode 100644 index 9e8ab638bc..0000000000 Binary files a/docs/images/cp_vms/control_plane_6.png and /dev/null differ diff --git a/docs/images/cp_vms/control_plane_7.png b/docs/images/cp_vms/control_plane_7.png deleted file mode 100644 index f0db6d3f3f..0000000000 Binary files a/docs/images/cp_vms/control_plane_7.png and /dev/null differ diff --git a/docs/images/cp_vms/control_plane_8.png b/docs/images/cp_vms/control_plane_8.png deleted file mode 100644 index e20f631e60..0000000000 Binary files a/docs/images/cp_vms/control_plane_8.png and /dev/null differ diff --git a/docs/images/floating_ips/floating_1.png b/docs/images/floating_ips/floating_1.png deleted file mode 100644 index 9f413164ab..0000000000 Binary files a/docs/images/floating_ips/floating_1.png and /dev/null differ diff --git a/docs/images/floating_ips/floating_2.png b/docs/images/floating_ips/floating_2.png deleted file mode 100644 index 0001ef068c..0000000000 Binary files a/docs/images/floating_ips/floating_2.png and /dev/null differ diff --git a/docs/images/k8s/kubernetes_objects.png b/docs/images/k8s/kubernetes_objects.png deleted file mode 100644 index 768a3adb99..0000000000 Binary files a/docs/images/k8s/kubernetes_objects.png and /dev/null differ diff --git a/docs/images/keys/key_pair_1.png b/docs/images/keys/key_pair_1.png deleted file mode 100644 index 1135c93320..0000000000 Binary files a/docs/images/keys/key_pair_1.png and /dev/null differ diff --git a/docs/images/keys/key_pair_2.png b/docs/images/keys/key_pair_2.png deleted file mode 100644 index ac3bfc5ca2..0000000000 Binary files a/docs/images/keys/key_pair_2.png and /dev/null differ diff --git a/docs/images/keys/key_pair_3.png b/docs/images/keys/key_pair_3.png deleted file mode 100644 index 1e0c0200f8..0000000000 Binary files a/docs/images/keys/key_pair_3.png and /dev/null differ diff --git a/docs/images/keys/key_pair_4.png b/docs/images/keys/key_pair_4.png deleted file mode 100644 index 031a9ba785..0000000000 Binary files a/docs/images/keys/key_pair_4.png and /dev/null differ diff --git a/docs/images/msb/MSB-OOM-Diagram.png b/docs/images/msb/MSB-OOM-Diagram.png deleted file mode 100644 index 4ee878d833..0000000000 Binary files a/docs/images/msb/MSB-OOM-Diagram.png and /dev/null differ diff --git a/docs/images/network/network_1.png b/docs/images/network/network_1.png deleted file mode 100644 index d51cb1280b..0000000000 Binary files a/docs/images/network/network_1.png and /dev/null differ diff --git a/docs/images/network/network_2.png b/docs/images/network/network_2.png deleted file mode 100644 index 9498a460d3..0000000000 Binary files a/docs/images/network/network_2.png and /dev/null differ diff --git a/docs/images/network/network_3.png b/docs/images/network/network_3.png deleted file mode 100644 index c729405aef..0000000000 Binary files a/docs/images/network/network_3.png and /dev/null differ diff --git 
a/docs/images/network/network_4.png b/docs/images/network/network_4.png deleted file mode 100644 index cc8f96fac0..0000000000 Binary files a/docs/images/network/network_4.png and /dev/null differ diff --git a/docs/images/nfs_server/nfs_server_1.png b/docs/images/nfs_server/nfs_server_1.png deleted file mode 100644 index 912a10f055..0000000000 Binary files a/docs/images/nfs_server/nfs_server_1.png and /dev/null differ diff --git a/docs/images/nfs_server/nfs_server_10.png b/docs/images/nfs_server/nfs_server_10.png deleted file mode 100644 index 7d87d1ca56..0000000000 Binary files a/docs/images/nfs_server/nfs_server_10.png and /dev/null differ diff --git a/docs/images/nfs_server/nfs_server_2.png b/docs/images/nfs_server/nfs_server_2.png deleted file mode 100644 index d59b9863b7..0000000000 Binary files a/docs/images/nfs_server/nfs_server_2.png and /dev/null differ diff --git a/docs/images/nfs_server/nfs_server_3.png b/docs/images/nfs_server/nfs_server_3.png deleted file mode 100644 index 9a7d72f8a5..0000000000 Binary files a/docs/images/nfs_server/nfs_server_3.png and /dev/null differ diff --git a/docs/images/nfs_server/nfs_server_4.png b/docs/images/nfs_server/nfs_server_4.png deleted file mode 100644 index da329f20b5..0000000000 Binary files a/docs/images/nfs_server/nfs_server_4.png and /dev/null differ diff --git a/docs/images/nfs_server/nfs_server_5.png b/docs/images/nfs_server/nfs_server_5.png deleted file mode 100644 index 817355a99e..0000000000 Binary files a/docs/images/nfs_server/nfs_server_5.png and /dev/null differ diff --git a/docs/images/nfs_server/nfs_server_6.png b/docs/images/nfs_server/nfs_server_6.png deleted file mode 100644 index 33805c50dd..0000000000 Binary files a/docs/images/nfs_server/nfs_server_6.png and /dev/null differ diff --git a/docs/images/nfs_server/nfs_server_7.png b/docs/images/nfs_server/nfs_server_7.png deleted file mode 100644 index 9e8ab638bc..0000000000 Binary files a/docs/images/nfs_server/nfs_server_7.png and /dev/null differ diff --git a/docs/images/nfs_server/nfs_server_8.png b/docs/images/nfs_server/nfs_server_8.png deleted file mode 100644 index 14103fb9c3..0000000000 Binary files a/docs/images/nfs_server/nfs_server_8.png and /dev/null differ diff --git a/docs/images/nfs_server/nfs_server_9.png b/docs/images/nfs_server/nfs_server_9.png deleted file mode 100644 index aa8bc140e1..0000000000 Binary files a/docs/images/nfs_server/nfs_server_9.png and /dev/null differ diff --git a/docs/images/oom_logo/oomLogoV2-Configure.png b/docs/images/oom_logo/oomLogoV2-Configure.png deleted file mode 100644 index bdb1ece10c..0000000000 Binary files a/docs/images/oom_logo/oomLogoV2-Configure.png and /dev/null differ diff --git a/docs/images/oom_logo/oomLogoV2-Delete.png b/docs/images/oom_logo/oomLogoV2-Delete.png deleted file mode 100644 index 10c43d2fb3..0000000000 Binary files a/docs/images/oom_logo/oomLogoV2-Delete.png and /dev/null differ diff --git a/docs/images/oom_logo/oomLogoV2-Deploy.png b/docs/images/oom_logo/oomLogoV2-Deploy.png deleted file mode 100644 index 706097cd6c..0000000000 Binary files a/docs/images/oom_logo/oomLogoV2-Deploy.png and /dev/null differ diff --git a/docs/images/oom_logo/oomLogoV2-Heal.png b/docs/images/oom_logo/oomLogoV2-Heal.png deleted file mode 100644 index 97ac58e9ec..0000000000 Binary files a/docs/images/oom_logo/oomLogoV2-Heal.png and /dev/null differ diff --git a/docs/images/oom_logo/oomLogoV2-Monitor.png b/docs/images/oom_logo/oomLogoV2-Monitor.png deleted file mode 100644 index c9a184ac37..0000000000 Binary files 
a/docs/images/oom_logo/oomLogoV2-Monitor.png and /dev/null differ diff --git a/docs/images/oom_logo/oomLogoV2-Scale.png b/docs/images/oom_logo/oomLogoV2-Scale.png deleted file mode 100644 index 140e5ca54f..0000000000 Binary files a/docs/images/oom_logo/oomLogoV2-Scale.png and /dev/null differ diff --git a/docs/images/oom_logo/oomLogoV2-Upgrade.png b/docs/images/oom_logo/oomLogoV2-Upgrade.png deleted file mode 100644 index d51f6cfcde..0000000000 Binary files a/docs/images/oom_logo/oomLogoV2-Upgrade.png and /dev/null differ diff --git a/docs/images/oom_logo/oomLogoV2-medium.png b/docs/images/oom_logo/oomLogoV2-medium.png deleted file mode 100644 index 20aea693e0..0000000000 Binary files a/docs/images/oom_logo/oomLogoV2-medium.png and /dev/null differ diff --git a/docs/images/rke/rke_1.png b/docs/images/rke/rke_1.png deleted file mode 100644 index b27fc517df..0000000000 Binary files a/docs/images/rke/rke_1.png and /dev/null differ diff --git a/docs/images/sg/sg_1.png b/docs/images/sg/sg_1.png deleted file mode 100644 index ff5264d3c4..0000000000 Binary files a/docs/images/sg/sg_1.png and /dev/null differ diff --git a/docs/images/sg/sg_2.png b/docs/images/sg/sg_2.png deleted file mode 100644 index 395057fc97..0000000000 Binary files a/docs/images/sg/sg_2.png and /dev/null differ diff --git a/docs/images/wk_vms/worker_1.png b/docs/images/wk_vms/worker_1.png deleted file mode 100644 index 01314d1557..0000000000 Binary files a/docs/images/wk_vms/worker_1.png and /dev/null differ diff --git a/docs/images/wk_vms/worker_2.png b/docs/images/wk_vms/worker_2.png deleted file mode 100644 index 9a7d72f8a5..0000000000 Binary files a/docs/images/wk_vms/worker_2.png and /dev/null differ diff --git a/docs/images/wk_vms/worker_3.png b/docs/images/wk_vms/worker_3.png deleted file mode 100644 index 93d5e28cf2..0000000000 Binary files a/docs/images/wk_vms/worker_3.png and /dev/null differ diff --git a/docs/images/wk_vms/worker_4.png b/docs/images/wk_vms/worker_4.png deleted file mode 100644 index 817355a99e..0000000000 Binary files a/docs/images/wk_vms/worker_4.png and /dev/null differ diff --git a/docs/images/wk_vms/worker_5.png b/docs/images/wk_vms/worker_5.png deleted file mode 100644 index 33805c50dd..0000000000 Binary files a/docs/images/wk_vms/worker_5.png and /dev/null differ diff --git a/docs/images/wk_vms/worker_6.png b/docs/images/wk_vms/worker_6.png deleted file mode 100644 index c71c122217..0000000000 Binary files a/docs/images/wk_vms/worker_6.png and /dev/null differ diff --git a/docs/images/wk_vms/worker_7.png b/docs/images/wk_vms/worker_7.png deleted file mode 100644 index ecb13c1809..0000000000 Binary files a/docs/images/wk_vms/worker_7.png and /dev/null differ diff --git a/docs/index.rst b/docs/index.rst index 94a4bf00e5..7a1d52b2ff 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -2,20 +2,20 @@ .. International License. .. http://creativecommons.org/licenses/by/4.0 .. Copyright 2018-2021 Amdocs, Bell Canada, Orange, Samsung, Nokia -.. _master_index: +.. Modification copyright (C) 2022 Nordix Foundation +.. _master_index: OOM Documentation Repository ---------------------------- .. 
toctree:: :maxdepth: 2 - oom_project_description.rst - oom_quickstart_guide.rst - oom_user_guide.rst - oom_setup_paas.rst - oom_developer_guide.rst - oom_cloud_setup_guide.rst - release_notes/release-notes.rst - oom_setup_kubernetes_rancher.rst - oom_setup_ingress_controller.rst + sections/oom_project_description.rst + sections/guides/development_guides/oom_development.rst + sections/guides/infra_guides/oom_infra_setup.rst + sections/guides/deployment_guides/oom_deployment.rst + sections/guides/user_guides/oom_user_guide.rst + sections/guides/access_guides/oom_access_info.rst + sections/release_notes/release-notes.rst + diff --git a/docs/oom_cloud_setup_guide.rst b/docs/oom_cloud_setup_guide.rst deleted file mode 100644 index 69062894cf..0000000000 --- a/docs/oom_cloud_setup_guide.rst +++ /dev/null @@ -1,113 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 -.. International License. -.. http://creativecommons.org/licenses/by/4.0 -.. Copyright 2019-2020 Amdocs, Bell Canada, Orange, Samsung -.. _oom_cloud_setup_guide: - -.. Links -.. _Microsoft Azure: https://wiki.onap.org/display/DW/Cloud+Native+Deployment#CloudNativeDeployment-MicrosoftAzure -.. _Amazon AWS: https://wiki.onap.org/display/DW/Cloud+Native+Deployment#CloudNativeDeployment-AmazonAWS -.. _Google GCE: https://wiki.onap.org/display/DW/Cloud+Native+Deployment#CloudNativeDeployment-GoogleGCE -.. _VMware VIO: https://wiki.onap.org/display/DW/ONAP+on+VMware+Integrated+OpenStack+-+Container+Orchestration -.. _OpenStack: https://wiki.onap.org/display/DW/ONAP+on+Kubernetes+on+OpenStack?src=contextnavpagetreemode -.. _Setting Up Kubernetes with Rancher: https://wiki.onap.org/display/DW/Cloud+Native+Deployment -.. _Setting Up Kubernetes with Kubeadm: https://wiki.onap.org/display/DW/Deploying+Kubernetes+Cluster+with+kubeadm -.. _Cloud Native Deployment Wiki: https://wiki.onap.org/display/DW/Cloud+Native+Deployment -.. _ONAP Development - 110 pod limit Wiki: https://wiki.onap.org/display/DW/ONAP+Development#ONAPDevelopment-Changemax-podsfromdefault110podlimit - -.. figure:: images/oom_logo/oomLogoV2-medium.png - :align: right - -.. _cloud-setup-guide-label: - -OOM Cloud Setup Guide -##################### - -OOM deploys and manages ONAP on a pre-established Kubernetes_ cluster - the -creation of this cluster is outside of the scope of the OOM project as there -are many options including public clouds with pre-established environments. -However, this guide includes instructions for how to create and use some of the -more popular environments which could be used to host ONAP. If creation of a -Kubernetes cluster is required, the life-cycle of this cluster is independent -of the life-cycle of the ONAP components themselves. Much like an OpenStack -environment, the Kubernetes environment may be used for an extended period of -time, possibly spanning multiple ONAP releases. - -.. note:: - Inclusion of a cloud technology or provider in this guide does not imply an - endorsement. - -.. _Kubernetes: https://kubernetes.io/ - -Software Requirements -===================== - -The versions of Kubernetes that are supported by OOM are as follows: - -.. 
table:: OOM Software Requirements - - ============== =========== ======= ======== ======== ============ - Release Kubernetes Helm kubectl Docker Cert-Manager - ============== =========== ======= ======== ======== ============ - amsterdam 1.7.x 2.3.x 1.7.x 1.12.x - beijing 1.8.10 2.8.2 1.8.10 17.03.x - casablanca 1.11.5 2.9.1 1.11.5 17.03.x - dublin 1.13.5 2.12.3 1.13.5 18.09.5 - el alto 1.15.2 2.14.2 1.15.2 18.09.x - frankfurt 1.15.9 2.16.6 1.15.11 18.09.x - guilin 1.15.11 2.16.10 1.15.11 18.09.x - honolulu 1.19.9 3.5.2 1.19.9 19.03.x 1.2.0 - Istanbul 1.19.11 3.6.3 1.19.11 19.03.x 1.5.4 - Jakarta 1.22.4 3.6.3 1.22.4 20.10.x 1.5.4 - ============== =========== ======= ======== ======== ============ - -Minimum Hardware Configuration -============================== - -The hardware requirements are provided below. Note that this is for a -full ONAP deployment (all components). Customizing ONAP to deploy only -components that are needed will drastically reduce the requirements. - -.. table:: OOM Hardware Requirements - - ===== ===== ====== ==================== - RAM HD vCores Ports - ===== ===== ====== ==================== - 224GB 160GB 112 0.0.0.0/0 (all open) - ===== ===== ====== ==================== - -.. note:: - Kubernetes supports a maximum of 110 pods per node - configurable in the - --max-pods=n setting off the "additional kubelet flags" box in the kubernetes - template window described in 'ONAP Development - 110 pod limit Wiki' - - this limit does not need to be modified . The use of many small nodes is - preferred over a few larger nodes (for example 14x16GB - 8 vCores each). - Subsets of ONAP may still be deployed on a single node. - -Cloud Installation -================== - -.. #. OOM supports deployment on major public clouds. The following guides -.. provide instructions on how to deploy ONAP on these clouds: -.. -.. - `Microsoft Azure`_, -.. - `Amazon AWS`_, -.. - `Google GCE`_, -.. - `VMware VIO`_, -.. - IBM, and -.. - `Openstack`_. -.. -.. #. Alternatively, OOM can be deployed on a private set of physical hosts or -.. VMs (or even a combination of the two). The following guides describe how -.. to create a Kubernetes cluster with popular tools: -.. -.. - `Setting up Kubernetes with Rancher`_ (recommended) -.. - `Setting up Kubernetes with Kubeadm`_ -.. - `Setting up Kubernetes with Cloudify`_ - -OOM can be deployed on a private set of physical hosts or VMs (or even a -combination of the two). The following guide describe the recommended method to -setup a Kubernetes cluster: :ref:`onap-on-kubernetes-with-rancher`. - -There are alternative deployment methods described on the -`Cloud Native Deployment Wiki`_ diff --git a/docs/oom_developer_guide.rst b/docs/oom_developer_guide.rst deleted file mode 100644 index 8df8e74acd..0000000000 --- a/docs/oom_developer_guide.rst +++ /dev/null @@ -1,1148 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 -.. International License. -.. http://creativecommons.org/licenses/by/4.0 -.. Copyright 2018-2020 Amdocs, Bell Canada, Orange, Samsung - -.. Links -.. _Helm: https://docs.helm.sh/ -.. _Helm Charts: https://github.com/kubernetes/charts -.. _Kubernetes: https://Kubernetes.io/ -.. _Docker: https://www.docker.com/ -.. _Nexus: https://nexus.onap.org/ -.. _AWS Elastic Block Store: https://aws.amazon.com/ebs/ -.. _Azure File: https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction -.. _GCE Persistent Disk: https://cloud.google.com/compute/docs/disks/ -.. _Gluster FS: https://www.gluster.org/ -.. 
_Kubernetes Storage Class: https://Kubernetes.io/docs/concepts/storage/storage-classes/ -.. _Assigning Pods to Nodes: https://Kubernetes.io/docs/concepts/configuration/assign-pod-node/ - - -.. _developer-guide-label: - -OOM Developer Guide -################### - -.. figure:: images/oom_logo/oomLogoV2-medium.png - :align: right - -ONAP consists of a large number of components, each of which are substantial -projects within themselves, which results in a high degree of complexity in -deployment and management. To cope with this complexity the ONAP Operations -Manager (OOM) uses a Helm_ model of ONAP - Helm being the primary management -system for Kubernetes_ container systems - to drive all user driven life-cycle -management operations. The Helm model of ONAP is composed of a set of -hierarchical Helm charts that define the structure of the ONAP components and -the configuration of these components. These charts are fully parameterized -such that a single environment file defines all of the parameters needed to -deploy ONAP. A user of ONAP may maintain several such environment files to -control the deployment of ONAP in multiple environments such as development, -pre-production, and production. - -The following sections describe how the ONAP Helm charts are constructed. - -.. contents:: - :depth: 3 - :local: -.. - -Container Background -==================== -Linux containers allow for an application and all of its operating system -dependencies to be packaged and deployed as a single unit without including a -guest operating system as done with virtual machines. The most popular -container solution is Docker_ which provides tools for container management -like the Docker Host (dockerd) which can create, run, stop, move, or delete a -container. Docker has a very popular registry of containers images that can be -used by any Docker system; however, in the ONAP context, Docker images are -built by the standard CI/CD flow and stored in Nexus_ repositories. OOM uses -the "standard" ONAP docker containers and three new ones specifically created -for OOM. - -Containers are isolated from each other primarily via name spaces within the -Linux kernel without the need for multiple guest operating systems. As such, -multiple containers can be deployed with little overhead such as all of ONAP -can be deployed on a single host. With some optimization of the ONAP components -(e.g. elimination of redundant database instances) it may be possible to deploy -ONAP on a single laptop computer. - -Helm Charts -=========== -A Helm chart is a collection of files that describe a related set of Kubernetes -resources. A simple chart might be used to deploy something simple, like a -memcached pod, while a complex chart might contain many micro-service arranged -in a hierarchy as found in the `aai` ONAP component. - -Charts are created as files laid out in a particular directory tree, then they -can be packaged into versioned archives to be deployed. There is a public -archive of `Helm Charts`_ on GitHub that includes many technologies applicable -to ONAP. Some of these charts have been used in ONAP and all of the ONAP charts -have been created following the guidelines provided. - -The top level of the ONAP charts is shown below: - -.. 
code-block:: bash - - common - ├── cassandra - │   ├── Chart.yaml - │   ├── resources - │   │   ├── config - │   │   │   └── docker-entrypoint.sh - │   │   ├── exec.py - │   │   └── restore.sh - │   ├── templates - │   │   ├── backup - │   │   │   ├── configmap.yaml - │   │   │   ├── cronjob.yaml - │   │   │   ├── pv.yaml - │   │   │   └── pvc.yaml - │   │   ├── configmap.yaml - │   │   ├── pv.yaml - │   │   ├── service.yaml - │   │   └── statefulset.yaml - │   └── values.yaml - ├── common - │   ├── Chart.yaml - │   ├── templates - │   │   ├── _createPassword.tpl - │   │   ├── _ingress.tpl - │   │   ├── _labels.tpl - │   │   ├── _mariadb.tpl - │   │   ├── _name.tpl - │   │   ├── _namespace.tpl - │   │   ├── _repository.tpl - │   │   ├── _resources.tpl - │   │   ├── _secret.yaml - │   │   ├── _service.tpl - │   │   ├── _storage.tpl - │   │   └── _tplValue.tpl - │   └── values.yaml - ├── ... - └── postgres-legacy -    ├── Chart.yaml - ├── charts - └── configs - -The common section of charts consists of a set of templates that assist with -parameter substitution (`_name.tpl`, `_namespace.tpl` and others) and a set of -charts for components used throughout ONAP. When the common components are used -by other charts they are instantiated each time or we can deploy a shared -instances for several components. - -All of the ONAP components have charts that follow the pattern shown below: - -.. code-block:: bash - - name-of-my-component - ├── Chart.yaml - ├── component - │   └── subcomponent-folder - ├── charts - │   └── subchart-folder - ├── resources - │   ├── folder1 - │   │   ├── file1 - │   │   └── file2 - │   └── folder1 - │   ├── file3 - │   └── folder3 - │      └── file4 - ├── templates - │   ├── NOTES.txt - │   ├── configmap.yaml - │   ├── deployment.yaml - │   ├── ingress.yaml - │   ├── job.yaml - │   ├── secrets.yaml - │   └── service.yaml - └── values.yaml - -Note that the component charts / components may include a hierarchy of sub -components and in themselves can be quite complex. - -You can use either `charts` or `components` folder for your subcomponents. -`charts` folder means that the subcomponent will always been deployed. - -`components` folders means we can choose if we want to deploy the -subcomponent. - -This choice is done in root `values.yaml`: - -.. code-block:: yaml - - --- - global: - key: value - - component1: - enabled: true - component2: - enabled: true - -Then in `Chart.yaml` dependencies section, you'll use these values: - -.. code-block:: yaml - - --- - dependencies: - - name: common - version: ~x.y-0 - repository: '@local' - - name: component1 - version: ~x.y-0 - repository: 'file://components/component1' - condition: component1.enabled - - name: component2 - version: ~x.y-0 - repository: 'file://components/component2' - condition: component2.enabled - -Configuration of the components varies somewhat from component to component but -generally follows the pattern of one or more `configmap.yaml` files which can -directly provide configuration to the containers in addition to processing -configuration files stored in the `config` directory. It is the responsibility -of each ONAP component team to update these configuration files when changes -are made to the project containers that impact configuration. - -The following section describes how the hierarchical ONAP configuration system -is key to management of such a large system. 
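As an illustration of the `configmap.yaml` files mentioned above, a minimal template is sketched below. It is not taken from any particular component; the helpers shown (`common.fullname`, `common.namespace`, `common.labels`) are the ones used elsewhere in this guide, and the only assumption is that the chart keeps its configuration files under `resources/config` so that they can be rendered through Helm's `tpl` function (value substitution then also applies to the mounted files):

.. code-block:: yaml

   apiVersion: v1
   kind: ConfigMap
   metadata:
     name: {{ include "common.fullname" . }}-configmap
     namespace: {{ include "common.namespace" . }}
     labels: {{- include "common.labels" . | nindent 4 }}
   data:
   # render every file found under resources/config, applying Helm templating
   {{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}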
- -Configuration Management -======================== - -ONAP is a large system composed of many components - each of which are complex -systems in themselves - that needs to be deployed in a number of different -ways. For example, within a single operator's network there may be R&D -deployments under active development, pre-production versions undergoing system -testing and production systems that are operating live networks. Each of these -deployments will differ in significant ways, such as the version of the -software images deployed. In addition, there may be a number of application -specific configuration differences, such as operating system environment -variables. The following describes how the Helm configuration management -system is used within the OOM project to manage both ONAP infrastructure -configuration as well as ONAP components configuration. - -One of the artifacts that OOM/Kubernetes uses to deploy ONAP components is the -deployment specification, yet another yaml file. Within these deployment specs -are a number of parameters as shown in the following example: - -.. code-block:: yaml - - apiVersion: apps/v1 - kind: StatefulSet - metadata: - labels: - app.kubernetes.io/name: zookeeper - helm.sh/chart: zookeeper - app.kubernetes.io/component: server - app.kubernetes.io/managed-by: Tiller - app.kubernetes.io/instance: onap-oof - name: onap-oof-zookeeper - namespace: onap - spec: - <...> - replicas: 3 - selector: - matchLabels: - app.kubernetes.io/name: zookeeper - app.kubernetes.io/component: server - app.kubernetes.io/instance: onap-oof - serviceName: onap-oof-zookeeper-headless - template: - metadata: - labels: - app.kubernetes.io/name: zookeeper - helm.sh/chart: zookeeper - app.kubernetes.io/component: server - app.kubernetes.io/managed-by: Tiller - app.kubernetes.io/instance: onap-oof - spec: - <...> - affinity: - containers: - - name: zookeeper - <...> - image: gcr.io/google_samples/k8szk:v3 - imagePullPolicy: Always - <...> - ports: - - containerPort: 2181 - name: client - protocol: TCP - - containerPort: 3888 - name: election - protocol: TCP - - containerPort: 2888 - name: server - protocol: TCP - <...> - -Note that within the statefulset specification, one of the container arguments -is the key/value pair image: gcr.io/google_samples/k8szk:v3 which -specifies the version of the zookeeper software to deploy. Although the -statefulset specifications greatly simplify statefulset, maintenance of the -statefulset specifications themselves become problematic as software versions -change over time or as different versions are required for different -statefulsets. For example, if the R&D team needs to deploy a newer version of -mariadb than what is currently used in the production environment, they would -need to clone the statefulset specification and change this value. Fortunately, -this problem has been solved with the templating capabilities of Helm. - -The following example shows how the statefulset specifications are modified to -incorporate Helm templates such that key/value pairs can be defined outside of -the statefulset specifications and passed during instantiation of the component. - -.. code-block:: yaml - - apiVersion: apps/v1 - kind: StatefulSet - metadata: - name: {{ include "common.fullname" . }} - namespace: {{ include "common.namespace" . }} - labels: {{- include "common.labels" . | nindent 4 }} - spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: {{- include "common.matchLabels" . 
| nindent 6 }} - # serviceName is only needed for StatefulSet - # put the postfix part only if you have add a postfix on the service name - serviceName: {{ include "common.servicename" . }}-{{ .Values.service.postfix }} - <...> - template: - metadata: - labels: {{- include "common.labels" . | nindent 8 }} - annotations: {{- include "common.tplValue" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} - name: {{ include "common.name" . }} - spec: - <...> - containers: - - name: {{ include "common.name" . }} - image: {{ .Values.image }} - imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }} - ports: - {{- range $index, $port := .Values.service.ports }} - - containerPort: {{ $port.port }} - name: {{ $port.name }} - {{- end }} - {{- range $index, $port := .Values.service.headlessPorts }} - - containerPort: {{ $port.port }} - name: {{ $port.name }} - {{- end }} - <...> - -This version of the statefulset specification has gone through the process of -templating values that are likely to change between statefulsets. Note that the -image is now specified as: image: {{ .Values.image }} instead of a -string used previously. During the statefulset phase, Helm (actually the Helm -sub-component Tiller) substitutes the {{ .. }} entries with a variable defined -in a values.yaml file. The content of this file is as follows: - -.. code-block:: yaml - - <...> - image: gcr.io/google_samples/k8szk:v3 - replicaCount: 3 - <...> - - -Within the values.yaml file there is an image key with the value -`gcr.io/google_samples/k8szk:v3` which is the same value used in -the non-templated version. Once all of the substitutions are complete, the -resulting statefulset specification ready to be used by Kubernetes. - -When creating a template consider the use of default values if appropriate. -Helm templating has built in support for DEFAULT values, here is -an example: - -.. code-block:: yaml - - imagePullSecrets: - - name: "{{ .Values.nsPrefix | default "onap" }}-docker-registry-key" - -The pipeline operator ("|") used here hints at that power of Helm templates in -that much like an operating system command line the pipeline operator allow -over 60 Helm functions to be embedded directly into the template (note that the -Helm template language is a superset of the Go template language). These -functions include simple string operations like upper and more complex flow -control operations like if/else. - -OOM is mainly helm templating. In order to have consistent deployment of the -different components of ONAP, some rules must be followed. - -Templates are provided in order to create Kubernetes resources (Secrets, -Ingress, Services, ...) or part of Kubernetes resources (names, labels, -resources requests and limits, ...). - -a full list and simple description is done in -`kubernetes/common/common/documentation.rst`. - -Service template ----------------- - -In order to create a Service for a component, you have to create a file (with -`service` in the name. -For normal service, just put the following line: - -.. code-block:: yaml - - {{ include "common.service" . }} - -For headless service, the line to put is the following: - -.. code-block:: yaml - - {{ include "common.headlessService" . }} - -The configuration of the service is done in component `values.yaml`: - -.. 
code-block:: yaml - - service: - name: NAME-OF-THE-SERVICE - postfix: MY-POSTFIX - type: NodePort - annotations: - someAnnotationsKey: value - ports: - - name: tcp-MyPort - port: 5432 - nodePort: 88 - - name: http-api - port: 8080 - nodePort: 89 - - name: https-api - port: 9443 - nodePort: 90 - -`annotations` and `postfix` keys are optional. -if `service.type` is `NodePort`, then you have to give `nodePort` value for your -service ports (which is the end of the computed nodePort, see example). - -It would render the following Service Resource (for a component named -`name-of-my-component`, with version `x.y.z`, helm deployment name -`my-deployment` and `global.nodePortPrefix` `302`): - -.. code-block:: yaml - - apiVersion: v1 - kind: Service - metadata: - annotations: - someAnnotationsKey: value - name: NAME-OF-THE-SERVICE-MY-POSTFIX - labels: - app.kubernetes.io/name: name-of-my-component - helm.sh/chart: name-of-my-component-x.y.z - app.kubernetes.io/instance: my-deployment-name-of-my-component - app.kubernetes.io/managed-by: Tiller - spec: - ports: - - port: 5432 - targetPort: tcp-MyPort - nodePort: 30288 - - port: 8080 - targetPort: http-api - nodePort: 30289 - - port: 9443 - targetPort: https-api - nodePort: 30290 - selector: - app.kubernetes.io/name: name-of-my-component - app.kubernetes.io/instance: my-deployment-name-of-my-component - type: NodePort - -In the deployment or statefulSet file, you needs to set the good labels in -order for the service to match the pods. - -here's an example to be sure it matches (for a statefulSet): - -.. code-block:: yaml - - apiVersion: apps/v1 - kind: StatefulSet - metadata: - name: {{ include "common.fullname" . }} - namespace: {{ include "common.namespace" . }} - labels: {{- include "common.labels" . | nindent 4 }} - spec: - selector: - matchLabels: {{- include "common.matchLabels" . | nindent 6 }} - # serviceName is only needed for StatefulSet - # put the postfix part only if you have add a postfix on the service name - serviceName: {{ include "common.servicename" . }}-{{ .Values.service.postfix }} - <...> - template: - metadata: - labels: {{- include "common.labels" . | nindent 8 }} - annotations: {{- include "common.tplValue" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} - name: {{ include "common.name" . }} - spec: - <...> - containers: - - name: {{ include "common.name" . }} - ports: - {{- range $index, $port := .Values.service.ports }} - - containerPort: {{ $port.port }} - name: {{ $port.name }} - {{- end }} - {{- range $index, $port := .Values.service.headlessPorts }} - - containerPort: {{ $port.port }} - name: {{ $port.name }} - {{- end }} - <...> - -The configuration of the service is done in component `values.yaml`: - -.. code-block:: yaml - - service: - name: NAME-OF-THE-SERVICE - headless: - postfix: NONE - annotations: - anotherAnnotationsKey : value - publishNotReadyAddresses: true - headlessPorts: - - name: tcp-MyPort - port: 5432 - - name: http-api - port: 8080 - - name: https-api - port: 9443 - -`headless.annotations`, `headless.postfix` and -`headless.publishNotReadyAddresses` keys are optional. - -If `headless.postfix` is not set, then we'll add `-headless` at the end of the -service name. - -If it set to `NONE`, there will be not postfix. - -And if set to something, it will add `-something` at the end of the service -name. 
- -It would render the following Service Resource (for a component named -`name-of-my-component`, with version `x.y.z`, helm deployment name -`my-deployment` and `global.nodePortPrefix` `302`): - -.. code-block:: yaml - - apiVersion: v1 - kind: Service - metadata: - annotations: - anotherAnnotationsKey: value - name: NAME-OF-THE-SERVICE - labels: - app.kubernetes.io/name: name-of-my-component - helm.sh/chart: name-of-my-component-x.y.z - app.kubernetes.io/instance: my-deployment-name-of-my-component - app.kubernetes.io/managed-by: Tiller - spec: - clusterIP: None - ports: - - port: 5432 - targetPort: tcp-MyPort - nodePort: 30288 - - port: 8080 - targetPort: http-api - nodePort: 30289 - - port: 9443 - targetPort: https-api - nodePort: 30290 - publishNotReadyAddresses: true - selector: - app.kubernetes.io/name: name-of-my-component - app.kubernetes.io/instance: my-deployment-name-of-my-component - type: ClusterIP - -Previous example of StatefulSet would also match (except for the `postfix` part -obviously). - -Creating Deployment or StatefulSet ----------------------------------- - -Deployment and StatefulSet should use the `apps/v1` (which has appeared in -v1.9). -As seen on the service part, the following parts are mandatory: - -.. code-block:: yaml - - apiVersion: apps/v1 - kind: StatefulSet - metadata: - name: {{ include "common.fullname" . }} - namespace: {{ include "common.namespace" . }} - labels: {{- include "common.labels" . | nindent 4 }} - spec: - selector: - matchLabels: {{- include "common.matchLabels" . | nindent 6 }} - # serviceName is only needed for StatefulSet - # put the postfix part only if you have add a postfix on the service name - serviceName: {{ include "common.servicename" . }}-{{ .Values.service.postfix }} - <...> - template: - metadata: - labels: {{- include "common.labels" . | nindent 8 }} - annotations: {{- include "common.tplValue" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} - name: {{ include "common.name" . }} - spec: - <...> - containers: - - name: {{ include "common.name" . }} - -ONAP Application Configuration ------------------------------- - -Dependency Management ---------------------- -These Helm charts describe the desired state -of an ONAP deployment and instruct the Kubernetes container manager as to how -to maintain the deployment in this state. These dependencies dictate the order -in-which the containers are started for the first time such that such -dependencies are always met without arbitrary sleep times between container -startups. For example, the SDC back-end container requires the Elastic-Search, -Cassandra and Kibana containers within SDC to be ready and is also dependent on -DMaaP (or the message-router) to be ready - where ready implies the built-in -"readiness" probes succeeded - before becoming fully operational. When an -initial deployment of ONAP is requested the current state of the system is NULL -so ONAP is deployed by the Kubernetes manager as a set of Docker containers on -one or more predetermined hosts. The hosts could be physical machines or -virtual machines. When deploying on virtual machines the resulting system will -be very similar to "Heat" based deployments, i.e. Docker containers running -within a set of VMs, the primary difference being that the allocation of -containers to VMs is done dynamically with OOM and statically with "Heat". -Example SO deployment descriptor file shows SO's dependency on its mariadb -data-base component: - -SO deployment specification excerpt: - -.. 
code-block:: yaml - - apiVersion: apps/v1 - kind: Deployment - metadata: - name: {{ include "common.fullname" . }} - namespace: {{ include "common.namespace" . }} - labels: {{- include "common.labels" . | nindent 4 }} - spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: {{- include "common.matchLabels" . | nindent 6 }} - template: - metadata: - labels: - app: {{ include "common.name" . }} - release: {{ .Release.Name }} - spec: - initContainers: - - command: - - /app/ready.py - args: - - --container-name - - so-mariadb - env: - ... - -Kubernetes Container Orchestration -================================== -The ONAP components are managed by the Kubernetes_ container management system -which maintains the desired state of the container system as described by one -or more deployment descriptors - similar in concept to OpenStack HEAT -Orchestration Templates. The following sections describe the fundamental -objects managed by Kubernetes, the network these components use to communicate -with each other and other entities outside of ONAP and the templates that -describe the configuration and desired state of the ONAP components. - -Name Spaces ------------ -Within the namespaces are Kubernetes services that provide external -connectivity to pods that host Docker containers. - -ONAP Components to Kubernetes Object Relationships --------------------------------------------------- -Kubernetes deployments consist of multiple objects: - -- **nodes** - a worker machine - either physical or virtual - that hosts - multiple containers managed by Kubernetes. -- **services** - an abstraction of a logical set of pods that provide a - micro-service. -- **pods** - one or more (but typically one) container(s) that provide specific - application functionality. -- **persistent volumes** - One or more permanent volumes need to be established - to hold non-ephemeral configuration and state data. - -The relationship between these objects is shown in the following figure: - -.. .. uml:: -.. -.. @startuml -.. node PH { -.. component Service { -.. component Pod0 -.. component Pod1 -.. } -.. } -.. -.. database PV -.. @enduml - -.. figure:: images/k8s/kubernetes_objects.png - -OOM uses these Kubernetes objects as described in the following sections. - -Nodes -~~~~~ -OOM works with both physical and virtual worker machines. - -* Virtual Machine Deployments - If ONAP is to be deployed onto a set of virtual - machines, the creation of the VMs is outside of the scope of OOM and could be - done in many ways, such as - - * manually, for example by a user using the OpenStack Horizon dashboard or - AWS EC2, or - * automatically, for example with the use of a OpenStack Heat Orchestration - Template which builds an ONAP stack, Azure ARM template, AWS CloudFormation - Template, or - * orchestrated, for example with Cloudify creating the VMs from a TOSCA - template and controlling their life cycle for the life of the ONAP - deployment. - -* Physical Machine Deployments - If ONAP is to be deployed onto physical - machines there are several options but the recommendation is to use Rancher - along with Helm to associate hosts with a Kubernetes cluster. - -Pods -~~~~ -A group of containers with shared storage and networking can be grouped -together into a Kubernetes pod. All of the containers within a pod are -co-located and co-scheduled so they operate as a single unit. Within ONAP -Amsterdam release, pods are mapped one-to-one to docker containers although -this may change in the future. 
As explained in the Services section below the -use of Pods within each ONAP component is abstracted from other ONAP -components. - -Services -~~~~~~~~ -OOM uses the Kubernetes service abstraction to provide a consistent access -point for each of the ONAP components independent of the pod or container -architecture of that component. For example, the SDNC component may introduce -OpenDaylight clustering as some point and change the number of pods in this -component to three or more but this change will be isolated from the other ONAP -components by the service abstraction. A service can include a load balancer -on its ingress to distribute traffic between the pods and even react to dynamic -changes in the number of pods if they are part of a replica set. - -Persistent Volumes -~~~~~~~~~~~~~~~~~~ -To enable ONAP to be deployed into a wide variety of cloud infrastructures a -flexible persistent storage architecture, built on Kubernetes persistent -volumes, provides the ability to define the physical storage in a central -location and have all ONAP components securely store their data. - -When deploying ONAP into a public cloud, available storage services such as -`AWS Elastic Block Store`_, `Azure File`_, or `GCE Persistent Disk`_ are -options. Alternatively, when deploying into a private cloud the storage -architecture might consist of Fiber Channel, `Gluster FS`_, or iSCSI. Many -other storage options existing, refer to the `Kubernetes Storage Class`_ -documentation for a full list of the options. The storage architecture may vary -from deployment to deployment but in all cases a reliable, redundant storage -system must be provided to ONAP with which the state information of all ONAP -components will be securely stored. The Storage Class for a given deployment is -a single parameter listed in the ONAP values.yaml file and therefore is easily -customized. Operation of this storage system is outside the scope of the OOM. - -.. code-block:: yaml - - Insert values.yaml code block with storage block here - -Once the storage class is selected and the physical storage is provided, the -ONAP deployment step creates a pool of persistent volumes within the given -physical storage that is used by all of the ONAP components. ONAP components -simply make a claim on these persistent volumes (PV), with a persistent volume -claim (PVC), to gain access to their storage. - -The following figure illustrates the relationships between the persistent -volume claims, the persistent volumes, the storage class, and the physical -storage. - -.. 
graphviz:: - - digraph PV { - label = "Persistance Volume Claim to Physical Storage Mapping" - { - node [shape=cylinder] - D0 [label="Drive0"] - D1 [label="Drive1"] - Dx [label="Drivex"] - } - { - node [shape=Mrecord label="StorageClass:ceph"] - sc - } - { - node [shape=point] - p0 p1 p2 - p3 p4 p5 - } - subgraph clusterSDC { - label="SDC" - PVC0 - PVC1 - } - subgraph clusterSDNC { - label="SDNC" - PVC2 - } - subgraph clusterSO { - label="SO" - PVCn - } - PV0 -> sc - PV1 -> sc - PV2 -> sc - PVn -> sc - - sc -> {D0 D1 Dx} - PVC0 -> PV0 - PVC1 -> PV1 - PVC2 -> PV2 - PVCn -> PVn - - # force all of these nodes to the same line in the given order - subgraph { - rank = same; PV0;PV1;PV2;PVn;p0;p1;p2 - PV0->PV1->PV2->p0->p1->p2->PVn [style=invis] - } - - subgraph { - rank = same; D0;D1;Dx;p3;p4;p5 - D0->D1->p3->p4->p5->Dx [style=invis] - } - - } - -In-order for an ONAP component to use a persistent volume it must make a claim -against a specific persistent volume defined in the ONAP common charts. Note -that there is a one-to-one relationship between a PVC and PV. The following is -an excerpt from a component chart that defines a PVC: - -.. code-block:: yaml - - Insert PVC example here - -OOM Networking with Kubernetes ------------------------------- - -- DNS -- Ports - Flattening the containers also expose port conflicts between the - containers which need to be resolved. - -Node Ports -~~~~~~~~~~ - -Pod Placement Rules -------------------- -OOM will use the rich set of Kubernetes node and pod affinity / -anti-affinity rules to minimize the chance of a single failure resulting in a -loss of ONAP service. Node affinity / anti-affinity is used to guide the -Kubernetes orchestrator in the placement of pods on nodes (physical or virtual -machines). For example: - -- if a container used Intel DPDK technology the pod may state that it as - affinity to an Intel processor based node, or -- geographical based node labels (such as the Kubernetes standard zone or - region labels) may be used to ensure placement of a DCAE complex close to the - VNFs generating high volumes of traffic thus minimizing networking cost. - Specifically, if nodes were pre-assigned labels East and West, the pod - deployment spec to distribute pods to these nodes would be: - -.. code-block:: yaml - - nodeSelector: - failure-domain.beta.Kubernetes.io/region: {{ .Values.location }} - -- "location: West" is specified in the `values.yaml` file used to deploy - one DCAE cluster and "location: East" is specified in a second `values.yaml` - file (see OOM Configuration Management for more information about - configuration files like the `values.yaml` file). - -Node affinity can also be used to achieve geographic redundancy if pods are -assigned to multiple failure domains. For more information refer to `Assigning -Pods to Nodes`_. - -.. note:: - One could use Pod to Node assignment to totally constrain Kubernetes when - doing initial container assignment to replicate the Amsterdam release - OpenStack Heat based deployment. Should one wish to do this, each VM would - need a unique node name which would be used to specify a node constaint - for every component. These assignment could be specified in an environment - specific values.yaml file. Constraining Kubernetes in this way is not - recommended. 
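To make the geographic redundancy point above concrete, the same region/zone labels can also be used with the richer nodeAffinity syntax instead of a plain nodeSelector. The snippet below is an illustrative sketch only; the label value ``West`` is a placeholder, and the label key is the canonical lower-case form of the one used in the nodeSelector example:

.. code-block:: yaml

   apiVersion: v1
   kind: Pod
   metadata:
     name: with-node-affinity
   spec:
     affinity:
       nodeAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
           nodeSelectorTerms:
           - matchExpressions:
             - key: failure-domain.beta.kubernetes.io/region
               operator: In
               values:
               - West
     containers:
     - name: with-node-affinity
       image: gcr.io/google_containers/pause:2.0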
- -Kubernetes has a comprehensive system called Taints and Tolerations that can be -used to force the container orchestrator to repel pods from nodes based on -static events (an administrator assigning a taint to a node) or dynamic events -(such as a node becoming unreachable or running out of disk space). There are -no plans to use taints or tolerations in the ONAP Beijing release. Pod -affinity / anti-affinity is the concept of creating a spatial relationship -between pods when the Kubernetes orchestrator does assignment (both initially -and in operation) to nodes as explained in Inter-pod affinity and anti-affinity. -For example, one might choose to co-locate all of the ONAP SDC containers on a -single node as they are not critical runtime components and co-location -minimizes overhead. On the other hand, one might choose to ensure that all of -the containers in an ODL cluster (SDNC and APPC) are placed on separate nodes -such that a node failure has minimal impact on the operation of the cluster. -An example of pod affinity / anti-affinity is shown below: - -Pod Affinity / Anti-Affinity - -.. code-block:: yaml - - apiVersion: v1 - kind: Pod - metadata: - name: with-pod-affinity - spec: - affinity: - podAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: security - operator: In - values: - - S1 - topologyKey: failure-domain.beta.Kubernetes.io/zone - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: security - operator: In - values: - - S2 - topologyKey: Kubernetes.io/hostname - containers: - - name: with-pod-affinity - image: gcr.io/google_containers/pause:2.0 - -This example contains both podAffinity and podAntiAffinity rules; the first -rule is a must (requiredDuringSchedulingIgnoredDuringExecution) while the -second will be met pending other considerations -(preferredDuringSchedulingIgnoredDuringExecution). Another feature -that may assist in achieving a repeatable deployment in the presence of faults -that may have reduced the capacity of the cloud is assigning priority to the -containers such that mission critical components have the ability to evict less -critical components. Kubernetes provides this capability with Pod Priority and -Preemption. Prior to having more advanced production grade features available, -the ability to at least be able to re-deploy ONAP (or a subset of it) reliably -provides a level of confidence that should an outage occur the system can be -brought back on-line predictably. - -Health Checks -------------- - -Monitoring of ONAP components is configured in the agents within JSON files and -stored in gerrit under the consul-agent-config; here is an example from the AAI -model loader (aai-model-loader-health.json): - -.. code-block:: json - - { - "service": { - "name": "A&AI Model Loader", - "checks": [ - { - "id": "model-loader-process", - "name": "Model Loader Presence", - "script": "/consul/config/scripts/model-loader-script.sh", - "interval": "15s", - "timeout": "1s" - } - ] - } - } - -Liveness Probes ---------------- - -These liveness probes can simply check that a port is available, that a -built-in health check is reporting good health, or that the Consul health check -is positive. For example, to monitor the SDNC component the following liveness -probe can be found in the SDNC DB deployment specification: - -.. 
code-block:: yaml - - sdnc db liveness probe - - livenessProbe: - exec: - command: ["mysqladmin", "ping"] - initialDelaySeconds: 30 periodSeconds: 10 - timeoutSeconds: 5 - -The 'initialDelaySeconds' control the period of time between the readiness -probe succeeding and the liveness probe starting. 'periodSeconds' and -'timeoutSeconds' control the actual operation of the probe. Note that -containers are inherently ephemeral so the healing action destroys failed -containers and any state information within it. To avoid a loss of state, a -persistent volume should be used to store all data that needs to be persisted -over the re-creation of a container. Persistent volumes have been created for -the database components of each of the projects and the same technique can be -used for all persistent state information. - - - -Environment Files -~~~~~~~~~~~~~~~~~ - -MSB Integration -=============== - -The \ `Microservices Bus -Project `__ provides -facilities to integrate micro-services into ONAP and therefore needs to -integrate into OOM - primarily through Consul which is the backend of -MSB service discovery. The following is a brief description of how this -integration will be done: - -A registrator to push the service endpoint info to MSB service -discovery. - -- The needed service endpoint info is put into the kubernetes yaml file - as annotation, including service name, Protocol,version, visual - range,LB method, IP, Port,etc. - -- OOM deploy/start/restart/scale in/scale out/upgrade ONAP components - -- Registrator watch the kubernetes event - -- When an ONAP component instance has been started/destroyed by OOM, - Registrator get the notification from kubernetes - -- Registrator parse the service endpoint info from annotation and - register/update/unregister it to MSB service discovery - -- MSB API Gateway uses the service endpoint info for service routing - and load balancing. - -Details of the registration service API can be found at \ `Microservice -Bus API -Documentation `__. - -ONAP Component Registration to MSB ----------------------------------- -The charts of all ONAP components intending to register against MSB must have -an annotation in their service(s) template. A `sdc` example follows: - -.. code-block:: yaml - - apiVersion: v1 - kind: Service - metadata: - labels: - app: sdc-be - name: sdc-be - namespace: "{{ .Values.nsPrefix }}" - annotations: - msb.onap.org/service-info: '[ - { - "serviceName": "sdc", - "version": "v1", - "url": "/sdc/v1", - "protocol": "REST", - "port": "8080", - "visualRange":"1" - }, - { - "serviceName": "sdc-deprecated", - "version": "v1", - "url": "/sdc/v1", - "protocol": "REST", - "port": "8080", - "visualRange":"1", - "path":"/sdc/v1" - } - ]' - ... - - -MSB Integration with OOM ------------------------- -A preliminary view of the OOM-MSB integration is as follows: - -.. figure:: images/msb/MSB-OOM-Diagram.png - -A message sequence chart of the registration process: - -.. 
uml:: - - participant "OOM" as oom - participant "ONAP Component" as onap - participant "Service Discovery" as sd - participant "External API Gateway" as eagw - participant "Router (Internal API Gateway)" as iagw - - box "MSB" #LightBlue - participant sd - participant eagw - participant iagw - end box - - == Deploy Servcie == - - oom -> onap: Deploy - oom -> sd: Register service endpoints - sd -> eagw: Services exposed to external system - sd -> iagw: Services for internal use - - == Component Life-cycle Management == - - oom -> onap: Start/Stop/Scale/Migrate/Upgrade - oom -> sd: Update service info - sd -> eagw: Update service info - sd -> iagw: Update service info - - == Service Health Check == - - sd -> onap: Check the health of service - sd -> eagw: Update service status - sd -> iagw: Update service status - - -MSB Deployment Instructions ---------------------------- -MSB is helm installable ONAP component which is often automatically deployed. -To install it individually enter:: - - > helm install /msb - -.. note:: - TBD: Vaidate if the following procedure is still required. - -Please note that Kubernetes authentication token must be set at -*kubernetes/kube2msb/values.yaml* so the kube2msb registrator can get the -access to watch the kubernetes events and get service annotation by -Kubernetes APIs. The token can be found in the kubectl configuration file -*~/.kube/config* - -More details can be found here `MSB installation `_. - -.. MISC -.. ==== -.. Note that although OOM uses Kubernetes facilities to minimize the effort -.. required of the ONAP component owners to implement a successful rolling -.. upgrade strategy there are other considerations that must be taken into -.. consideration. -.. For example, external APIs - both internal and external to ONAP - should be -.. designed to gracefully accept transactions from a peer at a different -.. software version to avoid deadlock situations. Embedded version codes in -.. messages may facilitate such capabilities. -.. -.. Within each of the projects a new configuration repository contains all of -.. the project specific configuration artifacts. As changes are made within -.. the project, it's the responsibility of the project team to make appropriate -.. changes to the configuration data. diff --git a/docs/oom_hardcoded_certificates.rst b/docs/oom_hardcoded_certificates.rst deleted file mode 100644 index 326cd3980f..0000000000 --- a/docs/oom_hardcoded_certificates.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 -.. International License. -.. http://creativecommons.org/licenses/by/4.0 -.. Copyright 2018 Amdocs, Bell Canada, 2020 Nokia Solutions and Networks - -:orphan: - -.. Links -.. _hardcoded-certificates-label: - -ONAP Hardcoded certificates -########################### - -ONAP current installation have hardcoded certificates. -Here's the list of these certificates: - -.. csv-table:: - :file: certs/hardcoded_certificates.csv diff --git a/docs/oom_project_description.rst b/docs/oom_project_description.rst deleted file mode 100644 index b672c48bc0..0000000000 --- a/docs/oom_project_description.rst +++ /dev/null @@ -1,100 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 -.. International License. -.. http://creativecommons.org/licenses/by/4.0 -.. Copyright 2018-2020 Amdocs, Bell Canada, Orange, Samsung -.. 
_oom_project_description: - -ONAP Operations Manager Project -############################### - -The ONAP Operations Manager (OOM) is responsible for life-cycle management of -the ONAP platform itself; components such as SO, SDNC, etc. It is not -responsible for the management of services, VNFs or infrastructure instantiated -by ONAP or used by ONAP to host such services or VNFs. OOM uses the open-source -Kubernetes container management system as a means to manage the Docker -containers that compose ONAP where the containers are hosted either directly on -bare-metal servers or on VMs hosted by a 3rd party management system. OOM -ensures that ONAP is easily deployable and maintainable throughout its life -cycle while using hardware resources efficiently. - -.. figure:: images/oom_logo/oomLogoV2-medium.png - :align: right - -In summary OOM provides the following capabilities: - -- **Deploy** - with built-in component dependency management -- **Configure** - unified configuration across all ONAP components -- **Monitor** - real-time health monitoring feeding to a Consul UI and - Kubernetes -- **Heal**- failed ONAP containers are recreated automatically -- **Scale** - cluster ONAP services to enable seamless scaling -- **Upgrade** - change-out containers or configuration with little or no - service impact -- **Delete** - cleanup individual containers or entire deployments - -OOM supports a wide variety of Kubernetes private clouds - built with Rancher, -Kubeadm or Cloudify - and public cloud infrastructures such as: Microsoft -Azure, Amazon AWS, Google GCD, VMware VIO, and OpenStack. - -The OOM documentation is broken into four different areas each targeted at a -different user: - -- :ref:`quick-start-label` - deploy ONAP on an existing cloud -- :ref:`user-guide-label` - a guide for operators of an ONAP instance -- :ref:`developer-guide-label` - a guide for developers of OOM and ONAP -- :ref:`cloud-setup-guide-label` - a guide for those setting up cloud - environments that ONAP will use -- :ref:`hardcoded-certificates-label` - the list of all hardcoded certificates - set in ONAP installation - -The :ref:`release_notes` for OOM describe the incremental features per release. - -Component Orchestration Overview -================================ -Multiple technologies, templates, and extensible plug-in frameworks are used in -ONAP to orchestrate platform instances of software component artifacts. A few -standard configurations are provide that may be suitable for test, development, -and some production deployments by substitution of local or platform wide -parameters. Larger and more automated deployments may require integration the -component technologies, templates, and frameworks with a higher level of -automated orchestration and control software. Design guidelines are provided to -insure the component level templates and frameworks can be easily integrated -and maintained. The following diagram provides an overview of these with links -to examples and templates for describing new ones. - -.. 
graphviz:: - - digraph COO { - rankdir="LR"; - - { - node [shape=folder] - oValues [label="values"] - cValues [label="values"] - comValues [label="values"] - sValues [label="values"] - oCharts [label="charts"] - cCharts [label="charts"] - comCharts [label="charts"] - sCharts [label="charts"] - blueprint [label="TOSCA blueprint"] - } - {oom [label="ONAP Operations Manager"]} - {hlo [label="High Level Orchestrator"]} - - - hlo -> blueprint - hlo -> oom - oom -> oValues - oom -> oCharts - oom -> component - oom -> common - common -> comValues - common -> comCharts - component -> cValues - component -> cCharts - component -> subcomponent - subcomponent -> sValues - subcomponent -> sCharts - blueprint -> component - } diff --git a/docs/oom_quickstart_guide.rst b/docs/oom_quickstart_guide.rst deleted file mode 100644 index b7749b1056..0000000000 --- a/docs/oom_quickstart_guide.rst +++ /dev/null @@ -1,284 +0,0 @@ -.. This work is licensed under a -.. Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 -.. Copyright 2019-2020 Amdocs, Bell Canada, Orange, Samsung -.. _oom_quickstart_guide: -.. _quick-start-label: - -OOM Quick Start Guide -##################### - -.. figure:: images/oom_logo/oomLogoV2-medium.png - :align: right - -Once a Kubernetes environment is available (follow the instructions in -:ref:`cloud-setup-guide-label` if you don't have a cloud environment -available), follow the following instructions to deploy ONAP. - -**Step 1.** Clone the OOM repository from ONAP gerrit:: - - > git clone -b http://gerrit.onap.org/r/oom --recurse-submodules - > cd oom/kubernetes - -where can be an official release tag, such as - -* 4.0.0-ONAP for Dublin -* 5.0.1-ONAP for El Alto -* 6.0.0 for Frankfurt -* 7.0.0 for Guilin -* 8.0.0 for Honolulu -* 9.0.0 for Istanbul -* 10.0.0 for Jakarta -* 11.0.0 for Kohn - -**Step 2.** Install Helm Plugins required to deploy ONAP:: - - > cp -R ~/oom/kubernetes/helm/plugins/ ~/.local/share/helm/plugins - > helm plugin install https://github.com/chartmuseum/helm-push.git \ - --version 0.9.0 - -.. note:: - The ``--version 0.9.0`` is required as new version of helm (3.7.0 and up) is - now using ``push`` directly and helm-push is using ``cm-push`` starting - version ``0.10.0`` and up. - -**Step 3.** Install Chartmuseum:: - - > curl -LO https://s3.amazonaws.com/chartmuseum/release/latest/bin/linux/amd64/chartmuseum - > chmod +x ./chartmuseum - > mv ./chartmuseum /usr/local/bin - -**Step 4.** Install Cert-Manager:: - - > kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.2.0/cert-manager.yaml - -More details can be found :doc:`here `. - -**Step 4.1** Install Strimzi Kafka Operator: - -- Add the helm repo:: - - > helm repo add strimzi https://strimzi.io/charts/ - -- Install the operator:: - - > helm install strimzi-kafka-operator strimzi/strimzi-kafka-operator --namespace strimzi-system --version 0.28.0 --set watchAnyNamespace=true --create-namespace - -More details can be found :doc:`here `. - -**Step 5.** Customize the Helm charts like `oom/kubernetes/onap/values.yaml` or -an override file like `onap-all.yaml`, `onap-vfw.yaml` or `openstack.yaml` file -to suit your deployment with items like the OpenStack tenant information. - -.. note:: - Standard and example override files (e.g. `onap-all.yaml`, `openstack.yaml`) - can be found in the `oom/kubernetes/onap/resources/overrides/` directory. - - - a. 
You may want to selectively enable or disable ONAP components by changing - the ``enabled: true/false`` flags. - - - b. Encrypt the OpenStack password using the shell tool for Robot and put it in - the Robot Helm charts or Robot section of `openstack.yaml` - - - c. Encrypt the OpenStack password using the java based script for SO Helm - charts or SO section of `openstack.yaml`. - - - d. Update the OpenStack parameters that will be used by Robot, SO and APPC Helm - charts or use an override file to replace them. - - e. Add in the command line a value for the global master password - (global.masterPassword). - - - -a. Enabling/Disabling Components: -Here is an example of the nominal entries that need to be provided. -We have different values file available for different contexts. - -.. literalinclude:: ../kubernetes/onap/values.yaml - :language: yaml - - -b. Generating ROBOT Encrypted Password: -The Robot encrypted Password uses the same encryption.key as SO but an -openssl algorithm that works with the python based Robot Framework. - -.. note:: - To generate Robot ``openStackEncryptedPasswordHere``:: - - cd so/resources/config/mso/ - /oom/kubernetes/so/resources/config/mso# echo -n "" | openssl aes-128-ecb -e -K `cat encryption.key` -nosalt | xxd -c 256 -p`` - -c. Generating SO Encrypted Password: -The SO Encrypted Password uses a java based encryption utility since the -Java encryption library is not easy to integrate with openssl/python that -Robot uses in Dublin and upper versions. - -.. note:: - To generate SO ``openStackEncryptedPasswordHere`` and ``openStackSoEncryptedPassword`` - ensure `default-jdk` is installed:: - - apt-get update; apt-get install default-jdk - - Then execute:: - - SO_ENCRYPTION_KEY=`cat ~/oom/kubernetes/so/resources/config/mso/encryption.key` - OS_PASSWORD=XXXX_OS_CLEARTESTPASSWORD_XXXX - - git clone http://gerrit.onap.org/r/integration - cd integration/deployment/heat/onap-rke/scripts - - javac Crypto.java - java Crypto "$OS_PASSWORD" "$SO_ENCRYPTION_KEY" - -d. Update the OpenStack parameters: - -There are assumptions in the demonstration VNF Heat templates about the -networking available in the environment. To get the most value out of these -templates and the automation that can help confirm the setup is correct, please -observe the following constraints. - - -``openStackPublicNetId:`` - This network should allow Heat templates to add interfaces. - This need not be an external network, floating IPs can be assigned to the - ports on the VMs that are created by the heat template but its important that - neutron allow ports to be created on them. - -``openStackPrivateNetCidr: "10.0.0.0/16"`` - This ip address block is used to assign OA&M addresses on VNFs to allow ONAP - connectivity. The demonstration Heat templates assume that 10.0 prefix can be - used by the VNFs and the demonstration ip addressing plan embodied in the - preload template prevent conflicts when instantiating the various VNFs. If - you need to change this, you will need to modify the preload data in the - Robot Helm chart like integration_preload_parameters.py and the - demo/heat/preload_data in the Robot container. The size of the CIDR should - be sufficient for ONAP and the VMs you expect to create. - -``openStackOamNetworkCidrPrefix: "10.0"`` - This ip prefix mush match the openStackPrivateNetCidr and is a helper - variable to some of the Robot scripts for demonstration. 
A production - deployment need not worry about this setting but for the demonstration VNFs - the ip asssignment strategy assumes 10.0 ip prefix. - -Example Keystone v2.0 - -.. literalinclude:: yaml/example-integration-override.yaml - :language: yaml - -Example Keystone v3 (required for Rocky and later releases) - -.. literalinclude:: yaml/example-integration-override-v3.yaml - :language: yaml - - -**Step 6.** To setup a local Helm server to server up the ONAP charts:: - - > chartmuseum --storage local --storage-local-rootdir ~/helm3-storage -port 8879 & - -Note the port number that is listed and use it in the Helm repo add as -follows:: - - > helm repo add local http://127.0.0.1:8879 - -**Step 7.** Verify your Helm repository setup with:: - - > helm repo list - NAME URL - local http://127.0.0.1:8879 - -**Step 8.** Build a local Helm repository (from the kubernetes directory):: - - > make SKIP_LINT=TRUE [HELM_BIN=] all ; make SKIP_LINT=TRUE [HELM_BIN=] onap - -`HELM_BIN` - Sets the helm binary to be used. The default value use helm from PATH - - -**Step 9.** Display the onap charts that available to be deployed:: - - > helm repo update - > helm search repo onap - -.. literalinclude:: helm/helm-search.txt - -.. note:: - The setup of the Helm repository is a one time activity. If you make changes - to your deployment charts or values be sure to use ``make`` to update your - local Helm repository. - -**Step 10.** Once the repo is setup, installation of ONAP can be done with a -single command - -.. note:: - The ``--timeout 900s`` is currently required in Dublin and later - versions up to address long running initialization tasks for DMaaP - and SO. Without this timeout value both applications may fail to - deploy. - -.. danger:: - We've added the master password on the command line. - You shouldn't put it in a file for safety reason - please don't forget to change the value to something random - - A space is also added in front of the command so "history" doesn't catch it. - This masterPassword is very sensitive, please be careful! - - -To deploy all ONAP applications use this command:: - - > cd oom/kubernetes - > helm deploy dev local/onap --namespace onap --create-namespace --set global.masterPassword=myAwesomePasswordThatINeedToChange -f onap/resources/overrides/onap-all.yaml -f onap/resources/overrides/environment.yaml -f onap/resources/overrides/openstack.yaml --timeout 900s - -All override files may be customized (or replaced by other overrides) as per -needs. - -`onap-all.yaml` - Enables the modules in the ONAP deployment. As ONAP is very modular, it is - possible to customize ONAP and disable some components through this - configuration file. - -`onap-all-ingress-nginx-vhost.yaml` - Alternative version of the `onap-all.yaml` but with global ingress controller - enabled. It requires the cluster configured with the nginx ingress controller - and load balancer. Please use this file instead `onap-all.yaml` if you want - to use experimental ingress controller feature. - -`environment.yaml` - Includes configuration values specific to the deployment environment. - - Example: adapt readiness and liveness timers to the level of performance of - your infrastructure - -`openstack.yaml` - Includes all the OpenStack related information for the default target tenant - you want to use to deploy VNFs from ONAP and/or additional parameters for the - embedded tests. 
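As an illustration, a small custom override file of this kind can simply be appended to the ``helm deploy`` command with an additional ``-f`` argument. The file below is a hypothetical sketch; the flags shown are examples only and the authoritative list of components and defaults lives in `oom/kubernetes/onap/values.yaml`:

.. code-block:: yaml

   ---
   # hypothetical my-overrides.yaml, passed with an extra -f argument
   global:
     # pull images only when they are not already present on the node
     pullPolicy: IfNotPresent
   aai:
     enabled: true
   robot:
     enabled: true
   so:
     enabled: true
   sdnc:
     enabled: false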
- -**Step 11.** Verify ONAP installation - -Use the following to monitor your deployment and determine when ONAP is ready -for use:: - - > kubectl get pods -n onap -o=wide - -.. note:: - While all pods may be in a Running state, it is not a guarantee that all - components are running fine. - - Launch the healthcheck tests using Robot to verify that the components are - healthy:: - - > ~/oom/kubernetes/robot/ete-k8s.sh onap health - -**Step 12.** Undeploy ONAP -:: - - > helm undeploy dev - -More examples of using the deploy and undeploy plugins can be found here: -https://wiki.onap.org/display/DW/OOM+Helm+%28un%29Deploy+plugins diff --git a/docs/oom_setup_ingress_controller.rst b/docs/oom_setup_ingress_controller.rst deleted file mode 100644 index fafe0db987..0000000000 --- a/docs/oom_setup_ingress_controller.rst +++ /dev/null @@ -1,181 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 -.. International License. -.. http://creativecommons.org/licenses/by/4.0 -.. Copyright 2020, Samsung Electronics - -.. Links -.. _HELM Best Practices Guide: https://docs.helm.sh/chart_best_practices/#requirements -.. _kubectl Cheat Sheet: https://kubernetes.io/docs/reference/kubectl/cheatsheet/ -.. _Kubernetes documentation for emptyDir: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir -.. _metallb Metal Load Balancer installation: https://metallb.universe.tf/installation/ -.. _http://cd.onap.info:30223/mso/logging/debug: http://cd.onap.info:30223/mso/logging/debug -.. _Onboarding and Distributing a Vendor Software Product: https://wiki.onap.org/pages/viewpage.action?pageId=1018474 -.. _README.md: https://gerrit.onap.org/r/gitweb?p=oom.git;a=blob;f=kubernetes/README.md - -.. figure:: images/oom_logo/oomLogoV2-medium.png - :align: right - -.. _oom_setup_ingress_controller: - - -Ingress controller setup on HA Kubernetes Cluster -################################################# - -This guide provides instruction how to setup experimental ingress controller -feature. For this, we are hosting our cluster on OpenStack VMs and using the -Rancher Kubernetes Engine (RKE) to deploy and manage our Kubernetes Cluster and -ingress controller - -.. contents:: - :depth: 1 - :local: -.. - -The result at the end of this tutorial will be: - -#. Customization of the cluster.yaml file for ingress controller support - -#. Installation and configuration test DNS server for ingress host resolution - on testing machines - -#. Installation and configuration MLB (Metal Load Balancer) required for - exposing ingress service - -#. Installation and configuration NGINX ingress controller - -#. Additional info how to deploy ONAP with services exposed via Ingress - controller - -Customize cluster.yml file -========================== -Before setup cluster for ingress purposes DNS cluster IP and ingress provider -should be configured and following: - -.. code-block:: yaml - - --- - <...> - restore: - restore: false - snapshot_name: "" - ingress: - provider: none - dns: - provider: coredns - upstreamnameservers: - - :31555 - -Where the should be set to the same IP as the CONTROLPANE -node. - -For external load balancer purposes, minimum one of the worker node should be -configured with external IP address accessible outside the cluster. It can be -done using the following example node configuration: - -.. 
code-block:: yaml - - --- - <...> - - address: - internal_address: - port: "22" - role: - - worker - hostname_override: "onap-worker-0" - user: ubuntu - ssh_key_path: "~/.ssh/id_rsa" - <...> - -Where the is external worker node IP address, and -is internal node IP address if it is required. - - -DNS server configuration and installation -========================================= -DNS server deployed on the Kubernetes cluster makes it easy to use services -exposed through ingress controller because it resolves all subdomain related to -the ONAP cluster to the load balancer IP. Testing ONAP cluster requires a lot -of entries on the target machines in the /etc/hosts. Adding many entries into -the configuration files on testing machines is quite problematic and error -prone. The better wait is to create central DNS server with entries for all -virtual host pointed to simpledemo.onap.org and add custom DNS server as a -target DNS server for testing machines and/or as external DNS for Kubernetes -cluster. - -DNS server has automatic installation and configuration script, so installation -is quite easy:: - - > cd kubernetes/contrib/dns-server-for-vhost-ingress-testing - - > ./deploy\_dns.sh - -After DNS deploy you need to setup DNS entry on the target testing machine. -Because DNS listen on non standard port configuration require iptables rules -on the target machine. Please follow the configuration proposed by the deploy -scripts. -Example output depends on the IP address and example output looks like bellow:: - - DNS server already deployed: - 1. You can add the DNS server to the target machine using following commands: - sudo iptables -t nat -A OUTPUT -p tcp -d 192.168.211.211 --dport 53 -j DNAT --to-destination 10.10.13.14:31555 - sudo iptables -t nat -A OUTPUT -p udp -d 192.168.211.211 --dport 53 -j DNAT --to-destination 10.10.13.14:31555 - sudo sysctl -w net.ipv4.conf.all.route_localnet=1 - sudo sysctl -w net.ipv4.ip_forward=1 - 2. Update /etc/resolv.conf file with nameserver 192.168.211.211 entry on your target machine - - -MetalLB Load Balancer installation and configuration -==================================================== - -By default pure Kubernetes cluster requires external load balancer if we want -to expose external port using LoadBalancer settings. For this purpose MetalLB -can be used. Before installing the MetalLB you need to ensure that at least one -worker has assigned IP accessible outside the cluster. - -MetalLB Load balancer can be easily installed using automatic install script:: - - > cd kubernetes/contrib/metallb-loadbalancer-inst - - > ./install-metallb-on-cluster.sh - - -Configuration Nginx ingress controller -====================================== - -After installation DNS server and ingress controller we can install and -configure ingress controller. 
-It can be done using the following commands:: - - > cd kubernetes/contrib/ingress-nginx-post-inst - - > kubectl apply -f nginx_ingress_cluster_config.yaml - - > kubectl apply -f nginx_ingress_enable_optional_load_balacer_service.yaml - -After deploy NGINX ingress controller you can ensure that the ingress port is -exposed as load balancer service with external IP address:: - - > kubectl get svc -n ingress-nginx - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - default-http-backend ClusterIP 10.10.10.10 80/TCP 25h - ingress-nginx LoadBalancer 10.10.10.11 10.12.13.14 80:31308/TCP,443:30314/TCP 24h - - -ONAP with ingress exposed services -================================== -If you want to deploy onap with services exposed through ingress controller you -can use full onap deploy script:: - - > onap/resources/overrides/onap-all-ingress-nginx-vhost.yaml - -Ingress also can be enabled on any onap setup override using following code: - -.. code-block:: yaml - - --- - <...> - global: - <...> - ingress: - enabled: true - diff --git a/docs/oom_setup_kubernetes_rancher.rst b/docs/oom_setup_kubernetes_rancher.rst deleted file mode 100644 index 767b93925e..0000000000 --- a/docs/oom_setup_kubernetes_rancher.rst +++ /dev/null @@ -1,531 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 -.. International License. -.. http://creativecommons.org/licenses/by/4.0 -.. Copyright 2018-2020 Amdocs, Bell Canada, Orange, Samsung - -.. Links -.. _HELM Best Practices Guide: https://docs.helm.sh/chart_best_practices/#requirements -.. _kubectl Cheat Sheet: https://kubernetes.io/docs/reference/kubectl/cheatsheet/ -.. _Kubernetes documentation for emptyDir: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir -.. _Docker DevOps: https://wiki.onap.org/display/DW/Docker+DevOps#DockerDevOps-DockerBuild -.. _http://cd.onap.info:30223/mso/logging/debug: http://cd.onap.info:30223/mso/logging/debug -.. _Onboarding and Distributing a Vendor Software Product: https://wiki.onap.org/pages/viewpage.action?pageId=1018474 -.. _README.md: https://gerrit.onap.org/r/gitweb?p=oom.git;a=blob;f=kubernetes/README.md - -.. figure:: images/oom_logo/oomLogoV2-medium.png - :align: right - -.. _onap-on-kubernetes-with-rancher: - -ONAP on HA Kubernetes Cluster -############################# - -This guide provides instructions on how to setup a Highly-Available Kubernetes -Cluster. For this, we are hosting our cluster on OpenStack VMs and using the -Rancher Kubernetes Engine (RKE) to deploy and manage our Kubernetes Cluster. - -.. contents:: - :depth: 1 - :local: -.. - -The result at the end of this tutorial will be: - -#. Creation of a Key Pair to use with Open Stack and RKE - -#. Creation of OpenStack VMs to host Kubernetes Control Plane - -#. Creation of OpenStack VMs to host Kubernetes Workers - -#. Installation and configuration of RKE to setup an HA Kubernetes - -#. Installation and configuration of kubectl - -#. Installation and configuration of Helm - -#. Creation of an NFS Server to be used by ONAP as shared persistance - -There are many ways one can execute the above steps. Including automation -through the use of HEAT to setup the OpenStack VMs. To better illustrate the -steps involved, we have captured the manual creation of such an environment -using the ONAP Wind River Open Lab. - -Create Key Pair -=============== -A Key Pair is required to access the created OpenStack VMs and will be used by -RKE to configure the VMs for Kubernetes. - -Use an existing key pair, import one or create a new one to assign. 
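If you prefer the OpenStack CLI to the Horizon dashboard, a new key pair can
also be created from the command line. A minimal sketch, assuming the CLI is
already configured for the target tenant and using the key name adopted later
in this guide (the private key is written to stdout and redirected to a
file)::

  > openstack keypair create onap-key > ~/.ssh/onap-key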
- -.. image:: images/keys/key_pair_1.png - -.. Note:: - If you're creating a new Key Pair, ensure to create a local copy of the - Private Key through the use of "Copy Private Key to Clipboard". - -For the purpose of this guide, we will assume a new local key called "onap-key" -has been downloaded and is copied into **~/.ssh/**, from which it can be -referenced. - -Example:: - - > mv onap-key ~/.ssh - - > chmod 600 ~/.ssh/onap-key - - -Create Network -============== - -An internal network is required in order to deploy our VMs that will host -Kubernetes. - -.. image:: images/network/network_1.png - -.. image:: images/network/network_2.png - -.. image:: images/network/network_3.png - -.. Note:: - It's better to have one network per deployment and obviously the name of this - network should be unique. - -Now we need to create a router to attach this network to outside: - -.. image:: images/network/network_4.png - -Create Security Group -===================== - -A specific security group is also required - -.. image:: images/sg/sg_1.png - -then click on `manage rules` of the newly created security group. -And finally click on `Add Rule` and create the following one: - -.. image:: images/sg/sg_2.png - -.. Note:: - the security is clearly not good here and the right SG will be proposed in a - future version - -Create Kubernetes Control Plane VMs -=================================== - -The following instructions describe how to create 3 OpenStack VMs to host the -Highly-Available Kubernetes Control Plane. -ONAP workloads will not be scheduled on these Control Plane nodes. - -Launch new VM instances ------------------------ - -.. image:: images/cp_vms/control_plane_1.png - -Select Ubuntu 18.04 as base image ---------------------------------- -Select "No" for "Create New Volume" - -.. image:: images/cp_vms/control_plane_2.png - -Select Flavor -------------- -The recommended flavor is at least 4 vCPU and 8GB ram. - -.. image:: images/cp_vms/control_plane_3.png - -Networking ----------- - -Use the created network: - -.. image:: images/cp_vms/control_plane_4.png - -Security Groups ---------------- - -Use the created security group: - -.. image:: images/cp_vms/control_plane_5.png - -Key Pair --------- -Assign the key pair that was created/selected previously (e.g. onap_key). - -.. image:: images/cp_vms/control_plane_6.png - -Apply customization script for Control Plane VMs ------------------------------------------------- - -Click :download:`openstack-k8s-controlnode.sh ` -to download the script. - -.. literalinclude:: shell/openstack-k8s-controlnode.sh - :language: bash - -This customization script will: - -* update ubuntu -* install docker - -.. image:: images/cp_vms/control_plane_7.png - -Launch Instance ---------------- - -.. image:: images/cp_vms/control_plane_8.png - - - -Create Kubernetes Worker VMs -============================ -The following instructions describe how to create OpenStack VMs to host the -Highly-Available Kubernetes Workers. ONAP workloads will only be scheduled on -these nodes. - -Launch new VM instances ------------------------ - -The number and size of Worker VMs is dependent on the size of the ONAP -deployment. By default, all ONAP applications are deployed. It's possible to -customize the deployment and enable a subset of the ONAP applications. For the -purpose of this guide, however, we will deploy 12 Kubernetes Workers that have -been sized to handle the entire ONAP application workload. - -.. 
image:: images/wk_vms/worker_1.png - -Select Ubuntu 18.04 as base image ---------------------------------- -Select "No" on "Create New Volume" - -.. image:: images/wk_vms/worker_2.png - -Select Flavor -------------- -The size of Kubernetes hosts depend on the size of the ONAP deployment -being installed. - -If a small subset of ONAP applications are being deployed -(i.e. for testing purposes), then 16GB or 32GB may be sufficient. - -.. image:: images/wk_vms/worker_3.png - -Networking ------------ - -.. image:: images/wk_vms/worker_4.png - -Security Group ---------------- - -.. image:: images/wk_vms/worker_5.png - -Key Pair --------- -Assign the key pair that was created/selected previously (e.g. onap_key). - -.. image:: images/wk_vms/worker_6.png - -Apply customization script for Kubernetes VM(s) ------------------------------------------------ - -Click :download:`openstack-k8s-workernode.sh ` to -download the script. - -.. literalinclude:: shell/openstack-k8s-workernode.sh - :language: bash - -This customization script will: - -* update ubuntu -* install docker -* install nfs common - - -Launch Instance ---------------- - -.. image:: images/wk_vms/worker_7.png - - - - -Assign Floating IP addresses ----------------------------- -Assign Floating IPs to all Control Plane and Worker VMs. -These addresses provide external access to the VMs and will be used by RKE -to configure kubernetes on to the VMs. - -Repeat the following for each VM previously created: - -.. image:: images/floating_ips/floating_1.png - -Resulting floating IP assignments in this example. - -.. image:: images/floating_ips/floating_2.png - - - - -Configure Rancher Kubernetes Engine (RKE) -========================================= - -Install RKE ------------ -Download and install RKE on a VM, desktop or laptop. -Binaries can be found here for Linux and Mac: https://github.com/rancher/rke/releases/tag/v1.0.6 - -.. note:: - There are several ways to install RKE. Further parts of this documentation - assumes that you have rke command available. - If you don't know how to install RKE you may follow the below steps: - - * chmod +x ./rke_linux-amd64 - * sudo mv ./rke_linux-amd64 /user/local/bin/rke - -RKE requires a *cluster.yml* as input. An example file is show below that -describes a Kubernetes cluster that will be mapped onto the OpenStack VMs -created earlier in this guide. - -Click :download:`cluster.yml ` to download the -configuration file. - -.. literalinclude:: yaml/cluster.yml - :language: yaml - -Prepare cluster.yml -------------------- -Before this configuration file can be used the external **address** -and the **internal_address** must be mapped for each control and worker node -in this file. 
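For example, after mapping, a single worker entry in *cluster.yml* might look
like the sketch below (both addresses are placeholders for this illustration
and must be replaced with the floating and internal IPs of your own VMs):

.. code-block:: yaml

    - address: 10.12.6.85
      internal_address: 10.0.0.14
      port: "22"
      role:
        - worker
      hostname_override: "onap-k8s-1"
      user: ubuntu
      ssh_key_path: "~/.ssh/onap-key"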
- -Run RKE -------- -From within the same directory as the cluster.yml file, simply execute:: - - > rke up - -The output will look something like:: - - INFO[0000] Initiating Kubernetes cluster - INFO[0000] [certificates] Generating admin certificates and kubeconfig - INFO[0000] Successfully Deployed state file at [./cluster.rkestate] - INFO[0000] Building Kubernetes cluster - INFO[0000] [dialer] Setup tunnel for host [10.12.6.82] - INFO[0000] [dialer] Setup tunnel for host [10.12.6.249] - INFO[0000] [dialer] Setup tunnel for host [10.12.6.74] - INFO[0000] [dialer] Setup tunnel for host [10.12.6.85] - INFO[0000] [dialer] Setup tunnel for host [10.12.6.238] - INFO[0000] [dialer] Setup tunnel for host [10.12.6.89] - INFO[0000] [dialer] Setup tunnel for host [10.12.5.11] - INFO[0000] [dialer] Setup tunnel for host [10.12.6.90] - INFO[0000] [dialer] Setup tunnel for host [10.12.6.244] - INFO[0000] [dialer] Setup tunnel for host [10.12.5.165] - INFO[0000] [dialer] Setup tunnel for host [10.12.6.126] - INFO[0000] [dialer] Setup tunnel for host [10.12.6.111] - INFO[0000] [dialer] Setup tunnel for host [10.12.5.160] - INFO[0000] [dialer] Setup tunnel for host [10.12.5.191] - INFO[0000] [dialer] Setup tunnel for host [10.12.6.195] - INFO[0002] [network] Deploying port listener containers - INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.85] - INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.89] - INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.90] - INFO[0011] [network] Successfully pulled image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.89] - . . . . - INFO[0309] [addons] Setting up Metrics Server - INFO[0309] [addons] Saving ConfigMap for addon rke-metrics-addon to Kubernetes - INFO[0309] [addons] Successfully saved ConfigMap for addon rke-metrics-addon to Kubernetes - INFO[0309] [addons] Executing deploy job rke-metrics-addon - INFO[0315] [addons] Metrics Server deployed successfully - INFO[0315] [ingress] Setting up nginx ingress controller - INFO[0315] [addons] Saving ConfigMap for addon rke-ingress-controller to Kubernetes - INFO[0316] [addons] Successfully saved ConfigMap for addon rke-ingress-controller to Kubernetes - INFO[0316] [addons] Executing deploy job rke-ingress-controller - INFO[0322] [ingress] ingress controller nginx deployed successfully - INFO[0322] [addons] Setting up user addons - INFO[0322] [addons] no user addons defined - INFO[0322] Finished building Kubernetes cluster successfully - -Install Kubectl -=============== - -Download and install kubectl. Binaries can be found here for Linux and Mac: - -https://storage.googleapis.com/kubernetes-release/release/v1.15.11/bin/linux/amd64/kubectl -https://storage.googleapis.com/kubernetes-release/release/v1.15.11/bin/darwin/amd64/kubectl - -You only need to install kubectl where you'll launch Kubernetes command. This -can be any machines of the Kubernetes cluster or a machine that has IP access -to the APIs. -Usually, we use the first controller as it has also access to internal -Kubernetes services, which can be convenient. 
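A minimal installation on Linux, assuming the binary has been downloaded from
the URL above into the current directory, could look like::

  > chmod +x ./kubectl

  > sudo mv ./kubectl /usr/local/bin/kubectl

  > kubectl version --client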
- -Validate deployment -------------------- - -:: - - > mkdir -p ~/.kube - - > cp kube_config_cluster.yml ~/.kube/config.onap - - > export KUBECONFIG=~/.kube/config.onap - - > kubectl config use-context onap - - > kubectl get nodes -o=wide - -:: - - NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME - onap-control-1 Ready controlplane,etcd 3h53m v1.15.2 10.0.0.8 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 - onap-control-2 Ready controlplane,etcd 3h53m v1.15.2 10.0.0.11 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 - onap-control-3 Ready controlplane,etcd 3h53m v1.15.2 10.0.0.12 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 - onap-k8s-1 Ready worker 3h53m v1.15.2 10.0.0.14 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 - onap-k8s-10 Ready worker 3h53m v1.15.2 10.0.0.16 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 - onap-k8s-11 Ready worker 3h53m v1.15.2 10.0.0.18 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 - onap-k8s-12 Ready worker 3h53m v1.15.2 10.0.0.7 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 - onap-k8s-2 Ready worker 3h53m v1.15.2 10.0.0.26 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 - onap-k8s-3 Ready worker 3h53m v1.15.2 10.0.0.5 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 - onap-k8s-4 Ready worker 3h53m v1.15.2 10.0.0.6 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 - onap-k8s-5 Ready worker 3h53m v1.15.2 10.0.0.9 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 - onap-k8s-6 Ready worker 3h53m v1.15.2 10.0.0.17 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 - onap-k8s-7 Ready worker 3h53m v1.15.2 10.0.0.20 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 - onap-k8s-8 Ready worker 3h53m v1.15.2 10.0.0.10 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 - onap-k8s-9 Ready worker 3h53m v1.15.2 10.0.0.4 Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 - - -Install Helm -============ - -Example Helm client install on Linux:: - - > wget https://get.helm.sh/helm-v2.16.6-linux-amd64.tar.gz - - > tar -zxvf helm-v2.16.6-linux-amd64.tar.gz - - > sudo mv linux-amd64/helm /usr/local/bin/helm - -Initialize Kubernetes Cluster for use by Helm ---------------------------------------------- - -:: - - > kubectl -n kube-system create serviceaccount tiller - - > kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller - - > helm init --service-account tiller - - > kubectl -n kube-system  rollout status deploy/tiller-deploy - - - -Setting up an NFS share for Multinode Kubernetes Clusters -========================================================= -Deploying applications to a Kubernetes cluster requires Kubernetes nodes to -share a common, distributed filesystem. In this tutorial, we will setup an -NFS Master, and configure all Worker nodes a Kubernetes cluster to play -the role of NFS slaves. - -It is recommended that a separate VM, outside of the kubernetes -cluster, be used. This is to ensure that the NFS Master does not compete for -resources with Kubernetes Control Plane or Worker Nodes. - - -Launch new NFS Server VM instance ---------------------------------- -.. image:: images/nfs_server/nfs_server_1.png - -Select Ubuntu 18.04 as base image ---------------------------------- -Select "No" on "Create New Volume" - -.. image:: images/nfs_server/nfs_server_2.png - -Select Flavor -------------- - -.. image:: images/nfs_server/nfs_server_3.png - -Networking ------------ - -.. 
image:: images/nfs_server/nfs_server_4.png - -Security Group ---------------- - -.. image:: images/nfs_server/nfs_server_5.png - -Key Pair --------- -Assign the key pair that was created/selected previously (e.g. onap_key). - -.. image:: images/nfs_server/nfs_server_6.png - -Apply customization script for NFS Server VM --------------------------------------------- - -Click :download:`openstack-nfs-server.sh ` to download -the script. - -.. literalinclude:: shell/openstack-nfs-server.sh - :language: bash - -This customization script will: - -* update ubuntu -* install nfs server - - -Launch Instance ---------------- - -.. image:: images/nfs_server/nfs_server_7.png - - - -Assign Floating IP addresses ----------------------------- - -.. image:: images/nfs_server/nfs_server_8.png - -Resulting floating IP assignments in this example. - -.. image:: images/nfs_server/nfs_server_9.png - - -To properly set up an NFS share on Master and Slave nodes, the user can run the -scripts below. - -Click :download:`master_nfs_node.sh ` to download the -script. - -.. literalinclude:: shell/master_nfs_node.sh - :language: bash - -Click :download:`slave_nfs_node.sh ` to download the script. - -.. literalinclude:: shell/slave_nfs_node.sh - :language: bash - -The master_nfs_node.sh script runs in the NFS Master node and needs the list of -NFS Slave nodes as input, e.g.:: - - > sudo ./master_nfs_node.sh node1_ip node2_ip ... nodeN_ip - -The slave_nfs_node.sh script runs in each NFS Slave node and needs the IP of -the NFS Master node as input, e.g.:: - - > sudo ./slave_nfs_node.sh master_node_ip - - -ONAP Deployment via OOM -======================= -Now that Kubernetes and Helm are installed and configured you can prepare to -deploy ONAP. Follow the instructions in the README.md_ or look at the official -documentation to get started: - -- :ref:`quick-start-label` - deploy ONAP on an existing cloud -- :ref:`user-guide-label` - a guide for operators of an ONAP instance diff --git a/docs/oom_setup_paas.rst b/docs/oom_setup_paas.rst deleted file mode 100644 index 2dabcb1aea..0000000000 --- a/docs/oom_setup_paas.rst +++ /dev/null @@ -1,144 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 -.. International License. -.. http://creativecommons.org/licenses/by/4.0 -.. Copyright 2021 Nokia - -.. Links -.. _Cert-Manager Installation documentation: https://cert-manager.io/docs/installation/kubernetes/ -.. _Cert-Manager kubectl plugin documentation: https://cert-manager.io/docs/usage/kubectl-plugin/ -.. _Strimzi Apache Kafka Operator helm Installation documentation: https://strimzi.io/docs/operators/in-development/deploying.html#deploying-cluster-operator-helm-chart-str - -.. _oom_setup_paas: - -ONAP PaaS set-up -################ - -Starting from Honolulu release, Cert-Manager and Prometheus Stack are a part -of k8s PaaS for ONAP operations and can be installed to provide -additional functionality for ONAP engineers. -Starting from Jakarta release, Strimzi Apache Kafka is deployed to provide -Apache kafka as the default messaging bus for ONAP. - -The versions of PaaS components that are supported by OOM are as follows: - -.. 
table:: ONAP PaaS components - - ============== ============= ================= ======= - Release Cert-Manager Prometheus Stack Strimzi - ============== ============= ================= ======= - honolulu 1.2.0 13.x - istanbul 1.5.4 19.x - jakarta 0.28.0 - ============== ============= ================= ======= - -This guide provides instructions on how to install the PaaS -components for ONAP. - -.. contents:: - :depth: 1 - :local: -.. - -Strimzi Apache Kafka Operator -============================= - -Strimzi provides a way to run an Apache Kafka cluster on Kubernetes -in various deployment configurations by using kubernetes operators. -Operators are a method of packaging, deploying, and managing a -Kubernetes application. -Strimzi Operators extend Kubernetes functionality, automating common -and complex tasks related to a Kafka deployment. By implementing -knowledge of Kafka operations in code, Kafka administration -tasks are simplified and require less manual intervention. - -Installation steps ------------------- - -The recommended version of Strimzi for Kubernetes 1.19 is v0.28.0. -The Strimzi cluster operator is deployed using helm to install the parent chart -containing all of the required custom resource definitions. This should be done -by a kubernetes administrator to allow for deployment of custom resources in to -any kubernetes namespace within the cluster. - -Full installation instructions can be found in the -`Strimzi Apache Kafka Operator helm Installation documentation`_. - -Installation can be as simple as: - -- Add the helm repo:: - - > helm repo add strimzi https://strimzi.io/charts/ - -- Install the operator:: - - > helm install strimzi-kafka-operator strimzi/strimzi-kafka-operator --namespace strimzi-system --version 0.28.0 --set watchAnyNamespace=true --create-namespace - -Cert-Manager -============ - -Cert-Manager is a native Kubernetes certificate management controller. -It can help with issuing certificates from a variety of sources, such as -Let’s Encrypt, HashiCorp Vault, Venafi, a simple signing key pair, self -signed or external issuers. It ensures certificates are valid and up to -date, and attempt to renew certificates at a configured time before expiry. - -Installation steps ------------------- - -The recommended version of Cert-Manager for Kubernetes 1.19 is v1.5.4. -Cert-Manager is deployed using regular YAML manifests which include all -the needed resources (the CustomResourceDefinitions, cert-manager, -namespace, and the webhook component). - -Full installation instructions, including details on how to configure extra -functionality in Cert-Manager can be found in the -`Cert-Manager Installation documentation`_. - -There is also a kubectl plugin (kubectl cert-manager) that can help you -to manage cert-manager resources inside your cluster. For installation -steps, please refer to `Cert-Manager kubectl plugin documentation`_. - -Installation can be as simple as:: - - > kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml - -Prometheus Stack (optional) -=========================== - -Prometheus is an open-source systems monitoring and alerting toolkit with -an active ecosystem. - -Kube Prometheus Stack is a collection of Kubernetes manifests, Grafana -dashboards, and Prometheus rules combined with documentation and scripts to -provide easy to operate end-to-end Kubernetes cluster monitoring with -Prometheus using the Prometheus Operator. 
As it includes both Prometheus -Operator and Grafana dashboards, there is no need to set up them separately. - -Installation steps ------------------- - -The recommended version of kube-prometheus-stack chart for -Kubernetes 1.19 is 19.x (which is currently the latest major chart version), -for example 19.0.2. - -In order to install Prometheus Stack, you must follow these steps: - -- Create the namespace for Prometheus Stack:: - - > kubectl create namespace prometheus - -- Add the prometheus-community Helm repository:: - - > helm repo add prometheus-community https://prometheus-community.github.io/helm-charts - -- Update your local Helm chart repository cache:: - - > helm repo update - -- To install the kube-prometheus-stack Helm chart in latest version:: - - > helm install prometheus prometheus-community/kube-prometheus-stack --namespace=prometheus - - To install the kube-prometheus-stack Helm chart in specific version, for example 19.0.2:: - - > helm install prometheus prometheus-community/kube-prometheus-stack --namespace=prometheus --version=19.0.2 diff --git a/docs/oom_user_guide.rst b/docs/oom_user_guide.rst deleted file mode 100644 index 2ff74b5898..0000000000 --- a/docs/oom_user_guide.rst +++ /dev/null @@ -1,798 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 -.. International License. -.. http://creativecommons.org/licenses/by/4.0 -.. Copyright 2018-2021 Amdocs, Bell Canada, Orange, Samsung, Nordix Foundation -.. _oom_user_guide: - -.. Links -.. _Curated applications for Kubernetes: https://github.com/kubernetes/charts -.. _Services: https://kubernetes.io/docs/concepts/services-networking/service/ -.. _ReplicaSet: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/ -.. _StatefulSet: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ -.. _Helm Documentation: https://docs.helm.sh/helm/ -.. _Helm: https://docs.helm.sh/ -.. _Kubernetes: https://Kubernetes.io/ -.. _Kubernetes LoadBalancer: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer -.. _user-guide-label: - -OOM User Guide -############## - -The ONAP Operations Manager (OOM) provide the ability to manage the entire -life-cycle of an ONAP installation, from the initial deployment to final -decommissioning. This guide provides instructions for users of ONAP to -use the Kubernetes_/Helm_ system as a complete ONAP management system. - -This guide provides many examples of Helm command line operations. For a -complete description of these commands please refer to the `Helm -Documentation`_. - -.. figure:: images/oom_logo/oomLogoV2-medium.png - :align: right - -The following sections describe the life-cycle operations: - -- Deploy_ - with built-in component dependency management -- Configure_ - unified configuration across all ONAP components -- Monitor_ - real-time health monitoring feeding to a Consul UI and Kubernetes -- Heal_- failed ONAP containers are recreated automatically -- Scale_ - cluster ONAP services to enable seamless scaling -- Upgrade_ - change-out containers or configuration with little or no service - impact -- Delete_ - cleanup individual containers or entire deployments - -.. figure:: images/oom_logo/oomLogoV2-Deploy.png - :align: right - -Deploy -====== - -The OOM team with assistance from the ONAP project teams, have built a -comprehensive set of Helm charts, yaml files very similar to TOSCA files, that -describe the composition of each of the ONAP components and the relationship -within and between components. 
Using this model Helm is able to deploy all of -ONAP with a few simple commands. - -Pre-requisites --------------- -Your environment must have the Kubernetes `kubectl` with Strimzi Apache Kafka, Cert-Manager -and Helm setup as a one time activity. - -Install Kubectl -~~~~~~~~~~~~~~~ -Enter the following to install kubectl (on Ubuntu, there are slight differences -on other O/Ss), the Kubernetes command line interface used to manage a -Kubernetes cluster:: - - > curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.19.11/bin/linux/amd64/kubectl - > chmod +x ./kubectl - > sudo mv ./kubectl /usr/local/bin/kubectl - > mkdir ~/.kube - -Paste kubectl config from Rancher (see the :ref:`cloud-setup-guide-label` for -alternative Kubernetes environment setups) into the `~/.kube/config` file. - -Verify that the Kubernetes config is correct:: - - > kubectl get pods --all-namespaces - -At this point you should see Kubernetes pods running. - -Install Helm -~~~~~~~~~~~~ -Helm is used by OOM for package and configuration management. To install Helm, -enter the following:: - - > wget https://get.helm.sh/helm-v3.6.3-linux-amd64.tar.gz - > tar -zxvf helm-v3.6.3-linux-amd64.tar.gz - > sudo mv linux-amd64/helm /usr/local/bin/helm - -Verify the Helm version with:: - - > helm version - -Install Strimzi Apache Kafka Operator -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Details on how to install Strimzi Apache Kafka can be found -:doc:`here `. - -Install Cert-Manager -~~~~~~~~~~~~~~~~~~~~ -Details on how to install Cert-Manager can be found -:doc:`here `. - -Install the Helm Repo ---------------------- -Once kubectl and Helm are setup, one needs to setup a local Helm server to -server up the ONAP charts:: - - > helm install osn/onap - -.. note:: - The osn repo is not currently available so creation of a local repository is - required. - -Helm is able to use charts served up from a repository and comes setup with a -default CNCF provided `Curated applications for Kubernetes`_ repository called -stable which should be removed to avoid confusion:: - - > helm repo remove stable - -.. To setup the Open Source Networking Nexus repository for helm enter:: -.. > helm repo add osn 'https://nexus3.onap.org:10001/helm/helm-repo-in-nexus/master/' - -To prepare your system for an installation of ONAP, you'll need to:: - - > git clone -b kohn --recurse-submodules -j2 http://gerrit.onap.org/r/oom - > cd oom/kubernetes - - -To install a local Helm server:: - - > curl -LO https://s3.amazonaws.com/chartmuseum/release/latest/bin/linux/amd64/chartmuseum - > chmod +x ./chartmuseum - > mv ./chartmuseum /usr/local/bin - -To setup a local Helm server to server up the ONAP charts:: - - > mkdir -p ~/helm3-storage - > chartmuseum --storage local --storage-local-rootdir ~/helm3-storage -port 8879 & - -Note the port number that is listed and use it in the Helm repo add as -follows:: - - > helm repo add local http://127.0.0.1:8879 - -To get a list of all of the available Helm chart repositories:: - - > helm repo list - NAME URL - local http://127.0.0.1:8879 - -Then build your local Helm repository:: - - > make SKIP_LINT=TRUE [HELM_BIN=] all - -`HELM_BIN` - Sets the helm binary to be used. 
The default value use helm from PATH - -The Helm search command reads through all of the repositories configured on the -system, and looks for matches:: - - > helm search repo local - NAME VERSION DESCRIPTION - local/appc 11.0.0 Application Controller - local/clamp 11.0.0 ONAP Clamp - local/common 11.0.0 Common templates for inclusion in other charts - local/onap 11.0.0 Open Network Automation Platform (ONAP) - local/robot 11.0.0 A helm Chart for kubernetes-ONAP Robot - local/so 11.0.0 ONAP Service Orchestrator - -In any case, setup of the Helm repository is a one time activity. - -Next, install Helm Plugins required to deploy the ONAP release:: - - > cp -R ~/oom/kubernetes/helm/plugins/ ~/.local/share/helm/plugins - -Once the repo is setup, installation of ONAP can be done with a single -command:: - - > helm deploy development local/onap --namespace onap --set global.masterPassword=password - -This will install ONAP from a local repository in a 'development' Helm release. -As described below, to override the default configuration values provided by -OOM, an environment file can be provided on the command line as follows:: - - - - > helm deploy development local/onap --namespace onap -f overrides.yaml --set global.masterPassword=password - -.. note:: - Refer the Configure_ section on how to update overrides.yaml and values.yaml - -To get a summary of the status of all of the pods (containers) running in your -deployment:: - - > kubectl get pods --namespace onap -o=wide - -.. note:: - The Kubernetes namespace concept allows for multiple instances of a component - (such as all of ONAP) to co-exist with other components in the same - Kubernetes cluster by isolating them entirely. Namespaces share only the - hosts that form the cluster thus providing isolation between production and - development systems as an example. - -.. note:: - The Helm `--name` option refers to a release name and not a Kubernetes namespace. - - -To install a specific version of a single ONAP component (`so` in this example) -with the given release name enter:: - - > helm deploy so onap/so --version 11.0.0 --set global.masterPassword=password --set global.flavor=unlimited --namespace onap - -.. note:: - The dependent components should be installed for component being installed - - -To display details of a specific resource or group of resources type:: - - > kubectl describe pod so-1071802958-6twbl - -where the pod identifier refers to the auto-generated pod identifier. - -.. figure:: images/oom_logo/oomLogoV2-Configure.png - :align: right - -Configure -========= - -Each project within ONAP has its own configuration data generally consisting -of: environment variables, configuration files, and database initial values. -Many technologies are used across the projects resulting in significant -operational complexity and an inability to apply global parameters across the -entire ONAP deployment. OOM solves this problem by introducing a common -configuration technology, Helm charts, that provide a hierarchical -configuration with the ability to override values with higher -level charts or command line options. - -The structure of the configuration of ONAP is shown in the following diagram. -Note that key/value pairs of a parent will always take precedence over those -of a child. Also note that values set on the command line have the highest -precedence of all. - -.. 
graphviz:: - - digraph config { - { - node [shape=folder] - oValues [label="values.yaml"] - demo [label="onap-demo.yaml"] - prod [label="onap-production.yaml"] - oReq [label="Chart.yaml"] - soValues [label="values.yaml"] - soReq [label="Chart.yaml"] - mdValues [label="values.yaml"] - } - { - oResources [label="resources"] - } - onap -> oResources - onap -> oValues - oResources -> environments - oResources -> oReq - oReq -> so - environments -> demo - environments -> prod - so -> soValues - so -> soReq - so -> charts - charts -> mariadb - mariadb -> mdValues - - } - -The top level onap/values.yaml file contains the values required to be set -before deploying ONAP. Here is the contents of this file: - -.. include:: ../kubernetes/onap/values.yaml - :code: yaml - -One may wish to create a value file that is specific to a given deployment such -that it can be differentiated from other deployments. For example, a -onap-development.yaml file may create a minimal environment for development -while onap-production.yaml might describe a production deployment that operates -independently of the developer version. - -For example, if the production OpenStack instance was different from a -developer's instance, the onap-production.yaml file may contain a different -value for the vnfDeployment/openstack/oam_network_cidr key as shown below. - -.. code-block:: yaml - - nsPrefix: onap - nodePortPrefix: 302 - apps: consul msb mso message-router sdnc vid robot portal policy appc aai - sdc dcaegen2 log cli multicloud clamp vnfsdk aaf kube2msb - dataRootDir: /dockerdata-nfs - - # docker repositories - repository: - onap: nexus3.onap.org:10001 - oom: oomk8s - aai: aaionap - filebeat: docker.elastic.co - - image: - pullPolicy: Never - - # vnf deployment environment - vnfDeployment: - openstack: - ubuntu_14_image: "Ubuntu_14.04.5_LTS" - public_net_id: "e8f51956-00dd-4425-af36-045716781ffc" - oam_network_id: "d4769dfb-c9e4-4f72-b3d6-1d18f4ac4ee6" - oam_subnet_id: "191f7580-acf6-4c2b-8ec0-ba7d99b3bc4e" - oam_network_cidr: "192.168.30.0/24" - <...> - - -To deploy ONAP with this environment file, enter:: - - > helm deploy local/onap -n onap -f onap/resources/environments/onap-production.yaml --set global.masterPassword=password - -.. include:: yaml/environments_onap_demo.yaml - :code: yaml - -When deploying all of ONAP, the dependencies section of the Chart.yaml file -controls which and what version of the ONAP components are included. -Here is an excerpt of this file: - -.. code-block:: yaml - - dependencies: - <...> - - name: so - version: ~11.0.0 - repository: '@local' - condition: so.enabled - <...> - -The ~ operator in the `so` version value indicates that the latest "10.X.X" -version of `so` shall be used thus allowing the chart to allow for minor -upgrades that don't impact the so API; hence, version 10.0.1 will be installed -in this case. - -The onap/resources/environment/dev.yaml (see the excerpt below) enables -for fine grained control on what components are included as part of this -deployment. By changing this `so` line to `enabled: false` the `so` component -will not be deployed. If this change is part of an upgrade the existing `so` -component will be shut down. Other `so` parameters and even `so` child values -can be modified, for example the `so`'s `liveness` probe could be disabled -(which is not recommended as this change would disable auto-healing of `so`). - -.. code-block:: yaml - - ################################################################# - # Global configuration overrides. 
- # - # These overrides will affect all helm charts (ie. applications) - # that are listed below and are 'enabled'. - ################################################################# - global: - <...> - - ################################################################# - # Enable/disable and configure helm charts (ie. applications) - # to customize the ONAP deployment. - ################################################################# - aaf: - enabled: false - <...> - so: # Service Orchestrator - enabled: true - - replicaCount: 1 - - liveness: - # necessary to disable liveness probe when setting breakpoints - # in debugger so K8s doesn't restart unresponsive container - enabled: true - - <...> - -Accessing the ONAP Portal using OOM and a Kubernetes Cluster ------------------------------------------------------------- - -The ONAP deployment created by OOM operates in a private IP network that isn't -publicly accessible (i.e. OpenStack VMs with private internal network) which -blocks access to the ONAP Portal. To enable direct access to this Portal from a -user's own environment (a laptop etc.) the portal application's port 8989 is -exposed through a `Kubernetes LoadBalancer`_ object. - -Typically, to be able to access the Kubernetes nodes publicly a public address -is assigned. In OpenStack this is a floating IP address. - -When the `portal-app` chart is deployed a Kubernetes service is created that -instantiates a load balancer. The LB chooses the private interface of one of -the nodes as in the example below (10.0.0.4 is private to the K8s cluster only). -Then to be able to access the portal on port 8989 from outside the K8s & -OpenStack environment, the user needs to assign/get the floating IP address that -corresponds to the private IP as follows:: - - > kubectl -n onap get services|grep "portal-app" - portal-app LoadBalancer 10.43.142.201 10.0.0.4 8989:30215/TCP,8006:30213/TCP,8010:30214/TCP 1d app=portal-app,release=dev - - -In this example, use the 11.0.0.4 private address as a key find the -corresponding public address which in this example is 10.12.6.155. If you're -using OpenStack you'll do the lookup with the horizon GUI or the OpenStack CLI -for your tenant (openstack server list). That IP is then used in your -`/etc/hosts` to map the fixed DNS aliases required by the ONAP Portal as shown -below:: - - 10.12.6.155 portal.api.simpledemo.onap.org - 10.12.6.155 vid.api.simpledemo.onap.org - 10.12.6.155 sdc.api.fe.simpledemo.onap.org - 10.12.6.155 sdc.workflow.plugin.simpledemo.onap.org - 10.12.6.155 sdc.dcae.plugin.simpledemo.onap.org - 10.12.6.155 portal-sdk.simpledemo.onap.org - 10.12.6.155 policy.api.simpledemo.onap.org - 10.12.6.155 aai.api.sparky.simpledemo.onap.org - 10.12.6.155 cli.api.simpledemo.onap.org - 10.12.6.155 msb.api.discovery.simpledemo.onap.org - 10.12.6.155 msb.api.simpledemo.onap.org - 10.12.6.155 clamp.api.simpledemo.onap.org - 10.12.6.155 so.api.simpledemo.onap.org - 10.12.6.155 sdc.workflow.plugin.simpledemo.onap.org - -Ensure you've disabled any proxy settings the browser you are using to access -the portal and then simply access now the new ssl-encrypted URL: -``https://portal.api.simpledemo.onap.org:30225/ONAPPORTAL/login.htm`` - -.. note:: - Using the HTTPS based Portal URL the Browser needs to be configured to accept - unsecure credentials. - Additionally when opening an Application inside the Portal, the Browser - might block the content, which requires to disable the blocking and reloading - of the page - -.. 
note:: - Besides the ONAP Portal the Components can deliver additional user interfaces, - please check the Component specific documentation. - -.. note:: - - | Alternatives Considered: - - - Kubernetes port forwarding was considered but discarded as it would - require the end user to run a script that opens up port forwarding tunnels - to each of the pods that provides a portal application widget. - - - Reverting to a VNC server similar to what was deployed in the Amsterdam - release was also considered but there were many issues with resolution, - lack of volume mount, /etc/hosts dynamic update, file upload that were - a tall order to solve in time for the Beijing release. - - Observations: - - - If you are not using floating IPs in your Kubernetes deployment and - directly attaching a public IP address (i.e. by using your public provider - network) to your K8S Node VMs' network interface, then the output of - 'kubectl -n onap get services | grep "portal-app"' - will show your public IP instead of the private network's IP. Therefore, - you can grab this public IP directly (as compared to trying to find the - floating IP first) and map this IP in /etc/hosts. - -.. figure:: images/oom_logo/oomLogoV2-Monitor.png - :align: right - -Monitor -======= - -All highly available systems include at least one facility to monitor the -health of components within the system. Such health monitors are often used as -inputs to distributed coordination systems (such as etcd, Zookeeper, or Consul) -and monitoring systems (such as Nagios or Zabbix). OOM provides two mechanisms -to monitor the real-time health of an ONAP deployment: - -- a Consul GUI for a human operator or downstream monitoring systems and - Kubernetes liveness probes that enable automatic healing of failed - containers, and -- a set of liveness probes which feed into the Kubernetes manager which - are described in the Heal section. - -Within ONAP, Consul is the monitoring system of choice and deployed by OOM in -two parts: - -- a three-way, centralized Consul server cluster is deployed as a highly - available monitor of all of the ONAP components, and -- a number of Consul agents. - -The Consul server provides a user interface that allows a user to graphically -view the current health status of all of the ONAP components for which agents -have been created - a sample from the ONAP Integration labs follows: - -.. figure:: images/consul/consulHealth.png - :align: center - -To see the real-time health of a deployment go to: ``http://:30270/ui/`` -where a GUI much like the following will be found: - -.. note:: - If Consul GUI is not accessible, you can refer this - `kubectl port-forward `_ method to access an application - -.. figure:: images/oom_logo/oomLogoV2-Heal.png - :align: right - -Heal -==== - -The ONAP deployment is defined by Helm charts as mentioned earlier. These Helm -charts are also used to implement automatic recoverability of ONAP components -when individual components fail. Once ONAP is deployed, a "liveness" probe -starts checking the health of the components after a specified startup time. - -Should a liveness probe indicate a failed container it will be terminated and a -replacement will be started in its place - containers are ephemeral. Should the -deployment specification indicate that there are one or more dependencies to -this container or component (for example a dependency on a database) the -dependency will be satisfied before the replacement container/component is -started. 
This mechanism ensures that, after a failure, all of the ONAP -components restart successfully. - -To test healing, the following command can be used to delete a pod:: - - > kubectl delete pod [pod name] -n [pod namespace] - -One could then use the following command to monitor the pods and observe the -pod being terminated and the service being automatically healed with the -creation of a replacement pod:: - - > kubectl get pods --all-namespaces -o=wide - -.. figure:: images/oom_logo/oomLogoV2-Scale.png - :align: right - -Scale -===== - -Many of the ONAP components are horizontally scalable which allows them to -adapt to expected offered load. During the Beijing release scaling is static, -that is during deployment or upgrade a cluster size is defined and this cluster -will be maintained even in the presence of faults. The parameter that controls -the cluster size of a given component is found in the values.yaml file for that -component. Here is an excerpt that shows this parameter: - -.. code-block:: yaml - - # default number of instances - replicaCount: 1 - -In order to change the size of a cluster, an operator could use a helm upgrade -(described in detail in the next section) as follows:: - - > helm upgrade [RELEASE] [CHART] [flags] - -The RELEASE argument can be obtained from the following command:: - - > helm list - -Below is the example for the same:: - - > helm list - NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE - dev 1 Wed Oct 14 13:49:52 2020 DEPLOYED onap-11.0.0 Kohn onap - dev-cassandra 5 Thu Oct 15 14:45:34 2020 DEPLOYED cassandra-11.0.0 onap - dev-contrib 1 Wed Oct 14 13:52:53 2020 DEPLOYED contrib-11.0.0 onap - dev-mariadb-galera 1 Wed Oct 14 13:55:56 2020 DEPLOYED mariadb-galera-11.0.0 onap - -Here the Name column shows the RELEASE NAME, In our case we want to try the -scale operation on cassandra, thus the RELEASE NAME would be dev-cassandra. - -Now we need to obtain the chart name for cassandra. Use the below -command to get the chart name:: - - > helm search cassandra - -Below is the example for the same:: - - > helm search cassandra - NAME CHART VERSION APP VERSION DESCRIPTION - local/cassandra 11.0.0 ONAP cassandra - local/portal-cassandra 11.0.0 Portal cassandra - local/aaf-cass 11.0.0 ONAP AAF cassandra - local/sdc-cs 11.0.0 ONAP Service Design and Creation Cassandra - -Here the Name column shows the chart name. As we want to try the scale -operation for cassandra, thus the corresponding chart name is local/cassandra - - -Now we have both the command's arguments, thus we can perform the -scale operation for cassandra as follows:: - - > helm upgrade dev-cassandra local/cassandra --set replicaCount=3 - -Using this command we can scale up or scale down the cassandra db instances. - - -The ONAP components use Kubernetes provided facilities to build clustered, -highly available systems including: Services_ with load-balancers, ReplicaSet_, -and StatefulSet_. Some of the open-source projects used by the ONAP components -directly support clustered configurations, for example ODL and MariaDB Galera. - -The Kubernetes Services_ abstraction to provide a consistent access point for -each of the ONAP components, independent of the pod or container architecture -of that component. 
For example, SDN-C uses OpenDaylight clustering with a -default cluster size of three but uses a Kubernetes service to and change the -number of pods in this abstract this cluster from the other ONAP components -such that the cluster could change size and this change is isolated from the -other ONAP components by the load-balancer implemented in the ODL service -abstraction. - -A ReplicaSet_ is a construct that is used to describe the desired state of the -cluster. For example 'replicas: 3' indicates to Kubernetes that a cluster of 3 -instances is the desired state. Should one of the members of the cluster fail, -a new member will be automatically started to replace it. - -Some of the ONAP components many need a more deterministic deployment; for -example to enable intra-cluster communication. For these applications the -component can be deployed as a Kubernetes StatefulSet_ which will maintain a -persistent identifier for the pods and thus a stable network id for the pods. -For example: the pod names might be web-0, web-1, web-{N-1} for N 'web' pods -with corresponding DNS entries such that intra service communication is simple -even if the pods are physically distributed across multiple nodes. An example -of how these capabilities can be used is described in the Running Consul on -Kubernetes tutorial. - -.. figure:: images/oom_logo/oomLogoV2-Upgrade.png - :align: right - -Upgrade -======= - -Helm has built-in capabilities to enable the upgrade of pods without causing a -loss of the service being provided by that pod or pods (if configured as a -cluster). As described in the OOM Developer's Guide, ONAP components provide -an abstracted 'service' end point with the pods or containers providing this -service hidden from other ONAP components by a load balancer. This capability -is used during upgrades to allow a pod with a new image to be added to the -service before removing the pod with the old image. This 'make before break' -capability ensures minimal downtime. - -Prior to doing an upgrade, determine of the status of the deployed charts:: - - > helm list - NAME REVISION UPDATED STATUS CHART NAMESPACE - so 1 Mon Feb 5 10:05:22 2020 DEPLOYED so-11.0.0 onap - -When upgrading a cluster a parameter controls the minimum size of the cluster -during the upgrade while another parameter controls the maximum number of nodes -in the cluster. For example, SNDC configured as a 3-way ODL cluster might -require that during the upgrade no fewer than 2 pods are available at all times -to provide service while no more than 5 pods are ever deployed across the two -versions at any one time to avoid depleting the cluster of resources. In this -scenario, the SDNC cluster would start with 3 old pods then Kubernetes may add -a new pod (3 old, 1 new), delete one old (2 old, 1 new), add two new pods (2 -old, 3 new) and finally delete the 2 old pods (3 new). During this sequence -the constraints of the minimum of two pods and maximum of five would be -maintained while providing service the whole time. - -Initiation of an upgrade is triggered by changes in the Helm charts. For -example, if the image specified for one of the pods in the SDNC deployment -specification were to change (i.e. point to a new Docker image in the nexus3 -repository - commonly through the change of a deployment variable), the -sequence of events described in the previous paragraph would be initiated. 
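While such a rolling upgrade is in progress, it can be observed with standard
Kubernetes tooling. As a sketch (the deployment name used here is purely
illustrative; check ``kubectl get deployments -n onap`` for the real names in
your deployment)::

  > kubectl rollout status deployment/so-bpmn-infra -n onap

  > kubectl get pods -n onap -o=wide | grep so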
- -For example, to upgrade a container by changing configuration, specifically an -environment value:: - - > helm upgrade so onap/so --version 11.0.1 --set enableDebug=true - -Issuing this command will result in the appropriate container being stopped by -Kubernetes and replaced with a new container with the new environment value. - -To upgrade a component to a new version with a new configuration file enter:: - - > helm upgrade so onap/so --version 11.0.1 -f environments/demo.yaml - -To fetch release history enter:: - - > helm history so - REVISION UPDATED STATUS CHART DESCRIPTION - 1 Mon Jul 5 10:05:22 2022 SUPERSEDED so-11.0.0 Install complete - 2 Mon Jul 5 10:10:55 2022 DEPLOYED so-11.0.1 Upgrade complete - -Unfortunately, not all upgrades are successful. In recognition of this the -lineup of pods within an ONAP deployment is tagged such that an administrator -may force the ONAP deployment back to the previously tagged configuration or to -a specific configuration, say to jump back two steps if an incompatibility -between two ONAP components is discovered after the two individual upgrades -succeeded. - -This rollback functionality gives the administrator confidence that in the -unfortunate circumstance of a failed upgrade the system can be rapidly brought -back to a known good state. This process of rolling upgrades while under -service is illustrated in this short YouTube video showing a Zero Downtime -Upgrade of a web application while under a 10 million transaction per second -load. - -For example, to roll-back back to previous system revision enter:: - - > helm rollback so 1 - - > helm history so - REVISION UPDATED STATUS CHART DESCRIPTION - 1 Mon Jul 5 10:05:22 2022 SUPERSEDED so-11.0.0 Install complete - 2 Mon Jul 5 10:10:55 2022 SUPERSEDED so-11.0.1 Upgrade complete - 3 Mon Jul 5 10:14:32 2022 DEPLOYED so-11.0.0 Rollback to 1 - -.. note:: - - The description field can be overridden to document actions taken or include - tracking numbers. - -Many of the ONAP components contain their own databases which are used to -record configuration or state information. The schemas of these databases may -change from version to version in such a way that data stored within the -database needs to be migrated between versions. If such a migration script is -available it can be invoked during the upgrade (or rollback) by Container -Lifecycle Hooks. Two such hooks are available, PostStart and PreStop, which -containers can access by registering a handler against one or both. Note that -it is the responsibility of the ONAP component owners to implement the hook -handlers - which could be a shell script or a call to a specific container HTTP -endpoint - following the guidelines listed on the Kubernetes site. Lifecycle -hooks are not restricted to database migration or even upgrades but can be used -anywhere specific operations need to be taken during lifecycle operations. - -OOM uses Helm K8S package manager to deploy ONAP components. Each component is -arranged in a packaging format called a chart - a collection of files that -describe a set of k8s resources. Helm allows for rolling upgrades of the ONAP -component deployed. To upgrade a component Helm release you will need an -updated Helm chart. The chart might have modified, deleted or added values, -deployment yamls, and more. 
To get the release name use:: - - > helm ls - -To easily upgrade the release use:: - - > helm upgrade [RELEASE] [CHART] - -To roll back to a previous release version use:: - - > helm rollback [flags] [RELEASE] [REVISION] - -For example, to upgrade the onap-so helm release to the latest SO container -release v1.1.2: - -- Edit so values.yaml which is part of the chart -- Change "so: nexus3.onap.org:10001/openecomp/so:v1.1.1" to - "so: nexus3.onap.org:10001/openecomp/so:v1.1.2" -- From the chart location run:: - - > helm upgrade onap-so - -The previous so pod will be terminated and a new so pod with an updated so -container will be created. - -.. figure:: images/oom_logo/oomLogoV2-Delete.png - :align: right - -Delete -====== - -Existing deployments can be partially or fully removed once they are no longer -needed. To minimize errors it is recommended that before deleting components -from a running deployment the operator perform a 'dry-run' to display exactly -what will happen with a given command prior to actually deleting anything. -For example:: - - > helm undeploy onap --dry-run - -will display the outcome of deleting the 'onap' release from the -deployment. -To completely delete a release and remove it from the internal store enter:: - - > helm undeploy onap - -Once complete undeploy is done then delete the namespace as well -using following command:: - - > kubectl delete namespace - -.. note:: - You need to provide the namespace name which you used during deployment, - below is the example:: - - > kubectl delete namespace onap - -One can also remove individual components from a deployment by changing the -ONAP configuration values. For example, to remove `so` from a running -deployment enter:: - - > helm undeploy onap-so - -will remove `so` as the configuration indicates it's no longer part of the -deployment. This might be useful if a one wanted to replace just `so` by -installing a custom version. diff --git a/docs/release_notes/release-notes-amsterdam.rst b/docs/release_notes/release-notes-amsterdam.rst deleted file mode 100644 index 6fc229696c..0000000000 --- a/docs/release_notes/release-notes-amsterdam.rst +++ /dev/null @@ -1,75 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International -.. License. -.. http://creativecommons.org/licenses/by/4.0 -.. Copyright 2017 Bell Canada & Amdocs Intellectual Property. All rights -.. reserved. -.. _release_notes_amsterdam: - -:orphan: - -ONAP Operations Manager Release Notes -===================================== - -Version: 1.1.0 --------------- - -:Release Date: 2017-11-16 - -**New Features** - -The Amsterdam release is the first release of the ONAP Operations Manager -(OOM). - -The main goal of the Amsterdam release was to: - - - Support Flexible Platform Deployment via Kubernetes of fully - containerized ONAP components - on any type of environment. - - Support State Management of ONAP platform components. - - Support full production ONAP deployment and any variation of component - level deployment for development. - - Platform Operations Orchestration / Control Loop Actions. - - Platform centralized logging with ELK stack. - -**Bug Fixes** - - The full list of implemented user stories and epics is available on - `JIRA `_ - This is the first release of OOM, the defects fixed in this release were - raised during the course of the release. - Anything not closed is captured below under Known Issues. If you want to - review the defects fixed in the Amsterdam release, refer to Jira link - above. 
- -**Known Issues** - - `OOM-6 `_ Automated platform deployment on Docker/Kubernetes - - VFC, AAF, MSB minor issues. - - Workaround: Manual configuration changes - however the reference - vFirewall use case does not currently require these components. - - - `OOM-10 `_ Platform configuration management. - - OOM ONAP Configuration Management - Handling of Secrets. - - Workaround: Automated workaround to be able to pull from protected - docker repositories. - - -**Security Issues** - N/A - - -**Upgrade Notes** - - N/A - -**Deprecation Notes** - - N/A - -**Other** - - N/A - -End of Release Notes diff --git a/docs/release_notes/release-notes-beijing.rst b/docs/release_notes/release-notes-beijing.rst deleted file mode 100644 index 84f86c100d..0000000000 --- a/docs/release_notes/release-notes-beijing.rst +++ /dev/null @@ -1,427 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International -.. License. -.. http://creativecommons.org/licenses/by/4.0 -.. Copyright 2017 Bell Canada & Amdocs Intellectual Property. All rights -.. reserved. -.. _release_notes_beijing: - -:orphan: - -ONAP Operations Manager Release Notes -===================================== - -Version 2.0.0 Beijing Release ------------------------------ - -:Release Date: 2018-06-07 - -Previous Release Notes -********************** - -- :ref:`Amsterdam ` - -Epic -**** - -* [`OOM-6 `_] - Automated platform deployment on Docker/Kubernetes -* [`OOM-7 `_] - Platform monitoring and auto-healing -* [`OOM-8 `_] - Automated platform scalability -* [`OOM-9 `_] - Platform upgradability & rollbacks -* [`OOM-10 `_] - Platform configuration management -* [`OOM-46 `_] - Platform infrastructure deployment with TOSCA -* [`OOM-109 `_] - Platform Centralized Logging -* [`OOM-138 `_] - Using Optimization framework -* [`OOM-346 `_] - Platform Resiliency (including Recoverability, High-Availability, Backup/Restore, Geo-Redundancy) -* [`OOM-376 `_] - ONAP deployment options standardization -* [`OOM-486 `_] - HELM upgrade from 2.3 to 2.8.0 -* [`OOM-535 `_] - Upgrade Kubernetes from 1.8.6 to 1.9.2 -* [`OOM-590 `_] - OOM Wiki documentation of deployment options - -Story -***** - -* [`OOM-11 `_] - Add AAF containers to ONAP Kubernetes -* [`OOM-13 `_] - Add CLI containers to ONAP Kubernetes -* [`OOM-15 `_] - Add DMAAP containers to ONAP Kubernetes -* [`OOM-20 `_] - State Monitoring: MSO/mso -* [`OOM-21 `_] - State Monitoring: A&AI/aai-service -* [`OOM-22 `_] - State Monitoring: SDNC/sdc-be -* [`OOM-24 `_] - State Monitoring: message-router -* [`OOM-25 `_] - State Monitoring: MSB -* [`OOM-29 `_] - State Monitoring: VID -* [`OOM-31 `_] - State Monitoring: APPC/dbhost -* [`OOM-32 `_] - State Monitoring: VFC -* [`OOM-33 `_] - State Monitoring: Multi-VIM -* [`OOM-34 `_] - Auto-Restart on failure: ... 
-* [`OOM-35 `_] - State Monitoring: A&AI/hbase -* [`OOM-36 `_] - State Monitoring: A&AI/model-loader-service -* [`OOM-37 `_] - State Monitoring: APPC/dgbuilder -* [`OOM-38 `_] - State Monitoring: APPC/sdnctldb01 -* [`OOM-39 `_] - State Monitoring: APPC/sdnctldb02 -* [`OOM-40 `_] - State Monitoring: APPC/sdnhost -* [`OOM-41 `_] - State Monitoring: MSO/mariadb -* [`OOM-42 `_] - State Monitoring: SDNC/dbhost -* [`OOM-43 `_] - State Monitoring: SDNC/sdnc-dgbuilder -* [`OOM-44 `_] - State Monitoring: SDNC/sdnc-portal -* [`OOM-45 `_] - State Monitoring: SDNC/sdnctldb01 -* [`OOM-51 `_] - OOM ONAP Configuration Management - Externalize hardwired values -* [`OOM-52 `_] - OOM ONAP Configuration Management - Parameterization of docker images -* [`OOM-53 `_] - OOM ONAP Configuration Management - Parameterization for Sizing -* [`OOM-63 `_] - Kubernetes cluster created by TOSCA description -* [`OOM-85 `_] - Test the code in the "Lab" project environment -* [`OOM-86 `_] - Monitoring the health status of ONAP components -* [`OOM-87 `_] - Configure TOSCA description via dashboard -* [`OOM-88 `_] - Deploy Holmes on K8S cluster by TOSCA description -* [`OOM-89 `_] - Deploy CLAMP on K8S cluster by TOSCA description -* [`OOM-91 `_] - Deploy MSB on K8S cluster by TOSCA description -* [`OOM-92 `_] - Deploy AAF on K8S cluster by TOSCA description -* [`OOM-93 `_] - Deploy VF-C on K8S cluster by TOSCA description -* [`OOM-94 `_] - Deploy Multi-VIM on K8S cluster by TOSCA description -* [`OOM-95 `_] - Deploy DCAEGen2 on K8S cluster by TOSCA description -* [`OOM-96 `_] - Deploy AAI on K8S cluster by TOSCA description -* [`OOM-97 `_] - Deploy APPC on K8S cluster by TOSCA description -* [`OOM-98 `_] - Deploy MSO on K8S cluster by TOSCA description -* [`OOM-99 `_] - Deploy Policy on K8S cluster by TOSCA description -* [`OOM-100 `_] - Deploy SDC on K8S cluster by TOSCA description -* [`OOM-102 `_] - Deploy VID on K8S cluster by TOSCA description -* [`OOM-110 `_] - OOM ONAP Logging - Elastic Stack components deployment -* [`OOM-111 `_] - OOM ONAP Logging - FileBeat deployment aside ONAP components -* [`OOM-112 `_] - OOM ONAP Logging - Configuration of all ONAP components to emit canonical logs -* [`OOM-116 `_] - ignore intellj files -* [`OOM-145 `_] - update directory path from dockerdata-nfs to configured directory name (make it configurable) -* [`OOM-235 `_] - Service endpoint annotation for Usecase UI -* [`OOM-242 `_] - Modify DCAE seed for Helm -* [`OOM-262 `_] - Remove "oneclick" kubectl scripts. -* [`OOM-265 `_] - Top level helm chart for ONAP -* [`OOM-268 `_] - Persist and externalize database directories via persistent volumes -* [`OOM-271 `_] - Copy app config files from source -* [`OOM-272 `_] - Set application environment variables from source -* [`OOM-277 `_] - add automatic ONAP config parameter substitution -* [`OOM-280 `_] - MSB automatically re-synch service data on restart. -* [`OOM-292 `_] - Expose LOG Volume via /dockerdata-nfs -* [`OOM-293 `_] - OOM ONAP Configuration Management - Handling of Secrets -* [`OOM-298 `_] - Provide script to cleanup configuration data created by createConfig.sh -* [`OOM-322 `_] - Clean-up config files that are generated at system startup -* [`OOM-341 `_] - Provide an example of a partial deployment of ONAP components (e.g. no VFC) -* [`OOM-342 `_] - Add pointer to Wiki page on the readme file. 
-* [`OOM-344 `_] - Break the configuration tarball per appplication -* [`OOM-345 `_] - Re-validate # of containers and configuration for DCAEgen2 -* [`OOM-356 `_] - Add 'Usecase UI' containers to ONAP Kubernetes -* [`OOM-359 `_] - SDC logback chef failure -* [`OOM-375 `_] - F2F: ONAP/OOM for Developers -* [`OOM-382 `_] - Robot Version 1.1 OpenO tests -* [`OOM-406 `_] - In Kubernetes 1.8, the annotations are no longer supported and must be converted to the PodSpec field. -* [`OOM-457 `_] - In Kubernetes 1.8, init-container annotations to be converted to PodSpec field for aaf, clamp and vfc -* [`OOM-460 `_] - Segregating configuration of ONAP components -* [`OOM-476 `_] - Parameterize values.yaml docker image repos into global config variables -* [`OOM-528 `_] - Confirm k8s context with a prompt for deleteAll.bash -* [`OOM-534 `_] - Need to provide support for creating different sized OOM deployments -* [`OOM-546 `_] - Provide option to collect ONAP env details for issue investigations -* [`OOM-569 `_] - Investigate containerizing Cloudify Manager -* [`OOM-579 `_] - Document a Cloudify deployment of OOM Beijing -* [`OOM-633 `_] - Provide direct access to ONAP Portal without the need to use VNC -* [`OOM-677 `_] - Update all source code files with the Apache 2 License header -* [`OOM-678 `_] - Enforce MSB dockers dependencies using init-container -* [`OOM-681 `_] - updating docker images/components to latest code -* [`OOM-682 `_] - deployment of sdc workflow designer -* [`OOM-695 `_] - Improve Readiness-check prob -* [`OOM-722 `_] - OOM - Run all ONAP components in one namespace -* [`OOM-725 `_] - Use Blueprint to install Helm and k8s dashboard while creating k8s cluster -* [`OOM-727 `_] - Add Standardized Configuration to SO -* [`OOM-728 `_] - Add Standardized Configuration to ROBOT -* [`OOM-729 `_] - Add Standardized Configuration to VID -* [`OOM-730 `_] - Add Standardized Configuration to Consul -* [`OOM-731 `_] - Add Standardized Configuration to DMaaP Message Router -* [`OOM-732 `_] - Add Standardized Configuration to AAF -* [`OOM-733 `_] - Add Standardized Configuration to APPC -* [`OOM-734 `_] - Add Standardized Configuration to AAI -* [`OOM-735 `_] - Add Standardized Configuration to CLAMP -* [`OOM-736 `_] - Add Standardized Configuration to CLI -* [`OOM-737 `_] - Add Standardized Configuration to DCAEGEN2 -* [`OOM-738 `_] - Add Standardized Configuration to ESR -* [`OOM-739 `_] - Add Standardized Configuration to KUBE2MSB -* [`OOM-740 `_] - Add Standardized Configuration to LOG -* [`OOM-742 `_] - Add Standardized Configuration to MSB -* [`OOM-743 `_] - Replace deprecated MSO Helm Chart with Standardized SO Helm Chart -* [`OOM-744 `_] - Add Standardized Configuration to MULTICLOUD -* [`OOM-746 `_] - Add Standardized Configuration to PORTAL -* [`OOM-747 `_] - Add Standardized Configuration to SDC -* [`OOM-748 `_] - Add Standardized Configuration to SDNC -* [`OOM-749 `_] - Add Standardized Configuration to UUI -* [`OOM-750 `_] - Add Standardized Configuration to VFC -* [`OOM-751 `_] - Add Standardized Configuration to VNFSDK -* [`OOM-758 `_] - Common Mariadb Galera Helm Chart to be reused by many applications -* [`OOM-771 `_] - OOM - update master with new policy db deployment -* [`OOM-777 `_] - Add Standardized Configuration Helm Starter Chart -* [`OOM-779 `_] - OOM APPC ODL (MDSAL) persistent storage -* [`OOM-780 `_] - Update MSO to latest working version. 
-* [`OOM-786 `_] - Re-add support for multiple instances of ONAP -* [`OOM-788 `_] - Abstract docker secrets -* [`OOM-789 `_] - Abstract cluster role binding -* [`OOM-811 `_] - Make kube2msb use secret instead of passing token as environment variable -* [`OOM-822 `_] - Update Documentation for the Beijing Release -* [`OOM-823 `_] - Add CDT image to APPC chart -* [`OOM-827 `_] - Add quick start documentation README -* [`OOM-828 `_] - Remove oneclick scripts -* [`OOM-857 `_] - kube2msb fails to start -* [`OOM-914 `_] - Add LOG component robot healthcheck -* [`OOM-960 `_] - OOM Healthcheck lockdown - currently 32/39 : 20180421 -* [`OOM-979 `_] - Enhance OOM TOSCA solution to support standardized Helm Chart -* [`OOM-1006 `_] - VNFSDK healthcheck fails -* [`OOM-1073 `_] - Change the Repository location in the image oomk8s/config-init:2.0.0-SNAPSHOT -* [`OOM-1078 `_] - Update Kubectl, docker, helm version - -Task -**** - -* [`OOM-57 `_] - Agree on configuration contract/YAML with each of the project teams -* [`OOM-105 `_] - TOSCA based orchestration demo -* [`OOM-257 `_] - DevOps: OOM config reset procedure for new /dockerdata-nfs content -* [`OOM-305 `_] - Rename MSO to SO in OOM -* [`OOM-332 `_] - Add AAI filebeat container - blocked by LOG-67 -* [`OOM-428 `_] - Add log container healthcheck to mark failed creations - see OOM-427 -* [`OOM-429 `_] - DOC: Document HELM server version 2.7.2 required for tpl usage -* [`OOM-489 `_] - Update values.yaml files for tag name changes for docker images and versions. -* [`OOM-543 `_] - SDNC adjust docker pullPolicy to IfNotPresent to speed up initial deployment slowdown introduced by SDNC-163 -* [`OOM-604 `_] - Update OOM and HEAT AAI sparky master from v1.1.0 to v1.1.1 - match INT-288 -* [`OOM-614 `_] - SDC, SDNC, AAI Healthcheck failures last 12 hours 20180124:1100EST -* [`OOM-624 `_] - CII security badging: cleartext password for keystone and docker repo creds -* [`OOM-726 `_] - Mirror AAI docker version changes into OOM from AAI-791 -* [`OOM-772 `_] - Remove old DCAE from Release -* [`OOM-801 `_] - Policy docker images rename - key off new name in POLICY-674 -* [`OOM-810 `_] - Improve emsdriver code -* [`OOM-819 `_] - expose log/logstash 5044 as nodeport for external log producers outside of the kubernetes cluster -* [`OOM-820 `_] - Bypass vnc-portal for ONAP portal access -* [`OOM-943 `_] - Upgrade prepull_docker.sh to work with new helm based master refactor - post OOM-328 -* [`OOM-947 `_] - Update AAI to latest images -* [`OOM-975 `_] - Notes are missing in multicloud -* [`OOM-1031 `_] - Config Changes for consul to make vid, so, log health checks pass -* [`OOM-1032 `_] - Making consul Stateful -* [`OOM-1122 `_] - Update APPC OOM chart to use Beijing release artifacts - -Bug -*** - -* [`OOM-4 `_] - deleteAll.bash fails to properly delete services and ports -* [`OOM-153 `_] - test - Sample Bug -* [`OOM-212 `_] - deleteAll script does not have an option to delete the services -* [`OOM-215 `_] - configure_app for helm apps is not correct -* [`OOM-218 `_] - createConfig.sh needs a chmod 755 in release-1.0.0 only -* [`OOM-239 `_] - mso.tar created in dockerdatanfs -* [`OOM-258 `_] - AAI logs are not being written outside the pods -* [`OOM-282 `_] - vnc-portal requires /etc/hosts url fix for SDC sdc.ui should be sdc.api -* [`OOM-283 `_] - No longer able to deploy instances in specified namespace -* [`OOM-290 `_] - config_init pod fails when /dockerdata-nfs is nfs-mounted -* [`OOM-300 `_] - cat: /config-init/onap/mso/mso/encryption.key: No such file or 
directory -* [`OOM-333 `_] - vfc-workflow fails [VFC BUG] - fixed - 20180117 vfc-ztevnfmdriver has docker pull issue -* [`OOM-334 `_] - Change kubernetes startup user -* [`OOM-351 `_] - Apply standard convention across the "template deployment YML" file -* [`OOM-352 `_] - failed to start VFC containers -* [`OOM-363 `_] - DCAE tests NOK with Robot E2E tests -* [`OOM-366 `_] - certificates in consul agent config are not in the right directory -* [`OOM-389 `_] - sdc-be and sdc-fe do not initialize correctly on latest master -* [`OOM-409 `_] - Update Vid yaml file to point to the ONAPPORTAL URL -* [`OOM-413 `_] - In portal VNC pod refresh /etc/hosts entries -* [`OOM-414 `_] - MSB Healtcheck failure on $*_ENDPOINT variables -* [`OOM-424 `_] - DCAE installation is not possible today -* [`OOM-430 `_] - Portal healthcheck passing on vnc-portal down -* [`OOM-467 `_] - Optimize config-init process -* [`OOM-493 `_] - Kubernetes infrastructure for ESR -* [`OOM-496 `_] - Readiness check is marking full availability of some components like SDC and SDNC before they would pass healthcheck -* [`OOM-514 `_] - Readiness prob fails sometimes even though the relevant pods are running -* [`OOM-539 `_] - Kube2MSB registrator doesn't support https REST service registration -* [`OOM-570 `_] - Wrong value is assigned to kube2msb AUTH_TOKEN environment variable -* [`OOM-574 `_] - OOM configuration for robot does not copy heat templatese in dockerdata-nfs -* [`OOM-577 `_] - Incorrect evaluation of bash command in yaml template file (portal-vnc-dep.yaml) -* [`OOM-578 `_] - Hard coded token in oom/kubernetes/kube2msb/values.yaml file -* [`OOM-589 `_] - Can not acces CLI in vnc-portal -* [`OOM-598 `_] - createAll.bash base64: invalid option -- d -* [`OOM-600 `_] - Unable to open CLI by clicking CLI application icon -* [`OOM-630 `_] - Red herring config pod deletion error on deleteAll - after we started deleting onap-config automatically -* [`OOM-645 `_] - Kube2MSB RBAC security issues -* [`OOM-653 `_] - sdnc-dbhost-0 deletion failure -* [`OOM-657 `_] - Look into DCAEGEN2 failure on duplicate servicePort -* [`OOM-672 `_] - hardcoded clusterIP for aai breaks auto installation -* [`OOM-680 `_] - ONAP Failure install with kubernetes 1.8+ -* [`OOM-687 `_] - Typo in README_HELM -* [`OOM-724 `_] - License Update in TOSCA -* [`OOM-767 `_] - data-router-logs and elasticsearch-data mapped to same folder -* [`OOM-768 `_] - Hardcoded onap in config files -* [`OOM-769 `_] - sdc-es data mapping in sdc-be and sdc-fe redundant -* [`OOM-783 `_] - UUI health check is failing -* [`OOM-784 `_] - make new so chart one namespace compatible -* [`OOM-791 `_] - After OOM-722 merge - docker pulls are timing out - switch to pullPolicy IfNotPresent -* [`OOM-794 `_] - demo-k8s.sh name not modified in the usage string -* [`OOM-795 `_] - HEAT templates for robot instantiateVFW missing -* [`OOM-796 `_] - robot asdc/sdngc interface in synch for Master -* [`OOM-797 `_] - GLOBAL_INJECTED_SCRIPT_VERSION missing from vm_properties.py -* [`OOM-804 `_] - VFC vfc-ztevnfmdriver container failure -* [`OOM-815 `_] - OOM Robot container helm failure after OOM-728 35909 merge -* [`OOM-829 `_] - Can not make multicloud helm chart -* [`OOM-830 `_] - Fix OOM build dependencies -* [`OOM-835 `_] - CLAMP mariadb pv is pointing to a wrong location -* [`OOM-836 `_] - champ and gizmo yaml validation issue -* [`OOM-845 `_] - Global repository should not be set by default -* [`OOM-846 `_] - Add liveness enabled fix to helm starter -* [`OOM-847 `_] - log-elasticsearch 
external ports are not externally accessible -* [`OOM-848 `_] - log-logstash logstash pipeline fails to start after oom standard config changes -* [`OOM-851 `_] - sdc chart validation error -* [`OOM-856 `_] - appc mysql fails deployment -* [`OOM-858 `_] - Fail to deploy onap chart due to config map size -* [`OOM-870 `_] - Missing CLAMP configuration -* [`OOM-871 `_] - log kibana container fails to start after oom standard config changes -* [`OOM-872 `_] - APPC-helm Still need config pod -* [`OOM-873 `_] - OOM doc typo -* [`OOM-874 `_] - Inconsistent repository references in ONAP charts -* [`OOM-875 `_] - Cannot retrieve robot logs -* [`OOM-876 `_] - Some containers ignore the repository setting -* [`OOM-878 `_] - MySQL slave nodes don't deploy when mysql.replicaCount > 1 -* [`OOM-881 `_] - SDN-C Portal pod fails to come up -* [`OOM-882 `_] - Some SDNC service names should be prefixed with the helm release name -* [`OOM-884 `_] - VID-VID mariadb pv is pointing to a wrong location -* [`OOM-885 `_] - Beijing oom component log messages missing in Elasticsearch -* [`OOM-886 `_] - kube2msb not starting up -* [`OOM-887 `_] - SDN-C db schema and sdnctl db user not reliably being created -* [`OOM-888 `_] - aaf-cs mapping wrong -* [`OOM-889 `_] - restore pv&pvc for mysql when NFS provisioner is disabled -* [`OOM-898 `_] - Multicloud-framework config file is not volume-mounted -* [`OOM-899 `_] - SDNC main pod does not come up -* [`OOM-900 `_] - portal-cassandra missing pv and pvc -* [`OOM-904 `_] - OOM problems bringing up components and passing healthchecks -* [`OOM-905 `_] - Charts use nsPrefix instead of release namespace -* [`OOM-906 `_] - Make all services independent of helm Release.Name -* [`OOM-907 `_] - Make all persistent volume to be mapped to a location defined by helm Release.Name -* [`OOM-908 `_] - Job portal-db-config fails due to missing image config -* [`OOM-909 `_] - SO Health Check fails -* [`OOM-910 `_] - VID Health Check fails -* [`OOM-911 `_] - VFC Health Check fails for 9 components -* [`OOM-912 `_] - Multicloud Health Check fails for 1 of its components -* [`OOM-913 `_] - Consul agent pod is failing -* [`OOM-916 `_] - Used to fix testing issues related to usability -* [`OOM-918 `_] - Policy - incorrect configmap mount causes base.conf to disappear -* [`OOM-920 `_] - Issue with CLAMP configuration -* [`OOM-921 `_] - align onap/values.yaml and onap/resources/environments/dev.yaml - different /dockerdata-nfs -* [`OOM-926 `_] - Disable clustering for APP-C out-of-the-box -* [`OOM-927 `_] - Need a production grade configuration override file of ONAP deployment -* [`OOM-928 `_] - Some charts use /dockerdata-nfs by default -* [`OOM-929 `_] - DMaaP message router docker image fails to pull -* [`OOM-930 `_] - New AAF Helm Charts required -* [`OOM-931 `_] - Reintroduce VNC pod into OOM -* [`OOM-932 `_] - Unblock integration testing -* [`OOM-935 `_] - sdc-cassandra pod fails to delete using helm delete - forced kubectl delete -* [`OOM-936 `_] - Readiness-check prob version is inconsistent across components -* [`OOM-937 `_] - Portal Cassandra config map points to wrong directory -* [`OOM-938 `_] - Can't install aai alone using helm -* [`OOM-945 `_] - SDNC some bundles failing to start cleanly -* [`OOM-948 `_] - make vfc got an error -* [`OOM-951 `_] - Update APPC charts based on on changes for ccsdk and Nitrogen ODL -* [`OOM-953 `_] - switch aai haproxy/hbase repo from hub.docker.com to nexus3 -* [`OOM-958 `_] - SDC-be deployment missing environment parameter -* [`OOM-964 `_] - SDC 
Healthcheck failure on sdc-be and sdc-kb containers down -* [`OOM-968 `_] - warning on default deployment values.yaml -* [`OOM-969 `_] - oomk8s images have no Dockerfile's -* [`OOM-971 `_] - Common service name template should allow for chart name override -* [`OOM-974 `_] - Cassandra bootstrap is done incorrectly -* [`OOM-977 `_] - The esr-gui annotations should include a "path" param when register to MSB -* [`OOM-985 `_] - DMAAP Redis fails to start -* [`OOM-986 `_] - SDC BE and FE logs are missing -* [`OOM-989 `_] - Sync ete-k8.sh and ete.sh for new log file numbering -* [`OOM-990 `_] - AUTO.json in SDC has unreachable addresses -* [`OOM-993 `_] - AAI model-loader.properties not in sync with project file -* [`OOM-994 `_] - DCAE cloudify controller docker image 1.1.0 N/A - use 1.2.0/1.3.0 -* [`OOM-1003 `_] - dcae-cloudify-manager chart references obsolete image version -* [`OOM-1004 `_] - aai-resources constantly fails due to cassandra hostname -* [`OOM-1005 `_] - AAI Widgets not loading due to duplicate volumes -* [`OOM-1007 `_] - Update dcae robot health check config -* [`OOM-1008 `_] - Set default consul server replica count to 1 -* [`OOM-1010 `_] - Fix broken property names in DCAE input files -* [`OOM-1011 `_] - Policy config correction after Service Name changes because of OOM-906 -* [`OOM-1013 `_] - Update DCAE container versions -* [`OOM-1014 `_] - Portal login not working due to inconsistent zookeeper naming -* [`OOM-1015 `_] - Champ fails to start -* [`OOM-1016 `_] - DOC-OPS Review: Helm install command is wrong on oom_user_guide - missing namespace -* [`OOM-1017 `_] - DOC-OPS review: Docker/Kubernetes versions wrong for master in oom_cloud_setup_guide -* [`OOM-1018 `_] - DOC-OPS review: global repo override does not match git in oom quick start guide -* [`OOM-1019 `_] - DOC-OPS review: Add Ubuntu 16.04 reference to oom_user_guide to avoid 14/16 confusion -* [`OOM-1021 `_] - Update APPC resources for Nitrogen ODL -* [`OOM-1022 `_] - Fix SDC startup dependencies -* [`OOM-1023 `_] - "spring.datasource.cldsdb.url" in clamp has wrong clampdb name -* [`OOM-1024 `_] - Cassandra data not persisted -* [`OOM-1033 `_] - helm error during deployment 20180501:1900 - all builds under 2.7.2 -* [`OOM-1034 `_] - VID Ports incorrect in deployment.yaml -* [`OOM-1037 `_] - Enable CLI health check -* [`OOM-1039 `_] - Service distribution to SO fails -* [`OOM-1041 `_] - aai-service was renamed, but old references remain -* [`OOM-1042 `_] - portalapps service was renamed, but old references remain -* [`OOM-1045 `_] - top level values.yaml missing entry for dmaap chart -* [`OOM-1049 `_] - SDNC_UEB_LISTENER db -* [`OOM-1050 `_] - Impossible to deploy consul using cache docker registry -* [`OOM-1051 `_] - Fix aaf deployment -* [`OOM-1052 `_] - SO cloud config file points to Rackspace cloud -* [`OOM-1054 `_] - Portal LoadBalancer Ingress IP is on the wrong network -* [`OOM-1060 `_] - Incorrect MR Kafka references prevent aai champ from starting -* [`OOM-1061 `_] - ConfigMap size limit exceeded -* [`OOM-1064 `_] - Improve docker registry secret management -* [`OOM-1066 `_] - Updating TOSCA blueprint to sync up with helm configuration changes (add dmaap and oof/delete message-router) -* [`OOM-1068 `_] - Update SO with new AAI cert -* [`OOM-1076 `_] - some charts still using readiness check image from amsterdam 1.x -* [`OOM-1077 `_] - AAI resources and traversal deployment failure on non-rancher envs -* [`OOM-1079 `_] - Robot charts do not allow over ride of pub_key, dcae_collector_ip and 
dcae_collector_port -* [`OOM-1081 `_] - Remove component 'mock' from TOSCA deployment -* [`OOM-1082 `_] - Wrong pv location of dcae postgres -* [`OOM-1085 `_] - appc hostname is incorrect in url -* [`OOM-1086 `_] - clamp deployment changes /dockerdata-nfs/ReleaseName dir permissions -* [`OOM-1088 `_] - APPC returns error for vCPE restart message from Policy -* [`OOM-1089 `_] - DCAE pods are not getting purged -* [`OOM-1093 `_] - Line wrapping issue in redis-cluster-config.sh script -* [`OOM-1094 `_] - Fix postgres startup -* [`OOM-1095 `_] - common makefile builds out of order -* [`OOM-1096 `_] - node port conflict SDNC (Geo enabled) & other charts -* [`OOM-1097 `_] - Nbi needs dep-nbi - crash on make all -* [`OOM-1099 `_] - Add External Interface NBI project into OOM TOSCA -* [`OOM-1102 `_] - Incorrect AAI services -* [`OOM-1103 `_] - Cannot disable NBI -* [`OOM-1104 `_] - Policy DROOLS configuration across container restarts -* [`OOM-1110 `_] - Clamp issue when connecting Policy -* [`OOM-1111 `_] - Please revert to using VNFSDK Postgres container -* [`OOM-1114 `_] - APPC is broken in latest helm chart -* [`OOM-1115 `_] - SDNC DGBuilder cant operate on DGs in database - need NodePort -* [`OOM-1116 `_] - Correct values needed by NBI chart -* [`OOM-1124 `_] - Update OOM APPC chart to enhance AAF support -* [`OOM-1126 `_] - Incorrect Port mapping between CDT Application and APPC main application -* [`OOM-1127 `_] - SO fails healthcheck -* [`OOM-1128 `_] - AAF CS fails to start in OpenLab - -Sub-task -******** - -* [`OOM-304 `_] - Service endpoint annotation for Data Router -* [`OOM-306 `_] - Handle mariadb secrets -* [`OOM-510 `_] - Increase vm.max_map_count to 262144 when running Rancher 1.6.11+ via helm 2.6+ - for elasticsearch log mem failure -* [`OOM-512 `_] - Push the reviewed and merged ReadMe content to RTD -* [`OOM-641 `_] - Segregating of configuration for SDNC-UEB component -* [`OOM-655 `_] - Create alternate prepull script which provides more user feedback and logging -* [`OOM-753 `_] - Create Helm Sub-Chart for SO's embedded mariadb -* [`OOM-754 `_] - Create Helm Chart for SO -* [`OOM-774 `_] - Create Helm Sub-Chart for APPC's embedded mySQL database -* [`OOM-775 `_] - Create Helm Chart for APPC -* [`OOM-778 `_] - Replace NFS Provisioner with configurable PV storage solution -* [`OOM-825 `_] - Apache 2 License updation for All sqls and .js file -* [`OOM-849 `_] - Policy Nexus component needs persistent volume for /sonatype-work -* [`OOM-991 `_] - Adjust SDC-BE init job timing from 10 to 30s to avoid restarts on single node systems -* [`OOM-1036 `_] - update helm from 2.7.2 to 2.8.2 wiki/rtd -* [`OOM-1063 `_] - Document Portal LoadBalancer Ingress IP Settings - -**Security Notes** - -OOM code has been formally scanned during build time using NexusIQ and no -Critical vulnerability was found. - -Quick Links: - - `OOM project page `_ - - - `Passing Badge information for OOM `_ - - - -End of Release Notes diff --git a/docs/release_notes/release-notes-casablanca.rst b/docs/release_notes/release-notes-casablanca.rst deleted file mode 100644 index 6b857309aa..0000000000 --- a/docs/release_notes/release-notes-casablanca.rst +++ /dev/null @@ -1,78 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International -.. License. -.. http://creativecommons.org/licenses/by/4.0 -.. Copyright 2017 Bell Canada & Amdocs Intellectual Property. All rights -.. reserved. -.. 
_release_notes_casablanca: - -:orphan: - -ONAP Operations Manager Release Notes -===================================== - -Version 3.0.0 Casablanca Release --------------------------------- - -:Release Date: 2018-11-30 - -**Previous Release Notes** - -- :ref:`Beijing ` -- :ref:`Amsterdam ` - -Summary -------- - -The focus of this release was on incremental improvements in the following -areas: - -* Pluggable persistent storage with support for GlusterFS as the first storage - class provisioner - -* CPU and Memory limits in Helm Charts to improve Pod placement based on - resource availability in Kubernetes Cluster - -* Support of Node Selectors for Pod placement - -* Common "shared" Helm Charts referencing common images - - - mariadb-galera - - postgres - - cassandra - - mysql - - mongo - -* Integration of ARK Backup and Restore solution - -* Introduction of Helm deploy and undeploy plugins to better manage ONAP - deployments - - -**Security Notes** - -OOM code has been formally scanned during build time using NexusIQ and no -Critical vulnerability was found. - -Quick Links: - - - `OOM project page `_ - - - `Passing Badge information for OOM `_ - - -**Known Issues** - - * **Problem**: kubectl connections to pods (kubectl exec|logs) will - fail after a while due to a known bug in Kubernetes (1.11.2) - - **Workaround**: Restart of the kubelet daemons on the k8s hosts - - **Fix**: Will be delivered in the next release via a new - Kubernetes version (1.12) - - - `K8S Bug Report `_ - - `OOM-1532 `_ - - `OOM-1516 `_ - - `OOM-1520 `_ - -End of Release Notes diff --git a/docs/release_notes/release-notes-dublin.rst b/docs/release_notes/release-notes-dublin.rst deleted file mode 100644 index 7a32297210..0000000000 --- a/docs/release_notes/release-notes-dublin.rst +++ /dev/null @@ -1,83 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International -.. License. -.. http://creativecommons.org/licenses/by/4.0 -.. Copyright 2017 Bell Canada & Amdocs Intellectual Property. All rights -.. reserved. -.. _release_notes_dublin: - -:orphan: - -ONAP Operations Manager Release Notes -===================================== - -Version 4.0.0 (Dublin Release) ------------------------------- - -:Release Date: 2019-06-26 - -**Previous Release Notes** - -- :ref:`Casablanca ` -- :ref:`Beijing ` -- :ref:`Amsterdam ` - - -Summary -------- - -**Platform Resiliency** - -* Documentation of a Highly-Available Kubernetes Cluster Deployment -* Availability of a Default Storage Class Provisioner for improved Persistent - Storage resiliency -* Availability of a CNI reference integration for Multi-site support - - * applications can take advantage of multi-site by using POD and/or Node - (anti)affinity, taints/tolerations, labels per application - -**Footprint Optimization** - -* Shared MariaDB-Galera Cluster - current clients in Dublin: SO, SDNC -* Shared Cassandra Cluster - current clients in Dublin: AAI, SDC -* Optional deployment of independent clusters (backward compatibility) - -**Platform Upgradability** - -* Introduction of an Upgrade Framework supporting: - - * Automated rolling upgrades for applications - * In-place schema and data migrations - * Blue-Green deployment environment migration (e.g. 
Pre-prod to Prod) - * Upgrades from embedded database instance into shared database instance - -* Release-to-release upgrade support delivered for the following projects - - * A&AI - * SDNC - * SO - -**Security Notes** - -*Fixed Security Issues* - -*Known Security Issues* - -* In default deployment OOM (consul-server-ui) exposes HTTP port 30270 outside of cluster. [`OJSI-134 `_] -* Hard coded password used for all oom deployments [`OJSI-188 `_] -* CVE-2019-12127 - OOM exposes unprotected API/UI on port 30270 [`OJSI-202 `_] - -*Known Vulnerabilities in Used Modules* - -OOM code has been formally scanned during build time using NexusIQ and no -Critical vulnerability was found. - -Quick Links: - - - `OOM project page `_ - - - `Passing Badge information for OOM `_ - - -**Known Issues** - -End of Release Notes diff --git a/docs/release_notes/release-notes-elalto.rst b/docs/release_notes/release-notes-elalto.rst deleted file mode 100644 index b4059028e5..0000000000 --- a/docs/release_notes/release-notes-elalto.rst +++ /dev/null @@ -1,84 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International -.. License. -.. http://creativecommons.org/licenses/by/4.0 -.. Copyright 2017 Bell Canada & Amdocs Intellectual Property. All rights -.. reserved. -.. _release_notes_elalto: - -:orphan: - -ONAP Operations Manager Release Notes -===================================== - -Version 5.0.1 (El Alto Release) -------------------------------- - -:Release Date: 2019-10-10 - -**Previous Release Notes** - -- :ref:`Dublin ` -- :ref:`Casablanca ` -- :ref:`Beijing ` -- :ref:`Amsterdam ` - - -Summary -------- - -The focus of this release was on maintenance and as such no new features were -delivered. -A list of issues resolved in this release can be found here: https://jira.onap.org/projects/OOM/versions/10726 - -**New Features** - -**Bug Fixes** - -* 25 defects addressed (see link above) - -**Known Issues** - -The following known issues will be addressed in a future release: - -* [`OOM-1480 `_] - postgres chart does not set root password when installing on an existing database instances -* [`OOM-1966 `_] - ONAP on HA Kubernetes Cluster - Documentation update -* [`OOM-1995 `_] - Mariadb Galera cluster pods keep failing -* [`OOM-2061 `_] - Details Missing for installing the kubectl section -* [`OOM-2075 `_] - Invalid MTU for Canal CNI interfaces -* [`OOM-2080 `_] - Need for "ReadWriteMany" access on storage when deploying on Kubernetes? -* [`OOM-2091 `_] - incorrect release deployed -* [`OOM-2132 `_] - Common Galera server.cnf does not contain Camunda required settings - -**Security Notes** - -*Fixed Security Issues* - -*Known Security Issues* - -* In default deployment OOM (consul-server-ui) exposes HTTP port 30270 outside of cluster. [`OJSI-134 `_] -* Hard coded password used for all oom deployments [`OJSI-188 `_] -* CVE-2019-12127 - OOM exposes unprotected API/UI on port 30270 [`OJSI-202 `_] - -*Known Vulnerabilities in Used Modules* - -OOM code has been formally scanned during build time using NexusIQ and no -Critical vulnerability was found. 
- -Quick Links: - - - `OOM project page `_ - - - `Passing Badge information for OOM `_ - - -Version 5.0.0 (El Alto Early Drop) ----------------------------------- - -:Release Date: 2019-08-19 - -Summary -------- - -**Software Requirements** - -* Upgraded to Kubernetes 1.15.x and Helm 1.14.x diff --git a/docs/release_notes/release-notes-frankfurt.rst b/docs/release_notes/release-notes-frankfurt.rst deleted file mode 100644 index 8b07f4a0db..0000000000 --- a/docs/release_notes/release-notes-frankfurt.rst +++ /dev/null @@ -1,166 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 - International License. -.. http://creativecommons.org/licenses/by/4.0 -.. (c) ONAP Project and its contributors -.. _release_notes_frankfurt: - -:orphan: - -************************************* -ONAP Operations Manager Release Notes -************************************* - -Previous Release Notes -====================== - -- :ref:`El Alto ` -- :ref:`Dublin ` -- :ref:`Casablanca ` -- :ref:`Beijing ` -- :ref:`Amsterdam ` - -Abstract -======== - -This document provides the release notes for the Frankfurt release. - -Summary -======= - -The focus of this release is to strengthen the foundation of OOM installer. - -Release Data -============ - -+--------------------------------------+--------------------------------------+ -| **Project** | OOM | -| | | -+--------------------------------------+--------------------------------------+ -| **Docker images** | N/A | -| | | -+--------------------------------------+--------------------------------------+ -| **Release designation** | Frankfurt | -| | | -+--------------------------------------+--------------------------------------+ -| **Release date** | 2020/06/15 | -| | | -+--------------------------------------+--------------------------------------+ - -New features ------------- - -* Ingress deployment is getting more and more usable -* Use of dynamic Persistent Volume is available - -**Bug fixes** - -A list of issues resolved in this release can be found here: -https://jira.onap.org/projects/OOM/versions/10826 - -**Known Issues** - -- `OOM-1237 `_ Source Helm Charts from - ONAP Repo. Having helm charts repo is not possible for Frankfurt release. -- `OOM-1720 `_ galera container is - outdated. containers used for mariadb are outdated and not supported anymore. -- `OOM-1817 `_ Use of global.repository - inconsistent across Helm Charts. it's then may be hard to retrieve some - containers when deploying in constrained environment. -- `OOM-2075 `_ Invalid MTU for Canal CNI - interfaces -- `OOM-2227 `_ Cassandra Backup Mechanism - works only on "static PV" mode. -- `OOM-2230 `_ Missing requests/limits - for some PODS. This can lead to "memory bombing" so cautious monitoring of - Kubernetes resources usage must be set up. -- `OOM-2279 `_ OOM El Alto and master - clamp mariadb resources doesn't match chart. -- `OOM-2285 `_ deploy.sh does not work - for mariadb-galera. deploy script doesn't behave well with "-" in the - component name. -- `OOM-2369 `_ DMAAP Helm install takes - too long and often fails. -- `OOM-2418 `_ Readiness-check 2.0.2 not - working properly for stateful set. -- `OOM-2421 `_ OOM NBI chart deployment - error. In some case, NBI deployment fails. -- `OOM-2422 `_ Portal App is unreachable - when deploying without HTTPs - - -Deliverables ------------- - -Software Deliverables -~~~~~~~~~~~~~~~~~~~~~ - -OOM provides `Helm charts `_ that needs to be -"compiled" into Helm package. see step 6 in -:doc:`quickstart guide <../oom_quickstart_guide>`. 
- -Documentation Deliverables -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -- :doc:`Project Description <../oom_project_description>` -- :doc:`Cloud Setup Guide <../oom_cloud_setup_guide>` -- :doc:`Quick Start Guide <../oom_quickstart_guide>` -- :doc:`Setup Ingress Controller <../oom_setup_ingress_controller>` -- :doc:`Developer Guide <../oom_developer_guide>` -- :doc:`Hardcoded Certificates <../oom_hardcoded_certificates>` - -Known Limitations, Issues and Workarounds -========================================= - -Known Vulnerabilities ---------------------- - -- Hard coded password used for all OOM deployments - [`OJSI-188 `_] -- :doc:`Hard coded certificates <../oom_hardcoded_certificates>` in Helm packages - -Workarounds ------------ - -- `OOM-1237 `_ Workaround is to generate - them as explained in documentation. -- `OOM-1817 `_ Workaround is to use - offline installer if needed. -- `OOM-2227 `_ Workaround is to stick to - "static PV" (so, not using storage class) if backup is needed. -- `OOM-2285 `_ Workaround is to use - directly helm upgrade if needed. -- `OOM-2369 `_ Workaround is to play - postinstall jobs by hand. -- `OOM-2418 `_ Workaround is to use - version 2.2.2 in global part of override file if the new check is needed. -- `OOM-2421 `_ Workaround is to - undeploy/redeploy NBI. -- `OOM-2422 `_ Workaround is to create - first portal app service with service type Cluster IP then changing it to - NodePort or LoadBalancer so all the port are available. - -Security Notes --------------- - -**Fixed Security Issues** - -- In default deployment OOM (consul-server-ui) exposes HTTP port 30270 outside - of cluster. [`OJSI-134 `_] -- CVE-2019-12127 - OOM exposes unprotected API/UI on port 30270 - [`OJSI-202 `_] - -References -========== - -For more information on the ONAP Frankfurt release, please see: - -#. `ONAP Home Page`_ -#. `ONAP Documentation`_ -#. `ONAP Release Downloads`_ -#. `ONAP Wiki Page`_ - - -.. _`ONAP Home Page`: https://www.onap.org -.. _`ONAP Wiki Page`: https://wiki.onap.org -.. _`ONAP Documentation`: https://docs.onap.org -.. _`ONAP Release Downloads`: https://git.onap.org diff --git a/docs/release_notes/release-notes-guilin.rst b/docs/release_notes/release-notes-guilin.rst deleted file mode 100644 index 8d4b4322b8..0000000000 --- a/docs/release_notes/release-notes-guilin.rst +++ /dev/null @@ -1,150 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 - International License. -.. http://creativecommons.org/licenses/by/4.0 -.. (c) ONAP Project and its contributors -.. _release_notes_guilin: - -:orphan: - -************************************* -ONAP Operations Manager Release Notes -************************************* - -Previous Release Notes -====================== - -- :ref:`Frankfurt ` -- :ref:`El Alto ` -- :ref:`Dublin ` -- :ref:`Casablanca ` -- :ref:`Beijing ` -- :ref:`Amsterdam ` - -Abstract -======== - -This document provides the release notes for the Guilin release. - -Summary -======= - -The focus of this release is to strengthen the foundation of OOM installer. 
- -Release Data -============ - -+--------------------------------------+--------------------------------------+ -| **Project** | OOM | -| | | -+--------------------------------------+--------------------------------------+ -| **Docker images** | N/A | -| | | -+--------------------------------------+--------------------------------------+ -| **Release designation** | Guilin | -| | | -+--------------------------------------+--------------------------------------+ -| **Release date** | 2020/12/03 | -| | | -+--------------------------------------+--------------------------------------+ - -New features ------------- - -* Kubernetes support for version up to 1.19 -* Helm (experimental) support for version up to 3.3 -* Limits are set for most of the components - -**Bug fixes** - -A list of issues resolved in this release can be found here: -https://jira.onap.org/projects/OOM/versions/10826 - -**Known Issues** - -- `OOM-1237 `_ Source Helm Charts from - ONAP Repo. Having helm charts repo is not possible for Frankfurt release. -- `OOM-1720 `_ galera container is - outdated. containers used for mariadb are outdated and not supported anymore. -- `OOM-1817 `_ Use of global.repository - inconsistent across Helm Charts. it's then may be hard to retrieve some - containers when deploying in constrained environment. -- `OOM-2227 `_ Cassandra Backup Mechanism - works only on "static PV" mode. -- `OOM-2285 `_ deploy.sh does not work - for mariadb-galera. deploy script doesn't behave well with "-" in the - component name. -- `OOM-2421 `_ OOM nbi chart deployment - error -- `OOM-2534 `_ Cert-Service leverages - runtime external dependency -- `OOM-2554 `_ Common pods have java 8 -- `OOM-2588 `_ Various subcharts not - installing due to helm size issues -- `OOM-2629 `_ NetBox demo entry setup - not complete - - -Deliverables ------------- - -Software Deliverables -~~~~~~~~~~~~~~~~~~~~~ - -OOM provides `Helm charts `_ that needs to be -"compiled" into Helm package. see step 6 in -:doc:`quickstart guide <../oom_quickstart_guide>`. - -Documentation Deliverables -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -- :doc:`Project Description <../oom_project_description>` -- :doc:`Cloud Setup Guide <../oom_cloud_setup_guide>` -- :doc:`Quick Start Guide <../oom_quickstart_guide>` -- :doc:`Setup Ingress Controller <../oom_setup_ingress_controller>` -- :doc:`Developer Guide <../oom_developer_guide>` -- :doc:`Hardcoded Certificates <../oom_hardcoded_certificates>` - -Known Limitations, Issues and Workarounds -========================================= - -Known Vulnerabilities ---------------------- - -- Hard coded password used for all OOM deployments - [`OJSI-188 `_] -- :doc:`Hard coded certificates <../oom_hardcoded_certificates>` in Helm packages - -Workarounds ------------ - -- `OOM-1237 `_ Workaround is to generate - them as explained in documentation. -- `OOM-1817 `_ Workaround is to use - offline installer if needed. -- `OOM-2227 `_ Workaround is to stick to - "static PV" (so, not using storage class) if backup is needed. -- `OOM-2285 `_ Workaround is to use - directly helm upgrade if needed. -- `OOM-2534 `_ Workaround is to download - in advance docker.io/openjdk:11-jre-slim where you will generate the charts - -Security Notes --------------- - -**Fixed Security Issues** - -References -========== - -For more information on the ONAP Frankfurt release, please see: - -#. `ONAP Home Page`_ -#. `ONAP Documentation`_ -#. `ONAP Release Downloads`_ -#. `ONAP Wiki Page`_ - - -.. _`ONAP Home Page`: https://www.onap.org -.. 
_`ONAP Wiki Page`: https://wiki.onap.org -.. _`ONAP Documentation`: https://docs.onap.org -.. _`ONAP Release Downloads`: https://git.onap.org diff --git a/docs/release_notes/release-notes-honolulu.rst b/docs/release_notes/release-notes-honolulu.rst deleted file mode 100644 index bf91a44a3e..0000000000 --- a/docs/release_notes/release-notes-honolulu.rst +++ /dev/null @@ -1,162 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 - International License. -.. http://creativecommons.org/licenses/by/4.0 -.. (c) ONAP Project and its contributors -.. _release_notes_honolulu: - -:orphan: - -************************************* -ONAP Operations Manager Release Notes -************************************* - -Previous Release Notes -====================== - -- :ref:`Guilin ` -- :ref:`Frankfurt ` -- :ref:`El Alto ` -- :ref:`Dublin ` -- :ref:`Casablanca ` -- :ref:`Beijing ` -- :ref:`Amsterdam ` - -Abstract -======== - -This document provides the release notes for the Honolulu release. - -Summary -======= - -The focus of this release is to strengthen the foundation of OOM installer. - -Release Data -============ - -+--------------------------------------+--------------------------------------+ -| **Project** | OOM | -| | | -+--------------------------------------+--------------------------------------+ -| **Docker images** | N/A | -| | | -+--------------------------------------+--------------------------------------+ -| **Release designation** | Honolulu | -| | | -+--------------------------------------+--------------------------------------+ -| **Release date** | 2021/04/29 | -| | | -+--------------------------------------+--------------------------------------+ - -New features ------------- - -* Kubernetes support for version up to 1.20 -* Helm support for version up to 3.5 -* Limits are set for most of the components -* Portal-Cassandra image updated to Bitnami, supporting IPv4/IPv6 Dual Stack -* CMPv2 external issuer implemented which extends Cert-Manager with ability to - enroll X.509 certificates from CMPv2 servers -* New version for mariadb galera using Bitnami image, supporting IPv4/IPv6 Dual - Stack -* Bump version of common PostgreSQL and ElasticSearch -* Move to automatic certificates retrieval for 80% of the components -* Consistent retrieval of docker images, with ability to configure proxy for - the 4 repositories used by ONAP - -**Bug fixes** - -A list of issues resolved in this release can be found here: -https://jira.onap.org/projects/OOM/versions/11073 - -major issues solved: - -* Better handling of persistence on PostgreSQL -* Better Ingress templating -* Better Service templating - -**Known Issues** - -- `OOM-2554 `_ Common pods have java 8 -- `OOM-2435 `_ SDNC karaf shell: - log:list: Error executing command: Unrecognized configuration -- `OOM-2629 `_ NetBox demo entry setup - not complete -- `OOM-2706 `_ CDS Blueprint Processor - does not work with local DB -- `OOM-2713 `_ Problem on onboarding - custom cert to SDNC ONAP during deployment -- `OOM-2698 `_ SO helm override fails in - for value with multi-level replacement -- `OOM-2697 `_ SO with local MariaDB - deployment fails -- `OOM-2538 `_ strange error with - CertInitializer template -- `OOM-2547 `_ Health Check failures - seen after bringing down/up control plane & worker node VM instances on which - ONAP hosted -- `OOM-2699 `_ SO so-mariadb - readinessCheck fails for local MariaDB instance -- `OOM-2705 `_ SDNC DB installation fails - on local MariaDB instance -- `OOM-2603 `_ [SDNC] allign password for - 
scaleoutUser/restconfUser/odlUser - -Deliverables ------------- - -Software Deliverables -~~~~~~~~~~~~~~~~~~~~~ - -OOM provides `Helm charts `_ that needs to be -"compiled" into Helm package. see step 6 in -:doc:`quickstart guide <../oom_quickstart_guide>`. - -Documentation Deliverables -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -- :doc:`Project Description <../oom_project_description>` -- :doc:`Cloud Setup Guide <../oom_cloud_setup_guide>` -- :doc:`Quick Start Guide <../oom_quickstart_guide>` -- :doc:`Setup Ingress Controller <../oom_setup_ingress_controller>` -- :doc:`Developer Guide <../oom_developer_guide>` -- :doc:`Hardcoded Certificates <../oom_hardcoded_certificates>` - -Known Limitations, Issues and Workarounds -========================================= - -Known Vulnerabilities ---------------------- - -- Hard coded password used for all OOM deployments - [`OJSI-188 `_] -- :doc:`Hard coded certificates <../oom_hardcoded_certificates>` in Helm packages - -Workarounds ------------ - -- ``_ - Workaround is to generate a password with "short" strength or pregenerate - passwords without single quote in it. Default deployment is using "short" - password generation for mariadb. - -Security Notes --------------- - -**Fixed Security Issues** - -References -========== - -For more information on the ONAP Frankfurt release, please see: - -#. `ONAP Home Page`_ -#. `ONAP Documentation`_ -#. `ONAP Release Downloads`_ -#. `ONAP Wiki Page`_ - - -.. _`ONAP Home Page`: https://www.onap.org -.. _`ONAP Wiki Page`: https://wiki.onap.org -.. _`ONAP Documentation`: https://docs.onap.org -.. _`ONAP Release Downloads`: https://git.onap.org diff --git a/docs/release_notes/release-notes-isntanbul.rst b/docs/release_notes/release-notes-isntanbul.rst deleted file mode 100644 index bd320112c3..0000000000 --- a/docs/release_notes/release-notes-isntanbul.rst +++ /dev/null @@ -1,132 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 - International License. -.. http://creativecommons.org/licenses/by/4.0 -.. (c) ONAP Project and its contributors -.. _release_notes_istanbul: - -:orphan: - -************************************* -ONAP Operations Manager Release Notes -************************************* - -Previous Release Notes -====================== - -- :ref:`Honolulu ` -- :ref:`Guilin ` -- :ref:`Frankfurt ` -- :ref:`El Alto ` -- :ref:`Dublin ` -- :ref:`Casablanca ` -- :ref:`Beijing ` -- :ref:`Amsterdam ` - -Abstract -======== - -This document provides the release notes for the Istanbul release. - -Summary -======= - - - -Release Data -============ - -+--------------------------------------+--------------------------------------+ -| **Project** | OOM | -| | | -+--------------------------------------+--------------------------------------+ -| **Docker images** | N/A | -| | | -+--------------------------------------+--------------------------------------+ -| **Release designation** | Istanbul | -| | | -+--------------------------------------+--------------------------------------+ -| **Release date** | | -| | | -+--------------------------------------+--------------------------------------+ - -New features ------------- - - -**Bug fixes** - -A list of issues resolved in this release can be found here: -https://jira.onap.org/projects/OOM/versions/11074 - - -**Known Issues** - - -Deliverables ------------- - -Software Deliverables -~~~~~~~~~~~~~~~~~~~~~ - -OOM provides `Helm charts `_ that needs to be -"compiled" into Helm package. see step 6 in -:doc:`quickstart guide <../oom_quickstart_guide>`. 
- -Documentation Deliverables -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -- :doc:`Project Description <../oom_project_description>` -- :doc:`Cloud Setup Guide <../oom_cloud_setup_guide>` -- :doc:`Quick Start Guide <../oom_quickstart_guide>` -- :doc:`Setup Ingress Controller <../oom_setup_ingress_controller>` -- :doc:`Developer Guide <../oom_developer_guide>` -- :doc:`Hardcoded Certificates <../oom_hardcoded_certificates>` - -Known Limitations, Issues and Workarounds -========================================= - -Known Vulnerabilities ---------------------- - - -Workarounds ------------ - -- `OOM-2754 `_ - Because of *updateEndpoint* property added to *cmpv2issuer* CRD - it is impossible to upgrade platform component from Honolulu to Istanbul - release without manual steps. Actions that should be performed: - - #. Update the CRD definition:: - - > kubectl -n onap apply -f oom/kubernetes/platform/components/cmpv2-cert-provider/crds/cmpv2issuer.yaml - #. Upgrade the component:: - - > helm -n onap upgrade dev-platform oom/kubernetes/platform - #. Make sure that *cmpv2issuer* contains correct value for - *spec.updateEndpoint*. The value should be: *v1/certificate-update*. - If it's not, edit the resource:: - - > kubectl -n onap edit cmpv2issuer cmpv2-issuer-onap - - -Security Notes --------------- - -**Fixed Security Issues** - -References -========== - -For more information on the ONAP Istanbul release, please see: - -#. `ONAP Home Page`_ -#. `ONAP Documentation`_ -#. `ONAP Release Downloads`_ -#. `ONAP Wiki Page`_ - - -.. _`ONAP Home Page`: https://www.onap.org -.. _`ONAP Wiki Page`: https://wiki.onap.org -.. _`ONAP Documentation`: https://docs.onap.org -.. _`ONAP Release Downloads`: https://git.onap.org diff --git a/docs/release_notes/release-notes.rst b/docs/release_notes/release-notes.rst deleted file mode 100644 index 1ab2173e10..0000000000 --- a/docs/release_notes/release-notes.rst +++ /dev/null @@ -1,131 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 - International License. -.. http://creativecommons.org/licenses/by/4.0 -.. (c) ONAP Project and its contributors -.. _release_notes: - -************************************* -ONAP Operations Manager Release Notes -************************************* - -Previous Release Notes -====================== - -- :ref:`Istanbul ` -- :ref:`Honolulu ` -- :ref:`Guilin ` -- :ref:`Frankfurt ` -- :ref:`El Alto ` -- :ref:`Dublin ` -- :ref:`Casablanca ` -- :ref:`Beijing ` -- :ref:`Amsterdam ` - -Abstract -======== - -This document provides the release notes for the Jakarta release. - -Summary -======= - - - -Release Data -============ - -+--------------------------------------+--------------------------------------+ -| **Project** | OOM | -| | | -+--------------------------------------+--------------------------------------+ -| **Docker images** | N/A | -| | | -+--------------------------------------+--------------------------------------+ -| **Release designation** | Jakarta | -| | | -+--------------------------------------+--------------------------------------+ -| **Release date** | | -| | | -+--------------------------------------+--------------------------------------+ - -New features ------------- - - -**Bug fixes** - -A list of issues resolved in this release can be found here: -https://jira.onap.org/projects/OOM/versions/11498 - - -**Known Issues** - - -Deliverables ------------- - -Software Deliverables -~~~~~~~~~~~~~~~~~~~~~ - -OOM provides `Helm charts `_ that needs to be -"compiled" into Helm package. 
see step 6 in -:doc:`quickstart guide <../oom_quickstart_guide>`. - -Documentation Deliverables -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -- :doc:`Project Description <../oom_project_description>` -- :doc:`Cloud Setup Guide <../oom_cloud_setup_guide>` -- :doc:`Quick Start Guide <../oom_quickstart_guide>` -- :doc:`Setup Ingress Controller <../oom_setup_ingress_controller>` -- :doc:`Developer Guide <../oom_developer_guide>` -- :doc:`Hardcoded Certificates <../oom_hardcoded_certificates>` - -Known Limitations, Issues and Workarounds -========================================= - -Known Vulnerabilities ---------------------- - - -Workarounds ------------ - -- `OOM-2754 `_ - Because of *updateEndpoint* property added to *cmpv2issuer* CRD - it is impossible to upgrade platform component from Istanbul to Jakarta - release without manual steps. Actions that should be performed: - - #. Update the CRD definition:: - - > kubectl -n onap apply -f oom/kubernetes/platform/components/cmpv2-cert-provider/crds/cmpv2issuer.yaml - #. Upgrade the component:: - - > helm -n onap upgrade dev-platform oom/kubernetes/platform - #. Make sure that *cmpv2issuer* contains correct value for - *spec.updateEndpoint*. The value should be: *v1/certificate-update*. - If it's not, edit the resource:: - - > kubectl -n onap edit cmpv2issuer cmpv2-issuer-onap - - -Security Notes --------------- - -**Fixed Security Issues** - -References -========== - -For more information on the ONAP Istanbul release, please see: - -#. `ONAP Home Page`_ -#. `ONAP Documentation`_ -#. `ONAP Release Downloads`_ -#. `ONAP Wiki Page`_ - - -.. _`ONAP Home Page`: https://www.onap.org -.. _`ONAP Wiki Page`: https://wiki.onap.org -.. _`ONAP Documentation`: https://docs.onap.org -.. _`ONAP Release Downloads`: https://git.onap.org diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt new file mode 100644 index 0000000000..9c104de61c --- /dev/null +++ b/docs/requirements-docs.txt @@ -0,0 +1,8 @@ +sphinx>=4.2.0 # BSD +sphinx-rtd-theme>=1.0.0 # MIT +sphinxcontrib-blockdiag # BSD +sphinxcontrib-seqdiag # BSD +sphinxcontrib-swaggerdoc +sphinxcontrib-spelling +sphinxcontrib-plantuml +sphinx_toolbox>=3.2.0 diff --git a/docs/sections/guides/access_guides/oom_access_info.rst b/docs/sections/guides/access_guides/oom_access_info.rst new file mode 100644 index 0000000000..2e779105f2 --- /dev/null +++ b/docs/sections/guides/access_guides/oom_access_info.rst @@ -0,0 +1,21 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright (C) 2022 Nordix Foundation + +.. _oom_access_info_guide: + +OOM Access Info +--------------- + +.. figure:: ../../resources/images/oom_logo/oomLogoV2-medium.png + :align: right + +Some relevant information regarding accessing OOM from outside the cluster etc + + +.. toctree:: + :maxdepth: 1 + + oom_ingress_access.rst + diff --git a/docs/sections/guides/access_guides/oom_ingress_access.rst b/docs/sections/guides/access_guides/oom_ingress_access.rst new file mode 100644 index 0000000000..0c64375098 --- /dev/null +++ b/docs/sections/guides/access_guides/oom_ingress_access.rst @@ -0,0 +1,18 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright (C) 2022 Nordix Foundation + +.. Links + + +.. figure:: ../../resources/images/oom_logo/oomLogoV2-medium.png + :align: right + +.. 
_oom_ingress_access:
+
+
+Ingress access to OOM
+#####################
+
+TBD
diff --git a/docs/sections/guides/deployment_guides/oom_customize_overrides.rst b/docs/sections/guides/deployment_guides/oom_customize_overrides.rst
new file mode 100644
index 0000000000..3acb8b6ee6
--- /dev/null
+++ b/docs/sections/guides/deployment_guides/oom_customize_overrides.rst
@@ -0,0 +1,48 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. Copyright (C) 2022 Nordix Foundation
+
+.. Links
+.. _helm deploy: https://github.com/onap/oom/blob/master/kubernetes/helm/plugins/deploy/deploy.sh
+
+.. _oom_customize_overrides:
+
+OOM Custom Overrides
+####################
+
+The OOM `helm deploy`_ plugin requires deployment configuration as input, usually in the form of override yaml files.
+These input files determine which ONAP components get deployed and how the OOM deployment is configured.
+
+Other helm config options like `--set log.enabled=true|false` are available.
+
+See the `helm deploy`_ plugin usage section for more detail, or if the plugin has already been installed, execute the following::
+
+  > helm deploy --help
+
+Users can customize the override files to suit their required deployment.
+
+.. note::
+  Standard and example override files (e.g. `onap-all.yaml`, `onap-all-ingress-istio.yaml`)
+  can be found in the `oom/kubernetes/onap/resources/overrides/` directory.
+
+  * Users can selectively enable or disable ONAP components by changing the ``enabled: true/false`` flags.
+
+  * Add a value for the global master password to the command line (e.g. ``--set global.masterPassword=My_superPassw0rd``).
+
+
+Enabling/Disabling Components
+-----------------------------
+Here is an example of the nominal entries that need to be provided.
+Different values files are available for different contexts.
+
+.. collapse:: Default ONAP values.yaml
+
+   .. include:: ../../../../kubernetes/onap/values.yaml
+      :code: yaml
+
+|
diff --git a/docs/sections/guides/deployment_guides/oom_deployment.rst b/docs/sections/guides/deployment_guides/oom_deployment.rst
new file mode 100644
index 0000000000..110736939e
--- /dev/null
+++ b/docs/sections/guides/deployment_guides/oom_deployment.rst
@@ -0,0 +1,42 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. Copyright (C) 2022 Nordix Foundation
+
+.. Links
+.. _ONAP Release Long Term Roadmap: https://wiki.onap.org/display/DW/Long+Term+Roadmap
+
+.. _oom_deploy_guide:
+
+OOM Deployment Guide
+--------------------
+
+.. figure:: ../../resources/images/oom_logo/oomLogoV2-medium.png
+   :align: right
+
+ONAP OOM supports several options for the deployment of ONAP using its helm charts.
+
+ * :ref:`oom_helm_release_repo_deploy`
+ * :ref:`oom_helm_testing_repo_deploy`
+ * :ref:`oom_dev_testing_local_deploy`
+
+.. warning::
+  | **Pre-requisites**
+  | The following sections must be completed before continuing with deployment:
+
+  | :ref:`Set up your base platform`
+
+
+Each deployment method can be customized to deploy a subset of ONAP component applications.
+See the :ref:`oom_customize_overrides` section for more details.
+
+
+..
toctree:: + :hidden: + + oom_customize_overrides.rst + oom_helm_release_repo_deploy.rst + oom_helm_testing_repo_deploy.rst + oom_dev_testing_local_deploy.rst + + diff --git a/docs/sections/guides/deployment_guides/oom_dev_testing_local_deploy.rst b/docs/sections/guides/deployment_guides/oom_dev_testing_local_deploy.rst new file mode 100644 index 0000000000..50701dd597 --- /dev/null +++ b/docs/sections/guides/deployment_guides/oom_dev_testing_local_deploy.rst @@ -0,0 +1,87 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright (C) 2022 Nordix Foundation + +.. Links + +.. _oom_dev_testing_local_deploy: + +OOM Developer Testing Deployment +================================ + +Developing and testing changes to the existing OOM project can be done locally by setting up some additional +tools to host the updated helm charts. + +**Step 1.** Clone the OOM repository from ONAP gerrit:: + + > git clone http://gerrit.onap.org/r/oom + + > cd oom/kubernetes + + +**Step 2.** Install Helm Plugin required to push helm charts to local repo:: + + > helm plugin install https://github.com/chartmuseum/helm-push.git --version 0.9.0 + +.. note:: + The ``--version 0.9.0`` is required as new version of helm (3.7.0 and up) is + now using ``push`` directly and helm-push is using ``cm-push`` starting + version ``0.10.0`` and up. + +**Step 3.** Install Chartmuseum + +Chart museum is required to host the helm charts locally when deploying in a development environment:: + + > curl -LO https://s3.amazonaws.com/chartmuseum/release/latest/bin/linux/amd64/chartmuseum + + > chmod +x ./chartmuseum + + > mv ./chartmuseum /usr/local/bin + +**Step 4.** To setup a local Helm server to store the ONAP charts:: + + > mkdir -p ~/helm3-storage + + > chartmuseum --storage local --storage-local-rootdir ~/helm3-storage -port 8879 & + +Note the port number that is listed and use it in the Helm repo add as follows:: + + > helm repo add local http://127.0.0.1:8879 + +**Step 5.** Verify your Helm repository setup with:: + + > helm repo list + NAME URL + local http://127.0.0.1:8879 + +**Step 6.** Build a local Helm repository (from the kubernetes directory):: + + > make SKIP_LINT=TRUE [HELM_BIN=] all + +`HELM_BIN` + Sets the helm binary to be used. The default value use helm from PATH + + +**Step 7.** Display the onap charts that are available to be deployed:: + + > helm repo update + + > helm search repo local + + +.. collapse:: Helm search repo output + + .. include:: ../../resources/helm/helm-search.txt + :code: yaml + +| + +.. note:: + The setup of the Helm repository is a one time activity. If you make changes + to your deployment charts or values be sure to use ``make`` to update your + local Helm repository. + + + + diff --git a/docs/sections/guides/deployment_guides/oom_helm_release_repo_deploy.rst b/docs/sections/guides/deployment_guides/oom_helm_release_repo_deploy.rst new file mode 100644 index 0000000000..f932360e44 --- /dev/null +++ b/docs/sections/guides/deployment_guides/oom_helm_release_repo_deploy.rst @@ -0,0 +1,44 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright (C) 2022 Nordix Foundation + +.. Links +.. _ONAP helm release repository: https://nexus3.onap.org/service/rest/repository/browse/onap-helm-release/ +.. _ONAP Release Long Term Roadmap: https://wiki.onap.org/display/DW/Long+Term+Roadmap + +.. 
_oom_helm_release_repo_deploy:
+
+OOM Helm Release Deployment
+===========================
+
+ONAP hosts the OOM release helm charts in its `ONAP helm release repository`_.
+
+This is the officially supported repository for the deployment of OOM.
+
+.. note::
+  ONAP supports up to N-1 releases. See `ONAP Release Long Term Roadmap`_ for more details.
+
+Add the OOM release repo & Deploy
+---------------------------------
+Add the repository:
+
+- To add the onap release helm repo, execute the following::
+
+    > helm repo add onap-release https://nexus3.onap.org/repository/onap-helm-release/
+
+.. note::
+  The following helm command will deploy ONAP charts, with `all` OOM components enabled as per the onap-all.yaml overrides file provided to the `-f` flag.
+
+  To customize which applications are deployed, provide your own custom overrides yaml file instead; see the :ref:`oom_customize_overrides` section for more details.
+
+- To deploy a release, execute the following, substituting the tag with your preferred release (e.g. 11.0.0)::
+
+    > helm deploy dev onap-release/onap --namespace onap --create-namespace --set global.masterPassword=myAwesomePasswordThatINeedToChange --version -f oom/kubernetes/onap/resources/overrides/onap-all.yaml
+
+
diff --git a/docs/sections/guides/deployment_guides/oom_helm_testing_repo_deploy.rst b/docs/sections/guides/deployment_guides/oom_helm_testing_repo_deploy.rst
new file mode 100644
index 0000000000..a0dafcef9b
--- /dev/null
+++ b/docs/sections/guides/deployment_guides/oom_helm_testing_repo_deploy.rst
@@ -0,0 +1,46 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. Copyright (C) 2022 Nordix Foundation
+
+.. Links
+.. _ONAP helm testing repository: https://nexus3.onap.org/service/rest/repository/browse/onap-helm-testing/
+.. _OOM: https://github.com/onap/oom
+
+.. _oom_helm_testing_repo_deploy:
+
+OOM Helm Testing Deployment
+===========================
+
+ONAP hosts the OOM `testing` helm charts in its `ONAP helm testing repository`_.
+
+This helm repo contains:
+
+ * The `latest` charts built from the head of the `OOM`_ project's master
+   branch, tagged with the version number of the current development cycle (e.g. 11.0.0).
+
+
+Add the OOM testing repo & Deploy
+---------------------------------
+.. note::
+  The testing helm charts for earlier releases are not fully supported. Test at your own risk.
+
+Add the repository:
+
+- To add the onap testing helm repo, execute the following::
+
+    > helm repo add onap-testing https://nexus3.onap.org/repository/onap-helm-testing/
+
+.. note::
+  The following helm command will deploy ONAP charts, with `all` OOM components enabled as per the onap-all.yaml overrides file provided to the `-f` flag.
+
+  To customize which applications are deployed, provide your own custom overrides yaml file instead; see the :ref:`oom_customize_overrides` section for more details.
+
+- To deploy the latest charts, target the repo added previously::
+
+    > helm deploy dev onap-testing/onap --namespace onap --create-namespace --set global.masterPassword=myAwesomePasswordThatINeedToChange -f oom/kubernetes/onap/resources/overrides/onap-all.yaml
+
+This will deploy the latest testing version of the OOM helm charts.
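+
+A custom overrides file follows the same structure as `onap-all.yaml`. The
+sketch below is illustrative only - the component selection and password value
+are examples, not a recommendation - and could be passed to the ``-f`` flag in
+place of `onap-all.yaml`:
+
+.. code-block:: yaml
+
+  ---
+  global:
+    masterPassword: myAwesomePasswordThatINeedToChange
+  aai:
+    enabled: true
+  sdc:
+    enabled: true
+  so:
+    enabled: true
+  sdnc:
+    enabled: false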
+ + + diff --git a/docs/sections/guides/development_guides/oom_dev_config_management.rst b/docs/sections/guides/development_guides/oom_dev_config_management.rst new file mode 100644 index 0000000000..36a02dc85d --- /dev/null +++ b/docs/sections/guides/development_guides/oom_dev_config_management.rst @@ -0,0 +1,444 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright 2018-2020 Amdocs, Bell Canada, Orange, Samsung +.. Modification copyright (C) 2022 Nordix Foundation + +.. Links + +.. _oom_dev_config_management: + + +Configuration Management +######################## + +ONAP is a large system composed of many components - each of which are complex +systems in themselves - that needs to be deployed in a number of different +ways. For example, within a single operator's network there may be R&D +deployments under active development, pre-production versions undergoing system +testing and production systems that are operating live networks. Each of these +deployments will differ in significant ways, such as the version of the +software images deployed. In addition, there may be a number of application +specific configuration differences, such as operating system environment +variables. The following describes how the Helm configuration management +system is used within the OOM project to manage both ONAP infrastructure +configuration as well as ONAP components configuration. + +One of the artifacts that OOM/Kubernetes uses to deploy ONAP components is the +deployment specification, yet another yaml file. Within these deployment specs +are a number of parameters as shown in the following example: + +.. code-block:: yaml + + apiVersion: apps/v1 + kind: StatefulSet + metadata: + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper + app.kubernetes.io/component: server + app.kubernetes.io/managed-by: Tiller + app.kubernetes.io/instance: onap-oof + name: onap-oof-zookeeper + namespace: onap + spec: + <...> + replicas: 3 + selector: + matchLabels: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/component: server + app.kubernetes.io/instance: onap-oof + serviceName: onap-oof-zookeeper-headless + template: + metadata: + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper + app.kubernetes.io/component: server + app.kubernetes.io/managed-by: Tiller + app.kubernetes.io/instance: onap-oof + spec: + <...> + affinity: + containers: + - name: zookeeper + <...> + image: gcr.io/google_samples/k8szk:v3 + imagePullPolicy: Always + <...> + ports: + - containerPort: 2181 + name: client + protocol: TCP + - containerPort: 3888 + name: election + protocol: TCP + - containerPort: 2888 + name: server + protocol: TCP + <...> + +Note that within the statefulset specification, one of the container arguments +is the key/value pair image: gcr.io/google_samples/k8szk:v3 which +specifies the version of the zookeeper software to deploy. Although the +statefulset specifications greatly simplify statefulset, maintenance of the +statefulset specifications themselves become problematic as software versions +change over time or as different versions are required for different +statefulsets. For example, if the R&D team needs to deploy a newer version of +mariadb than what is currently used in the production environment, they would +need to clone the statefulset specification and change this value. Fortunately, +this problem has been solved with the templating capabilities of Helm. 
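+
+In practice any field that is likely to differ between environments - the
+image tag, replica count, pull policy, resource limits and so on - is a
+candidate for such externalization. A brief illustrative sketch of the kind of
+values that end up in a separate file (the keys shown are examples only):
+
+.. code-block:: yaml
+
+  # values that commonly differ between R&D, pre-production and production
+  image: gcr.io/google_samples/k8szk:v3
+  replicaCount: 3
+  pullPolicy: IfNotPresent
+  resources:
+    limits:
+      cpu: "2"
+      memory: 4Gi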
+ +The following example shows how the statefulset specifications are modified to +incorporate Helm templates such that key/value pairs can be defined outside of +the statefulset specifications and passed during instantiation of the component. + +.. code-block:: yaml + + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . }} + labels: {{- include "common.labels" . | nindent 4 }} + spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{- include "common.matchLabels" . | nindent 6 }} + # serviceName is only needed for StatefulSet + # put the postfix part only if you have add a postfix on the service name + serviceName: {{ include "common.servicename" . }}-{{ .Values.service.postfix }} + <...> + template: + metadata: + labels: {{- include "common.labels" . | nindent 8 }} + annotations: {{- include "common.tplValue" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + name: {{ include "common.name" . }} + spec: + <...> + containers: + - name: {{ include "common.name" . }} + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }} + ports: + {{- range $index, $port := .Values.service.ports }} + - containerPort: {{ $port.port }} + name: {{ $port.name }} + {{- end }} + {{- range $index, $port := .Values.service.headlessPorts }} + - containerPort: {{ $port.port }} + name: {{ $port.name }} + {{- end }} + <...> + +This version of the statefulset specification has gone through the process of +templating values that are likely to change between statefulsets. Note that the +image is now specified as: image: {{ .Values.image }} instead of a +string used previously. During the statefulset phase, Helm (actually the Helm +sub-component Tiller) substitutes the {{ .. }} entries with a variable defined +in a values.yaml file. The content of this file is as follows: + +.. code-block:: yaml + + <...> + image: gcr.io/google_samples/k8szk:v3 + replicaCount: 3 + <...> + + +Within the values.yaml file there is an image key with the value +`gcr.io/google_samples/k8szk:v3` which is the same value used in +the non-templated version. Once all of the substitutions are complete, the +resulting statefulset specification ready to be used by Kubernetes. + +When creating a template consider the use of default values if appropriate. +Helm templating has built in support for DEFAULT values, here is +an example: + +.. code-block:: yaml + + imagePullSecrets: + - name: "{{ .Values.nsPrefix | default "onap" }}-docker-registry-key" + +The pipeline operator ("|") used here hints at that power of Helm templates in +that much like an operating system command line the pipeline operator allow +over 60 Helm functions to be embedded directly into the template (note that the +Helm template language is a superset of the Go template language). These +functions include simple string operations like upper and more complex flow +control operations like if/else. + +OOM is mainly helm templating. In order to have consistent deployment of the +different components of ONAP, some rules must be followed. + +Templates are provided in order to create Kubernetes resources (Secrets, +Ingress, Services, ...) or part of Kubernetes resources (names, labels, +resources requests and limits, ...). + +a full list and simple description is done in +`kubernetes/common/common/documentation.rst`. 
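+
+Before moving on to the individual resource templates, a short hypothetical
+fragment (the ``logLevel`` and ``tls.enabled`` values are invented for
+illustration) shows how the ``default`` and ``upper`` functions and an ``if``
+block mentioned above combine in practice:
+
+.. code-block:: yaml
+
+  env:
+    - name: LOG_LEVEL
+      value: {{ .Values.logLevel | default "info" | upper | quote }}
+  {{- if .Values.tls.enabled }}
+    - name: TLS_ENABLED
+      value: "true"
+  {{- end }}
+
+With ``logLevel: debug`` in `values.yaml` this renders the value as
+``"DEBUG"``; if the key is omitted, the default ``"INFO"`` is used instead.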
+ +Service template +---------------- + +In order to create a Service for a component, you have to create a file (with +`service` in the name. +For normal service, just put the following line: + +.. code-block:: yaml + + {{ include "common.service" . }} + +For headless service, the line to put is the following: + +.. code-block:: yaml + + {{ include "common.headlessService" . }} + +The configuration of the service is done in component `values.yaml`: + +.. code-block:: yaml + + service: + name: NAME-OF-THE-SERVICE + postfix: MY-POSTFIX + type: NodePort + annotations: + someAnnotationsKey: value + ports: + - name: tcp-MyPort + port: 5432 + nodePort: 88 + - name: http-api + port: 8080 + nodePort: 89 + - name: https-api + port: 9443 + nodePort: 90 + +`annotations` and `postfix` keys are optional. +if `service.type` is `NodePort`, then you have to give `nodePort` value for your +service ports (which is the end of the computed nodePort, see example). + +It would render the following Service Resource (for a component named +`name-of-my-component`, with version `x.y.z`, helm deployment name +`my-deployment` and `global.nodePortPrefix` `302`): + +.. code-block:: yaml + + apiVersion: v1 + kind: Service + metadata: + annotations: + someAnnotationsKey: value + name: NAME-OF-THE-SERVICE-MY-POSTFIX + labels: + app.kubernetes.io/name: name-of-my-component + helm.sh/chart: name-of-my-component-x.y.z + app.kubernetes.io/instance: my-deployment-name-of-my-component + app.kubernetes.io/managed-by: Tiller + spec: + ports: + - port: 5432 + targetPort: tcp-MyPort + nodePort: 30288 + - port: 8080 + targetPort: http-api + nodePort: 30289 + - port: 9443 + targetPort: https-api + nodePort: 30290 + selector: + app.kubernetes.io/name: name-of-my-component + app.kubernetes.io/instance: my-deployment-name-of-my-component + type: NodePort + +In the deployment or statefulSet file, you needs to set the good labels in +order for the service to match the pods. + +here's an example to be sure it matches (for a statefulSet): + +.. code-block:: yaml + + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . }} + labels: {{- include "common.labels" . | nindent 4 }} + spec: + selector: + matchLabels: {{- include "common.matchLabels" . | nindent 6 }} + # serviceName is only needed for StatefulSet + # put the postfix part only if you have add a postfix on the service name + serviceName: {{ include "common.servicename" . }}-{{ .Values.service.postfix }} + <...> + template: + metadata: + labels: {{- include "common.labels" . | nindent 8 }} + annotations: {{- include "common.tplValue" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + name: {{ include "common.name" . }} + spec: + <...> + containers: + - name: {{ include "common.name" . }} + ports: + {{- range $index, $port := .Values.service.ports }} + - containerPort: {{ $port.port }} + name: {{ $port.name }} + {{- end }} + {{- range $index, $port := .Values.service.headlessPorts }} + - containerPort: {{ $port.port }} + name: {{ $port.name }} + {{- end }} + <...> + +The configuration of the service is done in component `values.yaml`: + +.. 
code-block:: yaml + + service: + name: NAME-OF-THE-SERVICE + headless: + postfix: NONE + annotations: + anotherAnnotationsKey : value + publishNotReadyAddresses: true + headlessPorts: + - name: tcp-MyPort + port: 5432 + - name: http-api + port: 8080 + - name: https-api + port: 9443 + +`headless.annotations`, `headless.postfix` and +`headless.publishNotReadyAddresses` keys are optional. + +If `headless.postfix` is not set, then we'll add `-headless` at the end of the +service name. + +If it set to `NONE`, there will be not postfix. + +And if set to something, it will add `-something` at the end of the service +name. + +It would render the following Service Resource (for a component named +`name-of-my-component`, with version `x.y.z`, helm deployment name +`my-deployment` and `global.nodePortPrefix` `302`): + +.. code-block:: yaml + + apiVersion: v1 + kind: Service + metadata: + annotations: + anotherAnnotationsKey: value + name: NAME-OF-THE-SERVICE + labels: + app.kubernetes.io/name: name-of-my-component + helm.sh/chart: name-of-my-component-x.y.z + app.kubernetes.io/instance: my-deployment-name-of-my-component + app.kubernetes.io/managed-by: Tiller + spec: + clusterIP: None + ports: + - port: 5432 + targetPort: tcp-MyPort + nodePort: 30288 + - port: 8080 + targetPort: http-api + nodePort: 30289 + - port: 9443 + targetPort: https-api + nodePort: 30290 + publishNotReadyAddresses: true + selector: + app.kubernetes.io/name: name-of-my-component + app.kubernetes.io/instance: my-deployment-name-of-my-component + type: ClusterIP + +Previous example of StatefulSet would also match (except for the `postfix` part +obviously). + +Creating Deployment or StatefulSet +---------------------------------- + +Deployment and StatefulSet should use the `apps/v1` (which has appeared in +v1.9). +As seen on the service part, the following parts are mandatory: + +.. code-block:: yaml + + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . }} + labels: {{- include "common.labels" . | nindent 4 }} + spec: + selector: + matchLabels: {{- include "common.matchLabels" . | nindent 6 }} + # serviceName is only needed for StatefulSet + # put the postfix part only if you have add a postfix on the service name + serviceName: {{ include "common.servicename" . }}-{{ .Values.service.postfix }} + <...> + template: + metadata: + labels: {{- include "common.labels" . | nindent 8 }} + annotations: {{- include "common.tplValue" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + name: {{ include "common.name" . }} + spec: + <...> + containers: + - name: {{ include "common.name" . }} + +Dependency Management +--------------------- +These Helm charts describe the desired state +of an ONAP deployment and instruct the Kubernetes container manager as to how +to maintain the deployment in this state. These dependencies dictate the order +in-which the containers are started for the first time such that such +dependencies are always met without arbitrary sleep times between container +startups. For example, the SDC back-end container requires the Elastic-Search, +Cassandra and Kibana containers within SDC to be ready and is also dependent on +DMaaP (or the message-router) to be ready - where ready implies the built-in +"readiness" probes succeeded - before becoming fully operational. 
When an +initial deployment of ONAP is requested the current state of the system is NULL +so ONAP is deployed by the Kubernetes manager as a set of Docker containers on +one or more predetermined hosts. The hosts could be physical machines or +virtual machines. When deploying on virtual machines the resulting system will +be very similar to "Heat" based deployments, i.e. Docker containers running +within a set of VMs, the primary difference being that the allocation of +containers to VMs is done dynamically with OOM and statically with "Heat". +Example SO deployment descriptor file shows SO's dependency on its mariadb +data-base component: + +SO deployment specification excerpt: + +.. code-block:: yaml + + apiVersion: apps/v1 + kind: Deployment + metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . }} + labels: {{- include "common.labels" . | nindent 4 }} + spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{- include "common.matchLabels" . | nindent 6 }} + template: + metadata: + labels: + app: {{ include "common.name" . }} + release: {{ .Release.Name }} + spec: + initContainers: + - command: + - /app/ready.py + args: + - --container-name + - so-mariadb + env: + ... \ No newline at end of file diff --git a/docs/sections/guides/development_guides/oom_dev_container_orchestration.rst b/docs/sections/guides/development_guides/oom_dev_container_orchestration.rst new file mode 100644 index 0000000000..b137bff8b6 --- /dev/null +++ b/docs/sections/guides/development_guides/oom_dev_container_orchestration.rst @@ -0,0 +1,366 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright 2018-2020 Amdocs, Bell Canada, Orange, Samsung +.. Modification copyright (C) 2022 Nordix Foundation + +.. Links +.. _Kubernetes: https://Kubernetes.io/ +.. _AWS Elastic Block Store: https://aws.amazon.com/ebs/ +.. _Azure File: https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction +.. _GCE Persistent Disk: https://cloud.google.com/compute/docs/disks/ +.. _Gluster FS: https://www.gluster.org/ +.. _Kubernetes Storage Class: https://Kubernetes.io/docs/concepts/storage/storage-classes/ +.. _Assigning Pods to Nodes: https://Kubernetes.io/docs/concepts/configuration/assign-pod-node/ + + +.. _oom_dev_container_orch: + +Kubernetes Container Orchestration +################################## + +The ONAP components are managed by the Kubernetes_ container management system +which maintains the desired state of the container system as described by one +or more deployment descriptors - similar in concept to OpenStack HEAT +Orchestration Templates. The following sections describe the fundamental +objects managed by Kubernetes, the network these components use to communicate +with each other and other entities outside of ONAP and the templates that +describe the configuration and desired state of the ONAP components. + +**Name Spaces** + +Within the namespaces are Kubernetes services that provide external +connectivity to pods that host Docker containers. + +ONAP Components to Kubernetes Object Relationships +-------------------------------------------------- +Kubernetes deployments consist of multiple objects: + +- **nodes** - a worker machine - either physical or virtual - that hosts + multiple containers managed by Kubernetes. +- **services** - an abstraction of a logical set of pods that provide a + micro-service. 
+- **pods** - one or more (but typically one) container(s) that provide specific + application functionality. +- **persistent volumes** - One or more permanent volumes need to be established + to hold non-ephemeral configuration and state data. + +The relationship between these objects is shown in the following figure: + +.. .. uml:: +.. +.. @startuml +.. node PH { +.. component Service { +.. component Pod0 +.. component Pod1 +.. } +.. } +.. +.. database PV +.. @enduml + +.. figure:: ../../resources/images/k8s/kubernetes_objects.png + +OOM uses these Kubernetes objects as described in the following sections. + +Nodes +~~~~~ +OOM works with both physical and virtual worker machines. + +* Virtual Machine Deployments - If ONAP is to be deployed onto a set of virtual + machines, the creation of the VMs is outside of the scope of OOM and could be + done in many ways, such as + + * manually, for example by a user using the OpenStack Horizon dashboard or + AWS EC2, or + * automatically, for example with the use of a OpenStack Heat Orchestration + Template which builds an ONAP stack, Azure ARM template, AWS CloudFormation + Template, or + * orchestrated, for example with Cloudify creating the VMs from a TOSCA + template and controlling their life cycle for the life of the ONAP + deployment. + +* Physical Machine Deployments - If ONAP is to be deployed onto physical + machines there are several options but the recommendation is to use Rancher + along with Helm to associate hosts with a Kubernetes cluster. + +Pods +~~~~ +A group of containers with shared storage and networking can be grouped +together into a Kubernetes pod. All of the containers within a pod are +co-located and co-scheduled so they operate as a single unit. Within ONAP +Amsterdam release, pods are mapped one-to-one to docker containers although +this may change in the future. As explained in the Services section below the +use of Pods within each ONAP component is abstracted from other ONAP +components. + +Services +~~~~~~~~ +OOM uses the Kubernetes service abstraction to provide a consistent access +point for each of the ONAP components independent of the pod or container +architecture of that component. For example, the SDNC component may introduce +OpenDaylight clustering as some point and change the number of pods in this +component to three or more but this change will be isolated from the other ONAP +components by the service abstraction. A service can include a load balancer +on its ingress to distribute traffic between the pods and even react to dynamic +changes in the number of pods if they are part of a replica set. + +Persistent Volumes +~~~~~~~~~~~~~~~~~~ +To enable ONAP to be deployed into a wide variety of cloud infrastructures a +flexible persistent storage architecture, built on Kubernetes persistent +volumes, provides the ability to define the physical storage in a central +location and have all ONAP components securely store their data. + +When deploying ONAP into a public cloud, available storage services such as +`AWS Elastic Block Store`_, `Azure File`_, or `GCE Persistent Disk`_ are +options. Alternatively, when deploying into a private cloud the storage +architecture might consist of Fiber Channel, `Gluster FS`_, or iSCSI. Many +other storage options existing, refer to the `Kubernetes Storage Class`_ +documentation for a full list of the options. 
The storage architecture may vary +from deployment to deployment but in all cases a reliable, redundant storage +system must be provided to ONAP with which the state information of all ONAP +components will be securely stored. The Storage Class for a given deployment is +a single parameter listed in the ONAP values.yaml file and therefore is easily +customized. Operation of this storage system is outside the scope of the OOM. + +.. code-block:: yaml + + Insert values.yaml code block with storage block here + +Once the storage class is selected and the physical storage is provided, the +ONAP deployment step creates a pool of persistent volumes within the given +physical storage that is used by all of the ONAP components. ONAP components +simply make a claim on these persistent volumes (PV), with a persistent volume +claim (PVC), to gain access to their storage. + +The following figure illustrates the relationships between the persistent +volume claims, the persistent volumes, the storage class, and the physical +storage. + +.. graphviz:: + + digraph PV { + label = "Persistance Volume Claim to Physical Storage Mapping" + { + node [shape=cylinder] + D0 [label="Drive0"] + D1 [label="Drive1"] + Dx [label="Drivex"] + } + { + node [shape=Mrecord label="StorageClass:ceph"] + sc + } + { + node [shape=point] + p0 p1 p2 + p3 p4 p5 + } + subgraph clusterSDC { + label="SDC" + PVC0 + PVC1 + } + subgraph clusterSDNC { + label="SDNC" + PVC2 + } + subgraph clusterSO { + label="SO" + PVCn + } + PV0 -> sc + PV1 -> sc + PV2 -> sc + PVn -> sc + + sc -> {D0 D1 Dx} + PVC0 -> PV0 + PVC1 -> PV1 + PVC2 -> PV2 + PVCn -> PVn + + # force all of these nodes to the same line in the given order + subgraph { + rank = same; PV0;PV1;PV2;PVn;p0;p1;p2 + PV0->PV1->PV2->p0->p1->p2->PVn [style=invis] + } + + subgraph { + rank = same; D0;D1;Dx;p3;p4;p5 + D0->D1->p3->p4->p5->Dx [style=invis] + } + + } + +In-order for an ONAP component to use a persistent volume it must make a claim +against a specific persistent volume defined in the ONAP common charts. Note +that there is a one-to-one relationship between a PVC and PV. The following is +an excerpt from a component chart that defines a PVC: + +.. code-block:: yaml + + Insert PVC example here + +OOM Networking with Kubernetes +------------------------------ + +- DNS +- Ports - Flattening the containers also expose port conflicts between the + containers which need to be resolved. + + +Pod Placement Rules +------------------- +OOM will use the rich set of Kubernetes node and pod affinity / +anti-affinity rules to minimize the chance of a single failure resulting in a +loss of ONAP service. Node affinity / anti-affinity is used to guide the +Kubernetes orchestrator in the placement of pods on nodes (physical or virtual +machines). For example: + +- if a container used Intel DPDK technology the pod may state that it as + affinity to an Intel processor based node, or +- geographical based node labels (such as the Kubernetes standard zone or + region labels) may be used to ensure placement of a DCAE complex close to the + VNFs generating high volumes of traffic thus minimizing networking cost. + Specifically, if nodes were pre-assigned labels East and West, the pod + deployment spec to distribute pods to these nodes would be: + +.. 
code-block:: yaml + + nodeSelector: + failure-domain.beta.Kubernetes.io/region: {{ .Values.location }} + +- "location: West" is specified in the `values.yaml` file used to deploy + one DCAE cluster and "location: East" is specified in a second `values.yaml` + file (see OOM Configuration Management for more information about + configuration files like the `values.yaml` file). + +Node affinity can also be used to achieve geographic redundancy if pods are +assigned to multiple failure domains. For more information refer to `Assigning +Pods to Nodes`_. + +.. note:: + One could use Pod to Node assignment to totally constrain Kubernetes when + doing initial container assignment to replicate the Amsterdam release + OpenStack Heat based deployment. Should one wish to do this, each VM would + need a unique node name which would be used to specify a node constaint + for every component. These assignment could be specified in an environment + specific values.yaml file. Constraining Kubernetes in this way is not + recommended. + +Kubernetes has a comprehensive system called Taints and Tolerations that can be +used to force the container orchestrator to repel pods from nodes based on +static events (an administrator assigning a taint to a node) or dynamic events +(such as a node becoming unreachable or running out of disk space). There are +no plans to use taints or tolerations in the ONAP Beijing release. Pod +affinity / anti-affinity is the concept of creating a spacial relationship +between pods when the Kubernetes orchestrator does assignment (both initially +an in operation) to nodes as explained in Inter-pod affinity and anti-affinity. +For example, one might choose to co-located all of the ONAP SDC containers on a +single node as they are not critical runtime components and co-location +minimizes overhead. On the other hand, one might choose to ensure that all of +the containers in an ODL cluster (SDNC and APPC) are placed on separate nodes +such that a node failure has minimal impact to the operation of the cluster. +An example of how pod affinity / anti-affinity is shown below: + +Pod Affinity / Anti-Affinity + +.. code-block:: yaml + + apiVersion: v1 + kind: Pod + metadata: + name: with-pod-affinity + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: security + operator: In + values: + - S1 + topologyKey: failure-domain.beta.Kubernetes.io/zone + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: security + operator: In + values: + - S2 + topologyKey: Kubernetes.io/hostname + containers: + - name: with-pod-affinity + image: gcr.io/google_containers/pause:2.0 + +This example contains both podAffinity and podAntiAffinity rules, the first +rule is is a must (requiredDuringSchedulingIgnoredDuringExecution) while the +second will be met pending other considerations +(preferredDuringSchedulingIgnoredDuringExecution). Preemption Another feature +that may assist in achieving a repeatable deployment in the presence of faults +that may have reduced the capacity of the cloud is assigning priority to the +containers such that mission critical components have the ability to evict less +critical components. Kubernetes provides this capability with Pod Priority and +Preemption. 
Prior to having more advanced
+production grade features available, the ability to at least be able to
+re-deploy ONAP (or a subset of it) reliably provides a level of confidence
+that, should an outage occur, the system can be brought back on-line
+predictably.
+
+Health Checks
+-------------
+
+Monitoring of ONAP components is configured in the agents within JSON files
+and stored in gerrit under the consul-agent-config; here is an example from
+the AAI model loader (aai-model-loader-health.json):
+
+.. code-block:: json
+
+  {
+    "service": {
+      "name": "A&AI Model Loader",
+      "checks": [
+        {
+          "id": "model-loader-process",
+          "name": "Model Loader Presence",
+          "script": "/consul/config/scripts/model-loader-script.sh",
+          "interval": "15s",
+          "timeout": "1s"
+        }
+      ]
+    }
+  }
+
+Liveness Probes
+---------------
+
+These liveness probes can simply check that a port is available, that a
+built-in health check is reporting good health, or that the Consul health
+check is positive. For example, to monitor the SDNC component, the following
+liveness probe can be found in the SDNC DB deployment specification:
+
+.. code-block:: yaml
+
+  # sdnc db liveness probe
+  livenessProbe:
+    exec:
+      command: ["mysqladmin", "ping"]
+    initialDelaySeconds: 30
+    periodSeconds: 10
+    timeoutSeconds: 5
+
+The 'initialDelaySeconds' value controls the period of time between the
+readiness probe succeeding and the liveness probe starting. 'periodSeconds'
+and 'timeoutSeconds' control the actual operation of the probe. Note that
+containers are inherently ephemeral, so the healing action destroys failed
+containers and any state information within them. To avoid a loss of state, a
+persistent volume should be used to store all data that needs to be persisted
+over the re-creation of a container. Persistent volumes have been created for
+the database components of each of the projects and the same technique can be
+used for all persistent state information.
\ No newline at end of file
diff --git a/docs/sections/guides/development_guides/oom_dev_helm_chart_info.rst b/docs/sections/guides/development_guides/oom_dev_helm_chart_info.rst
new file mode 100644
index 0000000000..533f60e29b
--- /dev/null
+++ b/docs/sections/guides/development_guides/oom_dev_helm_chart_info.rst
@@ -0,0 +1,172 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. Copyright 2018-2020 Amdocs, Bell Canada, Orange, Samsung
+.. Modification copyright (C) 2022 Nordix Foundation
+
+.. Links
+.. _Helm Charts: https://artifacthub.io/packages/search
+.. _aai: https://github.com/onap/oom/tree/master/kubernetes/aai
+.. _name.tpl: https://github.com/onap/oom/blob/master/kubernetes/common/common/templates/_name.tpl
+.. _namespace.tpl: https://github.com/onap/oom/blob/master/kubernetes/common/common/templates/_namespace.tpl
+
+.. _oom_helm_chart_info:
+
+Helm Charts
+###########
+
+A Helm chart is a collection of files that describe a related set of Kubernetes
+resources. A simple chart might be used to deploy something simple, like a
+memcached pod, while a complex chart might contain many micro-services arranged
+in a hierarchy, as found in the `aai`_ ONAP component.
+
+Charts are created as files laid out in a particular directory tree, then they
+can be packaged into versioned archives to be deployed. There is a public
+archive of `Helm Charts`_ on ArtifactHUB that includes many technologies
+applicable to ONAP.
Some of these charts have been used in ONAP and all of the ONAP charts +have been created following the guidelines provided. + +An example structure of the OOM common helm charts is shown below: + +.. code-block:: bash + + common + ├── cassandra + │   ├── Chart.yaml + │   ├── resources + │   │   ├── config + │   │   │   └── docker-entrypoint.sh + │   │   ├── exec.py + │   │   └── restore.sh + │   ├── templates + │   │   ├── backup + │   │   │   ├── configmap.yaml + │   │   │   ├── cronjob.yaml + │   │   │   ├── pv.yaml + │   │   │   └── pvc.yaml + │   │   ├── configmap.yaml + │   │   ├── pv.yaml + │   │   ├── service.yaml + │   │   └── statefulset.yaml + │   └── values.yaml + ├── common + │   ├── Chart.yaml + │   ├── templates + │   │   ├── _createPassword.tpl + │   │   ├── _ingress.tpl + │   │   ├── _labels.tpl + │   │   ├── _mariadb.tpl + │   │   ├── _name.tpl + │   │   ├── _namespace.tpl + │   │   ├── _repository.tpl + │   │   ├── _resources.tpl + │   │   ├── _secret.yaml + │   │   ├── _service.tpl + │   │   ├── _storage.tpl + │   │   └── _tplValue.tpl + │   └── values.yaml + ├── ... + └── postgres-legacy +    ├── Chart.yaml + ├── charts + └── configs + +The common section of charts consists of a set of templates that assist with +parameter substitution (`name.tpl`_, `namespace.tpl`_, etc) and a set of +charts for components used throughout ONAP. When the common components are used +by other charts they are instantiated each time or we can deploy a shared +instances for several components. + +All of the ONAP components have charts that follow the pattern shown below: + +.. code-block:: bash + + name-of-my-component + ├── Chart.yaml + ├── component + │   └── subcomponent-folder + ├── charts + │   └── subchart-folder + ├── resources + │   ├── folder1 + │   │   ├── file1 + │   │   └── file2 + │   └── folder1 + │   ├── file3 + │   └── folder3 + │      └── file4 + ├── templates + │   ├── NOTES.txt + │   ├── configmap.yaml + │   ├── deployment.yaml + │   ├── ingress.yaml + │   ├── job.yaml + │   ├── secrets.yaml + │   └── service.yaml + └── values.yaml + +Note that the /components sub dir may include a hierarchy of sub +components and in themselves can be quite complex. + +You can use either `charts` or `components` folder for your subcomponents. +`charts` folder means that the subcomponent will always been deployed. + +`components` folders means we can choose if we want to deploy the subcomponent. + +This choice is done in root `values.yaml`: + +.. code-block:: yaml + + --- + global: + key: value + + component1: + enabled: true + component2: + enabled: true + +Then in `Chart.yaml` dependencies section, you'll use these values: + +.. code-block:: yaml + + --- + dependencies: + - name: common + version: ~x.y-0 + repository: '@local' + - name: component1 + version: ~x.y-0 + repository: 'file://components/component1' + condition: component1.enabled + - name: component2 + version: ~x.y-0 + repository: 'file://components/component2' + condition: component2.enabled + +Configuration of the components varies somewhat from component to component but +generally follows the pattern of one or more `configmap.yaml` files which can +directly provide configuration to the containers in addition to processing +configuration files stored in the `config` directory. It is the responsibility +of each ONAP component team to update these configuration files when changes +are made to the project containers that impact configuration. 
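+
+As an illustration of that pattern, a `configmap.yaml` template is often little
+more than a wrapper that loads everything found under the chart's
+`resources/config` directory. The following is a minimal sketch (the exact
+helpers used vary between components):
+
+.. code-block:: yaml
+
+  apiVersion: v1
+  kind: ConfigMap
+  metadata:
+    name: {{ include "common.fullname" . }}
+    namespace: {{ include "common.namespace" . }}
+    labels: {{- include "common.labels" . | nindent 4 }}
+  data:
+  {{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}
+
+Because the files are passed through ``tpl``, they may themselves contain
+template expressions that are resolved before being placed in the ConfigMap.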
+ +The following section describes how the hierarchical ONAP configuration system +is key to management of such a large system. + + +.. MISC +.. ==== +.. Note that although OOM uses Kubernetes facilities to minimize the effort +.. required of the ONAP component owners to implement a successful rolling +.. upgrade strategy there are other considerations that must be taken into +.. consideration. +.. For example, external APIs - both internal and external to ONAP - should be +.. designed to gracefully accept transactions from a peer at a different +.. software version to avoid deadlock situations. Embedded version codes in +.. messages may facilitate such capabilities. +.. +.. Within each of the projects a new configuration repository contains all of +.. the project specific configuration artifacts. As changes are made within +.. the project, it's the responsibility of the project team to make appropriate +.. changes to the configuration data. diff --git a/docs/sections/guides/development_guides/oom_development.rst b/docs/sections/guides/development_guides/oom_development.rst new file mode 100644 index 0000000000..169e211450 --- /dev/null +++ b/docs/sections/guides/development_guides/oom_development.rst @@ -0,0 +1,64 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright (C) 2022 Nordix Foundation + +.. Links +.. _Helm: https://docs.helm.sh/ +.. _Helm Charts: https://github.com/kubernetes/charts +.. _Kubernetes: https://Kubernetes.io/ +.. _Docker: https://www.docker.com/ +.. _Nexus: https://nexus.onap.org/ + +.. _oom_dev_guide: + +OOM Developer Guide +################### + +.. figure:: ../../resources/images/oom_logo/oomLogoV2-medium.png + :align: right + +ONAP consists of a large number of components, each of which are substantial +projects within themselves, which results in a high degree of complexity in +deployment and management. To cope with this complexity the ONAP Operations +Manager (OOM) uses a Helm_ model of ONAP - Helm being the primary management +system for Kubernetes_ container systems - to drive all user driven life-cycle +management operations. The Helm model of ONAP is composed of a set of +hierarchical Helm charts that define the structure of the ONAP components and +the configuration of these components. These charts are fully parameterized +such that a single environment file defines all of the parameters needed to +deploy ONAP. A user of ONAP may maintain several such environment files to +control the deployment of ONAP in multiple environments such as development, +pre-production, and production. + + + +.. rubric:: Container Background + +Linux containers allow for an application and all of its operating system +dependencies to be packaged and deployed as a single unit without including a +guest operating system as done with virtual machines. The most popular +container solution is Docker_ which provides tools for container management +like the Docker Host (dockerd) which can create, run, stop, move, or delete a +container. Docker has a very popular registry of containers images that can be +used by any Docker system; however, in the ONAP context, Docker images are +built by the standard CI/CD flow and stored in Nexus_ repositories. OOM uses +the "standard" ONAP docker containers and three new ones specifically created +for OOM. 
+ +Containers are isolated from each other primarily via name spaces within the +Linux kernel without the need for multiple guest operating systems. As such, +multiple containers can be deployed with little overhead such as all of ONAP +can be deployed on a single host. With some optimization of the ONAP components +(e.g. elimination of redundant database instances) it may be possible to deploy +ONAP on a single laptop computer. + +The following sections describe how the ONAP Helm charts are constructed. + +.. toctree:: + :maxdepth: 1 + + oom_dev_helm_chart_info.rst + oom_dev_config_management.rst + oom_dev_container_orchestration.rst + diff --git a/docs/sections/guides/infra_guides/oom_base_config_setup.rst b/docs/sections/guides/infra_guides/oom_base_config_setup.rst new file mode 100644 index 0000000000..d228f5df56 --- /dev/null +++ b/docs/sections/guides/infra_guides/oom_base_config_setup.rst @@ -0,0 +1,187 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright (C) 2022 Nordix Foundation + +.. Links +.. _HELM Best Practices Guide: https://docs.helm.sh/chart_best_practices/#requirements +.. _helm installation guide: https://helm.sh/docs/intro/install/ +.. _kubectl installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/ +.. _Curated applications for Kubernetes: https://github.com/kubernetes/charts +.. _Cert-Manager Installation documentation: https://cert-manager.io/docs/installation/kubernetes/ +.. _Cert-Manager kubectl plugin documentation: https://cert-manager.io/docs/usage/kubectl-plugin/ +.. _Strimzi Apache Kafka Operator helm Installation documentation: https://strimzi.io/docs/operators/in-development/deploying.html#deploying-cluster-operator-helm-chart-str + +.. _oom_base_setup_guide: + +OOM Base Platform +################# + +As part of the initial base setup of the host Kubernetes cluster, +the following mandatory installation and configuration steps must be completed. + +.. contents:: + :backlinks: top + :depth: 1 + :local: +.. + +For additional platform add-ons, see the :ref:`oom_base_optional_addons` section. + +Install & configure kubectl +*************************** +The Kubernetes command line interface used to manage a Kubernetes cluster needs to be installed +and configured to run as non root. + +For additional information regarding kubectl installation and configuration see the `kubectl installation guide`_ + +To install kubectl, execute the following, replacing the with the version defined +in the :ref:`versions_table` table:: + + > curl -LO https://dl.k8s.io/release/v/bin/linux/amd64/kubectl + + > chmod +x ./kubectl + + > sudo mv ./kubectl /usr/local/bin/kubectl + + > mkdir ~/.kube + + > cp kube_config_cluster.yml ~/.kube/config.onap + + > export KUBECONFIG=~/.kube/config.onap + + > kubectl config use-context onap + +Validate the installation:: + + > kubectl get nodes + +:: + + NAME STATUS ROLES AGE VERSION + onap-control-1 Ready controlplane,etcd 3h53m v1.23.8 + onap-control-2 Ready controlplane,etcd 3h53m v1.23.8 + onap-k8s-1 Ready worker 3h53m v1.23.8 + onap-k8s-2 Ready worker 3h53m v1.23.8 + onap-k8s-3 Ready worker 3h53m v1.23.8 + onap-k8s-4 Ready worker 3h53m v1.23.8 + onap-k8s-5 Ready worker 3h53m v1.23.8 + onap-k8s-6 Ready worker 3h53m v1.23.8 + + +Install & configure helm +************************ +Helm is used for package and configuration management of the relevant helm charts. 
+For additional information, see the `helm installation guide`_ + +To install helm, execute the following, replacing the with the version defined +in the :ref:`versions_table` table:: + + > wget https://get.helm.sh/helm-v-linux-amd64.tar.gz + + > tar -zxvf helm-v-linux-amd64.tar.gz + + > sudo mv linux-amd64/helm /usr/local/bin/helm + +Verify the helm version with:: + + > helm version + +Helm's default CNCF provided `Curated applications for Kubernetes`_ repository called +*stable* can be removed to avoid confusion:: + + > helm repo remove stable + +Install the additional OOM plugins required to un/deploy the OOM helm charts:: + + > git clone http://gerrit.onap.org/r/oom + + > cp -R ~/oom/kubernetes/helm/plugins/ /usr/local/bin/helm/plugins + +Verify the plugins are installed:: + + > helm plugin ls + +:: + + NAME VERSION DESCRIPTION + deploy 1.0.0 install (upgrade if release exists) parent charty and all subcharts as separate but related releases + undeploy 1.0.0 delete parent chart and subcharts that were deployed as separate releases + + +Install the strimzi kafka operator +********************************** +Strimzi Apache Kafka provides a way to run an Apache Kafka cluster on Kubernetes +in various deployment configurations by using kubernetes operators. +Operators are a method of packaging, deploying, and managing Kubernetes applications. + +Strimzi Operators extend the Kubernetes functionality, automating common +and complex tasks related to a Kafka deployment. By implementing +knowledge of Kafka operations in code, the Kafka administration +tasks are simplified and require less manual intervention. + +The Strimzi cluster operator is deployed using helm to install the parent chart +containing all of the required custom resource definitions. This should be done +by a kubernetes administrator to allow for deployment of custom resources in to +any kubernetes namespace within the cluster. + +Full installation instructions can be found in the +`Strimzi Apache Kafka Operator helm Installation documentation`_. + +To add the required helm repository, execute the following:: + + > helm repo add strimzi https://strimzi.io/charts/ + +To install the strimzi kafka operator, execute the following, replacing the with the version defined +in the :ref:`versions_table` table:: + + > helm install strimzi-kafka-operator strimzi/strimzi-kafka-operator --namespace strimzi-system --version --set watchAnyNamespace=true --create-namespace + +Verify the installation:: + + > kubectl get po -n strimzi-system + +:: + + NAME READY STATUS RESTARTS AGE + strimzi-cluster-operator-7f7d6b46cf-mnpjr 1/1 Running 0 2m + + +Install Cert-Manager +******************** + +Cert-Manager is a native Kubernetes certificate management controller. +It can help with issuing certificates from a variety of sources, such as +Let’s Encrypt, HashiCorp Vault, Venafi, a simple signing key pair, self +signed or external issuers. It ensures certificates are valid and up to +date, and attempt to renew certificates at a configured time before expiry. + +Cert-Manager is deployed using regular YAML manifests which include all +the needed resources (the CustomResourceDefinitions, cert-manager, +namespace, and the webhook component). + +Full installation instructions, including details on how to configure extra +functionality in Cert-Manager can be found in the +`Cert-Manager Installation documentation`_. + +There is also a kubectl plugin (kubectl cert-manager) that can help you +to manage cert-manager resources inside your cluster. 
For installation +steps, please refer to `Cert-Manager kubectl plugin documentation`_. + + +To install cert-manager, execute the following, replacing the with the version defined +in the :ref:`versions_table` table:: + + > kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v/cert-manager.yaml + +Verify the installation:: + + > kubectl get po -n cert-manager + +:: + + NAME READY STATUS RESTARTS AGE + cert-manager-776c4cfcb6-vgnpw 1/1 Running 0 2m + cert-manager-cainjector-7d9668978d-hdxf7 1/1 Running 0 2m + cert-manager-webhook-66c8f6c75-dxmtz 1/1 Running 0 2m + diff --git a/docs/sections/guides/infra_guides/oom_base_optional_addons.rst b/docs/sections/guides/infra_guides/oom_base_optional_addons.rst new file mode 100644 index 0000000000..4b4fbf7883 --- /dev/null +++ b/docs/sections/guides/infra_guides/oom_base_optional_addons.rst @@ -0,0 +1,41 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright (C) 2022 Nordix Foundation + +.. Links +.. _Prometheus stack README: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#readme + +.. _oom_base_optional_addons: + +OOM Optional Addons +################### + +The following optional applications can be added to your kubernetes environment. + +Install Prometheus Stack +************************ + +Prometheus is an open-source systems monitoring and alerting toolkit with +an active ecosystem. + +Kube Prometheus Stack is a collection of Kubernetes manifests, Grafana +dashboards, and Prometheus rules combined with documentation and scripts to +provide easy to operate end-to-end Kubernetes cluster monitoring with +Prometheus using the Prometheus Operator. As it includes both Prometheus +Operator and Grafana dashboards, there is no need to set up them separately. +See the `Prometheus stack README`_ for more information. + +To install the prometheus stack, execute the following: + +- Add the prometheus-community Helm repository:: + + > helm repo add prometheus-community https://prometheus-community.github.io/helm-charts + +- Update your local Helm chart repository cache:: + + > helm repo update + +- To install prometheus, execute the following, replacing the with the version defined in the :ref:`versions_table` table:: + + > helm install prometheus prometheus-community/kube-prometheus-stack --namespace=prometheus --create-namespace --version= diff --git a/docs/sections/guides/infra_guides/oom_infra_setup.rst b/docs/sections/guides/infra_guides/oom_infra_setup.rst new file mode 100644 index 0000000000..d8fb743f42 --- /dev/null +++ b/docs/sections/guides/infra_guides/oom_infra_setup.rst @@ -0,0 +1,72 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright (C) 2022 Nordix Foundation + +.. Links +.. _Kubernetes: https://kubernetes.io/ +.. _Kubernetes best practices: https://kubernetes.io/docs/setup/best-practices/cluster-large/ +.. _kubelet confg guide: https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/ + +.. _oom_infra_setup_guide: + +OOM Infrastructure Guide +######################## + +.. 
figure:: ../../resources/images/oom_logo/oomLogoV2-medium.png + :align: right + +OOM deploys and manages ONAP on a pre-established Kubernetes_ cluster - the +creation of this cluster is outside of the scope of the OOM project as there +are many options including public clouds with pre-established environments. +If creation of a Kubernetes cluster is required, the life-cycle of this +cluster is independent of the life-cycle of the ONAP components themselves. + +.. rubric:: Minimum Hardware Configuration + +Some recommended hardware requirements are provided below. Note that this is for a +full ONAP deployment (all components). + +.. table:: OOM Hardware Requirements + + ===== ===== ====== ==================== + RAM HD vCores Ports + ===== ===== ====== ==================== + 224GB 160GB 112 0.0.0.0/0 (all open) + ===== ===== ====== ==================== + +Customizing ONAP to deploy only components that are needed will drastically reduce these requirements. +See the :ref:`OOM customized deployment` section for more details. + +.. note:: + | Kubernetes supports a maximum of 110 pods per node - this can be overcome by modifying your kubelet config. + | See the `kubelet confg guide`_ for more information. + + | The use of many small nodes is preferred over a few larger nodes (for example 14 x 16GB - 8 vCores each). + + | OOM can be deployed on a private set of physical hosts or VMs (or even a combination of the two). + +.. rubric:: Software Requirements + +The versions of software that are supported by OOM are as follows: + +.. _versions_table: + +.. table:: OOM Software Requirements + + ============== =========== ======= ======== ======== ============ ================= ======= + Release Kubernetes Helm kubectl Docker Cert-Manager Prometheus Stack Strimzi + ============== =========== ======= ======== ======== ============ ================= ======= + Jakarta 1.22.4 3.6.3 1.22.4 20.10.x 1.8.0 35.x 0.28.0 + Kohn 1.23.8 3.8.2 1.23.8 20.10.x 1.8.0 35.x 0.31.1 + ============== =========== ======= ======== ======== ============ ================= ======= + + +.. toctree:: + :hidden: + + oom_base_config_setup.rst + oom_base_optional_addons.rst + oom_setup_ingress_controller.rst + + diff --git a/docs/sections/guides/infra_guides/oom_setup_ingress_controller.rst b/docs/sections/guides/infra_guides/oom_setup_ingress_controller.rst new file mode 100644 index 0000000000..8c261fdfd7 --- /dev/null +++ b/docs/sections/guides/infra_guides/oom_setup_ingress_controller.rst @@ -0,0 +1,176 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright 2020, Samsung Electronics +.. Modification copyright (C) 2022 Nordix Foundation + +.. Links +.. _metallb Metal Load Balancer installation: https://metallb.universe.tf/installation/ + +.. _oom_setup_ingress_controller: + +OOM Ingress controller setup +############################ + +.. warning:: + This guide should prob go in the Optional addons section + +This optional guide provides instruction how to setup experimental ingress controller +feature. For this, we are hosting our cluster on OpenStack VMs and using the +Rancher Kubernetes Engine (RKE) to deploy and manage our Kubernetes Cluster and +ingress controller + +.. contents:: + :backlinks: top + :depth: 1 + :local: +.. + +The result at the end of this tutorial will be: + +#. Customization of the cluster.yaml file for ingress controller support + +#. 
Installation and configuration of a test DNS server for ingress host resolution + on testing machines + +#. Installation and configuration of MetalLB (Metal Load Balancer), required for + exposing the ingress service + +#. Installation and configuration of the NGINX ingress controller + +#. Additional info on how to deploy ONAP with services exposed via the Ingress + controller + +Customize cluster.yml file +************************** +Before setting up the cluster for ingress purposes, the DNS cluster IP and the ingress provider +should be configured as follows: + +.. code-block:: yaml + + --- + <...> + restore: + restore: false + snapshot_name: "" + ingress: + provider: none + dns: + provider: coredns + upstreamnameservers: + - <ip_address>:31555 + +Where the <ip_address> should be set to the same IP as the CONTROLPLANE +node. + +For external load balancer purposes, at least one of the worker nodes should be +configured with an external IP address accessible outside the cluster. It can be +done using the following example node configuration: + +.. code-block:: yaml + + --- + <...> + - address: <external_ip> + internal_address: <internal_ip> + port: "22" + role: + - worker + hostname_override: "onap-worker-0" + user: ubuntu + ssh_key_path: "~/.ssh/id_rsa" + <...> + +Where the <external_ip> is the external worker node IP address, and <internal_ip> +is the internal node IP address, if it is required. + + +DNS server configuration and installation +***************************************** +A DNS server deployed on the Kubernetes cluster makes it easy to use services +exposed through the ingress controller, because it resolves all subdomains related to +the ONAP cluster to the load balancer IP. Testing an ONAP cluster requires a lot +of entries in the /etc/hosts of the target machines. Adding many entries into +the configuration files on testing machines is quite problematic and error +prone. The better way is to create a central DNS server with entries for all +virtual hosts pointed to simpledemo.onap.org, and to add this custom DNS server as a +target DNS server for testing machines and/or as an external DNS for the Kubernetes +cluster. + +The DNS server has an automatic installation and configuration script, so installation +is quite easy:: + + > cd kubernetes/contrib/dns-server-for-vhost-ingress-testing + + > ./deploy_dns.sh + +After the DNS deploy you need to set up the DNS entry on the target testing machine. +Because the DNS server listens on a non-standard port, the configuration requires iptables rules +on the target machine. Please follow the configuration proposed by the deploy +scripts. +The exact output depends on the IP addresses and looks like below:: + + DNS server already deployed: + 1. You can add the DNS server to the target machine using following commands: + sudo iptables -t nat -A OUTPUT -p tcp -d 192.168.211.211 --dport 53 -j DNAT --to-destination 10.10.13.14:31555 + sudo iptables -t nat -A OUTPUT -p udp -d 192.168.211.211 --dport 53 -j DNAT --to-destination 10.10.13.14:31555 + sudo sysctl -w net.ipv4.conf.all.route_localnet=1 + sudo sysctl -w net.ipv4.ip_forward=1 + 2. Update /etc/resolv.conf file with nameserver 192.168.211.211 entry on your target machine + + +MetalLB Load Balancer installation and configuration +**************************************************** + +By default, a pure Kubernetes cluster requires an external load balancer if we want +to expose an external port using LoadBalancer settings. For this purpose MetalLB +can be used. Before installing MetalLB you need to ensure that at least one +worker node has an assigned IP address that is accessible outside the cluster.
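+
+For illustration only, the kind of Layer 2 address pool that ends up on the cluster
+can be sketched as the following MetalLB ConfigMap - the namespace, pool name and
+address range here are placeholder assumptions (the install script referenced below
+applies an equivalent configuration for you, and newer MetalLB releases express the
+same thing with IPAddressPool/L2Advertisement custom resources instead):
+
+.. code-block:: yaml
+
+  ---
+  apiVersion: v1
+  kind: ConfigMap
+  metadata:
+    namespace: metallb-system
+    name: config
+  data:
+    config: |
+      address-pools:
+      - name: default
+        protocol: layer2
+        addresses:
+        - 10.12.5.100-10.12.5.120
+
+The address range is a set of free, externally reachable addresses on the same
+network as the externally accessible worker node(s); LoadBalancer services (such as
+the ingress service configured later in this guide) are assigned their external IPs
+from this pool.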
+ +The MetalLB load balancer can be easily installed using an automatic install script:: + + > cd kubernetes/contrib/metallb-loadbalancer-inst + + > ./install-metallb-on-cluster.sh + + +Configuration of the Nginx ingress controller +********************************************* + +After installation of the DNS server and the MetalLB load balancer, we can install and +configure the NGINX ingress controller. +It can be done using the following commands:: + + > cd kubernetes/contrib/ingress-nginx-post-inst + + > kubectl apply -f nginx_ingress_cluster_config.yaml + + > kubectl apply -f nginx_ingress_enable_optional_load_balacer_service.yaml + +After deploying the NGINX ingress controller, you can ensure that the ingress port is +exposed as a load balancer service with an external IP address:: + + > kubectl get svc -n ingress-nginx + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + default-http-backend ClusterIP 10.10.10.10 80/TCP 25h + ingress-nginx LoadBalancer 10.10.10.11 10.12.13.14 80:31308/TCP,443:30314/TCP 24h + + +ONAP with ingress exposed services +********************************** +If you want to deploy onap with services exposed through the ingress controller, you +can use the full onap deploy yaml:: + + > onap/resources/overrides/onap-all-ingress-nginx-vhost.yaml + +Ingress can also be enabled on any onap setup override using the following code: + +.. code-block:: yaml + + --- + <...> + global: + <...> + ingress: + enabled: true + diff --git a/docs/sections/guides/user_guides/oom_user_guide.rst b/docs/sections/guides/user_guides/oom_user_guide.rst new file mode 100644 index 0000000000..c0f4f6ef73 --- /dev/null +++ b/docs/sections/guides/user_guides/oom_user_guide.rst @@ -0,0 +1,651 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright (C) 2022 Nordix Foundation + +.. Links +.. _Curated applications for Kubernetes: https://github.com/kubernetes/charts +.. _Services: https://kubernetes.io/docs/concepts/services-networking/service/ +.. _ReplicaSet: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/ +.. _StatefulSet: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ +.. _Helm Documentation: https://docs.helm.sh/helm/ +.. _Helm: https://docs.helm.sh/ +.. _Kubernetes: https://Kubernetes.io/ +.. _Kubernetes LoadBalancer: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer +.. _user-guide-label: + + +.. _oom_user_guide: + + +OOM User Guide +############## + +.. warning:: + + **THIS PAGE NEEDS TO BE EITHER REWRITTEN OR SOMETHING AS SOME INFO IS NO LONGER RELEVANT** + +The ONAP Operations Manager (OOM) provides the ability to manage the entire +life-cycle of an ONAP installation, from the initial deployment to final +decommissioning. This guide provides instructions for users of ONAP to +use the Kubernetes_/Helm_ system as a complete ONAP management system. + +This guide provides many examples of Helm command line operations. For a +complete description of these commands please refer to the `Helm +Documentation`_. + +.. 
figure:: ../../resources/images/oom_logo/oomLogoV2-medium.png + :align: right + +The following sections describe the life-cycle operations: + +- Deploy_ - with built-in component dependency management +- Configure_ - unified configuration across all ONAP components +- Monitor_ - real-time health monitoring feeding to a Consul UI and Kubernetes +- Heal_- failed ONAP containers are recreated automatically +- Scale_ - cluster ONAP services to enable seamless scaling +- Upgrade_ - change-out containers or configuration with little or no service impact +- Delete_ - cleanup individual containers or entire deployments + +.. figure:: ../../resources/images/oom_logo/oomLogoV2-Deploy.png + :align: right + +Deploy +====== + +The OOM team with assistance from the ONAP project teams, have built a +comprehensive set of Helm charts, yaml files very similar to TOSCA files, that +describe the composition of each of the ONAP components and the relationship +within and between components. Using this model Helm is able to deploy all of +ONAP with a few simple commands. + +Please refer to the :ref:`oom_deploy_guide` for deployment pre-requisites and options + +.. note:: + Refer to the :ref:`oom_customize_overrides` section on how to update overrides.yaml and values.yaml + +.. figure:: ../../resources/images/oom_logo/oomLogoV2-Configure.png + :align: right + +Configure +========= + +Each project within ONAP has its own configuration data generally consisting +of: environment variables, configuration files, and database initial values. +Many technologies are used across the projects resulting in significant +operational complexity and an inability to apply global parameters across the +entire ONAP deployment. OOM solves this problem by introducing a common +configuration technology, Helm charts, that provide a hierarchical +configuration with the ability to override values with higher +level charts or command line options. + +The structure of the configuration of ONAP is shown in the following diagram. +Note that key/value pairs of a parent will always take precedence over those +of a child. Also note that values set on the command line have the highest +precedence of all. + +.. graphviz:: + + digraph config { + { + node [shape=folder] + oValues [label="values.yaml"] + demo [label="onap-demo.yaml"] + prod [label="onap-production.yaml"] + oReq [label="Chart.yaml"] + soValues [label="values.yaml"] + soReq [label="Chart.yaml"] + mdValues [label="values.yaml"] + } + { + oResources [label="resources"] + } + onap -> oResources + onap -> oValues + oResources -> environments + oResources -> oReq + oReq -> so + environments -> demo + environments -> prod + so -> soValues + so -> soReq + so -> charts + charts -> mariadb + mariadb -> mdValues + + } + +The top level onap/values.yaml file contains the values required to be set +before deploying ONAP. Here is the contents of this file: + +.. collapse:: Default ONAP values.yaml + + .. include:: ../../../../kubernetes/onap/values.yaml + :code: yaml + +| + + +One may wish to create a value file that is specific to a given deployment such +that it can be differentiated from other deployments. For example, a +onap-development.yaml file may create a minimal environment for development +while onap-production.yaml might describe a production deployment that operates +independently of the developer version. 
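+
+Several such files can also be layered onto a single deployment. As a sketch only
+(the extra file name is illustrative, and this assumes the OOM deploy plugin passes
+the -f and --set flags through to Helm unchanged), the right-most file, and finally
+any --set arguments, take precedence::
+
+  > helm deploy local/onap -n onap -f onap/resources/environments/onap-production.yaml -f my-site-overrides.yaml --set global.masterPassword=password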
+ +For example, if the production OpenStack instance was different from a +developer's instance, the onap-production.yaml file may contain a different +value for the vnfDeployment/openstack/oam_network_cidr key as shown below. + +.. code-block:: yaml + + nsPrefix: onap + nodePortPrefix: 302 + apps: consul msb mso message-router sdnc vid robot portal policy appc aai + sdc dcaegen2 log cli multicloud clamp vnfsdk aaf kube2msb + dataRootDir: /dockerdata-nfs + + # docker repositories + repository: + onap: nexus3.onap.org:10001 + oom: oomk8s + aai: aaionap + filebeat: docker.elastic.co + + image: + pullPolicy: Never + + # vnf deployment environment + vnfDeployment: + openstack: + ubuntu_14_image: "Ubuntu_14.04.5_LTS" + public_net_id: "e8f51956-00dd-4425-af36-045716781ffc" + oam_network_id: "d4769dfb-c9e4-4f72-b3d6-1d18f4ac4ee6" + oam_subnet_id: "191f7580-acf6-4c2b-8ec0-ba7d99b3bc4e" + oam_network_cidr: "192.168.30.0/24" + <...> + + +To deploy ONAP with this environment file, enter:: + + > helm deploy local/onap -n onap -f onap/resources/environments/onap-production.yaml --set global.masterPassword=password + + +.. collapse:: Example ONAP demo environment values.yaml + + .. include:: ../../resources/yaml/environments_onap_demo.yaml + :code: yaml + +| + +When deploying all of ONAP, the dependencies section of the Chart.yaml file +controls which and what version of the ONAP components are included. +Here is an excerpt of this file: + +.. code-block:: yaml + + dependencies: + <...> + - name: so + version: ~11.0.0 + repository: '@local' + condition: so.enabled + <...> + +The ~ operator in the `so` version value indicates that the latest "11.X.X" +version of `so` shall be used, thus allowing for minor +upgrades that don't impact the so API; hence, version 11.0.1 would be installed +in this case. + +The onap/resources/environments/dev.yaml file (see the excerpt below) enables +fine-grained control over which components are included as part of this +deployment. By changing this `so` line to `enabled: false` the `so` component +will not be deployed. If this change is part of an upgrade the existing `so` +component will be shut down. Other `so` parameters and even `so` child values +can be modified, for example the `so`'s `liveness` probe could be disabled +(which is not recommended as this change would disable auto-healing of `so`). + +.. code-block:: yaml + + ################################################################# + # Global configuration overrides. + # + # These overrides will affect all helm charts (ie. applications) + # that are listed below and are 'enabled'. + ################################################################# + global: + <...> + + ################################################################# + # Enable/disable and configure helm charts (ie. applications) + # to customize the ONAP deployment. + ################################################################# + aaf: + enabled: false + <...> + so: # Service Orchestrator + enabled: true + + replicaCount: 1 + + liveness: + # necessary to disable liveness probe when setting breakpoints + # in debugger so K8s doesn't restart unresponsive container + enabled: true + + <...> + +Accessing the ONAP Portal using OOM and a Kubernetes Cluster +------------------------------------------------------------ + +The ONAP deployment created by OOM operates in a private IP network that isn't +publicly accessible (i.e. OpenStack VMs with private internal network) which +blocks access to the ONAP Portal.
To enable direct access to this Portal from a +user's own environment (a laptop etc.) the portal application's port 8989 is +exposed through a `Kubernetes LoadBalancer`_ object. + +Typically, to be able to access the Kubernetes nodes publicly a public address +is assigned. In OpenStack this is a floating IP address. + +When the `portal-app` chart is deployed a Kubernetes service is created that +instantiates a load balancer. The LB chooses the private interface of one of +the nodes as in the example below (10.0.0.4 is private to the K8s cluster only). +Then to be able to access the portal on port 8989 from outside the K8s & +OpenStack environment, the user needs to assign/get the floating IP address that +corresponds to the private IP as follows:: + + > kubectl -n onap get services|grep "portal-app" + portal-app LoadBalancer 10.43.142.201 10.0.0.4 8989:30215/TCP,8006:30213/TCP,8010:30214/TCP 1d app=portal-app,release=dev + + +In this example, use the 10.0.0.4 private address as a key to find the +corresponding public address, which in this example is 10.12.6.155. If you're +using OpenStack you'll do the lookup with the horizon GUI or the OpenStack CLI +for your tenant (openstack server list). That IP is then used in your +`/etc/hosts` to map the fixed DNS aliases required by the ONAP Portal as shown +below:: + + 10.12.6.155 portal.api.simpledemo.onap.org + 10.12.6.155 vid.api.simpledemo.onap.org + 10.12.6.155 sdc.api.fe.simpledemo.onap.org + 10.12.6.155 sdc.workflow.plugin.simpledemo.onap.org + 10.12.6.155 sdc.dcae.plugin.simpledemo.onap.org + 10.12.6.155 portal-sdk.simpledemo.onap.org + 10.12.6.155 policy.api.simpledemo.onap.org + 10.12.6.155 aai.api.sparky.simpledemo.onap.org + 10.12.6.155 cli.api.simpledemo.onap.org + 10.12.6.155 msb.api.discovery.simpledemo.onap.org + 10.12.6.155 msb.api.simpledemo.onap.org + 10.12.6.155 clamp.api.simpledemo.onap.org + 10.12.6.155 so.api.simpledemo.onap.org + +Ensure you've disabled any proxy settings in the browser you are using to access +the portal, and then simply access the new ssl-encrypted URL: +``https://portal.api.simpledemo.onap.org:30225/ONAPPORTAL/login.htm`` + +.. note:: + When using the HTTPS based Portal URL the Browser needs to be configured to accept + unsecure credentials. + Additionally when opening an Application inside the Portal, the Browser + might block the content, which requires disabling the blocking and reloading + of the page + +.. note:: + Besides the ONAP Portal the Components can deliver additional user interfaces, + please check the Component specific documentation. + +.. note:: + + | Alternatives Considered: + + - Kubernetes port forwarding was considered but discarded as it would + require the end user to run a script that opens up port forwarding tunnels + to each of the pods that provides a portal application widget. + + - Reverting to a VNC server similar to what was deployed in the Amsterdam + release was also considered but there were many issues with resolution, + lack of volume mount, /etc/hosts dynamic update, file upload that were + a tall order to solve in time for the Beijing release. + + Observations: + + - If you are not using floating IPs in your Kubernetes deployment and + directly attaching a public IP address (i.e. by using your public provider + network) to your K8S Node VMs' network interface, then the output of + 'kubectl -n onap get services | grep "portal-app"' + will show your public IP instead of the private network's IP.
Therefore, + you can grab this public IP directly (as compared to trying to find the + floating IP first) and map this IP in /etc/hosts. + +.. figure:: ../../resources/images/oom_logo/oomLogoV2-Monitor.png + :align: right + +Monitor +======= + +All highly available systems include at least one facility to monitor the +health of components within the system. Such health monitors are often used as +inputs to distributed coordination systems (such as etcd, Zookeeper, or Consul) +and monitoring systems (such as Nagios or Zabbix). OOM provides two mechanisms +to monitor the real-time health of an ONAP deployment: + +- a Consul GUI for a human operator or downstream monitoring systems and + Kubernetes liveness probes that enable automatic healing of failed + containers, and +- a set of liveness probes which feed into the Kubernetes manager which + are described in the Heal section. + +Within ONAP, Consul is the monitoring system of choice and deployed by OOM in +two parts: + +- a three-way, centralized Consul server cluster is deployed as a highly + available monitor of all of the ONAP components, and +- a number of Consul agents. + +The Consul server provides a user interface that allows a user to graphically +view the current health status of all of the ONAP components for which agents +have been created - a sample from the ONAP Integration labs follows: + +.. figure:: ../../resources/images/consul/consulHealth.png + :align: center + +To see the real-time health of a deployment go to: ``http://:30270/ui/`` +where a GUI much like the following will be found: + +.. note:: + If Consul GUI is not accessible, you can refer this + `kubectl port-forward `_ method to access an application + +.. figure:: ../../resources/images/oom_logo/oomLogoV2-Heal.png + :align: right + +Heal +==== + +The ONAP deployment is defined by Helm charts as mentioned earlier. These Helm +charts are also used to implement automatic recoverability of ONAP components +when individual components fail. Once ONAP is deployed, a "liveness" probe +starts checking the health of the components after a specified startup time. + +Should a liveness probe indicate a failed container it will be terminated and a +replacement will be started in its place - containers are ephemeral. Should the +deployment specification indicate that there are one or more dependencies to +this container or component (for example a dependency on a database) the +dependency will be satisfied before the replacement container/component is +started. This mechanism ensures that, after a failure, all of the ONAP +components restart successfully. + +To test healing, the following command can be used to delete a pod:: + + > kubectl delete pod [pod name] -n [pod namespace] + +One could then use the following command to monitor the pods and observe the +pod being terminated and the service being automatically healed with the +creation of a replacement pod:: + + > kubectl get pods --all-namespaces -o=wide + +.. figure:: ../../resources/images/oom_logo/oomLogoV2-Scale.png + :align: right + +Scale +===== + +Many of the ONAP components are horizontally scalable which allows them to +adapt to expected offered load. During the Beijing release scaling is static, +that is during deployment or upgrade a cluster size is defined and this cluster +will be maintained even in the presence of faults. The parameter that controls +the cluster size of a given component is found in the values.yaml file for that +component. Here is an excerpt that shows this parameter: + +.. 
code-block:: yaml + + # default number of instances + replicaCount: 1 + +In order to change the size of a cluster, an operator could use a helm upgrade +(described in detail in the next section) as follows:: + + > helm upgrade [RELEASE] [CHART] [flags] + +The RELEASE argument can be obtained from the following command:: + + > helm list + +Below is the example for the same:: + + > helm list + NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE + dev 1 Wed Oct 14 13:49:52 2020 DEPLOYED onap-11.0.0 Kohn onap + dev-cassandra 5 Thu Oct 15 14:45:34 2020 DEPLOYED cassandra-11.0.0 onap + dev-contrib 1 Wed Oct 14 13:52:53 2020 DEPLOYED contrib-11.0.0 onap + dev-mariadb-galera 1 Wed Oct 14 13:55:56 2020 DEPLOYED mariadb-galera-11.0.0 onap + +Here the Name column shows the RELEASE NAME, In our case we want to try the +scale operation on cassandra, thus the RELEASE NAME would be dev-cassandra. + +Now we need to obtain the chart name for cassandra. Use the below +command to get the chart name:: + + > helm search cassandra + +Below is the example for the same:: + + > helm search cassandra + NAME CHART VERSION APP VERSION DESCRIPTION + local/cassandra 11.0.0 ONAP cassandra + local/portal-cassandra 11.0.0 Portal cassandra + local/aaf-cass 11.0.0 ONAP AAF cassandra + local/sdc-cs 11.0.0 ONAP Service Design and Creation Cassandra + +Here the Name column shows the chart name. As we want to try the scale +operation for cassandra, thus the corresponding chart name is local/cassandra + + +Now we have both the command's arguments, thus we can perform the +scale operation for cassandra as follows:: + + > helm upgrade dev-cassandra local/cassandra --set replicaCount=3 + +Using this command we can scale up or scale down the cassandra db instances. + + +The ONAP components use Kubernetes provided facilities to build clustered, +highly available systems including: Services_ with load-balancers, ReplicaSet_, +and StatefulSet_. Some of the open-source projects used by the ONAP components +directly support clustered configurations, for example ODL and MariaDB Galera. + +The Kubernetes Services_ abstraction to provide a consistent access point for +each of the ONAP components, independent of the pod or container architecture +of that component. For example, SDN-C uses OpenDaylight clustering with a +default cluster size of three but uses a Kubernetes service to and change the +number of pods in this abstract this cluster from the other ONAP components +such that the cluster could change size and this change is isolated from the +other ONAP components by the load-balancer implemented in the ODL service +abstraction. + +A ReplicaSet_ is a construct that is used to describe the desired state of the +cluster. For example 'replicas: 3' indicates to Kubernetes that a cluster of 3 +instances is the desired state. Should one of the members of the cluster fail, +a new member will be automatically started to replace it. + +Some of the ONAP components many need a more deterministic deployment; for +example to enable intra-cluster communication. For these applications the +component can be deployed as a Kubernetes StatefulSet_ which will maintain a +persistent identifier for the pods and thus a stable network id for the pods. +For example: the pod names might be web-0, web-1, web-{N-1} for N 'web' pods +with corresponding DNS entries such that intra service communication is simple +even if the pods are physically distributed across multiple nodes. 
An example +of how these capabilities can be used is described in the Running Consul on +Kubernetes tutorial. + +.. figure:: ../../resources/images/oom_logo/oomLogoV2-Upgrade.png + :align: right + +Upgrade +======= + +Helm has built-in capabilities to enable the upgrade of pods without causing a +loss of the service being provided by that pod or pods (if configured as a +cluster). As described in the OOM Developer's Guide, ONAP components provide +an abstracted 'service' end point with the pods or containers providing this +service hidden from other ONAP components by a load balancer. This capability +is used during upgrades to allow a pod with a new image to be added to the +service before removing the pod with the old image. This 'make before break' +capability ensures minimal downtime. + +Prior to doing an upgrade, determine of the status of the deployed charts:: + + > helm list + NAME REVISION UPDATED STATUS CHART NAMESPACE + so 1 Mon Feb 5 10:05:22 2020 DEPLOYED so-11.0.0 onap + +When upgrading a cluster a parameter controls the minimum size of the cluster +during the upgrade while another parameter controls the maximum number of nodes +in the cluster. For example, SNDC configured as a 3-way ODL cluster might +require that during the upgrade no fewer than 2 pods are available at all times +to provide service while no more than 5 pods are ever deployed across the two +versions at any one time to avoid depleting the cluster of resources. In this +scenario, the SDNC cluster would start with 3 old pods then Kubernetes may add +a new pod (3 old, 1 new), delete one old (2 old, 1 new), add two new pods (2 +old, 3 new) and finally delete the 2 old pods (3 new). During this sequence +the constraints of the minimum of two pods and maximum of five would be +maintained while providing service the whole time. + +Initiation of an upgrade is triggered by changes in the Helm charts. For +example, if the image specified for one of the pods in the SDNC deployment +specification were to change (i.e. point to a new Docker image in the nexus3 +repository - commonly through the change of a deployment variable), the +sequence of events described in the previous paragraph would be initiated. + +For example, to upgrade a container by changing configuration, specifically an +environment value:: + + > helm upgrade so onap/so --version 11.0.1 --set enableDebug=true + +Issuing this command will result in the appropriate container being stopped by +Kubernetes and replaced with a new container with the new environment value. + +To upgrade a component to a new version with a new configuration file enter:: + + > helm upgrade so onap/so --version 11.0.1 -f environments/demo.yaml + +To fetch release history enter:: + + > helm history so + REVISION UPDATED STATUS CHART DESCRIPTION + 1 Mon Jul 5 10:05:22 2022 SUPERSEDED so-11.0.0 Install complete + 2 Mon Jul 5 10:10:55 2022 DEPLOYED so-11.0.1 Upgrade complete + +Unfortunately, not all upgrades are successful. In recognition of this the +lineup of pods within an ONAP deployment is tagged such that an administrator +may force the ONAP deployment back to the previously tagged configuration or to +a specific configuration, say to jump back two steps if an incompatibility +between two ONAP components is discovered after the two individual upgrades +succeeded. + +This rollback functionality gives the administrator confidence that in the +unfortunate circumstance of a failed upgrade the system can be rapidly brought +back to a known good state. 
This process of rolling upgrades while under +service is illustrated in this short YouTube video showing a Zero Downtime +Upgrade of a web application while under a 10 million transaction per second +load. + +For example, to roll-back back to previous system revision enter:: + + > helm rollback so 1 + + > helm history so + REVISION UPDATED STATUS CHART DESCRIPTION + 1 Mon Jul 5 10:05:22 2022 SUPERSEDED so-11.0.0 Install complete + 2 Mon Jul 5 10:10:55 2022 SUPERSEDED so-11.0.1 Upgrade complete + 3 Mon Jul 5 10:14:32 2022 DEPLOYED so-11.0.0 Rollback to 1 + +.. note:: + + The description field can be overridden to document actions taken or include + tracking numbers. + +Many of the ONAP components contain their own databases which are used to +record configuration or state information. The schemas of these databases may +change from version to version in such a way that data stored within the +database needs to be migrated between versions. If such a migration script is +available it can be invoked during the upgrade (or rollback) by Container +Lifecycle Hooks. Two such hooks are available, PostStart and PreStop, which +containers can access by registering a handler against one or both. Note that +it is the responsibility of the ONAP component owners to implement the hook +handlers - which could be a shell script or a call to a specific container HTTP +endpoint - following the guidelines listed on the Kubernetes site. Lifecycle +hooks are not restricted to database migration or even upgrades but can be used +anywhere specific operations need to be taken during lifecycle operations. + +OOM uses Helm K8S package manager to deploy ONAP components. Each component is +arranged in a packaging format called a chart - a collection of files that +describe a set of k8s resources. Helm allows for rolling upgrades of the ONAP +component deployed. To upgrade a component Helm release you will need an +updated Helm chart. The chart might have modified, deleted or added values, +deployment yamls, and more. To get the release name use:: + + > helm ls + +To easily upgrade the release use:: + + > helm upgrade [RELEASE] [CHART] + +To roll back to a previous release version use:: + + > helm rollback [flags] [RELEASE] [REVISION] + +For example, to upgrade the onap-so helm release to the latest SO container +release v1.1.2: + +- Edit so values.yaml which is part of the chart +- Change "so: nexus3.onap.org:10001/openecomp/so:v1.1.1" to + "so: nexus3.onap.org:10001/openecomp/so:v1.1.2" +- From the chart location run:: + + > helm upgrade onap-so + +The previous so pod will be terminated and a new so pod with an updated so +container will be created. + +.. figure:: ../../resources/images/oom_logo/oomLogoV2-Delete.png + :align: right + +Delete +====== + +Existing deployments can be partially or fully removed once they are no longer +needed. To minimize errors it is recommended that before deleting components +from a running deployment the operator perform a 'dry-run' to display exactly +what will happen with a given command prior to actually deleting anything. +For example:: + + > helm undeploy onap --dry-run + +will display the outcome of deleting the 'onap' release from the +deployment. +To completely delete a release and remove it from the internal store enter:: + + > helm undeploy onap + +Once complete undeploy is done then delete the namespace as well +using following command:: + + > kubectl delete namespace + +.. 
note:: + You need to provide the namespace name which you used during deployment, + below is the example:: + + > kubectl delete namespace onap + +One can also remove individual components from a deployment by changing the +ONAP configuration values. For example, to remove `so` from a running +deployment enter:: + + > helm undeploy onap-so + +will remove `so` as the configuration indicates it's no longer part of the +deployment. This might be useful if a one wanted to replace just `so` by +installing a custom version. diff --git a/docs/sections/oom_project_description.rst b/docs/sections/oom_project_description.rst new file mode 100644 index 0000000000..404a410931 --- /dev/null +++ b/docs/sections/oom_project_description.rst @@ -0,0 +1,106 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 +.. International License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright 2018-2020 Amdocs, Bell Canada, Orange, Samsung +.. Modification copyright (C) 2022 Nordix Foundation + +.. _oom_project_description: + +ONAP Operations Manager Project +############################### + +.. warning:: + + THIS PAGE PROB NEEDS A REWRITE ALSO + +The ONAP Operations Manager (OOM) is responsible for life-cycle management of +the ONAP platform itself; components such as SO, SDNC, etc. It is not +responsible for the management of services, VNFs or infrastructure instantiated +by ONAP or used by ONAP to host such services or VNFs. OOM uses the open-source +Kubernetes container management system as a means to manage the Docker +containers that compose ONAP where the containers are hosted either directly on +bare-metal servers or on VMs hosted by a 3rd party management system. OOM +ensures that ONAP is easily deployable and maintainable throughout its life +cycle while using hardware resources efficiently. + +.. figure:: resources/images/oom_logo/oomLogoV2-medium.png + :align: right + +In summary OOM provides the following capabilities: + +- **Deploy** - with built-in component dependency management +- **Configure** - unified configuration across all ONAP components +- **Monitor** - real-time health monitoring feeding to a Consul UI and + Kubernetes +- **Heal**- failed ONAP containers are recreated automatically +- **Scale** - cluster ONAP services to enable seamless scaling +- **Upgrade** - change-out containers or configuration with little or no + service impact +- **Delete** - cleanup individual containers or entire deployments + +OOM supports a wide variety of Kubernetes private clouds - built with Rancher, +Kubeadm or Cloudify - and public cloud infrastructures such as: Microsoft +Azure, Amazon AWS, Google GCD, VMware VIO, and OpenStack. + +The OOM documentation is broken into four different areas each targeted at a +different user: + +- :ref:`oom_dev_guide` - a guide for developers of OOM +- :ref:`oom_infra_setup_guide` - a guide for those setting up the environments that OOM will use +- :ref:`oom_deploy_guide` - a guide for those deploying OOM on an existing cloud +- :ref:`oom_user_guide` - a guide for operators of an OOM instance +- :ref:`oom_access_info_guide` - a guide for operators who require access to OOM applications + + + +The :ref:`release_notes` for OOM describe the incremental features per release. + +Component Orchestration Overview +================================ +Multiple technologies, templates, and extensible plug-in frameworks are used in +ONAP to orchestrate platform instances of software component artifacts. 
A few +standard configurations are provide that may be suitable for test, development, +and some production deployments by substitution of local or platform wide +parameters. Larger and more automated deployments may require integration the +component technologies, templates, and frameworks with a higher level of +automated orchestration and control software. Design guidelines are provided to +insure the component level templates and frameworks can be easily integrated +and maintained. The following diagram provides an overview of these with links +to examples and templates for describing new ones. + +.. graphviz:: + + digraph COO { + rankdir="LR"; + + { + node [shape=folder] + oValues [label="values"] + cValues [label="values"] + comValues [label="values"] + sValues [label="values"] + oCharts [label="charts"] + cCharts [label="charts"] + comCharts [label="charts"] + sCharts [label="charts"] + blueprint [label="TOSCA blueprint"] + } + {oom [label="ONAP Operations Manager"]} + {hlo [label="High Level Orchestrator"]} + + + hlo -> blueprint + hlo -> oom + oom -> oValues + oom -> oCharts + oom -> component + oom -> common + common -> comValues + common -> comCharts + component -> cValues + component -> cCharts + component -> subcomponent + subcomponent -> sValues + subcomponent -> sCharts + blueprint -> component + } diff --git a/docs/sections/release_notes/release-notes-amsterdam.rst b/docs/sections/release_notes/release-notes-amsterdam.rst new file mode 100644 index 0000000000..6fc229696c --- /dev/null +++ b/docs/sections/release_notes/release-notes-amsterdam.rst @@ -0,0 +1,75 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright 2017 Bell Canada & Amdocs Intellectual Property. All rights +.. reserved. +.. _release_notes_amsterdam: + +:orphan: + +ONAP Operations Manager Release Notes +===================================== + +Version: 1.1.0 +-------------- + +:Release Date: 2017-11-16 + +**New Features** + +The Amsterdam release is the first release of the ONAP Operations Manager +(OOM). + +The main goal of the Amsterdam release was to: + + - Support Flexible Platform Deployment via Kubernetes of fully + containerized ONAP components - on any type of environment. + - Support State Management of ONAP platform components. + - Support full production ONAP deployment and any variation of component + level deployment for development. + - Platform Operations Orchestration / Control Loop Actions. + - Platform centralized logging with ELK stack. + +**Bug Fixes** + + The full list of implemented user stories and epics is available on + `JIRA `_ + This is the first release of OOM, the defects fixed in this release were + raised during the course of the release. + Anything not closed is captured below under Known Issues. If you want to + review the defects fixed in the Amsterdam release, refer to Jira link + above. + +**Known Issues** + - `OOM-6 `_ Automated platform deployment on Docker/Kubernetes + + VFC, AAF, MSB minor issues. + + Workaround: Manual configuration changes - however the reference + vFirewall use case does not currently require these components. + + - `OOM-10 `_ Platform configuration management. + + OOM ONAP Configuration Management - Handling of Secrets. + + Workaround: Automated workaround to be able to pull from protected + docker repositories. 
+ + +**Security Issues** + N/A + + +**Upgrade Notes** + + N/A + +**Deprecation Notes** + + N/A + +**Other** + + N/A + +End of Release Notes diff --git a/docs/sections/release_notes/release-notes-beijing.rst b/docs/sections/release_notes/release-notes-beijing.rst new file mode 100644 index 0000000000..84f86c100d --- /dev/null +++ b/docs/sections/release_notes/release-notes-beijing.rst @@ -0,0 +1,427 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright 2017 Bell Canada & Amdocs Intellectual Property. All rights +.. reserved. +.. _release_notes_beijing: + +:orphan: + +ONAP Operations Manager Release Notes +===================================== + +Version 2.0.0 Beijing Release +----------------------------- + +:Release Date: 2018-06-07 + +Previous Release Notes +********************** + +- :ref:`Amsterdam ` + +Epic +**** + +* [`OOM-6 `_] - Automated platform deployment on Docker/Kubernetes +* [`OOM-7 `_] - Platform monitoring and auto-healing +* [`OOM-8 `_] - Automated platform scalability +* [`OOM-9 `_] - Platform upgradability & rollbacks +* [`OOM-10 `_] - Platform configuration management +* [`OOM-46 `_] - Platform infrastructure deployment with TOSCA +* [`OOM-109 `_] - Platform Centralized Logging +* [`OOM-138 `_] - Using Optimization framework +* [`OOM-346 `_] - Platform Resiliency (including Recoverability, High-Availability, Backup/Restore, Geo-Redundancy) +* [`OOM-376 `_] - ONAP deployment options standardization +* [`OOM-486 `_] - HELM upgrade from 2.3 to 2.8.0 +* [`OOM-535 `_] - Upgrade Kubernetes from 1.8.6 to 1.9.2 +* [`OOM-590 `_] - OOM Wiki documentation of deployment options + +Story +***** + +* [`OOM-11 `_] - Add AAF containers to ONAP Kubernetes +* [`OOM-13 `_] - Add CLI containers to ONAP Kubernetes +* [`OOM-15 `_] - Add DMAAP containers to ONAP Kubernetes +* [`OOM-20 `_] - State Monitoring: MSO/mso +* [`OOM-21 `_] - State Monitoring: A&AI/aai-service +* [`OOM-22 `_] - State Monitoring: SDNC/sdc-be +* [`OOM-24 `_] - State Monitoring: message-router +* [`OOM-25 `_] - State Monitoring: MSB +* [`OOM-29 `_] - State Monitoring: VID +* [`OOM-31 `_] - State Monitoring: APPC/dbhost +* [`OOM-32 `_] - State Monitoring: VFC +* [`OOM-33 `_] - State Monitoring: Multi-VIM +* [`OOM-34 `_] - Auto-Restart on failure: ... 
+* [`OOM-35 `_] - State Monitoring: A&AI/hbase +* [`OOM-36 `_] - State Monitoring: A&AI/model-loader-service +* [`OOM-37 `_] - State Monitoring: APPC/dgbuilder +* [`OOM-38 `_] - State Monitoring: APPC/sdnctldb01 +* [`OOM-39 `_] - State Monitoring: APPC/sdnctldb02 +* [`OOM-40 `_] - State Monitoring: APPC/sdnhost +* [`OOM-41 `_] - State Monitoring: MSO/mariadb +* [`OOM-42 `_] - State Monitoring: SDNC/dbhost +* [`OOM-43 `_] - State Monitoring: SDNC/sdnc-dgbuilder +* [`OOM-44 `_] - State Monitoring: SDNC/sdnc-portal +* [`OOM-45 `_] - State Monitoring: SDNC/sdnctldb01 +* [`OOM-51 `_] - OOM ONAP Configuration Management - Externalize hardwired values +* [`OOM-52 `_] - OOM ONAP Configuration Management - Parameterization of docker images +* [`OOM-53 `_] - OOM ONAP Configuration Management - Parameterization for Sizing +* [`OOM-63 `_] - Kubernetes cluster created by TOSCA description +* [`OOM-85 `_] - Test the code in the "Lab" project environment +* [`OOM-86 `_] - Monitoring the health status of ONAP components +* [`OOM-87 `_] - Configure TOSCA description via dashboard +* [`OOM-88 `_] - Deploy Holmes on K8S cluster by TOSCA description +* [`OOM-89 `_] - Deploy CLAMP on K8S cluster by TOSCA description +* [`OOM-91 `_] - Deploy MSB on K8S cluster by TOSCA description +* [`OOM-92 `_] - Deploy AAF on K8S cluster by TOSCA description +* [`OOM-93 `_] - Deploy VF-C on K8S cluster by TOSCA description +* [`OOM-94 `_] - Deploy Multi-VIM on K8S cluster by TOSCA description +* [`OOM-95 `_] - Deploy DCAEGen2 on K8S cluster by TOSCA description +* [`OOM-96 `_] - Deploy AAI on K8S cluster by TOSCA description +* [`OOM-97 `_] - Deploy APPC on K8S cluster by TOSCA description +* [`OOM-98 `_] - Deploy MSO on K8S cluster by TOSCA description +* [`OOM-99 `_] - Deploy Policy on K8S cluster by TOSCA description +* [`OOM-100 `_] - Deploy SDC on K8S cluster by TOSCA description +* [`OOM-102 `_] - Deploy VID on K8S cluster by TOSCA description +* [`OOM-110 `_] - OOM ONAP Logging - Elastic Stack components deployment +* [`OOM-111 `_] - OOM ONAP Logging - FileBeat deployment aside ONAP components +* [`OOM-112 `_] - OOM ONAP Logging - Configuration of all ONAP components to emit canonical logs +* [`OOM-116 `_] - ignore intellj files +* [`OOM-145 `_] - update directory path from dockerdata-nfs to configured directory name (make it configurable) +* [`OOM-235 `_] - Service endpoint annotation for Usecase UI +* [`OOM-242 `_] - Modify DCAE seed for Helm +* [`OOM-262 `_] - Remove "oneclick" kubectl scripts. +* [`OOM-265 `_] - Top level helm chart for ONAP +* [`OOM-268 `_] - Persist and externalize database directories via persistent volumes +* [`OOM-271 `_] - Copy app config files from source +* [`OOM-272 `_] - Set application environment variables from source +* [`OOM-277 `_] - add automatic ONAP config parameter substitution +* [`OOM-280 `_] - MSB automatically re-synch service data on restart. +* [`OOM-292 `_] - Expose LOG Volume via /dockerdata-nfs +* [`OOM-293 `_] - OOM ONAP Configuration Management - Handling of Secrets +* [`OOM-298 `_] - Provide script to cleanup configuration data created by createConfig.sh +* [`OOM-322 `_] - Clean-up config files that are generated at system startup +* [`OOM-341 `_] - Provide an example of a partial deployment of ONAP components (e.g. no VFC) +* [`OOM-342 `_] - Add pointer to Wiki page on the readme file. 
+* [`OOM-344 `_] - Break the configuration tarball per appplication +* [`OOM-345 `_] - Re-validate # of containers and configuration for DCAEgen2 +* [`OOM-356 `_] - Add 'Usecase UI' containers to ONAP Kubernetes +* [`OOM-359 `_] - SDC logback chef failure +* [`OOM-375 `_] - F2F: ONAP/OOM for Developers +* [`OOM-382 `_] - Robot Version 1.1 OpenO tests +* [`OOM-406 `_] - In Kubernetes 1.8, the annotations are no longer supported and must be converted to the PodSpec field. +* [`OOM-457 `_] - In Kubernetes 1.8, init-container annotations to be converted to PodSpec field for aaf, clamp and vfc +* [`OOM-460 `_] - Segregating configuration of ONAP components +* [`OOM-476 `_] - Parameterize values.yaml docker image repos into global config variables +* [`OOM-528 `_] - Confirm k8s context with a prompt for deleteAll.bash +* [`OOM-534 `_] - Need to provide support for creating different sized OOM deployments +* [`OOM-546 `_] - Provide option to collect ONAP env details for issue investigations +* [`OOM-569 `_] - Investigate containerizing Cloudify Manager +* [`OOM-579 `_] - Document a Cloudify deployment of OOM Beijing +* [`OOM-633 `_] - Provide direct access to ONAP Portal without the need to use VNC +* [`OOM-677 `_] - Update all source code files with the Apache 2 License header +* [`OOM-678 `_] - Enforce MSB dockers dependencies using init-container +* [`OOM-681 `_] - updating docker images/components to latest code +* [`OOM-682 `_] - deployment of sdc workflow designer +* [`OOM-695 `_] - Improve Readiness-check prob +* [`OOM-722 `_] - OOM - Run all ONAP components in one namespace +* [`OOM-725 `_] - Use Blueprint to install Helm and k8s dashboard while creating k8s cluster +* [`OOM-727 `_] - Add Standardized Configuration to SO +* [`OOM-728 `_] - Add Standardized Configuration to ROBOT +* [`OOM-729 `_] - Add Standardized Configuration to VID +* [`OOM-730 `_] - Add Standardized Configuration to Consul +* [`OOM-731 `_] - Add Standardized Configuration to DMaaP Message Router +* [`OOM-732 `_] - Add Standardized Configuration to AAF +* [`OOM-733 `_] - Add Standardized Configuration to APPC +* [`OOM-734 `_] - Add Standardized Configuration to AAI +* [`OOM-735 `_] - Add Standardized Configuration to CLAMP +* [`OOM-736 `_] - Add Standardized Configuration to CLI +* [`OOM-737 `_] - Add Standardized Configuration to DCAEGEN2 +* [`OOM-738 `_] - Add Standardized Configuration to ESR +* [`OOM-739 `_] - Add Standardized Configuration to KUBE2MSB +* [`OOM-740 `_] - Add Standardized Configuration to LOG +* [`OOM-742 `_] - Add Standardized Configuration to MSB +* [`OOM-743 `_] - Replace deprecated MSO Helm Chart with Standardized SO Helm Chart +* [`OOM-744 `_] - Add Standardized Configuration to MULTICLOUD +* [`OOM-746 `_] - Add Standardized Configuration to PORTAL +* [`OOM-747 `_] - Add Standardized Configuration to SDC +* [`OOM-748 `_] - Add Standardized Configuration to SDNC +* [`OOM-749 `_] - Add Standardized Configuration to UUI +* [`OOM-750 `_] - Add Standardized Configuration to VFC +* [`OOM-751 `_] - Add Standardized Configuration to VNFSDK +* [`OOM-758 `_] - Common Mariadb Galera Helm Chart to be reused by many applications +* [`OOM-771 `_] - OOM - update master with new policy db deployment +* [`OOM-777 `_] - Add Standardized Configuration Helm Starter Chart +* [`OOM-779 `_] - OOM APPC ODL (MDSAL) persistent storage +* [`OOM-780 `_] - Update MSO to latest working version. 
+* [`OOM-786 `_] - Re-add support for multiple instances of ONAP +* [`OOM-788 `_] - Abstract docker secrets +* [`OOM-789 `_] - Abstract cluster role binding +* [`OOM-811 `_] - Make kube2msb use secret instead of passing token as environment variable +* [`OOM-822 `_] - Update Documentation for the Beijing Release +* [`OOM-823 `_] - Add CDT image to APPC chart +* [`OOM-827 `_] - Add quick start documentation README +* [`OOM-828 `_] - Remove oneclick scripts +* [`OOM-857 `_] - kube2msb fails to start +* [`OOM-914 `_] - Add LOG component robot healthcheck +* [`OOM-960 `_] - OOM Healthcheck lockdown - currently 32/39 : 20180421 +* [`OOM-979 `_] - Enhance OOM TOSCA solution to support standardized Helm Chart +* [`OOM-1006 `_] - VNFSDK healthcheck fails +* [`OOM-1073 `_] - Change the Repository location in the image oomk8s/config-init:2.0.0-SNAPSHOT +* [`OOM-1078 `_] - Update Kubectl, docker, helm version + +Task +**** + +* [`OOM-57 `_] - Agree on configuration contract/YAML with each of the project teams +* [`OOM-105 `_] - TOSCA based orchestration demo +* [`OOM-257 `_] - DevOps: OOM config reset procedure for new /dockerdata-nfs content +* [`OOM-305 `_] - Rename MSO to SO in OOM +* [`OOM-332 `_] - Add AAI filebeat container - blocked by LOG-67 +* [`OOM-428 `_] - Add log container healthcheck to mark failed creations - see OOM-427 +* [`OOM-429 `_] - DOC: Document HELM server version 2.7.2 required for tpl usage +* [`OOM-489 `_] - Update values.yaml files for tag name changes for docker images and versions. +* [`OOM-543 `_] - SDNC adjust docker pullPolicy to IfNotPresent to speed up initial deployment slowdown introduced by SDNC-163 +* [`OOM-604 `_] - Update OOM and HEAT AAI sparky master from v1.1.0 to v1.1.1 - match INT-288 +* [`OOM-614 `_] - SDC, SDNC, AAI Healthcheck failures last 12 hours 20180124:1100EST +* [`OOM-624 `_] - CII security badging: cleartext password for keystone and docker repo creds +* [`OOM-726 `_] - Mirror AAI docker version changes into OOM from AAI-791 +* [`OOM-772 `_] - Remove old DCAE from Release +* [`OOM-801 `_] - Policy docker images rename - key off new name in POLICY-674 +* [`OOM-810 `_] - Improve emsdriver code +* [`OOM-819 `_] - expose log/logstash 5044 as nodeport for external log producers outside of the kubernetes cluster +* [`OOM-820 `_] - Bypass vnc-portal for ONAP portal access +* [`OOM-943 `_] - Upgrade prepull_docker.sh to work with new helm based master refactor - post OOM-328 +* [`OOM-947 `_] - Update AAI to latest images +* [`OOM-975 `_] - Notes are missing in multicloud +* [`OOM-1031 `_] - Config Changes for consul to make vid, so, log health checks pass +* [`OOM-1032 `_] - Making consul Stateful +* [`OOM-1122 `_] - Update APPC OOM chart to use Beijing release artifacts + +Bug +*** + +* [`OOM-4 `_] - deleteAll.bash fails to properly delete services and ports +* [`OOM-153 `_] - test - Sample Bug +* [`OOM-212 `_] - deleteAll script does not have an option to delete the services +* [`OOM-215 `_] - configure_app for helm apps is not correct +* [`OOM-218 `_] - createConfig.sh needs a chmod 755 in release-1.0.0 only +* [`OOM-239 `_] - mso.tar created in dockerdatanfs +* [`OOM-258 `_] - AAI logs are not being written outside the pods +* [`OOM-282 `_] - vnc-portal requires /etc/hosts url fix for SDC sdc.ui should be sdc.api +* [`OOM-283 `_] - No longer able to deploy instances in specified namespace +* [`OOM-290 `_] - config_init pod fails when /dockerdata-nfs is nfs-mounted +* [`OOM-300 `_] - cat: /config-init/onap/mso/mso/encryption.key: No such file or 
directory +* [`OOM-333 `_] - vfc-workflow fails [VFC BUG] - fixed - 20180117 vfc-ztevnfmdriver has docker pull issue +* [`OOM-334 `_] - Change kubernetes startup user +* [`OOM-351 `_] - Apply standard convention across the "template deployment YML" file +* [`OOM-352 `_] - failed to start VFC containers +* [`OOM-363 `_] - DCAE tests NOK with Robot E2E tests +* [`OOM-366 `_] - certificates in consul agent config are not in the right directory +* [`OOM-389 `_] - sdc-be and sdc-fe do not initialize correctly on latest master +* [`OOM-409 `_] - Update Vid yaml file to point to the ONAPPORTAL URL +* [`OOM-413 `_] - In portal VNC pod refresh /etc/hosts entries +* [`OOM-414 `_] - MSB Healtcheck failure on $*_ENDPOINT variables +* [`OOM-424 `_] - DCAE installation is not possible today +* [`OOM-430 `_] - Portal healthcheck passing on vnc-portal down +* [`OOM-467 `_] - Optimize config-init process +* [`OOM-493 `_] - Kubernetes infrastructure for ESR +* [`OOM-496 `_] - Readiness check is marking full availability of some components like SDC and SDNC before they would pass healthcheck +* [`OOM-514 `_] - Readiness prob fails sometimes even though the relevant pods are running +* [`OOM-539 `_] - Kube2MSB registrator doesn't support https REST service registration +* [`OOM-570 `_] - Wrong value is assigned to kube2msb AUTH_TOKEN environment variable +* [`OOM-574 `_] - OOM configuration for robot does not copy heat templatese in dockerdata-nfs +* [`OOM-577 `_] - Incorrect evaluation of bash command in yaml template file (portal-vnc-dep.yaml) +* [`OOM-578 `_] - Hard coded token in oom/kubernetes/kube2msb/values.yaml file +* [`OOM-589 `_] - Can not acces CLI in vnc-portal +* [`OOM-598 `_] - createAll.bash base64: invalid option -- d +* [`OOM-600 `_] - Unable to open CLI by clicking CLI application icon +* [`OOM-630 `_] - Red herring config pod deletion error on deleteAll - after we started deleting onap-config automatically +* [`OOM-645 `_] - Kube2MSB RBAC security issues +* [`OOM-653 `_] - sdnc-dbhost-0 deletion failure +* [`OOM-657 `_] - Look into DCAEGEN2 failure on duplicate servicePort +* [`OOM-672 `_] - hardcoded clusterIP for aai breaks auto installation +* [`OOM-680 `_] - ONAP Failure install with kubernetes 1.8+ +* [`OOM-687 `_] - Typo in README_HELM +* [`OOM-724 `_] - License Update in TOSCA +* [`OOM-767 `_] - data-router-logs and elasticsearch-data mapped to same folder +* [`OOM-768 `_] - Hardcoded onap in config files +* [`OOM-769 `_] - sdc-es data mapping in sdc-be and sdc-fe redundant +* [`OOM-783 `_] - UUI health check is failing +* [`OOM-784 `_] - make new so chart one namespace compatible +* [`OOM-791 `_] - After OOM-722 merge - docker pulls are timing out - switch to pullPolicy IfNotPresent +* [`OOM-794 `_] - demo-k8s.sh name not modified in the usage string +* [`OOM-795 `_] - HEAT templates for robot instantiateVFW missing +* [`OOM-796 `_] - robot asdc/sdngc interface in synch for Master +* [`OOM-797 `_] - GLOBAL_INJECTED_SCRIPT_VERSION missing from vm_properties.py +* [`OOM-804 `_] - VFC vfc-ztevnfmdriver container failure +* [`OOM-815 `_] - OOM Robot container helm failure after OOM-728 35909 merge +* [`OOM-829 `_] - Can not make multicloud helm chart +* [`OOM-830 `_] - Fix OOM build dependencies +* [`OOM-835 `_] - CLAMP mariadb pv is pointing to a wrong location +* [`OOM-836 `_] - champ and gizmo yaml validation issue +* [`OOM-845 `_] - Global repository should not be set by default +* [`OOM-846 `_] - Add liveness enabled fix to helm starter +* [`OOM-847 `_] - log-elasticsearch 
external ports are not externally accessible +* [`OOM-848 `_] - log-logstash logstash pipeline fails to start after oom standard config changes +* [`OOM-851 `_] - sdc chart validation error +* [`OOM-856 `_] - appc mysql fails deployment +* [`OOM-858 `_] - Fail to deploy onap chart due to config map size +* [`OOM-870 `_] - Missing CLAMP configuration +* [`OOM-871 `_] - log kibana container fails to start after oom standard config changes +* [`OOM-872 `_] - APPC-helm Still need config pod +* [`OOM-873 `_] - OOM doc typo +* [`OOM-874 `_] - Inconsistent repository references in ONAP charts +* [`OOM-875 `_] - Cannot retrieve robot logs +* [`OOM-876 `_] - Some containers ignore the repository setting +* [`OOM-878 `_] - MySQL slave nodes don't deploy when mysql.replicaCount > 1 +* [`OOM-881 `_] - SDN-C Portal pod fails to come up +* [`OOM-882 `_] - Some SDNC service names should be prefixed with the helm release name +* [`OOM-884 `_] - VID-VID mariadb pv is pointing to a wrong location +* [`OOM-885 `_] - Beijing oom component log messages missing in Elasticsearch +* [`OOM-886 `_] - kube2msb not starting up +* [`OOM-887 `_] - SDN-C db schema and sdnctl db user not reliably being created +* [`OOM-888 `_] - aaf-cs mapping wrong +* [`OOM-889 `_] - restore pv&pvc for mysql when NFS provisioner is disabled +* [`OOM-898 `_] - Multicloud-framework config file is not volume-mounted +* [`OOM-899 `_] - SDNC main pod does not come up +* [`OOM-900 `_] - portal-cassandra missing pv and pvc +* [`OOM-904 `_] - OOM problems bringing up components and passing healthchecks +* [`OOM-905 `_] - Charts use nsPrefix instead of release namespace +* [`OOM-906 `_] - Make all services independent of helm Release.Name +* [`OOM-907 `_] - Make all persistent volume to be mapped to a location defined by helm Release.Name +* [`OOM-908 `_] - Job portal-db-config fails due to missing image config +* [`OOM-909 `_] - SO Health Check fails +* [`OOM-910 `_] - VID Health Check fails +* [`OOM-911 `_] - VFC Health Check fails for 9 components +* [`OOM-912 `_] - Multicloud Health Check fails for 1 of its components +* [`OOM-913 `_] - Consul agent pod is failing +* [`OOM-916 `_] - Used to fix testing issues related to usability +* [`OOM-918 `_] - Policy - incorrect configmap mount causes base.conf to disappear +* [`OOM-920 `_] - Issue with CLAMP configuration +* [`OOM-921 `_] - align onap/values.yaml and onap/resources/environments/dev.yaml - different /dockerdata-nfs +* [`OOM-926 `_] - Disable clustering for APP-C out-of-the-box +* [`OOM-927 `_] - Need a production grade configuration override file of ONAP deployment +* [`OOM-928 `_] - Some charts use /dockerdata-nfs by default +* [`OOM-929 `_] - DMaaP message router docker image fails to pull +* [`OOM-930 `_] - New AAF Helm Charts required +* [`OOM-931 `_] - Reintroduce VNC pod into OOM +* [`OOM-932 `_] - Unblock integration testing +* [`OOM-935 `_] - sdc-cassandra pod fails to delete using helm delete - forced kubectl delete +* [`OOM-936 `_] - Readiness-check prob version is inconsistent across components +* [`OOM-937 `_] - Portal Cassandra config map points to wrong directory +* [`OOM-938 `_] - Can't install aai alone using helm +* [`OOM-945 `_] - SDNC some bundles failing to start cleanly +* [`OOM-948 `_] - make vfc got an error +* [`OOM-951 `_] - Update APPC charts based on on changes for ccsdk and Nitrogen ODL +* [`OOM-953 `_] - switch aai haproxy/hbase repo from hub.docker.com to nexus3 +* [`OOM-958 `_] - SDC-be deployment missing environment parameter +* [`OOM-964 `_] - SDC 
Healthcheck failure on sdc-be and sdc-kb containers down +* [`OOM-968 `_] - warning on default deployment values.yaml +* [`OOM-969 `_] - oomk8s images have no Dockerfile's +* [`OOM-971 `_] - Common service name template should allow for chart name override +* [`OOM-974 `_] - Cassandra bootstrap is done incorrectly +* [`OOM-977 `_] - The esr-gui annotations should include a "path" param when register to MSB +* [`OOM-985 `_] - DMAAP Redis fails to start +* [`OOM-986 `_] - SDC BE and FE logs are missing +* [`OOM-989 `_] - Sync ete-k8.sh and ete.sh for new log file numbering +* [`OOM-990 `_] - AUTO.json in SDC has unreachable addresses +* [`OOM-993 `_] - AAI model-loader.properties not in sync with project file +* [`OOM-994 `_] - DCAE cloudify controller docker image 1.1.0 N/A - use 1.2.0/1.3.0 +* [`OOM-1003 `_] - dcae-cloudify-manager chart references obsolete image version +* [`OOM-1004 `_] - aai-resources constantly fails due to cassandra hostname +* [`OOM-1005 `_] - AAI Widgets not loading due to duplicate volumes +* [`OOM-1007 `_] - Update dcae robot health check config +* [`OOM-1008 `_] - Set default consul server replica count to 1 +* [`OOM-1010 `_] - Fix broken property names in DCAE input files +* [`OOM-1011 `_] - Policy config correction after Service Name changes because of OOM-906 +* [`OOM-1013 `_] - Update DCAE container versions +* [`OOM-1014 `_] - Portal login not working due to inconsistent zookeeper naming +* [`OOM-1015 `_] - Champ fails to start +* [`OOM-1016 `_] - DOC-OPS Review: Helm install command is wrong on oom_user_guide - missing namespace +* [`OOM-1017 `_] - DOC-OPS review: Docker/Kubernetes versions wrong for master in oom_cloud_setup_guide +* [`OOM-1018 `_] - DOC-OPS review: global repo override does not match git in oom quick start guide +* [`OOM-1019 `_] - DOC-OPS review: Add Ubuntu 16.04 reference to oom_user_guide to avoid 14/16 confusion +* [`OOM-1021 `_] - Update APPC resources for Nitrogen ODL +* [`OOM-1022 `_] - Fix SDC startup dependencies +* [`OOM-1023 `_] - "spring.datasource.cldsdb.url" in clamp has wrong clampdb name +* [`OOM-1024 `_] - Cassandra data not persisted +* [`OOM-1033 `_] - helm error during deployment 20180501:1900 - all builds under 2.7.2 +* [`OOM-1034 `_] - VID Ports incorrect in deployment.yaml +* [`OOM-1037 `_] - Enable CLI health check +* [`OOM-1039 `_] - Service distribution to SO fails +* [`OOM-1041 `_] - aai-service was renamed, but old references remain +* [`OOM-1042 `_] - portalapps service was renamed, but old references remain +* [`OOM-1045 `_] - top level values.yaml missing entry for dmaap chart +* [`OOM-1049 `_] - SDNC_UEB_LISTENER db +* [`OOM-1050 `_] - Impossible to deploy consul using cache docker registry +* [`OOM-1051 `_] - Fix aaf deployment +* [`OOM-1052 `_] - SO cloud config file points to Rackspace cloud +* [`OOM-1054 `_] - Portal LoadBalancer Ingress IP is on the wrong network +* [`OOM-1060 `_] - Incorrect MR Kafka references prevent aai champ from starting +* [`OOM-1061 `_] - ConfigMap size limit exceeded +* [`OOM-1064 `_] - Improve docker registry secret management +* [`OOM-1066 `_] - Updating TOSCA blueprint to sync up with helm configuration changes (add dmaap and oof/delete message-router) +* [`OOM-1068 `_] - Update SO with new AAI cert +* [`OOM-1076 `_] - some charts still using readiness check image from amsterdam 1.x +* [`OOM-1077 `_] - AAI resources and traversal deployment failure on non-rancher envs +* [`OOM-1079 `_] - Robot charts do not allow over ride of pub_key, dcae_collector_ip and 
dcae_collector_port +* [`OOM-1081 `_] - Remove component 'mock' from TOSCA deployment +* [`OOM-1082 `_] - Wrong pv location of dcae postgres +* [`OOM-1085 `_] - appc hostname is incorrect in url +* [`OOM-1086 `_] - clamp deployment changes /dockerdata-nfs/ReleaseName dir permissions +* [`OOM-1088 `_] - APPC returns error for vCPE restart message from Policy +* [`OOM-1089 `_] - DCAE pods are not getting purged +* [`OOM-1093 `_] - Line wrapping issue in redis-cluster-config.sh script +* [`OOM-1094 `_] - Fix postgres startup +* [`OOM-1095 `_] - common makefile builds out of order +* [`OOM-1096 `_] - node port conflict SDNC (Geo enabled) & other charts +* [`OOM-1097 `_] - Nbi needs dep-nbi - crash on make all +* [`OOM-1099 `_] - Add External Interface NBI project into OOM TOSCA +* [`OOM-1102 `_] - Incorrect AAI services +* [`OOM-1103 `_] - Cannot disable NBI +* [`OOM-1104 `_] - Policy DROOLS configuration across container restarts +* [`OOM-1110 `_] - Clamp issue when connecting Policy +* [`OOM-1111 `_] - Please revert to using VNFSDK Postgres container +* [`OOM-1114 `_] - APPC is broken in latest helm chart +* [`OOM-1115 `_] - SDNC DGBuilder cant operate on DGs in database - need NodePort +* [`OOM-1116 `_] - Correct values needed by NBI chart +* [`OOM-1124 `_] - Update OOM APPC chart to enhance AAF support +* [`OOM-1126 `_] - Incorrect Port mapping between CDT Application and APPC main application +* [`OOM-1127 `_] - SO fails healthcheck +* [`OOM-1128 `_] - AAF CS fails to start in OpenLab + +Sub-task +******** + +* [`OOM-304 `_] - Service endpoint annotation for Data Router +* [`OOM-306 `_] - Handle mariadb secrets +* [`OOM-510 `_] - Increase vm.max_map_count to 262144 when running Rancher 1.6.11+ via helm 2.6+ - for elasticsearch log mem failure +* [`OOM-512 `_] - Push the reviewed and merged ReadMe content to RTD +* [`OOM-641 `_] - Segregating of configuration for SDNC-UEB component +* [`OOM-655 `_] - Create alternate prepull script which provides more user feedback and logging +* [`OOM-753 `_] - Create Helm Sub-Chart for SO's embedded mariadb +* [`OOM-754 `_] - Create Helm Chart for SO +* [`OOM-774 `_] - Create Helm Sub-Chart for APPC's embedded mySQL database +* [`OOM-775 `_] - Create Helm Chart for APPC +* [`OOM-778 `_] - Replace NFS Provisioner with configurable PV storage solution +* [`OOM-825 `_] - Apache 2 License updation for All sqls and .js file +* [`OOM-849 `_] - Policy Nexus component needs persistent volume for /sonatype-work +* [`OOM-991 `_] - Adjust SDC-BE init job timing from 10 to 30s to avoid restarts on single node systems +* [`OOM-1036 `_] - update helm from 2.7.2 to 2.8.2 wiki/rtd +* [`OOM-1063 `_] - Document Portal LoadBalancer Ingress IP Settings + +**Security Notes** + +OOM code has been formally scanned during build time using NexusIQ and no +Critical vulnerability was found. + +Quick Links: + - `OOM project page `_ + + - `Passing Badge information for OOM `_ + + + +End of Release Notes diff --git a/docs/sections/release_notes/release-notes-casablanca.rst b/docs/sections/release_notes/release-notes-casablanca.rst new file mode 100644 index 0000000000..6b857309aa --- /dev/null +++ b/docs/sections/release_notes/release-notes-casablanca.rst @@ -0,0 +1,78 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright 2017 Bell Canada & Amdocs Intellectual Property. All rights +.. reserved. +.. 
_release_notes_casablanca: + +:orphan: + +ONAP Operations Manager Release Notes +===================================== + +Version 3.0.0 Casablanca Release +-------------------------------- + +:Release Date: 2018-11-30 + +**Previous Release Notes** + +- :ref:`Beijing ` +- :ref:`Amsterdam ` + +Summary +------- + +The focus of this release was on incremental improvements in the following +areas: + +* Pluggable persistent storage with support for GlusterFS as the first storage + class provisioner + +* CPU and Memory limits in Helm Charts to improve Pod placement based on + resource availability in Kubernetes Cluster + +* Support of Node Selectors for Pod placement + +* Common "shared" Helm Charts referencing common images + + - mariadb-galera + - postgres + - cassandra + - mysql + - mongo + +* Integration of ARK Backup and Restore solution + +* Introduction of Helm deploy and undeploy plugins to better manage ONAP + deployments + + +**Security Notes** + +OOM code has been formally scanned during build time using NexusIQ and no +Critical vulnerability was found. + +Quick Links: + + - `OOM project page `_ + + - `Passing Badge information for OOM `_ + + +**Known Issues** + + * **Problem**: kubectl connections to pods (kubectl exec|logs) will + fail after a while due to a known bug in Kubernetes (1.11.2) + + **Workaround**: Restart of the kubelet daemons on the k8s hosts + + **Fix**: Will be delivered in the next release via a new + Kubernetes version (1.12) + + - `K8S Bug Report `_ + - `OOM-1532 `_ + - `OOM-1516 `_ + - `OOM-1520 `_ + +End of Release Notes diff --git a/docs/sections/release_notes/release-notes-dublin.rst b/docs/sections/release_notes/release-notes-dublin.rst new file mode 100644 index 0000000000..7a32297210 --- /dev/null +++ b/docs/sections/release_notes/release-notes-dublin.rst @@ -0,0 +1,83 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright 2017 Bell Canada & Amdocs Intellectual Property. All rights +.. reserved. +.. _release_notes_dublin: + +:orphan: + +ONAP Operations Manager Release Notes +===================================== + +Version 4.0.0 (Dublin Release) +------------------------------ + +:Release Date: 2019-06-26 + +**Previous Release Notes** + +- :ref:`Casablanca ` +- :ref:`Beijing ` +- :ref:`Amsterdam ` + + +Summary +------- + +**Platform Resiliency** + +* Documentation of a Highly-Available Kubernetes Cluster Deployment +* Availability of a Default Storage Class Provisioner for improved Persistent + Storage resiliency +* Availability of a CNI reference integration for Multi-site support + + * applications can take advantage of multi-site by using POD and/or Node + (anti)affinity, taints/tolerations, labels per application + +**Footprint Optimization** + +* Shared MariaDB-Galera Cluster - current clients in Dublin: SO, SDNC +* Shared Cassandra Cluster - current clients in Dublin: AAI, SDC +* Optional deployment of independent clusters (backward compatibility) + +**Platform Upgradability** + +* Introduction of an Upgrade Framework supporting: + + * Automated rolling upgrades for applications + * In-place schema and data migrations + * Blue-Green deployment environment migration (e.g. 
Pre-prod to Prod) + * Upgrades from embedded database instance into shared database instance + +* Release-to-release upgrade support delivered for the following projects + + * A&AI + * SDNC + * SO + +**Security Notes** + +*Fixed Security Issues* + +*Known Security Issues* + +* In default deployment OOM (consul-server-ui) exposes HTTP port 30270 outside of cluster. [`OJSI-134 `_] +* Hard coded password used for all oom deployments [`OJSI-188 `_] +* CVE-2019-12127 - OOM exposes unprotected API/UI on port 30270 [`OJSI-202 `_] + +*Known Vulnerabilities in Used Modules* + +OOM code has been formally scanned during build time using NexusIQ and no +Critical vulnerability was found. + +Quick Links: + + - `OOM project page `_ + + - `Passing Badge information for OOM `_ + + +**Known Issues** + +End of Release Notes diff --git a/docs/sections/release_notes/release-notes-elalto.rst b/docs/sections/release_notes/release-notes-elalto.rst new file mode 100644 index 0000000000..b4059028e5 --- /dev/null +++ b/docs/sections/release_notes/release-notes-elalto.rst @@ -0,0 +1,84 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. Copyright 2017 Bell Canada & Amdocs Intellectual Property. All rights +.. reserved. +.. _release_notes_elalto: + +:orphan: + +ONAP Operations Manager Release Notes +===================================== + +Version 5.0.1 (El Alto Release) +------------------------------- + +:Release Date: 2019-10-10 + +**Previous Release Notes** + +- :ref:`Dublin ` +- :ref:`Casablanca ` +- :ref:`Beijing ` +- :ref:`Amsterdam ` + + +Summary +------- + +The focus of this release was on maintenance and as such no new features were +delivered. +A list of issues resolved in this release can be found here: https://jira.onap.org/projects/OOM/versions/10726 + +**New Features** + +**Bug Fixes** + +* 25 defects addressed (see link above) + +**Known Issues** + +The following known issues will be addressed in a future release: + +* [`OOM-1480 `_] - postgres chart does not set root password when installing on an existing database instances +* [`OOM-1966 `_] - ONAP on HA Kubernetes Cluster - Documentation update +* [`OOM-1995 `_] - Mariadb Galera cluster pods keep failing +* [`OOM-2061 `_] - Details Missing for installing the kubectl section +* [`OOM-2075 `_] - Invalid MTU for Canal CNI interfaces +* [`OOM-2080 `_] - Need for "ReadWriteMany" access on storage when deploying on Kubernetes? +* [`OOM-2091 `_] - incorrect release deployed +* [`OOM-2132 `_] - Common Galera server.cnf does not contain Camunda required settings + +**Security Notes** + +*Fixed Security Issues* + +*Known Security Issues* + +* In default deployment OOM (consul-server-ui) exposes HTTP port 30270 outside of cluster. [`OJSI-134 `_] +* Hard coded password used for all oom deployments [`OJSI-188 `_] +* CVE-2019-12127 - OOM exposes unprotected API/UI on port 30270 [`OJSI-202 `_] + +*Known Vulnerabilities in Used Modules* + +OOM code has been formally scanned during build time using NexusIQ and no +Critical vulnerability was found. 
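+For the known issue above (HTTP port 30270 exposed by consul-server-ui), a
+quick way to verify whether the port is actually exposed on a given
+deployment is sketched below (illustrative only; the ``onap`` namespace is an
+assumption)::
+
+  > kubectl -n onap get svc | grep -w 30270
+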
+ +Quick Links: + + - `OOM project page `_ + + - `Passing Badge information for OOM `_ + + +Version 5.0.0 (El Alto Early Drop) +---------------------------------- + +:Release Date: 2019-08-19 + +Summary +------- + +**Software Requirements** + +* Upgraded to Kubernetes 1.15.x and Helm 1.14.x diff --git a/docs/sections/release_notes/release-notes-frankfurt.rst b/docs/sections/release_notes/release-notes-frankfurt.rst new file mode 100644 index 0000000000..c0374a6dd8 --- /dev/null +++ b/docs/sections/release_notes/release-notes-frankfurt.rst @@ -0,0 +1,152 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 + International License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) ONAP Project and its contributors +.. _release_notes_frankfurt: + +:orphan: + +************************************* +ONAP Operations Manager Release Notes +************************************* + +Previous Release Notes +====================== + +- :ref:`El Alto ` +- :ref:`Dublin ` +- :ref:`Casablanca ` +- :ref:`Beijing ` +- :ref:`Amsterdam ` + +Abstract +======== + +This document provides the release notes for the Frankfurt release. + +Summary +======= + +The focus of this release is to strengthen the foundation of OOM installer. + +Release Data +============ + ++--------------------------------------+--------------------------------------+ +| **Project** | OOM | +| | | ++--------------------------------------+--------------------------------------+ +| **Docker images** | N/A | +| | | ++--------------------------------------+--------------------------------------+ +| **Release designation** | Frankfurt | +| | | ++--------------------------------------+--------------------------------------+ +| **Release date** | 2020/06/15 | +| | | ++--------------------------------------+--------------------------------------+ + +New features +------------ + +* Ingress deployment is getting more and more usable +* Use of dynamic Persistent Volume is available + +**Bug fixes** + +A list of issues resolved in this release can be found here: +https://jira.onap.org/projects/OOM/versions/10826 + +**Known Issues** + +- `OOM-1237 `_ Source Helm Charts from + ONAP Repo. Having helm charts repo is not possible for Frankfurt release. +- `OOM-1720 `_ galera container is + outdated. containers used for mariadb are outdated and not supported anymore. +- `OOM-1817 `_ Use of global.repository + inconsistent across Helm Charts. it's then may be hard to retrieve some + containers when deploying in constrained environment. +- `OOM-2075 `_ Invalid MTU for Canal CNI + interfaces +- `OOM-2227 `_ Cassandra Backup Mechanism + works only on "static PV" mode. +- `OOM-2230 `_ Missing requests/limits + for some PODS. This can lead to "memory bombing" so cautious monitoring of + Kubernetes resources usage must be set up. +- `OOM-2279 `_ OOM El Alto and master + clamp mariadb resources doesn't match chart. +- `OOM-2285 `_ deploy.sh does not work + for mariadb-galera. deploy script doesn't behave well with "-" in the + component name. +- `OOM-2369 `_ DMAAP Helm install takes + too long and often fails. +- `OOM-2418 `_ Readiness-check 2.0.2 not + working properly for stateful set. +- `OOM-2421 `_ OOM NBI chart deployment + error. In some case, NBI deployment fails. 
+- `OOM-2422 `_ Portal App is unreachable + when deploying without HTTPs + + +Deliverables +------------ + +Software Deliverables +~~~~~~~~~~~~~~~~~~~~~ + + +Documentation Deliverables +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Known Limitations, Issues and Workarounds +========================================= + +Known Vulnerabilities +--------------------- + +Workarounds +----------- + +- `OOM-1237 `_ Workaround is to generate + them as explained in documentation. +- `OOM-1817 `_ Workaround is to use + offline installer if needed. +- `OOM-2227 `_ Workaround is to stick to + "static PV" (so, not using storage class) if backup is needed. +- `OOM-2285 `_ Workaround is to use + directly helm upgrade if needed. +- `OOM-2369 `_ Workaround is to play + postinstall jobs by hand. +- `OOM-2418 `_ Workaround is to use + version 2.2.2 in global part of override file if the new check is needed. +- `OOM-2421 `_ Workaround is to + undeploy/redeploy NBI. +- `OOM-2422 `_ Workaround is to create + first portal app service with service type Cluster IP then changing it to + NodePort or LoadBalancer so all the port are available. + +Security Notes +-------------- + +**Fixed Security Issues** + +- In default deployment OOM (consul-server-ui) exposes HTTP port 30270 outside + of cluster. [`OJSI-134 `_] +- CVE-2019-12127 - OOM exposes unprotected API/UI on port 30270 + [`OJSI-202 `_] + +References +========== + +For more information on the ONAP Frankfurt release, please see: + +#. `ONAP Home Page`_ +#. `ONAP Documentation`_ +#. `ONAP Release Downloads`_ +#. `ONAP Wiki Page`_ + + +.. _`ONAP Home Page`: https://www.onap.org +.. _`ONAP Wiki Page`: https://wiki.onap.org +.. _`ONAP Documentation`: https://docs.onap.org +.. _`ONAP Release Downloads`: https://git.onap.org diff --git a/docs/sections/release_notes/release-notes-guilin.rst b/docs/sections/release_notes/release-notes-guilin.rst new file mode 100644 index 0000000000..e8fda544c2 --- /dev/null +++ b/docs/sections/release_notes/release-notes-guilin.rst @@ -0,0 +1,142 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 + International License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) ONAP Project and its contributors +.. _release_notes_guilin: + +:orphan: + +************************************* +ONAP Operations Manager Release Notes +************************************* + +Previous Release Notes +====================== + +- :ref:`Frankfurt ` +- :ref:`El Alto ` +- :ref:`Dublin ` +- :ref:`Casablanca ` +- :ref:`Beijing ` +- :ref:`Amsterdam ` + +Abstract +======== + +This document provides the release notes for the Guilin release. + +Summary +======= + +The focus of this release is to strengthen the foundation of OOM installer. 
+ +Release Data +============ + ++--------------------------------------+--------------------------------------+ +| **Project** | OOM | +| | | ++--------------------------------------+--------------------------------------+ +| **Docker images** | N/A | +| | | ++--------------------------------------+--------------------------------------+ +| **Release designation** | Guilin | +| | | ++--------------------------------------+--------------------------------------+ +| **Release date** | 2020/12/03 | +| | | ++--------------------------------------+--------------------------------------+ + +New features +------------ + +* Kubernetes support for version up to 1.19 +* Helm (experimental) support for version up to 3.3 +* Limits are set for most of the components + +**Bug fixes** + +A list of issues resolved in this release can be found here: +https://jira.onap.org/projects/OOM/versions/10826 + +**Known Issues** + +- `OOM-1237 `_ Source Helm Charts from + ONAP Repo. Having helm charts repo is not possible for Frankfurt release. +- `OOM-1720 `_ galera container is + outdated. containers used for mariadb are outdated and not supported anymore. +- `OOM-1817 `_ Use of global.repository + inconsistent across Helm Charts. it's then may be hard to retrieve some + containers when deploying in constrained environment. +- `OOM-2227 `_ Cassandra Backup Mechanism + works only on "static PV" mode. +- `OOM-2285 `_ deploy.sh does not work + for mariadb-galera. deploy script doesn't behave well with "-" in the + component name. +- `OOM-2421 `_ OOM nbi chart deployment + error +- `OOM-2534 `_ Cert-Service leverages + runtime external dependency +- `OOM-2554 `_ Common pods have java 8 +- `OOM-2588 `_ Various subcharts not + installing due to helm size issues +- `OOM-2629 `_ NetBox demo entry setup + not complete + + +Deliverables +------------ + +Software Deliverables +~~~~~~~~~~~~~~~~~~~~~ + +OOM provides `Helm charts `_ that needs to be +"compiled". + +Documentation Deliverables +~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +Known Limitations, Issues and Workarounds +========================================= + +Known Vulnerabilities +--------------------- + +- Hard coded password used for all OOM deployments + [`OJSI-188 `_] + +Workarounds +----------- + +- `OOM-1237 `_ Workaround is to generate + them as explained in documentation. +- `OOM-1817 `_ Workaround is to use + offline installer if needed. +- `OOM-2227 `_ Workaround is to stick to + "static PV" (so, not using storage class) if backup is needed. +- `OOM-2285 `_ Workaround is to use + directly helm upgrade if needed. +- `OOM-2534 `_ Workaround is to download + in advance docker.io/openjdk:11-jre-slim where you will generate the charts + +Security Notes +-------------- + +**Fixed Security Issues** + +References +========== + +For more information on the ONAP Frankfurt release, please see: + +#. `ONAP Home Page`_ +#. `ONAP Documentation`_ +#. `ONAP Release Downloads`_ +#. `ONAP Wiki Page`_ + + +.. _`ONAP Home Page`: https://www.onap.org +.. _`ONAP Wiki Page`: https://wiki.onap.org +.. _`ONAP Documentation`: https://docs.onap.org +.. _`ONAP Release Downloads`: https://git.onap.org diff --git a/docs/sections/release_notes/release-notes-honolulu.rst b/docs/sections/release_notes/release-notes-honolulu.rst new file mode 100644 index 0000000000..3a7891a1ba --- /dev/null +++ b/docs/sections/release_notes/release-notes-honolulu.rst @@ -0,0 +1,155 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 + International License. +.. 
http://creativecommons.org/licenses/by/4.0 +.. (c) ONAP Project and its contributors +.. _release_notes_honolulu: + +:orphan: + +************************************* +ONAP Operations Manager Release Notes +************************************* + +Previous Release Notes +====================== + +- :ref:`Guilin ` +- :ref:`Frankfurt ` +- :ref:`El Alto ` +- :ref:`Dublin ` +- :ref:`Casablanca ` +- :ref:`Beijing ` +- :ref:`Amsterdam ` + +Abstract +======== + +This document provides the release notes for the Honolulu release. + +Summary +======= + +The focus of this release is to strengthen the foundation of OOM installer. + +Release Data +============ + ++--------------------------------------+--------------------------------------+ +| **Project** | OOM | +| | | ++--------------------------------------+--------------------------------------+ +| **Docker images** | N/A | +| | | ++--------------------------------------+--------------------------------------+ +| **Release designation** | Honolulu | +| | | ++--------------------------------------+--------------------------------------+ +| **Release date** | 2021/04/29 | +| | | ++--------------------------------------+--------------------------------------+ + +New features +------------ + +* Kubernetes support for version up to 1.20 +* Helm support for version up to 3.5 +* Limits are set for most of the components +* Portal-Cassandra image updated to Bitnami, supporting IPv4/IPv6 Dual Stack +* CMPv2 external issuer implemented which extends Cert-Manager with ability to + enroll X.509 certificates from CMPv2 servers +* New version for mariadb galera using Bitnami image, supporting IPv4/IPv6 Dual + Stack +* Bump version of common PostgreSQL and ElasticSearch +* Move to automatic certificates retrieval for 80% of the components +* Consistent retrieval of docker images, with ability to configure proxy for + the 4 repositories used by ONAP + +**Bug fixes** + +A list of issues resolved in this release can be found here: +https://jira.onap.org/projects/OOM/versions/11073 + +major issues solved: + +* Better handling of persistence on PostgreSQL +* Better Ingress templating +* Better Service templating + +**Known Issues** + +- `OOM-2554 `_ Common pods have java 8 +- `OOM-2435 `_ SDNC karaf shell: + log:list: Error executing command: Unrecognized configuration +- `OOM-2629 `_ NetBox demo entry setup + not complete +- `OOM-2706 `_ CDS Blueprint Processor + does not work with local DB +- `OOM-2713 `_ Problem on onboarding + custom cert to SDNC ONAP during deployment +- `OOM-2698 `_ SO helm override fails in + for value with multi-level replacement +- `OOM-2697 `_ SO with local MariaDB + deployment fails +- `OOM-2538 `_ strange error with + CertInitializer template +- `OOM-2547 `_ Health Check failures + seen after bringing down/up control plane & worker node VM instances on which + ONAP hosted +- `OOM-2699 `_ SO so-mariadb + readinessCheck fails for local MariaDB instance +- `OOM-2705 `_ SDNC DB installation fails + on local MariaDB instance +- `OOM-2603 `_ [SDNC] allign password for + scaleoutUser/restconfUser/odlUser + +Deliverables +------------ + +Software Deliverables +~~~~~~~~~~~~~~~~~~~~~ + +OOM provides `Helm charts `_ that needs to be +"compiled". 
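+
+A minimal sketch of that compilation step, assuming the standard
+``oom/kubernetes`` make targets and a pre-configured ``local`` chart
+repository (see the OOM developer and quickstart guides for the
+authoritative procedure)::
+
+  > git clone --recurse-submodules "https://gerrit.onap.org/r/oom"
+  > cd oom/kubernetes
+  > make all
+  > helm search repo local/onap
+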
+ +Documentation Deliverables +~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +Known Limitations, Issues and Workarounds +========================================= + +Known Vulnerabilities +--------------------- + +- Hard coded password used for all OOM deployments + [`OJSI-188 `_] +- `Hard coded certificates <../oom_hardcoded_certificates>` in Helm packages + +Workarounds +----------- + +- ``_ + Workaround is to generate a password with "short" strength or pregenerate + passwords without single quote in it. Default deployment is using "short" + password generation for mariadb. + +Security Notes +-------------- + +**Fixed Security Issues** + +References +========== + +For more information on the ONAP Frankfurt release, please see: + +#. `ONAP Home Page`_ +#. `ONAP Documentation`_ +#. `ONAP Release Downloads`_ +#. `ONAP Wiki Page`_ + + +.. _`ONAP Home Page`: https://www.onap.org +.. _`ONAP Wiki Page`: https://wiki.onap.org +.. _`ONAP Documentation`: https://docs.onap.org +.. _`ONAP Release Downloads`: https://git.onap.org diff --git a/docs/sections/release_notes/release-notes-isntanbul.rst b/docs/sections/release_notes/release-notes-isntanbul.rst new file mode 100644 index 0000000000..8cfbc16210 --- /dev/null +++ b/docs/sections/release_notes/release-notes-isntanbul.rst @@ -0,0 +1,124 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 + International License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) ONAP Project and its contributors +.. _release_notes_istanbul: + +:orphan: + +************************************* +ONAP Operations Manager Release Notes +************************************* + +Previous Release Notes +====================== + +- :ref:`Honolulu ` +- :ref:`Guilin ` +- :ref:`Frankfurt ` +- :ref:`El Alto ` +- :ref:`Dublin ` +- :ref:`Casablanca ` +- :ref:`Beijing ` +- :ref:`Amsterdam ` + +Abstract +======== + +This document provides the release notes for the Istanbul release. + +Summary +======= + + + +Release Data +============ + ++--------------------------------------+--------------------------------------+ +| **Project** | OOM | +| | | ++--------------------------------------+--------------------------------------+ +| **Docker images** | N/A | +| | | ++--------------------------------------+--------------------------------------+ +| **Release designation** | Istanbul | +| | | ++--------------------------------------+--------------------------------------+ +| **Release date** | | +| | | ++--------------------------------------+--------------------------------------+ + +New features +------------ + + +**Bug fixes** + +A list of issues resolved in this release can be found here: +https://jira.onap.org/projects/OOM/versions/11074 + + +**Known Issues** + + +Deliverables +------------ + +Software Deliverables +~~~~~~~~~~~~~~~~~~~~~ + +OOM provides `Helm charts `_ + + +Documentation Deliverables +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Known Limitations, Issues and Workarounds +========================================= + +Known Vulnerabilities +--------------------- + + +Workarounds +----------- + +- `OOM-2754 `_ + Because of *updateEndpoint* property added to *cmpv2issuer* CRD + it is impossible to upgrade platform component from Honolulu to Istanbul + release without manual steps. Actions that should be performed: + + #. Update the CRD definition:: + + > kubectl -n onap apply -f oom/kubernetes/platform/components/cmpv2-cert-provider/crds/cmpv2issuer.yaml + #. Upgrade the component:: + + > helm -n onap upgrade dev-platform oom/kubernetes/platform + #. 
Make sure that *cmpv2issuer* contains correct value for + *spec.updateEndpoint*. The value should be: *v1/certificate-update*. + If it's not, edit the resource:: + + > kubectl -n onap edit cmpv2issuer cmpv2-issuer-onap + + +Security Notes +-------------- + +**Fixed Security Issues** + +References +========== + +For more information on the ONAP Istanbul release, please see: + +#. `ONAP Home Page`_ +#. `ONAP Documentation`_ +#. `ONAP Release Downloads`_ +#. `ONAP Wiki Page`_ + + +.. _`ONAP Home Page`: https://www.onap.org +.. _`ONAP Wiki Page`: https://wiki.onap.org +.. _`ONAP Documentation`: https://docs.onap.org +.. _`ONAP Release Downloads`: https://git.onap.org diff --git a/docs/sections/release_notes/release-notes-jakarta.rst b/docs/sections/release_notes/release-notes-jakarta.rst new file mode 100644 index 0000000000..97621fa657 --- /dev/null +++ b/docs/sections/release_notes/release-notes-jakarta.rst @@ -0,0 +1,124 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 + International License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) ONAP Project and its contributors +.. _release_notes_jakarta: + +:orphan: + +************************************* +ONAP Operations Manager Release Notes +************************************* + +Previous Release Notes +====================== + +- :ref:`Istanbul ` +- :ref:`Honolulu ` +- :ref:`Guilin ` +- :ref:`Frankfurt ` +- :ref:`El Alto ` +- :ref:`Dublin ` +- :ref:`Casablanca ` +- :ref:`Beijing ` +- :ref:`Amsterdam ` + +Abstract +======== + +This document provides the release notes for the Jakarta release. + +Summary +======= + + + +Release Data +============ + ++--------------------------------------+--------------------------------------+ +| **Project** | OOM | +| | | ++--------------------------------------+--------------------------------------+ +| **Docker images** | N/A | +| | | ++--------------------------------------+--------------------------------------+ +| **Release designation** | Jakarta | +| | | ++--------------------------------------+--------------------------------------+ +| **Release date** | | +| | | ++--------------------------------------+--------------------------------------+ + +New features +------------ + + +**Bug fixes** + +A list of issues resolved in this release can be found here: +https://jira.onap.org/projects/OOM/versions/11498 + + +**Known Issues** + + +Deliverables +------------ + +Software Deliverables +~~~~~~~~~~~~~~~~~~~~~ + +OOM provides `Helm charts `_ + +Documentation Deliverables +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Known Limitations, Issues and Workarounds +========================================= + +Known Vulnerabilities +--------------------- + + +Workarounds +----------- + +- `OOM-2754 `_ + Because of *updateEndpoint* property added to *cmpv2issuer* CRD + it is impossible to upgrade platform component from Istanbul to Jakarta + release without manual steps. Actions that should be performed: + + #. Update the CRD definition:: + + > kubectl -n onap apply -f oom/kubernetes/platform/components/cmpv2-cert-provider/crds/cmpv2issuer.yaml + #. Upgrade the component:: + + > helm -n onap upgrade dev-platform oom/kubernetes/platform + #. Make sure that *cmpv2issuer* contains correct value for + *spec.updateEndpoint*. The value should be: *v1/certificate-update*. 
+ If it's not, edit the resource:: + + > kubectl -n onap edit cmpv2issuer cmpv2-issuer-onap + + +Security Notes +-------------- + +**Fixed Security Issues** + +References +========== + +For more information on the ONAP Istanbul release, please see: + +#. `ONAP Home Page`_ +#. `ONAP Documentation`_ +#. `ONAP Release Downloads`_ +#. `ONAP Wiki Page`_ + + +.. _`ONAP Home Page`: https://www.onap.org +.. _`ONAP Wiki Page`: https://wiki.onap.org +.. _`ONAP Documentation`: https://docs.onap.org +.. _`ONAP Release Downloads`: https://git.onap.org diff --git a/docs/sections/release_notes/release-notes.rst b/docs/sections/release_notes/release-notes.rst new file mode 100644 index 0000000000..c26593675d --- /dev/null +++ b/docs/sections/release_notes/release-notes.rst @@ -0,0 +1,130 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 + International License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) ONAP Project and its contributors +.. _release_notes: + +************************************* +ONAP Operations Manager Release Notes +************************************* + +Previous Release Notes +====================== + +- :ref:`Jakarta ` +- :ref:`Istanbul ` +- :ref:`Honolulu ` +- :ref:`Guilin ` +- :ref:`Frankfurt ` +- :ref:`El Alto ` +- :ref:`Dublin ` +- :ref:`Casablanca ` +- :ref:`Beijing ` +- :ref:`Amsterdam ` + +Abstract +======== + +This document provides the release notes for the Jakarta release. + +Summary +======= + + + +Release Data +============ + ++--------------------------------------+--------------------------------------+ +| **Project** | OOM | +| | | ++--------------------------------------+--------------------------------------+ +| **Docker images** | N/A | +| | | ++--------------------------------------+--------------------------------------+ +| **Release designation** | Jakarta | +| | | ++--------------------------------------+--------------------------------------+ +| **Release date** | | +| | | ++--------------------------------------+--------------------------------------+ + +New features +------------ + + +**Bug fixes** + +A list of issues resolved in this release can be found here: +https://jira.onap.org/projects/OOM/versions/11498 + + +**Known Issues** + + +Deliverables +------------ + +Software Deliverables +~~~~~~~~~~~~~~~~~~~~~ + +OOM provides `Helm charts `_ + +Documentation Deliverables +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- :ref:`Project Description ` - a guide for developers of OOM +- :ref:`oom_dev_guide` - a guide for developers of OOM +- :ref:`oom_infra_setup_guide` - a guide for those setting up the environments that OOM will use +- :ref:`oom_deploy_guide` - a guide for those deploying OOM on an existing cloud +- :ref:`oom_user_guide` - a guide for operators of an OOM instance +- :ref:`oom_access_info_guide` - a guide for operators who require access to OOM applications + +Known Limitations, Issues and Workarounds +========================================= + +Known Vulnerabilities +--------------------- + + +Workarounds +----------- + +- `OOM-2754 `_ + Because of *updateEndpoint* property added to *cmpv2issuer* CRD + it is impossible to upgrade platform component from Istanbul to Jakarta + release without manual steps. Actions that should be performed: + + #. Update the CRD definition:: + + > kubectl -n onap apply -f oom/kubernetes/platform/components/cmpv2-cert-provider/crds/cmpv2issuer.yaml + #. Upgrade the component:: + + > helm -n onap upgrade dev-platform oom/kubernetes/platform + #. 
Make sure that *cmpv2issuer* contains correct value for + *spec.updateEndpoint*. The value should be: *v1/certificate-update*. + If it's not, edit the resource:: + + > kubectl -n onap edit cmpv2issuer cmpv2-issuer-onap + + +Security Notes +-------------- + +**Fixed Security Issues** + +References +========== + +For more information on the ONAP Istanbul release, please see: + +#. `ONAP Home Page`_ +#. `ONAP Documentation`_ +#. `ONAP Release Downloads`_ +#. `ONAP Wiki Page`_ + + +.. _`ONAP Home Page`: https://www.onap.org +.. _`ONAP Wiki Page`: https://wiki.onap.org +.. _`ONAP Documentation`: https://docs.onap.org +.. _`ONAP Release Downloads`: https://git.onap.org diff --git a/docs/sections/resources/helm/helm-search.txt b/docs/sections/resources/helm/helm-search.txt new file mode 100644 index 0000000000..75c274957b --- /dev/null +++ b/docs/sections/resources/helm/helm-search.txt @@ -0,0 +1,42 @@ +NAME CHART VERSION APP VERSION DESCRIPTION +local/onap 11.0.0 Kohn Open Network Automation Platform (ONAP) +local/aaf 11.0.0 ONAP Application Authorization Framework +local/aai 11.0.0 ONAP Active and Available Inventory +local/appc 11.0.0 Application Controller +local/cassandra 11.0.0 ONAP cassandra +local/cds 11.0.0 ONAP Controller Design Studio (CDS) +local/clamp 11.0.0 ONAP Clamp +local/cli 11.0.0 ONAP Command Line Interface +local/common 11.0.0 Common templates for inclusion in other charts +local/consul 11.0.0 ONAP Consul Agent +local/contrib 11.0.0 ONAP optional tools +local/cps 11.0.0 ONAP Configuration Persistene Service (CPS) +local/dcaegen2 11.0.0 ONAP DCAE Gen2 +local/dgbuilder 11.0.0 D.G. Builder application +local/dmaap 11.0.0 ONAP DMaaP components +local/log 11.0.0 ONAP Logging ElasticStack +local/mariadb-galera 11.0.0 Chart for MariaDB Galera cluster +local/mongo 11.0.0 MongoDB Server +local/msb 11.0.0 ONAP MicroServices Bus +local/multicloud 11.0.0 ONAP multicloud broker +local/music 11.0.0 MUSIC - Multi-site State Coordination Service +local/mysql 11.0.0 MySQL Server +local/nbi 11.0.0 ONAP Northbound Interface +local/network-name-gen 11.0.0 Name Generation Micro Service +local/nfs-provisioner 11.0.0 NFS provisioner +local/oof 11.0.0 ONAP Optimization Framework +local/policy 11.0.0 ONAP Policy Administration Point +local/pomba 11.0.0 ONAP Post Orchestration Model Based Audit +local/portal 11.0.0 ONAP Web Portal +local/postgres 11.0.0 ONAP Postgres Server +local/robot 11.0.0 A helm Chart for kubernetes-ONAP Robot +local/sdc 11.0.0 Service Design and Creation Umbrella Helm charts +local/sdnc 11.0.0 SDN Controller +local/sdnc-prom 11.0.0 ONAP SDNC Policy Driven Ownership Management +local/sniro-emulator 11.0.0 ONAP Mock Sniro Emulator +local/so 11.0.0 ONAP Service Orchestrator +local/strimzi 11.0.0 ONAP Strimzi Apache Kafka +local/uui 11.0.0 ONAP uui +local/vfc 11.0.0 ONAP Virtual Function Controller (VF-C) +local/vid 11.0.0 ONAP Virtual Infrastructure Deployment +local/vnfsdk 11.0.0 ONAP VNF SDK diff --git a/docs/sections/resources/images/consul/consulHealth.png b/docs/sections/resources/images/consul/consulHealth.png new file mode 100644 index 0000000000..cd7e730c39 Binary files /dev/null and b/docs/sections/resources/images/consul/consulHealth.png differ diff --git a/docs/sections/resources/images/k8s/kubernetes_objects.png b/docs/sections/resources/images/k8s/kubernetes_objects.png new file mode 100644 index 0000000000..768a3adb99 Binary files /dev/null and b/docs/sections/resources/images/k8s/kubernetes_objects.png differ diff --git 
a/docs/sections/resources/images/msb/MSB-OOM-Diagram.png b/docs/sections/resources/images/msb/MSB-OOM-Diagram.png new file mode 100644 index 0000000000..4ee878d833 Binary files /dev/null and b/docs/sections/resources/images/msb/MSB-OOM-Diagram.png differ diff --git a/docs/sections/resources/images/oom_logo/oomLogoV2-Configure.png b/docs/sections/resources/images/oom_logo/oomLogoV2-Configure.png new file mode 100644 index 0000000000..bdb1ece10c Binary files /dev/null and b/docs/sections/resources/images/oom_logo/oomLogoV2-Configure.png differ diff --git a/docs/sections/resources/images/oom_logo/oomLogoV2-Delete.png b/docs/sections/resources/images/oom_logo/oomLogoV2-Delete.png new file mode 100644 index 0000000000..10c43d2fb3 Binary files /dev/null and b/docs/sections/resources/images/oom_logo/oomLogoV2-Delete.png differ diff --git a/docs/sections/resources/images/oom_logo/oomLogoV2-Deploy.png b/docs/sections/resources/images/oom_logo/oomLogoV2-Deploy.png new file mode 100644 index 0000000000..706097cd6c Binary files /dev/null and b/docs/sections/resources/images/oom_logo/oomLogoV2-Deploy.png differ diff --git a/docs/sections/resources/images/oom_logo/oomLogoV2-Heal.png b/docs/sections/resources/images/oom_logo/oomLogoV2-Heal.png new file mode 100644 index 0000000000..97ac58e9ec Binary files /dev/null and b/docs/sections/resources/images/oom_logo/oomLogoV2-Heal.png differ diff --git a/docs/sections/resources/images/oom_logo/oomLogoV2-Monitor.png b/docs/sections/resources/images/oom_logo/oomLogoV2-Monitor.png new file mode 100644 index 0000000000..c9a184ac37 Binary files /dev/null and b/docs/sections/resources/images/oom_logo/oomLogoV2-Monitor.png differ diff --git a/docs/sections/resources/images/oom_logo/oomLogoV2-Scale.png b/docs/sections/resources/images/oom_logo/oomLogoV2-Scale.png new file mode 100644 index 0000000000..140e5ca54f Binary files /dev/null and b/docs/sections/resources/images/oom_logo/oomLogoV2-Scale.png differ diff --git a/docs/sections/resources/images/oom_logo/oomLogoV2-Upgrade.png b/docs/sections/resources/images/oom_logo/oomLogoV2-Upgrade.png new file mode 100644 index 0000000000..d51f6cfcde Binary files /dev/null and b/docs/sections/resources/images/oom_logo/oomLogoV2-Upgrade.png differ diff --git a/docs/sections/resources/images/oom_logo/oomLogoV2-medium.png b/docs/sections/resources/images/oom_logo/oomLogoV2-medium.png new file mode 100644 index 0000000000..20aea693e0 Binary files /dev/null and b/docs/sections/resources/images/oom_logo/oomLogoV2-medium.png differ diff --git a/docs/sections/resources/yaml/environments_onap_demo.yaml b/docs/sections/resources/yaml/environments_onap_demo.yaml new file mode 100644 index 0000000000..8b697cbda2 --- /dev/null +++ b/docs/sections/resources/yaml/environments_onap_demo.yaml @@ -0,0 +1,103 @@ +################################################################# +# Global configuration overrides. +# +# These overrides will affect all helm charts (ie. applications) +# that are listed below and are 'enabled'. 
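+# Illustration only (the release name "demo" and the -f path are assumptions,
+# not part of this file): such an override file is typically passed to the
+# OOM helm deploy plugin when installing ONAP, e.g.
+#   helm deploy demo local/onap --namespace onap -f environments_onap_demo.yaml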
+################################################################# +global: + # Change to an unused port prefix range to prevent port conflicts + # with other instances running within the same k8s cluster + nodePortPrefix: 302 + + # image repositories + repository: nexus3.onap.org:10001 + repositorySecret: eyJuZXh1czMub25hcC5vcmc6MTAwMDEiOnsidXNlcm5hbWUiOiJkb2NrZXIiLCJwYXNzd29yZCI6ImRvY2tlciIsImVtYWlsIjoiQCIsImF1dGgiOiJaRzlqYTJWeU9tUnZZMnRsY2c9PSJ9fQ== + # readiness check + readinessImage: onap/oom/readiness:3.0.1 + # logging agent + loggingRepository: docker.elastic.co + + # image pull policy + pullPolicy: IfNotPresent + + # override default mount path root directory + # referenced by persistent volumes and log files + persistence: + mountPath: /dockerdata + + # flag to enable debugging - application support required + debugEnabled: true + +################################################################# +# Enable/disable and configure helm charts (ie. applications) +# to customize the ONAP deployment. +################################################################# +aaf: + enabled: false +aai: + enabled: false +appc: + enabled: false +clamp: + enabled: true +cli: + enabled: false +consul: # Consul Health Check Monitoring + enabled: false +cps: + enabled: false +dcaegen2: + enabled: false +log: + enabled: false +message-router: + enabled: false +mock: + enabled: false +msb: + enabled: false +multicloud: + enabled: false +policy: + enabled: false +portal: + enabled: false +robot: # Robot Health Check + enabled: true +sdc: + enabled: false +sdnc: + enabled: false +so: # Service Orchestrator + enabled: true + + replicaCount: 1 + + liveness: + # necessary to disable liveness probe when setting breakpoints + # in debugger so K8s doesn't restart unresponsive container + enabled: true + + # so server configuration + config: + # message router configuration + dmaapTopic: "AUTO" + # openstack configuration + openStackUserName: "vnf_user" + openStackRegion: "RegionOne" + openStackKeyStoneUrl: "http://1.2.3.4:5000" + openStackServiceTenantName: "service" + openStackEncryptedPasswordHere: "c124921a3a0efbe579782cde8227681e" + + # configure embedded mariadb + mariadb: + config: + mariadbRootPassword: password +uui: + enabled: false +vfc: + enabled: false +vid: + enabled: false +vnfsdk: + enabled: false diff --git a/docs/shell/master_nfs_node.sh b/docs/shell/master_nfs_node.sh deleted file mode 100644 index 32574c9f29..0000000000 --- a/docs/shell/master_nfs_node.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh - -usage () { - echo "Usage:" - echo " ./$(basename $0) node1_ip node2_ip ... 
nodeN_ip" - exit 1 -} - -if [ "$#" -lt 1 ]; then - echo "Missing NFS slave nodes" - usage -fi - -#Install NFS kernel -sudo apt-get update -sudo apt-get install -y nfs-kernel-server - -#Create /dockerdata-nfs and set permissions -sudo mkdir -p /dockerdata-nfs -sudo chmod 777 -R /dockerdata-nfs -sudo chown nobody:nogroup /dockerdata-nfs/ - -#Update the /etc/exports -NFS_EXP="" -for i in $@; do - NFS_EXP="${NFS_EXP}$i(rw,sync,no_root_squash,no_subtree_check) " -done -echo "/dockerdata-nfs "$NFS_EXP | sudo tee -a /etc/exports - -#Restart the NFS service -sudo exportfs -a -sudo systemctl restart nfs-kernel-server diff --git a/docs/shell/openstack-k8s-controlnode.sh b/docs/shell/openstack-k8s-controlnode.sh deleted file mode 100644 index d1515a7e5f..0000000000 --- a/docs/shell/openstack-k8s-controlnode.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh - -DOCKER_VERSION=18.09.5 - -apt-get update - -curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh -mkdir -p /etc/systemd/system/docker.service.d/ -cat > /etc/systemd/system/docker.service.d/docker.conf << EOF -[Service] -ExecStart= -ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001 -EOF - -sudo usermod -aG docker ubuntu - -systemctl daemon-reload -systemctl restart docker -apt-mark hold docker-ce - -IP_ADDR=$(ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}') -HOST_NAME=$(hostname) - -echo "$IP_ADDR $HOST_NAME" >> /etc/hosts - -docker login -u docker -p docker nexus3.onap.org:10001 - -sudo apt-get install make -y - -#nfs server -sudo apt-get install nfs-kernel-server -y -sudo mkdir -p /dockerdata-nfs -sudo chown nobody:nogroup /dockerdata-nfs/ - - -exit 0 diff --git a/docs/shell/openstack-k8s-workernode.sh b/docs/shell/openstack-k8s-workernode.sh deleted file mode 100644 index 8b1b9e41ee..0000000000 --- a/docs/shell/openstack-k8s-workernode.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/sh - -DOCKER_VERSION=18.09.5 - -apt-get update - -curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh -mkdir -p /etc/systemd/system/docker.service.d/ -cat > /etc/systemd/system/docker.service.d/docker.conf << EOF -[Service] -ExecStart= -ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001 -EOF - -sudo usermod -aG docker ubuntu - -systemctl daemon-reload -systemctl restart docker -apt-mark hold docker-ce - -IP_ADDR=$(ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}') -HOST_NAME=$(hostname) - -echo "$IP_ADDR $HOST_NAME" >> /etc/hosts - -docker login -u docker -p docker nexus3.onap.org:10001 - -sudo apt-get install make -y - -# install nfs -sudo apt-get install nfs-common -y - - -exit 0 diff --git a/docs/shell/openstack-nfs-server.sh b/docs/shell/openstack-nfs-server.sh deleted file mode 100644 index 395d04f27c..0000000000 --- a/docs/shell/openstack-nfs-server.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh - -apt-get update - -IP_ADDR=$(ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}') -HOST_NAME=$(hostname) - -echo "$IP_ADDR $HOST_NAME" >> /etc/hosts - -sudo apt-get install make -y - -# nfs server -sudo apt-get install nfs-kernel-server -y - -sudo mkdir -p /nfs_share -sudo chown nobody:nogroup /nfs_share/ - -exit 0 diff --git a/docs/shell/slave_nfs_node.sh b/docs/shell/slave_nfs_node.sh deleted file mode 100644 index 1035ff5ad6..0000000000 --- a/docs/shell/slave_nfs_node.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/sh - -usage () { - echo "Usage:" - echo " ./$(basename $0) nfs_master_ip" - exit 1 -} - -if [ "$#" -ne 1 ]; then 
- echo "Missing NFS mater node" - usage -fi - -MASTER_IP=$1 - -#Install NFS common -sudo apt-get update -sudo apt-get install -y nfs-common - -#Create NFS directory -sudo mkdir -p /dockerdata-nfs - -#Mount the remote NFS directory to the local one -sudo mount $MASTER_IP:/dockerdata-nfs /dockerdata-nfs/ -echo "$MASTER_IP:/dockerdata-nfs /dockerdata-nfs nfs auto,nofail,noatime,nolock,intr,tcp,actimeo=1800 0 0" | sudo tee -a /etc/fstab diff --git a/docs/tox.ini b/docs/tox.ini new file mode 100644 index 0000000000..8bec1b5ba0 --- /dev/null +++ b/docs/tox.ini @@ -0,0 +1,32 @@ +[tox] +minversion = 1.6 +envlist = docs,docs-linkcheck,docs-spellcheck +skipsdist = true + +[testenv:docs] +basepython = python3.8 +deps = + -r{toxinidir}/requirements-docs.txt + -chttps://raw.githubusercontent.com/openstack/requirements/stable/yoga/upper-constraints.txt + -chttps://git.onap.org/doc/plain/etc/upper-constraints.onap.txt?h=master +commands = + sphinx-build -W -q -b html -n -d {envtmpdir}/doctrees {toxinidir} {toxinidir}/_build/html + +[testenv:docs-linkcheck] +basepython = python3.8 +deps = + -r{toxinidir}/requirements-docs.txt + -chttps://raw.githubusercontent.com/openstack/requirements/stable/yoga/upper-constraints.txt + -chttps://git.onap.org/doc/plain/etc/upper-constraints.onap.txt?h=master +commands = + sphinx-build -W -q -b linkcheck -d {envtmpdir}/doctrees {toxinidir} {toxinidir}/_build/linkcheck + +[testenv:docs-spellcheck] +basepython = python3.8 +deps = + -r{toxinidir}/requirements-docs.txt + -chttps://raw.githubusercontent.com/openstack/requirements/stable/yoga/upper-constraints.txt + -chttps://git.onap.org/doc/plain/etc/upper-constraints.onap.txt?h=master +commands = + sphinx-build -b spelling -d {envtmpdir}/doctrees {toxinidir} {toxinidir}/_build/spellcheck + diff --git a/docs/yaml/cluster.yml b/docs/yaml/cluster.yml deleted file mode 100644 index 0757e15a28..0000000000 --- a/docs/yaml/cluster.yml +++ /dev/null @@ -1,156 +0,0 @@ -# An example of an HA Kubernetes cluster for ONAP -nodes: -- address: 10.12.6.85 - port: "22" - internal_address: 10.0.0.8 - role: - - controlplane - - etcd - hostname_override: "onap-control-1" - user: ubuntu - ssh_key_path: "~/.ssh/onap-key" -- address: 10.12.6.90 - port: "22" - internal_address: 10.0.0.11 - role: - - controlplane - - etcd - hostname_override: "onap-control-2" - user: ubuntu - ssh_key_path: "~/.ssh/onap-key" -- address: 10.12.6.89 - port: "22" - internal_address: 10.0.0.12 - role: - - controlplane - - etcd - hostname_override: "onap-control-3" - user: ubuntu - ssh_key_path: "~/.ssh/onap-key" -- address: 10.12.5.165 - port: "22" - internal_address: 10.0.0.14 - role: - - worker - hostname_override: "onap-k8s-1" - user: ubuntu - ssh_key_path: "~/.ssh/onap-key" -- address: 10.12.6.238 - port: "22" - internal_address: 10.0.0.26 - role: - - worker - hostname_override: "onap-k8s-2" - user: ubuntu - ssh_key_path: "~/.ssh/onap-key" -- address: 10.12.6.126 - port: "22" - internal_address: 10.0.0.5 - role: - - worker - hostname_override: "onap-k8s-3" - user: ubuntu - ssh_key_path: "~/.ssh/onap-key" -- address: 10.12.5.11 - port: "22" - internal_address: 10.0.0.6 - role: - - worker - hostname_override: "onap-k8s-4" - user: ubuntu - ssh_key_path: "~/.ssh/onap-key" -- address: 10.12.6.244 - port: "22" - internal_address: 10.0.0.9 - role: - - worker - hostname_override: "onap-k8s-5" - user: ubuntu - ssh_key_path: "~/.ssh/onap-key" -- address: 10.12.6.249 - port: "22" - internal_address: 10.0.0.17 - role: - - worker - hostname_override: "onap-k8s-6" - user: ubuntu - 
ssh_key_path: "~/.ssh/onap-key" -- address: 10.12.5.191 - port: "22" - internal_address: 10.0.0.20 - role: - - worker - hostname_override: "onap-k8s-7" - user: ubuntu - ssh_key_path: "~/.ssh/onap-key" -- address: 10.12.6.111 - port: "22" - internal_address: 10.0.0.10 - role: - - worker - hostname_override: "onap-k8s-8" - user: ubuntu - ssh_key_path: "~/.ssh/onap-key" -- address: 10.12.6.195 - port: "22" - internal_address: 10.0.0.4 - role: - - worker - hostname_override: "onap-k8s-9" - user: ubuntu - ssh_key_path: "~/.ssh/onap-key" -- address: 10.12.5.160 - port: "22" - internal_address: 10.0.0.16 - role: - - worker - hostname_override: "onap-k8s-10" - user: ubuntu - ssh_key_path: "~/.ssh/onap-key" -- address: 10.12.6.74 - port: "22" - internal_address: 10.0.0.18 - role: - - worker - hostname_override: "onap-k8s-11" - user: ubuntu - ssh_key_path: "~/.ssh/onap-key" -- address: 10.12.6.82 - port: "22" - internal_address: 10.0.0.7 - role: - - worker - hostname_override: "onap-k8s-12" - user: ubuntu - ssh_key_path: "~/.ssh/onap-key" -services: - kube-api: - service_cluster_ip_range: 10.43.0.0/16 - pod_security_policy: false - always_pull_images: false - kube-controller: - cluster_cidr: 10.42.0.0/16 - service_cluster_ip_range: 10.43.0.0/16 - kubelet: - cluster_domain: cluster.local - cluster_dns_server: 10.43.0.10 - fail_swap_on: false -network: - plugin: canal -authentication: - strategy: x509 -ssh_key_path: "~/.ssh/onap-key" -ssh_agent_auth: false -authorization: - mode: rbac -ignore_docker_version: false -kubernetes_version: "v1.15.11-rancher1-2" -private_registries: -- url: nexus3.onap.org:10001 - user: docker - password: docker - is_default: true -cluster_name: "onap" -restore: - restore: false - snapshot_name: "" diff --git a/docs/yaml/environments_onap_demo.yaml b/docs/yaml/environments_onap_demo.yaml deleted file mode 100644 index 8b697cbda2..0000000000 --- a/docs/yaml/environments_onap_demo.yaml +++ /dev/null @@ -1,103 +0,0 @@ -################################################################# -# Global configuration overrides. -# -# These overrides will affect all helm charts (ie. applications) -# that are listed below and are 'enabled'. -################################################################# -global: - # Change to an unused port prefix range to prevent port conflicts - # with other instances running within the same k8s cluster - nodePortPrefix: 302 - - # image repositories - repository: nexus3.onap.org:10001 - repositorySecret: eyJuZXh1czMub25hcC5vcmc6MTAwMDEiOnsidXNlcm5hbWUiOiJkb2NrZXIiLCJwYXNzd29yZCI6ImRvY2tlciIsImVtYWlsIjoiQCIsImF1dGgiOiJaRzlqYTJWeU9tUnZZMnRsY2c9PSJ9fQ== - # readiness check - readinessImage: onap/oom/readiness:3.0.1 - # logging agent - loggingRepository: docker.elastic.co - - # image pull policy - pullPolicy: IfNotPresent - - # override default mount path root directory - # referenced by persistent volumes and log files - persistence: - mountPath: /dockerdata - - # flag to enable debugging - application support required - debugEnabled: true - -################################################################# -# Enable/disable and configure helm charts (ie. applications) -# to customize the ONAP deployment. 
-#################################################################
-aaf:
-  enabled: false
-aai:
-  enabled: false
-appc:
-  enabled: false
-clamp:
-  enabled: true
-cli:
-  enabled: false
-consul: # Consul Health Check Monitoring
-  enabled: false
-cps:
-  enabled: false
-dcaegen2:
-  enabled: false
-log:
-  enabled: false
-message-router:
-  enabled: false
-mock:
-  enabled: false
-msb:
-  enabled: false
-multicloud:
-  enabled: false
-policy:
-  enabled: false
-portal:
-  enabled: false
-robot: # Robot Health Check
-  enabled: true
-sdc:
-  enabled: false
-sdnc:
-  enabled: false
-so: # Service Orchestrator
-  enabled: true
-
-  replicaCount: 1
-
-  liveness:
-    # necessary to disable liveness probe when setting breakpoints
-    # in debugger so K8s doesn't restart unresponsive container
-    enabled: true
-
-  # so server configuration
-  config:
-    # message router configuration
-    dmaapTopic: "AUTO"
-    # openstack configuration
-    openStackUserName: "vnf_user"
-    openStackRegion: "RegionOne"
-    openStackKeyStoneUrl: "http://1.2.3.4:5000"
-    openStackServiceTenantName: "service"
-    openStackEncryptedPasswordHere: "c124921a3a0efbe579782cde8227681e"
-
-  # configure embedded mariadb
-  mariadb:
-    config:
-      mariadbRootPassword: password
-uui:
-  enabled: false
-vfc:
-  enabled: false
-vid:
-  enabled: false
-vnfsdk:
-  enabled: false
diff --git a/docs/yaml/example-integration-override-v3.yaml b/docs/yaml/example-integration-override-v3.yaml
deleted file mode 100644
index a55b1c08fc..0000000000
--- a/docs/yaml/example-integration-override-v3.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
-#################################################################
-# This override file configures openstack parameters for ONAP
-#################################################################
-robot:
-  enabled: true
-  flavor: large
-  appcUsername: "appc@appc.onap.org"
-  appcPassword: "demo123456!"
-  # KEYSTONE Version 3 Required for Rocky and beyond
-  openStackKeystoneAPIVersion: "v3"
-  # OS_AUTH_URL without the /v3 from the openstack .RC file
-  openStackKeyStoneUrl: "http://10.12.25.2:5000"
-  # tenantID=`openstack project show $tenantName | grep -w id | awk '{print $4}'`
-  # where "tenantName" is OS_PROJECT_NAME from openstack .RC file
-  openStackTenantId: "09d8566ea45e43aa974cf447ed591d77"
-  # OS_USERNAME from the openstack .RC file
-  openStackUserName: "OS_USERNAME_HERE"
-  # OS_PROJECT_DOMAIN_ID from the openstack .RC file
-  # in some environments it is a string but in other environmens it may be a numeric
-  openStackDomainId: "default"
-  # OS_USER_DOMAIN_NAME from the openstack .RC file
-  openStackUserDomain: "Default"
-  openStackProjectName: "OPENSTACK_PROJECT_NAME_HERE"
-  ubuntu14Image: "ubuntu-14-04-cloud-amd64"
-  ubuntu16Image: "ubuntu-16-04-cloud-amd64"
-  # From openstack network list output
-  openStackPublicNetId: "971040b2-7059-49dc-b220-4fab50cb2ad4"
-  # From openstack network list output
-  openStackPrivateNetId: "83c84b68-80be-4990-8d7f-0220e3c6e5c8"
-  # From openstack network list output
-  openStackPrivateSubnetId: "e571c1d1-8ac0-4744-9b40-c3218d0a53a0"
-  openStackPrivateNetCidr: "10.0.0.0/16"
-  openStackOamNetworkCidrPrefix: "10.0"
-  # From openstack security group list output
-  openStackSecurityGroup: "bbe028dc-b64f-4f11-a10f-5c6d8d26dc89"
-  dcaeCollectorIp: "10.12.6.109"
-  # SSH public key
-  vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
-  demoArtifactsVersion: "1.4.0"
-  demoArtifactsRepoUrl: "https://nexus.onap.org/content/repositories/releases"
-  scriptVersion: "1.4.0"
-  # rancher node IP where RKE configired
-  rancherIpAddress: "10.12.6.160"
-  config:
-    # use the python utility to encrypt the OS_PASSWORD for the OS_USERNAME
-    openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_PYTHON_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
-    openStackSoEncryptedPassword: "YYYYYYYYYYYYYYYYYYYYYYYY_OPENSTACK_JAVA_PASSWORD_HERE_YYYYYYYYYYYYYYYY"
-so:
-  enabled: true
-  so-catalog-db-adapter:
-    config:
-      openStackUserName: "OS_USERNAME_HERE"
-      # OS_AUTH_URL (keep the /v3) from the openstack .RC file
-      openStackKeyStoneUrl: "http://10.12.25.2:5000/v3"
-      # use the SO Java utility to encrypt the OS_PASSWORD for the OS_USERNAME
-      openStackEncryptedPasswordHere: "YYYYYYYYYYYYYYYYYYYYYYYY_OPENSTACK_JAVA_PASSWORD_HERE_YYYYYYYYYYYYYYYY"
-appc:
-  enabled: true
-  replicaCount: 3
-  config:
-    enableClustering: true
-    openStackType: "OpenStackProvider"
-    openStackName: "OpenStack"
-    # OS_AUTH_URL from the openstack .RC file
-    openStackKeyStoneUrl: "http://10.12.25.2:5000/v3"
-    openStackServiceTenantName: "OPENSTACK_PROJECT_NAME_HERE"
-    openStackDomain: "OPEN_STACK_DOMAIN_NAME_HERE"
-    openStackUserName: "OS_USER_NAME_HERE"
-    openStackEncryptedPassword: "OPENSTACK_CLEAR_TEXT_PASSWORD_HERE"
diff --git a/docs/yaml/example-integration-override.yaml b/docs/yaml/example-integration-override.yaml
deleted file mode 100644
index 5eeee5e2f5..0000000000
--- a/docs/yaml/example-integration-override.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-#################################################################
-# This override file configures openstack parameters for ONAP
-#################################################################
-appc:
-  config:
-    enableClustering: false
-    openStackType: "OpenStackProvider"
-    openStackName: "OpenStack"
-    # OS_AUTH_URL from the openstack .RC file
-    openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
-    openStackServiceTenantName: "OPENSTACK_TENANTNAME_HERE"
-    # OS_USER_DOMAIN_NAME from the openstack .RC file
-    openStackDomain: "Default"
-    openStackUserName: "OPENSTACK_USERNAME_HERE"
-    openStackEncryptedPassword: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
-robot:
-  appcUsername: "appc@appc.onap.org"
-  appcPassword: "demo123456!"
-  # OS_AUTH_URL without the /v2.0 from the openstack .RC file
-  openStackKeyStoneUrl: "http://10.12.25.2:5000"
-  # From openstack network list output
-  openStackPublicNetId: "971040b2-7059-49dc-b220-4fab50cb2ad4"
-  # tenantID=`openstack project show $tenantName | grep -w id | awk '{print $4}'`
-  # where "tenantName" is OS_PROJECT_NAME from openstack .RC file
-  openStackTenantId: "09d8566ea45e43aa974cf447ed591d77"
-  openStackUserName: "OPENSTACK_USERNAME_HERE"
-  ubuntu14Image: "ubuntu-14-04-cloud-amd64"
-  ubuntu16Image: "ubuntu-16-04-cloud-amd64"
-  # From openstack network list output
-  openStackPrivateNetId: "c7824f00-bef7-4864-81b9-f6c3afabd313"
-  # From openstack network list output
-  openStackPrivateSubnetId: "2a0e8888-f93e-4615-8d28-fc3d4d087fc3"
-  openStackPrivateNetCidr: "10.0.0.0/16"
-  # From openstack security group list output
-  openStackSecurityGroup: "3a7a1e7e-6d15-4264-835d-fab1ae81e8b0"
-  openStackOamNetworkCidrPrefix: "10.0"
-  # Control node IP
-  dcaeCollectorIp: "10.12.6.88"
-  # SSH public key
-  vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
-  demoArtifactsVersion: "1.4.0-SNAPSHOT"
-  demoArtifactsRepoUrl: "https://nexus.onap.org/content/repositories/releases"
-  scriptVersion: "1.4.0-SNAPSHOT"
-  # rancher node IP where RKE configired
-  rancherIpAddress: "10.12.5.127"
-  config:
-    # openStackEncryptedPasswordHere should match the encrypted string used in SO and APPC and overridden per environment
-    openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
-so:
-  # so server configuration
-  so-catalog-db-adapter:
-    config:
-      openStackUserName: "OPENSTACK_USERNAME_HERE"
-      # OS_AUTH_URL from the openstack .RC file
-      openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
-      openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
diff --git a/requirements.txt b/requirements.txt
index 4913184e9c..a849267b23 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,3 @@
-lfdocs-conf
 sphinx>=4.2.0 # BSD
 sphinx-rtd-theme>=1.0.0 # MIT
 doc8
@@ -10,3 +9,8 @@ PyEnchant
 pylint
 autopep8
 gitlint-core
+sphinxcontrib-blockdiag
+sphinxcontrib-seqdiag
+sphinxcontrib-swaggerdoc
+sphinxcontrib-plantuml
+sphinx-toolbox
--
cgit 1.2.3-korg
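
For reference, the docs tox environments introduced by this change can be exercised locally along the following lines. This is a minimal sketch rather than part of the patch itself; it assumes tox and a Python 3.8 interpreter are available on the workstation:

    # Run from the repository's docs/ directory, where the new tox.ini lives.
    cd docs
    tox -e docs             # HTML build written to docs/_build/html
    tox -e docs-linkcheck   # verify external links
    tox -e docs-spellcheck  # run the Sphinx spelling builder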