aboutsummaryrefslogtreecommitdiffstats
path: root/docs
diff options
context:
space:
mode:
Diffstat (limited to 'docs')
-rw-r--r--docs/.gitignore3
-rw-r--r--docs/_static/css/ribbon.css63
-rwxr-xr-xdocs/_static/favicon.icobin0 -> 2102 bytes
-rw-r--r--docs/_static/logo_onap_2017.pngbin0 -> 12278 bytes
-rw-r--r--docs/conf.py72
-rw-r--r--docs/docs_5G_Bulk_PM.rst17
-rw-r--r--docs/docs_5G_Configuration_over_NETCONF.rst12
-rw-r--r--docs/docs_5G_NRM_Configuration.rst41
-rw-r--r--docs/docs_5G_PNF_Software_Upgrade.rst77
-rw-r--r--docs/docs_5G_PNF_Software_Upgrade_With_Schema_Update.rst114
-rw-r--r--docs/docs_5G_PNF_Software_Upgrade_ansible_with_EM.rst35
-rw-r--r--docs/docs_5G_PNF_Software_Upgrade_direct_netconf_yang.rst37
-rw-r--r--docs/docs_5G_PNF_Software_Upgrade_netconf_with_EM.rst35
-rw-r--r--docs/docs_5G_oof_pci.rst43
-rw-r--r--docs/docs_5G_oof_son.rst128
-rw-r--r--docs/docs_5g_pnf_pnp.rst72
-rw-r--r--docs/docs_5g_rtpm.rst12
-rw-r--r--docs/docs_BBS.rst302
-rw-r--r--docs/docs_CCVPN.rst554
-rw-r--r--docs/docs_CM_flexible_designer_orchestrator.rst6
-rw-r--r--docs/docs_CM_schedule_optimizer.rst23
-rw-r--r--docs/docs_E2E_network_slicing.rst638
-rw-r--r--docs/docs_NFV_Testing_Automatic_Platform_Requirements_User_Guide.rst130
-rw-r--r--docs/docs_StndDefined_Events_Collection_Mechanism.rst97
-rw-r--r--docs/docs_Testing_5G_PNF_Software_Upgrade_With_Schema_Update.rst189
-rw-r--r--docs/docs_pnf_onboarding_preonboarding.rst29
-rw-r--r--docs/docs_postman.rst190
-rw-r--r--docs/docs_robot.rst2
-rw-r--r--docs/docs_scaleout.rst541
-rw-r--r--docs/docs_usecases.rst52
-rw-r--r--docs/docs_usecases_release.rst34
-rw-r--r--docs/docs_vCPE with Tosca VNF.rst159
-rw-r--r--docs/docs_vCPE.rst107
-rw-r--r--docs/docs_vCPE_tosca_local.rst210
-rw-r--r--docs/docs_vCPE_with_Tosca_VNF.rst190
-rw-r--r--[-rwxr-xr-x]docs/docs_vFWDT.rst673
-rw-r--r--docs/docs_vFW_CNF_CDS.rst1903
-rw-r--r--docs/docs_vfw.rst13
-rw-r--r--docs/docs_vfwHPA.rst231
-rw-r--r--docs/docs_vfw_edgex_k8s.rst19
-rw-r--r--docs/docs_vipsec.rst62
-rw-r--r--docs/docs_vlb.rst39
-rw-r--r--docs/files/CI/ONAP_CI_0.pngbin0 -> 193773 bytes
-rw-r--r--docs/files/CI/ONAP_CI_1.pngbin0 -> 158090 bytes
-rw-r--r--docs/files/CI/ONAP_CI_10.pngbin0 -> 80878 bytes
-rw-r--r--docs/files/CI/ONAP_CI_2.pngbin0 -> 68472 bytes
-rw-r--r--docs/files/CI/ONAP_CI_3.pngbin0 -> 212584 bytes
-rw-r--r--docs/files/CI/ONAP_CI_4.pngbin0 -> 134170 bytes
-rw-r--r--docs/files/CI/ONAP_CI_5.pngbin0 -> 35394 bytes
-rw-r--r--docs/files/CI/ONAP_CI_6.pngbin0 -> 54172 bytes
-rw-r--r--docs/files/CI/ONAP_CI_7.pngbin0 -> 65675 bytes
-rwxr-xr-xdocs/files/CI/ONAP_CI_8.pngbin0 -> 84910 bytes
-rw-r--r--docs/files/CI/ONAP_CI_9.pngbin0 -> 157463 bytes
-rw-r--r--docs/files/bbs/BBS_dcae-ves-collector_config.pngbin0 -> 591369 bytes
-rw-r--r--docs/files/csv/release-demo-features.csv5
-rw-r--r--docs/files/csv/release-integration-features.csv5
-rw-r--r--docs/files/csv/release-integration-ref.csv39
-rw-r--r--docs/files/csv/release-oparent-features.csv4
-rw-r--r--docs/files/csv/release-pythonsdk-features.csv2
-rw-r--r--docs/files/csv/release-testsuite-features.csv2
-rw-r--r--docs/files/csv/repo-archived.csv10
-rw-r--r--docs/files/csv/repo-demo.csv2
-rw-r--r--docs/files/csv/repo-integration-external.csv2
-rw-r--r--docs/files/csv/repo-integration.csv13
-rw-r--r--docs/files/csv/repo-oparent.csv3
-rw-r--r--docs/files/csv/repo-pipelines.csv4
-rw-r--r--docs/files/csv/repo-simulators.csv13
-rw-r--r--docs/files/csv/repo-testsuite.csv10
-rw-r--r--docs/files/csv/s3p-instantiation.csv6
-rw-r--r--docs/files/csv/s3p-sdc.csv6
-rw-r--r--docs/files/csv/simulators.csv6
-rw-r--r--docs/files/csv/stability_basic_vm.csv11
-rw-r--r--docs/files/csv/stability_cluster_metric_cpu.csv2
-rw-r--r--docs/files/csv/stability_cluster_metric_memory.csv2
-rw-r--r--docs/files/csv/stability_cluster_metric_network.csv2
-rw-r--r--docs/files/csv/stability_top10_cpu.csv11
-rw-r--r--docs/files/csv/stability_top10_memory.csv11
-rw-r--r--docs/files/csv/stability_top10_net.csv11
-rw-r--r--docs/files/csv/tests-healthcheck.csv11
-rw-r--r--docs/files/csv/tests-infrastructure-healthcheck.csv4
-rw-r--r--docs/files/csv/tests-security.csv5
-rw-r--r--docs/files/csv/tests-smoke.csv11
-rw-r--r--docs/files/csv/usecases-deprecated.csv28
-rw-r--r--docs/files/csv/usecases-functional-requirements.csv3
-rw-r--r--docs/files/csv/usecases-non-functional-requirements.csv5
-rw-r--r--docs/files/csv/usecases-old-valid.csv6
-rw-r--r--docs/files/csv/usecases.csv4
-rwxr-xr-xdocs/files/dt-use-case.pngbin240228 -> 154683 bytes
-rw-r--r--docs/files/ns_automation/ns_automation_sdc_suffix.pngbin0 -> 17065 bytes
-rw-r--r--docs/files/ns_automation/ns_automation_suc.pngbin0 -> 170864 bytes
-rw-r--r--docs/files/ns_automation/ns_automation_test_class.pngbin0 -> 120331 bytes
-rw-r--r--docs/files/s3p/basic_vm_duration.pngbin0 -> 36201 bytes
-rw-r--r--docs/files/s3p/basic_vm_duration_histo.pngbin0 -> 29154 bytes
-rw-r--r--docs/files/s3p/daily_frankfurt1.pngbin0 -> 59945 bytes
-rw-r--r--docs/files/s3p/daily_frankfurt2.pngbin0 -> 24219 bytes
-rw-r--r--docs/files/s3p/guilin_daily_healthcheck.pngbin0 -> 20733 bytes
-rw-r--r--docs/files/s3p/guilin_daily_infrastructure_healthcheck.pngbin0 -> 19414 bytes
-rw-r--r--docs/files/s3p/guilin_daily_security.pngbin0 -> 10143 bytes
-rw-r--r--docs/files/s3p/guilin_daily_smoke.pngbin0 -> 17422 bytes
-rw-r--r--docs/files/s3p/honolulu_daily_healthcheck.pngbin0 -> 19579 bytes
-rw-r--r--docs/files/s3p/honolulu_daily_infrastructure_healthcheck.pngbin0 -> 24545 bytes
-rw-r--r--docs/files/s3p/honolulu_daily_security.pngbin0 -> 20443 bytes
-rw-r--r--docs/files/s3p/honolulu_daily_smoke.pngbin0 -> 26012 bytes
-rw-r--r--docs/files/s3p/honolulu_sdc_stability.pngbin0 -> 204363 bytes
-rw-r--r--docs/files/s3p/honolulu_sdc_stability_resources.pngbin0 -> 49466 bytes
-rw-r--r--docs/files/s3p/honolulu_so_stability_1_duration.pngbin0 -> 35364 bytes
-rw-r--r--docs/files/s3p/honolulu_so_stability_5.pngbin0 -> 129331 bytes
-rw-r--r--docs/files/s3p/honolulu_weekly_cpu.pngbin0 -> 263761 bytes
-rw-r--r--docs/files/s3p/honolulu_weekly_memory.pngbin0 -> 299476 bytes
-rw-r--r--docs/files/s3p/istanbul-dashboard.pngbin0 -> 60652 bytes
-rw-r--r--docs/files/s3p/istanbul_daily_healthcheck.pngbin0 -> 21941 bytes
-rw-r--r--docs/files/s3p/istanbul_daily_infrastructure_healthcheck.pngbin0 -> 21499 bytes
-rw-r--r--docs/files/s3p/istanbul_daily_security.pngbin0 -> 16609 bytes
-rw-r--r--docs/files/s3p/istanbul_daily_smoke.pngbin0 -> 21629 bytes
-rw-r--r--docs/files/s3p/istanbul_instantiation_stability_10.pngbin0 -> 90935 bytes
-rw-r--r--docs/files/s3p/istanbul_resiliency.pngbin0 -> 15880 bytes
-rw-r--r--docs/files/s3p/istanbul_sdc_stability.pngbin0 -> 75166 bytes
-rwxr-xr-xdocs/files/s3p/jakarta-dashboard.pngbin0 -> 59919 bytes
-rw-r--r--docs/files/s3p/stability_sdnc_memory.pngbin0 -> 22416 bytes
-rw-r--r--docs/files/scaleout/12.pngbin188128 -> 0 bytes
-rw-r--r--docs/files/scaleout/13.pngbin235213 -> 0 bytes
-rw-r--r--docs/files/scaleout/14.pngbin230675 -> 0 bytes
-rw-r--r--docs/files/scaleout/15.pngbin410181 -> 0 bytes
-rw-r--r--docs/files/scaleout/16.pngbin394513 -> 0 bytes
-rw-r--r--docs/files/scaleout/17.pngbin466637 -> 0 bytes
-rw-r--r--docs/files/scaleout/18.pngbin726385 -> 0 bytes
-rw-r--r--docs/files/scaleout/19.pngbin354411 -> 0 bytes
-rw-r--r--docs/files/scaleout/20.pngbin230514 -> 0 bytes
-rw-r--r--docs/files/scaleout/clamp/1.pngbin0 -> 314501 bytes
-rw-r--r--docs/files/scaleout/clamp/10.pngbin0 -> 552939 bytes
-rw-r--r--docs/files/scaleout/clamp/11.pngbin0 -> 535871 bytes
-rw-r--r--docs/files/scaleout/clamp/12.pngbin0 -> 441614 bytes
-rw-r--r--docs/files/scaleout/clamp/13.pngbin0 -> 456595 bytes
-rw-r--r--docs/files/scaleout/clamp/14.pngbin0 -> 463167 bytes
-rw-r--r--docs/files/scaleout/clamp/15.pngbin0 -> 498045 bytes
-rw-r--r--docs/files/scaleout/clamp/16.pngbin0 -> 555752 bytes
-rw-r--r--docs/files/scaleout/clamp/2.pngbin0 -> 322447 bytes
-rw-r--r--docs/files/scaleout/clamp/3.pngbin0 -> 500921 bytes
-rw-r--r--docs/files/scaleout/clamp/4.pngbin0 -> 542819 bytes
-rw-r--r--docs/files/scaleout/clamp/5.pngbin0 -> 494548 bytes
-rw-r--r--docs/files/scaleout/clamp/6.pngbin0 -> 460774 bytes
-rw-r--r--docs/files/scaleout/clamp/7.pngbin0 -> 437053 bytes
-rw-r--r--docs/files/scaleout/clamp/8.pngbin0 -> 508377 bytes
-rw-r--r--docs/files/scaleout/clamp/9.pngbin0 -> 538782 bytes
-rw-r--r--docs/files/scaleout/dcae_blueprint.yaml174
-rw-r--r--docs/files/scaleout/k8s-tca-clamp-policy-05162019.yaml175
-rw-r--r--docs/files/scaleout/latest-tca-guilin.yaml141
-rw-r--r--docs/files/simulators/NF-Simulator.pngbin0 -> 48925 bytes
-rw-r--r--docs/files/softwareUpgrade/APIDecisionTree.pngbin0 -> 54629 bytes
-rw-r--r--docs/files/softwareUpgrade/DirectNetconfYangInterface.pngbin0 -> 22980 bytes
-rw-r--r--docs/files/softwareUpgrade/OnboardingCsar.pngbin0 -> 247900 bytes
-rw-r--r--docs/files/softwareUpgrade/SWUPWorkflow.pngbin0 -> 79409 bytes
-rw-r--r--docs/files/softwareUpgrade/SchemaUpdate.pngbin0 -> 31529 bytes
-rw-r--r--docs/files/softwareUpgrade/ServiceLevelUpgrade.pngbin0 -> 106771 bytes
-rw-r--r--docs/files/softwareUpgrade/ServiceLevelWorkflow.pngbin0 -> 75772 bytes
-rw-r--r--docs/files/softwareUpgrade/WorkflowView.pngbin0 -> 47692 bytes
-rw-r--r--docs/files/softwareUpgrade/serviceModelVersions.pngbin0 -> 374401 bytes
-rw-r--r--docs/files/softwareUpgrade/verifyPNF.pngbin0 -> 348103 bytes
-rw-r--r--docs/files/softwareUpgrade/workflowList.pngbin0 -> 244285 bytes
-rw-r--r--docs/files/tests/test-basic-cnf.pngbin0 -> 56334 bytes
-rw-r--r--docs/files/tests/test-certif.pngbin0 -> 109106 bytes
-rw-r--r--docs/files/tests/test-dashboard.pngbin0 -> 91334 bytes
-rw-r--r--docs/files/tests/test-onap-helm.pngbin0 -> 43068 bytes
-rw-r--r--docs/files/tests/test-onap-k8s.pngbin0 -> 69369 bytes
-rwxr-xr-xdocs/files/vFW_CNF_CDS/Instantiation_topology.pngbin0 -> 162060 bytes
-rw-r--r--docs/files/vFW_CNF_CDS/Native_Helm_Flow.pngbin0 -> 123903 bytes
-rw-r--r--docs/files/vFW_CNF_CDS/healthcheck.pngbin0 -> 68856 bytes
-rw-r--r--docs/files/vFW_CNF_CDS/helm-overrides-steps.pngbin0 -> 112118 bytes
-rw-r--r--docs/files/vFW_CNF_CDS/helm-overrides.pngbin0 -> 7966 bytes
-rw-r--r--docs/files/vFW_CNF_CDS/k8s-resources-response.json843
-rw-r--r--docs/files/vFW_CNF_CDS/postman.zipbin0 -> 12414 bytes
-rw-r--r--docs/files/vFW_CNF_CDS/profile-templating.pngbin0 -> 121959 bytes
-rw-r--r--docs/files/vFW_CNF_CDS/scenarios.pngbin0 -> 71874 bytes
-rw-r--r--docs/files/vFW_CNF_CDS/status-response.json1213
-rwxr-xr-xdocs/files/vFW_CNF_CDS/vFW_CNF_CDS_Flow.pngbin0 -> 104595 bytes
-rwxr-xr-xdocs/files/vFW_CNF_CDS/vFW_Instance_In_Kubernetes.pngbin0 -> 56062 bytes
-rw-r--r--docs/files/vFW_CNF_CDS/vfw-generic-vnf-aai.json167
-rw-r--r--docs/files/vFW_CNF_CDS/vpkg-vf-module-aai.json133
-rw-r--r--docs/files/vLBMS_report.json2
-rw-r--r--docs/files/vcpe_tosca/create_image.pngbin0 -> 32427 bytes
-rw-r--r--docs/files/vcpe_tosca/create_project.pngbin0 -> 121527 bytes
-rw-r--r--docs/files/vcpe_tosca/create_user.pngbin0 -> 92034 bytes
-rw-r--r--docs/files/vcpe_tosca/customer_service.pngbin0 -> 463138 bytes
-rw-r--r--docs/files/vcpe_tosca/image.pngbin0 -> 36508 bytes
-rw-r--r--docs/files/vcpe_tosca/manage_project_user.pngbin0 -> 59063 bytes
-rw-r--r--docs/files/vcpe_tosca/ns_active.pngbin0 -> 360996 bytes
-rw-r--r--docs/files/vcpe_tosca/ns_create.pngbin0 -> 382016 bytes
-rw-r--r--docs/files/vcpe_tosca/ns_create_input.pngbin0 -> 352714 bytes
-rw-r--r--docs/files/vcpe_tosca/ns_delete.pngbin0 -> 375869 bytes
-rw-r--r--docs/files/vcpe_tosca/ns_deleted.pngbin0 -> 484945 bytes
-rw-r--r--docs/files/vcpe_tosca/ns_deleting.pngbin0 -> 468194 bytes
-rw-r--r--docs/files/vcpe_tosca/ns_instance.pngbin0 -> 465672 bytes
-rw-r--r--docs/files/vcpe_tosca/ns_package_list.pngbin0 -> 316778 bytes
-rw-r--r--docs/files/vcpe_tosca/ns_package_onboard.pngbin0 -> 335609 bytes
-rw-r--r--docs/files/vcpe_tosca/ns_vnf_heal.pngbin0 -> 370868 bytes
-rw-r--r--docs/files/vcpe_tosca/ns_vnf_healed.pngbin0 -> 497264 bytes
-rw-r--r--docs/files/vcpe_tosca/ns_vnf_healing.pngbin0 -> 503302 bytes
-rw-r--r--docs/files/vcpe_tosca/ns_vnf_list.pngbin0 -> 497187 bytes
-rw-r--r--docs/files/vcpe_tosca/sdc.pngbin0 -> 263081 bytes
-rw-r--r--docs/files/vcpe_tosca/vim.pngbin0 -> 194469 bytes
-rw-r--r--docs/files/vcpe_tosca/vnfm.pngbin0 -> 177001 bytes
-rw-r--r--docs/files/vfw-1-preload.json313
-rw-r--r--docs/files/vfw-2-preload.json313
-rw-r--r--docs/files/vfwdt-aai-postman.json426
-rw-r--r--docs/files/vfwdt-general-workflow-sd.pngbin0 -> 158564 bytes
-rw-r--r--docs/files/vfwdt-identification-workflow-sd.pngbin0 -> 75840 bytes
-rw-r--r--docs/files/vfwdt-td-workflow-sd.pngbin0 -> 200932 bytes
-rw-r--r--docs/files/vfwdt-upgrade-workflow-sd.pngbin0 -> 143490 bytes
-rw-r--r--docs/files/vfwdt-workflow-general.pngbin0 -> 14271 bytes
-rw-r--r--docs/files/vfwdt-workflow-traffic.pngbin0 -> 16021 bytes
-rw-r--r--docs/files/vfwdt-workflow-upgrade.pngbin0 -> 16124 bytes
-rw-r--r--docs/files/vpkg-preload.json313
-rw-r--r--docs/files/windriver/windriver_CPU.pngbin0 -> 43249 bytes
-rw-r--r--docs/files/windriver/windriver_RAM.pngbin0 -> 52154 bytes
-rw-r--r--docs/files/windriver/windriver_disks.pngbin0 -> 67839 bytes
-rw-r--r--docs/files/windriver/windriver_servers.pngbin0 -> 131462 bytes
-rw-r--r--docs/files/windriver/windrivers_servers2.pngbin0 -> 44165 bytes
-rw-r--r--docs/heat.rst236
-rw-r--r--docs/index.rst14
-rw-r--r--docs/integration-CICD.rst53
-rw-r--r--docs/integration-labs.rst38
-rw-r--r--docs/integration-missions.rst44
-rw-r--r--docs/integration-repositories.rst115
-rw-r--r--docs/integration-resources.rst16
-rw-r--r--docs/integration-s3p.rst276
-rw-r--r--docs/integration-simulators.rst111
-rw-r--r--docs/integration-tests.rst159
-rw-r--r--docs/integration-tooling.rst214
-rw-r--r--docs/onap-integration-ci.rst131
-rw-r--r--docs/onap-oom-heat.rst21
-rw-r--r--docs/release-notes.rst275
-rw-r--r--docs/requirements-docs.txt8
-rw-r--r--docs/schema-update-apis.csv49
-rw-r--r--docs/simulators/nf_simulator.rst148
-rw-r--r--docs/tox.ini31
-rw-r--r--docs/usecases/deprecated_usecases.rst28
-rw-r--r--docs/usecases/release_automated_usecases.rst37
-rw-r--r--docs/usecases/release_non_functional_requirements.rst15
-rw-r--r--docs/usecases/release_requirements.rst15
-rw-r--r--docs/usecases/release_usecases.rst37
240 files changed, 11056 insertions, 2641 deletions
diff --git a/docs/.gitignore b/docs/.gitignore
new file mode 100644
index 000000000..43ca5b671
--- /dev/null
+++ b/docs/.gitignore
@@ -0,0 +1,3 @@
+/.tox
+/_build/*
+/__pycache__/*
diff --git a/docs/_static/css/ribbon.css b/docs/_static/css/ribbon.css
new file mode 100644
index 000000000..7949130b3
--- /dev/null
+++ b/docs/_static/css/ribbon.css
@@ -0,0 +1,63 @@
+.ribbon {
+ z-index: 1000;
+ background-color: #a00;
+ overflow: hidden;
+ white-space: nowrap;
+ position: fixed;
+ top: 25px;
+ right: -50px;
+ -webkit-transform: rotate(45deg);
+ -moz-transform: rotate(45deg);
+ -ms-transform: rotate(45deg);
+ -o-transform: rotate(45deg);
+ transform: rotate(45deg);
+ -webkit-box-shadow: 0 0 10px #888;
+ -moz-box-shadow: 0 0 10px #888;
+ box-shadow: 0 0 10px #888;
+
+}
+
+.ribbon a {
+ border: 1px solid #faa;
+ color: #fff;
+ display: block;
+ font: bold 81.25% 'Helvetica Neue', Helvetica, Arial, sans-serif;
+ margin: 1px 0;
+ padding: 10px 50px;
+ text-align: center;
+ text-decoration: none;
+ text-shadow: 0 0 5px #444;
+ transition: 0.5s;
+}
+
+.ribbon a:hover {
+ background: #c11;
+ color: #fff;
+}
+
+
+/* override table width restrictions */
+@media screen and (min-width: 767px) {
+
+ .wy-table-responsive table td, .wy-table-responsive table th {
+ /* !important prevents the common CSS stylesheets from overriding
+ this as on RTD they are loaded after this stylesheet */
+ white-space: normal !important;
+ }
+
+ .wy-table-responsive {
+ overflow: visible !important;
+ }
+}
+
+@media screen and (max-width: 767px) {
+ .wy-table-responsive table td {
+ white-space: nowrap;
+ }
+}
+
+/* fix width of the screen */
+
+.wy-nav-content {
+ max-width: 800px;
+}
diff --git a/docs/_static/favicon.ico b/docs/_static/favicon.ico
new file mode 100755
index 000000000..cb712ebd2
--- /dev/null
+++ b/docs/_static/favicon.ico
Binary files differ
diff --git a/docs/_static/logo_onap_2017.png b/docs/_static/logo_onap_2017.png
new file mode 100644
index 000000000..5d064f431
--- /dev/null
+++ b/docs/_static/logo_onap_2017.png
Binary files differ
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 000000000..14f37ca67
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,72 @@
+project = "onap"
+release = "master"
+version = "master"
+
+author = "Open Network Automation Platform"
+# yamllint disable-line rule:line-length
+copyright = "ONAP. Licensed under Creative Commons Attribution 4.0 International License"
+
+pygments_style = "sphinx"
+html_theme = "sphinx_rtd_theme"
+html_theme_options = {
+ "style_nav_header_background": "white",
+ "sticky_navigation": "False" }
+html_logo = "_static/logo_onap_2017.png"
+html_favicon = "_static/favicon.ico"
+html_static_path = ["_static"]
+html_show_sphinx = False
+
+extensions = [
+ 'sphinx.ext.intersphinx',
+ 'sphinx.ext.graphviz',
+ 'sphinxcontrib.blockdiag',
+ 'sphinxcontrib.seqdiag',
+ 'sphinxcontrib.swaggerdoc',
+ 'sphinxcontrib.plantuml'
+]
+
+#
+# Map to 'latest' if this file is used in 'latest' (master) 'doc' branch.
+# Change to {releasename} after you have created the new 'doc' branch.
+#
+
+branch = 'latest'
+
+intersphinx_mapping = {}
+doc_url = 'https://docs.onap.org/projects'
+master_doc = 'index'
+
+exclude_patterns = ['.tox']
+
+spelling_word_list_filename='spelling_wordlist.txt'
+spelling_lang = "en_GB"
+
+#
+# Example:
+# intersphinx_mapping['onap-aai-aai-common'] = ('{}/onap-aai-aai-common/en/%s'.format(doc_url) % branch, None)
+#
+intersphinx_mapping = {}
+intersphinx_mapping['onap-oom'] = ('{}/onap-oom/en/%s'.format(doc_url) % branch, None)
+intersphinx_mapping['onap-cli'] = ('{}/onap-cli/en/%s'.format(doc_url) % branch, None)
+
+html_last_updated_fmt = '%d-%b-%y %H:%M'
+
+def setup(app):
+ app.add_css_file("css/ribbon.css")
+
+linkcheck_ignore = [
+ r'http://localhost:\d+/'
+ r'http://localhost:.*',
+ r'http://CONSUL_SERVER_UI:30270/ui/#/dc1/services',
+ r'https://.*h=frankfurt',
+ r'http.*frankfurt.*',
+ r'http.*simpledemo.onap.org.*',
+ r'http://ANY_K8S_IP.*',
+ r'http://so-monitoring:30224',
+ r'http://SINK_IP_ADDRESS:667.*',
+ r'http.*K8S_HOST:30227.*',
+ r'http.*K8S_NODE_IP.*',
+ r'http.*REPO_IP.*',
+ r'http://team.onap.eu',
+ r'https://tools.ietf.org/html/rfc8345'
+]
diff --git a/docs/docs_5G_Bulk_PM.rst b/docs/docs_5G_Bulk_PM.rst
index 71d8778cd..7bdc06324 100644
--- a/docs/docs_5G_Bulk_PM.rst
+++ b/docs/docs_5G_Bulk_PM.rst
@@ -1,25 +1,27 @@
.. This work is licensed under a Creative Commons Attribution 4.0
International License. http://creativecommons.org/licenses/by/4.0
-
+
.. _docs_5g_bulk_pm:
+:orphan:
+
5G Bulk PM
----------
5G Bulk PM Package
-~~~~~~~~~~~~
-- 5G Bulk PM Package: https://wiki.onap.org/display/DW/5G+-+Bulk+PM+-+Integration+Test+Case
+~~~~~~~~~~~~~~~~~~
+- 5G Bulk PM Package: https://wiki.onap.org/pages/viewpage.action?pageId=38121543
Description
~~~~~~~~~~~
-The Bulk PM feature consists of an event-driven bulk transfer of monitoring data from an xNF to ONAP/DCAE. A micro-service will listen for 'FileReady' VES events sent from an xNF via the VES collector. Once files become available the collector micro-service will fetch them using protocol such as FTPES (committed) or SFTP. The collected data files are published internally on a DMaaP Data Router (DR) feed.
-The ONAP 5G Bulk PM Use Case Wiki Page can be found here:
+The Bulk PM feature consists of an event-driven bulk transfer of monitoring data from an xNF to ONAP/DCAE. A micro-service will listen for 'FileReady' VES events sent from an xNF via the VES collector. Once files become available the collector micro-service will fetch them using protocol such as FTPES (committed) or SFTP. The collected data files are published internally on a DMaaP Data Router (DR) feed.
+The ONAP 5G Bulk PM Use Case Wiki Page can be found here:
https://wiki.onap.org/display/DW/5G+-+Bulk+PM
How to Use
~~~~~~~~~~
-See the following instructions on how to manually test the feature. https://wiki.onap.org/display/DW/5G+Bulk+PM+Usecase+Testing+@+Ericsson+Lab+-+Casablanca
-The tests can also be executed using the Robot framework, information can be found https://wiki.onap.org/display/DW/5G+-+Bulk+PM+-+Integration+Test+Cases
+See the following instructions on how to manually test the feature. https://wiki.onap.org/display/DW/5G+Bulk+PM+Usecase+Testing+\@+Ericsson+Lab+-+Casablanca
+The tests can also be executed using the Robot framework, information can be found https://wiki.onap.org/pages/viewpage.action?pageId=38121543
Test Status and Plans
~~~~~~~~~~~~~~~~~~~~~
@@ -28,4 +30,3 @@ To see information on the status of the test see https://wiki.onap.org/display/D
Known Issues and Resolutions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
none.
-
diff --git a/docs/docs_5G_Configuration_over_NETCONF.rst b/docs/docs_5G_Configuration_over_NETCONF.rst
index 9cf8643c5..d8701a655 100644
--- a/docs/docs_5G_Configuration_over_NETCONF.rst
+++ b/docs/docs_5G_Configuration_over_NETCONF.rst
@@ -1,10 +1,12 @@
.. This work is licensed under a Creative Commons Attribution 4.0
International License. http://creativecommons.org/licenses/by/4.0
-
+
.. _docs_5G_Configuration_over_NETCONF:
+:orphan:
+
5G - Configuration over NETCONF
-----------------------
+-------------------------------
Description
@@ -16,13 +18,13 @@ This use case is intended to be applicable for 5G base stations and other nodes
**Useful Links**
-- `5G - Configuration with NETCONF documentation <https://wiki.onap.org/display/DW/5G+-+Configuration+with+NETCONF>
-- `5G - Configuration with NETCONF - Integtion Test Cases <https://wiki.onap.org/pages/viewpage.action?pageId=58229781&src=contextnavipagetreemode>
+- `5G - Configuration with NETCONF documentation <https://wiki.onap.org/display/DW/5G+-+Configuration+with+NETCONF>`_
+- `5G - Configuration with NETCONF - Integtion Test Cases <https://wiki.onap.org/pages/viewpage.action?pageId=58229781&src=contextnavipagetreemode>`_
How to Use
~~~~~~~~~~
-Set up certificate in SDNC using <https://docs.onap.org/en/dublin/submodules/sdnc/oam.git/docs/cert_installation.html?highlight=SDNC>
+Set up certificate in SDNC using <https://docs.onap.org/projects/onap-sdnc-oam/en/latest/cert_installation.html>
As this usecase is extention of PnP PNF flow so run PnP usecase following running this usecase follow link <https://wiki.onap.org/display/DW/5G+-+PNF+Plug+and+Play>
diff --git a/docs/docs_5G_NRM_Configuration.rst b/docs/docs_5G_NRM_Configuration.rst
new file mode 100644
index 000000000..1deeb97e8
--- /dev/null
+++ b/docs/docs_5G_NRM_Configuration.rst
@@ -0,0 +1,41 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+ International License. http://creativecommons.org/licenses/by/4.0
+
+.. _docs_5G_NRM_Configuration:
+
+:orphan:
+
+5G NRM (Network Resource Model) Configuration
+---------------------------------------------
+
+Description
+~~~~~~~~~~~
+Network Resource Model (NRM) configuration management allows service providers to control and monitor the actual configuration on the Network Resources, which are the fundamental resources to the mobility networks. Considering the huge number of existing information object classes (IOC) and increasing IOCs in various domains, this use case is to handle the NRM configuration management in a dynamic manner. Moreover, it uses the http-based restful solution in R6 and other solutions may be possible.
+
+Useful Links
+============
+`5G NRM Configuration in R6 Wiki Page <https://wiki.onap.org/display/DW/5G+Network+Resource+Model+%28NRM%29+Configuration+in+R6+Frankfurt>`_
+
+Current Status in Frankfurt
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+* Provide a restful-executor in CDS blueprint processor.
+* Provide a simplified generic provisioning management service provider for simulating an external service (may be deployed in EMS or deployed standalone) for integration test.
+
+How to Use
+~~~~~~~~~~
+The pre-conditions are:
+* CDS containers are ready to use.
+* The external provisioning management service provider (could be a simulator) is ready to use.
+* At design time, CDS controller blueprint provided by xNF vendors is designed and ready for CDS.
+* Service instantiation is completed. It means users of ONAP could know the xNF instance. For this use case in R6, one PNF instance is selected.
+
+At run time, NRM configuration management is triggered when the operator provides the selected PNF instance, expected managed object instances. Then the procedure is executed in CDS:
+a. CDS sends request(s) with action-identifier{actionName, blueprintName, blueprintVersion} to the blueprint processor inside the controller using CDS self-service API.
+b. Controller/blueprint processor use the corresponding executor (and blueprint scripts) and send http requests to the external provisioning management service provider.
+c. The external provisioning management service provider is responsible of configuration management and sends responses to CDS.
+
+Test Status and Plans
+~~~~~~~~~~~~~~~~~~~~~
+To see information on the status of the test cases, please follow the link below:
+
+`5G NRM Configuration Test Status <https://wiki.onap.org/display/DW/5G+Network+Resource+Model+%28NRM%29+Configuration+in+R6+Frankfurt#id-5GNetworkResourceModel(NRM)ConfigurationinR6Frankfurt-TestStatus>`_
diff --git a/docs/docs_5G_PNF_Software_Upgrade.rst b/docs/docs_5G_PNF_Software_Upgrade.rst
index 0424a3116..a4d435b69 100644
--- a/docs/docs_5G_PNF_Software_Upgrade.rst
+++ b/docs/docs_5G_PNF_Software_Upgrade.rst
@@ -1,60 +1,57 @@
-.. This work is licensed under a Creative Commons Attribution 4.0
- International License. http://creativecommons.org/licenses/by/4.0
-
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
.. _docs_5g_pnf_software_upgrade:
+:orphan:
+
5G PNF Software Upgrade
-----------------------------
+-----------------------
Description
~~~~~~~~~~~
-The 5G PNF Software upgrade use case shows how users/network operators can modify the software of PNF instance during installation or regular maintaince. This use case is one aspect of Software Management. This could be used to update the PNF software to a newer or older version of software.
-**Useful Links**
-- `5G - PNF software upgrade use case documentation <https://wiki.onap.org/pages/viewpage.action?pageId=40206496>`_
-- `5G - PNF software upgrade Integration test case status for Dublin release <https://wiki.onap.org/display/DW/5G+-+PNF+SW+Upgrade+-+Integration+Test+Cases>`_
+The 5G PNF Software upgrade use case shows how users/network operators can modify the software of a PNF instance during installation or regular maintenance. This use case is one aspect of Software Management. This could be used to update the PNF software to a different version of software.
+
+Useful Link
+~~~~~~~~~~~
+
+`PNF Software Upgrade Wiki Page <https://wiki.onap.org/display/DW/PNF+software+upgrade+in+R6+Frankfurt>`_
+
+
+Current Status in Frankfurt
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+PNF Software Upgrade Scenarios
+------------------------------
+
+There are 3 PNF software upgrade scenarios supported in Frankfurt release:
-**Current status in Dublin**
-- with the support of an EM
-- LCM API (focus on controller only)
-- integration of basic 3GPP SwM interfaces (*)
-- ansible protocol only
-Note: In Dublin, Controller provided four related APIs (precheck, postcheck, upgrade and rollback), which were finally translated to invoke interfaces provided by EM. Rollback API is to call swFallback operation, and Upgrade API is to call downloadNESw, installNESw and activateNESw operations (Ref. 3GPP TS 32.532[1]).
+* `Using direct Netconf/Yang interface with PNF <docs_5G_PNF_Software_Upgrade_direct_netconf_yang>`
-**Future Plans**
-- E2E PNF Software upgrade both for design and runtime
-- Generic workflow for demonstration
+ - (https://wiki.onap.org/pages/viewpage.action?pageId=64007309)
-How to Use
-~~~~~~~~~~
-Upgrading PNF (instance) software requires the user/network operator to trigger the upgrade operation from the UI, e.g. VID or UUI. In Dublin, users need use ONAP Controllers GUI or publish DMaaP messages to trigger the LCM opeations, which are pre-check, post-check, upgrade and rollback. After receiving the API requests, the ONAP controllers will communicate to EMS through south-bound adaptors, which is Ansible protocol only in Dublin.
+* `Using Ansible protocol with EM <docs_5G_PNF_Software_Upgrade_ansible_with_EM>`
-Note that, both APPC and SDNC in R4 supported Ansible. Taking SDNC and Prechecking as an example, the steps are as follows:
+ - (https://wiki.onap.org/pages/viewpage.action?pageId=64007357)
-1) `In ansible server container, prepare the ssh connection conditions to the external controller, both ssh key file and ansible inventory configuration`_
+* `Using Netconf/Yang interface with EM <docs_5G_PNF_Software_Upgrade_netconf_with_EM>`
-2) `In sdnc controller container, update the dg configuration file: lcm-dg.properties.`_
+ - (https://wiki.onap.org/pages/viewpage.action?pageId=64008675)
-For example:
-::
-lcm.pnf.upgrade-pre-check.playbookname=ansible_huawei_precheck
-lcm.pnf.upgrade-post-check.playbookname=ansible_huawei_postcheck
-lcm.pnf.upgrade-software.playbookname=ansible_huawei_upgrade
-lcm.pnf.upgrade-rollback.playbookname=ansible_huawei_rollback
+Common tasks for all scenarios
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-3) `Login controller UI, access the pre-check LCM operation (or other operations) and send request, the detailed request parameters can be found in corresponding test case link.`_
+SO Workflows
+~~~~~~~~~~~~
-4) `The HTTP API response code 200 and LCM retured code 400 (See APPC return code design specification) indicate success, otherwise failed.`_
+Common SO workflows are used with generic SO building blocks which can be used for any PNF software upgrade scenarios. In Frankfurt release, a PNF software upgrade workflow and a PNF preparation workflow have been created.
-Test Status and Plans
-~~~~~~~~~~~~~~~~~~~~~
-To see information on the status of the test case: https://wiki.onap.org/display/DW/5G+-+PNF+SW+Upgrade+-+Integration+Test+Cases
+ .. image:: files/softwareUpgrade/SWUPWorkflow.png
-References
-==========
-[1] TS 32.532,Telecommunication management; Software management (SwM); Integration Reference Point (IRP); Information Service (IS)
+LCM evolution with API Decision Tree
+====================================
-Known Issues and Resolutions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-None
+A decision point has been introduced in the Frankfurt release. The service designer needs to indicate which LCM API they would like to use for the LCM operations on the selected PNF source at design time (via SDC). The possible LCM APIs are: SO-REF-DATA (default), CDS, SDNC, or APPC.
+ .. image:: files/softwareUpgrade/APIDecisionTree.png
diff --git a/docs/docs_5G_PNF_Software_Upgrade_With_Schema_Update.rst b/docs/docs_5G_PNF_Software_Upgrade_With_Schema_Update.rst
new file mode 100644
index 000000000..c844f1f5d
--- /dev/null
+++ b/docs/docs_5G_PNF_Software_Upgrade_With_Schema_Update.rst
@@ -0,0 +1,114 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+.. _docs_5g_pnf_software_upgrade_with_schema_update:
+
+:orphan:
+
+Support xNF Software Upgrade in association to schema updates
+-------------------------------------------------------------
+
+Description
+~~~~~~~~~~~
+
+A schema update in relation to an xNF software upgrade is a routine procedure for network upgrades to support new xNF features, improve efficiency or increase xNF capacity in the field, and to eliminate bugs. This use case provides to ONAP an advantage in orchestrating and managing the Life Cycle of Network Services in-line with business and service objectives. Deployment and orchestration of new services over CNFs, VNFs and PNFs in a model and software driven way simplifies network management. It enables operators and service providers to manage the Life Cycle of a Network Service. Assuring continuity of operation of services is crucial for production and carrier grade environments. The update or upgrade of software, and the consequent required changes to the service model, is a natural part of the service instance life cycle. Without support for ONAP service update with schema change, service life cycle management by ONAP can be very difficult, which can impact the quality and continuity of services.
+
+
+Current Status in Guilin
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+xNF Software Upgrade with xNF artifacts updating in Release G
+-------------------------------------------------------------
+
+The following is the xNF software upgrade procedure with schema update.
+
+.. image:: files/softwareUpgrade/SchemaUpdate.png
+
+1. A vendor shall provide
+ a. a new VNF/PNF package with updated artifacts, and
+ b. the new VNF/ PNF software image to the operator.
+2. At receiving of the new package, the operator shall
+ a. onboard the new package and create a new resource template or update the existing resource template (PNF or VNF)
+ b. update the existing service template with the new or updated resource template
+ c. distribute the updated service template to run time.
+3. At run time, the operator shall, based on the updated service template,
+ a. upgrade a service instance and its resource instances, and
+ b. update the AAI entry accordingly
+
+The above procedure is based on the following conditions:
+
+* When updating a service template at design time, the resource instance name and network topology shall be unchanged.
+
+* A service template must be upgradable from any previous versions, including that any new resource template of a given resource instance (within the service template) must be upgradeable from any previous resource template versions.
+
+* At run time, resource upgrade sequence is not sensitive in service instance upgrading procedure.
+
+Function limitations in Release G
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* The operator shall know the possible/feasible resource upgrade path based on vendor provided information.
+
+* When operator updating a service template, the updated service template must be upgradable from any previous versions:
+ - Within the service template, the resource instance name and network topology are unchanged.
+ - The new resource template of a given resource instance (within the service template) must be upgradeable from any previous resource template versions.
+
+.. note::
+ This is to avoid adding possible upgrade paths info and upgrade sequence info into SDC model
+
+Update a xNF resource template from a new onboarding package
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When updating a resource template from a new VSP csar, the new onboarded descriptor and the new onboarded artifacts will be transformed into the new version of the resource csar. The current resource name and invariantUUID will be retained.
+
+As an alternative, a resource csar can be updated manually using SDC GUI.
+
+.. image:: files/softwareUpgrade/OnboardingCsar.png
+
+The update path (green path in above picture) is supported in the current SDC implementation. However, there are bugs which need to be fixed.
+
+Service level LCM workflow in SO
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. image:: files/softwareUpgrade/ServiceLevelWorkflow.png
+
+A generic SO workflow is created which can be used to upgrade one service instance based on the updated service template. This service level workflow is network function type independent. When upgrading one resource instance, the subsequent resource level upgrade workflow is selected based on the network function type. It contains the following main steps:
+
+* Service Level Preparation
+ - Creating resource template instance upgrade list by comparing the service templates
+ - Select a resource level health check workflow based on the resource type
+ - Execute the selected resource level health check workflow on all resource instances within the service
+* Service Level Upgrade
+ - Select a resource level upgrade workflow based on the resource type
+  - Execute the selected resource level upgrade workflow on each upgrading resource instance
+ - Update the software version, model-invariant-id and model-version-id of the resource template in the A&AI entry at end of each Resource level upgrade workflow
+* Service Level Update
+ - Update the model-version-id of the service template in the A&AI entry
+* Service Level postCheck
+ - Select a resource level health check workflow based on the resource type
+ - Execute the selected resource level health check workflow on all resource instances within the service
+
+The following is an example of the service level workflow with PNF upgrade sub-workflow is called at Service Level Upgrade step:
+
+.. image:: files/softwareUpgrade/ServiceLevelUpgrade.png
+
+Workflow view
+~~~~~~~~~~~~~
+
+.. image:: files/softwareUpgrade/WorkflowView.png
+
+SO APIs
+~~~~~~~
+.. csv-table:: use case table
+ :file: schema-update-apis.csv
+ :widths: 60,20,20
+ :header-rows: 1
+
+Reference
+~~~~~~~~~~~
+
+`PNF Software Upgrade with Schema Update Wiki Page <https://wiki.onap.org/pages/viewpage.action?pageId=81400388#SupportxNFSoftwareUpgradeinassociationtoschemaupdates-DevelopmentStatus>`_
+
+Testing Procedure
+~~~~~~~~~~~~~~~~~~
+
+:ref:`Testing 5G PNF Software Upgrade with Schema Update <docs_Testing_5G_PNF_Software_Upgrade_With_Schema_Update>` \ No newline at end of file
diff --git a/docs/docs_5G_PNF_Software_Upgrade_ansible_with_EM.rst b/docs/docs_5G_PNF_Software_Upgrade_ansible_with_EM.rst
new file mode 100644
index 000000000..6426446eb
--- /dev/null
+++ b/docs/docs_5G_PNF_Software_Upgrade_ansible_with_EM.rst
@@ -0,0 +1,35 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+.. _docs_5g_pnf_software_upgrade_ansible_with_EM:
+
+:orphan:
+
+PNF Software Upgrade Scenario: Using Ansible protocol with EM
+-------------------------------------------------------------
+
+Software Upgrade Procedure
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+With this scenario, the pre-conditions are:
+
+* SO PNF software upgrade workflows are ready to use. For this scenario, the CONTROLLER_ACTOR is set for SDNC client for the API selection decision.
+* Service instantiation is completed, including PNF PnP. It means a PNF instance is in operation and is available for ONAP (maybe via EMS).
+* ONAP Controller (SDNC and ansible server) and DMaaP are ready to use. It means necessary ansible connection and DMaaP topics are ready.
+* EMS has direct ansible interface to the ansible server. The underlying protocol is SSH.
+
+At run time, the service provider in R6 can use CLI to trigger the PNF in-place software upgrade procedure by selecting the existing PNF software upgrade workflow or uploading a custom workflow, as well as an identifier of a PNF instance, the target software version and optional json-formatted payload.
+
+Then the software upgrade workflow is executed as follows:
+
+a. SO sends request(s) with input {action, action-identifiers, common header, and optional payload} to SDNC API handler using traditional LCM API.
+b. SDNC API handler executes corresponding DG and sends requests to the ansible server.
+c. The ansible server executes the ansible playbook with the EMS. Then the EMS is responsible for the software upgrade procedure of the selected PNF instance.
+d. Repeat above steps for each SO building block in the corresponding PNF software upgrade workflow.
+
+Test Status and Plans
+~~~~~~~~~~~~~~~~~~~~~
+
+To see information on the status of the test cases, please follow the link below:
+
+`Enhancement on PNF software upgrade using Ansible Test Status <https://wiki.onap.org/pages/viewpage.action?pageId=64007357#EnhancementonPNFS/WUpgradeusingAnsible-TestStatus>`_
diff --git a/docs/docs_5G_PNF_Software_Upgrade_direct_netconf_yang.rst b/docs/docs_5G_PNF_Software_Upgrade_direct_netconf_yang.rst
new file mode 100644
index 000000000..24098cdc4
--- /dev/null
+++ b/docs/docs_5G_PNF_Software_Upgrade_direct_netconf_yang.rst
@@ -0,0 +1,37 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+.. _docs_5g_pnf_software_upgrade_direct_netconf_yang:
+
+:orphan:
+
+PNF Software Upgrade Scenario: Using Direct Netconf/Yang interface with PNF
+===========================================================================
+
+Software Upgrade Procedure
+---------------------------
+
+With this scenario, the pre-conditions are:
+
+* SO PNF software upgrade workflows are ready to use
+* An SDC service template with one PNF resource has been designed (including CBA association) and has been distributed
+* Service instantiation is completed, including PNF PnP, meaning a PNF instance is in operation with connectivity between PNF-ONAP, PNF-SFTP
+* At design time, the CONTROLLER_ACTOR is set for CDS client for the API selection decision
+* PNF has direct NETCONF/YANG interface configured which can be reachable from ONAP controller.
+
+At run time, the PNF in-place software upgrade procedure is triggered when the operator provides the selected PNF software upgrade workflow, a PNF instance, and the target software version using VID GUI or CLI.
+Then the software upgrade workflow is executed in SO:
+
+a. SO sends CDS request(s) with action-identifier {actionName, blueprintName, blueprintVersion} to the blueprint processor inside the controller using CDS self-service API
+b. Controller/blueprint processor executes the blueprint scripts including sending NETCONF request(s) to the PNF instance via the direct NETCONF interface.
+c. Repeat above two steps for each SO building blocks.
+
+ .. image:: files/softwareUpgrade/DirectNetconfYangInterface.png
+
+
+Test Status and Plans
+---------------------
+
+To see information on the status of the test cases please follow the link below:
+
+`PNF Software Upgrade Test Status <https://wiki.onap.org/display/DW/PNF+software+upgrade+in+R6+Frankfurt#PNFsoftwareupgradeinR6Frankfurt-TestStatus>`_
diff --git a/docs/docs_5G_PNF_Software_Upgrade_netconf_with_EM.rst b/docs/docs_5G_PNF_Software_Upgrade_netconf_with_EM.rst
new file mode 100644
index 000000000..75eb244e1
--- /dev/null
+++ b/docs/docs_5G_PNF_Software_Upgrade_netconf_with_EM.rst
@@ -0,0 +1,35 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+.. _docs_5g_pnf_software_upgrade_netconf_with_EM:
+
+:orphan:
+
+PNF Software Upgrade Scenario: Using Netconf/Yang interface with EM
+-------------------------------------------------------------------
+
+Software Upgrade Procedure
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+With this scenario, the pre-conditions are:
+
+* SO PNF software upgrade workflows are ready to use.
+* An SDC service template with one PNF resource has been designed (including CBA association) and has been distributed.
+* Service instantiation is completed, including PNF PnP.
+* At design time, the CONTROLLER_ACTOR is set for CDS client for the API selection decision.
+* EMS (with netconf capability and suitable software management yang model) is ready to use. It has direct NETCONF/YANG interface configured which can be reachable from CDS.
+
+At run time, the service provider in R6 can use CLI to trigger the PNF in-place software upgrade procedure by selecting the existing PNF software upgrade workflow or uploading a custom workflow, as well as an identifier of a PNF instance, the target software version.
+
+Then the software upgrade workflow is executed as follows:
+
+a. SO sends CDS request(s) with action-identifier {actionName, blueprintName, blueprintVersion} to the blueprint processor inside the controller using CDS self-service API.
+b. Controller/blueprint processor executes the blueprint scripts including sending NETCONF request(s) to the EMS via the direct NETCONF interface. Then the EMS is responsible for the software upgrade procedure of the selected PNF instance.
+c. Repeat above two steps for each SO building block in the corresponding PNF software upgrade workflow.
+
+Test Status and Plans
+~~~~~~~~~~~~~~~~~~~~~
+
+To see information on the status of the test cases, please follow the link below:
+
+`PNF Software Upgrade with netconf/yang interface with EM Test Status <https://wiki.onap.org/pages/viewpage.action?pageId=64008675>`_
diff --git a/docs/docs_5G_oof_pci.rst b/docs/docs_5G_oof_pci.rst
deleted file mode 100644
index 31cfecb0e..000000000
--- a/docs/docs_5G_oof_pci.rst
+++ /dev/null
@@ -1,43 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0
- International License. http://creativecommons.org/licenses/by/4.0
-
-.. _docs_5G_oof_pci:
-
-OOF-PCI
---------
-
-Description
-~~~~~~~~~~~
-The 5G OOF-PCI use case is an implementation of a SON (Self-Organizing Networks) algorithm for Physical Cell ID (PCI) optimization and the centralized Automatic Neighbor Relations (ANR) function (blacklisting a neighbor for handovers) in a 4G/5G network using the ONAP Optimization Framework (OOF). This use case began with just PCI optimization use case in Casablanca. Further details of Casablanca scope and impacts are described in https://docs.onap.org/en/casablanca/submodules/integration.git/docs/docs_5G_oof_pci.html#docs-5g-oof-pci
-
-For Dublin release, the earlier PCI-Handler MS which was a standalone MS is renamed as SON-Handler MS and onboarded as a micro-service in DCAE. Enhancements were made to Policy and SDN-C. The Config DB functionality (containing configuration details of the RAN), and some of the additions/fixes done to SDN-C are not part of the official Dublin release functionality, but are part of the full use case are only a Proof Of Concept (POC). These code changes in SDN-C are targeted for submission in El Alto release.
-
-In addition, the POC also has a RAN Simulator providing a simulated Radio Access Network (RAN) with a number of netconf servers simulating PNF elements. The functionality of the RAN Simulator has also been enhanced from the Casablanca use case to (a) generate alarms for PCI collision/confusion and (b) generate handover metrics for the different neighbor pairs (for the ANR use case).
-
-All details regarding the use case for Dublin can be found here:
-https://wiki.onap.org/display/DW/OOF-PCI+Use+Case+-+Dublin+Release+-+ONAP+based+SON+for+PCI+and+ANR
-
-The main use case page is https://wiki.onap.org/display/DW/5G+-+OOF+%28ONAP+Optimization+Framework%29+and+PCI+%28Physical+Cell+ID%29+Optimization
-
-
-How to Use
-~~~~~~~~~~
-The OOF-PCI use case is implemented in the Rutgers University (Winlab) ONAP Wireless Lab (OWL). For details, please see: https://wiki.onap.org/pages/viewpage.action?pageId=45298557 .
-This page includes instructions for access to the lab. Since this is a POC at this stage, testing is done manually.
-
-For all instructions about installing the components and test plans, please see:
-
-https://wiki.onap.org/display/DW/Installation+Aspects
-Son-handler installation - https://onap.readthedocs.io/en/latest/submodules/dcaegen2.git/docs/sections/services/son-handler/installation.html
-
-
-
-Test Status and Plans
-~~~~~~~~~~~~~~~~~~~~~
-For Dublin release, the OOF-PCI use case is a Proof of Concept (POC). OOF was enhanced with joint PCI-ANR optimization, SON-Handler MS was functionally enhanced and also onboarded on to DCAE, and Policy was also enhanced with a new control loop for ANR and control loop extension to receive feedback of actions. The pairwise testing was done in Windriver lab (https://wiki.onap.org/display/DW/Integration+Testing). Other non-release functions are all tested as part of the PoC in the Rutgers University (Winlab) ONAP Wireless Lab (OWL). To see information about test plans, please see https://wiki.onap.org/display/DW/Functional+Testing and https://wiki.onap.org/display/DW/Use+case+testing.
-
-
-Known Issues and Resolutions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-(a) 2 known issues (Medium): CCSDK-1399 and CCSDK-1400
-(b) It is intended to have the RAN Simulator support sufficient Honeycomb netconf server instances to simulate 2000 cells. However, this number may be lower if there are hardware limitatons.
diff --git a/docs/docs_5G_oof_son.rst b/docs/docs_5G_oof_son.rst
new file mode 100644
index 000000000..0ec539d76
--- /dev/null
+++ b/docs/docs_5G_oof_son.rst
@@ -0,0 +1,128 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+ International License. http://creativecommons.org/licenses/by/4.0
+
+.. _docs_5G_oof_son:
+
+:orphan:
+
+5G-SON (earlier name was OOF-SON)
+---------------------------------
+
+Description
+~~~~~~~~~~~
+
+The 5G OOF-SON (earlier name was OOF-PCI) use case is an implementation of a **SON (Self-Organizing Networks)** algorithm for Physical Cell ID (PCI) optimization and the centralized Automatic Neighbor Relations (ANR) function (blacklisting a neighbor for handovers) in a 4G/5G network using the ONAP Optimization Framework (OOF).
+
+The use case is a multi-release effort. This use case began with the implementation of PCI optimization in the Casablanca release. In the Dublin release, the SON-Handler MS was onboarded as a micro-service in DCAE. Enhancements were made to Policy and SDN-C components.
+
+
+RAN Simulator
+~~~~~~~~~~~~~
+
+As part of this use case work, the SON Use Case team developed RAN-Sim, which is a RAN Simulator providing a simulated Radio Access Network (RAN) with a number of netconf servers simulating PNF elements representing gNodeBs. The functionality of the RAN Simulator includes:
+
+- Input of a sample topology of cells, with netconf servers (representing DUs) representing groups of cells
+- Representation of cell locations and cell neighbor relations
+- Generation of neighbor-list-update messages
+- Generation of alarms for PCI collision/confusion and
+- Generation of handover metrics for different neighbor pairs (for the ANR use case).
+- Implementation of an O1 interface termination for CU/DU NFs
+- Implementation of an A1 interface termination with A1-Termination and RAN-App (new for Kohn release)
+
+All above functionality are enabled using a simple UI.
+
+
+Frankfurt Release
+~~~~~~~~~~~~~~~~~
+
+In Frankfurt release, the following were the main enhancements:
+
+- Introduction of Control Loop Coordination functionality, wherein a second control loop execution is denied by Policy component when another control loop is in progress.
+- Introduction of adaptive SON, wherein a set of cells whose PCI values are fixed (i.e., cannot be changed during PCI optimization) are considered during the PCI optimization.
+- In addition, the first step towards O-RAN alignment is being taken with SDN-C (R) being able to receive a DMaaP message containing configuration updates (which would be triggered when a neighbor-list-change occurs in the RAN and is communicated to ONAP over VES). `Details of this implementation <https://wiki.onap.org/display/DW/CM+Notification+Support+in+ONAP>`_
+
+
+Istanbul Release
+~~~~~~~~~~~~~~~~~
+
+In the Istanbul release, the following are the main enhancements:
+
+- Updates in FM reporting and fault handling to be in line with VES 7.2, 3GPP and smoother future alignment with O-RAN O1
+- Alignment with 3GPP NRM/O-RAN yang models for SON use case
+- Use CPS for storing/retrieving RAN config data for this use case (was stretch goal, partially addressed)
+- Configuration Management (CM) notifications over VES based on VES 7.2 (was stretch goal, partially addressed)
+
+The end-to-end setup for the use case requires a database which stores the cell related details of the RAN. This database is ConfigDB till we complete the transition to using CPS DB/TBDMT. The database is updated by SDN-C (R), and is accessed by SON-Handler MS and OOF for fetching (e.g., neighbor list, PNF id, etc):
+
+- `The Config DB implementation <https://github.com/onap-oof-pci-poc/sdnc/tree/master/ConfigDB/Dublin>`_
+- `Swagger JSON API documentation <https://github.com/onap-oof-pci-poc/sdnc/blob/master/ConfigDB/Dublin/SDNC_ConfigDB_API_v3.0.0.json>`_
+
+As part of Istanbul release work, progress was made towards the goal of transitioning from ConfigDB to CPS DB. CPS DB is fully based on yang models, and we have developed a modeling approach using two yang models:
+
+- Primary model: (e.g., ran-network). This is a modular sub-set of, and fully aligned with, ORAN/3GPP 28.541 NRM yang model. This aligns with device models and vendor models (base and extensions)
+
+- Secondary model: (e.g, cps-ran-schema-model) This model captures information which is not present in ORAN model, e.g., region-to-cell (CU) mapping, latitude/longitude of DU. This also has derived information for API/query efficiency, e.g., list of neighbor cells. This aligns with operator network model for use cases and applications.
+
+
+Jakarta Release
+~~~~~~~~~~~~~~~
+
+The following are the enhancements in the Jakarta release:
+
+- Update of SDN-R netconf code to use the new O1 yang models
+- Update of RAN-Sim to use the new O1 yang models
+
+In the Jakarta release, the SON Use Case work was impacted by the fact that RAN-Sim needed enhancements to implement new features. We have made progress in the following areas in planning for future releases:
+
+- Convergence on the VES message formats to be used for FM/PM/CM
+- Inclusion of A1 based actions for the end-to-end SON Use Case
+- Enhancement of RAN-Sim to include abstraction of RAN App and A1 Termination which would process an A1 message and update of a CU/DU
+- Planning for replacement of Honeycomb netconf engine (project is archived)
+
+Kohn Release
+~~~~~~~~~~~~
+
+We have introduced a new paradigm in the Kohn release and taken steps to harmonize with O-RAN SC and new approaches for ONAP Control Loops. The following are the enhancements in the Kohn release:
+
+- We introduced a new paradigm of marking the RAN action SON control flows as being O1-based or A1-based. The PCI control flow is now an O1-based flow which goes to SDN-R for netconf-based configurations over O1 interface to the CU/DU (simulated in RAN-Sim). The ANR control flow is now an A1-based flow which goes to SDN-R/A1-PMS to generate A1 Policy messages over the A1 interface to the xApp/Near-RT RIC (simulated in RAN-Sim).
+- The formats of the Control Loop Message between SON Handler MS, Policy, and SDN-R have been updated. Policies in Policy Function have been updated. The PCI flow remains as an O1-based netconf action from SDN-R, while major changes were made for the ANR flow
+- We have introduced a new A1-based SON action flow leveraging the use of A1-PMS in SDN-R and A1-Termination in RAN-Sim. We have harmonized ONAP and O-RAN SC work, and cross-linked ONAP JIRAs to use O-RAN SC projects.
+- We have major changes for RAN-Sim. There is a new A1-Termination module as well as a new RAN-App module. The RAN-App module abstracts the function of an xApp in the Near-RT RIC. RAN-App processes the A1 policy message payload and sends a message to the RAN-Sim controller to make configuration changes in the RAN NF (CU or DU) in the RAN-Sim.
+
+
+For more information, please see:
+
+- `5G-SON Kohn release wiki page <https://wiki.onap.org/pages/viewpage.action?pageId=149029149>`_.
+
+- `5G-SON Jakarta release wiki page <https://wiki.onap.org/display/DW/R10+5G+SON+use+case>`_.
+
+- `5G-OOF-SON Base wiki page <https://wiki.onap.org/display/DW/5G+-+OOF+%28ONAP+Optimization+Framework%29+and+PCI+%28Physical+Cell+ID%29+Optimization>`_.
+
+- `OOF-SON El Alto & Frankfurt OOF (SON) wiki page <https://wiki.onap.org/display/DW/OOF+%28SON%29+in+R5+El+Alto%2C+OOF+%28SON%29+in+R6+Frankfurt>`_.
+
+
+How to Use
+~~~~~~~~~~
+
+The 5G-SON use case is implemented in the Rutgers University (Winlab) ONAP Wireless Lab (OWL).
+For details, please see
+`lab details <https://wiki.onap.org/pages/viewpage.action?pageId=45298557>`_.
+
+This page includes instructions for access to the lab. Setup and testing is done manually up to now.
+
+For all instructions about installing the components, please see:
+
+- `Wiki Installation page <https://wiki.onap.org/display/DW/Demo+setup+steps+for+Frankfurt>`_
+
+
+Test Status and Plans
+~~~~~~~~~~~~~~~~~~~~~
+
+See `test plans <https://wiki.onap.org/display/DW/R11+5G+SON+Integration+Tests>`_ for details.
+
+Known Issues and Resolutions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+(a) It is intended to have the RAN Simulator support sufficient Honeycomb netconf server instances to simulate 2000 cells. However, this number may be lower if there are hardware limitations.
+(b) For Control Loop Co-ordination, the denial of a second Control Loop based on Target Lock (i.e., when a second Control Loop tries to operate on the same target (in this case, a PNF) is successfully tested. The CLC is also applied at Control Loop level only. However, some code updates are required in Policy to properly update the Operations History DB entry, and to check the existence of active Control Loops by Policy. This will be addressed in Jakarta release, and tracked via https://jira.onap.org/browse/POLICY-2484
+(c) Honeycomb netconf server project has been archived. The plan is to migrate to netopeer. As an interim step, we have a new ran-app module which interacts with the ran-sim controller.
diff --git a/docs/docs_5g_pnf_pnp.rst b/docs/docs_5g_pnf_pnp.rst
index 09ec6d302..7807062d8 100644
--- a/docs/docs_5g_pnf_pnp.rst
+++ b/docs/docs_5g_pnf_pnp.rst
@@ -1,16 +1,13 @@
.. This work is licensed under a Creative Commons Attribution 4.0
International License. http://creativecommons.org/licenses/by/4.0
-
+
.. _docs_5g_pnf_pnp:
+:orphan:
+
5G - PNF Plug and Play
----------------------
-Source files
-~~~~~~~~~~~~
-
-- Base PnP PNF Simulator heat template file: https://gerrit.onap.org/r/gitweb?p=integration.git;a=tree;f=test/mocks/pnfsimulator/deployment/src
-
Description
~~~~~~~~~~~
@@ -18,17 +15,66 @@ The PNF Plug and Play is a procedure that is executed between a PNF and ONAP. In
**Useful Links**
-- `5G - PNF Plug and Play use case documentation <https://wiki.onap.org/display/DW/5G+-+PNF+Plug+and+Play>`_
-- `5G - PNF Plug and Play - Integration Test Cases <https://wiki.onap.org/display/DW/5G+-+PNF+PnP+-+Integration+Test+Cases>`_
-- `Instruction how to setup PnP PNF Simulator <https://wikgit i.onap.org/display/DW/PnP+PNF+Simulator>`_
-- `Instruction how to use PnP PNF Simulator <https://gerrit.onap.org/r/gitweb?p=integration.git;a=blob_plain;f=test/mocks/pnfsimulator/pnfsimulator/README.md>`_
+1. `5G - PNF Plug and Play use case documentation <https://wiki.onap.org/display/DW/5G+-+PNF+Plug+and+Play>`_
+2. `5G - PNF Plug and Play - Integration Test Cases <https://wiki.onap.org/display/DW/5G+-+PNF+PnP+-+Integration+Test+Cases>`_
+3. Instructions on how to set up and use the VES Client from :ref:`NF Simulator <nf_simulator>`.
How to Use
~~~~~~~~~~
-1) `Create and distribute service model which contains PNF <https://wiki.onap.org/display/DW/5G+-+PNF+PnP+-+Integration+Test+Cases#id-5G-PNFPnP-IntegrationTestCases-CreateanddistributeservicewhichcontainsPNF>`_
-2) `Create service for PNF and wait for PNF Ready message in DmaaP topic <https://wiki.onap.org/display/DW/5G+-+PNF+PnP+-+Integration+Test+Cases#id-5G-PNFPnP-IntegrationTestCases-PNFReady>`_
-3) `Send PNF Registartion request from PnP PNF Simualtor and finish registration <https://wiki.onap.org/display/DW/5G+-+PNF+PnP+-+Integration+Test+Cases#id-5G-PNFPnP-IntegrationTestCases-PNFregistrationacceptingwhenAAIentrycreatedinadvance>`_
+1. Create and distribute service model which contains PNF
+2. Create service for PNF and wait for PNF Ready message in DmaaP topic
+3. Send PNF Registration request from NF Simulator (VES Client) and finish registration
+
+Below is an example of the event that needs to be sent to the VES Client in order to trigger a registration event from the VES Client to the ONAP VES Collector.
+The following values in the example json need to be filled in with proper values:
+
+1. dcae-ves-collector-host-name
+2. dcae-ves-collector-port
+3. sourceName - Identifier of this Pnf information element. It is the first three letters of the Vendor and the PNF serial number.
+ This is a unique identifier for the PNF instance. It is also referred to as the Correlation ID.
+4. oamV4IpAddress - This is the IP address (IPv4) for the PNF itself. This is the IPv4 address that the PNF itself can be accessed at.
+5. oamV6IpAddress - This is the IP address (IPv6) for the PNF itself. This is the IPv6 address that the PNF itself can be accessed at.
+
+::
+ {
+ "vesServerUrl": "https://<dcae-ves-collector-host-name>:<dcae-ves-collector-port>/eventListener/v7",
+ "event": {
+ "event": {
+ "commonEventHeader": {
+ "startEpochMicrosec": 1538407540940,
+ "sourceId": "val13",
+ "eventId": "registration_38407540",
+ "nfcNamingCode": "oam",
+ "internalHeaderFields": {},
+ "eventType": "pnfRegistration",
+ "priority": "Normal",
+ "version": "4.0.1",
+ "reportingEntityName": "VEN6061ZW3",
+ "sequence": 0,
+ "domain": "pnfRegistration",
+ "lastEpochMicrosec": 1538407540940,
+ "eventName": "pnfRegistration",
+ "vesEventListenerVersion": "7.0.1",
+ "sourceName": "<sourceName>",
+ "nfNamingCode": "gNB"
+ },
+ "pnfRegistrationFields": {
+ "unitType": "val8",
+ "serialNumber": "6061ZW3",
+ "pnfRegistrationFieldsVersion": "2.0",
+ "manufactureDate": "1538407540942",
+ "modelNumber": "val6",
+ "lastServiceDate": "1538407540942",
+ "unitFamily": "BBU",
+ "vendorName": "VENDOR",
+        "oamV4IpAddress": "<oamV4IpAddress>",
+ "oamV6IpAddress": "<oamV6IpAddress>",
+ "softwareVersion": "val7"
+ }
+ }
+ }
+ }
diff --git a/docs/docs_5g_rtpm.rst b/docs/docs_5g_rtpm.rst
index 2de85cdb6..45f1103f2 100644
--- a/docs/docs_5g_rtpm.rst
+++ b/docs/docs_5g_rtpm.rst
@@ -1,8 +1,10 @@
.. This work is licensed under a Creative Commons Attribution 4.0
International License. http://creativecommons.org/licenses/by/4.0
-
+
.. _docs_realtime_pm:
+:orphan:
+
5G - Real Time PM and High Stream Data Collection
-------------------------------------------------
@@ -18,16 +20,16 @@ The Real-Time Performance Measurements support allows for a PNF to send streamin
Component and API descriptions can be found under:
-- `High Volume VNF Event Streaming (HV-VES) Collector <https://onap.readthedocs.io/en/latest/submodules/dcaegen2.git/docs/sections/services/ves-hv/index.html>`_
-- `HV-VES (High Volume VES) <https://onap.readthedocs.io/en/latest/submodules/dcaegen2.git/docs/sections/apis/ves-hv/index.html#hv-ves-high-volume-ves>`_
+- `High Volume VNF Event Streaming (HV-VES) Collector <https://docs.onap.org/projects/onap-dcaegen2/en/frankfurt/sections/services/ves-hv/index.html>`_
+- `HV-VES (High Volume VES) <https://docs.onap.org/projects/onap-dcaegen2/en/frankfurt/sections/apis/ves-hv/index.html#hv-ves-high-volume-ves>`_
How to verify
~~~~~~~~~~~~~
Follow instructions in the links below to send data to HV-VES collector and verify messages published on Kafka topic:
-- `HV-VES xNF simulator integration to ONAP <https://wiki.onap.org/display/DW/HV-VES+simulator#HV-VESsimulator-HV-VESxNFsimulatorintegrationtoONAP>`_
-- `HV-VES xNF message simulation from shell <https://wiki.onap.org/display/DW/HV-VES+simulator#HV-VESsimulator-HV-VESxNFmessagesimulationfromshell>`_
+- `HV-VES xNF simulator integration to ONAP <https://wiki.onap.org/display/DW/HV-VES+simulator>`_ (HVVESsimulator-HV-VESxNFmessagesimulationfromshell)
+- `HV-VES xNF message simulation from shell <https://wiki.onap.org/display/DW/HV-VES+simulator>`_ (HV-VESsimulator-HV-VESxNFsimulatorintegrationtoONAP)
Useful links
~~~~~~~~~~~~
diff --git a/docs/docs_BBS.rst b/docs/docs_BBS.rst
index 81af17294..1047ae3d7 100644
--- a/docs/docs_BBS.rst
+++ b/docs/docs_BBS.rst
@@ -1,5 +1,7 @@
.. _docs_bbs:
+:orphan:
+
BBS (Broadband Service)
-----------------------
@@ -10,12 +12,12 @@ management and assurance of broadband services. BBS focuses on multi-Gigabit
Internet Connectivity services based on PON (Passive Optical Network) access
technology.
-In Dublin release, BBS enables ONAP to
+In Frankfurt release, BBS enables ONAP to
1. Establish a subscriber's HSIA (High Speed Internet Access) service from an ONT (Optical Network Termination unit) to the Internet drain
- The HSIA service is designed and deployed using ONAP's design and deployment capabilities
- - The HSIA service activation is initiated via ONAP's External APIs and orchestrated and controlled using ONAP orchestration and control capabilities. The control capabilities leverage a 3rd party controller to implement the requested action within the technology domain/location represented by the domain specific SDN management and control function.
+ - The HSIA service activation is initiated via ONAP's External APIs and orchestrated and controlled using ONAP orchestration and control capabilities. The control capabilities leverage a 3rd party controller to implement the requested actions within the technology domain/location represented by the domain specific SDN management and control function.
2. Detect the change of location for ONT devices (Nomadic ONT devices)
@@ -27,7 +29,8 @@ In Dublin release, BBS enables ONAP to
- Service location modification that is detected by ONAP's analytic and initiated via the closed loop capabilities
- - The closed loop capabilities invoke a HSIA location change service that is orchestrated and controlled using ONAP capabilities and 3rd party controllers
+ - The closed loop capabilities invoke a HSIA location change service that
+ is orchestrated and controlled using ONAP capabilities and 3rd party controllers
|image1|
@@ -51,16 +54,10 @@ SO: Custom Workflow Configuration
::
- ~/oom/kubernetes# kubectl edit cm dev-so-so-bpmn-infra-app-configmap
+ ~/oom/kubernetes# kubectl edit cm dev-so-bpmn-infra-app-configmap
- mso:
- ...
- workflow:
- custom:
- BBS_E2E_Service:
- sdnc:
- need: true
- ...
+ ## replace "workflow:\n CreateGenericVNFV1:\n"
+ ## with "workflow:\n custom:\n BBS_E2E_Service:\n sdnc:\n need: true\n CreateGenericVNFV1:\n"
## Restart the pod
~/oom/kubernetes# kubectl delete po dev-so-so-bpmn-infra-7556d7f6bc-8fthk
@@ -72,68 +69,35 @@ IMPORTANT: make sure vnf_recipe.NF_ROLE matches vnf_resource.MODEL_NAME, and vnf
::
- root@onap-rancher-daily:/home/ubuntu# kubectl exec -ti dev-mariadb-galera-mariadb-galera-0 sh
+ root@onap-rancher-daily:/home/ubuntu# kubectl exec -ti dev-mariadb-galera-0 sh
sh-4.2$ mysql -u root -p
MariaDB [(none)]> use catalogdb;
- MariaDB [catalogdb]> select * from vnf_recipe;
- ...
- +-------+---------------------+-----------------------+--------------+-------------+--------------------------------------------------------------------------------+-----------------------------------------------+---------------+----------------+---------------------+--------------------------------------+
- | id | NF_ROLE | ACTION | SERVICE_TYPE | VERSION_STR | DESCRIPTION | ORCHESTRATION_URI | VNF_PARAM_XSD | RECIPE_TIMEOUT | CREATION_TIMESTAMP | VF_MODULE_ID |
- +-------+---------------------+-----------------------+--------------+-------------+--------------------------------------------------------------------------------+-----------------------------------------------+---------------+----------------+---------------------+--------------------------------------+
- | 10043 | InternetProfile | createInstance | NF | 1.0 | create InternetProfile | /mso/async/services/CreateSDNCNetworkResource | NULL | 180000 | 2019-02-18 08:34:39 | NULL |
- | 10044 | AccessConnectivity | createInstance | NF | 1.0 | create AccessConnectivity | /mso/async/services/CreateSDNCNetworkResource | NULL | 180000 | 2019-02-18 08:34:39 | NULL |
- | 10045 | CPE | createInstance | NF | 1.0 | create CPE | /mso/async/services/HandlePNF | NULL | 180000 | 2019-02-18 08:34:39 | NULL |
- +-------+---------------------+-----------------------+--------------+-------------+--------------------------------------------------------------------------------+-----------------------------------------------+---------------+----------------+---------------------+--------------------------------------+
- ...
- MariaDB [catalogdb]> select * from vnf_resource;
- +--------------------+-----------------------+---------------------+--------------------------------------+-----------------+-----------------+--------------------------------------+---------------+--------------------+----------------------------------------------+-----------------------------+-------------------+-----------------------+
- | ORCHESTRATION_MODE | DESCRIPTION | CREATION_TIMESTAMP | MODEL_UUID | AIC_VERSION_MIN | AIC_VERSION_MAX | MODEL_INVARIANT_UUID | MODEL_VERSION | MODEL_NAME | TOSCA_NODE_TYPE | HEAT_TEMPLATE_ARTIFACT_UUID | RESOURCE_CATEGORY | RESOURCE_SUB_CATEGORY |
- +--------------------+-----------------------+---------------------+--------------------------------------+-----------------+-----------------+--------------------------------------+---------------+--------------------+----------------------------------------------+-----------------------------+-------------------+-----------------------+
- | HEAT | CPE VF | 2019-05-15 22:11:07 | 8f5fe623-c5e3-4ab3-90f9-3a28daea6601 | NULL | NULL | 0ee07fe6-a156-4e59-9dee-09a775d02bca | 1.0 | CPE | org.openecomp.resource.vf.Cpe | NULL | Generic | Infrastructure |
- | HEAT | InternetProfile VF | 2019-05-15 22:11:11 | a8de16d8-0d1a-4a19-80ac-2bcb2790e9a6 | NULL | NULL | acbe6358-6ce4-43a9-9385-111fe5cadad3 | 1.0 | InternetProfile | org.openecomp.resource.vf.Internetprofile | NULL | Generic | Infrastructure |
- | HEAT | AccessConnectivity VF | 2019-05-15 22:11:13 | b464fd87-3663-46c9-adc5-6f7d9e98ff26 | NULL | NULL | 53018dba-c934-415d-b4b1-0b1cae9553b8 | 1.0 | AccessConnectivity | org.openecomp.resource.vf.Accessconnectivity | NULL | Generic | Infrastructure |
- +--------------------+-----------------------+---------------------+--------------------------------------+-----------------+-----------------+--------------------------------------+---------------+--------------------+----------------------------------------------+-----------------------------+-------------------+-----------------------+
-
-Modify the MODEL_UUID and MODEL_INVARIANT_UUID for each resource in the SQL query below accordingly to your environment.
-
-::
-
- INSERT INTO `vnf_resource` (`ORCHESTRATION_MODE`, `DESCRIPTION`, `CREATION_TIMESTAMP`, `MODEL_UUID`, `AIC_VERSION_MIN`, `AIC_VERSION_MAX`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `MODEL_NAME`, `TOSCA_NODE_TYPE`, `HEAT_TEMPLATE_ARTIFACT_UUID`, `RESOURCE_CATEGORY`, `RESOURCE_SUB_CATEGORY`)
- VALUES
- ('HEAT', 'CPE VF', '2019-05-15 22:11:07', '8f5fe623-c5e3-4ab3-90f9-3a28daea6601', NULL, NULL, '0ee07fe6-a156-4e59-9dee-09a775d02bca', '1.0', 'CPE', 'org.openecomp.resource.vf.Cpe', NULL, 'Generic', 'Infrastructure'),
- ('HEAT', 'InternetProfile VF', '2019-05-15 22:11:11', 'a8de16d8-0d1a-4a19-80ac-2bcb2790e9a6', NULL, NULL, 'acbe6358-6ce4-43a9-9385-111fe5cadad3', '1.0', 'InternetProfile', 'org.openecomp.resource.vf.Internetprofile', NULL, 'Generic', 'Infrastructure'),
- ('HEAT', 'AccessConnectivity VF', '2019-05-15 22:11:13', 'b464fd87-3663-46c9-adc5-6f7d9e98ff26', NULL, NULL, '53018dba-c934-415d-b4b1-0b1cae9553b8', '1.0', 'AccessConnectivity', 'org.openecomp.resource.vf.Accessconnectivity', NULL, 'Generic', 'Infrastructure');
-
-Adding is_pnf flag to CPE resource input in catalogdb database. Needed in DoCreateResource BPMN for pausing the flow until a PNF is ready
-
-::
-
- INSERT INTO `vnf_resource_customization` (`ID`, `MODEL_CUSTOMIZATION_UUID`, `MODEL_INSTANCE_NAME`, `MIN_INSTANCES`, `MAX_INSTANCES`, `AVAILABILITY_ZONE_MAX_COUNT`, `NF_TYPE`, `NF_ROLE`, `NF_FUNCTION`, `NF_NAMING_CODE`, `MULTI_STAGE_DESIGN`, `CREATION_TIMESTAMP`, `VNF_RESOURCE_MODEL_UUID`, `SERVICE_MODEL_UUID`, `RESOURCE_INPUT`, `CDS_BLUEPRINT_NAME`, `CDS_BLUEPRINT_VERSION`, `SKIP_POST_INSTANTIATION_CONFIGURATION`)
+ MariaDB [catalogdb]> INSERT INTO vnf_recipe (NF_ROLE, ACTION, SERVICE_TYPE, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, VNF_PARAM_XSD, RECIPE_TIMEOUT)
VALUES
- (16, '0cea1cea-e4e4-4c91-be41-675e183a8983', 'CPE 0', NULL, NULL, NULL, NULL, NULL, NULL, NULL, 'false', '2019-05-21 11:15:42', '8f5fe623-c5e3-4ab3-90f9-3a28daea6601', '0187be8c-8efb-4531-97fa-dbe984ed9cdb', '{\\\"nf_naming\\\":\\\"true\\\",\\\"skip_post_instantiation_configuration\\\":\\\"true\\\",\\\"multi_stage_design\\\":\\\"false\\\",\\\"availability_zone_max_count\\\":\\\"1\\\",\\\"is_pnf\\\":\\\"ont_0_is_pnf|true\\\"}', NULL, NULL, 1);
-
-We need to ensure that the order in which the resources are processed by SO engine is correct. In BBS case, the PNF resource should go right after VnfVirtualLink (NOTE: the BPMN flow waits until PNF is ready in order to create AccessConnectivity and InternetProfile resources)
+ ("InternetProfile", "createInstance", "NF", "1.0", "create InternetProfile", "/mso/async/services/CreateSDNCNetworkResource", '{"operationType":"InternetProfile"}', 180000),
+ ("AccessConnectivity", "createInstance", "NF", "1.0", "create AccessConnectivity", "/mso/async/services/CreateSDNCNetworkResource", '{"operationType":"AccessConnectivity"}', 180000),
+ ("CPE", "createInstance", "NF", "1.0", "create CPE", "/mso/async/services/HandlePNF", NULL, 180000);
-::
+ MariaDB [catalogdb]> select * from vnf_recipe where NF_ROLE IN ('AccessConnectivity','InternetProfile', 'CPE');
+ +-------+--------------------+----------------+--------------+-------------+---------------------------+-----------------------------------------------+----------------------------------------+----------------+---------------------+--------------+
+ | id | NF_ROLE | ACTION | SERVICE_TYPE | VERSION_STR | DESCRIPTION | ORCHESTRATION_URI | VNF_PARAM_XSD | RECIPE_TIMEOUT | CREATION_TIMESTAMP | VF_MODULE_ID |
+ +-------+--------------------+----------------+--------------+-------------+---------------------------+-----------------------------------------------+----------------------------------------+----------------+---------------------+--------------+
+ | 10048 | InternetProfile | createInstance | NF | 1.0 | create InternetProfile | /mso/async/services/CreateSDNCNetworkResource | {"operationType":"InternetProfile"} | 1800000 | 2020-01-20 17:43:07 | NULL |
+ | 10051 | AccessConnectivity | createInstance | NF | 1.0 | create AccessConnectivity | /mso/async/services/CreateSDNCNetworkResource | {"operationType":"AccessConnectivity"} | 1800000 | 2020-01-20 17:43:07 | NULL |
+ | 10054 | CPE | createInstance | NF | 1.0 | create CPE | /mso/async/services/HandlePNF | NULL | 1800000 | 2020-01-20 17:43:07 | NULL |
+ +-------+--------------------+----------------+--------------+-------------+---------------------------+-----------------------------------------------+----------------------------------------+----------------+---------------------+--------------+
+ 3 rows in set (0.00 sec)
- MariaDB [catalogdb]> select RESOURCE_ORDER from service where MODEL_NAME="BBS_E2E_Service";
- +----------------------------------------------------------------------------+
- | RESOURCE_ORDER |
- +----------------------------------------------------------------------------+
- | VnfVirtualLink,CPE,AccessConnectivity,InternetProfile,PonUni,OltNni,OntNni |
- | VnfVirtualLink,CPE,AccessConnectivity,InternetProfile,PonUni,OltNni,OntNni |
- +----------------------------------------------------------------------------+
- 2 rows in set (0.00 sec)
DMaaP Message Router
====================
-Create required topics
+Create the required topics in DMaaP
::
- curl -X POST \
- http://mr.api.simpledemo.openecomp.org:30227/topics/create \
+ curl -kX POST \
+ https://mr.api.simpledemo.openecomp.org:30226/topics/create \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
-H 'cache-control: no-cache' \
@@ -144,8 +108,8 @@ Create required topics
"replicationCount": "3"
}'
- curl -X POST \
- http://mr.api.simpledemo.openecomp.org:30227/topics/create \
+ curl -kX POST \
+ https://mr.api.simpledemo.openecomp.org:30226/topics/create \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
-H 'cache-control: no-cache' \
@@ -156,8 +120,8 @@ Create required topics
"replicationCount": "3"
}'
- curl -X POST \
- http://mr.api.simpledemo.openecomp.org:30227/topics/create \
+ curl -kX POST \
+ https://mr.api.simpledemo.openecomp.org:30226/topics/create \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
-H 'cache-control: no-cache' \
@@ -168,8 +132,8 @@ Create required topics
"replicationCount": "3"
}'
- curl -X POST \
- http://mr.api.simpledemo.openecomp.org:30227/topics/create \
+ curl -kX POST \
+ https://mr.api.simpledemo.openecomp.org:30226/topics/create \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
-H 'cache-control: no-cache' \
@@ -180,81 +144,211 @@ Create required topics
"replicationCount": "3"
}'
+ curl -k 'https://mr.api.simpledemo.openecomp.org:30226/topics'
+
+ {
+ "topics": [
+ "org.onap.dmaap.mr.PNF_REGISTRATION",
+ "unauthenticated.DCAE_CL_OUTPUT",
+ "AAI-EVENT",
+ "SDC-DISTR-STATUS-TOPIC-AUTO",
+ "SDC-DISTR-NOTIF-TOPIC-AUTO",
+ "org.onap.dmaap.mr.PNF_READY",
+ "unauthenticated.PNF_READY",
+ "POLICY-PDP-PAP",
+ "unauthenticated.CPE_AUTHENTICATION",
+ "unauthenticated.VES_MEASUREMENT_OUTPUT",
+ "unauthenticated.PNF_UPDATE",
+ "org.onap.dmaap.mr.mirrormakeragent",
+ "__consumer_offsets"
+ ]
+ }
+
+
DCAE: BBS Event Processor (BBS-ep)
==================================
-Installation instructions: `BBS-ep <https://wiki.onap.org/pages/viewpage.action?pageId=60891185>`_
+Description: `BBS-ep <https://docs.onap.org/projects/onap-dcaegen2/en/frankfurt/sections/services/bbs-event-processor/index.html?highlight=BBS>`_
+
+The following BBS event processor blueprint will be used:
+
+- `k8s-bbs-event-processor.yaml <https://git.onap.org/dcaegen2/platform/blueprints/plain/blueprints/k8s-bbs-event-processor.yaml?h=frankfurt>`_
-Update the configuration of BBS-ep in Consul with the following version for close loop (see screenshot below) in order to match the version expected by BBS APEX policy:
+
+The BBS-ep deployment procedure:
::
- "application.clVersion": "1.0.0"
+ root@onap-nfs:/home/ubuntu# kubectl exec -ti dev-dcae-bootstrap-7599b45c77-czxsx -n onap bash
+ bash-4.2$ cfy install -b bbs-ep -d bbs-ep /blueprints/k8s-bbs-event-processor.yaml
+
+
+IMPORTANT: Make sure that the configuration of BBS-ep in Consul contains the following version for the close loop policy in order to match the version expected by BBS APEX policy:
+
+::
+
+ "application.clVersion": "1.0.2"
+
DCAE: RESTCONF Collector
========================
-Installation instructions: `RESTCONF Collector <https://wiki.onap.org/pages/viewpage.action?pageId=60891182>`_
+Description: `RESTCONF Collector <https://docs.onap.org/projects/onap-dcaegen2/en/frankfurt/sections/services/restconf/index.html>`_
+
+The following RESTCONF collector blueprint will be used:
+
+- `k8s-restconf.yaml <https://git.onap.org/dcaegen2/platform/blueprints/plain/blueprints/k8s-restconf.yaml?h=frankfurt>`_
+
+
+RESTCONF Collector deployment procedure:
+
+::
+
+ root@onap-nfs:/home/ubuntu# kubectl exec -ti dev-dcae-bootstrap-7599b45c77-czxsx -n onap bash
+ bash-4.2$ cfy install -b restconf -d restconf /blueprints/k8s-restconf.yaml
+
DCAE: VES mapper
================
-Installation instructions: `VES Mapper <https://wiki.onap.org/pages/viewpage.action?pageId=60891188>`_
+Description: `VES Mapper <https://docs.onap.org/projects/onap-dcaegen2/en/frankfurt/sections/services/mapper/index.html>`_
+
+The following VES mapper blueprint will be used:
+
+- `k8s-ves-mapper.yaml <https://git.onap.org/dcaegen2/platform/blueprints/tree/blueprints/k8s-ves-mapper.yaml?h=frankfurt>`_
+
+
+VES Mapper deployment procedure:
+
+::
+
+ root@onap-nfs:/home/ubuntu# kubectl exec -ti dev-dcae-bootstrap-7599b45c77-czxsx -n onap bash
+ bash-4.2$ cfy install -b ves-mapper -d ves-mapper /blueprints/k8s-ves-mapper.yaml
+
DCAE: VES collector
===================
-Configure mapping VES event domain to DMaaP topic: ves-statechange --> unauthenticated.CPE_AUTHENTICATION
+Configure the mapping of the VES event domain to the correct DMaaP topic in Consul: ves-statechange --> unauthenticated.CPE_AUTHENTICATION
-Access Consul UI: http://<consul_server_ui>:30270/ui/#/dc1/services
+1. Access Consul UI <http://CONSUL_SERVER_UI:30270/ui/#/dc1/services>
-Modify dcae-ves-collector configuration by adding a new VES domain to DMaaP topic mapping
+2. Modify the dcae-ves-collector configuration by adding a new VES domain to DMaaP topic mapping
::
"ves-statechange": {"type": "message_router", "dmaap_info": {"topic_url": "http://message-router:3904/events/unauthenticated.CPE_AUTHENTICATION"}}
+|image3|
+
+3. Click on UPDATE in order to apply the new configuration
+
+
SDNC: BBS DGs (Directed Graphs)
===============================
-Make sure that BBS DGs in SDNC DGBuilder are in Active state
+Make sure that the following BBS DGs in the SDNC DGBuilder are in Active state
+
+::
+
+ bbs-access-connectivity-vnf-topology-operation-create-huawei
+ bbs-access-connectivity-vnf-topology-operation-delete-huawei
+ bbs-internet-profile-vnf-topology-operation-change-huawei
+ bbs-internet-profile-vnf-topology-operation-common-huawei
+ bbs-internet-profile-vnf-topology-operation-create-huawei
+ bbs-internet-profile-vnf-topology-operation-delete-huawei
+ validate-bbs-vnf-input-parameters
+
+DGBuilder URL: https://sdnc.api.simpledemo.onap.org:30203
+
+
+Access SDN M&C DG
+=================
+Configure Access SDN M&C IP address in SDNC DG using dgbuilder. For instance:
+
+> GENERIC-RESOURCE-API: bbs-access-connectivity-vnf-topology-operation-create-huawei.json
+> GENERIC-RESOURCE-API: bbs-access-connectivity-vnf-topology-operation-delete-huawei.json
+
+1. Export the relevant DG
+
+2. Modify the IP address
+
+3. Import back the DG and Activate it
+
+DGBuilder URL: https://sdnc.api.simpledemo.onap.org:30203
+
+
+Edge SDN M&C DG
+===============
+Configure Edge SDN M&C IP address in SDNC DG using dgbuilder. For instance:
+
+> GENERIC-RESOURCE-API: bbs-access-connectivity-vnf-topology-operation-common-huawei.json
+
+1. Export the relevant DG
+
+2. Modify the IP address
+
+3. Import back the DG and Activate it
+
+DGBuilder URL: https://sdnc.api.simpledemo.onap.org:30203
-http://dguser:test123@{{sdnc-dgbuilder_Node-IP}}:30203/#
+
+Add SSL certificate of the 3rd party controller into the SDNC trust store
+=========================================================================
::
- bbs-access-connectivity-network-topology-operation-create-huawei
- bbs-access-connectivity-network-topology-operation-delete-huawei
- bbs-internet-profile-network-topology-operation-change-huawei
- bbs-internet-profile-network-topology-operation-common-huawei
- bbs-internet-profile-network-topology-operation-create-huawei
- bbs-internet-profile-network-topology-operation-delete-huawei
- validate-bbs-network-input-parameters
+ kubectl exec -ti dev-sdnc-0 -n onap -- bash
+
+ openssl s_client -connect <IP_ADDRESS_EXT_CTRL>:<PORT>
+ # copy server certificate and paste in /tmp/<CA_CERT_NAME>.crt
+ sudo keytool -importcert -file /tmp/<CA_CERT_NAME>.crt -alias <CA_CERT_NAME>_key -keystore truststore.onap.client.jks -storepass adminadmin
+ keytool -list -keystore truststore.onap.client.jks -storepass adminadmin | grep <CA_CERT_NAME>
+
Policy: BBS APEX policy
=======================
-Inside APEX container,
+Deployment procedure of BBS APEX Policy (master, apex-pdp image v2.3+)
+
+1. Make Sure APEX PDP is running and in Active state
+
+::
+
+ API: GET
+ URL: {{POLICY-PAP-URL}}/policy/pap/v1/pdps
+
+2. Create the operational control loop APEX policy type
+
+::
-1) Edit DCAEConsumer URL in `examples/config/ONAPBBS/NomadicONTPolicyModel_config.json`
+ API: POST
+ URL: {{POLICY-API-URL}}/policy/api/v1/policytypes
+ JSON Payload: https://git.onap.org/integration/usecases/bbs/tree/policy/apex/json/bbs_policytypes.json
-2) Edit AAI and SDNC URLs in `examples/config/ONAPBBS/config.txt`
+3. Create BBS APEX policy
::
- AAI_URL=aai:8443
- AAI_USERNAME=AAI
- AAI_PASSWORD=AAI
- SDNC_URL=sdnc:8282
- SDNC_USERNAME=admin
- SDNC_PASSWORD=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U
- SVC_NOTIFICATION_URL=http://c1.vm1.mso.simpledemo.openecomp.org:8080
+ API: POST
+ URL: {{POLICY-API-URL}}/policy/api/v1/policytypes/onap.policies.controlloop.operational.Apex/versions/1.0.0/policies
+ JSON Payload: https://git.onap.org/integration/usecases/bbs/tree/policy/apex/json/bbs_create_policy.json
-3) Launch APEX BBS policy as a background process
+4. Deploy BBS policy
::
- nohup /opt/app/policy/apex-pdp/bin/apexApps.sh engine -c examples/config/ONAPBBS/NomadicONTPolicyModel_config.json &
+ API: POST
+ URL: {{POLICY-PAP-URL}}/policy/pap/v1/pdps/policies
+ JSON Payload: https://git.onap.org/integration/usecases/bbs/tree/policy/apex/json/bbs_simple_deploy.json
+
+5. Verify the deployment
+
+::
+
+ API: GET
+ URL: {{POLICY-API-URL}}/policy/api/v1/policytypes/onap.policies.controlloop.operational.Apex/versions/1.0.0/policies/
+
Edge Services: vBNG+AAA+DHCP, Edge SDN M&C
==========================================
@@ -264,22 +358,18 @@ Installation and setup instructions: `Swisscom Edge SDN M&C and virtual BNG <htt
References
==========
-Please refer to the following wiki page for additional set up and configuration
-instructions:
+Please refer to the following wiki page for further steps related to the BBS service design and instantiation:
-- `BBS Documentation <https://wiki.onap.org/display/DW/BBS+Documentation>`_
+- `BBS Documentation <https://wiki.onap.org/pages/viewpage.action?pageId=75303137#BBSDocumentation(Frankfurt)-BBSServiceConfiguration>`_
Known Issues
------------
-- PNF registration timeout is limited to 60s due HTTP timeout in inter-BPMN workflow calls (`SO-1938 <https://jira.onap.org/browse/SO-1938>`_)
-
-- E2E Service deletion workflow does not delete the PNF resource in AAI (`SO-1994 <https://jira.onap.org/browse/SO-1994>`_)
-
-- Under certain circumstances, multiple attachment points (logical links) are associated to a single PNF (`DCAEGEN2-1611 <https://jira.onap.org/browse/DCAEGEN2-1611>`_)
-
+- E2E Service deletion workflow does not delete the PNF resource in AAI (`SO-2609 <https://jira.onap.org/browse/SO-2609>`_)
.. |image1| image:: files/bbs/BBS_arch_overview.png
:width: 6.5in
.. |image2| image:: files/bbs/BBS_system_view.png
:width: 6.5in
+.. |image3| image:: files/bbs/BBS_dcae-ves-collector_config.png
+ :width: 6.5in
diff --git a/docs/docs_CCVPN.rst b/docs/docs_CCVPN.rst
index b0ec57239..d24862a68 100644
--- a/docs/docs_CCVPN.rst
+++ b/docs/docs_CCVPN.rst
@@ -1,10 +1,413 @@
.. This work is licensed under a Creative Commons Attribution 4.0
International License. http://creativecommons.org/licenses/by/4.0
-
+
.. _docs_ccvpn:
+:orphan:
+
CCVPN (Cross Domain and Cross Layer VPN)
----------------------------------------
+Update for London Release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The London release enhances the CCVPN use-case by introducing the Cloud-Network Convergence support (REQ-1413).
+CCVPN London release will add transport domain support for the Intent-based cloud-network convergence.
+
+London Scope and Impacted modules
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The standardized cloud resource management APIs are still under our investigation.
+In London, we will only support the registration of the Cloud Orchestrator to SDNC,
+whose mechanism is similar to the network controller registration.
+
+The impacted ONAP modules are CCSDK and SDN-C.
+
+Installation Procedure
+~~~~~~~~~~~~~~~~~~~~~~
+
+For London's new features, the integration test environment is similar to that of
+the Kohn release: an ONAP instance with London release interfacing with 3rd party
+cloud orchestrators should be established.
+
+Functional/Integration Test Cases
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The testing procedure is described in the following few test cases:
+- Register a 3rd party cloud orchestrator to SDNC through ESR APIs
+- Create and delete a single CLL instance that accesses a single cloud, and monitor if the closed-loop call flow is getting triggered.
+- Modify the bandwidth of a connection link of an existing CLL instance, and monitor if the closed-loop call flow is getting triggered.
+
+Update for Kohn Release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Kohn release enhances the CCVPN use-case by introducing the following three features (REQ-1268):
+1. E-LINE (P2P connection) support for the Cloud Leased Line (CLL) service delivery
+2. Enhancing the Closed-Loop Automation of CCVPN services by using DCAE SDK dmaap-client lib in slice analysis MS
+3. Enhancing TN NSSMF NBI to align with the latest IETF specification (SO changes)
+
+Kohn Scope and Impacted modules
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Kohn release is an enhancement release. E-LINE service model (P2P connection) is added to the Cloud Leased Line (CLL) service.
+Also, slice analysis MS is enhanced to use DCAE SDK dmaap-client lib.
+And lastly, TN NSSMF northbound is aligned with the latest IETF transport slice definition model (SO changes).
+
+The impacted ONAP modules are: CCSDK, SDN-C, DCAE, and SO.
+
+Installation Procedure
+~~~~~~~~~~~~~~~~~~~~~~
+
+For Kohn new features, the integration test environment is similar to that of
+the Jakarta release: an ONAP instance with Kohn release interfacing with 3rd party
+transport domain controllers should be established.
+
+Functional/Integration Test Cases
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The testing procedure is described in the following few test cases:
+- Create and delete single CLL instance which accesses single cloud, and monitor if the closed-loop call flow is getting triggered.
+- Modify the bandwidth of a connection link of an existing CLL instance, and monitor if the closed-loop call flow is getting triggered.
+
+
+Update for Jakarta Release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Jakarta release enhances the CCVPN use-case by introducing the following three features (REQ-1076):
+1. Support for IBN service discovery by registering Cloud Leased Line (CLL) and Transport Slicing services to MSB
+2. Support for 1+1 protection of Cloud Leased Line (CLL)
+3. Support for closed-loop and user-triggered intent update
+
+Jakarta Scope and Impacted modules
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The "CCVPN closed-loop" feature and the "user-triggered intent update" feature require both a front-end and a back-end system.
+The front-end would be different for IBN and CCVPN, but the two features can share a common back-end.
+As a first step, current bandwidth usage of a CLL should be collected from the physical network. Then VES collector API
+should be called to send this information to DCAE. DCAE would then publish a new DMaaP topic to be consumed by DCAE slice
+analysis micro-service. This module will then send this notification to Policy.
+
+In Jakarta, the goal of both user-triggered intent update and CCVPN closed-loop is to ensure the max-bandwidth of the CLL service
+can satisfy user's intent throughout the intent life cycle. Thus, the modify-CLL operation triggered by DCAE and Policy is
+common to IBN and CCVPN. So a common back-end mechanism is implemented to support both use-cases.
+
+The impacted ONAP modules are: CCSDK, SDN-C, A&AI, DCAE, POLICY, and SO.
+
+Installation Procedure
+~~~~~~~~~~~~~~~~~~~~~~
+
+For Jakarta new features, the integration test environment is similar to that of
+the Istanbul release: an ONAP instance with Istanbul release interfacing with 3rd party
+transport domain controllers should be established.
+
+Functional/Integration Test Cases
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The testing procedure is described in the following few test cases:
+- Create and delete single CLL instance which accesses single cloud, and monitor if the closed-loop call flow is getting triggered.
+- Create and delete single CLL instance which access multiple clouds, and monitor if the closed-loop call flow is getting triggered.
+- Create and delete multiple CLL instances which access single cloud, and monitor if the closed-loop call flow is getting triggered.
+- Create and delete multiple CLL instances which access multiple clouds, and monitor if the closed-loop call flow is getting triggered.
+- Create a CLL instance which have connection links with different bandwidth, and monitor if the closed-loop call flow is getting triggered.
+- Modify the bandwidth of a connection link of an existing CLL instance, and monitor if the closed-loop call flow is getting triggered.
+- Modify an existing CLL instance by adding a new connection link, and monitor if the closed-loop call flow is getting triggered.
+
+
+Update for Istanbul Release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Istanbul release introduces a new functionality for the CCVPN use-case:
+Cloud Lease Line (CLL) service support. The following three main operations were
+added in Istanbul release (REQ-719):
+
+1. The support for creating an E-Tree service, which has one ROOT (Cloud POP) and may have
+ one or more LEAFs (i.e. ONUs) as its branches.
+2. The support for modifying the maximum bandwidth supported by a given E-Tree.
+3. The support for deleting an E-Tree service.
+
+Istanbul Scope and Impacted modules
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For operation #1 mentioned above, the user should be able to "create" an E-Tree service.
+The modification operation is able to support the following scenarios:
+
+a. An E-Tree can have one or more branches (LEAFs) located in one or multiple (different)
+ domains.
+b. When multiple LEAFs are physically located in a single OLT node, those LEAFs
+ should re-use or share the same OTN tunnels, therefore the path computation
+ mechanism should only be called once.
+
+By operation #2 mentioned above, a user can change/modify the maximum bandwidth supported
+by a given E-Tree.
+
+And by operation #3 mentioned above, a user can delete a given E-Tree.
+
+The impacted ONAP modules are: SO, SDN-C, and A&AI.
+
+For A&AI, additional edge-rules were introduced between two connectivity nodes as well as
+between a connectivity and a uni node.
+
+In SDN-C, additional Directed Graphs (DGs) were implemented to support the above-mentioned
+features. These new DGs are placed under the generic-resource-api folder in SDNC.
+
+Installation Procedure
+~~~~~~~~~~~~~~~~~~~~~~
+
+For Istanbul new features, the integration test environment is similar to that of
+the Honolulu release: an ONAP instance with Istanbul release interfacing with 3rd party
+transport domain controllers should be established.
+
+For E-Tree support, the installation procedure is similar to that of the E2E
+Network Slicing use case. In other words, we need to bring up the required modules
+including SO, ADNS, A&AI, and UUI. We also need to configure these modules along
+with the mandatory common modules such as DMaaP.
+
+Functional/Integration Test Cases
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The testing procedure is described in the following few test cases:
+
+- create an E-Tree with one ROOT and one or multiple LEAF(s) in a multi-domain topology
+- modify the maximum bw of a given E-Tree or add a new connection link to a given E-Tree
+- delete a given E-Tree
+
+To run such test cases, the user must first add (register) the domain controllers as the ESR
+3rd party controllers. As a result of this registration, a round of topology discovery gets
+triggered. After that, network-routes or UNI Endpoints have to be created in A&AI. This step
+is similar to that of Guilin release, and is described in the following link:
+https://wiki.onap.org/display/DW/Transport+Slicing+Configuration+and+Operation+Guidance
+
+Then an E-Tree creation, modification and deletion can be triggered from SO APIs.
+
+
+
+Update for Honolulu Release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Honolulu release continued to support and extend the Transport Slicing functionality
+developed in Guilin release. Two main features were added in Honolulu release (REQ-456):
+
+1. The support for reuse and modification of an existing TN NSSI has been developed.
+2. In addition, the Honolulu release also continued to support and extend the CCVPN
+   use-case and in particular, the support for inter-domain connections of three or
+   more network domains has been introduced in Honolulu release. (CCVPN in previous
+   releases was only able to connect two domains).
+
+Honolulu Scope and Impacted modules
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For feature #1 mentioned above, the user should be able to "modify" a Transport Slice.
+The modification operation is able to support the following three scenarios:
+
+a. A user may "Add" one or more new service(s)/connections link(s) to a given slice
+ (TN NSSI) that is already created.
+b. A user may need to change or modify the maximum bandwidth attribute (i.e. the SLA
+ agreement) using which a given slice is created.
+c. Both of the above operations.
+
+For feature #2 mentioned above, now in H release, we can have and support an arbitrary
+number of domains inter-connected to each other and we can support a cross-layer
+cross-domain VPN connectivity and transport slicing for these kinds of scenarios as well.
+
+Impacted ONAP modules include: SO, SDN-C, CCSDK, A&AI.
+
+In CCSDK, a path computation engine (PCE) mechanism is introduced to support a
+graph-based path computation in multi-domain network topologies. This PCE system is
+implemented as a SLI plugin to be called and used by Directed Graphs (DGs).
+
+For A&AI, additional attributes were introduced to the connectivity node and vpn-binding node.
+
+In SDN-C, additional Directed Graphs (DGs) were implemented to support the above-mentioned
+two features.
+
+Installation Procedure
+~~~~~~~~~~~~~~~~~~~~~~
+
+For Honolulu new features, the integration test environment is similar to that of the Guilin
+release: an ONAP instance with Honolulu release interfacing to 3rd party transport domain
+controllers should be established.
+
+For Transport Slicing, the installation procedure is similar to that of the E2E
+Network Slicing use case. In other words, we need to bring up the required modules
+including SDC, SO, A&AI, UUI and OOF. We also need to configure these modules along
+with the mandatory common modules such as DMaaP.
+
+Functional/Integration Test Cases
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The testing procedure is described in the following few test cases:
+
+- service/template design: Successful design of TN NSST and Slice Profile
+- modify max-bandwidth of existing TN NSSI: Modify the maximum bandwidth of an existing TN NSSI
+- modify connection links existing TN NSSI: Add new connection links to existing TN NSSI
+- modify both max-bandwidth and connection links of TN NSSI: Modify both the maximum bandwidth and add new connection links to an existing TN NSSI
+- three-domain network: Test create TN NSSI (or other NSI life cycle operations) on a three-domain network (i.e., need 3 ACTN PNC simulators)
+
+
+
+Update for Guilin Release
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In Guilin Release, **MDONS** Extension feature is introduced.
+
+In addition to the MDONS extension, CCVPN has also developed an
+IETF/ACTN-based Transport Slicing solution (REQ-347). This development
+enabled ONAP to offer the TN NSSMF functionality, which was used by
+the E2E Network Slicing use case (REQ-342).  The solution was built
+upon the existing IETF/ACTN E-LINE over OTN NNI feature developed in Frankfurt release.
+
+Guilin Scope and Impacted modules
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MDONS Extension implementation for the Frankfurt release will incorporate the following:
+
+- Support Asynchronous OpenRoadM OTN service activation notification handling
+- Add OOF support for inter domain link/path selection
+- Support Closed Loop sub-use case
+
+Impacted ONAP modules include: OOF, SDN-C, SO and Holmes.
+
+`Wiki link reference <https://wiki.onap.org/display/DW/MDONS+Extension+in+R7>`_
+
+Transport Slicing in Guilin release has implemented the following TN NSSMF functionality:
+
+- Allocate TN NSSI
+- Deallocate TN NSSI
+- Activate TN NSSI
+- Deactivate TN NSSI
+
+The Transport Slicing implementation has made code changes in the following modules:
+
+- AAI (Schema changes only)
+- UUI
+- SO
+- OOF
+- SDN-C
+- CCSDK
+- Modelling
+
+Functional/Integration Test Cases
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For integration test case and description of MDONS extension, refer to this
+`following wiki-page <https://wiki.onap.org/display/DW/Integration+Test+Cases+-+MDONS+Extension>`_.
+
+For integration test case and description of Transport Slicing:
+
+- `Guilin Test plan <https://wiki.onap.org/display/DW/CCVPN+-+Transport+Slicing+integration+test+plan+for+Guilin+release>`_
+- `Guilin E2E Network Slicing <https://wiki.onap.org/display/DW/E2E+Network+Slicing+Use+Case+in+R7+Guilin>`_
+
+Installation Procedure
+~~~~~~~~~~~~~~~~~~~~~~
+
+For MDONS extension, the integration test environment is established to have ONAP instance with Guilin
+release interfacing to 3rd party transport domain controllers. One controller
+instance manages OpenROADM OTN topology and the other 2 instances manage TAPI
+OTN topology. L0 infrastructure and WDM services are pre-provisioned to support
+L1 topology discovery and OTN service orchestration from ONAP.
+
+For Transport Slicing, the installation procedure is similar to that of the E2E
+Network Slicing use case. In other words, we need to bring up the required modules
+including SDC, SO, A&AI, UUI and OOF. We also need to configure these modules along
+with the mandatory common modules such as DMaaP.
+
+Testing Procedures
+~~~~~~~~~~~~~~~~~~
+
+The testing procedure is described in:
+
+- `Testing procedure for MDONS extension <https://wiki.onap.org/display/DW/Integration+Test+Cases+-+MDONS+Extension>`_
+- `Testing procedure for Transport Slicing <https://wiki.onap.org/display/DW/CCVPN+-+Transport+Slicing+integration+test+plan+for+Guilin+release>`_
+
+Update for Frankfurt release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In Frankfurt, we introduced two extensions in CCVPN use case. One is E-LINE service over OTN NNI handover, another is the
+multi domain optical service which aims to provide end to end layer 1 service.
+
+E-LINE over OTN NNI
+~~~~~~~~~~~~~~~~~~~
+
+Description
+~~~~~~~~~~~
+
+It is considered a typical scenario for operators to use OTN to interconnect its multiple transport network domains. Hence
+the capabilities of orchestrating end-to-end E-LINE services across the domains over OTN is important for ONAP. When operating
+with multiple domains with multi vendor solutions, it is also important to define and use standard and open
+interfaces, such as the IETF ACTN-based transport `YANG models <https://tools.ietf.org/html/rfc8345>`_, as the southbound interface
+of ONAP, in order to ensure interoperability. The SOTN NNI use-case aims to automate the design, service provision by independent
+operational entities within a service provider network by delivering E-Line over OTN orchestration capabilities into ONAP. SOTN NNI
+extends upon the CCVPN use-case by incorporating support for L1/L2 network management capabilities leveraging open standards & common
+data models.
+
+Frankfurt Scope and Impacted modules
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Frankfurt demonstration includes L1(OTN) and L2(ETH) Topology discovery from multiple domains controllers with in an operator
+and provide VPN service provision in OTN and ETH network.
+
+The ONAP components involved in this use case are: SDC, A&AI, UUI, SO, SDNC, OOF, MSB.
+
+Functional Test Cases
+~~~~~~~~~~~~~~~~~~~~~
+
+Usecase specific developments have been realized in SO, OOF, AAI, SDNC and UUI ONAP components..
+
+Testing Procedure
+~~~~~~~~~~~~~~~~~
+Design time
+SOTNVPNInfraService service design in SDC and distribute to AAI and SO.
+
+Run Time:
+All operation will be triggered by UUI, including service creation and termination,
+link management and topology network display:
+
+- `E-LINE over OTN Inter Domain Test Cases <https://wiki.onap.org/display/DW/E-LINE+over+OTN+Inter+Domain+Test+Cases>`_
+- `Testing status <https://wiki.onap.org/display/DW/2%3A+Frankfurt+Release+Integration+Testing+Status>`_
+
+MDONS (Multi-Domain Optical Network Services)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Overall Description
+~~~~~~~~~~~~~~~~~~~
+
+The MDONS use-case aims to automate the design, activation & operations resulting
+from an optical transport (L0/L1) service request exchange between service providers and/or independent operational entities within a service provider network by delivering E2E optical orchestration capabilities into ONAP. MDONS extends upon the CCVPN use-case by incorporating support for L0/L1 network management capabilities leveraging open standards & common data models defined by OpenROADM, Transport-API & MEF.
+
+Frankfurt Scope and Impacted modules
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+MDONS implementation for the Frankfurt release will incorporate the following:
+- Design & modelling of optical services based on MEF L1 subscriber & operator properties
+- E2E optical service workflow definitions for service instantiation & deletion
+- UI portal with L1 service instantiation templates
+- Optical Transport domain management (topology, resource onboarding) through standard models / APIs - OpenROADM, T-API
+Impacted ONAP modules include: A&AI, SDC, SDN-C, SO, UUI
+
+References:
+
+- `OpenROADM reference <https://github.com/OpenROADM/OpenROADM_MSA_Public>`_
+- `ONF Transport-API (TAPI) <https://github.com/OpenNetworkingFoundation/TAPI>`_
+- `MEF <https://wiki.mef.net/display/CESG/MEF+63+-+Subscriber+Layer+1+Service+Attributes>`_
+
+Functional/Integration Test Cases
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For integration test case and description, refer to this following
+`wiki-page <https://wiki.onap.org/display/DW/MDONS+Integration+Test+Case>`_.
+
+Installation Procedure
+~~~~~~~~~~~~~~~~~~~~~~
+
+The integration test environment is established to have ONAP instance with
+Frankfurt release interfacing to 3rd party transport domain controllers.
+One controller instance manages OpenROADM OTN topology and the other 2 instances
+manage TAPI OTN topology. L0 infrastructure and WDM services are pre-provisioned
+to support L1 topology discovery and OTN service orchestration from ONAP.
+
+Testing Procedure
+~~~~~~~~~~~~~~~~~
+
+Test environment is described in
+`Installation and Test Procedure <https://wiki.onap.org/display/DW/MDONS+Integration+Test+Case>`_.
Update for Dublin release
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -12,11 +415,14 @@ Update for Dublin release
1. Service model optimization
In Dublin release,the design of CCVPN was optimized by having support of List type of Input in SDC.
-During onboarding and design phase, one end to end service is created using SDC. This service is
-composed of these two kinds of resources:
-• VPN resource
-• Site resource
-You can see the details from here https://wiki.onap.org/display/DW/Details+of+Targeted+Service+Template
+During onboarding and design phase, one end to end service is created using SDC.
+This service is composed of these two kinds of resources:
+
+- VPN resource
+- Site resource
+
+See the `Details of Targeted Service Template wiki page <https://wiki.onap.org/display/DW/Details+of+Targeted+Service+Template>`_
+for details.
2. Closed Loop in bandwidth adjustment
Simulate alarm at the edge site branch and ONAP will execute close-loop automatically and trigger bandwidth to change higher.
@@ -24,53 +430,70 @@ Simulate alarm at the edge site branch and ONAP will execute close-loop automati
3. Site Change
Site can be add or delete according to the requirements
+More information about:
-More information about CCVPN in Dublin release:https://wiki.onap.org/pages/viewpage.action?pageId=45296665
-and the test case in Dublin can be found:https://wiki.onap.org/display/DW/CCVPN+Test+Cases+for+Dublin+Release
-And test status:https://wiki.onap.org/display/DW/CCVPN+Test+Status
+- `CCVPN in Dublin release <https://wiki.onap.org/pages/viewpage.action?pageId=45296665>`_
+- `Dublin test cases <https://wiki.onap.org/display/DW/CCVPN+Test+Cases+for+Dublin+Release>`_
+- `CCVPN Test Status wiki page <https://wiki.onap.org/display/DW/CCVPN+Test+Status>`_
-Note: CCVPN integration testing coversed service design, service creation and closed-loop bandwidth adjustments in Dublin release.
-The service termination and service change will continue to be tested in E release.
-During the integration testing, SDC, SO, SDC master branch are used which include the enhanced features for CCVPN use case.
+.. note::
+   CCVPN integration testing covered service design, service creation and
+ closed-loop bandwidth adjustments in Dublin release.
+ The service termination and service change will continue to be tested in E release.
+ During the integration testing, SDC, SO, SDC master branch are used which
+ includes the enhanced features for CCVPN use case.
-Sevice used for CCVPN
-~~~~~~~~~~~~~~~~~~~~~
+Service used for CCVPN
+~~~~~~~~~~~~~~~~~~~~~~
-- SOTNVPNInfraService, SDWANVPNInfraService and SIteService: https://wiki.onap.org/display/DW/CCVPN+Service+Design
-- WanConnectionService ( Another way to describe CCVPN in a single service form which based on ONF CIM ): https://wiki.onap.org/display/DW/CCVPN+Wan+Connection+Service+Design
+- `SOTNVPNInfraService, SDWANVPNInfraService and SIteService <https://wiki.onap.org/display/DW/CCVPN+Service+Design>`_
+- `WanConnectionService (Another way to describe CCVPN in a single service form which is based on ONF CIM) <https://wiki.onap.org/display/DW/CCVPN+Wan+Connection+Service+Design>`_
Description
~~~~~~~~~~~
-Cross-domain, cross-layer VPN (CCVPN) is one of the use cases of the ONAP Casablanca release. This release demonstrates cross-operator ONAP orchestration and interoperability with third party SDN controllers and enables cross-domain, cross-layer and cross-operator service creation and assurance.
-The demonstration includes two ONAP instances, one deployed by Vodafone and one by China Mobile, both of which orchestrate the respective operator underlay OTN networks and overlay SD-WAN networks and peer to each other for cross-operator VPN service delivery.
+Cross-domain, cross-layer VPN (CCVPN) is one of the use cases of the ONAP
+Casablanca release. This release demonstrates cross-operator ONAP orchestration
+and interoperability with third party SDN controllers and enables cross-domain,
+cross-layer and cross-operator service creation and assurance.
-The CCVPN Use Case Wiki Page can be found here: https://wiki.onap.org/display/DW/CCVPN%28Cross+Domain+and+Cross+Layer+VPN%29+USE+CASE.
+The demonstration includes two ONAP instances, one deployed by Vodafone and one
+by China Mobile, both of which orchestrate the respective operator underlay OTN
+networks and overlay SD-WAN networks and peer to each other for cross-operator
+VPN service delivery.
+
+`CCVPN Use Case Wiki Page <https://wiki.onap.org/display/DW/CCVPN%28Cross+Domain+and+Cross+Layer+VPN%29+USE+CASE>`_
The projects covered by this use case include: SDC, A&AI, UUI, SO, SDNC, OOF, Policy, DCAE(Holmes), External API, MSB
How to Use
~~~~~~~~~~
-Design time
-SOTNVPNInfraService, SDWANVPNInfraService and SIteService service Design steps can be found here: https://wiki.onap.org/display/DW/CCVPN+Service+Design
-WanConnectionService ( Another way to describe CCVPN in a single service form which based on ONF CIM ): https://wiki.onap.org/display/DW/CCVPN+Wan+Connection+Service+Design
+
+Design time:
+
+- `SOTNVPNInfraService, SDWANVPNInfraService and SIteService service Design steps <https://wiki.onap.org/display/DW/CCVPN+Service+Design>`_
+- `WanConnectionService ( Another way to describe CCVPN in a single service form which based on ONF CIM ) <https://wiki.onap.org/display/DW/CCVPN+Wan+Connection+Service+Design>`_
Run Time:
-All opertion will be triggerd by UUI, inlcuding service creation and termination, link management and topology network display.
+- All operations will be triggered by UUI, including service creation and termination,
+ link management and topology network display.
-More details can be fonud here: https://wiki.onap.org/display/DW/CCVPN+Test+Guide
+
+See the `CCVPN Test Guide wiki page <https://wiki.onap.org/display/DW/CCVPN+Test+Guide>`_
+for details.
Test Status and Plans
~~~~~~~~~~~~~~~~~~~~~
-All test case covered by this use case: https://wiki.onap.org/display/DW/CCVPN+Integration+Test+Case
-And the test status can be found: https://wiki.onap.org/display/DW/CCVPN++-Test+Status
+- `All test case covered by this use case <https://wiki.onap.org/display/DW/CCVPN+Integration+Test+Case>`_
+- `Test status <https://wiki.onap.org/display/DW/CCVPN++-Test+Status>`_
Known Issues and Resolutions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-1) AAI-1923. Link Management, UUI can't delete the link to external onap otn domain.
+
+1) AAI-1923. Link Management, UUI can't delete the link to external onap otn domain.
For the manual steps provided by A&AI team, we should follow the steps as follow
the only way to delete is using the forceDeleteTool shell script in the graphadmin container.
@@ -78,19 +501,19 @@ First we will need to find the vertex id, you should be able to get the id by ma
GET /aai/v14/network/ext-aai-networks/ext-aai-network/createAndDelete/esr-system-info/test-esr-system-info-id-val-0?format=raw
-::
+.. code-block:: JSON
+
+ {
-{
-"results": [
-{
-"id": "20624",
-"node-type": "pserver",
-"url": "/aai/v13/cloud-infrastructure/pservers/pserver/pserverid14503-as988q",
-"properties": {
-}
-}
-]
-}
+ "results": [
+ {
+ "id": "20624",
+ "node-type": "pserver",
+ "url": "/aai/v13/cloud-infrastructure/pservers/pserver/pserverid14503-as988q",
+ "properties": {}
+ }
+ ]
+ }
Same goes for the ext-aai-network:
@@ -103,59 +526,52 @@ Run the following command multiple times for both the esr-system-info and ext-aa
::
-kubectl exec -it $(kubectl get pods -lapp=aai-graphadmin -n onap --template 'range .items.metadata.name"\n"end' | head -1) -n onap gosu aaiadmin /opt/app/aai-graphadmin/scripts/forceDeleteTool.sh -action DELETE_NODE -userId YOUR_ID_ANY_VALUE -vertexId VERTEX_ID
+ kubectl exec -it $(kubectl get pods -lapp=aai-graphadmin -n onap --template 'range .items.metadata.name"\n"end' | head -1) -n onap gosu aaiadmin /opt/app/aai-graphadmin/scripts/forceDeleteTool.sh -action DELETE_NODE -userId YOUR_ID_ANY_VALUE -vertexId VERTEX_ID
From the above, remove the YOUR_ID_ANY_VALUE and VERTEX_ID with your info.
2) SDC-1955. Site service Distribution
To overcome the Service distribution, the SO catalog has to be populated with the model information of the services and resources.
-a) Refering to the Csar that is generated in the SDC designed as per the detailes mentioned in the below link: https://wiki.onap.org/display/DW/CCVPN+Service+Design
+a) Referring to the Csar that is generated in the SDC designed as per the details mentioned in the below link: https://wiki.onap.org/display/DW/CCVPN+Service+Design
b) Download the Csar from SDC thus generated.
c) copy the csar to SO sdc controller pod and bpmn pod
+
+.. code-block:: bash
+
kubectl -n onap get pod|grep so
kubectl -n onap exec -it dev-so-so-sdc-controller-c949f5fbd-qhfbl /bin/sh
-
mkdir null/ASDC
mkdir null/ASDC/1
kubectl -n onap cp service-Sdwanvpninfraservice-csar.csar dev-so-so-bpmn-infra-58796498cf-6pzmz:null/ASDC/1/service-Sdwanvpninfraservice-csar.csar
kubectl -n onap cp service-Sdwanvpninfraservice-csar.csar dev-so-so-bpmn-infra-58796498cf-6pzmz:ASDC/1/service-Sdwanvpninfraservice-csar.csar
-d) populate model information to SO db
- the db script example can be seen in https://wiki.onap.org/display/DW/Manual+steps+for+CCVPN+Integration+Testing
+d) populate model information to SO db: the db script example can be seen in
+ https://wiki.onap.org/display/DW/Manual+steps+for+CCVPN+Integration+Testing
The same would also be applicable for the integration of the client to create the service and get the details.
Currently the testing has been performed using the postman calls to the corresponding APIs.
-3) SDC-1955 & SDC-1958. Site serivce parsing Error
+3) SDC-1955 & SDC-1958. Site service parsing Error
-UUI: stored the csar which created based on beijing release under a fixed directory, If site serive can't parsed by SDC tosca parser, UUI will parse this default csar and get the input parameter
+UUI: stores the csar, which was created based on the Beijing release, under a fixed directory. If the site service can't be parsed by the SDC tosca parser, UUI will parse this default csar and get the input parameter
a) Make an available csar file for CCVPN use case.
b) Replace uuid of available files with what existing in SDC.
c) Put available csar files in UUI local path (/home/uui).
-4) SO docker branch 1.3.5 has fixes for the issues 1SO-1248.
+4) SO docker branch 1.3.5 has fixes for the issues 1SO-1248
After SDC distribution success, copy all csar files from so-sdc-controller:
- connect to so-sdc-controller( eg: kubectl.exe exec -it -n onap dev-so-so-sdc-controller-77df99bbc9-stqdz /bin/sh )
- find out all csar files ( eg: find / -name '*.csar' )
- the csar files should be in this path: /app/null/ASDC/ ( eg: /app/null/ASDC/1/service-Sotnvpninfraservice-csar.csar )
- exit from the so-sdc-controller ( eg: exit )
- copy all csar files to local derectory ( eg: kubectl.exe cp onap/dev-so-so-sdc-controller-6dfdbff76c-64nf9:/app/null/ASDC/tmp/service-DemoService-csar.csar service-DemoService-csar.csar -c so-sdc-controller )
-
-Copy csar files, which got from so-sdc-controller, to so-bpmn-infra
- connect to so-bpmn-infra ( eg: kubectl.exe -n onap exec -it dev-so-so-bpmn-infra-54db5cd955-h7f5s -c so-bpmn-infra /bin/sh )
- check the /app/ASDC deretory, if doesn't exist, create it ( eg: mkdir /app/ASDC -p )
- exit from the so-bpmn-infra ( eg: exit )
- copy all csar files to so-bpmn-infra ( eg: kubectl.exe cp service-Siteservice-csar.csar onap/dev-so-so-bpmn-infra-54db5cd955-h7f5s:/app/ASDC/1/service-Siteservice-csar.csar )
-
-5) Manual steps in closed loop Scenario:
-Following steps were undertaken for the closed loop testing.
-a. Give controller ip, username and password, trust store and key store file in restconf collector collector.properties
-b. Updated DMAAP ip in cambria.hosts in DmaapConfig.json in restconf collector and run restconf collector
-c. Followed the steps provided in this link(https://wiki.onap.org/display/DW/Holmes+User+Guide+-+Casablanca#HolmesUserGuide-Casablanca-Configurations) to push CCVPN rules to holmes
-d. Followed the steps provided in this link(https://wiki.onap.org/display/DW/ONAP+Policy+Framework%3A+Installation+of+Amsterdam+Controller+and+vCPE+Policy) as reference to push CCVPN policies to policy module and updated sdnc.url, username and password in environment(/opt/app/policy/config/controlloop.properties.environment)
-As per wiki (Policy on OOM), push-policied.sh script is used to install policies. but I observed that CCVPN policy is not added in this script. So merged CCVPN policy using POLICY-1356 JIRA ticket. but policy is pushed by using push-policy_casablanca.sh script during integration test.
-It is found that the changes made were overwritten and hence had to patch the DG manually. This will be tracked by the JIRA SDNC-540.
-
-all above manual steps can be found https://wiki.onap.org/display/DW/Manual+steps+for+CCVPN+Integration+Testing \ No newline at end of file
+
+- connect to so-sdc-controller ( eg: kubectl.exe exec -it -n onap dev-so-so-sdc-controller-77df99bbc9-stqdz /bin/sh )
+- find out all csar files ( eg: find / -name "\*.csar" ), the csar files should
+ be in this path: /app/null/ASDC/ ( eg: /app/null/ASDC/1/service-Sotnvpninfraservice-csar.csar )
+- exit from the so-sdc-controller ( eg: exit )
+- copy all csar files to local directory ( eg: kubectl.exe cp onap/dev-so-so-sdc-controller-6dfdbff76c-64nf9:/app/null/ASDC/tmp/service-DemoService-csar.csar service-DemoService-csar.csar -c so-sdc-controller )
+
+Copy csar files, which got from so-sdc-controller, to so-bpmn-infra:
+
+- connect to so-bpmn-infra ( eg: kubectl.exe -n onap exec -it dev-so-so-bpmn-infra-54db5cd955-h7f5s -c so-bpmn-infra /bin/sh )
+- check the /app/ASDC directory, if doesn't exist, create it ( eg: mkdir /app/ASDC -p )
+- exit from the so-bpmn-infra ( eg: exit )
+- copy all csar files to so-bpmn-infra ( eg: kubectl.exe cp service-Siteservice-csar.csar onap/dev-so-so-bpmn-infra-54db5cd955-h7f5s:/app/ASDC/1/service-Siteservice-csar.csar )
diff --git a/docs/docs_CM_flexible_designer_orchestrator.rst b/docs/docs_CM_flexible_designer_orchestrator.rst
index 3a9dd7bfe..c919ec6f8 100644
--- a/docs/docs_CM_flexible_designer_orchestrator.rst
+++ b/docs/docs_CM_flexible_designer_orchestrator.rst
@@ -3,8 +3,10 @@
.. _docs_CM_flexible_designer_orchestrator:
+:orphan:
+
Dublin Workflow Designer Release Notes
--------------------------------------------------------------
+--------------------------------------
The Workflow Editor was developed in the Beijing release by Amdocs and
is available in SDC for users to create workflows.
@@ -287,4 +289,4 @@ part of the Dublin release. The others were not part of the release but
are available to test with your vNF. Please refer to the Scale out
release notes for further information.
-https://onap.readthedocs.io/en/latest/submodules/integration.git/docs/docs_scaleout.html#docs-scaleout
+https://docs.onap.org/projects/onap-integration/en/frankfurt/docs_scaleout.html
diff --git a/docs/docs_CM_schedule_optimizer.rst b/docs/docs_CM_schedule_optimizer.rst
index 9da2e5337..2ff8cfca1 100644
--- a/docs/docs_CM_schedule_optimizer.rst
+++ b/docs/docs_CM_schedule_optimizer.rst
@@ -1,15 +1,22 @@
.. This work is licensed under a Creative Commons Attribution 4.0
International License. http://creativecommons.org/licenses/by/4.0
-
-.. _docs_CM_schedule_optimizer:
-Change Management Schedule Optimization
--------------------------------------------------------------
+.. _docs_CM_schedule_optimizer:
-Description
+:orphan:
+
+Change Management Schedule Optimization
+---------------------------------------
+
+Description
~~~~~~~~~~~~~~
-The change management schedule optimizer automatically identifies a conflict-free schedule for executing changes across multiple network function instances. It takes into account constraints such as concurrency limits (how many instances can be executed simultaneously), time preferences (e.g., night time maintenance windows with low traffic volumes) and applies optimization techniques to generate schedules.
+The change management schedule optimizer automatically identifies a conflict-free
+schedule for executing changes across multiple network function instances.
+It takes into account constraints such as concurrency limits (how many instances
+can be executed simultaneously), time preferences (e.g., night time maintenance
+windows with low traffic volumes) and applies optimization techniques to
+generate schedules.
-More details can be found here:
-https://onap.readthedocs.io/en/latest/submodules/optf/cmso.git/docs/index.html \ No newline at end of file
+More details can be found here:
+https://docs.onap.org/projects/onap-optf-cmso/en/latest/index.html#master-index
diff --git a/docs/docs_E2E_network_slicing.rst b/docs/docs_E2E_network_slicing.rst
new file mode 100644
index 000000000..3686b2d0c
--- /dev/null
+++ b/docs/docs_E2E_network_slicing.rst
@@ -0,0 +1,638 @@
+.. This file is licensed under the CREATIVE COMMONS ATTRIBUTION 4.0 INTERNATIONAL LICENSE
+.. Full license text at https://creativecommons.org/licenses/by/4.0/legalcode
+
+:orphan:
+
+.. contents::
+ :depth: 3
+..
+.. _docs_E2E_network_slicing:
+
+
+E2E Network Slicing Use Case
+============================
+
+Overall Blueprint
+-----------------
+The objective of this use case is to realize **End-to-End 5G Network
+Slicing** using ONAP. An End-to-End Network Slice consists of RAN (Radio
+Access Network), Transport Network (TN) and Core Network (CN) slice
+sub-nets. This use case intends to demonstrate the modeling,
+orchestration (life cycle and resources) and assurance of a network
+slice which are implemented in alignment with relevant standards. The
+key highlights of this use case include:
+
+- Modular architecture providing building blocks and flexibility under
+ various deployment scenarios
+
+- Functionality aligned with 3GPP and other relevant standards such as
+ ETSI and IETF
+
+- Interfaces and APIs aligned with relevant standards (3GPP, IETF,
+ ETSI, TM Forum, etc.) while enabling easy customization through use
+ of appropriate plug-ins. This would enable easier interoperability of
+ slice management functions realized within ONAP with 3\ :sup:`rd`
+ party slice management functions, as well as northbound and
+ southbound systems.
+
+- Taking a step-by-step approach to realizing different architectural
+ options in an extendable manner.
+
+- Providing flexibility in network slice selection by providing an
+ option of manual intervention, as well as abstracting the network
+ internals as needed.
+
+- The use case implementation team is composed of service providers,
+ software and hardware vendors, solution providers and system
+ integrators thereby taking into consideration different perspectives
+ and requirements.
+
+This use case is a multi-release effort in ONAP with the first steps
+taken in Frankfurt release. It will continue to expand in scope both in
+breadth and depth, and along the journey it shall also align with
+updates to the relevant standards which are also currently evolving.
+This use case shall also collaborate with SDOs such as
+O-RAN and ETSI to enable wider adoption and use.
+
+Architecture Choice
+-------------------
+3GPP(TS 28.801) defines three layer slice management functions which include:
+
+CSMF(Communication Service Management Function):
+
+• Responsible for translating the communication service related requirement to network slice related requirements.
+
+• Communicate with Network Slice Management Function (NSMF).
+
+NSMF(Network Slice Management Function):
+
+• Responsible for management and orchestration of NSI.
+• Derive network slice subnet related requirements from network slice related requirements.
+• Communicate with the Network Slice Subnet Management Function (NSSMF) and Communication Service Management Function.
+
+NSSMF(Network Slice Subnet Management Function):
+
+• Responsible for management and orchestration of NSSI.
+• Communicate with the NSMF.
+
+To realize the three layers of the slice management function, we need to decide whether to implement CSMF, NSMF or NSSMF within ONAP, or use an external CSMF, NSMF or NSSMF. This implies that for ONAP-based network slice management, we have different choices from an architectural perspective:
+
+1) Implement CSMF, NSMF, NSSMF all within ONAP;
+
+2) Connect an external CSMF from the Northbound, Implement NSMF and NSSMF within ONAP;
+
+3) Connect an external CSMF from the Northbound, Implement NSMF within ONAP, Connect a 3rd party NSSMF from the Southbound;
+
+4) Implement CSMF, NSMF within ONAP, Connect a 3rd party NSSMF from the Southbound.
+
+5) Use external CSMF and NSMF, only implement NSSMF within ONAP.
+
+External Interfaces
+-------------------
+The guiding principle is when a Slice Management function is outside ONAP, standard interfaces/APIs (3GPP, IETF, ETSI, TM Forum, etc.) can be supported by default, while any customization of such interfaces shall also be supported by ONAP using suitable plug-ins/adaptors. This would enable easier interoperability of slice management functions realized within ONAP with 3rd party slice management functions, as well as northbound and southbound systems.
+
+Another key point would be that both internal and external interface mechanisms should be supported by the corresponding ONAP modules. To be more specific, communication between Slice Management Functions within ONAP (e.g., CSMF and NSMF) shall use ONAP internal mechanisms such as workflow calls, DMaaP messages, etc. or standard APIs as appropriate. For example, SO acting as NSMF should support API call directly from CSMF in ONAP, as well as API trigger from an external CSMF via EXT-API.
+
+Network Slice Instance (NSI) Life Cycle View
+--------------------------------------------
+3GPP Specification (3GPP TS 28.530) describes management aspects of a Network Slice Instance, which can be described by the four phases:
+
+- Preparation: The preparation phase includes network slice design, network slice capacity planning, on-boarding and evaluation of the network functions, preparing the network environment and other necessary preparations required to be done before the creation of an NSI.
+- Commissioning: NSI provisioning in the commissioning phase includes creation of the NSI. During NSI creation all needed resources are allocated and configured to satisfy the network slice requirements. The creation of an NSI can include creation and/or modification of the NSI constituents.
+- Operation: The Operation phase includes the activation, supervision, performance reporting (e.g. for KPI monitoring), resource capacity planning, modification and de-activation of an NSI.
+- Decommissioning: Network slice instance provisioning in the decommissioning phase includes decommissioning of non-shared constituents if required and removing the NSI specific configuration from the shared constituents. After the decommissioning phase, the NSI is terminated and does not exist anymore.
+
+The ONAP-based NSI lifecycle management will finally provide the demonstration of all these phases.
+
+Abbreviations
+-------------
+
++---------------+--------------------------------------------+
+| Abbreviation | Meaning |
++===============+============================================+
+| CSMF | Communication Service Management Function |
++---------------+--------------------------------------------+
+| CSI | Communication Service Instance |
++---------------+--------------------------------------------+
+| CST | Communication Service Template |
++---------------+--------------------------------------------+
+| NSI | Network Slice Instance |
++---------------+--------------------------------------------+
+| NSMF | Network Slice Management Function |
++---------------+--------------------------------------------+
+| NSSI | Network Slice Sub-net Instance |
++---------------+--------------------------------------------+
+| NSSMF | Network Slice Sub-net Management Function |
++---------------+--------------------------------------------+
+| NST | Network Slice Template |
++---------------+--------------------------------------------+
+| NSST | Network Slice Sub-net Template |
++---------------+--------------------------------------------+
+
+
+Recap of Frankfurt functionality
+--------------------------------
+In Frankfurt release, CSMF and NSMF within ONAP were implemented, while connecting to an external Core NSSMF.
+From the NSI Life Cycle perspective, the scope for Frankfurt included NSI design and pre-provision, NSI instantiation
+and configuration, and NSI activation and deactivation. In particular:
+
+- CSMF: Functions of slice service creation, slice service activation and deactivation were implemented.
+
+- NSMF: Functions of NSI instantiation, NSI activation and deactivation were implemented. In addition, manual
+ intervention is also provided in NSMF slice task management portal to ensure the selected NSI/NSSI as well as
+ Service Profile and Slice Profile are OK or need adjustment.
+
+- Design of CST, NST and onboarding NSST that are required to support the run-time orchestration functions
+
+- To connect to the external (core) NSSMF, an adaptor was implemented to provide interface between ONAP and 3rd party
+ core NSSMF.
+
+To support the above functions, code impacts in U-UI, SO, OOF and ExtAPI components, and schema change in A&AI
+were implemented. See the `Proposed Functions for R6 and Impacted Modules wiki page <https://wiki.onap.org/display/DW/Proposed+Functions+for+R6+and+Impacted+Modules>`_ for details.
+
+As part of Frankfurt release work, we supported the minimum-scope installation of ONAP to reduce the resource requirements.
+From the module perspective, 5G E2E Slicing use case involves SDC, SO, A&AI, UUI, EXT-API, OOF and Policy modules of ONAP.
+So we will configure these required modules along with the mandatory common modules such as DMaaP. Further, for each module,
+the use case also does not use all of the charts, so we removed the unneeded charts under those modules to optimize the
+resources required for setting up the use case. This approach will help to install a minimum-scope version ONAP for the
+E2E Slicing use case.
+
+Further details of the installation steps are available at: `Install Minimum Scope ONAP for 5G Network Slicing wiki page
+<https://wiki.onap.org/display/DW/Install+Minimum+Scope+ONAP+for+5G+Network+Slicing>`_
+
+Recap of Guilin functionality
+-----------------------------
+From the architecture point of view, in Guilin release, besides the continuation of NSMF which was implemented in
+Frankfurt release, the RAN NSSMF, TN NSSMF, CORE NSSMF have been implemented within ONAP, apart from interacting with
+external RAN NSSMF and external CORE NSSMF.
+
+The following provides an overview of the enhancements done in Guilin release:
+
+- **Enhancements in NSMF**: Service Profile decomposition into Slice Profiles for 3 domains, NSI selection enhancement,
+ E2E slice instance creation including RAN, TN and CN slice sub-net instance creation/reuse, activation/deactivation
+ of E2E slice, and deciding whether to terminate E2E slice or not.
+
+- **RAN NSSMF, TN NSSMF, CN NSSMF within ONAP**: Basic logic for all 3 NSSMFs to support NSSI allocation, activation,
+ deactivation, deletion and modification (in case of reuse of NSSI).
+
+- **Enable NSMF interaction with RAN NSSMF, TN NSSMF, CN NSSMF**: Implement generic NSSMF adaptor for three domain NSSMFs,
+ alignment with standard interfaces (3GPP, IETF), enable the connection to external RAN NSSMF.
+
+- **Design of RAN NSST, TN NSST, CN NSST and Slice Profiles, TN information models**: Basic E2E Slicing model was provided,
+  all the related templates were designed from SDC, along with TN related information models.
+
+- **TMF 641 support**: Extension of the TMF 641 based interface from NB of ExtAPI to support service activation,
+ deactivation and termination.
+
+- **RAN and CN NFs set up and initial configurations**: CN NF simulators were developed: AMF, SMF, UPF and configure the
+ S-NSSAI on CN NFs; RAN NF Simulator was enhanced for PM data reporting, CU and Near-RT RIC configuration.
+
+- **KPI monitoring**: Implementation to request details of a KPI via UUI to ONAP DCAE. Providing the requested data to UUI
+ by DCAE using a new microservice (Data Exposure Service - DES). Enhancements in PM-Mapper to do KPI computation is
+ in progress, and will be completed in Honolulu release.
+
+- **Closed Loop**: First steps to realizing a simple Closed Loop in the RAN using PM data collected from the RAN was
+ implemented - update the allowed throughput for a S-NSSAI per Near-RT RIC coverage area based on DL/UL PRB for data
+ traffic that was reported from the RAN. The analysis of the PM data was done using a new Slice Analysis MS in DCAE,
+ and the Policy-based Control Loop framework was applied to trigger the updates in the RAN.
+
+- **Intelligent Slicing**: First steps to realizing a simple ML-based Closed Loop action in the RAN using PM data collected
+ from the RAN was implemented - update the maxNumberofConns for a S-NSSAI in each cell based on PDU session related
+ PM data that was reported from the RAN (PDU sessions requested, successfully setup and failed to be set up). The
+ training was done offline, and the ML model is onboarded as a micro-service to ONAP for demo purpose alone (it is
+ not part of ONAP code/repos). The ML model provides updates to the Slice Analysis MS, which then uses the
+ Policy-based Control Loop framework to trigger the updates in the RAN.
+
+- **Modeling enhancements**: Necessary modeling enhancements to support all the above functionalities.
+
+The base use case page for Guilin release is `E2E Network Slicing Use Case in R7 Guilin <https://wiki.onap.org/display/DW/E2E+Network+Slicing+Use+Case+in+R7+Guilin>`_.
+
+The child wiki pages of the above page contains details of the assumptions, flows and other relevant details.
+
+Honolulu release updates
+------------------------
+In Honolulu release, the following aspects were realized:
+
+- **Modeling Enhancements** were made, details can be found at:
+ `Modeling enhancements in Honolulu <https://wiki.onap.org/display/DW/Modeling+enhancements+in+Honolulu>`_.
+
+- **Functional Enhancements**
+
+ (a) Minor enhancements in NSMF and NSSMFs including NST Selection, Shared slices, coverageArea to
+ coverageAreaTAList mapping, etc.
+ (b) Enhancements related to endpoints for stitching together an end-to-end network slice
+ (c) Use of CPS (instead of Config DB) to determine the list of Tracking Areas corresponding to a given
+ Coverage Area (input by user). For the remaining RAN configuration data, we continue to use Config DB.
+ (d) RRM Policy update by SDN-R to RAN NFs during RAN NSSI creation/reuse
+
+- **Integration Testing**
+ Continuing with integration tests deferred in Guilin release, and associated bug-fixing
+
+Important Remarks
+~~~~~~~~~~~~~~~~~~~
+(a) 2 deployment scenarios for RAN NSSI are supported. In the first scenario, the RAN NSSI comprises also of
+    TN Fronthaul (FH) and TN Midhaul (MH) NSSIs, and RAN NSSMF shall trigger TN NSSMF for TN FH and MH NSSI
+ related actions. In the second scenario, the RAN NSSI comprises only of RAN NFs. TN NSSMF shall be triggered by
+ NSMF for TN FH and MH NSSI related actions. This part is not yet implemented in NSMF within ONAP.
+
+(b) Details of the modeling aspects, flows and other relevant info about the use case are available in:
+ `R8 E2E Network Slicing Use Case <https://wiki.onap.org/display/DW/R8+E2E+Network+Slicing+use+case>`_ and its child wiki pages.
+
+
+Impacted Modules for Honolulu
+-----------------------------
+The code-impacted modules of E2E Network Slicing in Honolulu release are:
+
+- **UUI**: The enhancements done include:
+
+  (a) The coverageArea number param is added in CSMF creation UI. Users could input
+ the grid numbers to specify the area where they want the slicing service to cover.
+ (b) The relation link image of AN/TN/CN has been added. Users can see the links and related params
+ of the three domains.
+ (c) The TN’s connection link with AN/CN has been added in NS Task management GUI.
+
+- **AAI**: Schema changes were introduced. We added some new parameters in 2 nodes:
+
+ (a) ‘Connectivity’ is used to store IETF/ACTN ETH service parameters. New attributes added in order
+ to support the CCVPN network configuration operations on multi-domain (2+) interconnections.
+  (b) ‘Vpn-binding’ is used to store ACTN OTN Tunnel model’s parameters.
+
+- **OOF**: Updates include:
+
+ (a) NST selection is enhanced by fetching the templates from SDC directly.
+ (b) coverageArea to coverageAreaTAList mapping is done by OOF (as part of Slice Profile generation)
+ by accessing CPS.
+ (c) Bug-fixes
+
+- **SO**: Main updates include support of NSI shared scenarios by enhancing the interaction with OOF, AAI and
+ UUI. Apart from this some updates/fixes have been made in NSMF, RAN/Core/TN NSSMF functionality in SO, for
+ example:
+
+  (a) *NSMF*: Update NSI selection process to support shared NSI and add sst parameter
+ (b) *AN NSSMF*: Activation flow for SDN-R interactions, allocate flow & added timeDelay in QueryJobStatus,
+ support of Option 1 for topmost RAN NSSI
+ (c) *CN NSSMF*: Non-shared allocate flow
+ (d) *TN NSSMF*: Modify TN NSSI operation
+
+- **CPS**: 2 APIs required for the use case are supported. The remaining yang models are also onboarded,
+ however, the API work as well as further enhancements to CPS Core, NF Proxy and Template-Based Data
+ Model Transformer Service shall continue beyond Honolulu.
+
+- **SDN-R**: RRMP Policy updates, enhancements for updating the RAN configuration during slice reuse,
+ closed loop and intelligent slicing.
+
+- **DCAE**:
+
+ (a) *KPI Computation MS*: This MS was introduced newly for computation of slice related KPIs. In this release,
+ it supports basic KPI computation based on formula specified via Policy. Further details about this MS is
+ available at `KPI Computation MS <https://wiki.onap.org/display/DW/DCAE+R8+KPI-Computation+ms>`_
+ (b) *Slice Analysis MS*: Minor updates were done.
+
+Apart from the above, Policy and SDC had test-only impact for this use case.
+
+In addition:
+
+- **Config DB** was updated to handle bugs and gaps found during testing. This is not an official ONAP component, and
+ its functionality is expected to be performed fully by the Configuration Persistence Service (CPS) in future ONAP
+ release (beyond Honolulu).
+
+- **Core NF simulator** and *ACTN simulator* were also updated and checked into ONAP simulator repo.
+
+- **RAN-Sim** has been updated to fix bugs found during testing, and also checked into ONAP simulator repo.
+
+
+Functional Test Cases
+---------------------
+The functional testing of this use case shall cover CSMF/NSMF, the 3 NSSMFs and Closed Loop functionality. We classify the
+test cases into 5 tracks: CSMF/NSMF, RAN NSSMF, Core NSSMF, TN NSSMF and Closed Loop.
+Details of the test cases can be found at:
+`Integration Test details for Honolulu <https://wiki.onap.org/display/DW/Integration+Test+details+for+Honolulu>`_ and its child wiki pages.
+
+
+Operation Guidance
+------------------
+The Honolulu release setup details for the E2E Network Slicing use case will be available at the following page and its
+sub-pages:
+`User Operation Guide for Honolulu release <https://wiki.onap.org/display/DW/User+Operation+Guide+for+Honolulu+release>`_
+
+
+Known Issues and Resolutions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Details of manual configurations, work-arounds and known issues will be documented in the child wiki pages of:
+`User Operation Guide for Honolulu release <https://wiki.onap.org/display/DW/User+Operation+Guide+for+Honolulu+release>`_
+
+The following integration tests are carried over to Istanbul release: see `REQ-721 <https://jira.onap.org/browse/REQ-721>`_
+- NSMF: Option 2 testing, remaining regression testing and service termination testing for NSMF
+- RAN NSSMF: RAN NSSI termination, interactions with TN NSSMF for FH/BH NSSI reuse and some minor aspects related to SDN-R <-> RAN interaction
+- TN NSSMF: Checking some minor aspects in SO for modifying TN NSSI.
+- Core NSSMF: Modifying and deallocating a Core NSSI, reusing an existing Core NSSI
+- KPI Computation, Closed Loop & Intelligent Slicing: Some minor aspects on SDN-R <-> RAN-Sim interface needs to be addressed.
+
+Further details of these test cases can be found in REQ jiras for integration testing for Honolulu, and in the
+use case wiki. This means that the functionality associated with these test cases may require updated versions
+of the relevant components - the User Operation Guide will also be updated with details of any bug fixes
+beyond Honolulu as the testing is anyhow continuing as part of Istanbul release.
+
+Istanbul release updates
+------------------------
+Below aspects are covered in Istanbul release:
+
+1. **CPS-TBDMT Enhancements** - This service shall be used to map the erstwhile Config-DB-like REST APIs to appropriate CPS API calls. The purpose of this service is to abstract the details of (possibly multiple, and complex) XPath queries from the users of CPS. It enables CPS-users to continue using simple REST API calls that are intuitive and easy-to-understand and implement. The mapping to appropriate queries to CPS (including mapping of one API call to many Xpath queries) shall be done in a generic way by the CPS-TBDMT service. In Istanbul release, following are the main enhancements done:
+
+   - Support edit query i.e., post, put and patch requests to CPS
+
+ - Support Output Transformation
+
+ (a) Extract desired output from the data returned from CPS.
+ (b) If 'transformParam' is not defined in the template no transformation takes place.
+ - Support Multiple query
+
+ (a) Make multiple queries to CPS in single request.
+ (b) If 'multipleQueryTemplateId' is mentioned in the template, it will execute this template first and insert the result to the current template to make multiple queries to CPS.
+ - Support Delete data requests to CPS
+
+ (a) Process delete request type.
+ - Support for dynamic anchor - Accept anchors at run time and execute query
+
+2. **CPS Integration**
+
+ - Config DB is replaced with the CPS component to read, write, update and delete the RAN Slice details. CPS APIs are accessed via CPS-TBDMT component. CPS integration with DCAE - Slice Analysis MS and OOF are completed. SDN-R integration with CPS is completed for the shared RAN Slice flow, activateRANslice and terminateRANSlice implementations are in progress.
+ - A new SDN-C karaf feature is introduced to register the cm-handle (anchor) with CPS. The integration with CPS-DMI plugin will be done in Jakarta release.
+
+3. **NSMF based TN Slices** - Support for interacting with TN NSSMF directly from NSMF for front haul and mid haul slice subnets. There will be separate SDC template for this scenario. NST will have 5 NSSTs - CN NSST, AN NSST, TN FH NSST, TN MH NSST, TN BH NSST.
+
+4. **KPI Monitoring** - Implementation is done in KPI Computation MS to configure the required KPIs and the KPI computation formula based on policies.
+
+5. **Closed Loop** - Closed Loop updates are sent over A1 interface to Near-RT RIC. This is done at the POC level. This will be further enhanced in Jakarta release to make use of the A1-Policy Management Service in CCSDK.
+
+6. **Intelligent Slicing** - End to end intelligent slicing - closed loop flow is tested with the initial version of Machine Learning MS.
+
+7. **Carry-over Testing from Honolulu Release**
+
+ - RAN NSSMF Testing
+
+ (a) Testing completed for the allocation, modification, activation and deactivation of the RAN slice to support option1
+ (b) Integration Testing of AN NSSMF with SDNR interactions for allocate and modify flow is completed
+ - E2E Testing
+
+ (a) Service instantiation for non-shared and shared scenario and fixes to support option 1 are done
+ (b) NSI selection process support for shared NSI is tested
+
+Impacted Modules for Istanbul Release
+-------------------------------------
+- **SO**
+ (a) Support of NSI termination by enhancing the interaction with OOF, AAI and UUI
+ (b) RAN NSSI Termination support with OOF & SDN-R interactions
+ (c) Bug fixes in Option 1 (CSMF, NSMF and NSSMFs are within ONAP & TN-FH, TN-MH are created by RAN NSSMF)
+ - **CSMF**: Fixed sNSSAI format and updated authentication for NSMF invocation
+ - **NSMF**: Fixes in NSI termination issues to support OOF interaction for NSI termination query and added subnet Type support for respective TN Domain
+ - **AN NSSMF**: Fixes for different termination scenarios in Option 1
+ - **CN NSSMF**: Bug fixes in shared allocate flow, modify flow and terminate flow
+ - Slice Profile alignment with NSSMF
+ (d) NSMF based TN Slices (TN-FH, TN-MH are created by NSMF) - Work flow changes to support this approach
+
+- **OOF**
+ (a) Integration with CPS for coverage area to coverage area TA list
+ (b) Bug fixes in NxI termination
+
+- **DCAE**
+ (a) Minor changes in Slice Analysis MS to support CPS integration
+    (b) KPI Computation MS is enhanced to support policy based KPIs and formula
+
+- **SDN-R**
+ (a) Bug fixes in instantiateRANSliceAllocate, instantiateRANSliceAllocateModify, activateRANSlice, terminateRANSlice Directed Graphs
+ (b) CPS integration for the instantiateRANSliceAllocateModify, activateRANSlice, terminateRANSlice Directed Graphs
+ (c) A new karaf feature is introduced to register the cm-handle with CPS
+
+- **CPS-TBDMT**
+ (a) This component is enhanced to support different type of queries based on templates
+
+- **CPS**
+ (a) Bug fixes and support for GET, POST, PATCH and DELETE type of queries.
+
+Istanbul Release - Functional Test cases
+----------------------------------------
+**Honolulu release carry-over test cases**
+ (a) Different possible scenarios of E2E Slice (eMBB) creation are tested in I-release
+ (b) RAN slice Termination testing completed
+ (c) Test cases to validate slice reuse and terminate using Option 2 (Core NSSMF and RAN NSSMF external) are completed
+
+**R9 Integration Testing**
+ (a) RAN NSSMF integration with CPS is covered for RANSlice modification, activation, deactivation and termination
+ (b) NSMF driven TN-FH and TN-MH slices creation is tested
+ (c) CPS impacts in closed loop scenario is validated and few test cases are deferred to Jakarta release
+
+ Integration test plan is available at `Integration Testing in Istanbul Release <https://wiki.onap.org/display/DW/R9+Integration+Test+for+E2E+Network+Slicing>`_
+
+Istanbul Release - Operation Guidance
+-------------------------------------
+The steps for E2E network slicing use case will be available at `User Operation Guidance - Istanbul Release <https://wiki.onap.org/pages/viewpage.action?pageId=111118867>`_. It is an update to the user manual created in Honolulu release.
+
+Istanbul Release - Known issues and Solutions
+---------------------------------------------
+
+**REGISTER 3RD PARTY CONTROLLERS**
+
+The ONAP TSC approved on July 9th, 2020 to change the status of ESR GUI Module
+to an 'unmaintained' project. Further information about 'Unmaintained Projects'
+can be found in the `ONAP Developer Wiki. <https://wiki.onap.org/x/Pw_LBQ>`__
+
+But excluding the ESR GUI module from ONAP does not mean that the "external
+system registration" mechanism is excluded; i.e. only the GUI is not available
+anymore.
+
+Nevertheless, in order to register the 3rd party controllers (like it is done
+in E2E network slicing use case and recently in Cloud Leased Line "CLL" use
+case as part of Intent-Based Networking), AAI's API are invoked manually.
+
+To do so, please send the following CURL command (PUT) to your AAI, with the
+attached xml payload. In the payload, please adjust the controller name (in
+this case sdnc1) and the controller ip address accordingly based on your
+environment:
+
+CURL COMMAND:
+
+.. code-block:: bash
+
+ curl -k -X PUT https://{{your-onap-ip-address}}:30233/aai/v16/external-system/esr-thirdparty-sdnc-list/esr-thirdparty-sdnc/sdnc1 -u "AAI:AAI" -H "X-FromAppId:postman" -H "Content-Type:application/xml" -H "Accept: application/xml" -H "X-TransactionId:9999" -d @/home/onap/esr-registration-controller-1.xml
+
+
+PAYLOAD (esr-registration-controller-1.xml):
+
+.. code-block:: xml
+
+ <?xml version="1.0" encoding="UTF-8"?>
+ <esr-thirdparty-sdnc xmlns="http://org.onap.aai.inventory/v16">
+ <thirdparty-sdnc-id>sdnc1</thirdparty-sdnc-id>
+ <location>Core</location>
+ <product-name>TSDN</product-name>
+ <esr-system-info-list>
+ <esr-system-info>
+ <esr-system-info-id>sdnc1</esr-system-info-id>
+ <system-name>sdnc1</system-name>
+ <type>WAN</type>
+ <vendor>Huawei</vendor>
+ <version>V3R1</version>
+ <service-url>http://192.168.198.10:18181</service-url>
+ <user-name>onos</user-name>
+ <password>rocks</password>
+ <system-type>nce-t-controller</system-type>
+ <protocol>RESTCONF</protocol>
+ <ssl-cacert>example-ssl-cacert-val-20589</ssl-cacert>
+ <ssl-insecure>true</ssl-insecure>
+ <ip-address>192.168.198.10</ip-address>
+ <port>26335</port>
+ <cloud-domain>example-cloud-domain-val-76077</cloud-domain>
+ <default-tenant>example-default-tenant-val-71148</default-tenant>
+ <passive>true</passive>
+ <remote-path>example-remotepath-val-5833</remote-path>
+ <system-status>example-system-status-val-23435</system-status>
+ </esr-system-info>
+ </esr-system-info-list>
+ </esr-thirdparty-sdnc>
+
+
+Additional issues occurred during the deployment and integration testing will be
+listed in the ONAP Developer Wiki at `Network Slicing - Issues and Solutions <https://wiki.onap.org/display/DW/Network+Slicing+-+Issues+and+Solutions>`_
+
+Jakarta Release Updates
+-----------------------
+In Jakarta release, the following aspects are covered:
+
+1. **E2E Network Slicing Solution**
+ - Slice selection based on resource occupancy level. With this enhancement, NSMF/NSSMF is able to monitor and update resource levels at NSI/NSSI level. OOF returns the solution for NSI/NSSI selection based on the criteria. In case of shared scenario, NSI/NSSI can be shareable only if sufficient resources are available in the network. RAN NSSMF’s resource occupancy is considered for this release. Resource occupancy of Core and TN NSSMFs will be considered in future releases.
+2. **RAN Slicing**
+ - Optimization of cm-handle registration with CPS-DMI Plugin for RAN NF instances to upload yang model.
+ - CPS integration with SDN-R for RAN slice allocation and reconfiguration scenarios
+ - CPS integration stabilization for RAN slice activate/deactivate and terminate scenarios. Validation and bug fix for CPS integration of RAN slice lifecycle.
+3. **Transport Slicing**
+ - OOF involvement in TN slice reuse and terminate scenarios
+ - Implementation of the call to OOF for allocateNSSI to enable TN NSSI reuse in TN NSSMF
+ - Implementation of the call to OOF for terminateNxi API to deallocate TN NSSI (which may not be terminated even when NSI is terminated) in TN NSSMF
+ - Closed-loop enhancement in CCVPN to support Transport Slicing’s closed-loop (Covered in CCVPN use case).
+4. **Closed Loop**
+ - IBN based Closed loop for Network Slicing. This enhancement makes use of intents and Machine Learning models for closed loop. ML prediction microservice enhancement is done as a POC work in Jakarta release.
+ - CPS integration stabilization, which validates and enhances CPS integration for closed loop.
+5. **Carryover tests from Istanbul release**
+ - Option-1 (internal NSMF, NSMF and NSSMF)
+ - Pending test cases for E2E Slice termination
+ - Bug fixes and testing for Core slicing
+ - NF instantiation issue with same NSST
+ - Multiple non-share Core slice creation issue
+
+Impacted Modules for Jakarta Release
+------------------------------------
+- **SO**: Requirements below are identified for Jakarta release and have impacts in SO component:
+ (1) Use of Optimization solution (OOF) in allocateNSSI, deallocateNSSI in TN NSSMF
+ (2) Bug fixes/enhancements of carryover test cases from Istanbul release
+
+- **OOF**: OOF component has an impact for the requirement below:
+ (1) NSI/NSSI Selection enhancements based on resource occupancy levels
+
+- **DCAE**: The requirements below are identified for Jakarta release and have impacts in DCAE component:
+ (1) Slice selection taking into consideration of resource occupancy levels
+ (2) CPS integration in closed loop – This was done in I-release. Expecting minor enhancements in Slice Analysis MS once after the other components impacts w.r.t CPS integration and E2E testing are completed.
+ (3) IBN based Closed loop for Network Slicing - This will have impact in E2E Slicing closed loop and TN Slicing closed loop.
+
+- **CCSDK**: The requirements below are identified for network slicing use case in Jakarta release and have impacts in CCSDK component. Most of these requirements fall under the category of CPS integration.
+ (1) Optimizing cm-handle registration with CPS-DMI Plugin to upload yang model
+ (2) CPS Integration with SDN-R for RAN Slice allocate and reconfigure scenarios
+ (3) CPS Integration Stabilization - RAN Slice activate/deactivate and terminate scenarios
+
+Jakarta Release - Functional Test cases
+---------------------------------------
+The functional testing of this use case covers CSMF/NSMF, RAN/CN/TN NSSMFs and Closed Loop functionality. Test cases are classified into 5 tracks: E2E network slicing, RAN NSSMF, TN NSSMF, Closed Loop and carryover testing. Details of the test cases can be found at: `E2E Network Slicing Tests for Jakarta Release <https://wiki.onap.org/display/DW/E2E+Network+Slicing+Integration+Tests+for+Jakarta+Release>`_ and its child wiki pages.
+
+Jakarta Release - Operation Guidance
+------------------------------------
+The setup and operation details for E2E network slicing use case are available at `User Operation Guidance - Jakarta Release <https://wiki.onap.org/display/DW/User+Operation+Guidance+-+Jakarta+Release>`_.
+
+Jakarta Release - Automate Network Slicing Option2 preparation step
+-------------------------------------------------------------------
+
+Automation of the Network Slicing Option2 use case is under development. At this moment automation of the preparation step is completed, with the source code under `SMO package repo <https://github.com/sebdet/oran-deployment>`_. The detailed introduction of the framework can be found at `SMO package introduction <https://wiki.o-ran-sc.org/display/IAT/Automated+deployment+and+testing+-+using+SMO+package+and+ONAP+Python+SDK>`_.
+
+The preparation scripts are python scripts, based on the ONAP pythonsdk framework. More libraries are added under SMO package in order to run the preparation scripts.
+
+The preparation scripts are located in the folder **test/pythonsdk/src/orantests/network_slicing**. Before running the script, please open **settings.py** under the folder **test/pythonsdk/src/orantests/configuration** and make sure the URL settings for all the components are correct.
+
+If the settings are good, go to folder **test/pythonsdk/src/orantests/network-slicing** and run the following command to trigger the preparation script:
+
+
+.. code-block:: bash
+
+ cd ./test/pythonsdk/src/orantests/network-slicing
+ tox -e ns-tests
+
+The command will trigger the main script **test_network_slicing.py**, which in turn triggers the preparation script of each component.
+
+The whole preparation process will configure the components and, at the end of each step, also perform a basic verification that the configuration was applied successfully.
+
+The whole process may take about 1 hour to complete. You can monitor the progress using the log file **pythonsdk.debug.log** located in the folder **network_slicing/preparation**.
+
+If everything goes fine, you will see similar logs as shown below in the end.
+
+.. image:: files/ns_automation/ns_automation_suc.png
+
+If things go wrong, please read the logs to identify which part has gone wrong and try to fix that step manually.
+
+Then you can update the **test_network_slicing.py**, disable steps that are already complete, and replay the tox command to complete the rest of the configuration.
+
+
+Please note, when checking **test_network_slicing.py** in details, you will find some of the preparation steps might require extra input parameters, such as **cst_id**, **cst_invariant_id** and **sp_id**. These values could be found in both logs and SDC UI.
+
+.. image:: files/ns_automation/ns_automation_test_class.png
+
+In case it failed in the middle of the SDC template creation, please update the **sdc_template_suffix** variable inside the **test_network_slicing.py** and then rerun the script with tox command.
+
+Since SDC supports neither creating a template with the same name nor deleting existing templates, you have to add a suffix to the original name so that the template is created under a new name.
+
+.. image:: files/ns_automation/ns_automation_sdc_suffix.png
+
+
+Jakarta Release - Known issues and Solutions
+--------------------------------------------
+Details of up to date manual configurations, known issues, solutions and work-arounds can be found in the following wiki page: `Jakarta Release - Issues and Solutions <https://wiki.onap.org/display/DW/Jakarta+Release+-+Issues+and+Solutions>`_.
+
+Kohn Release Updates
+-----------------------
+In Kohn release, the following enhancements are implemented:
+
+- IBN driven E2E Network Slicing support including enhancement to Slice Analysis MS to listen to real-time user intents posted by AAI using DCAE SDK dmaap-client lib and report slice KPI to UUI.
+- KPI computation enhancements including new KPI added and new UUI display design, KPI spanning multiple resources, error handling for missing counters.
+- DCAE R11 global requirements contribution `(See the wikipage here) <https://wiki.onap.org/display/DW/R11+Global+requirements+Contribution>`_.
+
+Kohn release also fixed a few critical bugs in Jakarta release.
+
+Impacted Modules for Kohn Release
+------------------------------------
+- **DCAE**: Requirements below for Kohn release have impacts on DCAE component:
+ (1) Enhancement to Slice Analysis MS
+ (2) KPI computation enhancements
+ (3) DCAE R11 global requirements and bug fixes
+
+- **UUI**: Requirements below for Kohn release have impacts on UUI component
+ (1) Slicing KPI monitoring and display for IBN driven network slicing
+
+Kohn Release Functional Test Cases
+---------------------------------------
+Details of the test cases can be found at: `E2E Network Slicing Tests for Kohn Release <https://wiki.onap.org/display/DW/E2E+Network+Slicing+Integration+Tests+for+Kohn+Release>`_ and its child wiki pages.
+
+London Release Updates
+-----------------------
+The following enhancements are added to the London release:
+
+- Support of 3GPP 28.532 based APIs for network slicing
+
+Impacted Modules for London Release
+------------------------------------
+- **SO**: Requirements below for London release have impacts on SO component:
+ (1) Enhancement to SO macro flow to support 3GPP 28.532 based APIs for network slicing
+ (2) NSST selection APIs for integration with OOF
+
+- **OOF**: Requirements below for London release have impacts on OOF component:
+ (1) NSST selection APIs for integration with SO
+
+London Release Test Cases
+--------------------------
+Details of the test cases can be found at: `E2E Network Slicing Tests for London Release <https://wiki.onap.org/display/DW/E2E+Network+Slicing+Integration+Testing+for+London+Release>`_.
diff --git a/docs/docs_NFV_Testing_Automatic_Platform_Requirements_User_Guide.rst b/docs/docs_NFV_Testing_Automatic_Platform_Requirements_User_Guide.rst
new file mode 100644
index 000000000..65d0e41a3
--- /dev/null
+++ b/docs/docs_NFV_Testing_Automatic_Platform_Requirements_User_Guide.rst
@@ -0,0 +1,130 @@
+.. _nfv_testing_automation_platform_requirements:
+
+:orphan:
+
+=======================================================
+NFV Testing Automatic Platform Requirements- User Guide
+=======================================================
+
+.. Overview: this page used to explain how to use NFV testing automatic platform,
+ the relevant requirements include REQ-335(Support for Test Topology
+ Auto Design), REQ-336(Support for Test Environment Auto Deploy),
+ REQ-337(Support for Test Task Auto Execution), REQ-338(Support for
+ Test Result Auto Analysis & Certification).
+
+Description
+===========
+
+There are a large number of cross-department and cross-organization communications
+during the traditional network element, system or equipment network access test.
+And the manual errors are inevitable, the knowledge in test field cannot be
+solidified. The cost of each test is high and the test cycle is always long.
+After introducing NFV, because network element software and hardware equipment are
+layered decoupled, the introduction of a large number of open source components as
+well as the frequent upgrade of the software itself, make network access test
+become more complicated and frequent.
+
+Testing has become a bottleneck during the introduction and iteration of new
+technologies. Therefore, it is urgent to introduce automated test tools.
+By introducing testing automatic capabilities including topology auto design,
+test environment auto deploy, test task auto execution and test result auto
+analysis & certification, it can solidify domain knowledge, and help reduce labor
+costs, shorten test cycle, improve test efficiency, and optimize test accuracy.
+
+Requirement Details
+===================
+
+Test Topology Auto Design (enhancement in SDC)
+----------------------------------------------
+
+1.Quickly design a test service (topology) composed with tested VNF and test
+ environment (One way is to define abstract testing service (topology) template
+ for each type of VNF);
+
+2.For the service designed, can be imported into SDC for modification or enhancement,
+ or the test template can be reused for different test environments (the SDC needs
+ to support service import).
+
+Test Environment Auto Deploy (enhancement in VF-C)
+--------------------------------------------------
+
+By getting VM/VL/Port/VNF/NS instance information from Openstack via Multi-cloud
+to VF-C for instance information storage, enable VTP obtaining all the real-time
+instance information.
+
+Test Task Auto Execution (enhancement in VNFSDK, CLI)
+-----------------------------------------------------
+1. Test instruments integration:
+
+* Test Case execution;
+* Test Case discovering and auto registration;
+* Robot profile integration
+
+2. VTP capability expansion:
+
+* Loading different test scripts and cases - Scenario Active Management;
+* Flexible test process definition(Middle);
+* Test report customization
+* Profile HTTP API support
+
+3. Execution-Standard / Open source test case support
+
+* Enable ETSI NFV APIs conformance test cases in VTP;
+* Enable CNCF CNF conformance test case in VTP.
+
+4. Test Result Auto Analysis & Certification
+
+* The test objects that passed test certification are put into marketplace
+* OVP integrates with VTP to automatically receive VTP test results:
+
+ * Enable OVP with HTTP API for submit the result
+ * Enable VTP for result submission into OVP.
+
+New Features and Guide (Guilin Release)
+=======================================
+
+SDC New features
+----------------
+
+Service import
+>>>>>>>>>>>>>>
+
+1. Add a button “IMPORT SERVICE CSAR" to perform service CSAR import.
+2. When clicking the “IMPORT SERVICE CSAR” button on the portal, a window will
+ pop up to select the service CSAR file to be imported.
+3. After selecting the service CSAR file to be imported, it will switch to the
+ general information input page for creating the service.
+4. After filling in all the required fields, you can click the "create" button
+ to create a new service.
+5. Add a new API for the request of importing service CSAR.
+
+Abstract service template
+>>>>>>>>>>>>>>>>>>>>>>>>>
+
+1. On the general page of VF, add a IS_ABSTRACT_RESOURCE selection box, which is
+ false by default. If it is an abstract VNF, select true manually.
+2. Add three APIs to handle the corresponding requests of abstract service template:
+ 2.1 Return whether the service is an abstract service: GET /v1/catalog/abstract/service/serviceUUID/{uuid}/status
+ 2.2 Copy a new service based on the existing service: POST /v1/catalog/abstract/service/copy
+ 2.3 Replace the abstract VNF in the abstract service template with the actual VNF: PUT /v1/catalog/abstract/service/replaceVNF
+
+VTP New features
+----------------
+1. Added active scenario and profile management support
+2. Added integration with Robot CSIT tests
+3. Enabled auto discovery of test cases from 3rd party tool integration
+4. Added support for cnf-conformance tests (in order to enable the CNF
+ conformance tool in VTP, please refer to `the guide <https://gerrit.onap.org/r/gitweb?p=vnfsdk/validation.git;a=blob;f=cnf-conformance/README.md;h=cda3dee762f4dd2873613341f60f6662880f006a;hb=refs/heads/master>`_)
+5. New VTP API has been updated: see the `VTP API wiki page <https://wiki.onap.org/display/DW/VTP+REST+API+v1>`_
+
+CLI New features
+----------------
+
+1. Enabled auto discover and registration of products functionalities as commands
+2. Profile management commands are added
+3. For the VTP Command line usage, please refer :ref:`CLI User Guide <onap-cli:cli_user_guide>`
+
+Test Status and Plans
+=====================
+
+See `the status of the test wiki page <https://wiki.onap.org/display/DW/Automatic+Testing+Requirements>`_
diff --git a/docs/docs_StndDefined_Events_Collection_Mechanism.rst b/docs/docs_StndDefined_Events_Collection_Mechanism.rst
new file mode 100644
index 000000000..89c6481c4
--- /dev/null
+++ b/docs/docs_StndDefined_Events_Collection_Mechanism.rst
@@ -0,0 +1,97 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+ International License. http://creativecommons.org/licenses/by/4.0
+
+:orphan:
+
+.. _docs_StndDefined_Events_Collection_Mechanism:
+
+VES Collector - Standard Defined Events Collection Mechanism
+------------------------------------------------------------
+
+Description
+~~~~~~~~~~~
+
+The target of standard defined events collection mechanism development was to allow collection of events defined by standards organizations using VES Collector,
+and providing them for consumption by analytics applications running on top of DCAE platform. The following features have been implemented:
+
+1. Event routing, based on a new CommonHeader field “stndDefinedNamespace”
+2. Standards-organization defined events can be included using a dedicated stndDefinedFields.data property
+3. Standards-defined events can be validated using openAPI descriptions provided by standards organizations, and indicated in stndDefinedFields.schemaReference
+
+`Standard Defined Events Collection Mechanism description <https://docs.onap.org/projects/onap-dcaegen2/en/jakarta/sections/services/ves-http/stnd-defined-validation.html>`_
+
+.. note::
+
+ VES Collector orchestrated using Helm or Cloudify uses standard defined domain schema files bundled within VES collector image during image build.
+ Also the new Helm based installation mechanism for collectors doesn't yet support certain features available with the traditional Cloudify orchestration based mechanisms:
+ - Obtaining X.509 certificates from external CMP v2 server for secure xNF connections
+ - Exposing the Collector port in Dual Stack IPv4/IPv6 networks.
+
+
+How to Configure VES Collector
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default config maps containing schema files are defined in the `OOM <https://github.com/onap/oom/tree/jakarta/kubernetes/dcaegen2-services/resources/external>`_ repository and installed with dcaegen2 module.
+In the Istanbul release, OOM uses schema files from https://forge.3gpp.org/rep/sa5/MnS/blob/SA88-Rel16/OpenAPI/.
+The newest schema files can be found in https://forge.3gpp.org/rep/sa5/MnS/tree/Rel-16-SA-91/OpenAPI
+If different or newer schema files are required for production/test purposes, please follow the procedure for `config map update <https://docs.onap.org/projects/onap-dcaegen2/en/latest/sections/configuration.html#config-maps>`_.
+
+In order to prepare a second instance of VES Collector please follow the procedure below:
+
+1. (Optional step) If VES Collector should obtain X.509 certificates from CMPv2 server for secure xNF connections please follow below steps:
+
+ - Install `Cert Manager <https://docs.onap.org/projects/onap-oom/en/latest/sections/guides/infra_guides/oom_infra_base_config_setup.html#install-cert-manager>`_
+ - Configure `Cert Service <https://docs.onap.org/projects/onap-oom-platform-cert-service/en/jakarta/sections/configuration.html>`_ if external CMP v2 server is in use.
+
+2. If usage of config maps from OOM containing schema files is required please follow procedure for
+ `external repo schema files from OOM connection to VES collector <https://docs.onap.org/projects/onap-dcaegen2/en/jakarta/sections/services/ves-http/installation.html#external-repo-schema-files-from-oom-connection-to-ves-collector>`_
+ with changes described below.
+
+ As new instance of VES Collector will be introduced to ONAP namespace there is need to modify parameters from ``/inputs/k8s-ves-inputs-tls.yaml`` in Bootstrap POD
+
+ - external_port - set here ``node port`` from range ``30000-32767`` not used in ONAP instance for example ``30519``
+ - ``service_component_type``, ``service_id``, ``service_component_name_override`` - set here custom service name e.g. ``dcae-ves-collector-std-def-evnents``
+
+ (Optional step) If VES Collector should also obtain X.509 certificates from CMP v2 and its clients should successfully validate its hostname then the following parameters need to be modified in the ``/inputs/k8s-ves-inputs-tls.yaml`` file.
+
+ - ``external_cert_use_external_tls`` - change from ``false`` to ``true``
+ - ``external_cert_common_name`` - set same value as used in ``service_component_name_override parameter``
+ - ``service_component_name_override`` - add following values:
+ - all IPv4 addresses of ONAP worker hosts
+ - all IPv6 addresses of ONAP worker hosts
+ - all FQDN names of ONAP worker hosts
+ - ``service_component_name_override`` parameter value.
+
+ Deploy new instance of VES collector using ``/inputs/k8s-ves-inputs-tls.yaml``
+
+3. (Optional step) If ONAP is installed in Dual Stack and VES Collector should listen in IPv6 network
+
+ - on RKE node prepare file ``ves-ipv6.yaml`` with following content (below is an example of file for ``dcae-ves-collector-std-def-evnents`` service name created in section 2, in ``node port`` set once again value from range ``30000-32767`` not used in ONAP instance for example ``30619`` )
+ .. code-block:: bash
+
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: xdcae-ves-collector-std-def-evnents
+ namespace: onap
+ spec:
+ externalTrafficPolicy: Cluster
+ ipFamily: IPv6
+ ports:
+ - name: xdcae-ves-collector-std-def-evnents
+ nodePort: 30619
+ port: 8443
+ protocol: TCP
+ targetPort: 8443
+ selector:
+ app: dcae-ves-collector-std-def-evnents
+ sessionAffinity: None
+ type: NodePort
+
+ - apply prepared service and check if it working
+ .. code-block:: bash
+
+ kubectl -n onap apply -f ves-ipv6.yaml
+
+ kubectl -n onap get svc | grep collector-std-def-evnents
+ xdcae-ves-collector-std-def-evnents NodePort fd00:101::6ad <none> 8443:30619/TCP
diff --git a/docs/docs_Testing_5G_PNF_Software_Upgrade_With_Schema_Update.rst b/docs/docs_Testing_5G_PNF_Software_Upgrade_With_Schema_Update.rst
new file mode 100644
index 000000000..eb549bb99
--- /dev/null
+++ b/docs/docs_Testing_5G_PNF_Software_Upgrade_With_Schema_Update.rst
@@ -0,0 +1,189 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+.. _docs_testing_5g_pnf_software_upgrade_with_schema_update:
+
+
+:orphan:
+
+Testing xNF Software Upgrade in association to schema updates
+-------------------------------------------------------------
+
+Description
+~~~~~~~~~~~
+This procedure only describes the test instruction to upgrade schema of a service instance with at least one PNF resource based on a new onboarding package.
+
+This procedure can be used to upgrade a service instance with more than one PNF resource.
+
+A. Pre-conditions
+~~~~~~~~~~~~~~~~~
+* A service template with at least one PNF resource has been created in SDC and distributed to run time
+
+* At least one service instance has been instantiated, including PNF registration and configuration, in run time
+
+* This service instance is in health condition
+
+* A new PNF onboarding package, which contains a new software version and new artifacts, is ready for onboarding
+
+* This procedure does not support addition of new PNF resource or deletion of existing PNF resource in the service template.
+
+
+B. Update and re-distribute the service template:
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ The service template must be updated with updated schema information for the PNF resources, and then redistributed to run time.
+
+ 1. Update an existing PNF resource artifact and attach the same to an existing service template.
+
+ - url to portal: https://portal.api.simpledemo.onap.org:30225/ONAPPORTAL/login.htm
+
+ - password for users: demo123456!
+
+ - Login as cs0008, go to "ONBOARD", where all the available VSPs and Services are listed.
+
+
+ 2. Follow below mentioned procedure to update VSP and Service.
+
+ - `Update VF/PNF <https://docs.onap.org/en/kohn/guides/onap-user/design/resource-onboarding/index.html#update-vfcs-in-a-vsp-optional>`_
+
+ - `Update Service <https://docs.onap.org/en/kohn/guides/onap-user/design/service-design/index.html#update-service-optional>`_
+
+
+C. Trigger PNF service level software upgrade with schema update:
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Schema update procedure can be triggered manually by invoking appropriate rest end points through the postman client.
+
+ 3. Get the service level workflow uuid by fetching all the available workflows from SO:
+
+ - GET http://REPO_IP:SO_PORT/onap/so/infra/workflowSpecifications/v1/workflows
+
+ - From the response, fetch the workflow uuid against the workflow name “ServiceLevelUpgrade”.
+
+ .. image:: files/softwareUpgrade/workflowList.png
+
+
+ 4. Select one service instance which need to be upgraded
+
+ - Retrieve all services instance from AAI using:
+
+ - GET https://REPO_IP:AAI_PORT/business/customers/customer/{global-customer-id}/service-subscriptions/service-subscription/{service-type}/service-instances
+
+ - Select one service instance from the service instance list received from above query.
+
+
+ 5. Get all Service-Model-Version from AAI Using Service-Model-InVariant-UUId:
+
+ - Use the Service-Model-InVariant-UUId from the selected service instance (previous step) as model-invariant-id in this query.
+
+ - GET https://REPO_IP:AAI_PORT/aai/v21/service-design-and-creation/models/model/${model-invariant-id}/model-vers
+
+ - Select one model version Id from the model version list received from above querying. The selected model version Id will be used as the target service model version at upgrade procedure.
+
+ .. image:: files/softwareUpgrade/serviceModelVersions.png
+
+
+ 6. Invoke the service level upgrade workflow to update the schema of xNF resources.
+
+ - Invoke the service level workflow by passing the older version service model id and the service level workflow uuid for “Service Level workflow” fetched in the previous steps.
+
+ - In the body of the POST request, json input needs to be supplied that contains info on the model version to which we are going to trigger the update. (2.0)
+
+ - POST http://REPO_IP:SO_PORT/onap/so/infra/instanceManagement/v1/serviceInstances/${serviceInstanceId}/workflows/${serviceLevel_workflow_uuid}
+
+ - Attaching below a sample request json :
+
+{
+
+ "requestDetails": {
+
+ "subscriberInfo": {
+
+ "globalSubscriberId": "807c7a02-249c-4db8-9fa9-bee973fe08ce"
+
+ },
+
+ "modelInfo": {
+
+ "modelVersion": "2.0",
+
+ "modelVersionId": "8351245d-50da-4695-8756-3a22618377f7",
+
+ "modelInvariantId": "fe41489e-1563-46a3-b90a-1db629e4375b",
+
+ "modelName": "Service_with_pnfs",
+
+ "modelType": "service"
+
+ },
+
+ "requestInfo": {
+
+ "suppressRollback": false,
+
+ "requestorId": "demo",
+
+ "instanceName": "PNF 2",
+
+ "source": "VID"
+
+ },
+
+ "requestParameters": {
+
+ "subscriptionServiceType": "pNF",
+
+ "userParams": [
+
+ {
+
+ "name": "targetSoftwareVersion",
+
+ "value": "pnf_sw_version-4.0.0"
+
+ }
+
+ ],
+
+ "aLaCarte": false,
+
+ "payload": "{\"k1\": \"v1\"}"
+
+ },
+
+ "project": {
+
+ "projectName": "ServiceLevelUpgrade"
+
+ },
+
+ "owningEntity": {
+
+ "owningEntityId": "67f2e84c-734d-4e90-a1e4-d2ffa2e75849",
+
+ "owningEntityName": "OE-Test"
+
+ }
+
+ }
+
+}
+
+Note down the request id for the schema update request that can be used in the subsequent steps to track the progress.
+
+
+ 7. Verify the service level upgrade workflow status
+
+ - GET http://REPO_IP:SO_PORT/onap/so/infra/orchestrationRequests/v7/${requestID}
+
+ - Verify the response status code and message for the request id fetched in the previous step.
+
+ - For successful upgrade completion, the response code must be “200” with appropriate success message.
+
+
+ 8. Verify PNF Configuration for Service Level Upgrade from AAI
+
+ - GET https://REPO_IP:AAI_PORT/aai/v16/network/pnfs/pnf/{PNF_NAME}
+
+ - Verify the software version of the pnf resource updated in AAI.
+
+ .. image:: files/softwareUpgrade/verifyPNF.png
diff --git a/docs/docs_pnf_onboarding_preonboarding.rst b/docs/docs_pnf_onboarding_preonboarding.rst
new file mode 100644
index 000000000..fb33ec370
--- /dev/null
+++ b/docs/docs_pnf_onboarding_preonboarding.rst
@@ -0,0 +1,29 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+ International License. http://creativecommons.org/licenses/by/4.0
+
+.. _docs_pnf_onboarding_preonboarding:
+
+:orphan:
+
+5G - PNF Pre-Onboarding & Onboarding
+------------------------------------
+
+Description
+~~~~~~~~~~~
+
+This use case introduces the possibility of pre-onboarding and onboarding a vendor PNF onboarding package in ONAP for 5G and other use cases.
+`Detailed 5G - PNF Pre-Onboarding & Onboarding use case documentation <https://wiki.onap.org/pages/viewpage.action?pageId=45303641>`_
+PNF CSAR Package that is onboarded as Vendor Software Package to SDC must meet the following requirements: `VNF or PNF CSAR Package Requirements <https://docs.onap.org/en/frankfurt/submodules/vnfrqts/requirements.git/docs/Chapter5/Tosca/ONAP%20VNF%20or%20PNF%20CSAR%20Package.html>`_
+Before SDC Onboarding, PNF onboarding package/archive can be verified using VNF SDK tools.
+
+
+How to Use
+~~~~~~~~~~
+- PNF pre-onboarding (VNF SDK verification)
+ The pre-onboarding step is optional and it can be used to verify a vendor PNF onboarding package/archive format by VNF SDK tools
+ `VNF SDK Tools Documentation <https://docs.onap.org/en/frankfurt/submodules/vnfsdk/model.git/docs/index.html>`_
+ `VNF SDK Test Cases <https://wiki.onap.org/pages/viewpage.action?pageId=58231094>`_
+
+- PNF onboarding (SDC Resource Onboarding)
+ The onboarding step is mandatory in ONAP.
+ A vendor-provided PNF onboarding package must be onboarded according to procedure: `SDC Resource Onboarding <https://docs.onap.org/en/frankfurt/guides/onap-user/design/resource-onboarding/index.html>`_
diff --git a/docs/docs_postman.rst b/docs/docs_postman.rst
deleted file mode 100644
index 30cf83a79..000000000
--- a/docs/docs_postman.rst
+++ /dev/null
@@ -1,190 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0
-.. International License. http://creativecommons.org/licenses/by/4.0
-.. Copyright 2017 AT&T Intellectual Property. All rights reserved.
-
-.. _postman-guides:
-
-Test ONAP API with Postman
-==========================
-
-Postman
--------
-
-Postman is a tool that allows a user to run REST API.
-
-The user defines the API requests and has the possibility to group them
-in files called a "Collections".
-
-The user can then run each individual API request or run a complete collection.
-
-Postman includes the possibility to define "environment" variables.
-
-An API request can then get or set a value from/to that "environment" variable.
-
-Get and install Postman tool on your own machine (Linux/windows).
-
-Postman is available here: https://www.getpostman.com/
-
-
-ONAP Postman collections
-------------------------
-
-ONAP Integration project provides several Postman collections with two
-environment files.
-
-Those Postman Collections will allow a Developer to experiment various ONAP API
-on various ONAP components (SDC, NBI, SO, AAI, SDNC)
-
-- declare a vendor
-- declare a VSP
-- upload a package
-- declare a VF based on the VSP
-- declare a Service composed of the VF and a Virtual Link
-- distribute all those informations
-- declare a customer, a service subscription
-- declare OwningEntity, Platform...
-- declare a Complex, Cloud Region, Tenant
-- associate customer/service/tenant
-- declare a service instance via a serviceOrder
-- declare a vnf
-- declare a vf-module
-- declare a network
-
-A collection is also provided to delete objects
-(reminder: it is not possible to delete object in SDC)
-
-They have been tested with Onap ElAlto (they are not all compatible with
-Dublin, and there is not guaranty about ONAP "master" as API definition
-can change)
-
-
-Download ONAP Postman collections
----------------------------------
-
-From your machine, git clone the ONAP Integration project.
-
-::
-
- git clone "https://gerrit.onap.org/r/integration"
-
-
-Import ONAP Postman collections
--------------------------------
-
-ONAP Postman collection are in the repository integration/test/postman
-
-Launch Postman tool
-
-import all ONAP Collections into Postman
-
-.. figure:: files/postman/import.png
- :align: center
-
-And you should see all the collections into Postman
-
-.. figure:: files/postman/collections.png
- :align: center
-
-Each collection is made of several API operations
-
-.. figure:: files/postman/collection-detail.png
- :align: center
-
-
-Running ONAP Postman collections
---------------------------------
-
-Running all those collections, in the order, from 1 to 10 will create a lot of
-objects in ONAP components :
-
-- SDC : vendor, VSP, zip file upload, VF from VSP, Service, add VF to Service
-- VID : OwningEntity, LineOfBusiness, Project, Platform
-- AAI : customer, subscription, cloud region, tenant
-- NBI : serviceOrder to add a service instance, serviceOrder to delete
- a service instance
-
-The order is very important because a lot of API requests will need the API
-response from the previous operation to get and set some variable values.
-
-.. figure:: files/postman/collection-detail-test.png
- :align: center
-
-It is possible to run the complete collection using Postman
-
-.. figure:: files/postman/run.png
- :align: center
-
-You need, a zip file that contains Heat files for a VNF.
-
-Collection 3 is about uploading that file into ONAP SDC.
-
-.. figure:: files/postman/zipfile.png
- :align: center
-
-Before running those collections, once in Postman, you need to have a look
-at "globals" environment parameters.
-
-.. figure:: files/postman/globals.png
- :align: center
-
-All variables that begin by "auto" must NOT be changed (they will be modified
-using API response).
-
-All other variables must be adapted to your needs.
-
-In particular, you need to put your own values for cloud_region_id, tenant_name
-and tenant_id to fit with the place where you will instantiate the VNF.
-
-
-::
-
- service:freeradius
- vf_name:integration_test_VF_freeradius
- vsp_name:integration_test_VSP
- vendor_name:onap_integration_vendor
- owning_entity:integration_test_OE
- platform:integration_test_platform
- project:integration_test_project
- lineofbusiness:integration_test_LOB
- customer_name:generic
- cloud_owner_name:OPNFV
- cloud_region_id:RegionOne
- tenant_name:openlab-vnfs
- tenant_id:234a9a2dc4b643be9812915b214cdbbb
- externalId:integration_test_BSS-order-001
- service_instance_name:integration_test_freeradius_instance_001
-
-
-Using Newman
-------------
-
-Newman is a tool that allow to run postman collections via command-line
-
-On a linux server, with Docker installed on it, run those lines:
-
-::
-
- git clone https://gitlab.com/Orange-OpenSource/lfn/onap/onap-tests.git
- cd onap-tests/postman
- sudo apt-get -y install zip
- USECASE=$'ubuntu16'
- zip -j $USECASE.zip ../onap_tests/templates/heat_files/$USECASE/*
- TAB=$'\t\t\t\t\t\t\t'
- sed -i -e "s/.*src.*/$TAB\"src\": \"$USECASE.zip\"/" 03_Onboard_VSP_part2.postman_collection.json
- docker pull postman/newman:alpine
- docker run --network="host" --volume="/home/debian/rene/onap-tests/postman:/etc/newman" postman/newman:alpine run 01_Onboard_Vendor.postman_collection.json --environment integration_test_urls.postman_environment.json --globals globals.postman_globals.json --export-environment integration_test_urls.postman_environment.json --reporters cli,json --reporter-cli-no-assertions --reporter-cli-no-console
- docker run --network="host" --volume="/home/debian/rene/onap-tests/postman:/etc/newman" postman/newman:alpine run 02_Onboard_VSP_part1.postman_collection.json --environment integration_test_urls.postman_environment.json --globals globals.postman_globals.json --export-environment integration_test_urls.postman_environment.json
- docker run --network="host" --volume="/home/debian/rene/onap-tests/postman:/etc/newman" postman/newman:alpine run 03_Onboard_VSP_part2.postman_collection.json --environment integration_test_urls.postman_environment.json --globals globals.postman_globals.json --export-environment integration_test_urls.postman_environment.json
- docker run --network="host" --volume="/home/debian/rene/onap-tests/postman:/etc/newman" postman/newman:alpine run 04_Onboard_VSP_part3.postman_collection.json --environment integration_test_urls.postman_environment.json --globals globals.postman_globals.json --export-environment integration_test_urls.postman_environment.json
- docker run --network="host" --volume="/home/debian/rene/onap-tests/postman:/etc/newman" postman/newman:alpine run 05_Onboard_VF.postman_collection.json --environment integration_test_urls.postman_environment.json --globals globals.postman_globals.json --export-environment integration_test_urls.postman_environment.json
- docker run --network="host" --volume="/home/debian/rene/onap-tests/postman:/etc/newman" postman/newman:alpine run 06_Onboard_Service.postman_collection.json --environment integration_test_urls.postman_environment.json --globals globals.postman_globals.json --export-environment integration_test_urls.postman_environment.json
- docker run --network="host" --volume="/home/debian/rene/onap-tests/postman:/etc/newman" postman/newman:alpine run 07_Declare_owningEntity_LineOfBusiness_project_platform.postman_collection.json --environment integration_test_urls.postman_environment.json --globals globals.postman_globals.json --export-environment integration_test_urls.postman_environment.json
- docker run --network="host" --volume="/home/debian/rene/onap-tests/postman:/etc/newman" postman/newman:alpine run 08_Declare_Customer_Service_Subscription_Cloud.postman_collection.json --insecure --environment integration_test_urls.postman_environment.json --globals globals.postman_globals.json --export-environment integration_test_urls.postman_environment.json
- docker run --network="host" --volume="/home/debian/rene/onap-tests/postman:/etc/newman" postman/newman:alpine run 10_instantiate_service_vnf_vfmodule.postman_collection.json --environment integration_test_urls.postman_environment.json --globals globals.postman_globals.json --export-environment integration_test_urls.postman_environment.json --reporters cli,json --reporter-cli-no-assertions --reporter-cli-no-console
-
-All collections are processed, then you can see results and you will
-also obtain result json files in the onap-tests/postman/newman directory
-
-Of course you can adapt globals variables in globals.postman_globals.json
-or change the USECASE=$'ubuntu16' value to onboard any heat template located
-in onap_tests/templates/heat_files directory
diff --git a/docs/docs_robot.rst b/docs/docs_robot.rst
index 96b4b7c65..f572f2799 100644
--- a/docs/docs_robot.rst
+++ b/docs/docs_robot.rst
@@ -3,6 +3,8 @@
.. _docs_robot:
+:orphan:
+
Robot
-----
diff --git a/docs/docs_scaleout.rst b/docs/docs_scaleout.rst
index b47c0693c..80ee6bf95 100644
--- a/docs/docs_scaleout.rst
+++ b/docs/docs_scaleout.rst
@@ -1,28 +1,191 @@
.. _docs_scaleout:
+:orphan:
+
VF Module Scale Out Use Case
----------------------------
Source files
~~~~~~~~~~~~
-- Heat templates directory: https://git.onap.org/demo/tree/heat/vLB_CDS?h=elalto
+- Heat templates directory: https://git.onap.org/demo/tree/heat?h=guilin
+- Heat templates directory (vLB_CDS use case): https://git.onap.org/demo/tree/heat/vLB_CDS?h=guilin
Additional files
~~~~~~~~~~~~~~~~
-- DCAE blueprint: https://git.onap.org/integration/tree/docs/files/scaleout/k8s-tca-clamp-policy-05162019.yaml
- TOSCA model template: https://git.onap.org/integration/tree/docs/files/scaleout/service-Vloadbalancercds-template.yml
-- Naming policy script: https://git.onap.org/integration/tree/docs/files/scaleout/push_naming_policy.sh
+- Naming policy script: :download:`push_naming_policy.sh <files/scaleout/push_naming_policy.sh>`
+- Controller Blueprint Archive (to use with CDS) : https://git.onap.org/ccsdk/cds/tree/components/model-catalog/blueprint-model/service-blueprint/vLB_CDS_Kotlin?h=guilin
+- TCA blueprint: :download:`guilin-tca.yaml <files/scaleout/latest-tca-guilin.yaml>`
+
+Useful tool
+~~~~~~~~~~~
+POSTMAN collection that can be used to simulate all inter process queries : https://www.getpostman.com/collections/878061d291f9efe55463
+To be able to use this postman collection, you may need to expose some ports that are not exposed in OOM by default.
+These commands may help for exposing the ports:
+
+::
+
+ kubectl port-forward service/cds-blueprints-processor-http --address 0.0.0.0 32749:8080 -n onap &
+ kubectl port-forward service/so-catalog-db-adapter --address 0.0.0.0 30845:8082 -n onap &
+ kubectl port-forward service/so-request-db-adapter --address 0.0.0.0 32223:8083 -n onap &
+
+OOM Installation
+~~~~~~~~~~~~~~~~
+Before doing the OOM installation, take care to the following steps:
+
+Set the right Openstack values for Robot and SO
+===============================================
+
+The config for robot must be set in an OOM override file before the OOM installation, this will initialize the robot framework & SO with all the required openstack info.
+A section like that is required in that override file
+
+::
+
+ robot:
+ enabled: true
+ flavor: small
+ appcUsername: "appc@appc.onap.org"
+ appcPassword: "demo123456!"
+ openStackKeyStoneUrl: "http://10.12.25.2:5000"
+ openStackKeystoneAPIVersion: "v3"
+ openStackPublicNetId: "5771462c-9582-421c-b2dc-ee6a04ec9bde"
+ openStackTenantId: "c9ef9a6345b440b7a96d906a0f48c6b1"
+ openStackUserName: "openstack_user"
+ openStackUserDomain: "default"
+ openStackProjectName: "CLAMP"
+ ubuntu14Image: "trusty-server-cloudimg-amd64-disk1"
+ ubuntu16Image: "xenial-server-cloudimg-amd64-disk1"
+ openStackPrivateNetCidr: "10.0.0.0/16"
+ openStackPrivateNetId: "fd05c1ab-3f43-4f6f-8a8c-76aee04ef293"
+ openStackPrivateSubnetId: "fd05c1ab-3f43-4f6f-8a8c-76aee04ef293"
+ openStackSecurityGroup: "f05e9cbf-d40f-4d1f-9f91-d673ba591a3a"
+ openStackOamNetworkCidrPrefix: "10.0"
+ dcaeCollectorIp: "10.12.6.10"
+ vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
+ demoArtifactsVersion: "1.6.0"
+ demoArtifactsRepoUrl: "https://nexus.onap.org/content/repositories/releases"
+ scriptVersion: "1.6.0"
+ nfsIpAddress: "10.12.6.10"
+ config:
+ openStackEncryptedPasswordHere: "e10c86aa13e692020233d18f0ef6d527"
+ openStackSoEncryptedPassword: "1DD1B3B4477FBAFAFEA617C575639C6F09E95446B5AE1F46C72B8FD960219ABB0DBA997790FCBB12"
+ so:
+ enabled: true
+ so-catalog-db-adapter:
+ config:
+        openStackUserName: "openstack_user"
+ openStackKeyStoneUrl: "http://10.12.25.2:5000/v3"
+ openStackEncryptedPasswordHere: "1DD1B3B4477FBAFAFEA617C575639C6F09E95446B5AE1F46C72B8FD960219ABB0DBA997790FCBB12"
+ openStackKeystoneVersion: "KEYSTONE_V3"
+
+The values that must be changed according to your lab are all "openStack******" parameters + dcaeCollectorIp + nfsIpAddress
+
+**Generating SO Encrypted Password:**
+
+The SO Encrypted Password uses a java based encryption utility since the
+Java encryption library is not easy to integrate with openssl/python that
+Robot uses in Dublin and later versions.
+
+.. note::
+ To generate SO ``openStackEncryptedPasswordHere`` and ``openStackSoEncryptedPassword``
+ ensure `default-jdk` is installed::
+
+ apt-get update; apt-get install default-jdk
+
+ Then execute (on oom repository)::
+
+ SO_ENCRYPTION_KEY=`cat ~/oom/kubernetes/so/resources/config/mso/encryption.key`
+ OS_PASSWORD=XXXX_OS_CLEARTESTPASSWORD_XXXX
+
+ git clone http://gerrit.onap.org/r/integration
+ cd integration/deployment/heat/onap-rke/scripts
+
+ javac Crypto.java
+ java Crypto "$OS_PASSWORD" "$SO_ENCRYPTION_KEY"
+
+**Update the OpenStack parameters:**
+
+There are assumptions in the demonstration VNF Heat templates about the
+networking available in the environment. To get the most value out of these
+templates and the automation that can help confirm the setup is correct, please
+observe the following constraints.
+
+
+``openStackPublicNetId:``
+ This network should allow Heat templates to add interfaces.
+ This need not be an external network, floating IPs can be assigned to the
+ ports on the VMs that are created by the heat template but its important that
+ neutron allow ports to be created on them.
+
+``openStackPrivateNetCidr: "10.0.0.0/16"``
+ This ip address block is used to assign OA&M addresses on VNFs to allow ONAP
+ connectivity. The demonstration Heat templates assume that 10.0 prefix can be
+ used by the VNFs and the demonstration ip addressing plan embodied in the
+ preload template prevent conflicts when instantiating the various VNFs. If
+ you need to change this, you will need to modify the preload data in the
+ Robot Helm chart like integration_preload_parameters.py and the
+ demo/heat/preload_data in the Robot container. The size of the CIDR should
+ be sufficient for ONAP and the VMs you expect to create.
+
+``openStackOamNetworkCidrPrefix: "10.0"``
+  This ip prefix must match the openStackPrivateNetCidr and is a helper
+ variable to some of the Robot scripts for demonstration. A production
+ deployment need not worry about this setting but for the demonstration VNFs
+  the ip assignment strategy assumes 10.0 ip prefix.
+
+**Generating ROBOT Encrypted Password:**
+
+The Robot encrypted Password uses the same encryption.key as SO but an
+openssl algorithm that works with the python based Robot Framework.
+
+.. note::
+ To generate Robot ``openStackEncryptedPasswordHere`` call on oom respository::
+
+ cd so/resources/config/mso/
+    /oom/kubernetes/so/resources/config/mso# echo -n "<openstack tenant password>" | openssl aes-128-ecb -e -K `cat encryption.key` -nosalt | xxd -c 256 -p
+
+Initialize the Customer and Owning entities
+===========================================
+
+The robot script can be helpful to initialize the customer and owning entity that
+will be used later to instantiate the VNF (PART 2 - Scale Out Use Case Instantiation)
+
+::
+
+ In the oom_folder/kubernetes/robot/ execute the following command:
+ ./demo-k8s.sh onap init_customer
+
+If this command is unsuccessful it means that the parameters provided to the OOM installation were not correct.
+
+- Verify and Get the tenant/owning entity/cloud-regions defined in AAI by Robot script:
+ These values will be required by the POSTMAN collection when instantiating the Service/vnf ...
+
+To get them some POSTMAN collection queries are useful to use:
+
+- GET "AAI Owning Entities"
+- GET "AAI Cloud-regions"
+- GET "AAI Cloud-regions/tenant"
Description
~~~~~~~~~~~
-The scale out use case uses a VNF composed of three virtual functions. A traffic generator (vPacketGen), a load balancer (vLB), and a DNS (vDNS). Communication between the vPacketGen and the vLB, and the vLB and the vDNS occurs via two separate private networks. In addition, all virtual functions have an interface to the ONAP OAM private network, as shown in the topology below.
+
+The scale out use case uses a VNF composed of three virtual functions. A traffic
+generator (vPacketGen), a load balancer (vLB), and a DNS (vDNS). Communication
+between the vPacketGen and the vLB, and the vLB and the vDNS occurs via two
+separate private networks. In addition, all virtual functions have an interface
+to the ONAP OAM private network, as shown in the topology below.
.. figure:: files/scaleout/topology.png
:align: center
-The vPacketGen issues DNS lookup queries that reach the DNS server via the vLB. vDNS replies reach the packet generator via the vLB as well. The vLB reports the average amount of traffic per vDNS instances over a given time interval (e.g. 10 seconds) to the DCAE collector via the ONAP OAM private network.
+The vPacketGen issues DNS lookup queries that reach the DNS server via the vLB.
+vDNS replies reach the packet generator via the vLB as well. The vLB reports the
+average amount of traffic per vDNS instances over a given time interval (e.g. 10
+seconds) to the DCAE collector via the ONAP OAM private network.
-To run the use case, make sure that the security group in OpenStack has ingress/egress entries for protocol 47 (GRE). Users can test the VNF by running DNS queries from the vPakcketGen:
+To run the use case, make sure that the security group in OpenStack has
+ingress/egress entries for protocol 47 (GRE). Users can test the VNF by running
+DNS queries from the vPacketGen:
::
@@ -62,7 +225,14 @@ The output below means that the vLB has been set up correctly, has forwarded the
The Scale Out Use Case
~~~~~~~~~~~~~~~~~~~~~~
-The Scale Out use case shows how users/network operators can add Virtual Network Function Components (VNFCs) as part of a VF Module that has been instantiated in the Service model, in order to increase capacity of the network. ONAP El Alto release supports scale out with manual trigger by directly calling SO APIs and closed-loop-enabled automation from Policy. For El Alto, the APPC controller is used to demonstrate post-scaling VNF reconfiguration operations. APPC can handle different VNF types, not only the VNF described in this document.
+
+The Scale Out use case shows how users/network operators can add Virtual Network
+Function Components (VNFCs) as part of a VF Module that has been instantiated in
+the Service model, in order to increase capacity of the network. ONAP Frankfurt
+release supports scale out with manual trigger by directly calling SO APIs and
+closed-loop-enabled automation from Policy. For Frankfurt, the APPC controller is
+used to demonstrate post-scaling VNF reconfiguration operations. APPC can handle
+different VNF types, not only the VNF described in this document.
The figure below shows all the interactions that take place during scale out operations.
@@ -75,43 +245,93 @@ There are four different message flows:
- Red: Closed-loop enabled scale out.
- Black: Orchestration and VNF lifecycle management (LCM) operations.
-The numbers in the figure represent the sequence of steps within a given flow. Note that interactions between the components in the picture and AAI, SDNC, and DMaaP are not shown for clarity's sake.
-
-Scale out with manual trigger (green flow) and closed-loop-enabled scale out (red flow) are mutually exclusive. When the manual trigger is used, VID directly triggers the appropriate workflow in SO (step 1 of the green flow in the figure above). See Section 4 for more details.
-
-When closed-loop enabled scale out is used, Policy triggers the SO workflow. The closed loop starts with the vLB periodically reporting telemetry about traffic patterns to the VES collector in DCAE (step 1 of the red flow). When the amount of traffic exceeds a given threshold (which the user defines during closed loop creation in CLAMP - see Section 1-4), DCAE notifies Policy (step 2), which in turn triggers the appropriate action. For this use case, the action is contacting SO to augment resource capacity in the network (step 3).
-
-At high level, once SO receives a call for scale out actions, it first creates a new VF module (step 1 of the black flow), then calls APPC to trigger some LCM actions (step 2). APPC runs VNF health check and configuration scale out as part of LCM actions (step 3). At this time, the VNF health check only reports the health status of the vLB, while the configuration scale out operation adds a new vDNS instance to the vLB internal state. As a result of configuration scale out, the vLB opens a connection towards the new vDNS instance.
+The numbers in the figure represent the sequence of steps within a given flow.
+Note that interactions between the components in the picture and AAI, SDNC, and
+DMaaP are not shown for clarity's sake.
+
+Scale out with manual trigger (green flow) and closed-loop-enabled scale out
+(red flow) are mutually exclusive. When the manual trigger is used, VID directly
+triggers the appropriate workflow in SO (step 1 of the green flow in the figure
+above). See Section 4 for more details.
+
+When closed-loop enabled scale out is used, Policy triggers the SO workflow.
+The closed loop starts with the vLB periodically reporting telemetry about traffic
+patterns to the VES collector in DCAE (step 1 of the red flow). When the amount
+of traffic exceeds a given threshold (which the user defines during closed loop
+creation in CLAMP - see Section 1-4), DCAE notifies Policy (step 2), which in turn
+triggers the appropriate action. For this use case, the action is contacting SO to
+augment resource capacity in the network (step 3).
+
+At high level, once SO receives a call for scale out actions, it first creates a
+new VF module (step 1 of the black flow), then calls APPC to trigger some LCM
+actions (step 2). APPC runs VNF health check and configuration scale out as part
+of LCM actions (step 3). At this time, the VNF health check only reports the
+health status of the vLB, while the configuration scale out operation adds a new
+vDNS instance to the vLB internal state. As a result of configuration scale out,
+the vLB opens a connection towards the new vDNS instance.
At deeper level, the SO workflow works as depicted below:
.. figure:: files/scaleout/so-blocks.png
:align: center
-SO first contacts APPC to run VNF health check and proceeds on to the next block of the workflow only if the vLB is healthy (not shown in the previous figure for simplicity's sake). Then, SO assigns resources, instantiates, and activates the new VF module. Finally, SO calls APPC again for configuration scale out and VNF health check. The VNF health check at the end of the workflow validates that the vLB health status hasn't been negatively affected by the scale out operation.
+SO first contacts APPC to run VNF health check and proceeds on to the next block
+of the workflow only if the vLB is healthy (not shown in the previous figure for
+simplicity's sake). Then, SO assigns resources, instantiates, and activates the
+new VF module. Finally, SO calls APPC again for configuration scale out and VNF
+health check. The VNF health check at the end of the workflow validates that the
+vLB health status hasn't been negatively affected by the scale out operation.
PART 1 - Service Definition and Onboarding
------------------------------------------
+
This use-case requires operations on several ONAP components to perform service definition and onboarding.
+1-1 VNF Configuration Modeling and Upload with CDS (Recommended way)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-1-1 VNF Configuration Modeling and Upload with CDS
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Since Dublin, the scale out use case integrates with the Controller Design Studio (CDS) ONAP component to automate the generation of cloud configuration at VNF instantiation time. The user interested in running the use case only with manual preload can skip this section and start from Section 1-2. The description of the use case with manual preload is provided in Section5.
+Since Dublin, the scale out use case integrates with the Controller Design Studio (CDS) ONAP component to automate the generation of cloud configuration at VNF instantiation time. The user interested in running the use case only with manual preload can skip this section and start from Section 1-2. The description of the use case with manual preload is provided in Section 5.
Users can model this configuration at VNF design time and onboard the blueprint to CDS via the CDS GUI. The blueprint includes naming policies and network configuration details (e.g. IP address families, network names, etc.) that CDS will use during VNF instantiation to generate resource names and assign network configuration to VMs through the cloud orchestrator.
Please look at the CDS documentation for details about how to create configuration models, blueprints, and use the CDS tool: https://wiki.onap.org/display/DW/Modeling+Concepts. For running the use case, users can use the standard model package that CDS provides out of the box, which can be found here: https://wiki.onap.org/pages/viewpage.action?pageId=64007442
+::
+
+ For the current use case you can also follow these steps (Do not use the SDC flow to deploy the CBA when importing a VSP, this is not going to work anymore since Guilin):
+ 1. You must first bootstrap CDS by using the query in the POSTMAN collection query named POST "CDS Bootstrap"
+ 2. You must upload the attached CBA by using the POSTMAN collection named POST "CDS Save without Validation", the CBA zip file can be attached in the POSTMAN query
+ Controller Blueprint Archive (to use with CDS) : https://git.onap.org/ccsdk/cds/tree/components/model-catalog/blueprint-model/service-blueprint/vLB_CDS_Kotlin?h=guilin
+ 3. Create a zip file with the HEAT files located here: https://git.onap.org/demo/tree/heat/vLB_CDS?h=guilin
+ 4. Create the VSP & Service in the SDC onboarding and SDC Catalog + Distribute the service
+ To know the right values that must be set in the SDC Service properties assignment you must open the CBA zip and look at the TOSCA-Metadata/TOSCA.meta file
+ This file looks like that:
+ TOSCA-Meta-File-Version: 1.0.0
+ CSAR-Version: 1.0
+ Created-By: Seaudi, Abdelmuhaimen <abdelmuhaimen.seaudi@orange.com>
+ Entry-Definitions: Definitions/vLB_CDS.json
+ Template-Tags: vLB_CDS
+ Template-Name: vLB_CDS
+ Template-Version: 1.0.0
+ Template-Type: DEFAULT
+
+ - The sdnc_model_version is the Template-Version
+ - The sdnc_model_name is the Template-Name
+ - The sdnc_artifact_name is the prefix of the file you want to use in the Templates folder, in our CBA example it's vnf (that is supposed to reference the /Templates/vnf-mapping.json file)
+
+ Follow this guide for the VSP onboarding + service creation + properties assignment + distribution part (just skip the CBA attachment part as the CBA should have been pushed manually with the REST command): https://wiki.onap.org/pages/viewpage.action?pageId=64007442
+
+ Note that in case of issues with the AAI distribution, this may help : https://jira.onap.org/browse/AAI-1759
1-2 VNF Onboarding and Service Creation with SDC
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
Once the configuration blueprint is uploaded to CDS, users can define and onboard a service using SDC. SDC requires users to onboard a VNF descriptor that contains the definition of all the resources (private networks, compute nodes, keys, etc.) with their parameters that compose a VNF. The VNF used to demonstrate the scale out use case supports Heat templates as VNF descriptor, and hence requires OpenStack as cloud layer. Users can use the Heat templates linked at the top of the page to create a zip file that can be uploaded to SDC during service creation. To create a zip file, the user must be in the same folder that contains the Heat templates and the Manifest file that describes the content of the package. To create a zip file from command line, type:
::
zip ../vLB.zip *
-For a complete description of service design and creation, please refer to the SDC wiki page: https://wiki.onap.org/display/DW/Design
+For a complete description of service design and creation, please refer to the SDC documentation.
During the creation of the service in SDC, there are a few extra steps that need to be executed to make the VNF ready for scale out. These require users to login to the SDC Portal as service designer user (username: cs0008, password: demo123456!).
@@ -133,14 +353,14 @@ For CDS parameters, users can search for names starting with "sdnc". These param
:align: center
-After importing the VSP, users need to onboard the DCAE blueprint and the Policy Model used to design closed loops in CLAMP. This step is only required for users that want to run closed loop; users interested in manual scale out only can skip the remainder of the section.
+After importing the VSP, users need to onboard the DCAE blueprint used to design closed loops in CLAMP. This step is only required for users that want to run closed loop; users interested in manual scale out only can skip the remainder of the section. Note that since Frankfurt users are not required to upload a Policy model from SDC, as Policy models are now managed by the Policy Engine.
-From the "Composition" tab in the service menu, select the artifact icon on the right, as shown below:
+To upload a DCAE blueprint, from the "Composition" tab in the service menu, select the artifact icon on the right, as shown below:
.. figure:: files/scaleout/1.png
:align: center
-Upload the DCAE blueprint linked at the top of the page using the pop-up window.
+Upload the DCAE blueprint (choose the one depending on your ONAP release; as the original TCA was deprecated in Guilin, a new one is available to use) linked at the top of the page using the pop-up window.
.. figure:: files/scaleout/2.png
:align: center
@@ -150,21 +370,6 @@ The blueprint will appear in the artifacts section on the right.
.. figure:: files/scaleout/3.png
:align: center
-To attach a Policy Model to the service, open the Policy drop-down list on left.
-
-.. figure:: files/scaleout/4.png
- :align: center
-
-Then, add the TCA Policy.
-
-.. figure:: files/scaleout/5.png
- :align: center
-
-The Policy will be attached to the service defined in SDC
-
-.. figure:: files/scaleout/6.png
- :align: center
-
Finally, users need to provide the maximum number of VNF instances that ONAP is allowed to create as part of the scale out use case by setting up deployment properties.
.. figure:: files/scaleout/7.png
@@ -178,10 +383,11 @@ This VNF only supports scaling the vDNS, so users should select the vDNS module
At this point, users can complete the service creation in SDC by testing, accepting, and distributing the Service Models as described in the SDC user manual.
-
1-3 Deploy Naming Policy
~~~~~~~~~~~~~~~~~~~~~~~~
+
This step is only required if CDS is used.
+Note that in Guilin, the default naming policy is already deployed in the Policy engine, so this step is optional.
In order to instantiate the VNF using CDS features, users need to deploy the naming policy that CDS uses for resource name generation to the Policy Engine. User can copy and run the script at the top of the page from any ONAP pod, for example Robot or Drools. The script uses the Policy endpoint defined in the Kubernetes domain, so the execution has to be triggered from some pod in the Kubernetes space.
@@ -191,52 +397,161 @@ In order to instantiate the VNF using CDS features, users need to deploy the nam
./push_naming_policy.sh
+
1-4 Closed Loop Design with CLAMP
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This step is only required if closed loop is used.
-Once the service model is distributed, users can design the closed loop from CLAMP, using the GUI at https://clamp.api.simpledemo.onap.org:30258/designer/index.html In El Alto, CLAMP doesn't authenticate with AAF, so users have to login using "admin" and "password" as username and password, respectively.
+This step is only required if closed loop is used, for manual scaleout this section can be skipped.
+
+Here are JSON examples that can be copy-pasted in each policy configuration by clicking on the button EDIT JSON, just replace the value "LOOP_test_vLB_CDS" by your loop ID:
+For TCA config:
+::
+
+ {
+ "tca.policy": {
+ "domain": "measurementsForVfScaling",
+ "metricsPerEventName": [
+ {
+ "policyScope": "DCAE",
+ "thresholds": [
+ {
+ "version": "1.0.2",
+ "severity": "MAJOR",
+ "thresholdValue": 200,
+ "closedLoopEventStatus": "ONSET",
+ "closedLoopControlName": "LOOP_test_vLB_CDS",
+ "direction": "LESS_OR_EQUAL",
+ "fieldPath": "$.event.measurementsForVfScalingFields.vNicPerformanceArray[*].receivedTotalPacketsDelta"
+ }
+ ],
+ "eventName": "vLoadBalancer",
+ "policyVersion": "v0.0.1",
+ "controlLoopSchemaType": "VM",
+ "policyName": "DCAE.Config_tca-hi-lo"
+ }
+ ]
+ }
+ }
+
+For Drools config:
+
+::
+
+ {
+ "abatement": false,
+ "operations": [
+ {
+ "failure_retries": "final_failure_retries",
+ "id": "policy-1-vfmodule-create",
+ "failure_timeout": "final_failure_timeout",
+ "failure": "final_failure",
+ "operation": {
+ "payload": {
+ "requestParameters": "{\"usePreload\":false,\"userParams\":[]}",
+ "configurationParameters": "[{\"ip-addr\":\"$.vf-module-topology.vf-module-parameters.param[16].value\",\"oam-ip-addr\":\"$.vf-module-topology.vf-module-parameters.param[30].value\"}]"
+ },
+ "target": {
+ "entityIds": {
+ "resourceID": "Vlbcds..vdns..module-3",
+ "modelInvariantId": "e95a2949-8ba5-433d-a88f-587a6244b4ea",
+ "modelVersionId": "4a6ceddc-147e-471c-ae6f-907a0df76040",
+ "modelName": "Vlbcds..vdns..module-3",
+ "modelVersion": "1",
+ "modelCustomizationId": "7806ed67-a826-4b0e-b474-9ca4fa052a10"
+ },
+ "targetType": "VFMODULE"
+ },
+ "actor": "SO",
+ "operation": "VF Module Create"
+ },
+ "failure_guard": "final_failure_guard",
+ "retries": 1,
+ "timeout": 300,
+ "failure_exception": "final_failure_exception",
+ "description": "test",
+ "success": "final_success"
+ }
+ ],
+ "trigger": "policy-1-vfmodule-create",
+ "timeout": 650,
+ "id": "LOOP_test_vLB_CDS"
+ }
+
+For Frequency Limiter config:
+
+::
+
+ {
+ "id": "LOOP_test_vLB_CDS",
+ "actor": "SO",
+ "operation": "VF Module Create",
+ "limit": 1,
+ "timeWindow": 10,
+ "timeUnits": "minute"
+ }
-Use the "Closed Loop" link to open a distributed model.
+Once the service model is distributed, users can design the closed loop from CLAMP, using the GUI at https://clamp.api.simpledemo.onap.org:30258
-.. figure:: files/scaleout/12.png
+Use the "Loop Instance" link to create a closed loop using a distributed model.
+
+.. figure:: files/scaleout/clamp/1.png
:align: center
-Select the closed loop associated to the distributed service model.
+Select the distributed service model.
-.. figure:: files/scaleout/13.png
+.. figure:: files/scaleout/clamp/2.png
:align: center
The closed loop main page for TCA microservices is shown below.
-.. figure:: files/scaleout/14.png
+.. figure:: files/scaleout/clamp/3.png
:align: center
-Click on the TCA box to create a configuration policy. From the pop-up window, users need to click "Add item" to create a new policy and fill it in with specific information, as shown below.
+Click on the TCA box to create a configuration policy. From the pop-up window, users need to click "Add" to create a new policy and fill it in with specific information, as shown below.
-.. figure:: files/scaleout/15.png
+.. figure:: files/scaleout/clamp/4.png
:align: center
For this use case, the control loop schema type is "VM", while the event name has to match the event name reported in the VNF telemetry, which is "vLoadBalancer".
Once the policy item has been created, users can define a threshold that will be used at runtime to evaluate telemetry reported by the vLB. When the specified threshold is crossed, DCAE generates an ONSET event that will tell Policy Engine which closed loop to activate.
-.. figure:: files/scaleout/16.png
+.. figure:: files/scaleout/clamp/5.png
+ :align: center
+
+Since Frankfurt, users are required to define the PDP group for the configuration policy, as shown in the figure below.
+
+.. figure:: files/scaleout/clamp/6.png
+ :align: center
+
+After the configuration policy is created, users need to create the operational policy, which the Policy Engine uses to determine which actions and parameters should be used during closed loop. From the "Loop Instance" tab, users can select "Modify" to add a new Policy Model of type Drools:
+
+.. figure:: files/scaleout/clamp/7.png
:align: center
-After the configuration policy is created, users need to create the operational policy, which the Policy Engine uses to determine which actions and parameters should be used during closed loop.
+Users are required to provide basic closed loop information, like ID, timeout, and trigger, as shown in the example below. The trigger name, in particular, must match the name of the root operational policy created during the next step.
-.. figure:: files/scaleout/17.png
+.. figure:: files/scaleout/clamp/8.png
:align: center
-Since El Alto, CLAMP adds the "Policy Decision Entry" parameter, which has to contain the name of the root operational policy in the decision tree. For this use case, there is only one operational policy, called "vlbpolicy2" in the example above ("Policy ID" parameter). As such, "Policy Decision Entry" has to be set to "vlbpolicy2" as well. During creation of the operational policy, the user should select "VF Module Create" recipe and "SO" actor. The payload section is:
+To create a new operational policy, users can use the "Add" button below, and fill up the fields in the CLAMP GUI as shown in the example below, making sure that the "id" matches the "trigger" field defined before:
+
+.. figure:: files/scaleout/clamp/9.png
+ :align: center
+
+During creation of the operational policy, the user should select "VF Module Create" recipe and "SO" actor. The payload section is a JSON object like below:
::
- requestParameters: '{"usePreload":false,"userParams":[]}'
- configurationParameters: '[{"ip-addr":"$.vf-module-topology.vf-module-parameters.param[16].value","oam-ip-addr":"$.vf-module-topology.vf-module-parameters.param[30].value"}]'
+ {"requestParameters":"{\"usePreload\":true,\"userParams\":[]}",
+ "configurationParameters":"[{\"ip-addr\":\"$.vf-module-topology.vf-module-parameters.param[16]\",\"oam-ip-addr\":\"$.vf-module-topology.vf-module-parameters.param[30]\"}]"}
+
+Users can use the "Edit JSON" button to upload the payload.
+
+.. figure:: files/scaleout/clamp/10.png
+ :align: center
-Policy Engine passes the payload to SO, which will then use it during VF module instantiation to resolve configuration parameters. The JSON path
+The Policy Engine passes the payload to SO, which will then use it during VF module instantiation to resolve configuration parameters. The JSON path
::
@@ -244,19 +559,34 @@ Policy Engine passes the payload to SO, which will then use it during VF module
indicates that resolution for parameter "ip-addr" is available at "$.vf-module-topology.vf-module-parameters.param[16].value" in the JSON object linked by the VF module self-link in AAI. See section 1-7 for an example of how to determine the right path to configuration parameters.
-The target tab allows users to select the target type for the closed loop. For this use case, the user should select VF module as target type, as we are scaling a VF module. Please select the vDNS module as target resource ID.
+The "targetType" tab allows users to select the target type for the closed loop. For this use case, the user should select VF module as target type, as we are scaling a VF module. Please select the vDNS module as target resource ID.
+
+.. figure:: files/scaleout/clamp/11.png
+ :align: center
+
+As with configuration policy, users need to assign the PDP group to the operational policy.
+
+.. figure:: files/scaleout/clamp/12.png
+ :align: center
+
+Regarding guard policies, either "Frequency Limiter", or "MinMax", or both can be used for the scale out use case. They can be added using the "Modify" item in the "Loop Instance" tab.
-.. figure:: files/scaleout/18.png
+.. figure:: files/scaleout/clamp/13.png
:align: center
-For what regards guard policies, either "Frequency Limiter", or "MinMax", or both can be used for the scale out use case. The example below shows the definition of a "Frequency Limiter" guard policy. The policy name should be guard.frequency.<policy ID> for Frequency Limiter and guard.minmax.<policy ID> for MinMax, where <policy ID> is vlbpolicy2 in the example above.
+The example below shows the definition of a "Frequency Limiter" guard policy. Note that some optional fields, such as id and time interval, should be added to the policy using the "Object Properties" button:
-.. figure:: files/scaleout/19.png
+.. figure:: files/scaleout/clamp/14.png
:align: center
-Once the operational policy design is completed, users can submit and then deploy the closed loop clicking the "Submit" and "Deploy" buttons, respectively, as shown below.
+The user needs to manually insert id, actor, and operation so as to match the same fields defined in the operational policy.
-.. figure:: files/scaleout/20.png
+.. figure:: files/scaleout/clamp/15.png
+ :align: center
+
+Once the operational policy design is completed, users can submit and then deploy the closed loop clicking the "Submit" and "Deploy" buttons from the "Loop Operations" tab, as shown below.
+
+.. figure:: files/scaleout/clamp/16.png
:align: center
At this point, the closed loop is deployed to Policy Engine and DCAE, and a new microservice will be deployed to the DCAE platform.
@@ -264,6 +594,7 @@ At this point, the closed loop is deployed to Policy Engine and DCAE, and a new
1-5 Creating a VNF Template with CDT
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
Before running scale out use case, the users need to create a VNF template using the Controller Design Tool (CDT), a design-time tool that allows users to create and on-board VNF templates into APPC. The template describes which control operation can be executed against the VNF (e.g. scale out, health check, modify configuration, etc.), the protocols that the VNF supports, port numbers, VNF APIs, and credentials for authentication. Being VNF agnostic, APPC uses these templates to "learn" about specific VNFs and the supported operations.
CDT requires two input:
@@ -319,7 +650,7 @@ Here is an example of API for the vLB VNF used for this use case. We name the fi
To create the VNF template in CDT, the following steps are required:
-- Connect to the CDT GUI: http://ANY-K8S-IP:30289
+- Connect to the CDT GUI: http://ANY_K8S_IP:30289
- Click "My VNF" Tab. Create your user ID, if necessary
- Click "Create new VNF" entering the VNF type as reported in VID or AAI, e.g. vLoadBalancerMS/vLoadBalancerMS 0
- Select "ConfigScaleOut" action
@@ -330,6 +661,8 @@ To create the VNF template in CDT, the following steps are required:
- Click "Reference Data" Tab
- Click "Save All to APPC"
+Note: if a user gets an error when saving to APPC (cannot connect to the APPC network), they should open a browser to http://ANY_K8S_IP:30211 to accept the APPC proxy certificate.
+
For health check operation, we just need to specify the protocol, the port number and username of the VNF (REST, 8183, and "admin" respectively, in the case of vLB/vDNS) and the API. For the vLB/vDNS, the API is:
::
@@ -347,6 +680,7 @@ At this time, CDT doesn't allow users to provide VNF password from the GUI. To u
1-6 Setting the Controller Type in SO Database
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
Users need to specify which controller to use for the scale out use case. For Dublin, the supported controller is APPC. Users need to create an association between the controller and the VNF type in the SO database.
To do so:
@@ -358,7 +692,7 @@ To do so:
mysql -ucataloguser -pcatalog123
-- Use catalogdb databalse
+- Use catalogdb database
::
@@ -376,6 +710,7 @@ SO has a default entry for VNF type "vLoadBalancerMS/vLoadBalancerMS 0"
1-7 Determining VNF reconfiguration parameters
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
The post scale out VNF reconfiguration is VNF-independent but the parameters used for VNF reconfiguration depend on the specific use case. For example, the vLB-vDNS-vPacketGenerator VNF described in this documentation use the vLB as "anchor" point. The vLB maintains the state of the VNF, which, for this use case is the list of active vDNS instances. After creating a new vDNS instance, the vLB needs to know the IP addresses (of the internal private network and management network) of the new vDNS. The reconfiguration action is executed by APPC, which receives those IP addresses from SO during the scale out workflow execution. Note that different VNFs may have different reconfiguration actions. A parameter resolution is expressed as JSON path to the SDNC VF module topology parameter array. For each reconfiguration parameter, the user has to specify the array location that contains the corresponding value (IP address in the specific case). For example, the "configurationParameters" section of the input request to SO during scale out with manual trigger (see Section 4) contains the resolution path to "ip-addr" and "oam-ip-addr" parameters used by the VNF.
::
@@ -877,7 +1212,30 @@ In future releases, we plan to leverage CDS to model post scaling VNF reconfigur
PART 2 - Scale Out Use Case Instantiation
-----------------------------------------
-This step is only required if CDS is used.
+
+Manual queries with POSTMAN
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This step is only required if CDS is used, otherwise you can use VID to instantiate the service and the VNF.
+Note that the POSTMAN collection linked at the top of this page provides some level of automatic scripting that will automatically pass values between requests and provision the following queries.
+
+You must set the following variables in the POSTMAN configuration:
+- "k8s" -> The k8s loadBalancer cluster node
+- "cds-service-model" -> The SDC service name distributed
+- "cds-instance-name" -> A name of your choice for the vnf instance (This must be changed each time you launch the instantiation)
+
+These useful requests are:
+CDS#1 - SDC Catalog Service -> This gets the SDC service and provisions some variables
+CDS#2 - SO Catalog DB Service VNFs - CDS -> This gets info from SO and provisions some variables for the instantiation
+CDS#3 - SO Self-Serve Service Assign & Activate -> This starts the Service/VNF instantiation
+Open the body and replace the values like tenantId, owning entity, region, and all the OpenStack values everywhere in the payload.
+
+Note that you may have to add "onap_private_net_cidr":"10.0.0.0/16" in the "instanceParams" array depending on your OpenStack network configuration.
+
+CDS#4 - SO infra Active Request -> Used to get the status of the previous query
+
+Manual queries without POSTMAN
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
GET information from SDC catalogdb
@@ -1113,18 +1471,15 @@ that will instantiate Service, VNF, VF modules and Heat stacks:
"projectName":"Project-Demonstration"
},
"owningEntity":{
- "owningEntityId":"6f6c49d0-8a8c-4704-9174-321bcc526cc0",
- "owningEntityName":"OE-Demonstration"
+ "owningEntityId":"6f6c49d0-8a8c-4704-9174-321bcc526cc0",
+ "owningEntityName":"OE-Demonstration"
},
"modelInfo":{
- "modelVersion":"1.0",
- "modelVersionId":"{{service-uuid}}",
- "modelInvariantId":"{{service-invariantUUID}}",
- "modelName":"{{service-name}}",
- "modelType":"service"
- }
- }
-}'
+ "modelVersion":"1.0",
+ "modelVersionId":"{{service-uuid}}",
+ "modelInvariantId":"{{service-invariantUUID}}",
+ "modelName":"{{service-name}}",
+ "modelType":"service"}}}'
Note that the "dcae_collector_ip" parameter has to contain the IP address of one of the Kubernetes cluster nodes, 10.12.5.214 in the example above. In the response to the Macro request, the user will obtain a requestId that will be usefulto follow the instantiation request status in the ONAP SO:
@@ -1143,7 +1498,8 @@ PART 3 - Post Instantiation Operations
3-1 Post Instantiation VNF configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-CDS executes post-instantiation VNF configuration if the "skip-post-instantiation" flag in the SDC service model is set to false, which is the default behavior. Manual post-instantiation configuration is necessary if the "skip-post-instantiation" flag in the service model is set to true or if the VNF is instantiated using the preload approach, which doesn't include CDS. Regardless, this step is NOT required during scale out operations, as VNF reconfiguration will be triggered by SO and executed by APPC.
+
+CDS executes post-instantiation VNF configuration if the "skip-post-instantiation" flag in the SDC service model is set to false, which is the default behavior. Manual post-instantiation configuration is necessary if the "skip-post-instantiation" flag in the service model is set to true or if the VNF is instantiated using the preload approach, which doesn't include CDS. Regardless, this step is NOT required during scale out operations, as VNF reconfiguration will be triggered by SO and executed by APPC.
If VNF post instantiation is executed manually, in order to change the state of the vLB the users should run the following REST call, replacing the IP addresses in the VNF endpoint and JSON object to match the private IP addresses of their vDNS instance:
@@ -1170,6 +1526,7 @@ At this point, the VNF is fully set up.
3-2 Updating AAI with VNF resources
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
To allow automated scale out via closed loop, the users need to inventory the VNF resources in AAI. This is done by running the heatbridge python script in /root/oom/kubernetes/robot in the Rancher VM in the Kubernetes cluster:
::
@@ -1182,7 +1539,25 @@ Note that "vlb_onap_private_ip_0" used in the heatbridge call is the actual para
PART 4 - Triggering Scale Out Manually
--------------------------------------
-For scale out with manual trigger, VID is not supported at this time. Users can run the use case by directly calling SO APIs:
+For scale out with manual trigger, VID is not supported at this time.
+
+Manual queries with POSTMAN
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Note that the POSTMAN collection linked at the top of this page provides some level of automatic scripting that will automatically pass values between requests and provision the following queries.
+
+You must set the following variables in the POSTMAN configuration:
+- "k8s" -> The k8s loadBalancer cluster node
+- "cds-service-model" -> The SDC service name distributed
+- "cds-instance-name" -> A name of your choice for the vnf instance (This must be changed each time you launch the instantiation)
+
+CDS#5 - SO ScaleOut -> This will initiate a Scaleout manually
+CDS#7 - SO ScaleIn -> This will initiate a ScaleIn manually
+
+Manual queries without POSTMAN
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Users can run the use case by directly calling SO APIs:
::
@@ -1398,7 +1773,7 @@ These IDs are also used in the URL request to SO:
::
- http://<Any_K8S_Node_IP_Address>:30277/onap/so/infra/serviceInstantiation/v7/serviceInstances/7d3ca782-c486-44b3-9fe5-39f322d8ee80/vnfs/9d33cf2d-d6aa-4b9e-a311-460a6be5a7de/vfModules/scaleOut
+ http://<Any_K8S_Node_IP_Address>:30277/onap/so/infra/serviceInstantiation/v7/serviceInstances/7d3ca782-c486-44b3-9fe5-39f322d8ee80/vnfs/9d33cf2d-d6aa-4b9e-a311-460a6be5a7de/vfModules/scaleOut
Finally, the "configurationParameters" section in the JSON request to SO contains the parameters that will be used to reconfigure the VNF after scaling. Please see Section 1-7 for an in-depth description of how to set the parameters correctly.
@@ -1428,7 +1803,7 @@ The procedure is similar to one described above, with some minor changes:
4) **Controller type selection** in SO works as described in Section 1-6.
-5) **VNF instantiation from VID**: users can use VID to create the service, the VNF, and instantiate the VF modules. In the VID main page, users should select GR API (this should be the default option).
+5) **VNF instantiation from VID**: users can use VID to create the service, the VNF, and instantiate the VF modules. In the VID main page, users should select GR API (this should be the default option).
.. figure:: files/scaleout/vid.png
:align: center
@@ -1736,6 +2111,7 @@ Module-1 Preload
Module-2 Preload
~~~~~~~~~~~~~~~~
+
::
@@ -2052,11 +2428,14 @@ To instantiate VF modules, please refer to this wiki page: https://wiki.onap.org
PART 6 - Known Issues and Resolutions
-------------------------------------
-1) When running closed loop-enabled scale out, the closed loop designed in CLAMP conflicts with the default closed loop defined for the old vLB/vDNS use case
-Resolution: Change TCA configuration for the old vLB/vDNS use case
+ 1) When running closed loop-enabled scale out, the closed loop designed in CLAMP conflicts with the default closed loop defined for the old vLB/vDNS use case
+
+Resolution: Change TCA configuration for the old vLB/vDNS use case
-- Connect to Consul: http://<ANY K8S VM IP ADDRESS>:30270 and click on "Key/Value" → "dcae-tca-analytics"
+- Connect to Consul: http://ANY_K8S_IP:30270 and click on "Key/Value" → "dcae-tca-analytics"
- Change "eventName" in the vLB default policy to something different, for example "vLB" instead of the default value "vLoadBalancer"
-- Change "subscriberConsumerGroup" in the TCA configuration to something different, for example "OpenDCAE-c13" instead of the default value "OpenDCAE-c12"
+- Change "subscriberConsumerGroup" in the TCA configuration to something different, for example "OpenDCAE-c13" instead of the default value "OpenDCAE-c12"
- Click "UPDATE" to upload the new TCA configuration
+
+2) During Guilin testing, it has been noticed that there is an issue between SO and APPC for Healthcheck queries; this does not prevent the use case from proceeding but limits APPC capabilities.
diff --git a/docs/docs_usecases.rst b/docs/docs_usecases.rst
index 7eff6f485..a8efb0d63 100644
--- a/docs/docs_usecases.rst
+++ b/docs/docs_usecases.rst
@@ -1,55 +1,11 @@
.. This work is licensed under a Creative Commons Attribution 4.0
International License. http://creativecommons.org/licenses/by/4.0
- Copyright 2018 Huawei Technologies Co., Ltd. All rights reserved.
.. _docs_usecases:
-Verified Use Cases and Functional Requirements
-----------------------------------------------
+:orphan:
-Description
-~~~~~~~~~~~
-This session includes use cases and functional requirements which have been verified in Dublin release by the Integration team:
- 1. What has been implemented
- 2. Step by step instruction on how to deploy them, including the links to download the related assets and resources
- 3. Known issues and workaround
+.. toctree::
+ :maxdepth: 1
-The final testing status can be found at `Dublin Release Integration Testing Status Summary <https://wiki.onap.org/display/DW/Dublin+Release+Integration+Testing+Status>`_
-
-Use Cases
-~~~~~~~~~
-:ref:`vFirewall Use Case <docs_vfw>`
-
-:ref:`VF Module Scale Out Use Case (vLoadBalancer/vDNS example) <docs_scaleout>`
-
-:ref:`vCPE Use Case <docs_vcpe>`
-
-:ref:`CCVPN (Cross Domain and Cross Layer VPN) Use Case <docs_ccvpn>`
-
-:ref:`vFirewall/vDNS with HPA Use Case <docs_vfw_hpa>`
-
-:ref:`vFirewall Traffic Distribution Use Case <docs_vfw_traffic>`
-
-:ref:`BBS (Broadband Service) Use Case <docs_bbs>`
-
-:ref:`vIPsec with HPA Use Case <docs_vipsec_hpa>`
-
-:ref:`vFirewall/edgex with multicloud kubernetes plugin <docs_vfw_edgex_multicloud_k8s>`
-
-Functional Requirements
-~~~~~~~~~~~~~~~~~~~~~~~
-:ref:`5G - Real Time PM and High Volume Stream Data Collection <docs_realtime_pm>`
-
-:ref:`5G - Bulk PM <docs_5g_bulk_pm>`
-
-:ref:`5G - Configuration over NETCONF <docs_5G_Configuration_over_NETCONF>`
-
-:ref:`5G - OOF and PCI <docs_5G_oof_pci>`
-
-:ref:`PNF Support - PNF Plug and Play <docs_5g_pnf_pnp>`
-
-:ref:`PNF Support - PNF Software Upgrade <docs_5g_pnf_software_upgrade>`
-
-:ref:`Change Management Flexible Designer and Orchestrator <docs_CM_flexible_designer_orchestrator>`
-
-:ref:`Change Management Schedule Optimization <docs_CM_schedule_optimizer>`
+ usecases/deprecated_usecases.rst
diff --git a/docs/docs_usecases_release.rst b/docs/docs_usecases_release.rst
new file mode 100644
index 000000000..e8f3c401f
--- /dev/null
+++ b/docs/docs_usecases_release.rst
@@ -0,0 +1,34 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+ International License. http://creativecommons.org/licenses/by/4.0
+
+.. _docs_usecases_release:
+
+:orphan:
+
+Kohn Use Cases and Requirements
+===============================
+
+Description
+-----------
+
+This section includes use cases and functional requirements which have been
+officially verified in the Kohn release by the ONAP community.
+
+For each use case or functional requirement, you can find contact names and a
+link to the associated documentation.
+
+This documentation deals with
+
+ 1. What has been implemented
+ 2. Step by step instructions to deploy and execute the tests, including the
+ links to download the related assets and resources
+ 3. Known issues and workarounds
+
+.. toctree::
+ :maxdepth: 1
+
+ usecases/release_usecases.rst
+ usecases/release_automated_usecases.rst
+ usecases/release_requirements.rst
+ usecases/release_non_functional_requirements.rst
+ usecases/deprecated_usecases.rst
diff --git a/docs/docs_vCPE with Tosca VNF.rst b/docs/docs_vCPE with Tosca VNF.rst
deleted file mode 100644
index 4a5b6fc69..000000000
--- a/docs/docs_vCPE with Tosca VNF.rst
+++ /dev/null
@@ -1,159 +0,0 @@
-.. _docs_vcpe_tosca:
-
-vCPE with Tosca VNF
-----------------------------
-
-VNF Packages and NS Packages
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-vCPE tosca file url: https://git.onap.org/demo/tree/tosca/vCPE
-
-5 VNFs are here for the ONAP vCPE use case. This VNFD is transformed manually from vCPE heat template.
-Please run "./generate_csar.sh" to create the CSAR package files for these 5 VNFS. CSAR package file is just a zip formatted file. If you want to use SRIOV SRIOV-NIC", please run "./generate_csar.sh sriov" to create the CSAR package files for SRIOV.
-All the generated VNF packges can be found in the following link:
-- VNF packages: https://wiki.onap.org/display/DW/vCPE+with+Tosca+VNF+Test+Guide
-- NS packages: https://wiki.onap.org/display/DW/vCPE+with+Tosca+VNF+Test+Guide
-
-Description
-~~~~~~~~~~~
-The vCPE with Tosca VNF shows how to use ONAP to deploy tosca based vCPE. ONAP Casablanca release supports deployment,termination and manual heal Tosca based vCPE. User can trigger the above operation via UUI. and User can first chose Network serivce type and conrresponding service template in UUI and then UUI will directly invoke VF-C Northbound interfaces to do the life cycle management. In Casablanca release, we bypass SO, in the following release, we can add SO to the workflow. The main projects involved in this use case include: SDC, A&AI, UUI,VF-C, Multicloud,MSB, Policy,OOF.
-The use case is composed of five virtual functions (VFs): Infrastructure including vDNS, vDHCP, vAAA(Authorization, Authentication, Accounting) and vWEB, vBNG(Virtual Broadband Network Gateway), vGMUX(Virtual Gateway Multiplexer), vBRGEMU(Bridged Residential Gateway) and vGW(Virtual Gateway). Infrastructure VF run in one VM. the other VFs run in separate four VMs. We will send much data from vBRGEMU to vGW. we need to accelarate it using SRIOV-NIC.
-The original vCPE Use Case Wiki Page can be found here: https://wiki.onap.org/pages/viewpage.action?pageId=3246168
-
-How to Use
-~~~~~~~~~~
-
-
-Configuration:
-~~~~~~~~~~~~~~
-1) VIM Configuration
-If you want to use SRIOV-NIC, you need first config SRIOV NIC to refer to [1].
-[1] https://docs.openstack.org/ocata/networking-guide/config-sriov.html
-
-ONAP managing 1 cloud-region which have three flavors.
-Flavor 11:
-2 vcpus, 1 Gbytes of memory, 20Gb disk
-Numa page size: 2Mbytes and number pages 512
-::
-
- openstack flavor create onap.hpa.flavor11 -id auto --ram 1024 --disk 20 --vcpus 2
-
-Flavor 12:
-2 vcpus, 2 Gbytes of memory, 20Gb disk
-Numa page size: 2Mbytes and number pages 1024
-::
-
- openstack flavor create onap.hpa.flavor12 -id auto --ram 2048 --disk 20 --vcpus 2
-
-Flavor 13:
-2 vcpus, 4 Gbytes of memory, 20Gb disk
-Huge page size: 2Mbytes and number pages 2048
-1 SRIOV-NIC VF
-::
-
- openstack flavor create onap.hpa.flavor13 -id auto --ram 4096 --disk 20 -vcpus 2
- openstack flavor set onap.hpa.flavor11 --property aggregate_instance_extra_specs:sriov_nic=sriov-nic-intel-1234-5678-physnet1:1
- openstack aggregate create --property sriov_nic=sriov-nic-intel-1234-5678-physnet1:1 hpa_aggr11
-
-comments: you must change 1234 and 5678 to real vendor id and product id. you also need change physnet1 to the provider network.
-
-2)Policy Configuration
-After the patch https://gerrit.onap.org/r/#/c/73502/ is merged. With the generated policy and do some manually update as follows, the service could be distributed successfully and the Policy/VFC/OOF could work as excepted.
-
-- Need manually modify policy item because the “vendor id” and “PCI device id” and “architecture” must be changed in different VIMs since we have different PCI devices in different VIMs
-- The value of mandatory in CSAR is “true”, OOF is case intensive, it needs to use “True”. Have to update it. suggest OOF to use ignoreCase in R4.
-- The attribute key in CSAR is pciNumDevices, but the responding one in OOF/Mutlicloud is pciCount. Suggest keeping alignment in R4.
-- The policy scope has to add a value “us” into it which is a configuration issue in OOF side. Policy side also need do improvement to deal with policy scope automatically append instead of replacement so such policy could be used by several services at the same time.
-
-Design Time:
-~~~~~~~~~~~
-1) Because SDC doesn't export ETSI aigned VNF package and NS package, so in this release, we put the real ETSI aligned package as package artifact.
-2) When design Network service in SDC, should assign "gvnfmdriver" as the value of nf_type in Properties Assignment. so that VF-C can know will use gvnfm to manage VNF life cycle.
-
-Run Time:
-~~~~~~~~
-1) First onboard VNF/NS package from SDC to VF-C catalog in sequence.
-2) Trigger the NS operation via UUI
-
-More details can be fonud here: https://wiki.onap.org/display/DW/vCPE+with+Tosca+VNF+Test+Guide
-
-Test Status and Plans
-~~~~~~~~~~~~~~~~~~~~~
-The test plan 3 in https://wiki.onap.org/pages/viewpage.action?pageId=41421112.
-Test Plan 3: VF-C HPA testing
-This test plan covers the tests related to testing
-Support for the vCPE use case in VF-C
-Use vCPE (Infra, vGW, vBNG, vBRGEMU and vGMUX)
-
-Infra part of policy asking for:
-::
-
- 2 vcpus
- >= 2Gbytes of memory
- > 40Gbytes of disk
-
-vGW part of policy asking for:
-::
-
- 2 vcpus
- >=4Gbytes of memory
- >= 40Gbytes of disk
- Numa page size: 2Mbytes and pages 1024
- with one SRIOV-NIC
-
-vBNG part of policy asking for:
-::
-
- 2 vcpus
- >= 2Gbytes of memory
- > 40Gbytes of disk
- Numa page size: 2Mbytes and pages 1024
- with one SRIOV-NIC
-
-vBGREMU part of policy asking for:
-::
-
- 2 vcpus
- >= 2Gbytes of memory
- >= 40Gbytes of disk
- Numa page size: 2Mbytes and pages 1024
- with one SRIOV-NIC
-
-vGMUX part of policy asking for:
-::
-
- 2 vcpus
- >= 2Gbytes of memory
- > 40Gbytes of disk
- Numa page size: 2Mbytes and pages 1024
- with one SRIOV-NIC
-
-Instantiate the VNF
-Check for results:
-It would have selected flavor13 for vGW, vBNG, vBRGEMU and vGMUX VMs. It would have selected flavor13 and flavor12 for Infrastructure.
-
-This case completed all tests as found here: https://wiki.onap.org/display/DW/vCPE+with+TOSCA+VNF+Integration+Test++-+Test+status
-
-Known Issues and Resolutions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- Some SDC NS data model is not aligned to VFC NS data model, VFC NS also according to ETSI SOL0001. we also can refer to https://jira.onap.org/browse/SDC-1897. we have a workaround for this issue, we put the service as artifact file and distribute to VFC.
-- NFV Tosca parser bug https://jira.opnfv.org/browse/PARSER-187. we also filed a bug in VFC https://jira.onap.org/browse/VFC-1196.
-- 'artifacts' definition is missing in the exported csar's VDU node, we also can refer to https://jira.onap.org/browse/SDC-1900. It’s a very hacky workaround in VFC’s GVFNM. Because currently the only use case will use GVFNM is vCPE, which only uses the ubuntu16.04 image, so GVFNM just makes the ubuntu16.04 image as the default if the "sw_image" artifact is missing in the SDC’s exported CSAR.
-- OOF patch https://gerrit.onap.org/r/#/c/73332/ is not accepted by 1.2.4 image. 1.2.5 is available in nexus3 repo. But not available in Casablanca release. If you want to use it, you can use 1.2.5-SNAPSHOT-latest. If you use 1.2.4 image, you also need to modify code according to the patch.
-- vnflcm notification error patch https://gerrit.onap.org/r/#/c/73852/
-- grant error patch not merged into VF-C 1.2.2 image: https://gerrit.onap.org/r/#/c/73833/ and https://gerrit.onap.org/r/#/c/73770/
-- VF-C catalog config should be updated with the right SDC URL and user/pwd
-Resolution: Disable VFC catalog livenessprobe and update configuration
-
-a) edit dev-vfc-catalog deployment
-b) remove livenessprobe section
-c) enter into catalog pod and update configuration
-::
-
-kubectl -n onap exec -it dev-vfc-catalog-6978b76c86-87722 /bin/bash
-config file location: service/vfc/nfvo/catalog/catalog/pub/config/config.py
-Update the SDC configuration as follows:
-SDC_BASE_URL = "http://msb-iag:80/api"
-SDC_USER = "aai"
-SDC_PASSWD = "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U"
-
diff --git a/docs/docs_vCPE.rst b/docs/docs_vCPE.rst
index cff5f3f27..6a8e2c536 100644
--- a/docs/docs_vCPE.rst
+++ b/docs/docs_vCPE.rst
@@ -4,12 +4,14 @@
.. _docs_vcpe:
+:orphan:
+
vCPE Use Case
-----------------------------
+-------------
Description
~~~~~~~~~~~
-vCPE use case is based on Network Enhanced Residential Gateway architecture specified in Technical Report 317 (TR-317), which defines how service providers deploy residential broadband services like High Speed Internet Access. The use case implementation has infrastructure services and customer service. The common infrastructure services are deployed first and shared by all customers. The use case demonstrates ONAP capabilities to design, deploy, configure and control sophisticated services.
+vCPE use case is based on Network Enhanced Residential Gateway architecture specified in Technical Report 317 (TR-317), which defines how service providers deploy residential broadband services like High Speed Internet Access. The use case implementation has infrastructure services and customer service. The common infrastructure services are deployed first and shared by all customers. The use case demonstrates ONAP capabilities to design, deploy, configure and control sophisticated services.
More details on the vCPE Use Case can be found on wiki page https://wiki.onap.org/pages/viewpage.action?pageId=3246168
@@ -25,135 +27,88 @@ Here are the main steps to run the use case in Integration lab environment, wher
1. Run Robot script from Rancher node to onboard VNFs, create and distribute models for vCPE four infrastructure services, i.e. infrastructure, brg, bng and gmux
-::
+::
demo-k8s.sh onap init
-
-2. Add customer SDN-ETHERNET-INTERNET (see the use case tutorial wiki page for detail)
-
-3. Add identity-url to RegionOne data in A&AI. First use POSTMAN to GET cloud-region RegionOne data, then add identity-url and PUT back to A&AI
-::
-
- GET https://{{aai}}:{{port}}/aai/v14/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne
+2. Add route on sdnc cluster VM node, which is the cluster VM node where pod sdnc-sdnc-0 is running on. This will allow ONAP SDNC to configure BRG later on.
::
- PUT https://{{aai}}:{{port}}/aai/v14/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne
- {
- "cloud-owner": "CloudOwner",
- "cloud-region-id": "RegionOne",
- "cloud-type": "SharedNode",
- "owner-defined-type": "OwnerType",
- "cloud-region-version": "v1",
- "identity-url": "http://10.12.25.2:5000/v2.0",
- "cloud-zone": "CloudZone",
- "resource-version": "1559336510793",
- "relationship-list": {
- ... ...
-
-4. Add route on sdnc cluster VM node, which is the cluster VM node where pod sdnc-sdnc-0 is running on. This will allow ONAP SDNC to configure BRG later on.
-
-::
-
ip route add 10.3.0.0/24 via 10.0.101.10 dev ens3
-5. Install Python and other Python libraries
+3. Install Python and other Python libraries
::
-
+
integration/test/vcpe/bin/setup.sh
-6. Change the Openstack env parameters and one customer service related parameter in vcpecommon.py
+4. Setup vcpe scripts by adjusting relevant parts of provided vcpeconfig.yaml config file. Most importantly adjust the Openstack env parameters shown below. Please issue 'vcpe.py --help' for detailed usage info.
-::
+::
- cloud = {
- '--os-auth-url': 'http://10.12.25.2:5000',
- '--os-username': 'xxxxxxxxxx',
- '--os-user-domain-id': 'default',
- '--os-project-domain-id': 'default',
- '--os-tenant-id': 'xxxxxxxxxxxxxxxx' if oom_mode else '1e097c6713e74fd7ac8e4295e605ee1e',
- '--os-region-name': 'RegionOne',
- '--os-password': 'xxxxxxxxxxx',
- '--os-project-domain-name': 'xxxxxxxxx' if oom_mode else 'Integration-SB-07',
- '--os-identity-api-version': '3'
- }
+ cloud_name: 'xxxxxxxx'
- common_preload_config = {
- 'oam_onap_net': 'xxxxxxxx' if oom_mode else 'oam_onap_lAky',
- 'oam_onap_subnet': 'xxxxxxxxxx' if oom_mode else 'oam_onap_lAky',
- 'public_net': 'xxxxxxxxx',
+ common_preload_config:
+ 'oam_onap_net': 'xxxxxxxx'
+ 'oam_onap_subnet': 'xxxxxxxxxx'
+ 'public_net': 'xxxxxxxxx'
'public_net_id': 'xxxxxxxxxxxxx'
- }
-::
+"cloud_name" should be set to Openstack cloud name from clouds.yaml. By default this file is at ~/.config/openstack directory; if it's located in scripts directory it will have precedence over the beforementoined one. Example clouds.yaml.example file is provided.
- # CHANGEME: vgw_VfModuleModelInvariantUuid is in rescust service csar, open service template with filename like service-VcpesvcRescust1118-template.yml and look for vfModuleModelInvariantUUID under groups vgw module metadata.
- self.vgw_VfModuleModelInvariantUuid = 'xxxxxxxxxxxxxxx'
-
-7. Initialize vcpe
+5. Run Robot to create and distribute for vCPE customer service. This step assumes step 1 has successfully distributed all vcpe models except customer service model
::
-
- vcpe.py init
-8. Run a command from Rancher node to insert vcpe customer service workflow entry in SO catalogdb. You should be able to see a sql command printed out from the above step output at the end, and use that sql command to replace the sample sql command below (inside the double quote) and run it from Rancher node:
+ ete-k8s.sh onap distributevCPEResCust
+
+6. If running with oom_mode=False initialize SDNC ip pool by running below command from k8s control node. It will be done automatically otherwise.
::
- kubectl exec dev-mariadb-galera-mariadb-galera-0 -- mysql -uroot -psecretpassword catalogdb -e "INSERT INTO service_recipe (ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, SERVICE_PARAM_XSD, RECIPE_TIMEOUT, SERVICE_TIMEOUT_INTERIM, CREATION_TIMESTAMP, SERVICE_MODEL_UUID) VALUES ('createInstance','1','vCPEResCust 2019-06-03 _04ba','/mso/async/services/CreateVcpeResCustService',NULL,181,NULL, NOW(),'6c4a469d-ca2c-4b02-8cf1-bd02e9c5a7ce')"
+ kubectl -n onap exec -it dev-sdnc-sdnc-0 -- /opt/sdnc/bin/addIpAddresses.sh VGW 10.5.0 22 250
-9. Run Robot to create and distribute for vCPE customer service. This step assumes step 1 has successfully distributed all vcpe models except customer service model
+7. Initialize vcpe
::
- ete-k8s.sh onap distributevCPEResCust
+ vcpe.py init
-10. Manually copy vCPE customer service csar (starting with service-Vcperescust) under Robot container /tmp/csar directory to Rancher vcpe/csar directory, now you should have these files:
+8. If running with oom_mode=False run a command printed at the end of the above step from k8s control node to insert vcpe customer service workflow entry in SO catalogdb. It will be done automatically otherwise.
-::
- root@sb00-nfs:~/integration/test/vcpe/csar# ls -l
- total 528
- -rw-r--r-- 1 root root 126545 Jun 26 11:28 service-Demovcpeinfra-csar.csar
- -rw-r--r-- 1 root root 82053 Jun 26 11:28 service-Demovcpevbng-csar.csar
- -rw-r--r-- 1 root root 74179 Jun 26 11:28 service-Demovcpevbrgemu-csar.csar
- -rw-r--r-- 1 root root 79626 Jun 26 11:28 service-Demovcpevgmux-csar.csar
- -rw-r--r-- 1 root root 78156 Jun 26 11:28 service-Demovcpevgw-csar.csar
- -rw-r--r-- 1 root root 83892 Jun 26 11:28 service-Vcperescust20190625D996-csar.csar
-
-11. Instantiate vCPE infra services
+9. Instantiate vCPE infra services
::
vcpe.py infra
-12. From Rancher node run vcpe healthcheck command to check connectivity from sdnc to brg and gmux, and vpp configuration of brg and gmux. Write down BRG MAC address printed out at the last line
+10. From Rancher node run vcpe healthcheck command to check connectivity from sdnc to brg and gmux, and vpp configuration of brg and gmux.
::
healthcheck-k8s.py --namespace <namespace name> --environment <env name>
-13. Instantiate vCPE customer service. Input the BRG MAC when prompt
+11. Instantiate vCPE customer service.
::
vcpe.py customer
-14. Update libevel.so in vGMUX VM and restart the VM. This allows vGMUX to send events to VES collector in close loop test. See tutorial wiki for details
+12. Update libevel.so in vGMUX VM and restart the VM. This allows vGMUX to send events to VES collector in close loop test. See tutorial wiki for details
-15. Run heatbridge. The heatbridge command usage: demo-k8s.sh <namespace> heatbridge <stack_name> <service_instance_id> <service> <oam-ip-address>, please refer to vCPE tutorial page on how to fill in those paraemters. See an example as following:
+13. Run heatbridge. The heatbridge command usage: demo-k8s.sh <namespace> heatbridge <stack_name> <service_instance_id> <service> <oam-ip-address>, please refer to the vCPE tutorial page on how to fill in those parameters. See the following example:
::
~/integration/test/vcpe# ~/oom/kubernetes/robot/demo-k8s.sh onap heatbridge vcpe_vfmodule_e2744f48729e4072b20b_201811262136 d8914ef3-3fdb-4401-adfe-823ee75dc604 vCPEvGMUX 10.0.101.21
-16. Start closed loop test by triggering packet drop VES event, and monitor if vGMUX is restarting. You may need to run the command twice if the first run fails
+14. Start closed loop test by triggering packet drop VES event, and monitor if vGMUX is restarting. You may need to run the command twice if the first run fails
-::
+::
vcpe.py loop
diff --git a/docs/docs_vCPE_tosca_local.rst b/docs/docs_vCPE_tosca_local.rst
new file mode 100644
index 000000000..8b903adb7
--- /dev/null
+++ b/docs/docs_vCPE_tosca_local.rst
@@ -0,0 +1,210 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+ International License. http://creativecommons.org/licenses/by/4.0
+ Copyright 2020 CMCC Technologies Co., Ltd. All rights reserved.
+
+.. _docs_vcpe_tosca_local:
+
+:orphan:
+
+vCPE Tosca Local Mode Use Case
+------------------------------
+
+Description
+~~~~~~~~~~~
+vCPE tosca use case is based on Network Enhanced Residential Gateway architecture specified in Technical Report 317 (TR-317), which defines how service providers deploy residential broadband services like High Speed Internet Access. The use case implementation has infrastructure services and customer service. The common infrastructure services are deployed first and shared by all customers. The use case demonstrates ONAP capabilities to design, deploy, configure and control sophisticated services.
+
+More details on the vCPE Use Case can be found on wiki page https://wiki.onap.org/pages/viewpage.action?pageId=3246168
+
+"Local" refers to the way the network elements are distributed. Here, "local" means that we upload the csar files directly in order to distribute the vnf and ns configurations.
+
+Source Code
+~~~~~~~~~~~
+vcpe tosca local test scripts: https://git.onap.org/integration/tree/test/vcpe_tosca/local/vcpe_tosca_test.py
+
+How to Use
+~~~~~~~~~~
+The use case has been automated by vcpe_tosca_test scripts. The followings are the main steps to run the use case in Integration lab environment:
+
+1) Install ONAP CLI environment, open_cli_product is onap-dublin.
+ Use https://git.onap.org/integration/tree/test/vcpe_tosca/local/scripts/install-alpine.sh to install ONAP CLI.
+
+2) Prepare openstack test environment.
+
+ * Create project(tenant) and user on openstack
+
+ Openstack Horizon--Identity--Projects page
+
+ .. image:: files/vcpe_tosca/create_project.png
+
+ Openstack Horizon--Identity--Users page
+
+ .. image:: files/vcpe_tosca/create_user.png
+
+ Manage Project Members
+
+ .. image:: files/vcpe_tosca/manage_project_user.png
+
+ * Create and upload image for VNF
+
+ Identify the version of the lab server, my lab server is Ubuntu 16.04.3 LTS.
+
+ ::
+
+ root@onap-dengyuanhong-master:~# cat /etc/lsb-release
+ DISTRIB_ID=Ubuntu
+ DISTRIB_RELEASE=16.04
+ DISTRIB_CODENAME=xenial
+ DISTRIB_DESCRIPTION="Ubuntu 16.04.3 LTS"
+
+
+ Download the related ubuntu image from https://cloud-images.ubuntu.com/
+
+ .. image:: files/vcpe_tosca/image.png
+
+   Openstack Horizon--Project--Compute--Images page, create an image named "image"; the name must be the same as the image name defined in the vnf csar file.
+
+ .. image:: files/vcpe_tosca/create_image.png
+
+3) Update the configuration file vcpe_config.json under https://git.onap.org/integration/tree/test/vcpe_tosca/local/config
+
+ You should update the values if you want to run in your environment.
+
+ Firstly, identify the Region name you used on your openstack environment, our Region name is RegionOne, it will be used by the configuration file.
+
+ ::
+
+ [wrsroot@controller-0 ~(keystone_admin)]$ openstack region list
+ +-----------+---------------+-------------+
+ | Region | Parent Region | Description |
+ +-----------+---------------+-------------+
+ | RegionOne | None | |
+ +-----------+---------------+-------------+
+
+
+ Secondly, update the values according to your environment.
+
+ ::
+
+ "open_cli_product": set to CLI product you installed, onap-dublin is OK for this test.
+ "open_cli_home": set to the oclip home path,
+ "aai_url": set to msb ip and port you used,
+ "msb_url": set to msb ip and port you used,
+ "multicloud_url": set to msb ip and port you used,
+
+ "complex_name": set to any complex name you want to use, the name must be unique
+
+ "cloud_region_data": {
+ "RegionOne":(update to your Region name) {
+ "cloud-region-version": the cloud region version of your Cloud region, you can keep "titanium_cloud"
+ "esr-system-info-id": "1111ce1f-aa78-4ebf-8d6f-4b62773e9b01",
+ "service-url": the ip change to your openstack ip address,
+ "user-name": the user name you created on openstack,
+ "password": the user password you created on openstack,
+ "system-type": "VIM",
+ "ssl-insecure": true,
+ "cloud-domain": "Default",
+ "default-tenant": the project name you created on openstack,
+ "tenant-id": the project id you created on openstack,
+ "cloud-type": "openstack",
+ "identity-url": the ip change to your openstack ip address,
+ "system-status": "active"
+ }
+ }
+ "cloud-owner": set to any cloud name you want to use, , the name must be unique
+ "service_name": set to any service name you want to use, the name must be unique
+ "customer_name": set to any customer name you want to use, the name must be unique
+ "subscriber_name": set to any subscriber name you want to use, the name must be unique
+
+ "vfc-url": set to msb ip and port you used,
+ "vnfs": {
+ "vgw": {
+            "path": "/csar/vgw.csar", set to your vgw csar file path
+ "key": "key2",
+ "value": "value2"
+ },
+ "infra": {
+            "path": "/csar/infra.csar", set to your infra csar file path
+ "key": "key2",
+ "value": "value2"
+ },
+ "vbng": {
+            "path": "/csar/vbng.csar", set to your vbng csar file path
+ "key": "key2",
+ "value": "value2"
+ },
+ "vbrgemu": {
+            "path": "/csar/vbrgemu.csar", set to your vbrgemu csar file path
+ "key": "key2",
+ "value": "value2"
+ },
+ "vgmux": {
+            "path": "/csar/vgmux.csar", set to your vgmux csar file path
+ "key": "key2",
+ "value": "value2"
+ }
+ },
+ "ns": {
+ "key": "key1",
+ "value": "value1",
+        "path": "ns_vgw.csar", set to your ns csar file path
+ "name": "vcpe11"
+ },
+ "location": "VCPE22_RegionOne", set to CloudOwner_CloudRegion
+ "vnfm_params": {
+ "GVNFMDRIVER": {
+ "type": "gvnfmdriver",
+ "vendor": "vfc",
+ "version": "v1.0",
+ "url": set to msb ip and port you used,
+ "vim-id": "VCPE22_RegionOne", set to CloudOwner_CloudRegion
+ "user-name": "admin",
+ "user-password": "admin",
+ "vnfm-version": "v1.0"
+ }
+ }
+
+
+4) The vnf csar file include Infra, vGW, vBNG, vBRGEMU and vGMUX, and the ns csar file is ns. https://git.onap.org/integration/tree/test/vcpe_tosca/local/csar
+
+
+5) The key test script is vcpe_tosca_test.py which is under https://git.onap.org/integration/tree/test/vcpe_tosca/local
+
+ Run command is
+
+ ::
+
+ python3 -m unittest vcpe_tosca_test.py
+
+   Before running the command, you should install requests (pip install requests), and update the path of the configuration file vcpe_config.json.
+
+6) Release versions of our environment
+
+ ::
+
+ vfc-nslcm: 1.3.8
+ vfc-vnflcm: 1.3.8
+ vfc-gvnfm: 1.3.8
+ modeling-etsicatalog: 1.0.5
+ multicloud-framework: 1.5.1
+ multicloud-windriver: 1.5.5
+ cli: onap-dublin
+
+
+Note
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+1) You must authorize admin to vcpe_case when managing project members in openstack.
+
+2) You should create an image named "image" before running the test script; the name must be the same as the image name defined in the vnf csar file.
+
+3) You should install ONAP CLI before running the script.
+
+
+Known Issues and Workaround
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+1) There is a timeout issue when terminating a vnf; for the solution, refer to
+
+ https://gerrit.onap.org/r/c/vfc/nfvo/driver/vnfm/gvnfm/+/105192
+
+2) The process of the terminating job is chaotic; for the solution, refer to
+
+ https://gerrit.onap.org/r/c/vfc/nfvo/lcm/+/105449
diff --git a/docs/docs_vCPE_with_Tosca_VNF.rst b/docs/docs_vCPE_with_Tosca_VNF.rst
new file mode 100644
index 000000000..3343bdf9f
--- /dev/null
+++ b/docs/docs_vCPE_with_Tosca_VNF.rst
@@ -0,0 +1,190 @@
+.. _docs_vcpe_tosca:
+
+:orphan:
+
+vCPE with Tosca VNF
+-------------------
+
+VNF Packages and NS Packages
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+vCPE tosca file url: https://git.onap.org/demo/tree/tosca/vCPE_F
+
+5 VNFs are here for the ONAP vCPE use case. The vnf csar file includes Infra, vGW, vBNG, vBRGEMU and vGMUX, and the ns csar file is ns.
+
+Description
+~~~~~~~~~~~
+The vCPE with Tosca VNF use case shows how to use ONAP to deploy a tosca based vCPE. The ONAP Casablanca release supports deployment, termination and manual healing of a Tosca based vCPE. The user can trigger these operations via UUI: the user first chooses the Network service type and the corresponding service template in UUI, and then UUI directly invokes the VF-C Northbound interfaces to do the life cycle management. In the Casablanca release we bypass SO; in a following release, SO can be added to the workflow. The main projects involved in this use case include: SDC, A&AI, UUI, VF-C, Multicloud, MSB, Policy and OOF.
+The use case is composed of five virtual functions (VFs): Infrastructure including vDNS, vDHCP, vAAA(Authorization, Authentication, Accounting) and vWEB, vBNG(Virtual Broadband Network Gateway), vGMUX(Virtual Gateway Multiplexer), vBRGEMU(Bridged Residential Gateway) and vGW(Virtual Gateway). The Infrastructure VF runs in one VM; the other VFs run in four separate VMs. We send a large amount of data from vBRGEMU to vGW, so we need to accelerate it using an SRIOV-NIC.
+The original vCPE Use Case Wiki Page can be found here: https://wiki.onap.org/pages/viewpage.action?pageId=3246168
+
+How to Use
+~~~~~~~~~~
+
+
+Configuration
+~~~~~~~~~~~~~
+1) VIM Configuration
+
+ Prepare openstack test environment.
+
+ * Create project(tenant) and user on openstack
+
+ Openstack Horizon--Identity--Projects page
+
+ .. image:: files/vcpe_tosca/create_project.png
+
+ Openstack Horizon--Identity--Users page
+
+ .. image:: files/vcpe_tosca/create_user.png
+
+ Manage Project Members
+
+ .. image:: files/vcpe_tosca/manage_project_user.png
+
+ * Create and upload image for VNF
+
+ * Register VIM using CLI command or ESR GUI
+
+ .. image:: files/vcpe_tosca/vim.png
+
+2) VNFM Configuration
+
+ Register vnfm using CLI command or ESR GUI.
+
+ .. image:: files/vcpe_tosca/vnfm.png
+
+Design Time
+~~~~~~~~~~~
+1) We put the real ETSI aligned package as package artifact.
+2) When designing a Network service in SDC, assign "gvnfmdriver" as the value of nf_type in Properties Assignment, so that VF-C knows it will use gvnfm to manage the VNF life cycle.
+
+ .. image:: files/vcpe_tosca/sdc.png
+
+Run Time
+~~~~~~~~
+1) First onboard VNF/NS package from SDC to modeling etsicatalog in sequence.
+2) Trigger the NS operation via UUI guide
+
+a) VNF/NS csar package on-boarded guide
+
+ Note:
+
+ * VNF/NS csar package can be distributed from SDC.
+ * VNF csar package should be distributed first, then NS csar package can be distributed.
+ * The csar package list page shows both the on-boarded/distributed csar package and the package from SDC.
+ * When the package from SDC is on-boarded, it will be dropped from the list, and the on-boarded package will be displayed in the list.
+
+ The following shows the guide of on-boarded a NS csar package via UUI:
+
+   Step 1. Select the SDC NS csar package vcpe_test_001 in the csar package list page, and click the onboard button; the SDC NS csar package will be on-boarded to Modeling:
+
+ .. image:: files/vcpe_tosca/ns_package_list.png
+
+ Step 2. When the onboard is completed, the SDC csar vcpe_test_001 is dropped from the list, and the on-boarded csar info(vcpe) will be displayed in the csar file list:
+
+ .. image:: files/vcpe_tosca/ns_package_onboard.png
+
+   You can also onboard a VNF csar package by clicking the VNF tab in the csar package list page, then following the above two steps. You should onboard the vnfs before the ns.
+
+b) NS Instantiate guide
+
+ Note:
+
+ * When an NS package is on-boarded or distributed, you can start NS Instantiating.
+
+ The following steps show the guide of Instantiating NS:
+
+ Step 1. Open the service list page, first select Customer and Service Type, then click Create button.
+
+ .. image:: files/vcpe_tosca/customer_service.png
+
+ Step 2. First select the Service with Network Service, then select the TEMPLATE, then click OK button:
+
+ .. image:: files/vcpe_tosca/ns_create.png
+
+ Step 3. First input the NS Name and Description, then select the vf_location of each vnf, then click Create button:
+
+ .. image:: files/vcpe_tosca/ns_create_input.png
+
+   Step 4. A new record will be added to the list page; the Status column will show the Instantiating progress.
+
+ .. image:: files/vcpe_tosca/ns_instance.png
+
+   Step 5. When NS Instantiating is completed, the Status will be updated to completed, and you can refresh the page; the Status will be updated to Active.
+
+ .. image:: files/vcpe_tosca/ns_active.png
+
+c) NS heal guide
+
+ Note:
+
+   * VF-C R3 healing only supports restarting a VM of a VNF.
+
+ The following shows the guide of healing an VNF of an Instantiated NS:
+
+   Step 1. Click the + button of an Instantiated NS; the VNF list of the NS will be displayed:
+
+ .. image:: files/vcpe_tosca/ns_vnf_list.png
+
+ Step 2. Click the heal button of a VNF, select the vm of the VNF, and click OK button:
+
+ .. image:: files/vcpe_tosca/ns_vnf_heal.png
+
+ Step 3. When VNF healing is started, the Status of VNF will shows the progress of healing.
+
+ .. image:: files/vcpe_tosca/ns_vnf_healing.png
+
+ Step 4. When VNF healing is completed, the Status will be updated to completed, you can refresh the page, the Status will be updated to Active again.
+
+ .. image:: files/vcpe_tosca/ns_vnf_healed.png
+
+d) NS delete guide
+
+   The following shows the guide of deleting an Instantiated NS:
+
+ Step 1. Select an Instantiated NS record in the list page, then click the delete button:
+
+ .. image:: files/vcpe_tosca/ns_active.png
+
+ Step 2. Select the termination Type and the graceful Termination Timeout, then click OK button:
+
+ .. image:: files/vcpe_tosca/ns_delete.png
+
+ Step 3. When the deleting is started, the Status will be updated to the progress of deleting.
+
+ .. image:: files/vcpe_tosca/ns_deleting.png
+
+   When deleting is completed, the Status will be updated to completed, and soon it will be dropped from the list.
+
+ .. image:: files/vcpe_tosca/ns_deleted.png
+
+Test Status and Plans
+~~~~~~~~~~~~~~~~~~~~~
+This case completed all tests as found here: https://wiki.onap.org/display/DW/vCPE%28tosca%29+-++Integration+test+cases
+
+Known Issues and Resolutions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+1) There is a timeout issue when terminating a vnf; for the solution, refer to
+
+ https://gerrit.onap.org/r/c/vfc/nfvo/driver/vnfm/gvnfm/+/105192
+
+2) The process of the terminating job is chaotic; for the solution, refer to
+
+ https://gerrit.onap.org/r/c/vfc/nfvo/lcm/+/105449
+
+3) Failed to fetch NS package from SDC when having VL resource, the solution is refer to
+
+ https://gerrit.onap.org/r/c/modeling/etsicatalog/+/106074
+
+4) The model msg is error when deleting the vnf package via UUI, the solution is refer to
+
+ https://gerrit.onap.org/r/c/usecase-ui/+/106729
+
+5) Wrong number of services displayed for services-list via UUI, the solution is refer to
+
+ https://gerrit.onap.org/r/c/usecase-ui/+/106719
+
+6) The picture cannot be displayed of ns create model page via UUI, the solution is refer to
+
+ https://gerrit.onap.org/r/c/usecase-ui/+/106715
diff --git a/docs/docs_vFWDT.rst b/docs/docs_vFWDT.rst
index 4eeba7b84..0c13886d2 100755..100644
--- a/docs/docs_vFWDT.rst
+++ b/docs/docs_vFWDT.rst
@@ -1,94 +1,179 @@
.. This work is licensed under a Creative Commons Attribution 4.0
International License. http://creativecommons.org/licenses/by/4.0
-
+
.. _docs_vfw_traffic:
-.. contents::
- :depth: 3
-..
+:orphan:
+
+vFW In-Place Software Upgrade with Traffic Distribution Use Case
+----------------------------------------------------------------
-vFW Traffic Distribution Use Case
----------------------------------
Description
~~~~~~~~~~~
-The purpose of this work is to show Traffic Distribiution functionality implemented in Casablanca and Dublin releases for vFW Use Case.
-The orchstration workflow triggers a change to traffic distribution (redistribution) done by a traffic balancing/distribution entity (aka anchor point).
-The DistributeTraffic action targets the traffic balancing/distribution entity, in some cases DNS, other cases a load balancer external to the VNF instance, as examples.
-Traffic distribution (weight) changes intended to take a VNF instance out of service are completed only when all in-flight traffic/transactions have been completed.
-DistributeTrafficCheck command may be used to verify initial conditions of redistribution or can be used to verify the state of VNFs and redistribution itself.
-To complete the traffic redistribution process, gracefully taking a VNF instance out-of-service/into-service, without dropping in-flight calls or sessions,
-QuiesceTraffic/ResumeTraffic command may need to follow traffic distribution changes. The VNF application remains in an active state.
+The purpose of this work is to show In-Place Software Upgrade Traffic Distribution functionality implemented in Frankfurt release for vFW Use Case.
+The use case is an evolution of vFW Traffic Distribution Use Case which was developed for Casablanca and Dublin releases.
+The orchestration workflow triggers a change of the software on selected instance of the firewall. The change is proceeded with minimization of disruption of the
+service since the firewall being upgraded must have all the traffic migrated out before the upgrade can be started. The traffic migration (redistribution) is done by
+a traffic balancing/distribution entity (aka anchor point). The DistributeTraffic action targets the traffic balancing/distribution entity, in some cases DNS, other cases a load balancer external to the VNF instance, as examples.
+Traffic distribution (weight) changes intended to take a VNF instance out of service are completed only when all in-flight traffic/transactions have been completed.
+DistributeTrafficCheck command may be used to verify initial conditions of redistribution or can be used to verify the state of VNFs and redistribution itself.
+To complete the traffic redistribution process, gracefully taking a VNF instance out-of-service/into-service, without dropping in-flight calls or sessions,
+QuiesceTraffic/ResumeTraffic command may need to follow traffic distribution changes. The upgrade operation consists of the UpgradePreCheck operation, which can be used to verify
+initial conditions for the operation, like the difference of the software version to the one requested; the SoftwareUpgrade operation, which is responsible for modification of the software on the
+selected vFW instance; and the UpgradePostCheck LCM action, which is used to verify if the software was properly installed on vFW. After the completion of the software upgrade the traffic is migrated to the
+instance of the vFW which was being upgraded before. The workflow can also be configured in such a way as to perform only a single migration of the traffic, without an upgrade of the software,
+which allows experimenting with the version of the workflow implemented in the previous releases. All the LCM operations are executed by the APPC controller and they are implemented with the Ansible protocol. In order to avoid inconsistency in the VNFs' state, the Lock/Unlock
+mechanism is used to prevent parallel execution of LCM actions on VNFs that are under maintenance because of the workflow that is currently executed on them.
+The VNF application remains in an active state.
+Traffic Distribution and In-Place Software Upgrade functionality is an outcome of Change Management project. Further details can be found on the following pages
-Traffic Distribution functionality is an outcome of Change Management project. Further details can be found on following pages
+- Frankfurt: https://wiki.onap.org/display/DW/Change+Management+Frankfurt+Extensions (Traffic Distribution workflow enhancements)
-https://wiki.onap.org/display/DW/Change+Management+Extensions (DistributeTraffic LCM and Use Case)
+- Dublin: https://wiki.onap.org/display/DW/Change+Management+Extensions (DistributeTraffic LCM and Use Case)
-https://wiki.onap.org/display/DW/Change+Management+Dublin+Extensions (Distribute Traffic Workflow with Optimization Framework)
+- Casablanca https://wiki.onap.org/display/DW/Change+Management+Dublin+Extensions (Distribute Traffic Workflow with Optimization Framework)
-Test Scenario
-~~~~~~~~~~~~~
+Test Scenarios
+~~~~~~~~~~~~~~
.. figure:: files/dt-use-case.png
:scale: 40 %
:align: center
- Figure 1 The idea of Traffic Distribution Use Case
+ Figure 1 The overview of interaction of components in vFW In-Place Software Upgrade with Traffic Distribution Use Case
-The idea of the simplified scenario presented in the Casablanca release is shown on Figure 1. In a result of the DistributeTraffic LCM action traffic flow originated from vPKG to vFW 1 and vSINK 1 is redirected to vFW 2 and vSINK 2 (as it is seen on Figure 2).
-Result of the change can be observed also on the vSINKs' dashboards which show a current incoming traffic. Observation of the dashboard from vSINK 1 and vSINK 2 proves workflow works properly.
+The main idea of the use case and prepared workflow is to show the interaction of different components of ONAP, including AAI, Policy, OOF, APPC for realization of scenario of software upgrade
+of vFW instance with migration of the traffic in time of its upgrade. vFW instance was modified to have two instances of vFW with dedicated vSINKs. The general idea of interaction of ONAP components
+is shown on Figure 1. Software Upgrade is performed on selected vFW instance. vPKG and the other vFW taking action while migration of the traffic out of vFW being upgraded. In a result of the DistributeTraffic
+LCM action traffic flow originated from vPKG to vFW 1 and vSINK 1 is redirected to vFW 2 and vSINK 2 (as it is seen on Figure 2). Result of the change can be observed also on the vSINKs' dashboards which show
+a current incoming traffic. After migration software is upgraded on the vFW and afterwards the traffic can be migrated back to this vFW instance. Observation of the dashboard from vSINK 1 and vSINK 2 proves workflow works properly.
.. figure:: files/dt-result.png
:scale: 60 %
:align: center
- Figure 2 The result of traffic distribution
+ Figure 2 The result of traffic distribution in time of the upgrade
-The purpose of the work in the Dublin release was to built a Traffic Distribution Workflow that takes as an input configuration parameters delivered by Optimization Framework and on their basis several traffic distribution LCM actions are executed by APPC in the specific workflow.
+The traffic distribution sub-workflow takes as an input configuration parameters delivered by Optimization Framework and on their basis several traffic distribution LCM actions are executed by APPC in the specific workflow.
+Further LCM actions are executed in order to present the idea of vFW In-Place Software Upgrade with Traffic Distribution. In this use case also APPC locking mechanisms is demonstrated, changes in APPC for VNFC level Ansible
+actions support and changes for APPC Ansible automation also are used in the use case. The APPC Ansible automation scripts allows to configure LCM actions without the need to enter the CDT portal, however there is
+possibility to do it manually and documentation describes also how to do it. In the same sense, the upload of policy types and policy instances is automated but the documentation describes how to do it manually.
-.. figure:: files/dt-workflow.png
- :scale: 60 %
+The demonstration scripts can be used to execute two different scenarios:
+
+1. Simple distribution of traffic from selected vFW instance to the other one
+
+2. Upgrade of the software on selected vFW instance. Both are preceded with shared phase of identification of VF-modules for reconfiguration what is done with help of Optimization Framework.
+
+Workflows
+~~~~~~~~~
+
+Whole vFW In-Place Software Upgrade with Traffic Distribution use case can be decomposed into following workflows:
+
+1. High level workflow (simplified workflow on Figure 3 and more detailed on Figure 4)
+
+.. figure:: files/vfwdt-workflow-general.png
+ :scale: 100 %
+ :align: center
+
+ Figure 3 The In-Place Software Upgrade with Traffic Distribution general workflow
+
+* Identification of vFW instances (**I**) for migration of the traffic (source and destination) and identification of the vPKG instance (anchor point) which would be responsible for reconfiguration of the traffic distribution. This operation is performed by the Optimization Framework, the HAS algorithm in particular
+
+* Before any operation is started the workflow Locks (**II-IV**) with APPC all the VNFs involved in the procedure: vFW 1, vFW 2 and vPKG. In fact this is the vFW being upgraded, the vFW which will be used to migrate traffic to, and the vPKG which performs the traffic distribution procedure. The VNFs need to be locked in order to prevent the execution of other LCM actions in time of the whole workflow execution. The workflow checks the state of the Lock on each VNF (**II**)(**1-6**); if the Locks are free (**III**)(**7**) the Locks are being acquired (**IV**)(**8-14**). If any Lock Check or Lock fails (**7, 14**), the workflow is stopped.
+
+* Depending on the workflow type different (Traffic Distribution or In-Place Software Upgrade with Traffic Distribution) LCM action are executed by APPC (**V**). All with Ansible protocol and with VNF and VF-modules identified before by Optimization Framework or the input parameters like selected vFW VNF instance. Workflows are conditional and will not be performed if the preconditions were not satisfied. In case of failure of LCM operation any other actions are canceled.
+
+* At the end workflow Unlocks with APPC the previously Locked VNFs (**VI**)(**15-21**). This operations is performed always even when some steps before were not completed. The purpose is to not leave VNFs in locked state (in maintenance status) as this will prevent future execution of LCM actions or workflows on them. The locks are being automatically released after longer time.
+
+.. figure:: files/vfwdt-general-workflow-sd.png
+ :scale: 80 %
:align: center
- Figure 3 The Traffic Distribution Workflow
+ Figure 4 The In-Place Software Upgrade with Traffic Distribution detailed workflow
+
+2. Identification of VF-modules candidates for migration of traffic (detailed workflow is shown on Figure 5)
-The prepared Traffic Distribution Workflow has following steps:
+.. figure:: files/vfwdt-identification-workflow-sd.png
+ :scale: 80 %
+ :align: center
+
+ Figure 5 Identification of VF-Module candidates for migration of traffic
-- Workflow sends placement request to Optimization Framework (**1**) specific information about the vPKG and vFW-SINK models and VNF-ID of vFW that we want to migrate traffic out from.
- Optimization Framework role is to find the vFW-SINK VNF/VF-module instance where traffic should be migrated to and vPKG which will be associated with this vFW.
+- Workflow sends placement request to Optimization Framework (**1**) specific information about the vPKG and vFW-SINK models and VNF-ID of vFW that we want to upgrade.
+ Optimization Framework role is to find the vFW-SINK VNF/VF-module instance where traffic should be migrated to in time of the upgrade and vPKG which will be associated with this vFW.
   Although in our case the calculation is very simple, the mechanism is ready to work for instances of services with VNF having hundreds of VF-modules spread across different cloud regions.
- Optimization Framework takes from the Policy Framework policies (**2-3**) for VNFs and for relations between each other (in our case there is checked ACTIVE status of vFW-SINK and vPKG VF-modules and the Region to which they belong)
-- Optimization Framework, base on the information from the polcies and service topology information taken from A&AI (**4-11**), offers traffic distribution anchor and destination canidates' pairs (**12-13**) (pairs of VF-modules data with information about their V-Servers and their network interfaces). This information is returned to the workflow script (**14**).
+- Optimization Framework, base on the information from the policies and service topology information taken from A&AI (**4-11**), offers traffic distribution anchor and destination candidates' pairs (**12-13**) (pairs of VF-modules data with information about their V-Servers and their network interfaces). This information is returned to the workflow script (**14**).
+
+- Information from Optimization Framework can be used to construct APPC LCM requests for DistributeTrafficCheck, DistributeTraffic, UpgradePreCheck, SoftwareUpgrade and UpgradePostCheck commands. This information is used to fill CDT templates with proper data for further Ansible playbooks execution. Script generates also here CDT templates for LCM actions which can be uploaded automatically to APPC DB.
+
+3. The Traffic Distribution sub-workflow (simplified workflow on Figure 6 and more detailed on Figure 7)
+
+.. figure:: files/vfwdt-workflow-traffic.png
+ :scale: 100 %
+ :align: center
+
+ Figure 6 The Traffic Distribution general workflow
+
+- In the first DistributeTrafficCheck LCM request on vPGN VNF/VF-Module APPC, over Ansible, checks if the requested destination of vPKG packages is different than the currently configured one (**I-III**)(**1-8**). If not, workflow is stopped (**9**).
+
+- Next, APPC performs the DistributeTraffic action (**IV**)(**10-17**). If operation is completed properly traffic should be redirected to vFW 2 and vSINK 2 instance. If not, workflow is stopped (**18**).
+
+- Finally, APPC executes the DistributeTrafficCheck action (**V**) on vFW 1 in order to verify that it does not receive any traffic anymore (**19-26**) and on vFW 2 in order to verify that it receives traffic forwarded from vPGN (**28-35**). Workflow is stopped with failed state (**37**) if one of those conditions was not satisfied (**27, 36**)
+
+.. figure:: files/vfwdt-td-workflow-sd.png
+ :scale: 80 %
+ :align: center
+
+ Figure 7 The Traffic Distribution detailed workflow
+
+4. The In-Place Software Upgrade with Traffic Distribution sub-workflow (simplified workflow on Figure 8 and more detailed on Figure 9)
+
+.. figure:: files/vfwdt-workflow-upgrade.png
+ :scale: 100 %
+ :align: center
+
+ Figure 8 The In-Place Software Upgrade general workflow
+
+- Firstly, the UpgradePreCheck LCM operation is performed on the selected vFW instance (**I**)(**1-8**). The Ansible script executed by the APPC checks if the software version is different than the one indicated in workflow's input. If it is the same, the workflow is stopped (**9**).
-- Information from Optimization Framework can be used to construct APPC LCM requests for DistributeTrafficCheck and DistributeTraffic commands (**15, 24, 33, 42**). This information is used to fill CDT templates with proper data for further Ansible playbooks execution (**17, 26, 35, 44**)
+- When software of selected vFW instance needs to be upgraded (**II**) then the traffic migration procedure needs to be performed (**III** - see sub-workflow 3). If migration of traffic fails workflow is stopped.
-- In the first DistributeTrafficCheck LCM request on vPGN VNF/VF-Module APPC, over Ansible, checks if already configured destinatrion of vPKG packages is different than already configured. If not workflow is stopped (**23**).
+- Next, APPC performs, over Ansible, the in-place software upgrade procedure. In our case this is a simple refresh of the software packages on the VM in order to simulate some upgrade process. Successful completion of the script should set the version of the software to the one from the upgrade request. If the action fails, workflow is stopped without further rollback (**18**).
-- Next, APPC performs the DistributeTraffic action like it is shown on Figure 1 and Figure 2 (**25-31**). If operation is completed properly traffic should be redirected to vFW 2 and vSINK 2 instance. If not, workflow is stopped (**32**).
+- Afterwards, APPC performs the UpgradePostCheck LCM action (**IV**)(**19-26**). The script verifies if the version of software is the same as the one requested before in the upgrade. If not, workflow is stopped without further rollback (**27**).
-- Finally, APPC executes the DistributeTrafficCheck action on vFW 1 in order to verify that it does not receives any traffic anymore (**34-40**) and on vFW 2 in order to verify that it receives traffic forwarded from vFW 2 (**43-49**)
+- Finally, when the software upgrade is completed, the traffic migration procedure needs to be performed again (**VI**) to migrate traffic back to the previously upgraded vFW instance (see sub-workflow 3). If migration of traffic fails, workflow is stopped and no rollback is performed.
+
+.. figure:: files/vfwdt-upgrade-workflow-sd.png
+ :scale: 80 %
+ :align: center
+
+ Figure 9 The In-Place Software Upgrade detailed workflow
Scenario Setup
--------------
-In order to setup the scenario and to test the DistributeTraffic LCM API in action you need to perform the following steps:
+In order to setup the scenario and to test workflows with APPC LCM APIs in action you need to perform the following steps:
-1. Create an instance of vFWDT (vPKG , 2 x vFW, 2 x vSINK) – dedicated for the DistributeTraffic LCM API tests
+1. Create an instance of vFWDT (vPKG , 2 x vFW, 2 x vSINK) – dedicated for the traffic migration tests
-#. Gather A&AI facts for Traffic Distribution use case configuration
+#. Gather A&AI facts for use case configuration
-#. Install Traffic Distribution workflow packages
+#. Install Software Upgrade and Traffic Distribution workflow packages
-#. Configure Optimization Framework for Traffic Distribution workflow
+#. Configure Optimization Framework for Traffic Distribution candidates gathering
#. Configure vPKG and vFW VNFs in APPC CDT tool
#. Configure Ansible Server to work with vPKG and vFW VMs
-#. Execute Traffic Distribution Workflow
+#. Execute Traffic Distribution or In-Place Upgrade Workflows
You will use the following ONAP K8s VMs or containers:
@@ -98,12 +183,12 @@ You will use the following ONAP K8s VMs or containers:
- APPC Ansible Server container – setup of Ansible Server, configuration of playbook and input parameters for LCM actions
-.. note:: In all occurences <K8S-NODE-IP> constant is the IP address of any K8s Node of ONAP OOM installation which hosts ONAP pods i.e. k8s-node-1 and <K8S-RANCHER-IP> constant is the IP address of K8S Rancher Server
+.. note:: In all occurrences *K8S_NODE_IP* constant is the IP address of any K8s Node of ONAP OOM installation which hosts ONAP pods i.e. k8s-node-1 and *K8S_RANCHER_IP* constant is the IP address of K8S Rancher Server
vFWDT Service Instantiation
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-In order to test a DistributeTraffic LCM API functionality a dedicated vFW instance must be prepared. It differs from a standard vFW instance by having an additional VF-module with a second instance of vFW and a second instance of vSINK. Thanks to that when a service instance is deployed there are already available two instances of vFW and vSINK that can be used for verification of DistributeTraffic LCM API – there is no need to use the ScaleOut function to test DistributeTraffic functionality what simplifies preparations for tests.
+In order to test workflows a dedicated vFW instance must be prepared. It differs from a standard vFW instance by having an additional VF-module with a second instance of vFW and a second instance of vSINK. Thanks to that when a service instance is deployed there are already available two instances of vFW and vSINK that can be used for migration of traffic from one vFW instance to the other one – there is no need to use the ScaleOut function to test workflows, which simplifies preparations for tests.
In order to instantiate vFWDT service please follow the procedure for standard vFW with following changes. You can create such service manually or you can use robot framework. For manual instantiation:
@@ -111,13 +196,13 @@ In order to instantiate vFWDT service please follow the procedure for standard v
https://github.com/onap/demo/tree/master/heat/vFWDT
-2. Create Virtual Service in SDC with composition like it is shown on Figure 3
+2. Create Virtual Service in SDC with composition like it is shown on Figure 10
.. figure:: files/vfwdt-service.png
:scale: 60 %
:align: center
- Figure 3 Composition of vFWDT Service
+ Figure 10 Composition of vFWDT Service
3. Use the following payload files in the SDNC-Preload phase during the VF-Module instantiation
@@ -127,15 +212,15 @@ https://github.com/onap/demo/tree/master/heat/vFWDT
- :download:`vFW/SNK 2 preload example <files/vfw-2-preload.json>`
-.. note:: Use publikc-key that is a pair for private key files used to log into ONAP OOM Rancher server. It will simplify further configuration
+.. note:: Use public-key that is a pair for private key files used to log into ONAP OOM Rancher server. It will simplify further configuration
-.. note:: vFWDT has a specific configuration of the networks – different than the one in original vFW use case (see Figure 4). Two networks must be created before the heat stack creation: *onap-private* network (10.0.0.0/16 typically) and *onap-external-private* (e.g. "10.100.0.0/16"). The latter one should be connected over a router to the external network that gives an access to VMs. Thanks to that VMs can have a floating IP from the external network assigned automatically in a time of stacks' creation. Moreover, the vPKG heat stack must be created before the vFW/vSINK stacks (it means that the VF-module for vPKG must be created as a first one). The vPKG stack creates two networks for the vFWDT use case: *protected* and *unprotected*; so these networks must be present before the stacks for vFW/vSINK are created.
+.. note:: vFWDT has a specific configuration of the networks – different than the one in original vFW use case (see Figure 11). Two networks must be created before the heat stack creation: *onap-private* network (10.0.0.0/16 typically) and *onap-external-private* (e.g. "10.100.0.0/16"). The latter one should be connected over a router to the external network that gives an access to VMs. Thanks to that VMs can have a floating IP from the external network assigned automatically in a time of stacks' creation. Moreover, the vPKG heat stack must be created before the vFW/vSINK stacks (it means that the VF-module for vPKG must be created as a first one). The vPKG stack creates two networks for the vFWDT use case: *protected* and *unprotected*; so these networks must be present before the stacks for vFW/vSINK are created.
.. figure:: files/vfwdt-networks.png
:scale: 15 %
:align: center
- Figure 4 Configuration of networks for vFWDT service
+ Figure 11 Configuration of networks for vFWDT service
4. Go to *robot* folder in Rancher server (being *root* user)
@@ -165,12 +250,12 @@ Go to the Rancher node and locate *demo-k8s.sh* script in *oom/kubernetes/robot*
::
./demo-k8s.sh onap init
- ./ete-k8s.sh onap instantiateVFWDT
+ ./ete-k8s.sh onap instantiateVFWDTGRA
-.. note:: You can verify the status of robot's service instantiation process by going to http://<K8S-NODE-IP>:30209/logs/ (login/password: test/test)
+.. note:: You can verify the status of robot's service instantiation process by going to https://K8S_NODE_IP:30209/logs/ (login/password: test/test)
-After successful instantiation of vFWDT service go to the OpenStack dashboard and project which is configured for VNFs deployment and locate vFWDT VMs. Choose one and try to ssh into one them to proove that further ansible configuration action will be possible
+After successful instantiation of vFWDT service go to the OpenStack dashboard and the project which is configured for VNFs deployment and locate vFWDT VMs. Choose one and try to ssh into it to prove that further ansible configuration action will be possible
::
@@ -192,7 +277,7 @@ Preparation of Workflow Script Environment
::
- git clone --single-branch --branch dublin "https://gerrit.onap.org/r/demo"
+ git clone --single-branch --branch frankfurt "https://gerrit.onap.org/r/demo"
3. Enter vFWDT tutorial directory
@@ -206,7 +291,7 @@ what should show following folders
::
root@sb01-rancher:~/demo/tutorials/vFWDT# ls
- playbooks preloads workflow
+ get_secret.sh playbooks policies preloads workflow
.. note:: Remember vFWDT tutorial directory `~/demo/tutorials/vFWDT` for the further use
@@ -220,20 +305,20 @@ what should show following folders
Gathering Scenario Facts
------------------------
-In order to configure CDT tool for execution of Ansible playbooks and for execution of Traffic distribution workflow we need following A&AI facts for vFWDT service
+In order to configure CDT tool for execution of Ansible playbooks and for execution of workflows we need following A&AI facts for vFWDT service
- **vnf-id** of generic-vnf vFW instance that we want to migrate traffic out from
- **vnf-type** of vPKG VNF - required to configure CDT for Distribute Traffic LCMs
-- **vnf-type** of vFW-SINK VNFs - required to configure CDT for Distribute Traffic LCMs
+- **vnf-type** of vFW-SINK VNFs - required to configure CDT for Distribute Traffic and Software Upgrade LCMs
Gathering facts from VID Portal
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1. Enter the VID portal
-::
-
- https://<K8S-NODE-IP>:30200/vid/welcome.htm
+::
+
+ https://K8S_NODE_IP:30200/vid/welcome.htm
2. In the left hand menu enter **Search for Existing Service Instances**
@@ -247,24 +332,24 @@ Gathering facts from VID Portal
:scale: 60 %
:align: center
- Figure 5 vnf-type and vnf-id for vPKG VNF
+ Figure 12 vnf-type and vnf-id for vPKG VNF
.. figure:: files/vfwdt-vid-vnf-1.png
:scale: 60 %
:align: center
- Figure 6 vnf-type and vnf-id for vFW-SINK 1 VNF
+ Figure 13 vnf-type and vnf-id for vFW-SINK 1 VNF
.. figure:: files/vfwdt-vid-vnf-2.png
:scale: 60 %
:align: center
- Figure 7 vnf-type and vnf-id for vFW-SINK 2 VNF
+ Figure 14 vnf-type and vnf-id for vFW-SINK 2 VNF
Gathering facts directly from A&AI
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-1. Enter OpenStack dashboard on whicvh vFWDT instance was created and got to **Project->Compute->Instances** and read VM names of vPKG VM and 2 vFW VMs created in vFWDT service instance
+1. Enter OpenStack dashboard on which vFWDT instance was created and go to **Project->Compute->Instances** and read VM names of vPKG VM and 2 vFW VMs created in vFWDT service instance
2. Open Postman or any other REST client
@@ -278,7 +363,7 @@ Gathering facts directly from A&AI
::
- https://<K8S-NODE-IP>:30233/aai/v14/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/
+ https://K8S_NODE_IP:30233/aai/v14/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/
.. note:: *CloudOwner* and *Region* names are fixed for default setup of ONAP
@@ -286,17 +371,17 @@ Gathering facts directly from A&AI
::
- https://<K8S-NODE-IP>:30233/aai/v14/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/<tenant-id>/vservers/?vserver-name=<vm-name>
+ https://K8S_NODE_IP:30233/aai/v14/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/<tenant-id>/vservers/?vserver-name=<vm-name>
-Read from the response (realtionship with *generic-vnf* type) vnf-id of vPKG VNF
+Read from the response (relationship with *generic-vnf* type) vnf-id of vPKG VNF
-.. note:: If you do not receive any vserver candidate it means that heatbridge procedure was not performed or was not completed successfuly. It is mandatory to continue this tutorial
+.. note:: If you do not receive any vserver candidate it means that heatbridge procedure was not performed or was not completed successfully. It is mandatory to continue this tutorial
8. Create new GET query for *generic-vnf* type with following link replacing <vnf-id> with value read from previous GET response
::
- https://<K8S-NODE-IP>:30233/aai/v14/network/generic-vnfs/generic-vnf/<vnf-id>
+ https://K8S_NODE_IP:30233/aai/v14/network/generic-vnfs/generic-vnf/<vnf-id>
9. Repeat this procedure also for 2 vFW VMs and note their *vnf-type* and *vnf-id*
@@ -306,110 +391,54 @@ This sections show the steps necessary to configure Policies, CDT and Ansible se
Configuration of Policies for Optimization Framework
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-We need to enter the Policy editor in order to upload policy types and then the policy rules for the demo. The polcies are required for the Optimization Framework and they guide OOF how to determine
+We need to upload necessary optimization policy rules required for the demo. The policies are required for the Optimization Framework and they guide OOF how to determine
vFW and vPGN instances used in the Traffic Distribution workflow.
-1. Enter the Policy portal
-
-Specify *demo*:*demo* as a login and password
-
-::
-
- https://<K8S-NODE-IP>:30219/onap/login.htm
-
-From the left side menu enter *Dictionary* section and from the combo boxes select *MicroService Policy* and *MicroService Models* respectively. Below you can see the result.
-
-.. figure:: files/vfwdt-policy-type-list.png
- :scale: 70 %
- :align: center
-
- Figure 8 List of MicroService policy types in the Policy portal
-
-2. Upload the policy types
-
-Before policy rules for Traffic Distribution can be uploaded we need to create policy types to store these rules. For that we need to create following three types:
-
-- VNF Policy - it used to filter vf-module instances i.e. base on their attributes from the AAI like *provStatus*, *cloudRegionId* etc.
-- Query Policy - it is used to declare extra inpt parameters for OOF placement request - in our case we need to specify cloud region name
-- Affinity Policy - it is used to specify the placement rule used for selection vf-module candiate pairs of vFW vf-module instance (traffic destination) and vPGN vf-module instance (anchor point). In this case the match is done by belonging to the same cloud region
-
-Enter vFWDT tutorial directory on Rancher server (already created in `Preparation of Workflow Script Environment`_) and create policy types from the following files
-
-::
-
- root@sb01-rancher:~/demo/tutorials/vFWDT# ls policies/types/
- affinityPolicy-v20181031.yml queryPolicy-v20181031.yml vnfPolicy-v20181031.yml
-
-For each file press *Create* button, choose the policy type file, select the *Micro Service Option* (always one available) and enter the *Version* which must be the same like the one specified for policy instances. In this case pass value *OpenSource.version.1*
-
-.. figure:: files/vfwdt-add-micro-service-policy.png
- :scale: 70 %
- :align: center
-
- Figure 9 Creation of new MicroService policy type for OOF
-
-In a result you should see in the dictionary all three new types of policies declared
-
-.. figure:: files/vfwdt-completed-policy-type-list.png
- :scale: 70 %
- :align: center
-
- Figure 10 Completed list of MicroService policy types in the Policy portal
-
-3. Push the policies into the PDP
+1. Push the policies into the PDP
-In order to push policies into the PDP it is required to execute already prepared *uploadPolicies.sh* script that builds policy creation/update requests and automatically sends them to the Policy PDP pod
+In order to push policies into the PDP it is required to execute already prepared *uploadPolicies.sh* script that prepares policy upload requests and automatically sends them to the Policy PDP pod
::
root@sb01-rancher:~/demo/tutorials/vFWDT# ls policies/rules/
- QueryPolicy_vFW_TD.json affinity_vFW_TD.json uploadPolicies.sh vnfPolicy_vFW_TD.json vnfPolicy_vPGN_TD.json
+ QueryPolicy_vFW_TD.json affinity_vFW_TD.json uploadPolicies.sh dt-policies.sh vnfPolicy_vFW_TD.json vnfPolicy_vPGN_TD.json
-When necessary, you can modify policy json files. Script will read these files and will build new PDP requests based on them. To create new policies execute script in the following way
+When necessary, you can modify policy json files. Script will read these files and will build new PDP requests based on them. To create or update policies execute the script in the following way
::
./policies/rules/uploadPolicies.sh
-To update existing policies execute script with an extra argument
-
-::
-
- ./policies/rules/uploadPolicies.sh U
-
-The result can be verified in the Policy portal, in the *Editor* section, after entering *OSDF_DUBLIN* directory
-
-.. figure:: files/vfwdt-policy-editor-osdf-dublin.png
- :scale: 70 %
- :align: center
-
- Figure 11 List of policies for OOF and vFW traffic distribution
-
Testing Gathered Facts on Workflow Script
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Having collected *vnf-id* and *vnf-type* parameters we can execute Traffic Distribution Workflow Python script. It works in two modes. First one executes ony initial phase where AAI and OOF
+Having collected *vnf-id* and *vnf-type* parameters we can execute Traffic Distribution Workflow Python script. It works in two modes. First one executes only the initial phase where AAI and OOF
 is used to collect necessary information for configuration of APPC and for further execution phase. The second mode performs also the second phase which executes APPC LCM actions.
At this stage we will execute script in the initial mode to generate some configuration helpful in CDT and Ansible configuration.
-1. Enter vFWDT tutorial directory on Rancher server (already created in `Preparation of Workflow Script Environment`_) and execute there workflow script with follwoing parameters
+1. Enter vFWDT tutorial directory on Rancher server (already created in `Preparation of Workflow Script Environment`_). In the *workflow* folder you can find the workflow script used to gather necessary configuration and responsible for execution of the LCM actions. It has the following syntax
::
- python3 workflow.py <VNF-ID> <K8S-NODE-IP> True False True True
+ python3 workflow.py <VNF-ID> <RANCHER_NODE_IP> <K8S_NODE_IP> <IF-CACHE> <IF-VFWCL> <INITIAL-ONLY> <CHECK-STATUS> <VERSION>
+
+- <VNF-ID> - vnf-id of vFW VNF instance that traffic should be migrated out from
+- <RANCHER_NODE_IP> - External IP of ONAP Rancher Node i.e. 10.12.5.160 (If Rancher Node is missing this is NFS node)
+- <K8S_NODE_IP> - External IP of ONAP K8s Worker Node i.e. 10.12.5.212
+- <IF-CACHE> - If script should use and build OOF response cache (the cache speeds up further executions of the script)
+- <IF-VFWCL> - If instead of vFWDT service instance vFW or vFWCL one is used (should be False always)
+- <INITIAL-ONLY> - If only configuration information will be collected (True for initial phase and False for full execution of workflow)
+- <CHECK-STATUS> - If APPC LCM action status should be verified and FAILURE should stop workflow (when False FAILED status of LCM action does not stop execution of further LCM actions)
+- <VERSION> - New version of vFW - for tests '1.0' or '2.0'. Ignore when you want to test traffic distribution workflow
-For now and for further use workflow script has following input parameters:
+2. Execute there workflow script with following parameters
-- vnf-id of vFW VNF instance that traffic should be migrated out from
-- External IP of ONAP Rancher Node i.e. 10.12.5.160 (If Rancher Node is missing this is NFS node)
-- External IP of ONAP K8s Worker Node i.e. 10.12.5.212
-- if script should use and build OOF response cache (cache it speed-ups further executions of script)
-- if instead of vFWDT service instance vFW or vFWCL one is used (should be False always)
-- if only configuration information will be collected (True for initial phase and False for full execution of workflow)
-- if APPC LCM action status should be verified and FAILURE should stop workflow (when False FAILED status of LCM action does not stop execution of further LCM actions)
+::
+
+ python3 workflow.py <VNF-ID> <RANCHER_NODE_IP> <K8S_NODE_IP> True False True True 2.0
-2. The script at this stage should give simmilar output
+3. The script at this stage should give similar output
::
@@ -417,6 +446,10 @@ For now and for further use workflow script has following input parameters:
OOF Cache True, is CL vFW False, only info False, check LCM result True
+ New vFW software version 2.0
+
+ Starting OSDF Response Server...
+
vFWDT Service Information:
{
"vf-module-id": "0dce0e61-9309-449a-8e3e-f001635aaab1",
@@ -446,19 +479,21 @@ For now and for further use workflow script has following input parameters:
vofwl01vfw4407 ansible_ssh_host=10.0.110.1 ansible_ssh_user=ubuntu
vofwl02vfw4407 ansible_ssh_host=10.0.110.4 ansible_ssh_user=ubuntu
-The result should have almoast the same information for *vnf-id's* of both vFW VNFs. *vnf-type* for vPKG and vFW VNFs should be the same like those collected in previous steps.
-Ansible Inventory section contains information about the content Ansible Inventor file that will be configured later on `Configuration of Ansible Server`_
+The result should have almost the same information for *vnf-id's* of both vFW VNFs. *vnf-type* for vPKG and vFW VNFs should be the same as those collected in previous steps.
+Ansible Inventory section contains information about the content of the Ansible Inventory file that will be configured later on `Configuration of Ansible Server`_. The first phase of the workflow script will generate also the CDT artifacts which can be used for automatic configuration of the CDT tool - they can be ignored for manual CDT configuration.
Configuration of VNF in the APPC CDT tool
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. note:: Automated procedure can be found at the end of the section
+
Following steps aim to configure DistributeTraffic LCM action for our vPKG and vFW-SINK VNFs in APPC CDT tool.
1. Enter the Controller Design Tool portal
::
- https://<K8S-NODE-IP>:30289/index.html
+ https://K8S_NODE_IP:30289/index.html
2. Click on *MY VNFS* button and login to CDT portal giving i.e. *demo* user name
@@ -468,7 +503,7 @@ Following steps aim to configure DistributeTraffic LCM action for our vPKG and v
:scale: 70 %
:align: center
- Figure 12 Creation of new VNF type in CDT
+ Figure 15 Creation of new VNF type in CDT
4. Enter previously retrieved VNF Type for vPKG VNF and press the *NEXT* button
@@ -476,7 +511,7 @@ Following steps aim to configure DistributeTraffic LCM action for our vPKG and v
:scale: 70 %
:align: center
- Figure 13 Creation of new VNF type in CDT
+ Figure 16 Creation of new VNF type in CDT
5. For already created VNF Type (if the view does not open itself) click the *View/Edit* button. In the LCM action edit view in the first tab please choose:
@@ -495,48 +530,64 @@ Following steps aim to configure DistributeTraffic LCM action for our vPKG and v
:scale: 70 %
:align: center
- Figure 14 DistributeTraffic LCM action editing
+ Figure 17 DistributeTraffic LCM action editing
+
+6. Go to the *Template* tab and in the editor paste the request template of LCM actions for vPKG VNF type
-6. Go to the *Template* tab and in the editor paste the request template of the DistributeTraffic LCM action for vPKG VNF type
+For DistributeTraffic and DistributeTrafficCheck LCMs
::
{
"InventoryNames": "VM",
- "PlaybookName": "${()=(book_name)}",
- "NodeList": [{
- "vm-info": [{
- "ne_id": "${()=(ne_id)}",
- "fixed_ip_address": "${()=(fixed_ip_address)}"
- }],
- "site": "site",
- "vnfc-type": "vpgn"
- }],
+ "PlaybookName": "${book_name}",
+ "AutoNodeList": true,
"EnvParameters": {
"ConfigFileName": "../traffic_distribution_config.json",
+ "vnf_instance": "vfwdt"
+ },
+ "FileParameters": {
+ "traffic_distribution_config.json": "${file_parameter_content}"
+ },
+ "Timeout": 3600
+ }
+
+
+For UpgradeSoftware, UpgradePreCheck and UpgradePostCheck LCMs
+
+::
+
+ {
+ "InventoryNames": "VM",
+ "PlaybookName": "${book_name}",
+ "AutoNodeList": true,
+ "EnvParameters": {
+ "ConfigFileName": "../config.json",
"vnf_instance": "vfwdt",
+ "new_software_version": "${new-software-version}",
+ "existing_software_version": "${existing-software-version}"
},
"FileParameters": {
- "traffic_distribution_config.json": "${()=(file_parameter_content)}"
+ "config.json": "${file_parameter_content}"
},
"Timeout": 3600
}
-.. note:: For all this VNF types and for all actions CDT template is the same except **vnfc-type** parameter that for vPKG VNF type should have value *vpgn* and for vFW-SINK VNF type should have value *vfw-sink*
The meaning of selected template parameters is following:
- **EnvParameters** group contains all the parameters that will be passed directly to the Ansible playbook during the request's execution. *vnf_instance* is an obligatory parameter for VNF Ansible LCMs. In our case for simplification it has predefined value
-- **InventoryNames** parameter is obligatory if you want to have NodeList with limited VMs or VNFCs that playbook should be executed on. It can have value *VM* or *VNFC*. In our case *VM* valuye means that NodeList will have information about VMs on which playbook should be executed. In this use case this is always only one VM
-- **NodeList** parameter value must match the group of VMs like it was specified in the Ansible inventory file. *PlaybookName* must be the same as the name of playbook that was uploaded before to the Ansible server.
-- **FileParameters**
+- **InventoryNames** parameter is obligatory if you want to have NodeList with limited VMs or VNFCs that playbook should be executed on. It can have value *VM* or *VNFC*. In our case *VM* value means that NodeList will have information about VMs on which playbook should be executed. In this use case this is always only one VM
+- **AutoNodeList** parameter set to True indicates that the template does not need the specific NodeList section and it will be generated automatically based on information from AAI - this requires proper data in the vserver and vnfc objects associated with VNFs
+- **PlaybookName** must be the same as the name of playbook that was uploaded before to the Ansible server.
+- **FileParameters** section contains information about the configuration files with their content necessary to execute the playbook
.. figure:: files/vfwdt-create-template.png
:scale: 70 %
:align: center
- Figure 15 LCM DistributeTraffic request template
+ Figure 18 LCM DistributeTraffic request template
7. Afterwards press the *SYNCHRONIZE WITH TEMPLATE PARAMETERS* button. You will be moved to the *Parameter Definition* tab. The new parameters will be listed there.
@@ -544,17 +595,27 @@ The meaning of selected template parameters is following:
:scale: 70 %
:align: center
- Figure 16 Summary of parameters specified for DistributeTraffic LCM action.
+ Figure 19 Summary of parameters specified for DistributeTraffic LCM action.
.. note:: For each parameter you can define its: mandatory presence; default value; source (Manual/A&AI). For our case modification of this settings is not necessary
8. Finally, go back to the *Reference Data* tab and click *SAVE ALL TO APPC*.
-.. note:: Remember to configure DistributeTraffic and DistributeTrafficCheck actions for vPKG VNF type and DistributeTrafficCheck action for vFW-SINK
+.. note:: Remember to configure DistributeTraffic and DistributeTrafficCheck actions for vPKG VNF type and UpgradeSoftware, UpgradePreCheck, UpgradePostCheck and DistributeTrafficCheck actions for vFW-SINK
+
+9. Configuration of CDT tool is also automated and all steps above can be repeated with script *configure_ansible.sh*
+
+Enter vFWDT tutorial directory `Preparation of Workflow Script Environment`_ on Rancher server, make sure that *onap.pem* file is in *playbooks* directory and run
+
+::
+
+ ./playbooks/configure_ansible.sh
Configuration of Ansible Server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. note:: Automated procedure can be found at the end of the section
+
After an instantiation of the vFWDT service the Ansible server must be configured in order to allow it a reconfiguration of vPKG VM.
1. Copy from Rancher server private key file used for vFWDT VMs' creation and used for access to Rancher server into the :file:`/opt/ansible-server/Playbooks/onap.pem` file
@@ -579,7 +640,7 @@ After an instantiation of the vFWDT service the Ansible server must be configure
chmod 400 onap.pem
chown ansible:ansible onap.pem
-4. Edit the :file:`/opt/ansible-server/Playbooks/Ansible\ \_\ inventory` file including all the hosts of vFWDT service instance used in this use case.
+4. Edit the :file:`/opt/ansible-server/Playbooks/Ansible\ \_\ inventory` file including all the hosts of vFWDT service instance used in this use case.
The content of the file is generated by workflow script `Testing Gathered Facts on Workflow Script`_
::
@@ -605,17 +666,17 @@ After an instantiation of the vFWDT service the Ansible server must be configure
private_key_file = /opt/ansible-server/Playbooks/onap.pem
-.. note:: This is the default privaye key file. In the `/opt/ansible-server/Playbooks/Ansible\ \_\ inventory` different key could be configured but APPC in time of execution of playbbok on Ansible server creates its own dedicated inventory file which does not have private key file specified. In consequence, this key file configured is mandatory for proper execution of playbooks by APPC
+.. note:: This is the default private key file. In the `/opt/ansible-server/Playbooks/Ansible\ \_\ inventory` different key could be configured but APPC in time of execution of playbook on Ansible server creates its own dedicated inventory file which does not have private key file specified. In consequence, this key file configured is mandatory for proper execution of playbooks by APPC
-6. Test that the Ansible server can access over ssh vFWDT hosts configured in the ansible inventory
+6. Test that the Ansible server can access over ssh vFWDT hosts configured in the ansible inventory
::
ansible –i Ansible_inventory vpgn,vfw-sink –m ping
-7. Download the distribute traffic playbook into the :file:`/opt/ansible-server/Playbooks` directory
+7. Download the LCM playbooks into the :file:`/opt/ansible-server/Playbooks` directory
Exit Ansible server pod and enter vFWDT tutorial directory `Preparation of Workflow Script Environment`_ on Rancher server. Afterwards, copy playbooks into Ansible server pod
@@ -624,13 +685,15 @@ Exit Ansible server pod and enter vFWDT tutorial directory `Preparation of Workf
sudo kubectl cp playbooks/vfw-sink onap/`kubectl get pods -o go-template --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' | grep appc-ansible`:/opt/ansible-server/Playbooks/
sudo kubectl cp playbooks/vpgn onap/`kubectl get pods -o go-template --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' | grep appc-ansible`:/opt/ansible-server/Playbooks/
-8. After the configuration of Ansible serverthe structure of `/opt/ansible-server/Playbooks` directory should be following
+8. Configuration of ansible server is also automated and all steps above can be repeated with script *configure_ansible.sh* introduced in the previous section
+
+9. After the configuration of Ansible server with script the structure of `/opt/ansible-server/Playbooks` directory should be following
::
/opt/ansible-server/Playbooks $ ls -R
.:
- Ansible_inventory onap.pem vfw-sink vpgn
+ ansible.cfg Ansible_inventory configure_ansible.sh onap.pem server.py upgrade.sh vfw-sink vpgn
./vfw-sink:
latest
@@ -639,11 +702,20 @@ Exit Ansible server pod and enter vFWDT tutorial directory `Preparation of Workf
ansible
./vfw-sink/latest/ansible:
- distributetrafficcheck
+ distributetrafficcheck upgradepostcheck upgradeprecheck upgradesoftware
./vfw-sink/latest/ansible/distributetrafficcheck:
site.yml
+ ./vfw-sink/latest/ansible/upgradepostcheck:
+ site.yml
+
+ ./vfw-sink/latest/ansible/upgradeprecheck:
+ site.yml
+
+ ./vfw-sink/latest/ansible/upgradesoftware:
+ site.yml
+
./vpgn:
latest
@@ -651,7 +723,7 @@ Exit Ansible server pod and enter vFWDT tutorial directory `Preparation of Workf
ansible
./vpgn/latest/ansible:
- distributetraffic distributetrafficcheck
+ distributetraffic distributetrafficcheck
./vpgn/latest/ansible/distributetraffic:
site.yml
@@ -663,55 +735,72 @@ Exit Ansible server pod and enter vFWDT tutorial directory `Preparation of Workf
Configuration of APPC DB for Ansible
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. note:: Automated procedure can be found at the end of the section
+
For each VNF that uses the Ansible protocol you need to configure *PASSWORD* and *URL* field in the *DEVICE_AUTHENTICATION* table. This step must be performed after configuration in CDT which populates data in *DEVICE_AUTHENTICATION* table.
-1. Enter the APPC DB container
+1. Read APPC DB password
+
+Enter vFWDT tutorial directory `Preparation of Workflow Script Environment`_ on Rancher server.
+
+::
+
+ ./get_secret.sh `kubectl get secrets | grep appc-db-root-pass`
+
+2. Enter the APPC DB container
::
kubectl exec -it -n onap `kubectl get pods -o go-template --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' | grep appc-db-0` -- sh
-2. Enter the APPC DB CLI (password is *gamma*)
+3. Enter the APPC DB CLI
::
- mysql -u sdnctl -p
+ mysql -u root -p
-3. Execute the following SQL commands
+4. Execute the following SQL commands
::
MariaDB [(none)]> use sdnctl;
- MariaDB [sdnctl]> UPDATE DEVICE_AUTHENTICATION SET URL = 'http://appc-ansible-server:8000/Dispatch' WHERE ACTION LIKE 'DistributeTraffic%';
- MariaDB [sdnctl]> UPDATE DEVICE_AUTHENTICATION SET PASSWORD = 'admin' WHERE ACTION LIKE 'DistributeTraffic%';
- MariaDB [sdnctl]> select * from DEVICE_AUTHENTICATION;
+ MariaDB [sdnctl]> UPDATE DEVICE_AUTHENTICATION SET URL = 'http://appc-ansible-server:8000/Dispatch' WHERE PROTOCOL LIKE 'ANSIBLE' AND URL IS NULL;
+ MariaDB [sdnctl]> UPDATE DEVICE_AUTHENTICATION SET PASSWORD = 'admin' WHERE PROTOCOL LIKE 'ANSIBLE' AND PASSWORD IS NULL;
+ MariaDB [sdnctl]> select * from DEVICE_AUTHENTICATION WHERE PROTOCOL LIKE 'ANSIBLE';
-Result should be simmilar to the following one:
+Result should be similar to the following one:
::
+--------------------------+------------------------------------------------------+----------+------------------------+-----------+----------+-------------+------------------------------------------+
| DEVICE_AUTHENTICATION_ID | VNF_TYPE | PROTOCOL | ACTION | USER_NAME | PASSWORD | PORT_NUMBER | URL |
+--------------------------+------------------------------------------------------+----------+------------------------+-----------+----------+-------------+------------------------------------------+
- | 137 | vFWDT 2019-05-20 21:10:/vFWDT_vPKG a646a255-9bee 0 | ANSIBLE | DistributeTraffic | admin | admin | 8000 | http://appc-ansible-server:8000/Dispatch |
- | 143 | vFWDT 2019-05-20 21:10:/vFWDT_vFWSNK b463aa83-b1fc 0 | ANSIBLE | DistributeTraffic | admin | admin | 8000 | http://appc-ansible-server:8000/Dispatch |
- | 149 | vFWDT 2019-05-20 21:10:/vFWDT_vFWSNK b463aa83-b1fc 0 | ANSIBLE | DistributeTrafficCheck | admin | admin | 8000 | http://appc-ansible-server:8000/Dispatch |
- | 152 | vFWDT 2019-05-20 21:10:/vFWDT_vPKG a646a255-9bee 0 | ANSIBLE | DistributeTrafficCheck | admin | admin | 8000 | http://appc-ansible-server:8000/Dispatch |
+ | 118 | vFWDT 2020-04-21 17-26-/vFWDT_vFWSNK 1faca5b5-4c29 1 | ANSIBLE | DistributeTrafficCheck | admin | admin | 8000 | http://appc-ansible-server:8000/Dispatch |
+ | 121 | vFWDT 2020-04-21 17-26-/vFWDT_vFWSNK 1faca5b5-4c29 1 | ANSIBLE | UpgradeSoftware | admin | admin | 8000 | http://appc-ansible-server:8000/Dispatch |
+ | 124 | vFWDT 2020-04-21 17-26-/vFWDT_vFWSNK 1faca5b5-4c29 1 | ANSIBLE | UpgradePreCheck | admin | admin | 8000 | http://appc-ansible-server:8000/Dispatch |
+ | 127 | vFWDT 2020-04-21 17-26-/vFWDT_vFWSNK 1faca5b5-4c29 1 | ANSIBLE | UpgradePostCheck | admin | admin | 8000 | http://appc-ansible-server:8000/Dispatch |
+ | 133 | vFWDT 2020-04-21 17-26-/vFWDT_vPKG 8021eee9-3a8f 0 | ANSIBLE | DistributeTraffic | admin | admin | 8000 | http://appc-ansible-server:8000/Dispatch |
+ | 136 | vFWDT 2020-04-21 17-26-/vFWDT_vPKG 8021eee9-3a8f 0 | ANSIBLE | DistributeTrafficCheck | admin | admin | 8000 | http://appc-ansible-server:8000/Dispatch |
+--------------------------+------------------------------------------------------+----------+------------------------+-----------+----------+-------------+------------------------------------------+
- 4 rows in set (0.00 sec)
+ 6 rows in set (0.00 sec)
+
+5. Configuration of APPC DB is also automated and all steps above can be repeated with script *configure_ansible.sh* introduced in the previous sections
-Testing Traffic Distribution Workflow
--------------------------------------
-Since all the configuration of components of ONAP is already prepared it is possible to enter second phase of Traffic Distribution Workflow execution -
-the execution of DistributeTraffic and DistributeTrafficCheck LCM actions with configuration resolved before by OptimizationFramework.
+Testing Workflows
+-----------------
+
+Since all the configuration of components of ONAP is already prepared it is possible to enter second phase of workflows execution -
+the execution of APPC LCM actions with configuration resolved before by OptimizationFramework.
Workflow Execution
~~~~~~~~~~~~~~~~~~
-In order to run Traffic Distribution Workflow execute following commands from the vFWDT tutorial directory `Preparation of Workflow Script Environment`_ on Rancher server.
+In order to run workflows execute following commands from the vFWDT tutorial directory `Preparation of Workflow Script Environment`_ on Rancher server.
+
+For Traffic Distribution workflow run
::
@@ -719,65 +808,83 @@ In order to run Traffic Distribution Workflow execute following commands from th
python3 workflow.py 909d396b-4d99-4c6a-a59b-abe948873303 10.12.5.217 10.12.5.63 True False False True
-The order of executed LCM actions is following:
+The order of executed LCM actions for Traffic Distribution workflow is following:
-1. DistributeTrafficCheck on vPKG VM - ansible playbook checks if traffic destinations specified by OOF is not configued in the vPKG and traffic does not go from vPKG already.
- If vPKG send alreadyt traffic to destination the playbook will fail and workflow will break.
-2. DistributeTraffic on vPKG VM - ansible playbook reconfigures vPKG in order to send traffic to destination specified before by OOF. When everything is fine at this stage
- change of the traffic should be observed on following dashboards (please turn on automatic reload of graphs)
+1. CheckLock on vPKG, vFW-1 and vFW-2 VMs
+2. Lock on vPKG, vFW-1 and vFW-2 VMs
+3. DistributeTrafficCheck on vPKG VM - ansible playbook checks if traffic destinations specified by OOF are not configured in the vPKG and traffic does not go from vPKG already.
   If vPKG already sends traffic to the destination, the playbook will fail and the workflow will break.
+4. DistributeTraffic on vPKG VM - ansible playbook reconfigures vPKG in order to send traffic to destination specified before by OOF.
+5. DistributeTrafficCheck on vFW-1 VM - ansible playbook checks if traffic is not present on vFW from which traffic should be migrated out. If traffic is still present after 30 seconds playbook fails
+6. DistributeTrafficCheck on vFW-2 VM - ansible playbook checks if traffic is present on vFW to which traffic should be migrated. If traffic is still not present after 30 seconds playbook fails
+7. Unlock on vPKG, vFW-1 and vFW-2 VMs
- ::
-
- http://<vSINK-1-IP>:667/
- http://<vSINK-2-IP>:667/
-3. DistributeTrafficCheck on vFW-1 VM - ansible playbook checks if traffic is not present on vFW from which traffic should be migrated out. If traffic is still present after 30 seconds playbook fails
-4. DistributeTrafficCheck on vFW-2 VM - ansible playbook checks if traffic is present on vFW from which traffic should be migrated out. If traffic is still not present after 30 seconds playbook fails
+For In-Place Software Upgrade with Traffic Distribution workflow run
+::
+
+ cd workflow
+ python3 workflow.py 909d396b-4d99-4c6a-a59b-abe948873303 10.12.5.217 10.12.5.63 True False False True 2.0
+
+
+The order of executed LCM actions for In-Place Software Upgrade with Traffic Distribution workflow is following:
+
+1. CheckLock on vPKG, vFW-1 and vFW-2 VMs
+2. Lock on vPKG, vFW-1 and vFW-2 VMs
+3. UpgradePreCheck on vFW-1 VM - checks if the software version on vFW is different than the one requested in the workflow input
+4. DistributeTrafficCheck on vPKG VM - ansible playbook checks if traffic destinations specified by OOF are not configured in the vPKG and traffic does not go from vPKG already.
   If vPKG already sends traffic to the destination, the playbook will fail and the workflow will break.
+5. DistributeTraffic on vPKG VM - ansible playbook reconfigures vPKG in order to send traffic to destination specified before by OOF.
+6. DistributeTrafficCheck on vFW-1 VM - ansible playbook checks if traffic is not present on vFW from which traffic should be migrated out. If traffic is still present after 30 seconds playbook fails
+7. DistributeTrafficCheck on vFW-2 VM - ansible playbook checks if traffic is present on vFW to which traffic should be migrated. If traffic is still not present after 30 seconds playbook fails
+8. UpgradeSoftware on vFW-1 VM - ansible playbook modifies the software on the vFW instance and sets the version of the software to the specified one in the request
+9. UpgradePostCheck on vFW-1 VM - ansible playbook checks if the software of vFW is the same like the one specified in the workflows input.
+10. DistributeTraffic on vPKG VM - ansible playbook reconfigures vPKG in order to send traffic to destination specified before by OOF (reverse configuration).
+11. DistributeTrafficCheck on vFW-2 VM - ansible playbook checks if traffic is not present on vFW from which traffic should be migrated out. If traffic is still present after 30 seconds playbook fails
+12. DistributeTrafficCheck on vFW-1 VM - ansible playbook checks if traffic is present on vFW to which traffic should be migrated back. If traffic is still not present after 30 seconds playbook fails
+13. Unlock on vPKG, vFW-1 and vFW-2 VMs
+
+
+For both workflows, when everything is fine, change of the traffic should be observed on the following dashboards (please turn on automatic reload of graphs). The observed traffic pattern for the upgrade scenario should be similar to the one presented in Figure 2
+
+ ::
+
+ http://vSINK-1-IP:667/
+ http://vSINK-2-IP:667/
Workflow Results
~~~~~~~~~~~~~~~~
-Expected result of workflow execution, when everythin is fine, is following:
+Expected result of Traffic Distribution workflow execution, when everything is fine, is following:
::
Distribute Traffic Workflow Execution:
- APPC REQ 0 - DistributeTrafficCheck
- Request Accepted. Receiving result status...
- Checking LCM DistributeTrafficCheck Status
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
+ WORKFLOW << Migrate vFW Traffic Conditionally >>
+ APPC LCM << CheckLock >> [Check vPGN Lock Status]
+ UNLOCKED
+ APPC LCM << CheckLock >> [Check vFW-1 Lock Status]
+ UNLOCKED
+ APPC LCM << CheckLock >> [Check vFW-2 Lock ]
+ UNLOCKED
+ APPC LCM << Lock >> [Lock vPGN]
SUCCESSFUL
- APPC REQ 1 - DistributeTraffic
- Request Accepted. Receiving result status...
- Checking LCM DistributeTraffic Status
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
+ APPC LCM << Lock >> [Lock vFW-1]
+ SUCCESSFUL
+ APPC LCM << Lock >> [Lock vFW-2]
+ SUCCESSFUL
+ APPC LCM << DistributeTrafficCheck >> [Check current traffic destination on vPGN]
+ ACCEPTED
+ APPC LCM << DistributeTrafficCheck >> [Status]
IN_PROGRESS
IN_PROGRESS
IN_PROGRESS
SUCCESSFUL
- APPC REQ 2 - DistributeTrafficCheck
- Request Accepted. Receiving result status...
- Checking LCM DistributeTrafficCheck Status
- IN_PROGRESS
+ WORKFLOW << Migrate Traffic and Verify >>
+ APPC LCM << DistributeTraffic >> [Migrating source vFW traffic to destination vFW]
+ ACCEPTED
+ APPC LCM << DistributeTraffic >> [Status]
IN_PROGRESS
IN_PROGRESS
IN_PROGRESS
@@ -787,49 +894,77 @@ Expected result of workflow execution, when everythin is fine, is following:
IN_PROGRESS
IN_PROGRESS
SUCCESSFUL
- APPC REQ 3 - DistributeTrafficCheck
- Request Accepted. Receiving result status...
- Checking LCM DistributeTrafficCheck Status
- IN_PROGRESS
- IN_PROGRESS
+ APPC LCM << DistributeTrafficCheck >> [Checking traffic has been stopped on the source vFW]
+ ACCEPTED
+ APPC LCM << DistributeTrafficCheck >> [Status]
IN_PROGRESS
IN_PROGRESS
IN_PROGRESS
+ SUCCESSFUL
+ APPC LCM << DistributeTrafficCheck >> [Checking traffic has appeared on the destination vFW]
+ ACCEPTED
+ APPC LCM << DistributeTrafficCheck >> [Status]
IN_PROGRESS
IN_PROGRESS
SUCCESSFUL
+ APPC LCM << Unlock >> [Unlock vPGN]
+ SUCCESSFUL
+ APPC LCM << Unlock >> [Unlock vFW-1]
+ SUCCESSFUL
+ APPC LCM << Unlock >> [Unlock vFW-2]
+ SUCCESSFUL
+
+
+In case we want to execute operation and one of the VNFs is locked because of other operation being executed:
+
+::
+
+ Distribute Traffic Workflow Execution:
+ WORKFLOW << Migrate vFW Traffic Conditionally >>
+ APPC LCM << CheckLock >> [Check vPGN Lock Status]
+ LOCKED
+ Traceback (most recent call last):
+ File "workflow.py", line 1235, in <module>
+ sys.argv[6].lower() == 'true', sys.argv[7].lower() == 'true', new_version)
+ File "workflow.py", line 1209, in execute_workflow
+ _execute_lcm_requests({"requests": lcm_requests, "description": "Migrate vFW Traffic Conditionally"}, onap_ip, check_result)
+ File "workflow.py", line 101, in wrap
+ ret = f(*args, **kwargs)
+ File "workflow.py", line 1007, in _execute_lcm_requests
+ raise Exception("APPC LCM << {} >> FAILED".format(req['input']['action']))
+ Exception: APPC LCM << CheckLock >> FAILED
+
In case of failure the result can be following:
::
Distribute Traffic Workflow Execution:
- APPC REQ 0 - DistributeTrafficCheck
- Request Accepted. Receiving result status...
- Checking LCM DistributeTrafficCheck Status
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
- IN_PROGRESS
+ WORKFLOW << Migrate vFW Traffic Conditionally >>
+ APPC LCM << CheckLock >> [Check vPGN Lock Status]
+ UNLOCKED
+ APPC LCM << CheckLock >> [Check vFW-1 Lock Status]
+ UNLOCKED
+ APPC LCM << CheckLock >> [Check vFW-2 Lock ]
+ UNLOCKED
+ APPC LCM << Lock >> [Lock vPGN]
+ SUCCESSFUL
+ APPC LCM << Lock >> [Lock vFW-1]
+ SUCCESSFUL
+ APPC LCM << Lock >> [Lock vFW-2]
+ SUCCESSFUL
+ APPC LCM << DistributeTrafficCheck >> [Check current traffic destination on vPGN]
+ ACCEPTED
+ APPC LCM << DistributeTrafficCheck >> [Status]
FAILED
- Traceback (most recent call last):
- File "workflow.py", line 563, in <module>
- sys.argv[5].lower() == 'true', sys.argv[6].lower() == 'true')
- File "workflow.py", line 557, in execute_workflow
- confirm_appc_lcm_action(onap_ip, req, check_result)
- File "workflow.py", line 529, in confirm_appc_lcm_action
- raise Exception("LCM {} {} - {}".format(req['input']['action'], status['status'], status['status-reason']))
- Exception: LCM DistributeTrafficCheck FAILED - FAILED
-
-.. note:: When CDT and Ansible is configured properly Traffic Distribution Workflow can fail when you pass as a vnf-id argument the ID of vFW VNF which does not handle traffic at the moment. To solve that pass the VNF ID of the other vFW VNF instance. Because of the same reason you cannot execute twice in a row workflow for the same VNF ID if first execution succedds.
+ APPC LCM <<DistributeTrafficCheck>> [FAILED - FAILED]
+ WORKFLOW << Migrate Traffic and Verify >> SKIP
+ APPC LCM << Unlock >> [Unlock vPGN]
+ SUCCESSFUL
+ APPC LCM << Unlock >> [Unlock vFW-1]
+ SUCCESSFUL
+ APPC LCM << Unlock >> [Unlock vFW-2]
+ SUCCESSFUL
+
+
+.. note:: When CDT and Ansible is configured properly Traffic Distribution Workflow can fail when you pass as a vnf-id argument the ID of vFW VNF which does not handle traffic at the moment. To solve that pass the VNF ID of the other vFW VNF instance. Because of the same reason you cannot execute twice in a row workflow for the same VNF ID if first execution succeeds.
diff --git a/docs/docs_vFW_CNF_CDS.rst b/docs/docs_vFW_CNF_CDS.rst
new file mode 100644
index 000000000..5e01df317
--- /dev/null
+++ b/docs/docs_vFW_CNF_CDS.rst
@@ -0,0 +1,1903 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. Copyright 2022 ONAP
+
+.. _docs_vFW_CNF_CDS:
+
+:orphan:
+
+vFirewall CNF Use Case
+----------------------
+
+Source files
+~~~~~~~~~~~~
+- Heat/Helm/CDS models: `vFW_CNF_CDS Model`_
+- Automation Scripts: `vFW_CNF_CDS Automation`_
+
+Description
+~~~~~~~~~~~
+This use case is a combination of `vFW CDS Dublin`_ and `vFW EDGEX K8S`_ use cases and it is continuously improved since Frankfurt release. The aim is to continue improving Kubernetes based Network Functions (a.k.a CNF) support in ONAP. Use case continues where `vFW EDGEX K8S`_ left and brings CDS support into picture like `vFW CDS Dublin`_ did for the old vFW Use case. Predecessor use case is also documented here `vFW EDGEX K8S In ONAP Wiki`_.
+
+This use case shows how to onboard helm packages and to instantiate them with help of ONAP. Following improvements were made in the vFW CNF Use Case:
+
+- vFW Kubernetes Helm charts support overrides (previously mostly hardcoded values)
+- SDC accepts Onboarding Package with many helm packages what allows to keep decomposition of service instance similar to `vFW CDS Dublin`_
+- Compared to `vFW EDGEX K8S`_ use case **MACRO** workflow in SO is used instead of VNF a'la carte workflow
+- No VNF data preloading used, instead resource-assignment feature of CDS is used
+- CDS is used to resolve instantiation time parameters (Helm overrides)
+ * IP addresses with IPAM
+ * Unique names for resources with ONAP naming service
+ * CDS is used to create and upload **multicloud/k8s profile** as part of instantiation flow
+- Combined all models (Heat, Helm, CBA) in to same git repo and a created single onboarding package `vFW_CNF_CDS Model`_
+- vFW CNF status is monitored prior to the completion of the instantiation process.
+- It is possible to not only provide overrides for Helm packages but we can modify Helm packages before instantiation or we can modify CNF after its deployment
+- Use case does not contain Closed Loop part of the vFW demo.
+
+All changes to related ONAP components and Use Case can be found in the following tickets:
+
+- `REQ-182`_
+- `REQ-341`_
+- `REQ-458`_
+- `REQ-627`_
+- `REQ-890`_
+
+The vFW CNF Use Case
+~~~~~~~~~~~~~~~~~~~~
+The vFW CNF CDS use case shows how to instantiate multiple CNF instances in similar way as VNFs bringing CNFs closer to first class citizens in ONAP.
+
+One of the biggest practical change compared to the old demos (any ONAP demo) is that whole network function content (user provided content) is collected to one place and more importantly into git repository (`vFW_CNF_CDS Model`_) that provides version control (that is pretty important thing). That is very basic thing but unfortunately this is a common problem when running any ONAP demo and trying to find all content from many different git repositories and even some files only in ONAP wiki.
+
+Demo git directory has also `Data Dictionary`_ file (CDS model time resource) included.
+
+Another founding idea from the start was to provide complete content in single onboarding package available directly from that git repository. Not any revolutionary idea as that's the official package format ONAP supports and all content supposed to be in that same package for single service regardless of the models and closed loops and configurations etc.
+
+Following table describes all the source models to which this demo is based on.
+
+=============== ================= ===========
+Model Git reference Description
+--------------- ----------------- -----------
+Heat `vFW_NextGen`_ Heat templates used in original vFW demo but split into multiple vf-modules
+Helm `vFW_Helm Model`_ Helm templates used in `vFW EDGEX K8S`_ demo
+CDS model `vFW CBA Model`_ CDS CBA model used in `vFW CDS Dublin`_ demo
+=============== ================= ===========
+
+.. note:: Since the Guilin release `vFW_CNF_CDS Model`_ contains sources that allow to model and instantiate CNF with VNF/Heat orchestration approach (Frankfurt) and with native Helm orchestration approach (Guilin and beyond). VNF/Heat orchestration approach is deprecated and will not be enhanced in the future. Please follow README.txt description and further documentation here to generate and select appropriate onboarding package which will leverage appropriate SO orchestration path.
+
+Since Honolulu release vFW CNF use case supports three different scenarios where different capabilities of CNF Orchestration in ONAP can be experimented:
+
+.. figure:: files/vFW_CNF_CDS/scenarios.png
+ :scale: 60 %
+ :align: center
+
+ vFW CNF Scenarios
+
+- Scenario 1: simple deployment of vFW CNF instance
+- Scenario 2: deployment of vFW CNF instance with enrichment of the Helm deployment with profiling mechanism
+- Scenario 3: deployment of vFW CNF instance with Day2 configuration applied and CNF status checked as a part of a config-deploy operation
+
+The 3rd scenario presents the most comprehensive way of managing the CNF in ONAP, including Day 0/1/2 operations. It shows also how to combine in the Day2 operation information for the AAI and SDNC MDSAL. All scenarios can be supported by execution of the dedicated Healthcheck workflow `3-5 Verification of the CNF Status`_.
+
+Modeling of Onboarding Package/Helm
+...................................
+
+The starting point for this demo was Helm package containing one Kubernetes application, see `vFW_Helm Model`_. In this demo we decided to follow SDC/SO vf-module concept the same way as original vFW demo was split into multiple vf-modules instead of one (`vFW_NextGen`_). The same way we split the Helm version of vFW into multiple Helm packages each matching one dedicated vf-module.
+
+The Jakarta version of the `vFW_CNF_CDS Model`_ contains files required to create **VSP onboarding packages in Helm Native format** where each Helm package is standalone and is natively understood in consequence by SO. The **Dummy Heat** approach (available already in the Frankfurt release), which considers association of each Helm package with dummy heat templates, is since Jakarta not a part of the vFW CNF demo. If you are interested to see how to onboard and orchestrate the CNF using the **Dummy Heat** approach, please open the Istanbul version of the documentation. The VSP Helm packages are matched to the vf-module concept, so basically each Helm application after instantiation is visible to ONAP as a separate vf-module. The **Native Helm** format for onboarding has **crucial** role in the further orchestration approach applied for Helm package instantiation as it leverages the **CNF Adapter** and it populates k8s resource information to AAI what plays significant role in the Day operation for CNFs, including closed-loop automation with Prometheus. Read more in `3-1 CNF Orchestration Paths in ONAP`_
+
+Produced **Native Helm** VSP onboarding package `Creating Onboarding Package`_ format has following MANIFEST file (package_native/MANIFEST.json). The Helm package is delivered as HELM package through SDC and SO. The *isBase* flag of HELM artifact is ignored by SDC but in the manifest one HELM or HEAT artifacts must be defined as isBase = true. If both HEAT and HELM are present in the same manifest file the base one must be always one of HELM artifacts. Moreover, the name of HELM type artifact **MUST** match the specified pattern: *helm_<some_name>* and the HEAT type artifacts, if present in the same manifest, cannot contain keyword *helm*. These limitations are a consequence of current limitations of the SDC onboarding and VSP validation engine and will be addressed in the future releases.
+
+::
+
+ {
+ "name": "virtualFirewall",
+ "description": "",
+ "data": [
+ {
+ "file": "CBA.zip",
+ "type": "CONTROLLER_BLUEPRINT_ARCHIVE"
+ },
+ {
+ "file": "helm_base_template.tgz",
+ "type": "HELM",
+ "isBase": "true"
+ },
+ {
+ "file": "helm_vfw.tgz",
+ "type": "HELM",
+ "isBase": "false"
+ },
+ {
+ "file": "helm_vpkg.tgz",
+ "type": "HELM",
+ "isBase": "false"
+ },
+ {
+ "file": "helm_vsn.tgz",
+ "type": "HELM",
+ "isBase": "false"
+ }
+ ]
+ }
+
+.. note:: CDS model (CBA package) is delivered as SDC supported own type CONTROLLER_BLUEPRINT_ARCHIVE but the current limitation of VSP onboarding forces to use the artifact name *CBA.zip* to automatically recognize CBA as a CONTROLLER_BLUEPRINT_ARCHIVE.
+
+CDS Model (CBA)
+...............
+
+CDS plays a crucial role in the process of CNF instantiation and is responsible for delivery of instantiation parameters, CNF customization, configuration of CNF after the deployment and may be used in the process of CNF status verification.
+
+Creating CDS model was the core of the use case work and also the most difficult and time consuming part. Current template used by use-case should be easily reusable for anyone. Once CDS GUI will be fully working, we think that CBA development should be much easier. For CBA structure reference, please visit its documentation page `CDS Documentation`_.
+
+At first the target was to keep CDS model as close as possible to `vFW_CNF_CDS Model`_ use case model and only add smallest possible changes to enable also k8s usage. That is still the target but in practice model deviated from the original one already and time pressure pushed us to not care about sync. Basically the end result could possibly be much more streamlined if the goal was to make it as small as possible, working only for K8S based network functions.
+
+Base on this example there are demonstrated following features of CDS and CBA model
+
+- resource assignment string, integer and json types
+- sourcing of resolved value on vf-module level from vnf level assignment
+- extracting data from AAI and MD-SAL during the resource assignment
+- custom resource assignment with Kotlin script
+- templating of the vtl files
+- building of imperative workflows
+- utilization of on_success and on_failure events in imperative workflow
+- handling of the failure in the workflow
+- implementation of custom workflow logic with Kotlin script
+- example of config-assign and config-deploy operation decomposed into many steps
+- complex parametrization of config deploy operation
+- combination and aggregation of AAI and MD-SAL data in config-assign and config-deploy operations
+
+The prepared CBA model demonstrates also how to utilize CNF specific features of CBA, suited for the deployment of CNF with k8splugin in ONAP:
+
+- building and upload of k8s profile template into k8splugin
+- building and upload of k8s configuration template into k8splugin
+- parametrization and creation of configuration instance from configuration template
+- validation of CNF status with Kotlin script
+- execution of the CNF healthcheck
+
+As K8S application is split into multiple Helm packages to match vf-modules, CBA modeling follows the same and for each vf-module there's own template in CBA package. The **Native Helm** approach requires the Helm artifact names to start with the *helm_* prefix, in the same way as the names of artifacts in the MANIFEST file of the VSP. The **Native Helm** artifacts' list is following:
+
+::
+
+ "artifacts" : {
+ "helm_base_template-template" : {
+ "type" : "artifact-template-velocity",
+ "file" : "Templates/base_template-template.vtl"
+ },
+ "helm_base_template-mapping" : {
+ "type" : "artifact-mapping-resource",
+ "file" : "Templates/base_template-mapping.json"
+ },
+ "helm_vpkg-template" : {
+ "type" : "artifact-template-velocity",
+ "file" : "Templates/vpkg-template.vtl"
+ },
+ "helm_vpkg-mapping" : {
+ "type" : "artifact-mapping-resource",
+ "file" : "Templates/vpkg-mapping.json"
+ },
+ "helm_vfw-template" : {
+ "type" : "artifact-template-velocity",
+ "file" : "Templates/vfw-template.vtl"
+ },
+ "helm_vfw-mapping" : {
+ "type" : "artifact-mapping-resource",
+ "file" : "Templates/vfw-mapping.json"
+ },
+ "vnf-template" : {
+ "type" : "artifact-template-velocity",
+ "file" : "Templates/vnf-template.vtl"
+ },
+ "vnf-mapping" : {
+ "type" : "artifact-mapping-resource",
+ "file" : "Templates/vnf-mapping.json"
+ },
+ "helm_vsn-template" : {
+ "type" : "artifact-template-velocity",
+ "file" : "Templates/vsn-template.vtl"
+ },
+ "helm_vsn-mapping" : {
+ "type" : "artifact-mapping-resource",
+ "file" : "Templates/vsn-mapping.json"
+ }
+ }
+
+SO requires for instantiation name of the profile in the parameter: *k8s-rb-profile-name* and name of the release of the application: *k8s-rb-instance-release-name*. The latter one, when not specified, will be replaced with combination of profile name and vf-module-id for each Helm instance/vf-module instantiated. Both values can be found in vtl templates dedicated for vf-modules.
+
+CBA offers possibility of the automatic generation and upload to multicloud/k8s plugin the RB profile content. RB profile is required if you want to deploy your CNF into a k8s namespace other than *default*. Also, if you want to ensure particular templating of your Helm charts, specific to a particular version of the cluster onto which Helm packages will be deployed, the profile is used to specify the version of your cluster.
+
+RB profile can be used to enrich or to modify the content of the original helm package. Profile can be also used to add additional k8s helm templates for helm installation or can be used to modify existing k8s helm templates for each created CNF instance. It opens another level of CNF customization, much more than customization of the Helm package with override values. K8splugin offers also *default* profile without content, for default namespace and default cluster version.
+
+::
+
+ ---
+ version: v1
+ type:
+ values: "override_values.yaml"
+ configresource:
+ - filepath: resources/deployment.yaml
+ chartpath: templates/deployment.yaml
+
+
+Above we have exemplary manifest file of the RB profile. Since Frankfurt *override_values.yaml* file does not need to be used as instantiation values are passed to the plugin over Instance API of k8s plugin. In the example, profile contains additional k8s Helm template which will be added on demand to the helm package during its installation. In our case, depending on the SO instantiation request input parameters, vPGN helm package can be enriched with additional ssh service. Such service will be dynamically added to the profile by CDS and later on CDS will upload whole custom RB profile to multicloud/k8s plugin.
+
+In order to support generation and upload of profile, our vFW CBA model has enhanced **resource-assignment** workflow which contains additional step: **profile-upload**. It leverages dedicated functionality introduced in Guilin release that can be used to upload predefined profile or to generate and upload content of the profile with Velocity templating mechanism.
+
+::
+
+ "resource-assignment": {
+ "steps": {
+ "resource-assignment": {
+ "description": "Resource Assign Workflow",
+ "target": "resource-assignment",
+ "activities": [
+ {
+ "call_operation": "ResourceResolutionComponent.process"
+ }
+ ],
+ "on_success": [
+ "profile-upload"
+ ]
+ },
+ "profile-upload": {
+ "description": "Generate and upload K8s Profile",
+ "target": "k8s-profile-upload",
+ "activities": [
+ {
+ "call_operation": "ComponentScriptExecutor.process"
+ }
+ ]
+ }
+ },
+
+.. note:: In the Frankfurt release profile upload was implemented as a custom Kotlin script included into the CBA. It was responsible for upload of K8S profile into multicloud/k8s plugin. It is still a good example of the integration of Kotlin scripting into the CBA. For those interested in this functionality we recommend to look into the `Frankfurt CBA Definition`_ and `Frankfurt CBA Script`_. Since Honolulu we introduce more advanced use of the Kotlin script for verification of the CNF status or custom resolution of complex parameters over Kotlin script - both can be found in the further part of the documentation.
+
+In our example for vPKG helm package we may select *vfw-cnf-cds-vpkg-profile* profile that is included into CBA as a folder. The profile generation step uses the Velocity template processing functionality embedded in CDS, parametrized with the ssh port number (specified in the SO request as *vpg-management-port*).
+
+::
+
+ {
+ "name": "vpg-management-port",
+ "property": {
+ "description": "The number of node port for ssh service of vpg",
+ "type": "integer",
+ "default": "0"
+ },
+ "input-param": false,
+ "dictionary-name": "vpg-management-port",
+ "dictionary-source": "default",
+ "dependencies": []
+ }
+
+*vpg-management-port* can be included directly into the helm template and such template will be included into vPKG helm package in time of its instantiation.
+
+::
+
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: {{ .Values.vpg_name_0 }}-ssh-access
+ labels:
+ vnf-name: {{ .Values.vnf_name }}
+ vf-module-name: {{ .Values.vpg_name_0 }}
+ release: {{ .Release.Name }}
+ chart: {{ .Chart.Name }}
+ spec:
+ type: NodePort
+ ports:
+ - port: 22
+ nodePort: ${vpg-management-port}
+ selector:
+ vf-module-name: {{ .Values.vpg_name_0 }}
+ release: {{ .Release.Name }}
+ chart: {{ .Chart.Name }}
+
+.. warning:: The port value is of Integer type and CDS resolves it as an integer. If the resolved values are returned to SO during the resource resolution phase they are passed back to k8splugin only as strings. In consequence, Integer values are passed to the Instantiation API as strings and then they have to be converted in the helm template to integers. In order to avoid such conversion it is better to customize override values with Integers in the profile and to skip returning these parameters in the resource resolution phase (they should not be included in the .vtl files).
+
+The mechanism of profile generation and upload requires a specific node template in the CBA definition. In our case, it comes with the declaration of two profiles: one static *vfw-cnf-cds-base-profile* in a form of an archive and the second complex *vfw-cnf-cds-vpkg-profile* in a form of a folder for processing and profile generation. Below is the example of the definition of node type for execution of the profile upload operation.
+
+::
+
+ "k8s-profile-upload": {
+ "type": "component-k8s-profile-upload",
+ "interfaces": {
+ "K8sProfileUploadComponent": {
+ "operations": {
+ "process": {
+ "inputs": {
+ "artifact-prefix-names": {
+ "get_input": "template-prefix"
+ },
+ "resource-assignment-map": {
+ "get_attribute": [
+ "resource-assignment",
+ "assignment-map"
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "artifacts": {
+ "vfw-cnf-cds-base-profile": {
+ "type": "artifact-k8sprofile-content",
+ "file": "Templates/k8s-profiles/vfw-cnf-cds-base-profile.tar.gz"
+ },
+ "vfw-cnf-cds-vpkg-profile": {
+ "type": "artifact-k8sprofile-content",
+ "file": "Templates/k8s-profiles/vfw-cnf-cds-vpkg-profile"
+ },
+ "vfw-cnf-cds-vpkg-profile-mapping": {
+ "type": "artifact-mapping-resource",
+ "file": "Templates/k8s-profiles/vfw-cnf-cds-vpkg-profile/ssh-service-mapping.json"
+ }
+ }
+ }
+
+Artifact file determines a place of the static profile or the content of the complex profile. In the latter case we need a pair of profile folder and mapping file with a declaration of the parameters that CDS needs to resolve first, before the Velocity templating is applied to the .vtl files present in the profile content. After Velocity templating the .vtl extensions will be dropped from the file names. The embedded mechanism will include in the profile only files present in the profile MANIFEST file that needs to contain the list of final names of the files to be included into the profile. The figure below shows the idea of profile templating.
+
+.. figure:: files/vFW_CNF_CDS/profile-templating.png
+ :align: center
+
+ K8s Profile Templating
+
+SO requires for instantiation name of the profile in the parameter: *k8s-rb-profile-name*. The *component-k8s-profile-upload* that stands behind the profile uploading mechanism has input parameters that can be passed directly (checked in the first order) or can be taken from the *resource-assignment-map* parameter which can be a result of associated *component-resource-resolution* result, like in our case their values are resolved on vf-module level resource assignment. The *component-k8s-profile-upload* inputs are following:
+
+- k8s-rb-definition-name [string] - (mandatory) the name under which RB definition was created - **VF Module Model Invariant ID** in ONAP
+- k8s-rb-definition-version [string] - (mandatory) the version of created RB definition name - **VF Module Model Customization ID** in ONAP
+- k8s-rb-profile-name [string] - (mandatory) the name of the profile under which it will be created in k8s plugin. Other parameters are required only when profile must be uploaded because it does not exist yet
+- k8s-rb-profile-source [string] - the source of profile content - name of the artifact of the profile. If missing *k8s-rb-profile-name* is treated as a source
+- k8s-rb-profile-namespace [string] - (mandatory) the k8s namespace name associated with profile being created
+- k8s-rb-profile-kubernetes-version [string] - the version of the cluster on which application will be deployed - it may impact the helm templating process like selection of the api versions for resources so it should match the version of k8s cluster in which resources are being deployed.
+- k8s-rb-profile-labels [json] - the extra labels (label-name: label-value) to add for each k8s resource created for CNF in the k8s cluster (since Jakarta release).
+- k8s-rb-profile-extra-types [list<json>] - the list of extra k8s types that should be returned by StatusAPI. It may be useful when k8s resources associated with CNF instance are created outside of the helm package (i.e. by k8s operator) but should be treated like resources of CNF. To make this happen such resources should have the instance label *k8splugin.io/rb-instance-id* what may be assured by such tools like *kyverno*. Each extra type json object needs *Group*, *Version* and *Kind* attributes. (since Jakarta release).
+- resource-assignment-map [json] - result of the associated resource assignment step - it may deliver values of inputs if they are not specified directly
+- artifact-prefix-names [list<string>] - (mandatory) the list of artifact prefixes like for resource-assigment step in the resource-assigment workflow or its subset
+
+In the SO request user can pass parameter of name *k8s-rb-profile-name* which in our case may have value: *vfw-cnf-cds-base-profile*, *vfw-cnf-cds-vpkg-profile* or *default*. The *default* profile does not contain any content inside and allows instantiation of CNF without the need to define and upload any additional profiles. *vfw-cnf-cds-vpkg-profile* has been prepared to test instantiation of the second modified vFW CNF instance.
+
+K8splugin allows to specify override parameters (similar to --set behavior of helm client) to instantiated resource bundles. This allows for providing dynamic parameters to instantiated resources without the need to create new profiles for this purpose. This mechanism should be used with *default* profile but may be used also with any custom profile.
+
+The overall flow of helm overrides parameters processing is visible on following figure. When *rb definition* (helm package) is being instantiated for specified *rb profile* K8splugin combines override values from the helm package, *rb profile* and from the instantiation request - in the respective order. It means that the value from the instantiation request (SO request input or CDS resource assignment result) has a precedence over the value from the *rb profile* and value from the *rb profile* has a precedence over the helm package default override value. Similarly, profile can contain resource files that may extend or amend the existing files for the original helm package content.
+
+.. figure:: files/vFW_CNF_CDS/helm-overrides.png
+ :align: center
+
+ The overall flow of helm data processing
+
+Both profile content (4) like the instantiation request values (5) can be generated during the resource assignment process according to its definition for CBA associated with helm package. CBA may generate i.e. names, IP addresses, ports and can use this information to produce the *rb-profile* (3) content. Finally, all three sources of override values, templates and additional resources files are merged together (6) by K8splugin in the order explained before.
+
+.. figure:: files/vFW_CNF_CDS/helm-overrides-steps.png
+ :align: center
+
+ The steps of processing of helm data with help of CDS
+
+Both profile content (4) like the instantiation request values (5) can be generated during the resource assignment process according to its definition for CBA associated with helm package. CBA may generate i.e. names, IP addresses, ports and can use this information to produce the *rb-profile* (3) content. Finally, all three sources of override values, templates and additional resources files are merged together (6) by K8splugin in the order explained before.
+
+Besides the deployment of Helm application the CBA of vFW demonstrates also how to use dedicated features for config-assign (7) and config-deploy (8) operations. In the use case, *config-assign* and *config-deploy* operations deal mainly with creation and instantiation of configuration template for k8s plugin. The configuration template has a form of Helm package. When k8s plugin instantiates configuration, it creates or may replace existing resources deployed on k8s cluster. In our case the configuration template is used to provide alternative way of upload of the additional ssh-service but it could be used to modify configmap of vfw or vpkg vf-modules.
+
+In order to provide configuration instantiation capability standard *config-assign* and *config-deploy* workflows have been changed into imperative workflows with first step responsible for collection of information for configuration templating and configuration instantiation. The source of data for these operations is AAI, MDSAL with data for vnf and vf-modules as *config-assign* and *config-deploy* does not receive dedicated input parameters from SO. In consequence both operations need to source from *resource-assignment* phase and data placed in the AAI and MDSAL.
+
+vFW CNF *config-assign* workflow is following:
+
+::
+
+ "config-assign": {
+ "steps": {
+ "config-setup": {
+ "description": "Gather necessary input for config template upload",
+ "target": "config-setup-process",
+ "activities": [
+ {
+ "call_operation": "ResourceResolutionComponent.process"
+ }
+ ],
+ "on_success": [
+ "config-template"
+ ]
+ },
+ "config-template": {
+ "description": "Generate and upload K8s config template",
+ "target": "k8s-config-template",
+ "activities": [
+ {
+ "call_operation": "K8sConfigTemplateComponent.process"
+ }
+ ]
+ }
+ },
+
+vFW CNF *config-deploy* workflow is following:
+
+::
+
+ "config-deploy": {
+ "steps": {
+ "config-setup": {
+ "description": "Gather necessary input for config init and status verification",
+ "target": "config-setup-process",
+ "activities": [
+ {
+ "call_operation": "ResourceResolutionComponent.process"
+ }
+ ],
+ "on_success": [
+ "config-apply"
+ ]
+ },
+ "config-apply": {
+ "description": "Activate K8s config template",
+ "target": "k8s-config-apply",
+ "activities": [
+ {
+ "call_operation": "K8sConfigTemplateComponent.process"
+ }
+ ],
+ "on_success": [
+ "status-verification-script"
+ ]
+ },
+
+
+In our example configuration template for vFW CNF is a helm package that contains the same resource that we can find in the vPKG *vfw-cnf-cds-vpkg-profile* profile - extra ssh service. This helm package contains Helm encapsulation for ssh-service and the values.yaml file with declaration of all the inputs that may parametrize the ssh-service. The configuration templating step leverages the *component-k8s-config-template* component that prepares the configuration template and uploads it to k8splugin. In consequence, it may be used later on for instantiation of the configuration.
+
+In this use case we have two options with *ssh-service-config* and *ssh-service-config-customizable* as a source of the same configuration template. In consequence, either we take a complete template or we have the template folder with the content of the helm package and CDS may perform dedicated resource resolution for it with templating of all the files with .vtl extensions. The process is very similar to the one described for the profile upload functionality.
+
+::
+
+ "k8s-config-template": {
+ "type": "component-k8s-config-template",
+ "interfaces": {
+ "K8sConfigTemplateComponent": {
+ "operations": {
+ "process": {
+ "inputs": {
+ "artifact-prefix-names": [
+ "helm_vpkg"
+ ],
+ "resource-assignment-map": {
+ "get_attribute": [
+ "config-setup-process",
+ "",
+ "assignment-map",
+ "config-deploy",
+ "config-deploy-setup"
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "artifacts": {
+ "ssh-service-config": {
+ "type": "artifact-k8sconfig-content",
+ "file": "Templates/k8s-configs/ssh-service.tar.gz"
+ },
+ "ssh-service-config-customizable": {
+ "type": "artifact-k8sconfig-content",
+ "file": "Templates/k8s-configs/ssh-service-config"
+ },
+ "ssh-service-config-customizable-mapping": {
+ "type": "artifact-mapping-resource",
+ "file": "Templates/k8s-configs/ssh-service-config/ssh-service-mapping.json"
+ }
+ }
+ }
+
+The *component-k8s-config-template* that stands behind creation of configuration template has input parameters that can be passed directly (checked in the first order) or can be taken from the *resource-assignment-map* parameter which can be a result of associated *component-resource-resolution* result, like in vFW CNF use case their values are resolved on vf-module level dedicated for *config-assign* and *config-deploy* resource assignment step. The *component-k8s-config-template* inputs are following:
+
+- k8s-rb-definition-name [string] - (mandatory) the name under which RB definition was created - **VF Module Model Invariant ID** in ONAP
+- k8s-rb-definition-version [string] - (mandatory) the version of created RB definition name - **VF Module Model Customization ID** in ONAP
+- k8s-rb-config-template-name [string] - (mandatory) the name of the configuration template under which it will be created in k8s plugin. Other parameters are required only when configuration template must be uploaded because it does not exist yet
+- k8s-rb-config-template-source [string] - the source of config template content - name of the artifact of the configuration template. When missing, the main definition helm package will be used as a configuration template source (since Jakarta release).
+- resource-assignment-map [json] - result of the associated resource assignment step - it may deliver values of inputs if they are not specified directly
+- artifact-prefix-names [list<string>] - (mandatory) the list of artifact prefixes like for resource-assigment step in the resource-assigment workflow or its subset
+
+In our case the *component-k8s-config-template* component receives all the inputs from the dedicated resource-assignment process *config-setup* that is responsible for resolution of all the inputs for configuration templating. This process generates data for *helm_vpkg* prefix and such one is specified in the list of prefixes of the configuration template component. It means that configuration template will be prepared only for vPKG function.
+
+::
+
+ "k8s-config-apply": {
+ "type": "component-k8s-config-value",
+ "interfaces": {
+ "K8sConfigValueComponent": {
+ "operations": {
+ "process": {
+ "inputs": {
+ "artifact-prefix-names": [
+ "helm_vpkg"
+ ],
+ "k8s-config-operation-type": "create",
+ "resource-assignment-map": {
+ "get_attribute": [
+ "config-setup-process",
+ "",
+ "assignment-map",
+ "config-deploy",
+ "config-deploy-setup"
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "artifacts": {
+ "ssh-service-default": {
+ "type": "artifact-k8sconfig-content",
+ "file": "Templates/k8s-configs/ssh-service-config/values.yaml"
+ },
+ "ssh-service-config": {
+ "type": "artifact-k8sconfig-content",
+ "file": "Templates/k8s-configs/ssh-service-values/values.yaml.vtl"
+ },
+ "ssh-service-config-mapping": {
+ "type": "artifact-mapping-resource",
+ "file": "Templates/k8s-configs/ssh-service-values/ssh-service-mapping.json"
+ }
+ }
+ }
+
+
+The *component-k8s-config-value* that stands behind creation of configuration instance has input parameters that can be passed directly (checked in the first order) or can be taken from the *resource-assignment-map* parameter which can be a result of associated *component-resource-resolution* result, like in vFW CNF use case their values are resolved on vf-module level dedicated for *config-assign* and *config-deploy*'s resource-assignment step. The *component-k8s-config-value* inputs are following:
+
+- k8s-rb-config-name [string] - (mandatory) the name of the configuration template under which it will be created in k8s plugin. Other parameters are required only when configuration template must be uploaded because it does not exist yet
+- k8s-rb-config-template-name [string] - (mandatory) the name of the configuration template under which it will be created in k8s plugin. Other parameters are required only when configuration template must be uploaded because it does not exist yet
+- k8s-rb-config-value-source [string] - the source of config template content - name of the artifact of the configuration template. If missing *k8s-rb-config-name* is treated as a source
+- k8s-rb-config-version [string] - the version of the configuration to restore during the *rollback* operation. First configuration after *create* has version *1* and new ones, after *update* will have version of the following numbers. When *rollback* operation is performed all previous versions on the path to the desired one are being restored one, by one. (since Jakarta)
+- k8s-instance-id [string] - (mandatory) the identifier of the rb instance for which the configuration should be applied
+- k8s-config-operation-type [string] - the type of the configuration operation to perform: *create*, *update*, *rollback*, *delete* or *delete_config*. By default *create* operation is performed. *rollback* and *delete_config* types are present since Jakarta release. The *update* operation creates new version of the configuration. *delete* operation creates also new version of configuration that deletes all the resources in k8s from the cluster. *delete_config* operation aims to delete configuration entirely but it does not delete or update any resources associated with the configuration.
+- resource-assignment-map [json] - result of the associated resource assignment step - it may deliver values of inputs if they are not specified directly
+- artifact-prefix-names [list<string>] - (mandatory) the list of artifact prefixes like for resource-assigment step in the resource-assigment workflow or its subset
+
+Like for the configuration template, the *component-k8s-config-value* component receives all the inputs from the dedicated resource-assignment process *config-setup* that is responsible for resolution of all the inputs for configuration. This process generates data for *helm_vpkg* prefix and such one is specified in the list of prefixes of the configuration values component. It means that configuration instance will be created only for vPKG function (component allows also update or delete of the configuration but in the vFW CNF case it is used only to create configuration instance).
+
+Finally, `Data Dictionary`_ is also included into demo git directory, re-modeling and making changes into model utilizing CDS model time / runtime is easier as used DD is also known.
+
+.. note:: CBA of vFW CNF use case is already enriched and VSP of vFW CNF has CBA included inside. In consequence, when VSP is being onboarded into SDC and service is being distributed, CBA is uploaded into CDS. Anyway, CDS contains in the starter dictionary all data dictionary values used in the use case and enrichment of CBA should work as well.
+
+Instantiation Overview
+----------------------
+
+.. note:: Since Guilin release use case is equipped with automated method **<AUTOMATED>** with python scripts to replace Postman method **<MANUAL>** used in Frankfurt. Nevertheless, Postman collection is good to understand the entire process. If a user selects to follow Postman collection, then automation scripts **must not** be used. **For the entire process use only scripts or only Postman collection**. Both options are described in the further steps of this instruction.
+
+The figure below shows all the interactions that take place during vFW CNF instantiation. It's not describing flow of actions (ordered steps) but rather component dependencies.
+
+.. figure:: files/vFW_CNF_CDS/Instantiation_topology.png
+ :align: center
+
+ vFW CNF CDS Use Case Runtime interactions.
+
+PART 1 - ONAP Installation
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+1-1 Deployment components
+.........................
+
+In order to run the vFW_CNF_CDS use case, we need ONAP Jakarta Release (or later) with at least following components:
+
+======================================================= ===========
+ONAP Component name                                     Description
+------------------------------------------------------- -----------
+AAI Required for Inventory Cloud Owner, Customer, Owning Entity, Service, Generic VNF, VF Module
+SDC VSP, VF and Service Modeling of the CNF
+DMAAP Distribution of the onboarding package including CBA to all ONAP components
+SO Required for Macro Orchestration using the generic building blocks
+CDS                                                     Resolution of cloud parameters including Helm override parameters for the CNF. Creation of the multicloud/k8s profile for CNF instantiation. Creation of configuration template and its instantiation
+SDNC (needs to include netbox and Naming Generation mS) Provides GENERIC-RESOURCE-API for cloud Instantiation orchestration via CDS.
+Policy Used to Store Naming Policy
+AAF Used for Authentication and Authorization of requests
+Portal Required to access SDC.
+MSB Exposes multicloud interfaces used by SO.
+Multicloud K8S plugin part used to pass SO instantiation requests to external Kubernetes cloud region.
+Contrib Chart containing multiple external components. Out of those, we only use Netbox utility in this use-case for IPAM
+Robot Optional. Can be used for running automated tasks, like provisioning cloud customer, cloud region, service subscription, etc ..
+Shared Cassandra DB Used as a shared storage for ONAP components that rely on Cassandra DB, like AAI
+Shared Maria DB Used as a shared storage for ONAP components that rely on Maria DB, like SDNC, and SO
+======================================================= ===========
+
+1-2 Deployment
+..............
+
+In order to deploy such an instance, follow the `ONAP Deployment Guide`_
+
+As we can see from the guide, we can use an override file that helps us customize our ONAP deployment, without modifying the OOM Folder, so you can download this override file here, that includes the necessary components mentioned above.
+
+**override.yaml** file where enabled: true is set for each component needed in demo (by default all components are disabled).
+
+::
+
+ aai:
+ enabled: true
+ aaf:
+ enabled: true
+ cassandra:
+ enabled: true
+ cds:
+ enabled: true
+ contrib:
+ enabled: true
+ dmaap:
+ enabled: true
+ mariadb-galera:
+ enabled: true
+ msb:
+ enabled: true
+ multicloud:
+ enabled: true
+ policy:
+ enabled: true
+ portal:
+ enabled: true
+ robot:
+ enabled: true
+ sdc:
+ enabled: true
+ sdnc:
+ enabled: true
+ so:
+ enabled: true
+
+Then deploy ONAP with Helm with your override file.
+
+::
+
+ helm deploy onap local/onap --namespace onap -f ~/override.yaml
+
+In case redeployment needed `Helm Healer`_ could be a faster and convenient way to redeploy.
+
+::
+
+ helm-healer.sh -n onap -f ~/override.yaml -s /dockerdata-nfs --delete-all
+
+Or redeploy (clean re-deploy also data removed) just wanted components (Helm releases), cds in this example.
+
+::
+
+ helm-healer.sh -f ~/override.yaml -s /dockerdata-nfs/ -n onap -c onap-cds
+
+There are many instructions in ONAP wiki how to follow your deployment status and whether it succeeded or not, mostly using Robot Health checks. One way we used is to skip the outermost Robot wrapper and use directly ete-k8s.sh to be able to select checked components easily. Script is found from OOM git repository *oom/kubernetes/robot/ete-k8s.sh*.
+
+::
+
+ {
+ failed=
+ for comp in {aaf,aai,dmaap,msb,multicloud,policy,portal,sdc,sdnc,so}; do
+ if ! ./ete-k8s.sh onap health-$comp; then
+ failed=$failed,$comp
+ fi
+ done
+ if [ -n "$failed" ]; then
+ echo "These components failed: $failed"
+ false
+ else
+ echo "Healthcheck successful"
+ fi
+ }
+
+And check status of pods, deployments, jobs etc.
+
+::
+
+ kubectl -n onap get pods | grep -vie 'completed' -e 'running'
+ kubectl -n onap get deploy,sts,jobs
+
+
+1-3 Post Deployment
+...................
+
+After completing the first part above, we should have a functional ONAP deployment for the Jakarta Release.
+
+We will need to apply a few modifications to the deployed ONAP Jakarta instance in order to run the use case.
+
+Retrieving logins and passwords of ONAP components
+++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Since Frankfurt release hardcoded passwords were mostly removed and it is possible to configure passwords of ONAP components in time of their installation. In order to retrieve these passwords with associated logins it is required to get them with kubectl. Below is the procedure on mariadb-galera DB component example.
+
+::
+
+ kubectl get secret `kubectl get secrets | grep mariadb-galera-db-root-password | awk '{print $1}'` -o jsonpath="{.data.login}" | base64 --decode
+ kubectl get secret `kubectl get secrets | grep mariadb-galera-db-root-password | awk '{print $1}'` -o jsonpath="{.data.password}" | base64 --decode
+
+In this case login is empty as the secret is dedicated to root user.
+
+
+Postman collection setup
+++++++++++++++++++++++++
+
+In this demo we have on purpose created all manual ONAP preparation steps (which in real life are automated) by using Postman so it will be clear what exactly is needed. Some of the steps like AAI population is automated by Robot scripts in other ONAP demos (**./demo-k8s.sh onap init**) and Robot script could be used for many parts also in this demo.
+
+Postman collection is used also to trigger instantiation using SO APIs.
+
+Following steps are needed to setup Postman:
+
+- Import this Postman collection zip
+
+ :download:`Postman collection <files/vFW_CNF_CDS/postman.zip>`
+
+- Extract the zip and import Postman collection into Postman. Environment file is provided for reference, it's better to create own environment on your own providing variables as listed in next chapter.
+ - `vFW_CNF_CDS.postman_collection.json`
+ - `vFW_CNF_CDS.postman_environment.json`
+
+- For use case debugging purposes to get Kubernetes cluster external access to SO CatalogDB (GET operations only), modify SO CatalogDB service to NodePort instead of ClusterIP. You may also create separate own NodePort if you wish, but here we have just edited directly the service with kubectl.
+
+::
+
+ kubectl -n onap edit svc so-catalog-db-adapter
+ - .spec.type: ClusterIP
+ + .spec.type: NodePort
+ + .spec.ports[0].nodePort: 30120
+
+.. note:: The port number 30120 is used in included Postman collection
+
+- You may also want to inspect after SDC distribution if CBA has been correctly delivered to CDS. In order to do it, there are created relevant calls later described in doc, however CDS since Frankfurt doesn't expose blueprints-processor's service as NodePort. This is OPTIONAL but if you'd like to use these calls later, you need to expose service in similar way as so-catalog-db-adapter above:
+
+::
+
+ kubectl edit -n onap svc cds-blueprints-processor-http
+ - .spec.type: ClusterIP
+ + .spec.type: NodePort
+ + .spec.ports[0].nodePort: 30499
+
+.. note:: The port number 30499 is used in included Postman collection
+
+**Postman variables:**
+
+Most of the Postman variables are automated by Postman scripts and environment file provided, but there are few mandatory variables to fill by user.
+
+===================== ===================
+Variable Description
+--------------------- -------------------
+k8s ONAP Kubernetes host
+sdnc_port port of sdnc service for accessing MDSAL
+service-name name of service as defined in SDC
+service-version version of service defined in SDC (if service wasn't updated, it should be set to "1.0")
+service-instance-name name of instantiated service (if ending with -{num}, will be autoincremented for each instantiation request)
+===================== ===================
+
+You can get the sdnc_port value with
+
+::
+
+ kubectl -n onap get svc sdnc -o json | jq '.spec.ports[]|select(.port==8282).nodePort'
+
+Automation Environment Setup
+............................
+
+Whole content of this use case is stored into single git repository and it contains both the required onboarding information as well as automation scripts for onboarding and instantiation of the use case.
+
+::
+
+ git clone --single-branch --branch jakarta "https://gerrit.onap.org/r/demo"
+ cd demo/heat/vFW_CNF_CDS/templates
+
+In order to prepare environment for onboarding and instantiation of the use case make sure you have *git*, *make*, *helm* and *pipenv* applications installed.
+
+The automation scripts are based on `Python SDK`_ and are adopted to automate process of service onboarding, instantiation, deletion and cloud region registration. To configure them for further use:
+
+::
+
+ cd demo/heat/vFW_CNF_CDS/automation
+
+1. Install required packages with
+::
+
+    pipenv install
+
+2. Run virtual python environment
+::
+
+ pipenv shell --fancy
+
+3. Add kubeconfig files, one for ONAP cluster, and one for k8s cluster that will host vFW
+
+.. note:: Both files can be configured after creation of k8s cluster for vFW instance `2-1 Installation of Managed Kubernetes`_. Make sure that they have configured external IP address properly. If any cluster uses self signed certificates set also *insecure-skip-tls-verify* flag in the config file.
+
+- artifacts/cluster_kubeconfig - IP address must be reachable by ONAP pods, especially *multicloud-k8s* pod
+
+- artifacts/onap_kubeconfig - IP address must be reachable by automation scripts
+
+4. Modify config.py file
+
+- SCENARIO - like described in the `The vFW CNF Use Case`_ section
+- NATIVE - when enabled (default) **Native Helm** path will be used, otherwise **Dummy Heat** path will be used (deprecated)
+- MACRO_INSTANTIATION - instantiation method used: macro (default) or a'la carte. A'la carte only for the purpose of use with other use cases
+- K8S_NAMESPACE - k8s namespace to use for deployment of CNF (vfirewall by default)
+- K8S_VERSION - version of the k8s cluster
+- K8S_REGION - name of the k8s region from the CLOUD_REGIONS (kud by default)
+- CLOUD_REGIONS - configuration of k8s or Openstack regions
+- GLOBAL_CUSTOMER_ID - identifier of customer in ONAP
+- VENDOR - name of the Vendor in ONAP
+- SERVICENAME - **Name of your service model in SDC**
+- SKIP_POST_INSTANTIATION - whether post instantiation configuration should be run (it is set indirectly by *SCENARIO*)
+- VNF_PARAM_LIST - list of parameters to pass for VNF creation process
+- VF_MODULE_PARAM_LIST - list of parameters to pass for VF Module creation
+
+.. note:: For automation script it is necessary to modify only SCENARIO constant. Other constants may be modified if needed.
+
+AAI
+...
+
+Some basic entries are needed in ONAP AAI. These entries are needed once per ONAP installation and do not need to be repeated when running multiple demos based on the same definitions.
+
+Create all these entries into AAI in this order. Postman collection provided in this demo can be used for creating each entry.
+
+**<MANUAL>**
+::
+
+ Postman -> Initial ONAP setup -> Create
+
+- Create Customer
+- Create Owning-entity
+- Create Platform
+- Create Project
+- Create Line Of Business
+
+Corresponding GET operations in "Check" folder in Postman can be used to verify entries created. Postman collection also includes some code that tests/verifies some basic issues e.g. gives error if entry already exists.
+
+**<AUTOMATED>**
+
+This step is performed jointly with onboarding step `3-2 Onboarding`_
+
+Naming Policy
++++++++++++++
+
+Naming policy is needed to generate unique names for all instance time resources that are wanted to be modeled in the way naming policy is used. Those are normally VNF, VNFC and VF-module names, network names etc. Naming is general ONAP feature and not limited to this use case.
+
+This usecase leverages default ONAP naming policy - "SDNC_Policy.ONAP_NF_NAMING_TIMESTAMP".
+To check that the naming policy is created and pushed OK, we can run the command below from inside any ONAP pod.
+
+::
+
+ curl --silent -k --user 'healthcheck:zb!XztG34' -X GET "https://policy-api:6969/policy/api/v1/policytypes/onap.policies.Naming/versions/1.0.0/policies/SDNC_Policy.ONAP_NF_NAMING_TIMESTAMP/versions/1.0.0"
+
+.. note:: Please change credentials respectively to your installation. The required credentials can be retrieved with instruction `Retrieving logins and passwords of ONAP components`_
+
+PART 2 - Installation of managed Kubernetes cluster
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this demo the target cloud region is a Kubernetes cluster of your choice basically just like with Openstack. ONAP platform is a bit too much hard wired to Openstack and it's visible in many demos.
+
+2-1 Installation of Managed Kubernetes
+......................................
+
+In this demo we use Kubernetes deployment used by ONAP multicloud/k8s team to test their plugin features see `KUD github`_. There's also some outdated instructions in ONAP wiki `KUD in Wiki`_.
+
+KUD deployment is fully automated and also used in ONAP's CI/CD to automatically verify all `Multicloud k8s gerrit`_ commits (see `KUD Jenkins ci/cd verification`_) and that's quite good (and rare) level of automated integration testing in ONAP. KUD deployment is used as its installation is automated and it also includes a bunch of Kubernetes plugins used to test various k8s plugin features. In addition to deployment, KUD repository also contains test scripts to automatically test multicloud/k8s plugin features. Those scripts are run in CI/CD.
+
+See `KUD subproject in github`_ for a list of additional plugins this Kubernetes deployment has. In this demo the tested CNF is dependent on following plugins:
+
+- ovn4nfv
+- Multus
+- Virtlet
+
+Follow instructions in `KUD github`_ and install target Kubernetes cluster in your favorite machine(s), simplest being just one machine. Your cluster node(s) need to be accessible from ONAP Kubernetes nodes. Make sure your installed *pip* is of **version < 21.0**. Version 21 does not support python 2.7 that is used in *aio.sh* script. Also to avoid performance problems of your k8s cluster make sure you install only necessary plugins and before running *aio.sh* script execute following command
+::
+
+ export KUD_ADDONS="virtlet ovn4nfv"
+
+.. warning:: In order to run vFW CNF Use Case deployment test please make sure that this workaround does not have to be applied as well. `KUD Interface Permission`_
+
+2-2 Cloud Registration
+......................
+
+Managed Kubernetes cluster is registered here into ONAP as one cloud region. This obviously is done just one time for this particular cloud. Cloud registration information is kept in AAI.
+
+**<MANUAL>**
+
+Postman collection has a folder/entry for each step. Execute in this order.
+::
+
+ Postman -> K8s Cloud Region Registration -> Create
+
+- Create Complex
+- Create Cloud Region
+- Create Complex-Cloud Region Relationship
+- Create Service
+- Create Service Subscription
+- Create Cloud Tenant
+- Create Availability Zone
+- Upload Connectivity Info
+
+.. note:: For "Upload Connectivity Info" call you need to provide kubeconfig file of existing KUD cluster. You can find that kubeconfig on deployed KUD in the directory `~/.kube/config` and this file can be easily copied e.g. via SCP. Please ensure that kubeconfig contains external IP of K8s cluster in kubeconfig and correct it, if it's not.
+
+SO database needs to be (manually) modified for SO to know that this particular cloud region is to be handled by multicloud. Values we insert needs to obviously match to the ones we populated into AAI.
+
+.. note:: Please change credentials respectively to your installation. The required credentials can be retrieved with instruction `Retrieving logins and passwords of ONAP components`_
+
+::
+
+ kubectl -n onap exec onap-mariadb-galera-0 -it -- mysql -uroot -psecretpassword -D catalogdb
+ select * from cloud_sites;
+ insert into cloud_sites(ID, REGION_ID, IDENTITY_SERVICE_ID, CLOUD_VERSION, CLLI, ORCHESTRATOR) values("k8sregionfour", "k8sregionfour", "DEFAULT_KEYSTONE", "2.5", "clli2", "multicloud");
+ select * from cloud_sites;
+ exit
+
+.. note:: The configuration of the new k8s cloud site is documented also here `K8s cloud site config`_
+
+**<AUTOMATED>**
+
+Please copy the kubeconfig file of existing KUD cluster to automation/artifacts/cluster_kubeconfig location `Automation Environment Setup`_ - step **3**. You can find that kubeconfig on deployed KUD in the directory `~/.kube/config` and this file can be easily copied e.g. via SCP. Please ensure that kubeconfig contains external IP of K8s cluster in kubeconfig and correct it, if it's not.
+
+::
+
+ python create_cloud_regions.py
+
+PART 3 - Execution of the Use Case
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This part contains all the steps to run the use case by using ONAP GUIs, Postman or Python automation scripts.
+
+3-1 CNF Orchestration Paths in ONAP
+...................................
+
+Following picture describe the overall sequential flow of the use case for **Native Helm** path (with CNF Adapter)
+
+Native Helm CNF Orchestration
+.............................
+
+Introduced in the Guilin release CNF orchestration method brings native distribution of Helm packages from SDC and native orchestration of CNFs (Helm packages) with SO. SO leverages CNF adapter to interact with K8sPlugin that takes responsibility for the communication with k8s clusters. Heat templates are not required in the SDC onboarding package and, thanks to the fact that SO knows about Helm package orchestration, synchronization of data between k8s clusters and AAI is possible. Only in this path, since Istanbul release, k8s-resource object is created in relation to tenant, vf-module and generic-vnf objects in AAI. SO CNF adapter is responsible for synchronization of data between AAI and k8s cluster, however currently it happens only once - after creation of CNF by SO, so any further changes (like new pods) will not be synchronized into AAI.
+
+.. figure:: files/vFW_CNF_CDS/Native_Helm_Flow.png
+ :align: center
+
+ vFW CNF CDS Use Case sequence flow for *Native Helm* (Guilin+) path.
+
+
+Kubernetes and Helm Compatibility
+.................................
+
+K8sPlugin, in the Istanbul release (0.9.x), supports Helm packages that can be validated by Helm 3.5 application. It means that new Helm features introduced after Helm 3.5 version are not supported currently. Moreover, since Jakarta release K8sPlugin 0.10.x implementation supports upgrade operation but CNF Upgrade orchestration workflow is not yet fully supported in SO orchestration workflows. In consequence, a new service model can be distributed with a new Helm package over SDC but the Helm upgrade procedure must be performed by a direct call to k8sPlugin. The request payload is almost the same like for instance create but release-name comes from the already created instance.
+
+::
+
+ curl -i -X POST http://${K8S_NODE_IP}:30280/api/multicloud-k8s/v1/v1/instance/{rb-instance-id}/upgrade
+
+K8sPlugin utilizes also v0.19.4 version of K8s client and its compatibility matrix with k8s clusters can be found here `K8s Client Compatibility`_, Compatibility Matrix section.
+
+3-2 Onboarding
+..............
+
+.. note:: Make sure you have performed `Automation Environment Setup`_ steps before following actions here.
+
+Creating Onboarding Package
++++++++++++++++++++++++++++
+
+Content of the onboarding package can be created with provided Makefile in the *template* folder.
+
+Complete content of both Onboarding Packages for **Dummy Heat** and **Native Helm** is packaged to the following VSP onboarding package files:
+
+- **Dummy Heat** path: **vfw_k8s_demo.zip**
+
+- **Native Helm** path: **native_vfw_k8s_demo.zip**
+
+.. note:: Procedure requires *make* and *helm* applications installed
+
+::
+
+ git clone --single-branch --branch jakarta "https://gerrit.onap.org/r/demo"
+ cd demo/heat/vFW_CNF_CDS/templates
+ make
+
+The result of make operation execution is following:
+::
+
+ make clean
+ make[1]: Entering directory '/mnt/c/Users/advnet/Desktop/SOURCES/demo/heat/vFW_CNF_CDS/templates'
+ rm -rf package_dummy/
+ rm -rf package_native/
+ rm -rf cba_dummy
+ rm -f vfw_k8s_demo.zip
+ rm -f native_vfw_k8s_demo.zip
+ make[1]: Leaving directory '/mnt/c/Users/advnet/Desktop/SOURCES/demo/heat/vFW_CNF_CDS/templates'
+ make all
+ make[1]: Entering directory '/mnt/c/Users/advnet/Desktop/SOURCES/demo/heat/vFW_CNF_CDS/templates'
+ mkdir package_dummy/
+ mkdir package_native/
+ make -C helm
+ make[2]: Entering directory '/mnt/c/Users/advnet/Desktop/SOURCES/demo/heat/vFW_CNF_CDS/templates/helm'
+ rm -f base_template-*.tgz
+ rm -f helm_base_template.tgz
+ rm -f base_template_cloudtech_k8s_charts.tgz
+ helm package base_template
+ Successfully packaged chart and saved it to: /mnt/c/Users/advnet/Desktop/SOURCES/demo/heat/vFW_CNF_CDS/templates/helm/base_template-0.2.0.tgz
+ mv base_template-*.tgz helm_base_template.tgz
+ cp helm_base_template.tgz base_template_cloudtech_k8s_charts.tgz
+ rm -f vpkg-*.tgz
+ rm -f helm_vpkg.tgz
+ rm -f vpkg_cloudtech_k8s_charts.tgz
+ helm package vpkg
+ Successfully packaged chart and saved it to: /mnt/c/Users/advnet/Desktop/SOURCES/demo/heat/vFW_CNF_CDS/templates/helm/vpkg-0.2.0.tgz
+ mv vpkg-*.tgz helm_vpkg.tgz
+ cp helm_vpkg.tgz vpkg_cloudtech_k8s_charts.tgz
+ rm -f vfw-*.tgz
+ rm -f helm_vfw.tgz
+ rm -f vfw_cloudtech_k8s_charts.tgz
+ helm package vfw
+ Successfully packaged chart and saved it to: /mnt/c/Users/advnet/Desktop/SOURCES/demo/heat/vFW_CNF_CDS/templates/helm/vfw-0.2.0.tgz
+ mv vfw-*.tgz helm_vfw.tgz
+ cp helm_vfw.tgz vfw_cloudtech_k8s_charts.tgz
+ rm -f vsn-*.tgz
+ rm -f helm_vsn.tgz
+ rm -f vsn_cloudtech_k8s_charts.tgz
+ helm package vsn
+ Successfully packaged chart and saved it to: /mnt/c/Users/advnet/Desktop/SOURCES/demo/heat/vFW_CNF_CDS/templates/helm/vsn-0.2.0.tgz
+ mv vsn-*.tgz helm_vsn.tgz
+ cp helm_vsn.tgz vsn_cloudtech_k8s_charts.tgz
+ make[2]: Leaving directory '/mnt/c/Users/advnet/Desktop/SOURCES/demo/heat/vFW_CNF_CDS/templates/helm'
+ mv helm/helm_*.tgz package_native/
+ mv helm/*.tgz package_dummy/
+ cp base_dummy/* package_dummy/
+ cp base_native/* package_native/
+ cp -r cba cba_dummy
+ sed -i 's/"helm_/"/g' cba_dummy/Definitions/vFW_CNF_CDS.json
+ cd cba_dummy/ && zip -r CBA.zip . -x pom.xml .idea/\* target/\*
+ adding: Definitions/ (stored 0%)
+ adding: Definitions/artifact_types.json (deflated 69%)
+ adding: Definitions/data_types.json (deflated 88%)
+ adding: Definitions/node_types.json (deflated 90%)
+ adding: Definitions/policy_types.json (stored 0%)
+ adding: Definitions/relationship_types.json (stored 0%)
+ adding: Definitions/resources_definition_types.json (deflated 94%)
+ adding: Definitions/vFW_CNF_CDS.json (deflated 87%)
+ adding: Scripts/ (stored 0%)
+ adding: Scripts/kotlin/ (stored 0%)
+ adding: Scripts/kotlin/README.md (stored 0%)
+ adding: Templates/ (stored 0%)
+ adding: Templates/base_template-mapping.json (deflated 89%)
+ adding: Templates/base_template-template.vtl (deflated 87%)
+ adding: Templates/k8s-profiles/ (stored 0%)
+ adding: Templates/k8s-profiles/vfw-cnf-cds-base-profile.tar.gz (stored 0%)
+ adding: Templates/k8s-profiles/vfw-cnf-cds-vpkg-profile/ (stored 0%)
+ adding: Templates/k8s-profiles/vfw-cnf-cds-vpkg-profile/manifest.yaml (deflated 35%)
+ adding: Templates/k8s-profiles/vfw-cnf-cds-vpkg-profile/override_values.yaml (stored 0%)
+ adding: Templates/k8s-profiles/vfw-cnf-cds-vpkg-profile/ssh-service-mapping.json (deflated 51%)
+ adding: Templates/k8s-profiles/vfw-cnf-cds-vpkg-profile/ssh-service-template.yaml.vtl (deflated 56%)
+ adding: Templates/nf-params-mapping.json (deflated 88%)
+ adding: Templates/nf-params-template.vtl (deflated 44%)
+ adding: Templates/vfw-mapping.json (deflated 89%)
+ adding: Templates/vfw-template.vtl (deflated 87%)
+ adding: Templates/vnf-mapping.json (deflated 89%)
+ adding: Templates/vnf-template.vtl (deflated 93%)
+ adding: Templates/vpkg-mapping.json (deflated 89%)
+ adding: Templates/vpkg-template.vtl (deflated 87%)
+ adding: Templates/vsn-mapping.json (deflated 89%)
+ adding: Templates/vsn-template.vtl (deflated 87%)
+ adding: TOSCA-Metadata/ (stored 0%)
+ adding: TOSCA-Metadata/TOSCA.meta (deflated 37%)
+ cd cba/ && zip -r CBA.zip . -x pom.xml .idea/\* target/\*
+ adding: Definitions/ (stored 0%)
+ adding: Definitions/artifact_types.json (deflated 69%)
+ adding: Definitions/data_types.json (deflated 88%)
+ adding: Definitions/node_types.json (deflated 90%)
+ adding: Definitions/policy_types.json (stored 0%)
+ adding: Definitions/relationship_types.json (stored 0%)
+ adding: Definitions/resources_definition_types.json (deflated 94%)
+ adding: Definitions/vFW_CNF_CDS.json (deflated 87%)
+ adding: Scripts/ (stored 0%)
+ adding: Scripts/kotlin/ (stored 0%)
+ adding: Scripts/kotlin/README.md (stored 0%)
+ adding: Templates/ (stored 0%)
+ adding: Templates/base_template-mapping.json (deflated 89%)
+ adding: Templates/base_template-template.vtl (deflated 87%)
+ adding: Templates/k8s-profiles/ (stored 0%)
+ adding: Templates/k8s-profiles/vfw-cnf-cds-base-profile.tar.gz (stored 0%)
+ adding: Templates/k8s-profiles/vfw-cnf-cds-vpkg-profile/ (stored 0%)
+ adding: Templates/k8s-profiles/vfw-cnf-cds-vpkg-profile/manifest.yaml (deflated 35%)
+ adding: Templates/k8s-profiles/vfw-cnf-cds-vpkg-profile/override_values.yaml (stored 0%)
+ adding: Templates/k8s-profiles/vfw-cnf-cds-vpkg-profile/ssh-service-mapping.json (deflated 51%)
+ adding: Templates/k8s-profiles/vfw-cnf-cds-vpkg-profile/ssh-service-template.yaml.vtl (deflated 56%)
+ adding: Templates/nf-params-mapping.json (deflated 88%)
+ adding: Templates/nf-params-template.vtl (deflated 44%)
+ adding: Templates/vfw-mapping.json (deflated 89%)
+ adding: Templates/vfw-template.vtl (deflated 87%)
+ adding: Templates/vnf-mapping.json (deflated 89%)
+ adding: Templates/vnf-template.vtl (deflated 93%)
+ adding: Templates/vpkg-mapping.json (deflated 89%)
+ adding: Templates/vpkg-template.vtl (deflated 87%)
+ adding: Templates/vsn-mapping.json (deflated 89%)
+ adding: Templates/vsn-template.vtl (deflated 87%)
+ adding: TOSCA-Metadata/ (stored 0%)
+ adding: TOSCA-Metadata/TOSCA.meta (deflated 37%)
+ mv cba/CBA.zip package_native/
+ mv cba_dummy/CBA.zip package_dummy/
+ cd package_dummy/ && zip -r vfw_k8s_demo.zip .
+ adding: base_template.env (deflated 22%)
+ adding: base_template.yaml (deflated 59%)
+ adding: base_template_cloudtech_k8s_charts.tgz (stored 0%)
+ adding: CBA.zip (stored 0%)
+ adding: MANIFEST.json (deflated 84%)
+ adding: vfw.env (deflated 23%)
+ adding: vfw.yaml (deflated 60%)
+ adding: vfw_cloudtech_k8s_charts.tgz (stored 0%)
+ adding: vpkg.env (deflated 13%)
+ adding: vpkg.yaml (deflated 59%)
+ adding: vpkg_cloudtech_k8s_charts.tgz (stored 0%)
+ adding: vsn.env (deflated 15%)
+ adding: vsn.yaml (deflated 59%)
+ adding: vsn_cloudtech_k8s_charts.tgz (stored 0%)
+ cd package_native/ && zip -r native_vfw_k8s_demo.zip .
+ adding: CBA.zip (stored 0%)
+ adding: helm_base_template.tgz (stored 0%)
+ adding: helm_vfw.tgz (stored 0%)
+ adding: helm_vpkg.tgz (stored 0%)
+ adding: helm_vsn.tgz (stored 0%)
+ adding: MANIFEST.json (deflated 71%)
+ mv package_dummy/vfw_k8s_demo.zip .
+ mv package_native/native_vfw_k8s_demo.zip .
+ $
+
+Import this package into SDC and follow onboarding steps.
+
+Service Creation with SDC
++++++++++++++++++++++++++
+
+**<MANUAL>**
+
+Service Creation in SDC is composed of the same steps that are performed by most other use-cases. For reference, you can look at `vLB use-case`_
+
+Onboard VSP
+
+- Remember during VSP onboard to choose "Network Package" Onboarding procedure
+
+Create VF and Service
+Service -> Properties Assignment -> Choose VF (at right box):
+
+- sdnc_artifact_name - vnf
+- sdnc_model_name - vFW_CNF_CDS
+- sdnc_model_version - 8.0.0
+- skip_post_instantiation_configuration - True
+
+.. note:: Since Honolulu skip_post_instantiation_configuration flag can be set to *False* if we want to run config-assign/config-deploy operations.
+
+::
+
+ python onboarding.py
+
+Distribution Of Service
++++++++++++++++++++++++
+
+**<MANUAL>**
+
+Distribute service.
+
+Verify in SDC UI if distribution was successful. In case of any errors (sometimes SO fails on accepting CLOUD_TECHNOLOGY_SPECIFIC_ARTIFACT), try redistribution. You can also verify distribution for few components manually:
+
+- SDC:
+
+ SDC Catalog database should have our service now defined.
+
+ ::
+
+ Postman -> LCM -> [SDC] Catalog Service
+
+ ::
+
+ {
+ "uuid": "64dd38f3-2307-4e0a-bc98-5c2cbfb260b6",
+ "invariantUUID": "cd1a5c2d-2d4e-4d62-ac10-a5fe05e32a22",
+ "name": "vfw_cnf_cds_svc",
+ "version": "1.0",
+ "toscaModelURL": "/sdc/v1/catalog/services/64dd38f3-2307-4e0a-bc98-5c2cbfb260b6/toscaModel",
+ "category": "Network L4+",
+ "lifecycleState": "CERTIFIED",
+ "lastUpdaterUserId": "cs0008",
+ "distributionStatus": "DISTRIBUTED"
+ }
+
+ Listing should contain entry with our service name **vfw_cnf_cds_svc**.
+
+.. note:: Note that it's an example name, it depends on how your model is named during Service design in SDC and must be kept in sync with Postman variables.
+
+- SO:
+
+ SO Catalog database should have our service NFs defined now.
+
+ ::
+
+ Postman -> LCM -> [SO] Catalog DB Service xNFs
+
+ ::
+
+ {
+ "serviceVnfs": [
+ {
+ "modelInfo": {
+ "modelName": "VfVfwK8sDemoCnfMc202109231",
+ "modelUuid": "70edaca8-8c79-468a-aa76-8224cfe686d0",
+ "modelInvariantUuid": "7901fc89-a94d-434a-8454-1e27b99dc0e2",
+ "modelVersion": "1.0",
+ "modelCustomizationUuid": "86dc8af4-aa17-4fc7-9b20-f12160d99718",
+ "modelInstanceName": "vfw_cnf_cds_vsp 0"
+ },
+ "toscaNodeType": "org.openecomp.resource.vf.VfwCnfCdsVsp",
+ "nfFunction": null,
+ "nfType": null,
+ "nfRole": null,
+ "nfNamingCode": null,
+ "multiStageDesign": "false",
+ "vnfcInstGroupOrder": null,
+ "resourceInput": "TBD",
+ "vfModules": [
+ {
+ "modelInfo": {
+ "modelName": "VfVfwK8sDemoCnfMc202109231..helm_base_template..module-4",
+ "modelUuid": "a9f5d65f-20c3-485c-8cf9-eda9ea94300e",
+ "modelInvariantUuid": "7888f606-3ee8-4edb-b96d-467fead6ee4f",
+ "modelVersion": "1",
+ "modelCustomizationUuid": "b9faba47-d03d-4ba1-a117-4c19632b2136"
+ },
+ "isBase": false,
+ "vfModuleLabel": "base_template",
+ "initialCount": 1,
+ "hasVolumeGroup": false
+ },
+ {
+ "modelInfo": {
+ "modelName": "VfVfwK8sDemoCnfMc202109293..helm_vsn..module-1",
+ "modelUuid": "8e72ed23-4842-471a-ad83-6a4d285c48e1",
+ "modelInvariantUuid": "4f5a8a02-0dc6-4387-b86e-bd352f711e18",
+ "modelVersion": "1",
+ "modelCustomizationUuid": "ab5614d6-25c2-4863-bad3-93e354b4d5ba"
+ },
+ "isBase": false,
+ "vfModuleLabel": "vsn",
+ "initialCount": 0,
+ "hasVolumeGroup": false
+ },
+ {
+ "modelInfo": {
+ "modelName": "VfVfwK8sDemoCnfMc202109293..helm_vpkg..module-2",
+ "modelUuid": "64f9d622-a8c1-4992-ba35-abdc13f87660",
+ "modelInvariantUuid": "88d8d71a-30c9-4e00-a6b9-bd86bae7ed37",
+ "modelVersion": "1",
+ "modelCustomizationUuid": "37ab4199-19aa-4f63-9a11-d31b8c25ce46"
+ },
+ "isBase": false,
+ "vfModuleLabel": "vpkg",
+ "initialCount": 0,
+ "hasVolumeGroup": false
+ },
+ {
+ "modelInfo": {
+ "modelName": "VfVfwK8sDemoCnfMc202109293..helm_vfw..module-3",
+ "modelUuid": "f6f62096-d5cc-474e-82c7-655e7d6628b2",
+ "modelInvariantUuid": "6077ce70-3a1d-47e6-87a0-6aed6a29b089",
+ "modelVersion": "1",
+ "modelCustomizationUuid": "879cda5e-7af9-43d2-bd6c-50e330ab328e"
+ },
+ "isBase": false,
+ "vfModuleLabel": "vfw",
+ "initialCount": 0,
+ "hasVolumeGroup": false
+ }
+ ],
+ "groups": []
+ }
+ ]
+ }
+
+.. note:: For **Native Helm** path both modelName will have prefix *helm_* i.e. *helm_vfw* and vfModuleLabel will have *helm_* keyword inside i.e. *VfVfwK8sDemoCnfMc202109293..helm_vfw..module-3*
+
+- SDNC:
+
+  SDNC should have its database updated with *sdnc_* properties that were set during service modeling.
+
+.. note:: Please change credentials respectively to your installation. The required credentials can be retrieved with instruction `Retrieving logins and passwords of ONAP components`_
+
+
+::
+
+ kubectl -n onap exec onap-mariadb-galera-0 -it -- sh
+ mysql -uroot -psecretpassword -D sdnctl
+ MariaDB [sdnctl]> select sdnc_model_name, sdnc_model_version, sdnc_artifact_name from VF_MODEL WHERE customization_uuid = '86dc8af4-aa17-4fc7-9b20-f12160d99718';
+ +-----------------+--------------------+--------------------+
+ | sdnc_model_name | sdnc_model_version | sdnc_artifact_name |
+ +-----------------+--------------------+--------------------+
+ | vFW_CNF_CDS | 8.0.0 | vnf |
+ +-----------------+--------------------+--------------------+
+ 1 row in set (0.00 sec)
+
+
+.. note:: customization_uuid value is the modelCustomizationUuid of the VNF (serviceVnfs response in 2nd Postman call from SO Catalog DB)
+
+- CDS:
+
+ CDS should onboard CBA uploaded as part of VF.
+
+ ::
+
+ Postman -> Distribution Verification -> [CDS] List CBAs
+
+ ::
+
+ [
+ {
+ "blueprintModel": {
+ "id": "c505e516-b35d-4181-b1e2-bcba361cfd0a",
+ "artifactUUId": null,
+ "artifactType": "SDNC_MODEL",
+ "artifactVersion": "8.0.0",
+ "artifactDescription": "Controller Blueprint for vFW_CNF_CDS:8.0.0",
+ "internalVersion": null,
+ "createdDate": "2020-05-29T06:02:20.000Z",
+ "artifactName": "vFW_CNF_CDS",
+ "published": "N",
+ "updatedBy": "Samuli Silvius <s.silvius@partner.samsung.com>",
+ "tags": "Samuli Silvius, Lukasz Rajewski, vFW_CNF_CDS"
+ }
+ }
+ ]
+
+ The list should have the matching entries with SDNC database:
+
+ - sdnc_model_name == artifactName
+ - sdnc_model_version == artifactVersion
+
+ You can also use Postman to download CBA for further verification but it's fully optional.
+
+ ::
+
+ Postman -> Distribution Verification -> [CDS] CBA Download
+
+- K8splugin:
+
+ K8splugin should onboard 4 resource bundles related to helm resources:
+
+ ::
+
+ Postman -> Distribution Verification -> [K8splugin] List Resource Bundle Definitions
+
+ ::
+
+ [
+ {
+ "rb-name": "a9f5d65f-20c3-485c-8cf9-eda9ea94300e",
+ "rb-version": "b9faba47-d03d-4ba1-a117-4c19632b2136",
+ "chart-name": "base_template",
+ "description": "",
+ "labels": {
+ "vf_module_model_name": "VfVfwK8sDemoCnfMc202109231..helm_base_template..module-4",
+ "vf_module_model_uuid": "7888f606-3ee8-4edb-b96d-467fead6ee4f"
+ }
+ },
+ {
+ "rb-name": "f6f62096-d5cc-474e-82c7-655e7d6628b2",
+ "rb-version": "879cda5e-7af9-43d2-bd6c-50e330ab328e",
+ "chart-name": "vfw",
+ "description": "",
+ "labels": {
+ "vf_module_model_name": "VfVfwK8sDemoCnfMc202109293..helm_vfw..module-3",
+ "vf_module_model_uuid": "6077ce70-3a1d-47e6-87a0-6aed6a29b089"
+ }
+ },
+ {
+ "rb-name": "8e72ed23-4842-471a-ad83-6a4d285c48e1",
+ "rb-version": "ab5614d6-25c2-4863-bad3-93e354b4d5ba",
+ "chart-name": "vsn",
+ "description": "",
+ "labels": {
+ "vf_module_model_name": "VfVfwK8sDemoCnfMc202109293..helm_vsn..module-1",
+ "vf_module_model_uuid": "4f5a8a02-0dc6-4387-b86e-bd352f711e18"
+ }
+ },
+ {
+ "rb-name": "64f9d622-a8c1-4992-ba35-abdc13f87660",
+ "rb-version": "37ab4199-19aa-4f63-9a11-d31b8c25ce46",
+ "chart-name": "vpkg",
+ "description": "",
+ "labels": {
+ "vf_module_model_name": "VfVfwK8sDemoCnfMc202109293..helm_vpkg..module-2",
+ "vf_module_model_uuid": "88d8d71a-30c9-4e00-a6b9-bd86bae7ed37"
+ }
+ }
+ ]
+
+**<AUTOMATED>**
+
+Distribution is a part of the onboarding step and at this stage is performed
+
+3-3 CNF Instantiation
+.....................
+
+This is the whole beef of the use case and furthermore the core of it is that we can instantiate any amount of instances of the same CNF, each running and working completely on its own. Very basic functionality on the VM (VNF) side but for Kubernetes and ONAP integration this is the first milestone towards other normal use cases familiar for VNFs.
+
+**<MANUAL>**
+
+Postman collection is automated to populate needed parameters when queries are run in correct order. If you did not already run following 2 queries after distribution (to verify distribution), run those now:
+
+::
+
+ Postman -> LCM -> 1.[SDC] Catalog Service
+
+::
+
+ Postman -> LCM -> 2. [SO] Catalog DB Service xNFs
+
+Now actual instantiation can be triggered with:
+
+::
+
+ Postman -> LCM -> 3. [SO] Self-Serve Service Assign & Activate
+
+**<AUTOMATED>**
+
+Required inputs for instantiation process are taken from the *config.py* file.
+::
+
+ python instantiation.py
+
+
+Finally, to follow the progress of instantiation request with SO's GET request:
+
+**<MANUAL>**
+
+::
+
+ Postman -> LCM -> 4. [SO] Infra Active Requests
+
+The successful reply payload in that query should start like this:
+
+::
+
+ {
+ "requestStatus": "COMPLETE",
+ "statusMessage": "Macro-Service-createInstance request was executed correctly.",
+ "flowStatus": "Successfully completed all Building Blocks",
+ "progress": 100,
+ "startTime": 1590996766000,
+ "endTime": 1590996945000,
+ "source": "Postman",
+ "vnfId": "93b3350d-ed6f-413b-9cc5-a158c1676eb0",
+ "tenantId": "aaaa",
+ "requestBody": "**REDACTED FOR READABILITY**",
+ "lastModifiedBy": "CamundaBPMN",
+ "modifyTime": "2020-06-01T07:35:45.000+0000",
+ "cloudRegion": "k8sregionfour",
+ "serviceInstanceId": "8ead0480-cf44-428e-a4c2-0e6ed10f7a72",
+ "serviceInstanceName": "vfw-cnf-16",
+ "requestScope": "service",
+ "requestAction": "createInstance",
+ "requestorId": "11c2ddb7-4659-4bf0-a685-a08dcbb5a099",
+ "requestUrl": "http://infra:30277/onap/so/infra/serviceInstantiation/v7/serviceInstances",
+ "tenantName": "k8stenant",
+ "cloudApiRequests": [],
+ "requestURI": "6a369c8e-d492-4ab5-a107-46804eeb7873",
+ "_links": {
+ "self": {
+ "href": "http://infra:30277/infraActiveRequests/6a369c8e-d492-4ab5-a107-46804eeb7873"
+ },
+ "infraActiveRequests": {
+ "href": "http://infra:30277/infraActiveRequests/6a369c8e-d492-4ab5-a107-46804eeb7873"
+ }
+ }
+ }
+
+
+Progress can also be followed with `SO Monitoring`_ dashboard.
+
+Service Instance Termination
+++++++++++++++++++++++++++++
+
+Service instance can be terminated with the following postman call:
+
+**<MANUAL>**
+::
+
+ Postman -> LCM -> 5. [SO] Service Delete
+
+**<AUTOMATED>**
+::
+
+ python delete.py
+
+.. note:: Automated service deletion mechanism takes information about the instantiated service instance from the *config.py* file and *SERVICE_INSTANCE_NAME* variable. If you modify this value before the deletion of an existing service instance, you will lose the opportunity to easily delete the already created service instance.
+
+Second Service Instance Instantiation
++++++++++++++++++++++++++++++++++++++
+
+To finally verify that all the work done within this demo, it should be possible to instantiate second vFW instance successfully.
+
+Trigger new instance creation. You can use the previous call or a separate one that will utilize the profile templating mechanism implemented in CBA:
+
+**<MANUAL>**
+::
+
+ Postman -> LCM -> 6. [SO] Self-Serve Service Assign & Activate - Second
+
+**<AUTOMATED>**
+
+Before a second instance of the service is created you need to modify the *config.py* file, changing *SERVICENAME* and *SERVICE_INSTANCE_NAME* to different values and changing the value of the *k8s-rb-profile-name* parameter for the *vpg* module from *default* or *vfw-cnf-cds-base-profile* to *vfw-cnf-cds-vpkg-profile*, which will result in instantiation of an additional ssh service for the *vpg* module. Second onboarding in the automated case is required due to the existing limitations of the *python-sdk* libraries that create the vf-module instance name based on the vf-module model name. For the manual Postman option the vf-module instance name is set on a service instance name basis, which makes it unique.
+::
+
+ python onboarding.py
+ python instantiation.py
+
+3-4 Results and Logs
+....................
+
+Now multiple instances of Kubernetes variant of vFW are running in target VIM (KUD deployment).
+
+.. figure:: files/vFW_CNF_CDS/vFW_Instance_In_Kubernetes.png
+ :align: center
+
+ vFW Instance In Kubernetes
+
+**<MANUAL>**
+
+To review situation after instantiation from different ONAP components, most of the info can be found using Postman queries provided. For each query, example response payload(s) is/are saved and can be found from top right corner of the Postman window.
+
+::
+
+ Postman -> Instantiation verification**
+
+Execute example Postman queries and check example section to see the valid results.
+
+========================== =================
+Verify Target Postman query
+-------------------------- -----------------
+Service Instances in AAI **Postman -> Instantiation verification -> [AAI] List Service Instances**
+Service Instances in MDSAL **Postman -> Instantiation verification -> [SDNC] GR-API MD-SAL Services**
+K8S Instances in KUD **Postman -> Instantiation verification -> [K8splugin] List Instances**
+========================== =================
+
+.. note:: "[AAI] List vServers <Empty>" Request won't return any vserver info from AAI, as currently such information are not provided during instantiation process.
+
+
+Query also directly from VIM:
+
+::
+
+ #
+ ubuntu@kud-host:~$ kubectl get pods,svc,networks,cm,network-attachment-definition,deployments
+ NAME READY STATUS RESTARTS AGE
+ pod/vfw-17f6f7d3-8424-4550-a188-cd777f0ab48f-7cfb9949d9-8b5vg 1/1 Running 0 22s
+ pod/vfw-19571429-4af4-49b3-af65-2eb1f97bba43-75cd7c6f76-4gqtz 1/1 Running 0 11m
+ pod/vpg-5ea0d3b0-9a0c-4e88-a2e2-ceb84810259e-f4485d485-pln8m 1/1 Running 0 11m
+ pod/vpg-8581bc79-8eef-487e-8ed1-a18c0d638b26-6f8cff54d-dvw4j 1/1 Running 0 32s
+ pod/vsn-8e7ac4fc-2c31-4cf8-90c8-5074c5891c14-5879c56fd-q59l7 2/2 Running 0 11m
+ pod/vsn-fdc9b4ba-c0e9-4efc-8009-f9414ae7dd7b-5889b7455-96j9d 2/2 Running 0 30s
+
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ service/vpg-5ea0d3b0-9a0c-4e88-a2e2-ceb84810259e-management-api NodePort 10.244.43.245 <none> 2831:30831/TCP 11m
+ service/vpg-8581bc79-8eef-487e-8ed1-a18c0d638b26-management-api NodePort 10.244.1.45 <none> 2831:31831/TCP 33s
+ service/vsn-8e7ac4fc-2c31-4cf8-90c8-5074c5891c14-darkstat-ui NodePort 10.244.16.187 <none> 667:30667/TCP 11m
+ service/vsn-fdc9b4ba-c0e9-4efc-8009-f9414ae7dd7b-darkstat-ui NodePort 10.244.20.229 <none> 667:31667/TCP 30s
+
+ NAME AGE
+ network.k8s.plugin.opnfv.org/55118b80-8470-4c99-bfdf-d122cd412739-management-network 40s
+ network.k8s.plugin.opnfv.org/55118b80-8470-4c99-bfdf-d122cd412739-protected-network 40s
+ network.k8s.plugin.opnfv.org/55118b80-8470-4c99-bfdf-d122cd412739-unprotected-network 40s
+ network.k8s.plugin.opnfv.org/567cecc3-9692-449e-877a-ff0b560736be-management-network 11m
+ network.k8s.plugin.opnfv.org/567cecc3-9692-449e-877a-ff0b560736be-protected-network 11m
+ network.k8s.plugin.opnfv.org/567cecc3-9692-449e-877a-ff0b560736be-unprotected-network 11m
+
+ NAME DATA AGE
+ configmap/vfw-17f6f7d3-8424-4550-a188-cd777f0ab48f-configmap 6 22s
+ configmap/vfw-19571429-4af4-49b3-af65-2eb1f97bba43-configmap 6 11m
+ configmap/vpg-5ea0d3b0-9a0c-4e88-a2e2-ceb84810259e-configmap 6 11m
+ configmap/vpg-8581bc79-8eef-487e-8ed1-a18c0d638b26-configmap 6 33s
+ configmap/vsn-8e7ac4fc-2c31-4cf8-90c8-5074c5891c14-configmap 2 11m
+ configmap/vsn-fdc9b4ba-c0e9-4efc-8009-f9414ae7dd7b-configmap 2 30s
+
+ NAME AGE
+ networkattachmentdefinition.k8s.cni.cncf.io/55118b80-8470-4c99-bfdf-d122cd412739-ovn-nat 40s
+ networkattachmentdefinition.k8s.cni.cncf.io/567cecc3-9692-449e-877a-ff0b560736be-ovn-nat 11m
+
+ NAME READY UP-TO-DATE AVAILABLE AGE
+ deployment.extensions/vfw-17f6f7d3-8424-4550-a188-cd777f0ab48f 1/1 1 1 22s
+ deployment.extensions/vfw-19571429-4af4-49b3-af65-2eb1f97bba43 1/1 1 1 11m
+ deployment.extensions/vpg-5ea0d3b0-9a0c-4e88-a2e2-ceb84810259e 1/1 1 1 11m
+ deployment.extensions/vpg-8581bc79-8eef-487e-8ed1-a18c0d638b26 1/1 1 1 33s
+ deployment.extensions/vsn-8e7ac4fc-2c31-4cf8-90c8-5074c5891c14 1/1 1 1 11m
+ deployment.extensions/vsn-fdc9b4ba-c0e9-4efc-8009-f9414ae7dd7b 1/1 1 1 30s
+
+
+Component Logs From The Execution
++++++++++++++++++++++++++++++++++
+
+**<MANUAL>**
+
+All logs from the use case execution can be retrieved with following
+
+::
+
+ kubectl -n onap logs `kubectl -n onap get pods -o go-template --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' | grep -m1 <COMPONENT_NAME>` -c <CONTAINER>
+
+where <COMPONENT_NAME> and <CONTAINER> should be replaced with following keywords respectively:
+
+- so-bpmn-infra, so-bpmn-infra
+- so-openstack-adapter, so-openstack-adapter
+- so-cnf-adapter, so-cnf-adapter
+- sdnc-0, sdnc
+
+ From karaf.log all requests (payloads) to CDS can be found by searching following string:
+
+ ``'Sending request below to url http://cds-blueprints-processor-http:8080/api/v1/execution-service/process'``
+
+- cds-blueprints-processor, cds-blueprints-processor
+- multicloud-k8s, multicloud-k8s
+- network-name-gen, network-name-gen,
+
+**Debug log**
+
+In case more detailed logging is needed, here's instructions how to setup DEBUG logging for few components.
+
+- SDNC
+
+ ::
+
+ kubectl -n onap exec -it onap-sdnc-0 -c sdnc /opt/opendaylight/bin/client log:set DEBUG
+
+
+- CDS Blueprint Processor
+
+ ::
+
+ # Edit configmap
+ kubectl -n onap edit configmap onap-cds-blueprints-processor-configmap
+
+ # Edit logback.xml content change root logger level from info to debug.
+ <root level="debug">
+ <appender-ref ref="STDOUT"/>
+ </root>
+
+ # Delete the Pods to make changes effective
+ kubectl -n onap delete pods -l app=cds-blueprints-processor
+
+3-5 Verification of the CNF Status
+..................................
+
+**<MANUAL>**
+
+The Guilin release introduced a new API for verification of the status of instantiated resources in the k8s cluster. The API gives a result similar to the *kubectl describe* operation for all the resources created for a particular *rb-definition*. The Status API can be used to verify the k8s resources after instantiation, but can also be leveraged for synchronization of the information with external components, like AAI. To use the Status API call
+
+::
+
+ curl -i http://${K8S_NODE_IP}:30280/api/multicloud-k8s/v1/v1/instance/{rb-instance-id}/status
+
+where {rb-instance-id} can be taken from the list of instances resolved by the following call, or from the AAI *heat-stack-id* property of the created *vf-module* associated with each Helm package from the onboarded VSP, which holds the *rb-instance-id* value.
+
+The same API can be accessed over cnf-adapter endpoint (ClusterIP):
+
+::
+
+    curl -i http://${K8S_NODE_IP}:8090/api/cnf-adapter/v1/instance/{rb-instance-id}/status
+
+Similar to the Status API is the Query API, available since Honolulu, that allows fetching specific resources that belong to the created instance. The Query API allows filtering resources by Name, Kind, ApiVersion, Namespace and Labels. The k8splugin endpoint is:
+
+::
+
+ curl -i http://${K8S_NODE_IP}:30280/api/multicloud-k8s/v1/v1/instance/{rb-instance-id}/query?ApiVersion=v1&Kind=Deployment&Name=vfw-1-vfw&Namespace=vfirewall
+
+and cnf-adapter endpoint is:
+
+::
+
+ curl -i http://${K8S_NODE_IP}:8090/api/cnf-adapter/v1/instance/{rb-instance-id}/query?ApiVersion=v1&Kind=Deployment&Name=vfw-1-vfw&Namespace=vfirewall
+
+
+An exemplary output of the Status API is shown below (the full result of the test vFW CNF helm package is in the attached file). It shows the list of GVK resources created for the requested *rb-instance* (Helm release and vf-module at the same time) with the associated describe result for all of them.
+
+ :download:`Full Status API Result <files/vFW_CNF_CDS/status-response.json>`
+
+::
+
+ {
+ "request": {
+ "rb-name": "vfw",
+ "rb-version": "plugin_test",
+ "profile-name": "test_profile",
+ "release-name": "",
+ "cloud-region": "kud",
+ "labels": {
+ "testCaseName": "plugin_fw.sh"
+ },
+ "override-values": {
+ "global.onapPrivateNetworkName": "onap-private-net-test"
+ }
+ },
+ "ready": true,
+ "resourceCount": 1,
+ "resourcesStatus": [
+ {
+ "name": "sink-configmap",
+ "GVK": {
+ "Group": "",
+ "Version": "v1",
+ "Kind": "ConfigMap"
+ },
+ "status": {
+ "apiVersion": "v1",
+ "data": {
+ "protected_net_gw": "192.168.20.100",
+ "protected_private_net_cidr": "192.168.10.0/24"
+ },
+ "kind": "ConfigMap",
+ "metadata": {
+ "creationTimestamp": "2020-09-29T13:36:25Z",
+ "labels": {
+ "k8splugin.io/rb-instance-id": "practical_nobel"
+ },
+ "name": "sink-configmap",
+ "namespace": "plugin-tests-namespace",
+ "resourceVersion": "10720771",
+ "selfLink": "/api/v1/namespaces/plugin-tests-namespace/configmaps/sink-configmap",
+ "uid": "46c8bec4-980c-455b-9eb0-fb84ac8cc450"
+ }
+ }
+ }
+ ]
+ }
+
+**<AUTOMATED>**
+
+Since Honolulu release vFW CNF Use Case is equipped with dedicated mechanisms for verification of the CNF status automatically, during the instantiation. The process utilizes the k8sPlugin Status and Healthcheck APIs that both are natively exposed in the CDS and can be executed from the script execution functionality in the CDS.
+
+.. figure:: files/vFW_CNF_CDS/healthcheck.png
+ :scale: 60 %
+ :align: center
+
+ vFW CNF Healthcheck flow concept
+
+There is a dedicated workflow exposed in CBA, where Status API result verification is run with the *status-verification-script* step and execution of the healthcheck job is run with *health-check-process*. The first one verifies if all pods have the *Running* state. If yes, then verification of the health is started by execution of the dedicated Helm tests, which are jobs that verify connectivity in each component.
+
+::
+
+ "health-check": {
+ "steps": {
+ "config-setup": {
+ "description": "Gather necessary input for config init and status verification",
+ "target": "config-setup-process",
+ "activities": [
+ {
+ "call_operation": "ResourceResolutionComponent.process"
+ }
+ ],
+ "on_success": [
+ "config-apply"
+ ],
+ "on_failure": [
+ "handle_error"
+ ]
+ },
+ "status-verification-script": {
+ "description": "Simple status verification script",
+ "target": "simple-status-check",
+ "activities": [
+ {
+ "call_operation": "ComponentScriptExecutor.process"
+ }
+ ],
+ "on_success": [
+ "health-check-process"
+ ],
+ "on_failure": [
+ "handle_error"
+ ]
+ },
+ "health-check-process": {
+ "description": "Start health check script",
+ "target": "health-check-script",
+ "activities": [
+ {
+ "call_operation": "ComponentScriptExecutor.process"
+ }
+ ],
+ "on_success": [
+ "collect-results"
+ ],
+ "on_failure": [
+ "handle_error"
+ ]
+ },
+ "handle_error": {
+ "description": "Simple error verification script",
+ "target": "simple-error-check",
+ "activities": [
+ {
+ "call_operation": "ComponentScriptExecutor.process"
+ }
+ ],
+ "on_success": [
+ "collect-results"
+ ]
+ },
+ "collect-results": {
+ "description": "Final collection of results",
+ "target": "collect-results"
+ }
+ },
+
+
+Since Istanbul release, SO is equipped with dedicated workflow for verification of the CNF status. It works similarly to the workflow introduced in Honolulu, however basic CNF Status Verification step utilizes "Ready" flag of the StatusAPI response to check if k8s resources created from Helm package are up and running. Ready flag works properly in k8splugin 0.9.1 or higher. Both operations are performed by ControllerExecutionBB in SO and are realized by cnf-adapter component in SO. This workflow can be triggered by a dedicated endpoint documented here: `CNF Health Check`_. This workflow is not yet integrated into automation scripts.
+
+3-6 Synchronization of created k8s resources into AAI
+.....................................................
+
+Since the Istanbul release, the `AAI v24 schema`_ version is used to store basic information about k8s resources deployed from each helm package. The AAI change is described in `K8s Resource in AAI`_. The information stored in AAI allows identification of all the deployed k8s resources, but the details about them have to be fetched from the k8s cluster on demand. Such a design is motivated by the high frequency of k8s resource status changes and the plethora of resource types available in k8s - including the CRDs that extend the predefined resource types available in k8s. In consequence, it made no sense to store in AAI a full runtime picture of the k8s resources, as their synchronization would be impossible.
+
+The K8s-Resource object is stored in the cloud-infrastructure set of AAI APIs; it belongs to the tenant, and is related to both generic-vnf and vf-module. Each k8s-resource object created in AAI has a selflink for the cnf-adapter Query API, described in `3-5 Verification of the CNF Status`_, that allows fetching actual information about the resource in k8s. An exemplary set of k8s-resources with related generic-vnf and vf-modules for the vFW CNF use case is in the files attached below.
+
+ :download:`List of K8s Resources <files/vFW_CNF_CDS/k8s-resources-response.json>`
+
+ :download:`Generic VNF with modules <files/vFW_CNF_CDS/vfw-generic-vnf-aai.json>`
+
+ :download:`vPKG VF-Module with related k8s-resource relations <files/vFW_CNF_CDS/vpkg-vf-module-aai.json>`
+
+AAI synchronization is run just after creation of the vf-module by SO. Since the Jakarta release, cnf-adapter synchronizes into AAI information about any change on k8s resources performed after their initial creation. For instance, if a pod is deleted in the k8s cluster, a new one is automatically created. In consequence, K8sPlugin sends a notification about the change to cnf-adapter, and the latter performs an update of the information in AAI by removing the old pod and creating the new one in AAI. The update in AAI, after the change in the k8s cluster, should be applied with no more than a 30s delay.
+
+In order to force an immediate update of AAI information about the concrete Helm package, the following API can also be used with a properly modified body (all fields except the callbackUrl).
+
+::
+
+ curl -i -X POST http://${K8S_NODE_IP}:8090/api/cnf-adapter/v1/aai-update
+
+
+::
+
+ {
+ "instanceId": "keen_darwin",
+ "cloudRegion": "kud",
+ "cloudOwner": "K8sCloudOwner",
+ "tenantId": "dca807fa-0d3e-4fb1-85eb-b9e1c03108a3",
+ "callbackUrl": "http://example",
+ "genericVnfId": "8b3af2e0-fd66-460d-b928-22f5dac517a6",
+ "vfModuleId": "a0161551-9d13-47c2-ba4f-896d4ee401d4"
+ }
+
+
+PART 4 - Future improvements needed
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Future development areas for this use case:
+
+- Include Closed Loop part of the vFW CNF demo.
+- vFW service with Openstack VNF (KUD) and Kubernetes CNF
+
+Future development areas for CNF support:
+
+- Extraction of override values in time of the package onboarding
+- Upgrade of the vFW CNF similar to Helm Upgrade through the SDC and SO
+- Use multicloud/k8S API v2 (EMCO)
+
+Some of the features from the list above are covered by the Jakarta roadmap described in `REQ-890`_.
+
+
+.. _ONAP Deployment Guide: https://docs.onap.org/projects/onap-oom/en/latest/sections/guides/deployment_guides/oom_deployment.html
+.. _CDS Documentation: https://docs.onap.org/projects/onap-ccsdk-cds/en/latest/
+.. _vLB use-case: https://wiki.onap.org/pages/viewpage.action?pageId=71838898
+.. _vFW_CNF_CDS Model: https://git.onap.org/demo/tree/heat/vFW_CNF_CDS/templates?h=guilin
+.. _vFW_CNF_CDS Automation: https://git.onap.org/demo/tree/heat/vFW_CNF_CDS/automation?h=guilin
+.. _vFW CDS Dublin: https://wiki.onap.org/display/DW/vFW+CDS+Dublin
+.. _vFW CBA Model: https://git.onap.org/ccsdk/cds/tree/components/model-catalog/blueprint-model/service-blueprint/vFW?h=elalto
+.. _vFW_Helm Model: https://git.onap.org/multicloud/k8s/tree/kud/demo/firewall?h=elalto
+.. _vFW_NextGen: https://git.onap.org/demo/tree/heat/vFW_NextGen?h=elalto
+.. _vFW EDGEX K8S: https://docs.onap.org/projects/onap-integration/en/latest/docs_vfw_edgex_k8s.html
+.. _vFW EDGEX K8S In ONAP Wiki: https://wiki.onap.org/display/DW/Deploying+vFw+and+EdgeXFoundry+Services+on+Kubernets+Cluster+with+ONAP
+.. _KUD github: https://github.com/onap/multicloud-k8s/tree/honolulu/kud/hosting_providers/baremetal
+.. _KUD in Wiki: https://wiki.onap.org/display/DW/Kubernetes+Baremetal+deployment+setup+instructions
+.. _Multicloud k8s gerrit: https://gerrit.onap.org/r/q/status:open+project:+multicloud/k8s
+.. _KUD subproject in github: https://github.com/onap/multicloud-k8s/tree/honolulu/kud
+.. _KUD Interface Permission: https://jira.onap.org/browse/MULTICLOUD-1310
+.. _Frankfurt CBA Definition: https://git.onap.org/demo/tree/heat/vFW_CNF_CDS/templates/cba/Definitions/vFW_CNF_CDS.json?h=frankfurt
+.. _Frankfurt CBA Script: https://git.onap.org/demo/tree/heat/vFW_CNF_CDS/templates/cba/Scripts/kotlin/KotlinK8sProfileUpload.kt?h=frankfurt
+.. _SO-3403: https://jira.onap.org/browse/SO-3403
+.. _SO-3404: https://jira.onap.org/browse/SO-3404
+.. _REQ-182: https://jira.onap.org/browse/REQ-182
+.. _REQ-341: https://jira.onap.org/browse/REQ-341
+.. _REQ-458: https://jira.onap.org/browse/REQ-458
+.. _REQ-627: https://jira.onap.org/browse/REQ-627
+.. _REQ-890: https://jira.onap.org/browse/REQ-890
+.. _Python SDK: https://docs.onap.org/projects/onap-integration/en/latest/integration-tooling.html#python-onap-sdk
+.. _KUD Jenkins ci/cd verification: https://jenkins.onap.org/job/multicloud-k8s-master-kud-deployment-verify-shell/
+.. _K8s cloud site config: https://docs.onap.org/en/latest/guides/onap-operator/cloud_site/k8s/index.html
+.. _SO Monitoring: https://docs.onap.org/projects/onap-so/en/latest/developer_info/Working_with_so_monitoring.html
+.. _Data Dictionary: https://git.onap.org/demo/tree/heat/vFW_CNF_CDS/templates/cba-dd.json?h=guilin
+.. _Helm Healer: https://git.onap.org/oom/offline-installer/tree/tools/helm-healer.sh?h=frankfurt
+.. _infra_workload: https://docs.onap.org/projects/onap-multicloud-framework/en/latest/specs/multicloud_infra_workload.html?highlight=multicloud
+.. _K8s Client Compatibility: https://github.com/kubernetes/client-go
+.. _CNF Health Check: https://docs.onap.org/projects/onap-so/en/latest/api/apis/serviceInstances-api.html#healthcheck
+.. _K8s Resource in AAI: https://jira.onap.org/browse/ONAPMODEL-37
+.. _AAI v24 schema: https://nexus.onap.org/service/local/repositories/releases/archive/org/onap/aai/schema-service/aai-schema/1.9.2/aai-schema-1.9.2.jar/!/onap/aai_swagger_html/aai_swagger_v24.html
diff --git a/docs/docs_vfw.rst b/docs/docs_vfw.rst
index b9ed9adb7..1fdb2aaa7 100644
--- a/docs/docs_vfw.rst
+++ b/docs/docs_vfw.rst
@@ -1,5 +1,7 @@
.. _docs_vfw:
+:orphan:
+
vFirewall Use Case
------------------
@@ -28,7 +30,7 @@ These VFs run in three separate VMs. The packet generator sends packets to the
packet sink through the firewall.
The firewall reports the volume of traffic passing though to the ONAP DCAE
collector. To check the traffic volume that lands at the sink VM, you can access
-the link http://sink_ip_address:667 through your browser and enable automatic page
+the link <http://SINK_IP_ADDRESS:667> through your browser and enable automatic page
refresh by clicking the "Off" button. You can see the traffic volume in the charts.
The packet generator includes a script that periodically generates different
@@ -60,7 +62,7 @@ operation policies that are currently enabled for the vFirewall use case:
operational policy to request APPC to adjust the traffic volume to 500 packets
per 10 seconds.
- APPC sends a request to the packet generator to adjust the traffic volume.
-- Changes to the traffic volume can be observed through the link http://sink_ip_address:667.
+- Changes to the traffic volume can be observed through the link <http://SINK_IP_ADDRESS:667>.
Adjust packet generator
@@ -109,9 +111,8 @@ At the end of the test , robot sets the streams back to Medium so that it is
setup for the next test.
For documentation about running the use case manually for previous releases,
-please look at the videos and the material available at this `wiki page`__.
-
-__ https://wiki.onap.org/display/DW/Running+the+ONAP+Demos
+please look at the videos and the material available in
+`Running the ONAP Demos wiki page <https://wiki.onap.org/display/DW/Running+the+ONAP+Demos>`_
Although videos are still valid, users are encouraged to use the Heat templates
linked at the top of this page rather than the old Heat templates in that wiki page.
@@ -130,7 +131,7 @@ expire. Monitoring the DMaaP topic for DCAE_CL_OUTPUT can be used to confirm
that no TCA events are coming in from the VNF through VES/TCA.
::
- http://<k8s-host>:30227/events/unauthenticated.DCAE_CL_OUTPUT/g1/c3?timeout=5000
+ http://K8S_HOST:30227/events/unauthenticated.DCAE_CL_OUTPUT/g1/c3?timeout=5000
+-------------+------------+
| JIRA ID | Status |
diff --git a/docs/docs_vfwHPA.rst b/docs/docs_vfwHPA.rst
index 2dd229b31..147d80d2a 100644
--- a/docs/docs_vfwHPA.rst
+++ b/docs/docs_vfwHPA.rst
@@ -4,11 +4,13 @@
.. _docs_vfw_hpa:
+:orphan:
+
vFW/vDNS with HPA Tutorial: Setting Up and Configuration
--------------------------------------------------------
Description
-~~~~~~~~~~
+~~~~~~~~~~~
This use case makes modifications to the regular vFW use case in ONAP by giving the VMs certain hardware features (such as SRIOV NIC, CPU pinning, pci passthrough.. etc.) in order to enhance their performance. Multiple cloud regions with flavors that have HPA features are registered with ONAP. We then create policies that specify the HPA requirements of each VM in the use case. When a service instance is created with OOF specified as the homing solution, OOF responds with the homing solution (cloud region) and flavor directives that meet the requirements specified in the policy.
This tutorial covers enhancements 1 to 5 in Background of https://wiki.onap.org/pages/viewpage.action?pageId=41421112. It focuses on Test Plan 1.
@@ -18,7 +20,7 @@ This tutorial covers enhancements 1 to 5 in Background of https://wiki.onap.org/
`HPA Enhancements Page <https://wiki.onap.org/pages/viewpage.action?pageId=34376310>`_
-`vFW with HPA Test Status Page <https://wiki.onap.org/display/DW/vFW+with+HPA+Integration+Test+-+Test+Status>`_
+`vFW with HPA Test Status Page <https://wiki.onap.org/pages/viewpage.action?pageId=45301960>`_
`Hardware Platform Enablement in ONAP <https://wiki.onap.org/display/DW/Hardware+Platform+Enablement+In+ONAP>`_
@@ -26,16 +28,13 @@ This tutorial covers enhancements 1 to 5 in Background of https://wiki.onap.org/
Setting Up and Installation
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-Some fixes for HPA support were made subsequent to the release of the Casablanca images. Several updated docker images need to be used to utilize the fixes. The details of the docker images that need to be used and the issues that are fixed are described at this link https://wiki.onap.org/display/DW/Docker+image+updates+for+HPA+vFW+testing
-
-Instructions for updating the manifest of ONAP docker images can be found here: https://onap.readthedocs.io/en/casablanca/submodules/integration.git/docs/#deploying-an-updated-docker-manifest
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Install OOM ONAP using the deploy script in the integration repo. Instructions for this can be found in this link https://wiki.onap.org/display/DW/OOM+Component. When the installation is complete (all the pods are either in running or completed state) Do the following;
+Install OOM ONAP using the deploy script in the integration repo. Instructions for this can be found in this link https://docs.onap.org/projects/onap-oom/en/latest/sections/guides/deployment_guides/oom_deployment.html. When the installation is complete (all the pods are either in running or completed state) Do the following;
1. Check that all the required components were deployed;
-
+
``oom-rancher# helm list``
2. Check the state of the pods;
@@ -44,14 +43,14 @@ Install OOM ONAP using the deploy script in the integration repo. Instructions f
3. Run robot health check
- ``oom-rancher# cd oom/kubernetes/robot``
+ ``oom-rancher# cd oom/kubernetes/robot``
``oom-rancher# ./ete-k8s.sh onap health``
Ensure all the required components pass the health tests
4. Modify the SO bpmn configmap to change the SO vnf adapter endpoint to v2
-
- ``oom-rancher# kubectl -n onap edit configmap dev-so-so-bpmn-infra-app-configmap``
+
+ ``oom-rancher# kubectl -n onap edit configmap dev-so-so-bpmn-infra-app-configmap``
``- vnf:``
@@ -74,7 +73,7 @@ Install OOM ONAP using the deploy script in the integration repo. Instructions f
``oom-rancher# kubectl delete <pod-name> -n onap``
-5. Create HPA flavors in cloud regions to be registered with ONAP. All HPA flavor names must start with onap. During our tests, 3 cloud regions were registered and we created flavors in each cloud. The flavors match the flavors described in the test plan `here <https://wiki.onap.org/pages/viewpage.action?pageId=41421112>`_.
+5. Create HPA flavors in cloud regions to be registered with ONAP. All HPA flavor names must start with onap. During our tests, 3 cloud regions were registered and we created flavors in each cloud. The flavors match the flavors described in the test plan `here <https://wiki.onap.org/pages/viewpage.action?pageId=41421112>`_.
- **Cloud Region One**
@@ -82,7 +81,7 @@ Install OOM ONAP using the deploy script in the integration repo. Instructions f
``#nova flavor-create onap.hpa.flavor11 111 8 20 2``
``#nova flavor-key onap.hpa.flavor11 set hw:mem_page_size=2048``
-
+
**Flavor12**
``#nova flavor-create onap.hpa.flavor12 112 12 20 2``
@@ -91,9 +90,9 @@ Install OOM ONAP using the deploy script in the integration repo. Instructions f
``#openstack aggregate create --property aggregate_instance_extra_specs:sriov_nic=sriov-nic-intel-8086-154C-shared-1:3 aggr121``
``#openstack flavor set onap.hpa.flavor12 --property aggregate_instance_extra_specs:sriov_nic=sriov-nic-intel-8086-154C-shared-1:3``
-
+
**Flavor13**
- ``#nova flavor-create onap.hpa.flavor13 113 12 20 2``
+ ``#nova flavor-create onap.hpa.flavor13 113 12 20 2``
``#nova flavor-key onap.hpa.flavor13 set hw:mem_page_size=2048``
@@ -111,7 +110,7 @@ Install OOM ONAP using the deploy script in the integration repo. Instructions f
``#nova flavor-key onap.hpa.flavor21 set hw:cpu_policy=dedicated``
``#nova flavor-key onap.hpa.flavor21 set hw:cpu_thread_policy=isolate``
-
+
**Flavor22**
``#nova flavor-create onap.hpa.flavor22 222 12 20 2``
@@ -120,9 +119,9 @@ Install OOM ONAP using the deploy script in the integration repo. Instructions f
``#openstack aggregate create --property aggregate_instance_extra_specs:sriov_nic=sriov-nic-intel-8086-154C-shared-1:2 aggr221``
``#openstack flavor set onap.hpa.flavor22 --property aggregate_instance_extra_specs:sriov_nic=sriov-nic-intel-8086-154C-shared-1:2``
-
+
**Flavor23**
- ``#nova flavor-create onap.hpa.flavor23 223 12 20 2``
+ ``#nova flavor-create onap.hpa.flavor23 223 12 20 2``
``#nova flavor-key onap.hpa.flavor23 set hw:mem_page_size=2048``
@@ -140,20 +139,20 @@ Install OOM ONAP using the deploy script in the integration repo. Instructions f
``#nova flavor-key onap.hpa.flavor31 set hw:cpu_policy=dedicated``
``#nova flavor-key onap.hpa.flavor31 set hw:cpu_thread_policy=isolate``
-
+
**Flavor32**
``#nova flavor-create onap.hpa.flavor32 332 8192 20 2``
``#nova flavor-key onap.hpa.flavor32 set hw:mem_page_size=1048576``
-
+
**Flavor33**
- ``#nova flavor-create onap.hpa.flavor33 333 12 20 2``
+ ``#nova flavor-create onap.hpa.flavor33 333 12 20 2``
``#nova flavor-key onap.hpa.flavor33 set hw:mem_page_size=2048``
``#openstack aggregate create --property aggregate_instance_extra_specs:sriov_nic=sriov-nic-intel-8086-154C-shared-1:1 aggr331``
- ``#openstack flavor set onap.hpa.flavor33 --property aggregate_instance_extra_specs:sriov_nic=sriov-nic-intel-8086-154C-shared-1:1``
+ ``#openstack flavor set onap.hpa.flavor33 --property aggregate_instance_extra_specs:sriov_nic=sriov-nic-intel-8086-154C-shared-1:1``
**Note: Use case can be run manually or using automation script (recommended)**
@@ -219,7 +218,7 @@ If an update is needed, the update can be done via rest using curl or postman
}'
-9. Register new cloud regions. This can be done using instructions (Step 1 to Step 3) on this `page <https://onap.readthedocs.io/en/latest/submodules/multicloud/framework.git/docs/multicloud-plugin-windriver/UserGuide-MultiCloud-WindRiver-TitaniumCloud.html#tutorial-onboard-instance-of-wind-river-titanium-cloud>`_. The already existing CloudOwner and cloud complex can be used. If step 3 does not work using the k8s ip and external port. It can be done using the internal ip address and port. Exec into any pod and run the command from the pod.
+9. Register new cloud regions. This can be done using instructions (Step 1 to Step 3) on this `page <https://docs.onap.org/projects/onap-multicloud-framework/en/latest/multicloud-plugin-windriver/UserGuide-MultiCloud-WindRiver-TitaniumCloud.html?highlight=multicloud>`_. The already existing CloudOwner and cloud complex can be used. If step 3 does not work using the k8s ip and external port. It can be done using the internal ip address and port. Exec into any pod and run the command from the pod.
- Get msb-iag internal ip address and port
@@ -229,7 +228,7 @@ If an update is needed, the update can be done via rest using curl or postman
``oom-rancher# kubectl exec dev-oof-oof-6c848594c5-5khps -it -- bash``
-10. Put required subscription list into tenant for all the newly added cloud regions. An easy way to do this is to do a get on the default cloud region, copy the tenant information with the subscription. Then paste it in your put command and modify the region id, tenant-id, tenant-name and resource-version.
+10. Put required subscription list into tenant for all the newly added cloud regions. An easy way to do this is to do a get on the default cloud region, copy the tenant information with the subscription. Then paste it in your put command and modify the region id, tenant-id, tenant-name and resource-version.
**GET COMMAND**
@@ -374,122 +373,122 @@ If an update is needed, the update can be done via rest using curl or postman
}
}'
-
+
11. Onboard the vFW HPA template. The templates can be gotten from the `demo <https://github.com/onap/demo>`_ repo. The heat and env files used are located in demo/heat/vFW_HPA/vFW/. Create a zip file using the files. For onboarding instructions see steps 4 to 9 of `vFWCL instantiation, testing and debugging <https://wiki.onap.org/display/DW/vFWCL+instantiation%2C+testing%2C+and+debuging>`_. Note that in step 5, only one VSP is created. For the VSP the option to submit for testing in step 5cii was not shown. So you can check in and certify the VSP and proceed to step 6.
12. Get the parameters (model info, model invarant id...etc) required to create a service instance via rest. This can be done by creating a service instance via VID as in step 10 of `vFWCL instantiation, testing and debugging <https://wiki.onap.org/display/DW/vFWCL+instantiation%2C+testing%2C+and+debuging>`_. After creating the service instance, exec into the SO bpmn pod and look into the /app/logs/bpmn/debug.log file. Search for the service instance and look for its request details. Then populate the parameters required to create a service instance via rest in step 13 below.
13. Create a service instance rest request but do not create service instance yet. Specify OOF as the homing solution and multicloud as the orchestrator. Be sure to use a service instance name that does not exist and populate the parameters with values gotten from step 12.
-::
+::
curl -k -X POST \
http://{{k8s}}:30277/onap/so/infra/serviceInstances/v6 \
-H 'authorization: Basic SW5mcmFQb3J0YWxDbGllbnQ6cGFzc3dvcmQxJA== \
-H 'content-type: application/json' \
-
- -d '{
-
- "requestDetails":{
-
- "modelInfo":{
-
+
+ -d '{
+
+ "requestDetails":{
+
+ "modelInfo":{
+
"modelInvariantId":"b7564cb9-4074-4c9b-95d6-39d4191e80d9",
-
+
"modelType":"service",
-
+
"modelName":"vfw_HPA",
-
+
"modelVersion":"1.0",
-
+
"modelVersionId":"35d184e8-1cba-46e3-9311-a17ace766eb0",
-
+
"modelUuid":"35d184e8-1cba-46e3-9311-a17ace766eb0",
-
+
"modelInvariantUuid":"b7564cb9-4074-4c9b-95d6-39d4191e80d9"
-
+
},
-
- "requestInfo":{
-
+
+ "requestInfo":{
+
"source":"VID",
-
+
"instanceName":"oof-12-homing",
-
+
"suppressRollback":false,
-
+
"requestorId":"demo"
-
+
},
-
- "subscriberInfo":{
-
+
+ "subscriberInfo":{
+
"globalSubscriberId":"Demonstration"
-
+
},
-
- "requestParameters":{
-
+
+ "requestParameters":{
+
"subscriptionServiceType":"vFW",
-
+
"aLaCarte":true,
-
+
"testApi":"VNF_API",
-
- "userParams":[
-
- {
-
+
+ "userParams":[
+
+ {
+
"name":"Customer_Location",
-
- "value":{
-
+
+ "value":{
+
"customerLatitude":"32.897480",
-
+
"customerLongitude":"97.040443",
-
+
"customerName":"some_company"
-
+
}
-
+
},
-
- {
-
+
+ {
+
"name":"Homing_Solution",
-
+
"value":"oof"
-
+
},
-
- {
-
+
+ {
+
"name":"orchestrator",
-
+
"value":"multicloud"
-
+
}
-
+
]
-
+
},
-
- "project":{
-
+
+ "project":{
+
"projectName":"Project-Demonstration"
-
+
},
-
- "owningEntity":{
-
+
+ "owningEntity":{
+
"owningEntityId":"e1564fc9-b9d0-44f9-b5af-953b4aad2f40",
-
+
"owningEntityName":"OE-Demonstration"
-
+
}
-
+
}
-
+
}'
14. Get the resourceModuleName to be used for creating policies. This can be gotten from the CSAR file of the service model created. However, an easy way to get the resourceModuleName is to send the service instance create request in step 13 above. This will fail as there are no policies but you can then go into the bpmn debug.log file and get its value by searching for resourcemodulename.
@@ -513,14 +512,14 @@ To Update a policy, use the following curl command. Modify the policy as require
"onapName": "SampleDemo",
"policyScope": "OSDF_CASABLANCA"
}' 'https://pdp:8081/pdp/api/updatePolicy'
-
+
To delete a policy, use two commands below to delete from PDP and PAP
**DELETE POLICY INSIDE PDP**
::
-
+
curl -k -v -H 'Content-Type: application/json' \
-H 'Accept: application/json' \
-H 'ClientAuth: cHl0aG9uOnRlc3Q=' \
@@ -533,14 +532,14 @@ To delete a policy, use two commands below to delete from PDP and PAP
**DELETE POLICY INSIDE PAP**
::
-
+
curl -k -v -H 'Content-Type: application/json' \
-H 'Accept: application/json' \
-H 'ClientAuth: cHl0aG9uOnRlc3Q=' \
-H 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' \
-H 'Environment: TEST' \
-X DELETE \
- -d '{"policyName": "OSDF_CASABLANCA.Config_MS_vnfPolicy_vFWHPA.1.xml","policyComponent":"PAP","policyType":"Optimization","deleteCondition":"ALL"}' https://pdp:8081/pdp/api/deletePolicy
+ -d '{"policyName": "OSDF_CASABLANCA.Config_MS_vnfPolicy_vFWHPA.1.xml","policyComponent":"PAP","policyType":"Optimization","deleteCondition":"ALL"}' https://pdp:8081/pdp/api/deletePolicy
Below are the 3 HPA policies for test cases in the `test plan <https://wiki.onap.org/pages/viewpage.action?pageId=41421112>`_
@@ -559,7 +558,7 @@ Create Policy
}' 'https://pdp:8081/pdp/api/createPolicy'
-Push Policy
+Push Policy
::
@@ -587,7 +586,7 @@ Create Policy
}' 'https://pdp:8081/pdp/api/createPolicy'
-Push Policy
+Push Policy
::
@@ -611,8 +610,8 @@ Create Policy
"onapName": "SampleDemo",
"policyScope": "OSDF_CASABLANCA"
}' 'https://pdp:8081/pdp/api/createPolicy'
-
-Push Policy
+
+Push Policy
::
@@ -621,7 +620,7 @@ Push Policy
"policyName": "OSDF_CASABLANCA.hpa_policy_vFW_3",
"policyType": "MicroService"
}' 'https://pdp:8081/pdp/api/pushPolicy'
-
+
17. Create Service Instance using step 13 above
18. Check bpmn logs to ensure that OOF sent homing response and flavor directives.
@@ -652,9 +651,9 @@ Push Policy
"vnf-networks": [],
"vnf-vms": []
},
-
-
- "vnf-parameters": [
+
+
+ "vnf-parameters": [
{
"vnf-parameter-name": "vfw_image_name",
"vnf-parameter-value": "ubuntu-16.04"
@@ -731,7 +730,7 @@ Push Policy
"vnf-parameter-name": "vsn_private_ip_1",
"vnf-parameter-value": "10.0.100.3"
},
-
+
{
"vnf-parameter-name": "vfw_name_0",
"vnf-parameter-value": "vfw"
@@ -774,7 +773,7 @@ Push Policy
},
{
"vnf-parameter-name": "vf_module_id",
- "vnf-parameter-value": "VfwHpa..base_vfw..module-0"
+ "vnf-parameter-value": "VfwHpa..base_vfw..module-0"
},
{
"vnf-parameter-name": "sec_group",
@@ -797,32 +796,32 @@ Push Policy
"vnf-parameter-name": "oof_directives",
"vnf-parameter-value": "{\"directives\": [{\"id\": \"vfw\", \"type\": \"vnfc\", \"directives\": [{\"attributes\": [{\"attribute_name\": \"firewall_flavor_name\", \"attribute_value\": \"onap.hpa.flavor31\"}, {\"attribute_name\": \"flavorId\", \"attribute_value\": \"2297339f-6a89-4808-a78f-68216091f904\"}, {\"attribute_name\": \"flavorId\", \"attribute_value\": \"2297339f-6a89-4808-a78f-68216091f904\"}, {\"attribute_name\": \"flavorId\", \"attribute_value\": \"2297339f-6a89-4808-a78f-68216091f904\"}], \"type\": \"flavor_directives\"}]}, {\"id\": \"vgenerator\", \"type\": \"vnfc\", \"directives\": [{\"attributes\": [{\"attribute_name\": \"packetgen_flavor_name\", \"attribute_value\": \"onap.hpa.flavor32\"}, {\"attribute_name\": \"flavorId\", \"attribute_value\": \"2297339f-6a89-4808-a78f-68216091f904\"}], \"type\": \"flavor_directives\"}]}, {\"id\": \"vsink\", \"type\": \"vnfc\", \"directives\": [{\"attributes\": [{\"attribute_name\": \"sink_flavor_name\", \"attribute_value\": \"onap.large\"}, {\"attribute_name\": \"flavorId\", \"attribute_value\": \"2297339f-6a89-4808-a78f-68216091f904\"}], \"type\": \"flavor_directives\"}]}]}"
},
-
+
{
"vnf-parameter-name": "sdnc_directives",
"vnf-parameter-value": "{}"
- },
-
+ },
+
{
"vnf-parameter-name": "template_type",
"vnf-parameter-value": "heat"
}
-
-
+
+
],
"vnf-topology-identifier": {
"generic-vnf-name": "oof-12-vnf-3",
- "generic-vnf-type": "vfw_hpa 0",
+ "generic-vnf-type": "vfw_hpa 0",
"service-type": "6b17354c-0fae-4491-b62e-b41619929c54",
- "vnf-name": "vfwhpa_stack",
+ "vnf-name": "vfwhpa_stack",
"vnf-type": "VfwHpa..base_vfw..module-0"
-
+
}
}
}}
-
-Change parameters based on your environment.
+
+Change parameters based on your environment.
**Note**
@@ -833,5 +832,5 @@ Change parameters based on your environment.
"service-type": "6b17354c-0fae-4491-b62e-b41619929c54", <-- same as Service Instance ID
"vnf-name": "vfwhpa_stack", <-- name to be given to the vf module
"vnf-type": "VfwHpa..base_vfw..module-0" <-- can be found on the VID - VF Module dialog screen - Model Name
-
+
21. Create vf module (11g of `vFWCL instantiation, testing and debugging <https://wiki.onap.org/display/DW/vFWCL+instantiation%2C+testing%2C+and+debuging>`_). If everything worked properly, you should see the stack created in your VIM(WR titanium cloud openstack in this case).
diff --git a/docs/docs_vfw_edgex_k8s.rst b/docs/docs_vfw_edgex_k8s.rst
index a25b349a2..256d65948 100644
--- a/docs/docs_vfw_edgex_k8s.rst
+++ b/docs/docs_vfw_edgex_k8s.rst
@@ -4,6 +4,8 @@
.. _docs_vfw_edgex_multicloud_k8s:
+:orphan:
+
vFW/Edgex with Multicloud Kubernetes Plugin: Setting Up and Configuration
-------------------------------------------------------------------------
@@ -201,8 +203,8 @@ It is an example of the minimal HEAT template.
Onboard the CSAR
----------------
-For onboarding instructions please refer to steps 4-9 from the document
-`here <https://wiki.onap.org/display/DW/vFWCL+instantiation%2C+testing%2C+and+debuging>`__.
+For onboarding instructions please refer to steps 4-9 from
+`vFWCL instantiation, testing and debugging wiki page <https://wiki.onap.org/display/DW/vFWCL+instantiation%2C+testing%2C+and+debuging>`_.
Steps for installing KUD Cloud
------------------------------
@@ -210,9 +212,8 @@ Steps for installing KUD Cloud
Follow the link to install KUD Kubernetes Deployment. KUD contains all the
packages required for running vfw use case.
-Kubernetes Baremetal deployment instructions can be found here_
-
-.. _here: https://wiki.onap.org/display/DW/Kubernetes+Baremetal+deployment+setup+instructions/
+Kubernetes Baremetal deployment instructions can be found in
+`Kubernetes Baremetal deployment setup instructions wiki page <https://wiki.onap.org/display/DW/Kubernetes+Baremetal+deployment+setup+instructions>`_
REGISTER KUD CLOUD REGION with K8s-Plugin
-----------------------------------------
@@ -272,15 +273,15 @@ registration) pointing to a real OpenStack tenant (e.g. the OOF tenant in
the lab where we tested).
This will cause multicloud to add the tenant to the k8s cloud region and
-then, similarly to #10 in the documentation
-`here <https://onap.readthedocs.io/en/casablanca/submodules/integration.git/docs/docs_vfwHPA.html#docs-vfw-hpa>`__,
+then, similarly to #10 in the
+`vFW HPA casablanca official documentation <https://docs.onap.org/projects/onap-integration/en/latest/docs_vfwHPA.html>`_,
the service-subscription can be added to that object.
**NOTE:** use same name cloud-region and cloud-owner name
An example is shown below for K8s cloud but following the steps 1,2,3
from
-`here <https://onap.readthedocs.io/en/latest/submodules/multicloud/framework.git/docs/multicloud-plugin-windriver/UserGuide-MultiCloud-WindRiver-TitaniumCloud.html#tutorial-onboard-instance-of-wind-river-titanium-cloud>`__.
+`Multicloud Windriver Plugin documentation <https://docs.onap.org/projects/onap-multicloud-framework/en/latest/multicloud-plugin-windriver/UserGuide-MultiCloud-WindRiver-TitaniumCloud.html?highlight=multicloud>`_.
The sample input below is for k8s cloud type.
**Step 1**: Cloud Registration/ Create a cloud region to represent the instance
@@ -647,7 +648,7 @@ using the Kubernetes API.
curl -X GET http://MSB_NODE_IP:30280/api/multicloud-k8s/v1/v1/instance/ZKMTSaxv
-`*\ https://github.com/onap/oom/blob/master/kubernetes/multicloud/resources/config/provider-plugin.json <https://github.com/onap/oom/blob/master/kubernetes/multicloud/resources/config/provider-plugin.json>`__
+`*\ https://github.com/onap/oom/blob/master/kubernetes/multicloud/resources/config/provider-plugin.json <https://github.com/onap/oom/blob/master/kubernetes/multicloud/resources/config/provider-plugin.json>`_
Create User parameters
~~~~~~~~~~~~~~~~~~~~~~
diff --git a/docs/docs_vipsec.rst b/docs/docs_vipsec.rst
index 755d4c085..db9e894ad 100644
--- a/docs/docs_vipsec.rst
+++ b/docs/docs_vipsec.rst
@@ -4,6 +4,8 @@
.. _docs_vipsec_hpa:
+:orphan:
+
vIPsec with HPA Tutorial: Setting Up and Configuration
--------------------------------------------------------
@@ -22,13 +24,11 @@ The deploy steps look just alike the one for vFW with HPA use case. It is also u
Setting Up and Installation
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Instructions for updating the manifest of ONAP docker images can be found here: https://onap.readthedocs.io/en/casablanca/submodules/integration.git/docs/#deploying-an-updated-docker-manifest
-
Install OOM ONAP using the deploy script in the integration repo. Instructions for this can be found in this link https://wiki.onap.org/display/DW/OOM+Component. When the installation is complete (all the pods are either in running or completed state) Do the following;
1. Check that all the required components were deployed;
-
+
``oom-rancher# helm list``
2. Check the state of the pods;
@@ -37,14 +37,14 @@ Install OOM ONAP using the deploy script in the integration repo. Instructions f
3. Run robot health check
- ``oom-rancher# cd oom/kubernetes/robot``
+ ``oom-rancher# cd oom/kubernetes/robot``
``oom-rancher# ./ete-k8s.sh onap health``
Ensure all the required components pass the health tests
4. Modify the SO bpmn configmap to change the SO vnf adapter endpoint to v2
-
- ``oom-rancher# kubectl -n onap edit configmap dev-so-so-bpmn-infra-app-configmap``
+
+ ``oom-rancher# kubectl -n onap edit configmap dev-so-so-bpmn-infra-app-configmap``
``- vnf:``
@@ -73,7 +73,7 @@ Install OOM ONAP using the deploy script in the integration repo. Instructions f
``oom-rancher# ./demo-k8s.sh onap init``
-7. Create HPA flavors in cloud regions to be registered with ONAP. All HPA flavor names must start with onap. During our tests, 3 cloud regions were registered and we created flavors in each cloud. The flavors match the flavors described in the test plan `here <https://wiki.onap.org/pages/viewpage.action?pageId=41421112>`_.
+7. Create HPA flavors in cloud regions to be registered with ONAP. All HPA flavor names must start with onap. During our tests, 3 cloud regions were registered and we created flavors in each cloud. The flavors match the flavors described in the test plan `here <https://wiki.onap.org/pages/viewpage.action?pageId=41421112>`_.
- **Cloud Region One**
@@ -81,7 +81,7 @@ Install OOM ONAP using the deploy script in the integration repo. Instructions f
``#nova flavor-create onap.hpa.flavor11 111 8 20 2``
``#nova flavor-key onap.hpa.flavor11 set hw:mem_page_size=2048``
-
+
**Flavor12**
``#nova flavor-create onap.hpa.flavor12 112 12 20 2``
@@ -90,9 +90,9 @@ Install OOM ONAP using the deploy script in the integration repo. Instructions f
``#openstack aggregate create --property aggregate_instance_extra_specs:sriov_nic=sriov-nic-intel-8086-154C-shared-1:3 aggr121``
``#openstack flavor set onap.hpa.flavor12 --property aggregate_instance_extra_specs:sriov_nic=sriov-nic-intel-8086-154C-shared-1:3``
-
+
**Flavor13**
- ``#nova flavor-create onap.hpa.flavor13 113 12 20 2``
+ ``#nova flavor-create onap.hpa.flavor13 113 12 20 2``
``#nova flavor-key onap.hpa.flavor13 set hw:mem_page_size=2048``
@@ -110,7 +110,7 @@ Install OOM ONAP using the deploy script in the integration repo. Instructions f
``#nova flavor-key onap.hpa.flavor21 set hw:cpu_policy=dedicated``
``#nova flavor-key onap.hpa.flavor21 set hw:cpu_thread_policy=isolate``
-
+
**Flavor22**
``#nova flavor-create onap.hpa.flavor22 222 12 20 2``
@@ -119,9 +119,9 @@ Install OOM ONAP using the deploy script in the integration repo. Instructions f
``#openstack aggregate create --property aggregate_instance_extra_specs:sriov_nic=sriov-nic-intel-8086-154C-shared-1:2 aggr221``
``#openstack flavor set onap.hpa.flavor22 --property aggregate_instance_extra_specs:sriov_nic=sriov-nic-intel-8086-154C-shared-1:2``
-
+
**Flavor23**
- ``#nova flavor-create onap.hpa.flavor23 223 12 20 2``
+ ``#nova flavor-create onap.hpa.flavor23 223 12 20 2``
``#nova flavor-key onap.hpa.flavor23 set hw:mem_page_size=2048``
@@ -139,20 +139,20 @@ Install OOM ONAP using the deploy script in the integration repo. Instructions f
``#nova flavor-key onap.hpa.flavor31 set hw:cpu_policy=dedicated``
``#nova flavor-key onap.hpa.flavor31 set hw:cpu_thread_policy=isolate``
-
+
**Flavor32**
``#nova flavor-create onap.hpa.flavor32 332 8192 20 2``
``#nova flavor-key onap.hpa.flavor32 set hw:mem_page_size=1048576``
-
+
**Flavor33**
- ``#nova flavor-create onap.hpa.flavor33 333 12 20 2``
+ ``#nova flavor-create onap.hpa.flavor33 333 12 20 2``
``#nova flavor-key onap.hpa.flavor33 set hw:mem_page_size=2048``
``#openstack aggregate create --property aggregate_instance_extra_specs:sriov_nic=sriov-nic-intel-8086-154C-shared-1:1 aggr331``
- ``#openstack flavor set onap.hpa.flavor33 --property aggregate_instance_extra_specs:sriov_nic=sriov-nic-intel-8086-154C-shared-1:1``
+ ``#openstack flavor set onap.hpa.flavor33 --property aggregate_instance_extra_specs:sriov_nic=sriov-nic-intel-8086-154C-shared-1:1``
8. Check that the cloud complex has the right values and update if it does not. Required values are;
@@ -205,7 +205,7 @@ If an update is needed, the update can be done via rest using curl or postman
}'
-9. Register new cloud regions. This can be done using instructions (Step 1 to Step 3) on this `page <https://onap.readthedocs.io/en/latest/submodules/multicloud/framework.git/docs/multicloud-plugin-windriver/UserGuide-MultiCloud-WindRiver-TitaniumCloud.html#tutorial-onboard-instance-of-wind-river-titanium-cloud>`_. The already existing CloudOwner and cloud complex can be used. If step 3 does not work using the k8s ip and external port. It can be done using the internal ip address and port. Exec into any pod and run the command from the pod.
+9. Register new cloud regions. This can be done using instructions (Step 1 to Step 3) on this `page <https://docs.onap.org/projects/onap-multicloud-framework/en/latest/multicloud-plugin-windriver/UserGuide-MultiCloud-WindRiver-TitaniumCloud.html?highlight=multicloud>`_. The already existing CloudOwner and cloud complex can be used. If step 3 does not work using the k8s ip and external port. It can be done using the internal ip address and port. Exec into any pod and run the command from the pod.
- Get msb-iag internal ip address and port
@@ -215,7 +215,7 @@ If an update is needed, the update can be done via rest using curl or postman
``oom-rancher# kubectl exec dev-oof-oof-6c848594c5-5khps -it -- bash``
-10. Put required subscription list into tenant for all the newly added cloud regions. An easy way to do this is to do a get on the default cloud region, copy the tenant information with the subscription. Then paste it in your put command and modify the region id, tenant-id, tenant-name and resource-version.
+10. Put required subscription list into tenant for all the newly added cloud regions. An easy way to do this is to do a get on the default cloud region, copy the tenant information with the subscription. Then paste it in your put command and modify the region id, tenant-id, tenant-name and resource-version.
**GET COMMAND**
@@ -360,14 +360,14 @@ If an update is needed, the update can be done via rest using curl or postman
}
}'
-
+
11. Onboard the vFW HPA template. The templates can be gotten from the `demo <https://github.com/onap/demo>`_ repo. The heat and env files used are located in demo/heat/vFW_HPA/vFW/. Create a zip file using the files. For onboarding instructions see steps 4 to 9 of `vFWCL instantiation, testing and debugging <https://wiki.onap.org/display/DW/vFWCL+instantiation%2C+testing%2C+and+debuging>`_. Note that in step 5, only one VSP is created. For the VSP the option to submit for testing in step 5cii was not shown. So you can check in and certify the VSP and proceed to step 6.
12. Get the parameters (model info, model invarant id...etc) required to create a service instance via rest. This can be done by creating a service instance via VID as in step 10 of `vFWCL instantiation, testing and debugging <https://wiki.onap.org/display/DW/vFWCL+instantiation%2C+testing%2C+and+debuging>`_. After creating the service instance, exec into the SO bpmn pod and look into the /app/logs/bpmn/debug.log file. Search for the service instance and look for its request details. Then populate the parameters required to create a service instance via rest in step 13 below.
13. Create a service instance rest request but do not create service instance yet. Specify OOF as the homing solution and multicloud as the orchestrator. Be sure to use a service instance name that does not exist and populate the parameters with values gotten from step 12.
-::
+::
curl -k -X POST \
http://{{k8s}}:30277/onap/so/infra/serviceInstances/v6 \
@@ -448,14 +448,14 @@ To Update a policy, use the following curl command. Modify the policy as require
"onapName": "SampleDemo",
"policyScope": "OSDF_DUBLIN"
}' 'https://pdp:8081/pdp/api/updatePolicy'
-
+
To delete a policy, use two commands below to delete from PDP and PAP
**DELETE POLICY INSIDE PDP**
::
-
+
curl -k -v -H 'Content-Type: application/json' \
-H 'Accept: application/json' \
-H 'ClientAuth: cHl0aG9uOnRlc3Q=' \
@@ -468,7 +468,7 @@ To delete a policy, use two commands below to delete from PDP and PAP
**DELETE POLICY INSIDE PAP**
::
-
+
curl -k -v -H 'Content-Type: application/json' \
-H 'Accept: application/json' \
-H 'ClientAuth: cHl0aG9uOnRlc3Q=' \
@@ -495,7 +495,7 @@ Create Policy
-Push Policy
+Push Policy
::
@@ -506,7 +506,7 @@ Push Policy
}' 'https://pdp:8081/pdp/api/pushPolicy'
-
+
17. Create Service Instance using step 13 above
18. Check bpmn logs to ensure that OOF sent homing response and flavor directives.
@@ -538,7 +538,7 @@ Push Policy
"vnf-vms": []
},
-
+
"vnf-parameters": [
{
"vnf-parameter-name":"vf_module_id",
@@ -787,13 +787,13 @@ Push Policy
"service-type": "8c071bd1-c361-4157-8282-3fef7689d32e",
"vnf-name": "ipsec-test",
"vnf-type": "Ipsec..base_vipsec..module-0"
-
+
}
}
}}
-
-Change parameters based on your environment.
+
+Change parameters based on your environment.
**Note**
@@ -804,5 +804,5 @@ Change parameters based on your environment.
"service-type": "8c071bd1-c361-4157-8282-3fef7689d32e", <-- same as Service Instance ID
"vnf-name": "ipsec-test", <-- name to be given to the vf module
"vnf-type": "Ipsec..base_vipsec..module-0" <-- can be found on the VID - VF Module dialog screen - Model Name
-
+
21. Create vf module (11g of `vFWCL instantiation, testing and debugging <https://wiki.onap.org/display/DW/vFWCL+instantiation%2C+testing%2C+and+debuging>`_). If everything worked properly, you should see the stack created in your VIM(WR titanium cloud openstack in this case).
diff --git a/docs/docs_vlb.rst b/docs/docs_vlb.rst
index ded308f05..5a9f6a2fb 100644
--- a/docs/docs_vlb.rst
+++ b/docs/docs_vlb.rst
@@ -1,5 +1,7 @@
.. _docs_vlb:
+:orphan:
+
vLoadBalancer Use Case
----------------------
@@ -15,7 +17,7 @@ Source files
Description
~~~~~~~~~~~
-The use case is composed of three VFs: packet generator, load balancer, and DNS server. These VFs run in three separate VMs. The packet generator issues DNS lookup queries that reach the DNS server via the load balancer. DNS replies reach the packet generator via the load balancer as well. The load balancer reports the average amount of traffic per DNS over a time interval to the DCAE collector. When the average amount of traffic per DNS server crosses a predefined threshold, the closed-loop is triggered and a new DNS server is instantiated.
+The use case is composed of three VFs: packet generator, load balancer, and DNS server. These VFs run in three separate VMs. The packet generator issues DNS lookup queries that reach the DNS server via the load balancer. DNS replies reach the packet generator via the load balancer as well. The load balancer reports the average amount of traffic per DNS over a time interval to the DCAE collector. When the average amount of traffic per DNS server crosses a predefined threshold, the closed-loop is triggered and a new DNS server is instantiated.
To test the application, make sure that the security group in OpenStack has ingress/egress entries for protocol 47 (GRE). The user can run a DNS query from the packet generator VM:
@@ -23,7 +25,7 @@ To test the application, make sure that the security group in OpenStack has ingr
dig @vLoadBalancer_IP host1.dnsdemo.onap.org
-The output below means that the load balancer has been set up correctly, has forwarded the DNS queries to one DNS instance, and the packet generator has received the DNS reply message.
+The output below means that the load balancer has been set up correctly, has forwarded the DNS queries to one DNS instance, and the packet generator has received the DNS reply message.
::
@@ -34,26 +36,26 @@ The output below means that the load balancer has been set up correctly, has for
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 31892
;; flags: qr aa rd; QUERY: 1, ANSWER: 1, AUTHORITY: 1, ADDITIONAL: 2
;; WARNING: recursion requested but not available
-
+
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;host1.dnsdemo.onap.org. IN A
-
+
;; ANSWER SECTION:
host1.dnsdemo.onap.org. 604800 IN A 10.0.100.101
-
+
;; AUTHORITY SECTION:
dnsdemo.onap.org. 604800 IN NS dnsdemo.onap.org.
-
+
;; ADDITIONAL SECTION:
dnsdemo.onap.org. 604800 IN A 10.0.100.100
-
+
;; Query time: 0 msec
;; SERVER: 192.168.9.111#53(192.168.9.111)
;; WHEN: Fri Nov 10 17:39:12 UTC 2017
;; MSG SIZE rcvd: 97
-
+
Closedloop for vLoadBalancer/vDNS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -69,10 +71,9 @@ To change the volume of queries generated by the packet generator, run the follo
::
+ curl -X PUT -H "Authorization: Basic YWRtaW46YWRtaW4=" -H "Content-Type: application/json" -H "Cache-Control: no-cache" -d '{"pg-streams":{"pg-stream": [{"id":"dns1", "is-enabled":"true"}]}}' "http://PacketGen_IP:8183/restconf/config/sample-plugin:sample-plugin/pg-streams"
- curl -X PUT -H "Authorization: Basic YWRtaW46YWRtaW4=" -H "Content-Type: application/json" -H "Cache-Control: no-cache" -d '{"pg-streams":{"pg-stream": [{"id":"dns1", "is-enabled":"true"}]}}' "http://PacketGen_IP:8183/restconf/config/sample-plugin:sample-plugin/pg-streams"
-
-- {"id":"dns1", "is-enabled":"true"} shows the stream "dns1" is enabled. The packet generator sends requests in the rate of 100 packets per 10 seconds;
+- {"id":"dns1", "is-enabled":"true"} shows the stream "dns1" is enabled. The packet generator sends requests in the rate of 100 packets per 10 seconds;
- To increase the amount of traffic, you can enable more streams. The packet generator has 10 streams, "dns1", "dns2", "dns3" to "dns10". Each of them generates 100 packets per 10 seconds. To enable the streams, please add {"id":"dnsX", "is-enabled":"true"} to the pg-stream bracket of the curl command, where X is the stream ID.
For example, if you want to enable 3 streams, the curl command will be:
@@ -83,18 +84,20 @@ For example, if you want to enable 3 streams, the curl command will be:
When the VNF starts, the packet generator is automatically configured to run 5 streams.
-
Running the Use Case
~~~~~~~~~~~~~~~~~~~~
-Automated closed loop via Robot Framework is not supported at this time. For documentation about running the use case manually for previous releases, please look at the videos and the material available at this `wiki page`__.
-
-__ https://wiki.onap.org/display/DW/Running+the+ONAP+Demos
+Automated closed loop via Robot Framework is not supported at this time.
+For documentation about running the use case manually for previous releases,
+please look at the videos and the material available at
+`Running the ONAP Demos wiki page <https://wiki.onap.org/display/DW/Running+the+ONAP+Demos>`_
-Although videos are still valid, users are encouraged to use the Heat templates linked at the top of this page rather than the old Heat templates in that wiki page.
+Although videos are still valid, users are encouraged to use the Heat templates
+linked at the top of this page rather than the old Heat templates in that wiki page.
Known issues and resolution
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-1) The packet generator may become unresponsive and stop generating traffic. To solve the problem, reboot the packet generator.
+1) The packet generator may become unresponsive and stop generating traffic.
+ To solve the problem, reboot the packet generator.
2) The base and scaling VF module names need to follow a specific naming convention:
@@ -102,4 +105,4 @@ Known issues and resolution
b) The SDNC preload for the scaling VF module must set the VF module name to "vDNS\_xyz", where "xyz" is the same as the base module. This is required because during closed loop Policy looks for "Vfmodule\_" and replaces it with "vDNS\_"
-3) Only one scaling operation is supported. \ No newline at end of file
+3) Only one scaling operation is supported.
diff --git a/docs/files/CI/ONAP_CI_0.png b/docs/files/CI/ONAP_CI_0.png
new file mode 100644
index 000000000..a0193ec63
--- /dev/null
+++ b/docs/files/CI/ONAP_CI_0.png
Binary files differ
diff --git a/docs/files/CI/ONAP_CI_1.png b/docs/files/CI/ONAP_CI_1.png
new file mode 100644
index 000000000..46765e865
--- /dev/null
+++ b/docs/files/CI/ONAP_CI_1.png
Binary files differ
diff --git a/docs/files/CI/ONAP_CI_10.png b/docs/files/CI/ONAP_CI_10.png
new file mode 100644
index 000000000..fbc2cca0a
--- /dev/null
+++ b/docs/files/CI/ONAP_CI_10.png
Binary files differ
diff --git a/docs/files/CI/ONAP_CI_2.png b/docs/files/CI/ONAP_CI_2.png
new file mode 100644
index 000000000..d98b19112
--- /dev/null
+++ b/docs/files/CI/ONAP_CI_2.png
Binary files differ
diff --git a/docs/files/CI/ONAP_CI_3.png b/docs/files/CI/ONAP_CI_3.png
new file mode 100644
index 000000000..616440cc5
--- /dev/null
+++ b/docs/files/CI/ONAP_CI_3.png
Binary files differ
diff --git a/docs/files/CI/ONAP_CI_4.png b/docs/files/CI/ONAP_CI_4.png
new file mode 100644
index 000000000..05ab52e40
--- /dev/null
+++ b/docs/files/CI/ONAP_CI_4.png
Binary files differ
diff --git a/docs/files/CI/ONAP_CI_5.png b/docs/files/CI/ONAP_CI_5.png
new file mode 100644
index 000000000..ce53661a6
--- /dev/null
+++ b/docs/files/CI/ONAP_CI_5.png
Binary files differ
diff --git a/docs/files/CI/ONAP_CI_6.png b/docs/files/CI/ONAP_CI_6.png
new file mode 100644
index 000000000..b8a11d9a0
--- /dev/null
+++ b/docs/files/CI/ONAP_CI_6.png
Binary files differ
diff --git a/docs/files/CI/ONAP_CI_7.png b/docs/files/CI/ONAP_CI_7.png
new file mode 100644
index 000000000..13f8782b0
--- /dev/null
+++ b/docs/files/CI/ONAP_CI_7.png
Binary files differ
diff --git a/docs/files/CI/ONAP_CI_8.png b/docs/files/CI/ONAP_CI_8.png
new file mode 100755
index 000000000..3263e93f8
--- /dev/null
+++ b/docs/files/CI/ONAP_CI_8.png
Binary files differ
diff --git a/docs/files/CI/ONAP_CI_9.png b/docs/files/CI/ONAP_CI_9.png
new file mode 100644
index 000000000..db31cab6f
--- /dev/null
+++ b/docs/files/CI/ONAP_CI_9.png
Binary files differ
diff --git a/docs/files/bbs/BBS_dcae-ves-collector_config.png b/docs/files/bbs/BBS_dcae-ves-collector_config.png
new file mode 100644
index 000000000..edce5985a
--- /dev/null
+++ b/docs/files/bbs/BBS_dcae-ves-collector_config.png
Binary files differ
diff --git a/docs/files/csv/release-demo-features.csv b/docs/files/csv/release-demo-features.csv
new file mode 100644
index 000000000..22bc99eaa
--- /dev/null
+++ b/docs/files/csv/release-demo-features.csv
@@ -0,0 +1,5 @@
+Issue-ID;Description
+INT-2094;[APACHE] Add Apache CNF use case files
+INT-2069;Make Network Slicing usecase more user friendly
+INT-1960;[vFW CNF CDS] Fix issue with multiple tenants creation for k8s region
+INT-1960;[vFW CNF CDS] vf_module and vnf name generation improvement in CBA
diff --git a/docs/files/csv/release-integration-features.csv b/docs/files/csv/release-integration-features.csv
new file mode 100644
index 000000000..ed06a4284
--- /dev/null
+++ b/docs/files/csv/release-integration-features.csv
@@ -0,0 +1,5 @@
+Issue-ID;Description
+INT-2070;Automate 5G Slicing use case in CI - Manual configuration step 7 - AAI Configuration
+INT-2164;Update Apache CNF Demo
+INT-2126;Data provider - add relationships into cloud region and tenant
+INT-2135;Add CPS resources into data-provider
diff --git a/docs/files/csv/release-integration-ref.csv b/docs/files/csv/release-integration-ref.csv
new file mode 100644
index 000000000..b9f3535ef
--- /dev/null
+++ b/docs/files/csv/release-integration-ref.csv
@@ -0,0 +1,39 @@
+Repository;Revision
+demo;5dcd47bfc76ab8257fcc3b8927b78295a4376c8b
+integration;cdacb811f7acc2eb0a6e5662d8d225a967160f2c
+integration/csit;08bc9551a07da29c478ca2f6487aafa651ea95dd
+integration/data-provider;3f1416193789e00f6b09029c7e841d98803e5749
+integration/docker/onap-java11;1a5e22c812aa988fbfc27a668c1d2541db971080
+integration/docker/onap-python;e2de6cdd2d5f2e4a8067c40d1af0392b02426d61
+integration/ietf-actn-tools;3a8b2c604c13584fbc807ac21058288c424893b3
+integration/onap-component-simulators;748da401868fbf35744e292ee132be614b489623
+integration/pipelines/chained-ci;0399d9842c2a5670e4ee21d45343d2ac168eee2d
+integration/pipelines/oom-automatic-installation;4934d86bfe15a6132331f802afb4b3a062cbaf8c
+integration/pipelines/xtesting-onap;276fb7948607bd6b3fc09693619f021763b5ec6e
+integration/python-onapsdk;25cafb994c9ba3b874cd973a1e1d440fb0b98bf0
+integration/seccom;0131d962bdfcf97794ac49a2f45f5eb974249288
+integration/simulators/5G-core-nf-simulator;1eaabdb8299e49dc7e81c205edce8ce46e64a511
+integration/simulators/A1-policy-enforcement-simulator;63501fbea1b8a6dc859099c3465b1758e19e0956
+integration/simulators/core-nssmf-simulator;5ce930d4a9e8137b1fbac98a58ebe2cfd3e6a77e
+integration/simulators/nf-simulator;b4e937c4c92ef68fd1ab640ce9e30a531112b371
+integration/simulators/nf-simulator/avcn-manager;13fab3acdc7a22ca7000125112c84a2e942ea307
+integration/simulators/nf-simulator/netconf-server;8d0a5c8e95ef58e391eee31c1562dcb541c4c369
+integration/simulators/nf-simulator/pm-https-server;1f0bcaac6410f2a5497aad7c6ed5e46610a4a8a7
+integration/simulators/nf-simulator/ves-client;94891f32b3e2c4be8240b4df88830f97f2255e0b
+integration/simulators/pnf-simulator;f00c718bc3978d5b7ed55a5cda1c2a1443919af9
+integration/simulators/ran-app;746cc0a4aa1ada72d98ed161322fb2bd1e359637
+integration/simulators/ran-nssmf-simulator;1528d0a38026e3e183de2d4dcf7bbfcfec633eda
+integration/simulators/ran-simulator;41bbe166748510b4c5be7606ff27ee7ee64cb001
+integration/usecases/A1-policy-enforcement;0a885a2b3595988922b8e7af3e76bef4913de8bd
+integration/usecases/A1-policy-enforcement-r-apps;e3f79f43ba8e51fda97a5d67cd5c01b04e54c9e1
+integration/xtesting;c90366fa6ec58cd063addcf50c6948ea859f5fc9
+oparent;371de4b87ccf46f1292d68468fcfd41303ab394c
+testsuite;d7fe3776469399384a340b9a38e2c0de8771e6c1
+testsuite/cds;a5eb7d86926fac92965c35de1cae9114dc471a35
+testsuite/cds-mock-odl;b7f29dc3f03ec40369941af6e525d40c822d1ced
+testsuite/cds-mock-server;7db71adaf139e54f2186cfd19d468f5a1123835d
+testsuite/cds-mock-ssh;a43ce8950dcc36363c406b1cc4043dc7d623c9f4
+testsuite/oom;9e5fee150e86c868c0ef40f2a34494be36bd41fc
+testsuite/python-testing-utils;f9d29ad319d54cdabe63b52c20c9acd9d475347b
+testsuite/pythonsdk-tests;a9dddc1095dad400626871f3f1dc5df96d05e035
+testsuite/robot-utils;7e7fbedd13aa9c33433601c8d276f0d43fcd6c78
diff --git a/docs/files/csv/release-oparent-features.csv b/docs/files/csv/release-oparent-features.csv
new file mode 100644
index 000000000..b4a48add1
--- /dev/null
+++ b/docs/files/csv/release-oparent-features.csv
@@ -0,0 +1,4 @@
+Issue-ID;Description
+
+USECASEUI-709;Upgrade dependency versions in oparent
+USECASEUI-709;Update Logback to Version 1.2.10
diff --git a/docs/files/csv/release-pythonsdk-features.csv b/docs/files/csv/release-pythonsdk-features.csv
new file mode 100644
index 000000000..6d96dd7b7
--- /dev/null
+++ b/docs/files/csv/release-pythonsdk-features.csv
@@ -0,0 +1,2 @@
+Issue-ID;Description
+INT-2119;Use 10.0.0 version of SDK in tests \ No newline at end of file
diff --git a/docs/files/csv/release-testsuite-features.csv b/docs/files/csv/release-testsuite-features.csv
new file mode 100644
index 000000000..2b05ce227
--- /dev/null
+++ b/docs/files/csv/release-testsuite-features.csv
@@ -0,0 +1,2 @@
+Issue-ID;Description
+INT-2160;[ROBOT] Fix the DCAEMOD testcase for ServiceMesh
diff --git a/docs/files/csv/repo-archived.csv b/docs/files/csv/repo-archived.csv
new file mode 100644
index 000000000..4eedae852
--- /dev/null
+++ b/docs/files/csv/repo-archived.csv
@@ -0,0 +1,10 @@
+Repository;Description;Link
+integration/benchmark;Benchmark project;`link <https://git.onap.org/integration/benchmark>`__
+integration/devtool;Devtool project;`link <https://git.onap.org/integration/devtool>`__
+integration/simulators/dc-simulator;Data Center simulator;`link <https://git.onap.org/integration/simulators/dc-simulator>`__
+integration/simulators/masspnf-simulator;Mass PNF Simulator;`link <https://git.onap.org/integration/simulators/masspnf-simulator>`__
+integration/terraform;Terraform based alternative infrastructure installation;`link <https://git.onap.org/integration/terraform>`__
+integration/terragrunt;Companion repository of terraform;`link <https://git.onap.org/integration/terragrunt>`__
+integration/usecases/bbs;BBS use case introduced in Dublin and extracted from global repository in frankfurt;`link <https://git.onap.org/integration/usecases/bbs>`__
+integration/usecases/mdons;MDONS use case introduced in Frankfurt;`link <https://git.onap.org/integration/usecases/mdons>`__
+testsuite/heatbridge;python utils to manage the heatbridge function to enrich cloud information to AAI (deprecated);`link <https://git.onap.org/testsuite/heatbridge>`__ \ No newline at end of file
diff --git a/docs/files/csv/repo-demo.csv b/docs/files/csv/repo-demo.csv
new file mode 100644
index 000000000..2a1432693
--- /dev/null
+++ b/docs/files/csv/repo-demo.csv
@@ -0,0 +1,2 @@
+Repository;Description;Link
+demo;Historical repository to host use case artifacts (heat templates, json files,..);`link <https://git.onap.org/demo>`__
diff --git a/docs/files/csv/repo-integration-external.csv b/docs/files/csv/repo-integration-external.csv
new file mode 100644
index 000000000..dc401c0a9
--- /dev/null
+++ b/docs/files/csv/repo-integration-external.csv
@@ -0,0 +1,2 @@
+Repository;Description;Link
+integration-view;Repository integration hosting the integration portal including the hosting of the web site;`link <https://gitlab.com/Orange-OpenSource/lfn/onap/integration-view>`__
diff --git a/docs/files/csv/repo-integration.csv b/docs/files/csv/repo-integration.csv
new file mode 100644
index 000000000..b7d8a392a
--- /dev/null
+++ b/docs/files/csv/repo-integration.csv
@@ -0,0 +1,13 @@
+Repository;Description;Link
+integration;Historical main repository including documentation, simulators (e.g. mass PNF simulator), non robot tests (e.g. security tests, vCPE Tosca,..), ...;`link <https://git.onap.org/integration>`__
+integration/csit;Repository hosting some tooling to start component functional tests in Jenkins (To be deprecated in Guilin as such tests must be reinsourced by the projects);`link <https://git.onap.org/integration/csit>`__
+integration/data-provider;Project that provides a tool to automate common ONAP resource creation;`link <https://git.onap.org/integration/data-provider>`__
+integration/docker/onap-java11;Java11 baseline image conformed to SECCOM recommendations;`link <https://git.onap.org/integration/docker/onap-java11>`__
+integration/docker/onap-python;Python baseline image conformed to SECCOM recommendations;`link <https://git.onap.org/integration/docker/onap-python>`__
+integration/ietf-actn-tools;IETF ACTN tools introduced in Honolulu;`link <https://git.onap.org/integration/ietf-actn-tools>`__
+integration/onap-component-simulators;ONAP component simulators used for tests;`link <https://git.onap.org/integration/onap-component-simulators/>`__
+integration/python-onapsdk;ONAP Python SDK repository;`link <https://git.onap.org/integration/python-onapsdk/>`__
+integration/seccom;Repository hosting seccom recommended versions and security test waivers;`link <https://git.onap.org/integration/seccom>`__
+integration/usecases/A1-policy-enforcement;A1 policy enforcement introduced in Honolulu;`link <https://git.onap.org/integration/usecases/A1-policy-enforcement>`__
+integration/usecases/A1-policy-enforcement-r-apps;A1 policy enforcement (analytics part) introduced in Honolulu;`link <https://git.onap.org/integration/usecases/A1-policy-enforcement-r-apps>`__
+integration/xtesting;Repository in charge to build the xtesting dockers used in CI/CD chains;`link <https://git.onap.org/integration/xtesting>`__
diff --git a/docs/files/csv/repo-oparent.csv b/docs/files/csv/repo-oparent.csv
new file mode 100644
index 000000000..30cbdc78a
--- /dev/null
+++ b/docs/files/csv/repo-oparent.csv
@@ -0,0 +1,3 @@
+Repository;Description;Link
+oparent;Java dependencies for JAVA projects;`link <https://git.onap.org/oparent>`__
+oparent/cia;Dockerfile optimization and best practices;`link <https://git.onap.org/oparent/cia>`__
diff --git a/docs/files/csv/repo-pipelines.csv b/docs/files/csv/repo-pipelines.csv
new file mode 100644
index 000000000..27e43e82c
--- /dev/null
+++ b/docs/files/csv/repo-pipelines.csv
@@ -0,0 +1,4 @@
+Repository;Description;Link
+integration/pipelines/chained-ci;Main pipelines project which holds configuration;`link <https://git.onap.org/integration/pipelines/chained-ci/>`__
+integration/pipelines/oom-automatic-installation;Definition of pipelines to instantiate ONAP;`link <https://git.onap.org/integration/pipelines/oom-automatic-installation/>`__
+integration/pipelines/xtesting-onap;Definition of pipelines to execute tests;`link <https://git.onap.org/integration/pipelines/xtesting-onap/>`__ \ No newline at end of file
diff --git a/docs/files/csv/repo-simulators.csv b/docs/files/csv/repo-simulators.csv
new file mode 100644
index 000000000..91f75eb66
--- /dev/null
+++ b/docs/files/csv/repo-simulators.csv
@@ -0,0 +1,13 @@
+Repository;Description;Link
+integration/simulators/5G-core-nf-simulator;5G core nf simulator;`link <https://git.onap.org/integration/simulators/5G-core-nf-simulator>`__
+integration/simulators/A1-policy-enforcement-simulator;A1 Policy Enforcement Simulator;`link <https://git.onap.org/integration/simulators/A1-policy-enforcement-simulator>`__
+integration/simulators/core-nssmf-simulator;Core NSSMF Simulator;`link <https://git.onap.org/integration/simulators/core-nssmf-simulator>`__
+integration/simulators/nf-simulator;NF simulator;`link <https://git.onap.org/integration/simulators/nf-simulator>`__
+integration/simulators/nf-simulator/avcn-manager;NF simulator avcn manager;`link <https://git.onap.org/integration/simulators/nf-simulator/avcn-manager>`__
+integration/simulators/nf-simulator/netconf-server;NF simulator netconf server;`link <https://git.onap.org/integration/simulators/nf-simulator/netconf-server>`__
+integration/simulators/nf-simulator/pm-https-server;NF simulator pm https server;`link <https://git.onap.org/integration/simulators/nf-simulator/pm-https-server>`__
+integration/simulators/nf-simulator/ves-client;NF simulator ves client;`link <https://git.onap.org/integration/simulators/nf-simulator/ves-client>`__
+integration/simulators/pnf-simulator;PNF Simulator;`link <https://git.onap.org/integration/simulators/pnf-simulator>`__
+integration/simulators/ran-app;RAN App;`link <https://git.onap.org/integration/simulators/ran-app/>`__
+integration/simulators/ran-nssmf-simulator;RAN NSSMF simulator;`link <https://git.onap.org/integration/simulators/ran-nssmf-simulator>`__
+integration/simulators/ran-simulator;RAN simulator;`link <https://git.onap.org/integration/simulators/ran-simulator>`__
diff --git a/docs/files/csv/repo-testsuite.csv b/docs/files/csv/repo-testsuite.csv
new file mode 100644
index 000000000..79fc01860
--- /dev/null
+++ b/docs/files/csv/repo-testsuite.csv
@@ -0,0 +1,10 @@
+Repository;Description;Link
+testsuite;repository hosting the robot test suites;`link <https://git.onap.org/testsuite>`__
+testsuite/cds;Repository hosting (standalone) CDS test suites shared by Bell Canada team, not yet integrated in CI/CD;`link <https://git.onap.org/testsuite/cds>`__
+testsuite/cds-mock-odl;needed for cds regression tests;`link <https://git.onap.org/testsuite/cds-mock-odl>`__
+testsuite/cds-mock-server;needed for cds regression tests;`link <https://git.onap.org/testsuite/cds-mock-server>`__
+testsuite/cds-mock-ssh;needed for cds regression tests;`link <https://git.onap.org/testsuite/cds-mock-ssh>`__
+testsuite/oom;Helm chart for robot pod (to be deprecated in Honolulu and moved back to OOM);`link <https://git.onap.org/testsuite/oom>`__
+testsuite/python-testing-utils;Python and robot util libraries used for robot tests;`link <https://git.onap.org/testsuite/python-testing-utils>`__
+testsuite/pythonsdk-tests;Repository hosting the test scenarios leveraging python-onapsdk for end to end smoke tests;`link <https://git.onap.org/testsuite/pythonsdk-tests>`__
+testsuite/robot-utils;Repository aiming to provide a robot wrapper for python-onapsdk;`link <https://git.onap.org/testsuite/robot-utils>`__
diff --git a/docs/files/csv/s3p-instantiation.csv b/docs/files/csv/s3p-instantiation.csv
new file mode 100644
index 000000000..d21f2ee5e
--- /dev/null
+++ b/docs/files/csv/s3p-instantiation.csv
@@ -0,0 +1,6 @@
+Parameters;Jakarta;Istanbul;Honolulu
+Number of tests;1190;1310;1410
+Global success rate;96%;97%;96%
+Min duration;140s;193s;81s
+Max duration;2075s;2128s;2000s
+mean duration;603s;564s;530s \ No newline at end of file
diff --git a/docs/files/csv/s3p-sdc.csv b/docs/files/csv/s3p-sdc.csv
new file mode 100644
index 000000000..cd9bb9e6c
--- /dev/null
+++ b/docs/files/csv/s3p-sdc.csv
@@ -0,0 +1,6 @@
+Parameters;Jakarta;Istanbul;Honolulu
+Number of tests;1000;1085;715
+Global success rate;92%;92%;93%
+Min duration;119s;111s;80s
+Max duration;844s;799s;1128s
+mean duration;394s;366s;565s \ No newline at end of file
diff --git a/docs/files/csv/simulators.csv b/docs/files/csv/simulators.csv
new file mode 100644
index 000000000..69e6b57f8
--- /dev/null
+++ b/docs/files/csv/simulators.csv
@@ -0,0 +1,6 @@
+Name;Description;Link;Contacts
+NF Simulator;Evolution of the pnf simulator, the Network service simulator;:ref:`official doc <nf_simulator>`;K.Kuzmicki
+A1 Policy Enforcement Simulator;Simulator that supports the A1-P OSC_2.1.0 interface and also provides internal API to manage the RAN elements (Cells, Ues) and allows to customize and send VES Events;`official readme <https://git.onap.org/integration/simulators/A1-policy-enforcement-simulator/tree/README.md>`__;Krystian Kędroń
+Mass PNF Simulator;Mimic the PNF for benchmark purposes;`official readme <https://git.onap.org/integration/simulators/masspnf-simulator/tree/README.md>`__;Tamas Bakai
+Ran simulator;RAN-SIM is a Radio Access Network Simulator, it is used to simulate the various functionalities of an eNodeB;`official readme <https://git.onap.org/integration/simulators/ran-simulator/tree/README.md>`__;Priyadharshini B
+DC simulator;Data Center simulator;`official readme <https://git.onap.org/integration/simulators/dc-simulator/tree/README.md>`__;Xin Miao
diff --git a/docs/files/csv/stability_basic_vm.csv b/docs/files/csv/stability_basic_vm.csv
new file mode 100644
index 000000000..5ff8d0807
--- /dev/null
+++ b/docs/files/csv/stability_basic_vm.csv
@@ -0,0 +1,11 @@
+Basic_vm metric;Value
+Number of PASS occurrences;557
+Number of Raw FAIL Occurrences;174
+Raw Success rate; 76%
+Corrected success rate; 86%
+Average duration of the test;549s (9m9s)
+Min duration;188s (3m8s)
+Max duration;2161s (36m1s)
+Median duration;271s (4m34s)
+% of Duration < 282s; 50%
+% of duration > 660s; 29%
diff --git a/docs/files/csv/stability_cluster_metric_cpu.csv b/docs/files/csv/stability_cluster_metric_cpu.csv
new file mode 100644
index 000000000..e77d61691
--- /dev/null
+++ b/docs/files/csv/stability_cluster_metric_cpu.csv
@@ -0,0 +1,2 @@
+Namespace;CPU Utilisation (from requests);CPU utilisation (from limits);Memory Utilisation (from requests);Memory Utilisation (from limits)
+onap;2.22%;0.816%;19%;7.4%
diff --git a/docs/files/csv/stability_cluster_metric_memory.csv b/docs/files/csv/stability_cluster_metric_memory.csv
new file mode 100644
index 000000000..40c6fa566
--- /dev/null
+++ b/docs/files/csv/stability_cluster_metric_memory.csv
@@ -0,0 +1,2 @@
+Namespace;Pods;Workloads;Memory Usage;Memory Requests;Memory Requests %;Memory Limits;Memory Limits %
+onap;242;181;160.70 GiB;193.13 GiB;83.21%;493.09 GiB;32.59%
diff --git a/docs/files/csv/stability_cluster_metric_network.csv b/docs/files/csv/stability_cluster_metric_network.csv
new file mode 100644
index 000000000..46f02a7f7
--- /dev/null
+++ b/docs/files/csv/stability_cluster_metric_network.csv
@@ -0,0 +1,2 @@
+Namespace;Current Receive Bandwidth;Current Transmit Bandwidth;Rate of Received Packets;Rate of Transmitted Packets;Rate of Received Packets Dropped;Rate of Transmitted Packets Dropped
+onap; 1.03 MBs; 1.07 MBs;5.08 kpps;5.02 kpps;0 pps;0 pps
diff --git a/docs/files/csv/stability_top10_cpu.csv b/docs/files/csv/stability_top10_cpu.csv
new file mode 100644
index 000000000..cdd93c1a9
--- /dev/null
+++ b/docs/files/csv/stability_top10_cpu.csv
@@ -0,0 +1,11 @@
+Pod;CPU Usage;CPU Requests;CPU Request %;CPU Limits;CPU Limits %
+onap-robot;0.92;1;92%;4;23%
+onap-cassandra-0;0.4;0.4;101%;1.6;25%
+onap-cassandra-2;0.36;0.4;83%;1.6;22%
+onap-ejbca;0.35;0.02;1771%;3;11%
+onap-aws;0.35;6;6%;;
+onap-cassandra-1;0.33;0.4;83%;1.6;21%
+onap-oof-has-api;0.12;2;6%;8;1%
+onap-music-cassandra-2;0.12;0.32;32%;1;12%
+onap-dcae-cloudify-manager;0.11;2;6%;4;3%
+onap-music-cassandra-1;0.09;0.32;29%;1;9%
diff --git a/docs/files/csv/stability_top10_memory.csv b/docs/files/csv/stability_top10_memory.csv
new file mode 100644
index 000000000..504afc2ac
--- /dev/null
+++ b/docs/files/csv/stability_top10_memory.csv
@@ -0,0 +1,11 @@
+Pod;Memory Usage;Memory Requests;Memory Requests %;Memory Limits;Memory Limits %
+onap-portal-cassandra;3.34 GiB;6.2 GiB;53.8%;7.5 GiB;45%
+onap-cassandra-2;2.6 GiB;5 GiB;52%;8 GiB;32%
+onap-cassandra-0;2.6 GiB;5 GiB;52%;8 GiB;32%
+onap-cassandra-1;2.54 GiB;5 GiB;51%;8 GiB;32%
+onap-appc;2.46 GiB;4 GiB;62%;8 GiB; 32%
+onap-sdnc;2.43 GiB;4 GiB;61%;8 GiB; 30%
+onap-policy-mariadb-0;2.4 GiB;1.96 GiB;122%;5.5 GiB;44%
+onap-dcae-cloudify-manager;4.7 GiB;2 GiB;233%;4 GiB;115%
+onap-awx;1.72 GiB;12 GiB;14%;;
+onap-aaf-cass;1.45 GiB;2.5 GiB;58%;3.5 GiB;41.4%
diff --git a/docs/files/csv/stability_top10_net.csv b/docs/files/csv/stability_top10_net.csv
new file mode 100644
index 000000000..b86ba909f
--- /dev/null
+++ b/docs/files/csv/stability_top10_net.csv
@@ -0,0 +1,11 @@
+Pod;Current Receive Bandwidth;Current Transmit Bandwidth;Rate of Received Packets;Rate of Transmitted Packets;Rate of Received Packets Dropped;Rate of Transmitted Packets Dropped
+onap-oof-has-api;372 kB/s;670 B/s;9.21 p/s;5.4 p/s;0 p/s;0 p/s
+onap-cassandra-2;231 kB/s;155 kB/s;90.35 p/s;69.84 p/s;0 p/s;0 p/s
+onap-cassandra-1;228 kB/s;156 kB/s;87 p/s;82 p/s;0 p/s;0 p/s
+onap-cassandra-0;144 kB/s;245 kB/s;63 p/s;75 p/s;0 p/s;0 p/s
+onap-message-router-0;17 kB/s;18 kB/s;187 p/s;188 p/s;0 p/s;0 p/s
+onap-portal-app;15 kB/s;4.7 kB/s;187 p/s;188 p/s;0 p/s;0 p/s
+onap-consul;14 kB/s;3.9 kB/s;38 p/s;40 p/s;0 p/s;0 p/s
+onap-message-router-kafka-0;10 kB/s;10 kB/s;112 p/s;115 p/s;0 p/s;0 p/s
+onap-dcaemodul-onboarding-apt_install;6.7 kB/s;6.1 kB/s;45 p/s;47 p/s;0 p/s;0 p/s
+onap-message-router-kafka-2;6.3 kB/s;6.1 kB/s;70 p/s;72 p/s;0 p/s;0 p/s
diff --git a/docs/files/csv/tests-healthcheck.csv b/docs/files/csv/tests-healthcheck.csv
new file mode 100644
index 000000000..32ee6cfce
--- /dev/null
+++ b/docs/files/csv/tests-healthcheck.csv
@@ -0,0 +1,11 @@
+Tests;Description;Code;Comments
+core;Robot healthcheck tests of the core components (A&AI, DMAAP, Portal, SDC, SDNC, SO);`robot tests <https://git.onap.org/testsuite/tree/robot/testsuites/health-check.robot>`__;`robotframework <https://robotframework.org/>`__
+full;Robot healthcheck tests for all the components, **holmes healthcheck** have been reintroduced;`robot tests <https://git.onap.org/testsuite/tree/robot/testsuites/health-check.robot>`__;`robotframework <https://robotframework.org/>`__
+healthdist;Check the onboarding and distribution of the vFW;`robot tests <https://git.onap.org/testsuite/tree/robot/testsuites/health-check.robot>`__;`robotframework <https://robotframework.org/>`__
+postinstall;Check dmaap and A&AI Design model DB tests;`robot tests <https://git.onap.org/testsuite/tree/robot/testsuites/post-install-tests.robot>`__;`robotframework <https://robotframework.org/>`__
+ves-collector (new);Suite for checking handling events by VES Collector;`code <https://git.onap.org/testsuite/tree/robot/testsuites/ves.robot>`__;`robotframework <https://robotframework.org/>`__
+hv-ves;HV-VES 'Sunny Scenario' Robot Framework test - message is sent to the collector and Kafka topic is checked if the message has been published. Content is decoded and checked.;`code <https://git.onap.org/testsuite/tree/robot/testsuites/hvves-ci.robot>`__;`robotframework <https://robotframework.org/>`__
+basic_onboard;onboard a model, subset of most of the other basic_* tests, created to perform stability testing;`code <https://git.onap.org/testsuite/pythonsdk-tests/tree/src/onaptests/scenario/basic_onboard.py>`__;`onap_pythonsdk <https://gitlab.com/Orange-OpenSource/lfn/onap/python-onapsdk>`__, `pythonsdk-tests <https://git.onap.org/testsuite/pythonsdk-tests/tree/>`__
+cps-healthcheck;Call liveness and readiness probes of the CPS module;`robot tests <https://github.com/onap/cps/blob/master/csit/tests/actuator/actuator.robot>`__;`robotframework <https://robotframework.org/>`__
+**cps-temporal-healthcheck**;Call endpoints of CPS Temporal component;`robot tests <https://github.com/onap/cps-cps-temporal/blob/master/csit/tests/actuator/actuator.robot>`__;`robotframework <https://robotframework.org/>`__
+**cps-dmi-plugin-healthcheck**;Call endpoints of CPS DMI plugin component;`robot tests <https://github.com/onap/cps-ncmp-dmi-plugin/blob/master/csit/tests/actuator/actuator.robot>`__;`robotframework <https://robotframework.org/>`__ \ No newline at end of file
diff --git a/docs/files/csv/tests-infrastructure-healthcheck.csv b/docs/files/csv/tests-infrastructure-healthcheck.csv
new file mode 100644
index 000000000..a350f6bc8
--- /dev/null
+++ b/docs/files/csv/tests-infrastructure-healthcheck.csv
@@ -0,0 +1,4 @@
+Tests;Description;Code;Comments
+onap-helm;Verify Helm chart status, the test has been updated to take into account Helm3;`python module <https://gitlab.com/Orange-OpenSource/lfn/tools/helm-onap-status>`__;
+onap-k8s;Check common resources of the ONAP Kubernetes namespace ;`python module <https://gitlab.com/Orange-OpenSource/lfn/tools/kubernetes-status>`__;kubernetes python library
+onap-k8s-teardown;Check common resources of the ONAP Kubernetes namespace after all tests execution;`python module <https://gitlab.com/Orange-OpenSource/lfn/tools/kubernetes-status>`__;kubernetes python library
diff --git a/docs/files/csv/tests-security.csv b/docs/files/csv/tests-security.csv
new file mode 100644
index 000000000..9d949a9e0
--- /dev/null
+++ b/docs/files/csv/tests-security.csv
@@ -0,0 +1,5 @@
+Tests;Description;Code;Comments
+root_pods;check that pods are not using root user or started as root; `bash script <https://git.onap.org/integration/xtesting/tree/security/scripts/check_security_root.sh>`__; kubectl
+unlimitted_pods;check that limits are set for pods;`bash script <https://git.onap.org/integration/xtesting/tree/security/scripts/check_unlimitted_pods.sh>`__; kubectl
+nonssl_endpoints;check that all public HTTP endpoints exposed in ONAP cluster use SSL tunnels;`Go script <https://git.onap.org/integration/plain/test/security/sslendpoints/main.go>`__;kubectl, nmap
+nodeport_check_certs;This test list the nodeports and tries to get SSL information to evaluate the validity of the certificates (expiration and issuer) used on the nodeports;`python module <https://git.onap.org/integration/tree/test/security/check_certificates>`__;pyopenssl, kubernetes python libraries
diff --git a/docs/files/csv/tests-smoke.csv b/docs/files/csv/tests-smoke.csv
new file mode 100644
index 000000000..16ea7593e
--- /dev/null
+++ b/docs/files/csv/tests-smoke.csv
@@ -0,0 +1,11 @@
+Tests;Description;Code;Comments
+basic_vm;Onboard, distribute and instantiate an Openstack VM using à la carte BPMN, replaced the former basic_vm test;`code <https://git.onap.org/testsuite/pythonsdk-tests/tree/src/onaptests/scenario/basic_vm.py>`__;`onap_pythonsdk <https://gitlab.com/Orange-OpenSource/lfn/onap/python-onapsdk>`__, `pythonsdk-tests <https://git.onap.org/testsuite/pythonsdk-tests/tree/>`__
+basic_network;Onboard, distribute and instantiate a Neutron network;`code <https://git.onap.org/testsuite/pythonsdk-tests/tree/src/onaptests/scenario/basic_network.py>`__;`onap_pythonsdk <https://gitlab.com/Orange-OpenSource/lfn/onap/python-onapsdk>`__, `pythonsdk-tests <https://git.onap.org/testsuite/pythonsdk-tests/tree/>`__
+basic_cnf;Onboard (new), distribute and instantiate a Kubernetes pods;`code <https://git.onap.org/testsuite/pythonsdk-tests/tree/src/onaptests/scenario/basic_cnf.py>`__;`onap_pythonsdk <https://gitlab.com/Orange-OpenSource/lfn/onap/python-onapsdk>`__, `pythonsdk-tests <https://git.onap.org/testsuite/pythonsdk-tests/tree/>`__
+5gbulkpm;5G Bulk PM Usecase functionality. The test has been significantly enhanced in Honolulu;`code <https://git.onap.org/testsuite/tree/robot/testsuites/usecases/5gbulkpm.robot>`__;`robotframework <https://robotframework.org/>`__
+pnf-registrate;Executes the PNF registration test cases including setup and teardown;`code <https://git.onap.org/testsuite/tree/robot/testsuites/pnf-registration.robot>`__;`robotframework <https://robotframework.org/>`__
+cmpv2;CMPv2 Usecase functionality;`code <https://git.onap.org/testsuite/tree/robot/testsuites/cmpv2.robot>`__;`robotframework <https://robotframework.org/>`__
+basic_vm_macro;Instantiate a VM using macro bpmn;`code <https://git.onap.org/testsuite/pythonsdk-tests/tree/src/onaptests/scenario/basic_vm_macro.py>`__;`onap_pythonsdk <https://gitlab.com/Orange-OpenSource/lfn/onap/python-onapsdk>`__, `pythonsdk-tests <https://git.onap.org/testsuite/pythonsdk-tests/tree/>`__
+pnf_macro;Run PNF simulator, onboard, distribute and instantiate service including PNF;`code <https://git.onap.org/testsuite/pythonsdk-tests/tree/src/onaptests/scenario/pnf_macro.py>`__;`onap_pythonsdk <https://gitlab.com/Orange-OpenSource/lfn/onap/python-onapsdk>`__, `pythonsdk-tests <https://git.onap.org/testsuite/pythonsdk-tests/tree/>`__
+cds_resource_resolution;Upload blueprint file into CDS, execute test workflow and check results;`code <https://git.onap.org/testsuite/pythonsdk-tests/tree/src/onaptests/scenario/cds_resource_resolution.py>`__;`onap_pythonsdk <https://gitlab.com/Orange-OpenSource/lfn/onap/python-onapsdk>`__, `pythonsdk-tests <https://git.onap.org/testsuite/pythonsdk-tests/tree/>`__
+basic_cnf_macro;Onboard (new), distribute and instantiate a Kubernetes pods using SO's macro flow;`code <https://git.onap.org/testsuite/pythonsdk-tests/tree/src/onaptests/scenario/basic_cnf_macro.py>`__;`onap_pythonsdk <https://gitlab.com/Orange-OpenSource/lfn/onap/python-onapsdk>`__, `pythonsdk-tests <https://git.onap.org/testsuite/pythonsdk-tests/tree/>`__
diff --git a/docs/files/csv/usecases-deprecated.csv b/docs/files/csv/usecases-deprecated.csv
new file mode 100644
index 000000000..a1c3b3b33
--- /dev/null
+++ b/docs/files/csv/usecases-deprecated.csv
@@ -0,0 +1,28 @@
+Use Case;Link;Last Valid Version;Comments
+vFirewall with closed loop;:ref:`official doc <docs_vfw>`;Guilin;Shall still be OK in Honolulu but not tested yet
+Scale Out;:ref:`official doc <docs_scaleout>`;Guilin;Shall still be OK in Honolulu but not tested yet
+vCPE Use Case;:ref:`official doc <docs_vcpe>`;El Alto;No resources to test on Frankfurt
+vIPsec with HPA Use Case;:ref:`official doc<docs_vipsec_hpa>`;El Alto;No resources to test on Frankfurt
+Change Management Schedule Optimization;:ref:`official doc<docs_CM_schedule_optimizer>`;El Alto;No resources to test on Frankfurt
+Change Management Flexible Designer and Orchestrator;:ref:`official doc<docs_CM_flexible_designer_orchestrator>`;El Alto;No resources to test on Frankfurt
+vFirewall/vDNS with HPA;:ref:`official doc <docs_vfw_hpa>`;Frankfurt;No resources to test on Guilin
+BBS (Broadband Service);:ref:`official doc <docs_bbs>`;Frankfurt;No resources to test on Guilin
+vFirewall CNF with multicloud k8s plugin;:ref:`official doc <docs_vfw_edgex_multicloud_k8s>`;Frankfurt;No resources to test on Guilin
+EdgeXFoundry CNF with multicloud k8s plugin;:ref:`official doc <docs_vfw_edgex_multicloud_k8s>`;Frankfurt;No resources to test on Guilin
+vCPE with Tosca;:ref:`official doc <docs_vcpe_tosca_local>`;Frankfurt;No resources to test on Guilin
+E2E Automation vLB with CDS;`wiki page <https://wiki.onap.org/pages/viewpage.action?pageId=71838891>`__; Frankfurt;No resources to test on Guilin
+vFirewall In-Place Software Upgrade with Traffic Distribution;:ref:`official doc <docs_vfw_traffic>`;Frankfurt;APPC in maintenance mode
+5G Bulk PM; :ref:`official doc <docs_5g_bulk_pm>`;Frankfurt;Not tested in Guilin
+5G NRM Network Resource Model (Configuration management);:ref:`official doc <docs_5G_NRM_Configuration>`;Frankfurt;Not tested in Guilin
+5G NETCONF configuration;:ref:`official doc <docs_5G_Configuration_over_NETCONF>`;Frankfurt;Not tested in Guilin
+PNF Software Upgrade using direct Netconf Yang interface with PNF;:ref:`official doc <docs_5g_pnf_software_upgrade>`;Frankfurt;Not tested in Guilin
+PNF Software Upgrade with EM with Ansible;:ref:`official doc <docs_5G_PNF_Software_Upgrade_ansible_with_EM>`;Frankfurt;Not tested in Guilin
+PNF Software Upgrade with EM with Netconf; :ref:`official doc <docs_5g_pnf_software_upgrade_netconf_with_EM>`;Frankfurt;Not tested in Guilin
+PNF Software Upgrade in association to schema updates; :ref:`official doc <docs_5G_PNF_Software_Upgrade_With_Schema_Update>`;Frankfurt;Not tested in Guilin
+VSP Compliance and Validation Check within SDC;`wiki page <https://wiki.onap.org/display/DW/VSP+Compliance+and+Validation+Check+within+SDC+%28Frankfurt%29+-+Phase+2#VSPComplianceandValidationCheckwithinSDC(Frankfurt)Phase2-VSPComplianceCheckwithinSDC-IntegrationTestPlan>`_;Frankfurt;Not tested in Guilin
+Enable PNF software version at onboarding;`wiki page <https://jira.onap.org/browse/REQ-88?src=confmacro>`__;Frankfurt;Not tested in Guilin
+xNF communication security enhancements; `wiki page <https://wiki.onap.org/display/DW/xNF+communication+security+enhancements+-+Tests+Description+and+Status>`__;Frankfurt;Not tested in Guilin
+ETSI Alignment SO plugin to support SOL003 to connect to an external VNFM;`wiki page <https://wiki.onap.org/display/DW/ETSI+Alignment+Support>`__;Frankfurt;Not tested in Guilin
+Integration of CDS as an Actor; `official doc <https://docs.onap.org/projects/onap-ccsdk-cds/en/latest/ui/designer-guide.html>`_;Frankfurt;Not tested in Guilin
+3rd Party Operational Domain Manager; `wiki page <https://wiki.onap.org/display/DW/Third-party+Operational+Domain+Manager>`__;Frankfurt;Not tested in Guilin
+Configuration & persistency; `wiki page <https://wiki.onap.org/pages/viewpage.action?pageId=64003184>`__;Frankfurt;Not tested in Guilin
diff --git a/docs/files/csv/usecases-functional-requirements.csv b/docs/files/csv/usecases-functional-requirements.csv
new file mode 100644
index 000000000..6bb6494d7
--- /dev/null
+++ b/docs/files/csv/usecases-functional-requirements.csv
@@ -0,0 +1,3 @@
+Issue key;Summary;Contact;Comment
+REQ-1215;E2E Network Slicing use case enhancements for Kohn release;Kevin Tang;
+REQ-1212;5G SON use case enhancements for Kohn release;N.K. Shankaranarayanan;
diff --git a/docs/files/csv/usecases-non-functional-requirements.csv b/docs/files/csv/usecases-non-functional-requirements.csv
new file mode 100644
index 000000000..3b489ac43
--- /dev/null
+++ b/docs/files/csv/usecases-non-functional-requirements.csv
@@ -0,0 +1,5 @@
+Issue key;Summary;Contact;Comment
+REQ-1267;General intent model and general intent interface requirements in R11;Keguang He;
+REQ-1214;Maintenance and Enhancement of Intent-driven Closed-loop Autonomous Networks in R11;Dong Wang;
+REQ-1268;CCVPN Kohn Enhancements for Intent-based Cloud Leased Line and Transport Slicing;Henry Yu;
+REQ-1342;Retirement of unmaintained repos;Amy Zwarico; \ No newline at end of file
diff --git a/docs/files/csv/usecases-old-valid.csv b/docs/files/csv/usecases-old-valid.csv
new file mode 100644
index 000000000..c10709e9d
--- /dev/null
+++ b/docs/files/csv/usecases-old-valid.csv
@@ -0,0 +1,6 @@
+Summary;Link;Contacts
+vFirewall CNF With CDS;:ref:`official doc <docs_vFW_CNF_CDS>`;L.Rajewski, K.Banka
+5G Realtime PM and High Volume Stream Data Collection; :ref:`official doc <docs_realtime_pm>`;M.Przybysz
+5G PNF Plug and Play; :ref:`official doc <docs_5g_pnf_pnp>`; M.Przybysz K.Kuzmicki
+5G PNF Pre-Onboarding & Onboarding;:ref:`official doc <docs_pnf_onboarding_preonboarding>`;M.Przybysz K.Kuzmicki D.Melia A.Walshe
+MDONS extension;:ref:`official doc <docs_CCVPN>`;X.Miao
diff --git a/docs/files/csv/usecases.csv b/docs/files/csv/usecases.csv
new file mode 100644
index 000000000..629088202
--- /dev/null
+++ b/docs/files/csv/usecases.csv
@@ -0,0 +1,4 @@
+Ref;Summary;Link;Contacts
+REQ-440;E2E Network Slicing;:ref:`official doc <docs_E2E_network_slicing>`;Kevin Tang
+REQ-429;5G OOF SON;:ref:`official doc <docs_5G_oof_son>`;N. K. Shankaranarayanan
+REQ-459;CCVPN-Transport Slicing;:ref:`official doc <docs_CCVPN>`;Henry Yu
diff --git a/docs/files/dt-use-case.png b/docs/files/dt-use-case.png
index 068e9e587..62b67d078 100755
--- a/docs/files/dt-use-case.png
+++ b/docs/files/dt-use-case.png
Binary files differ
diff --git a/docs/files/ns_automation/ns_automation_sdc_suffix.png b/docs/files/ns_automation/ns_automation_sdc_suffix.png
new file mode 100644
index 000000000..c78d27230
--- /dev/null
+++ b/docs/files/ns_automation/ns_automation_sdc_suffix.png
Binary files differ
diff --git a/docs/files/ns_automation/ns_automation_suc.png b/docs/files/ns_automation/ns_automation_suc.png
new file mode 100644
index 000000000..ff7a6d9b4
--- /dev/null
+++ b/docs/files/ns_automation/ns_automation_suc.png
Binary files differ
diff --git a/docs/files/ns_automation/ns_automation_test_class.png b/docs/files/ns_automation/ns_automation_test_class.png
new file mode 100644
index 000000000..5f7976841
--- /dev/null
+++ b/docs/files/ns_automation/ns_automation_test_class.png
Binary files differ
diff --git a/docs/files/s3p/basic_vm_duration.png b/docs/files/s3p/basic_vm_duration.png
new file mode 100644
index 000000000..71e522681
--- /dev/null
+++ b/docs/files/s3p/basic_vm_duration.png
Binary files differ
diff --git a/docs/files/s3p/basic_vm_duration_histo.png b/docs/files/s3p/basic_vm_duration_histo.png
new file mode 100644
index 000000000..d201d3b81
--- /dev/null
+++ b/docs/files/s3p/basic_vm_duration_histo.png
Binary files differ
diff --git a/docs/files/s3p/daily_frankfurt1.png b/docs/files/s3p/daily_frankfurt1.png
new file mode 100644
index 000000000..44d82870d
--- /dev/null
+++ b/docs/files/s3p/daily_frankfurt1.png
Binary files differ
diff --git a/docs/files/s3p/daily_frankfurt2.png b/docs/files/s3p/daily_frankfurt2.png
new file mode 100644
index 000000000..f2f9ae854
--- /dev/null
+++ b/docs/files/s3p/daily_frankfurt2.png
Binary files differ
diff --git a/docs/files/s3p/guilin_daily_healthcheck.png b/docs/files/s3p/guilin_daily_healthcheck.png
new file mode 100644
index 000000000..34a58ebda
--- /dev/null
+++ b/docs/files/s3p/guilin_daily_healthcheck.png
Binary files differ
diff --git a/docs/files/s3p/guilin_daily_infrastructure_healthcheck.png b/docs/files/s3p/guilin_daily_infrastructure_healthcheck.png
new file mode 100644
index 000000000..be24c02ce
--- /dev/null
+++ b/docs/files/s3p/guilin_daily_infrastructure_healthcheck.png
Binary files differ
diff --git a/docs/files/s3p/guilin_daily_security.png b/docs/files/s3p/guilin_daily_security.png
new file mode 100644
index 000000000..1d3d518c0
--- /dev/null
+++ b/docs/files/s3p/guilin_daily_security.png
Binary files differ
diff --git a/docs/files/s3p/guilin_daily_smoke.png b/docs/files/s3p/guilin_daily_smoke.png
new file mode 100644
index 000000000..5200c575e
--- /dev/null
+++ b/docs/files/s3p/guilin_daily_smoke.png
Binary files differ
diff --git a/docs/files/s3p/honolulu_daily_healthcheck.png b/docs/files/s3p/honolulu_daily_healthcheck.png
new file mode 100644
index 000000000..01216aee4
--- /dev/null
+++ b/docs/files/s3p/honolulu_daily_healthcheck.png
Binary files differ
diff --git a/docs/files/s3p/honolulu_daily_infrastructure_healthcheck.png b/docs/files/s3p/honolulu_daily_infrastructure_healthcheck.png
new file mode 100644
index 000000000..660902029
--- /dev/null
+++ b/docs/files/s3p/honolulu_daily_infrastructure_healthcheck.png
Binary files differ
diff --git a/docs/files/s3p/honolulu_daily_security.png b/docs/files/s3p/honolulu_daily_security.png
new file mode 100644
index 000000000..2efc9c84a
--- /dev/null
+++ b/docs/files/s3p/honolulu_daily_security.png
Binary files differ
diff --git a/docs/files/s3p/honolulu_daily_smoke.png b/docs/files/s3p/honolulu_daily_smoke.png
new file mode 100644
index 000000000..4192e404d
--- /dev/null
+++ b/docs/files/s3p/honolulu_daily_smoke.png
Binary files differ
diff --git a/docs/files/s3p/honolulu_sdc_stability.png b/docs/files/s3p/honolulu_sdc_stability.png
new file mode 100644
index 000000000..4d6c4ee2c
--- /dev/null
+++ b/docs/files/s3p/honolulu_sdc_stability.png
Binary files differ
diff --git a/docs/files/s3p/honolulu_sdc_stability_resources.png b/docs/files/s3p/honolulu_sdc_stability_resources.png
new file mode 100644
index 000000000..dd9333687
--- /dev/null
+++ b/docs/files/s3p/honolulu_sdc_stability_resources.png
Binary files differ
diff --git a/docs/files/s3p/honolulu_so_stability_1_duration.png b/docs/files/s3p/honolulu_so_stability_1_duration.png
new file mode 100644
index 000000000..47f625604
--- /dev/null
+++ b/docs/files/s3p/honolulu_so_stability_1_duration.png
Binary files differ
diff --git a/docs/files/s3p/honolulu_so_stability_5.png b/docs/files/s3p/honolulu_so_stability_5.png
new file mode 100644
index 000000000..fe8487524
--- /dev/null
+++ b/docs/files/s3p/honolulu_so_stability_5.png
Binary files differ
diff --git a/docs/files/s3p/honolulu_weekly_cpu.png b/docs/files/s3p/honolulu_weekly_cpu.png
new file mode 100644
index 000000000..dbf55d272
--- /dev/null
+++ b/docs/files/s3p/honolulu_weekly_cpu.png
Binary files differ
diff --git a/docs/files/s3p/honolulu_weekly_memory.png b/docs/files/s3p/honolulu_weekly_memory.png
new file mode 100644
index 000000000..5cbdf04be
--- /dev/null
+++ b/docs/files/s3p/honolulu_weekly_memory.png
Binary files differ
diff --git a/docs/files/s3p/istanbul-dashboard.png b/docs/files/s3p/istanbul-dashboard.png
new file mode 100644
index 000000000..f8bad42ad
--- /dev/null
+++ b/docs/files/s3p/istanbul-dashboard.png
Binary files differ
diff --git a/docs/files/s3p/istanbul_daily_healthcheck.png b/docs/files/s3p/istanbul_daily_healthcheck.png
new file mode 100644
index 000000000..e1cf16ae6
--- /dev/null
+++ b/docs/files/s3p/istanbul_daily_healthcheck.png
Binary files differ
diff --git a/docs/files/s3p/istanbul_daily_infrastructure_healthcheck.png b/docs/files/s3p/istanbul_daily_infrastructure_healthcheck.png
new file mode 100644
index 000000000..1e8877d0e
--- /dev/null
+++ b/docs/files/s3p/istanbul_daily_infrastructure_healthcheck.png
Binary files differ
diff --git a/docs/files/s3p/istanbul_daily_security.png b/docs/files/s3p/istanbul_daily_security.png
new file mode 100644
index 000000000..605edb140
--- /dev/null
+++ b/docs/files/s3p/istanbul_daily_security.png
Binary files differ
diff --git a/docs/files/s3p/istanbul_daily_smoke.png b/docs/files/s3p/istanbul_daily_smoke.png
new file mode 100644
index 000000000..cdeb999da
--- /dev/null
+++ b/docs/files/s3p/istanbul_daily_smoke.png
Binary files differ
diff --git a/docs/files/s3p/istanbul_instantiation_stability_10.png b/docs/files/s3p/istanbul_instantiation_stability_10.png
new file mode 100644
index 000000000..73749572a
--- /dev/null
+++ b/docs/files/s3p/istanbul_instantiation_stability_10.png
Binary files differ
diff --git a/docs/files/s3p/istanbul_resiliency.png b/docs/files/s3p/istanbul_resiliency.png
new file mode 100644
index 000000000..567a98c5c
--- /dev/null
+++ b/docs/files/s3p/istanbul_resiliency.png
Binary files differ
diff --git a/docs/files/s3p/istanbul_sdc_stability.png b/docs/files/s3p/istanbul_sdc_stability.png
new file mode 100644
index 000000000..67346cb0d
--- /dev/null
+++ b/docs/files/s3p/istanbul_sdc_stability.png
Binary files differ
diff --git a/docs/files/s3p/jakarta-dashboard.png b/docs/files/s3p/jakarta-dashboard.png
new file mode 100755
index 000000000..e5f2fd1b8
--- /dev/null
+++ b/docs/files/s3p/jakarta-dashboard.png
Binary files differ
diff --git a/docs/files/s3p/stability_sdnc_memory.png b/docs/files/s3p/stability_sdnc_memory.png
new file mode 100644
index 000000000..c381077f5
--- /dev/null
+++ b/docs/files/s3p/stability_sdnc_memory.png
Binary files differ
diff --git a/docs/files/scaleout/12.png b/docs/files/scaleout/12.png
deleted file mode 100644
index c6e79a8dd..000000000
--- a/docs/files/scaleout/12.png
+++ /dev/null
Binary files differ
diff --git a/docs/files/scaleout/13.png b/docs/files/scaleout/13.png
deleted file mode 100644
index b64d57759..000000000
--- a/docs/files/scaleout/13.png
+++ /dev/null
Binary files differ
diff --git a/docs/files/scaleout/14.png b/docs/files/scaleout/14.png
deleted file mode 100644
index 959fef355..000000000
--- a/docs/files/scaleout/14.png
+++ /dev/null
Binary files differ
diff --git a/docs/files/scaleout/15.png b/docs/files/scaleout/15.png
deleted file mode 100644
index ed552d9d0..000000000
--- a/docs/files/scaleout/15.png
+++ /dev/null
Binary files differ
diff --git a/docs/files/scaleout/16.png b/docs/files/scaleout/16.png
deleted file mode 100644
index 78ec99002..000000000
--- a/docs/files/scaleout/16.png
+++ /dev/null
Binary files differ
diff --git a/docs/files/scaleout/17.png b/docs/files/scaleout/17.png
deleted file mode 100644
index 4165da725..000000000
--- a/docs/files/scaleout/17.png
+++ /dev/null
Binary files differ
diff --git a/docs/files/scaleout/18.png b/docs/files/scaleout/18.png
deleted file mode 100644
index c0fc3b57a..000000000
--- a/docs/files/scaleout/18.png
+++ /dev/null
Binary files differ
diff --git a/docs/files/scaleout/19.png b/docs/files/scaleout/19.png
deleted file mode 100644
index f83e1d5a9..000000000
--- a/docs/files/scaleout/19.png
+++ /dev/null
Binary files differ
diff --git a/docs/files/scaleout/20.png b/docs/files/scaleout/20.png
deleted file mode 100644
index 71147e3de..000000000
--- a/docs/files/scaleout/20.png
+++ /dev/null
Binary files differ
diff --git a/docs/files/scaleout/clamp/1.png b/docs/files/scaleout/clamp/1.png
new file mode 100644
index 000000000..acf850843
--- /dev/null
+++ b/docs/files/scaleout/clamp/1.png
Binary files differ
diff --git a/docs/files/scaleout/clamp/10.png b/docs/files/scaleout/clamp/10.png
new file mode 100644
index 000000000..17dcaa937
--- /dev/null
+++ b/docs/files/scaleout/clamp/10.png
Binary files differ
diff --git a/docs/files/scaleout/clamp/11.png b/docs/files/scaleout/clamp/11.png
new file mode 100644
index 000000000..f41f72988
--- /dev/null
+++ b/docs/files/scaleout/clamp/11.png
Binary files differ
diff --git a/docs/files/scaleout/clamp/12.png b/docs/files/scaleout/clamp/12.png
new file mode 100644
index 000000000..8acc00439
--- /dev/null
+++ b/docs/files/scaleout/clamp/12.png
Binary files differ
diff --git a/docs/files/scaleout/clamp/13.png b/docs/files/scaleout/clamp/13.png
new file mode 100644
index 000000000..74894e9e7
--- /dev/null
+++ b/docs/files/scaleout/clamp/13.png
Binary files differ
diff --git a/docs/files/scaleout/clamp/14.png b/docs/files/scaleout/clamp/14.png
new file mode 100644
index 000000000..52d1b2a02
--- /dev/null
+++ b/docs/files/scaleout/clamp/14.png
Binary files differ
diff --git a/docs/files/scaleout/clamp/15.png b/docs/files/scaleout/clamp/15.png
new file mode 100644
index 000000000..9bfd74f3e
--- /dev/null
+++ b/docs/files/scaleout/clamp/15.png
Binary files differ
diff --git a/docs/files/scaleout/clamp/16.png b/docs/files/scaleout/clamp/16.png
new file mode 100644
index 000000000..edec29070
--- /dev/null
+++ b/docs/files/scaleout/clamp/16.png
Binary files differ
diff --git a/docs/files/scaleout/clamp/2.png b/docs/files/scaleout/clamp/2.png
new file mode 100644
index 000000000..e5ff5f6cb
--- /dev/null
+++ b/docs/files/scaleout/clamp/2.png
Binary files differ
diff --git a/docs/files/scaleout/clamp/3.png b/docs/files/scaleout/clamp/3.png
new file mode 100644
index 000000000..f0dcc352f
--- /dev/null
+++ b/docs/files/scaleout/clamp/3.png
Binary files differ
diff --git a/docs/files/scaleout/clamp/4.png b/docs/files/scaleout/clamp/4.png
new file mode 100644
index 000000000..5563b2442
--- /dev/null
+++ b/docs/files/scaleout/clamp/4.png
Binary files differ
diff --git a/docs/files/scaleout/clamp/5.png b/docs/files/scaleout/clamp/5.png
new file mode 100644
index 000000000..590b01c7e
--- /dev/null
+++ b/docs/files/scaleout/clamp/5.png
Binary files differ
diff --git a/docs/files/scaleout/clamp/6.png b/docs/files/scaleout/clamp/6.png
new file mode 100644
index 000000000..bbdd69878
--- /dev/null
+++ b/docs/files/scaleout/clamp/6.png
Binary files differ
diff --git a/docs/files/scaleout/clamp/7.png b/docs/files/scaleout/clamp/7.png
new file mode 100644
index 000000000..281f7bcf9
--- /dev/null
+++ b/docs/files/scaleout/clamp/7.png
Binary files differ
diff --git a/docs/files/scaleout/clamp/8.png b/docs/files/scaleout/clamp/8.png
new file mode 100644
index 000000000..81217e8a4
--- /dev/null
+++ b/docs/files/scaleout/clamp/8.png
Binary files differ
diff --git a/docs/files/scaleout/clamp/9.png b/docs/files/scaleout/clamp/9.png
new file mode 100644
index 000000000..ef8d3200d
--- /dev/null
+++ b/docs/files/scaleout/clamp/9.png
Binary files differ
diff --git a/docs/files/scaleout/dcae_blueprint.yaml b/docs/files/scaleout/dcae_blueprint.yaml
new file mode 100644
index 000000000..c3d1a1db9
--- /dev/null
+++ b/docs/files/scaleout/dcae_blueprint.yaml
@@ -0,0 +1,174 @@
+#
+# ============LICENSE_START====================================================
+# =============================================================================
+# Copyright (c) 2019 AT&T Intellectual Property. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+tosca_definitions_version: cloudify_dsl_1_3
+
+description: >
+ This blueprint deploys/manages the TCA module as a Docker container
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/R4/k8splugin/1.4.13/k8splugin_types.yaml
+# - https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/R4/dcaepolicyplugin/2.3.0/dcaepolicyplugin_types.yaml
+ - https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/R4/clamppolicyplugin/1.0.0/clamppolicyplugin_types.yaml
+inputs:
+ aaiEnrichmentHost:
+ type: string
+ default: "aai.onap.svc.cluster.local"
+ aaiEnrichmentPort:
+ type: string
+ default: "8443"
+ enableAAIEnrichment:
+ type: string
+ default: true
+ dmaap_host:
+ type: string
+ default: message-router.onap
+ dmaap_port:
+ type: string
+ default: "3904"
+ enableRedisCaching:
+ type: string
+ default: false
+ redisHosts:
+ type: string
+ default: dcae-redis.onap.svc.cluster.local:6379
+ tag_version:
+ type: string
+ default: "nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.tca-cdap-container:1.1.1"
+ consul_host:
+ type: string
+ default: consul-server.onap
+ consul_port:
+ type: string
+ default: "8500"
+ cbs_host:
+ type: string
+ default: "config-binding-service"
+ cbs_port:
+ type: string
+ default: "10000"
+ policy_id:
+ type: string
+ default: "onap.restart.tca"
+ external_port:
+ type: string
+ description: Kubernetes node port on which CDAPgui is exposed
+ default: "32012"
+ policy_model_id:
+ type: string
+ default: "onap.policies.monitoring.cdap.tca.hi.lo.app"
+node_templates:
+ tca_k8s:
+ type: dcae.nodes.ContainerizedServiceComponent
+ relationships:
+ - target: tca_policy
+ type: cloudify.relationships.depends_on
+ properties:
+ service_component_type: 'dcaegen2-analytics-tca'
+ docker_config: {}
+ image:
+ get_input: tag_version
+ log_info:
+ log_directory: "/opt/app/TCAnalytics/logs"
+ application_config:
+ app_config:
+ appDescription: DCAE Analytics Threshold Crossing Alert Application
+ appName: dcae-tca
+ tcaAlertsAbatementTableName: TCAAlertsAbatementTable
+ tcaAlertsAbatementTableTTLSeconds: '1728000'
+ tcaSubscriberOutputStreamName: TCASubscriberOutputStream
+ tcaVESAlertsTableName: TCAVESAlertsTable
+ tcaVESAlertsTableTTLSeconds: '1728000'
+ tcaVESMessageStatusTableName: TCAVESMessageStatusTable
+ tcaVESMessageStatusTableTTLSeconds: '86400'
+ thresholdCalculatorFlowletInstances: '2'
+ app_preferences:
+ aaiEnrichmentHost:
+ get_input: aaiEnrichmentHost
+ aaiEnrichmentIgnoreSSLCertificateErrors: 'true'
+ aaiEnrichmentPortNumber: '8443'
+ aaiEnrichmentProtocol: https
+ aaiEnrichmentUserName: dcae@dcae.onap.org
+ aaiEnrichmentUserPassword: demo123456!
+ aaiVMEnrichmentAPIPath: /aai/v11/search/nodes-query
+ aaiVNFEnrichmentAPIPath: /aai/v11/network/generic-vnfs/generic-vnf
+ enableAAIEnrichment:
+ get_input: enableAAIEnrichment
+ enableRedisCaching:
+ get_input: enableRedisCaching
+ redisHosts:
+ get_input: redisHosts
+ enableAlertCEFFormat: 'false'
+ publisherContentType: application/json
+ publisherHostName:
+ get_input: dmaap_host
+ publisherHostPort:
+ get_input: dmaap_port
+ publisherMaxBatchSize: '1'
+ publisherMaxRecoveryQueueSize: '100000'
+ publisherPollingInterval: '20000'
+ publisherProtocol: http
+ publisherTopicName: unauthenticated.DCAE_CL_OUTPUT
+ subscriberConsumerGroup: OpenDCAE-clamp
+ subscriberConsumerId: c12
+ subscriberContentType: application/json
+ subscriberHostName:
+ get_input: dmaap_host
+ subscriberHostPort:
+ get_input: dmaap_port
+ subscriberMessageLimit: '-1'
+ subscriberPollingInterval: '30000'
+ subscriberProtocol: http
+ subscriberTimeoutMS: '-1'
+ subscriberTopicName: unauthenticated.VES_MEASUREMENT_OUTPUT
+ tca_policy: ''
+ service_component_type: dcaegen2-analytics_tca
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ start:
+ inputs:
+ envs:
+ DMAAPHOST:
+ { get_input: dmaap_host }
+ DMAAPPORT:
+ { get_input: dmaap_port }
+ DMAAPPUBTOPIC: "unauthenticated.DCAE_CL_OUTPUT"
+ DMAAPSUBTOPIC: "unauthenticated.VES_MEASUREMENT_OUTPUT"
+ AAIHOST:
+ { get_input: aaiEnrichmentHost }
+ AAIPORT:
+ { get_input: aaiEnrichmentPort }
+ CONSUL_HOST:
+ { get_input: consul_host }
+ CONSUL_PORT:
+ { get_input: consul_port }
+ CBS_HOST:
+ { get_input: cbs_host }
+ CBS_PORT:
+ { get_input: cbs_port }
+ CONFIG_BINDING_SERVICE: "config_binding_service"
+ ports:
+ - concat: ["11011:", { get_input: external_port }]
+ tca_policy:
+ type: clamp.nodes.policy
+ properties:
+ policy_id:
+ get_input: policy_id
+ policy_model_id:
+ get_input: policy_model_id
diff --git a/docs/files/scaleout/k8s-tca-clamp-policy-05162019.yaml b/docs/files/scaleout/k8s-tca-clamp-policy-05162019.yaml
deleted file mode 100644
index b4e545391..000000000
--- a/docs/files/scaleout/k8s-tca-clamp-policy-05162019.yaml
+++ /dev/null
@@ -1,175 +0,0 @@
-#
-# ============LICENSE_START====================================================
-# =============================================================================
-# Copyright (c) 2019 AT&T Intellectual Property. All rights reserved.
-# =============================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============LICENSE_END======================================================
-
-tosca_definitions_version: cloudify_dsl_1_3
-
-description: >
- This blueprint deploys/manages the TCA module as a Docker container
-
-imports:
- - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
- - https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/R4/k8splugin/1.4.13/k8splugin_types.yaml
-# - https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/R4/dcaepolicyplugin/2.3.0/dcaepolicyplugin_types.yaml
- - https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/R4/clamppolicyplugin/1.0.0/clamppolicyplugin_types.yaml
-inputs:
- aaiEnrichmentHost:
- type: string
- default: "aai.onap.svc.cluster.local"
- aaiEnrichmentPort:
- type: string
- default: "8443"
- enableAAIEnrichment:
- type: string
- default: true
- dmaap_host:
- type: string
- default: message-router.onap
- dmaap_port:
- type: string
- default: "3904"
- enableRedisCaching:
- type: string
- default: false
- redisHosts:
- type: string
- default: dcae-redis.onap.svc.cluster.local:6379
- tag_version:
- type: string
- default: "nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.tca-cdap-container:1.1.1"
- consul_host:
- type: string
- default: consul-server.onap
- consul_port:
- type: string
- default: "8500"
- cbs_host:
- type: string
- default: "config-binding-service"
- cbs_port:
- type: string
- default: "10000"
- policy_id:
- type: string
- default: "onap.restart.tca"
- external_port:
- type: string
- description: Kubernetes node port on which CDAPgui is exposed
- default: "32012"
- policy_model_id:
- type: string
- default: "onap.policies.monitoring.cdap.tca.hi.lo.app"
-node_templates:
- tca_k8s:
- type: dcae.nodes.ContainerizedServiceComponent
- relationships:
- - target: tca_policy
- type: cloudify.relationships.depends_on
- properties:
- service_component_type: 'dcaegen2-analytics-tca'
- application_config: {}
- docker_config: {}
- image:
- get_input: tag_version
- log_info:
- log_directory: "/opt/app/TCAnalytics/logs"
- application_config:
- app_config:
- appDescription: DCAE Analytics Threshold Crossing Alert Application
- appName: dcae-tca
- tcaAlertsAbatementTableName: TCAAlertsAbatementTable
- tcaAlertsAbatementTableTTLSeconds: '1728000'
- tcaSubscriberOutputStreamName: TCASubscriberOutputStream
- tcaVESAlertsTableName: TCAVESAlertsTable
- tcaVESAlertsTableTTLSeconds: '1728000'
- tcaVESMessageStatusTableName: TCAVESMessageStatusTable
- tcaVESMessageStatusTableTTLSeconds: '86400'
- thresholdCalculatorFlowletInstances: '2'
- app_preferences:
- aaiEnrichmentHost:
- get_input: aaiEnrichmentHost
- aaiEnrichmentIgnoreSSLCertificateErrors: 'true'
- aaiEnrichmentPortNumber: '8443'
- aaiEnrichmentProtocol: https
- aaiEnrichmentUserName: dcae@dcae.onap.org
- aaiEnrichmentUserPassword: demo123456!
- aaiVMEnrichmentAPIPath: /aai/v11/search/nodes-query
- aaiVNFEnrichmentAPIPath: /aai/v11/network/generic-vnfs/generic-vnf
- enableAAIEnrichment:
- get_input: enableAAIEnrichment
- enableRedisCaching:
- get_input: enableRedisCaching
- redisHosts:
- get_input: redisHosts
- enableAlertCEFFormat: 'false'
- publisherContentType: application/json
- publisherHostName:
- get_input: dmaap_host
- publisherHostPort:
- get_input: dmaap_port
- publisherMaxBatchSize: '1'
- publisherMaxRecoveryQueueSize: '100000'
- publisherPollingInterval: '20000'
- publisherProtocol: http
- publisherTopicName: unauthenticated.DCAE_CL_OUTPUT
- subscriberConsumerGroup: OpenDCAE-clamp
- subscriberConsumerId: c12
- subscriberContentType: application/json
- subscriberHostName:
- get_input: dmaap_host
- subscriberHostPort:
- get_input: dmaap_port
- subscriberMessageLimit: '-1'
- subscriberPollingInterval: '30000'
- subscriberProtocol: http
- subscriberTimeoutMS: '-1'
- subscriberTopicName: unauthenticated.VES_MEASUREMENT_OUTPUT
- tca_policy: ''
- service_component_type: dcaegen2-analytics_tca
- interfaces:
- cloudify.interfaces.lifecycle:
- start:
- inputs:
- envs:
- DMAAPHOST:
- { get_input: dmaap_host }
- DMAAPPORT:
- { get_input: dmaap_port }
- DMAAPPUBTOPIC: "unauthenticated.DCAE_CL_OUTPUT"
- DMAAPSUBTOPIC: "unauthenticated.VES_MEASUREMENT_OUTPUT"
- AAIHOST:
- { get_input: aaiEnrichmentHost }
- AAIPORT:
- { get_input: aaiEnrichmentPort }
- CONSUL_HOST:
- { get_input: consul_host }
- CONSUL_PORT:
- { get_input: consul_port }
- CBS_HOST:
- { get_input: cbs_host }
- CBS_PORT:
- { get_input: cbs_port }
- CONFIG_BINDING_SERVICE: "config_binding_service"
- ports:
- - concat: ["11011:", { get_input: external_port }]
- tca_policy:
- type: clamp.nodes.policy
- properties:
- policy_id:
- get_input: policy_id
- policy_model_id:
- get_input: policy_model_id \ No newline at end of file
diff --git a/docs/files/scaleout/latest-tca-guilin.yaml b/docs/files/scaleout/latest-tca-guilin.yaml
new file mode 100644
index 000000000..e7d967a26
--- /dev/null
+++ b/docs/files/scaleout/latest-tca-guilin.yaml
@@ -0,0 +1,141 @@
+# ============LICENSE_START====================================================
+# =============================================================================
+# Copyright (C) 2019-2020 AT&T Intellectual Property. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+#k8s-tca-gen2-v3.yaml
+
+tosca_definitions_version: cloudify_dsl_1_3
+imports:
+ - https://www.getcloudify.org/spec/cloudify/4.5.5/types.yaml
+ - plugin:k8splugin?version=3.4.2
+ - plugin:clamppolicyplugin?version=1.1.0
+inputs:
+ service_name:
+ type: string
+ default: 'dcae-tcagen2'
+ log_directory:
+ type: string
+ default: "/opt/logs/dcae-analytics-tca"
+ replicas:
+ type: integer
+ description: number of instances
+ default: 1
+ spring.data.mongodb.uri:
+ type: string
+ default: "mongodb://dcae-mongohost/dcae-tcagen2"
+ tag_version:
+ type: string
+ default: "nexus3.onap.org:10001/onap/org.onap.dcaegen2.analytics.tca-gen2.dcae-analytics-tca-web:1.2.1"
+ tca.aai.password:
+ type: string
+ default: "DCAE"
+ tca.aai.url:
+ type: string
+ default: "http://aai.onap.svc.cluster.local"
+ tca.aai.username:
+ type: string
+ default: "DCAE"
+ tca_handle_in_subscribe_url:
+ type: string
+ default: "http://message-router.onap.svc.cluster.local:3904/events/unauthenticated.VES_MEASUREMENT_OUTPUT"
+ tca_handle_out_publish_url:
+ type: string
+ default: "http://message-router.onap.svc.cluster.local:3904/events/unauthenticated.DCAE_CL_OUTPUT"
+ tca_consumer_group:
+ type: string
+ default: "cg1"
+ policy_model_id:
+ type: string
+ default: "onap.policies.monitoring.tcagen2"
+ policy_id:
+ type: string
+ default: "onap.restart.tca"
+node_templates:
+ docker.tca:
+ type: dcae.nodes.ContainerizedServiceComponent
+ relationships:
+ - target: tcagen2_policy
+ type: cloudify.relationships.depends_on
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ start:
+ inputs:
+ ports:
+ - concat: ["9091:", "0"]
+ properties:
+ application_config:
+ service_calls: []
+ streams_publishes:
+ tca_handle_out:
+ dmaap_info:
+ topic_url:
+ get_input: tca_handle_out_publish_url
+ type: message_router
+ streams_subscribes:
+ tca_handle_in:
+ dmaap_info:
+ topic_url:
+ get_input: tca_handle_in_subscribe_url
+ type: message_router
+ spring.data.mongodb.uri:
+ get_input: spring.data.mongodb.uri
+ streams_subscribes.tca_handle_in.consumer_group:
+ get_input: tca_consumer_group
+ streams_subscribes.tca_handle_in.consumer_ids[0]: c0
+ streams_subscribes.tca_handle_in.consumer_ids[1]: c1
+ streams_subscribes.tca_handle_in.message_limit: 50000
+ streams_subscribes.tca_handle_in.polling.auto_adjusting.max: 60000
+ streams_subscribes.tca_handle_in.polling.auto_adjusting.min: 30000
+ streams_subscribes.tca_handle_in.polling.auto_adjusting.step_down: 30000
+ streams_subscribes.tca_handle_in.polling.auto_adjusting.step_up: 10000
+ streams_subscribes.tca_handle_in.polling.fixed_rate: 0
+ streams_subscribes.tca_handle_in.timeout: -1
+ tca.aai.enable_enrichment: true
+ tca.aai.generic_vnf_path: aai/v11/network/generic-vnfs/generic-vnf
+ tca.aai.node_query_path: aai/v11/search/nodes-query
+ tca.aai.password:
+ get_input: tca.aai.password
+ tca.aai.url:
+ get_input: tca.aai.url
+ tca.aai.username:
+ get_input: tca.aai.username
+ tca.policy: '{"domain":"measurementsForVfScaling","metricsPerEventName":[{"eventName":"vFirewallBroadcastPackets","controlLoopSchemaType":"VM","policyScope":"DCAE","policyName":"DCAE.Config_tca-hi-lo","policyVersion":"v0.0.1","thresholds":[{"closedLoopControlName":"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a","version":"1.0.2","fieldPath":"$.event.measurementsForVfScalingFields.vNicPerformanceArray[*].receivedTotalPacketsDelta","thresholdValue":300,"direction":"LESS_OR_EQUAL","severity":"MAJOR","closedLoopEventStatus":"ONSET"},{"closedLoopControlName":"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a","version":"1.0.2","fieldPath":"$.event.measurementsForVfScalingFields.vNicPerformanceArray[*].receivedTotalPacketsDelta","thresholdValue":700,"direction":"GREATER_OR_EQUAL","severity":"CRITICAL","closedLoopEventStatus":"ONSET"}]},{"eventName":"vLoadBalancer","controlLoopSchemaType":"VM","policyScope":"DCAE","policyName":"DCAE.Config_tca-hi-lo","policyVersion":"v0.0.1","thresholds":[{"closedLoopControlName":"ControlLoop-vDNS-6f37f56d-a87d-4b85-b6a9-cc953cf779b3","version":"1.0.2","fieldPath":"$.event.measurementsForVfScalingFields.vNicPerformanceArray[*].receivedTotalPacketsDelta","thresholdValue":300,"direction":"GREATER_OR_EQUAL","severity":"CRITICAL","closedLoopEventStatus":"ONSET"}]},{"eventName":"Measurement_vGMUX","controlLoopSchemaType":"VNF","policyScope":"DCAE","policyName":"DCAE.Config_tca-hi-lo","policyVersion":"v0.0.1","thresholds":[{"closedLoopControlName":"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e","version":"1.0.2","fieldPath":"$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value","thresholdValue":0,"direction":"EQUAL","severity":"MAJOR","closedLoopEventStatus":"ABATED"},{"closedLoopControlName":"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e","version":"1.0.2","fieldPath":"$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value","thresholdValu
e":0,"direction":"GREATER","severity":"CRITICAL","closedLoopEventStatus":"ONSET"}]}]}'
+ tca.processing_batch_size: 10000
+ tca.enable_abatement: true
+ tca.enable_ecomp_logging: true
+ docker_config:
+ healthcheck:
+ endpoint: /actuator/health
+ interval: 30s
+ timeout: 10s
+ type: http
+ image:
+ get_input: tag_version
+ log_info:
+ log_directory:
+ get_input: log_directory
+ tls_info:
+ use_tls: true
+ cert_directory: '/etc/tca-gen2/ssl'
+ replicas:
+ get_input: replicas
+ service_component_type: { get_input: service_name }
+ tcagen2_policy:
+ type: clamp.nodes.policy
+ properties:
+ policy_id:
+ get_input: policy_id
+ policy_model_id:
+ get_input: policy_model_id
diff --git a/docs/files/simulators/NF-Simulator.png b/docs/files/simulators/NF-Simulator.png
new file mode 100644
index 000000000..b52aaa5ff
--- /dev/null
+++ b/docs/files/simulators/NF-Simulator.png
Binary files differ
diff --git a/docs/files/softwareUpgrade/APIDecisionTree.png b/docs/files/softwareUpgrade/APIDecisionTree.png
new file mode 100644
index 000000000..dff8d38fd
--- /dev/null
+++ b/docs/files/softwareUpgrade/APIDecisionTree.png
Binary files differ
diff --git a/docs/files/softwareUpgrade/DirectNetconfYangInterface.png b/docs/files/softwareUpgrade/DirectNetconfYangInterface.png
new file mode 100644
index 000000000..4da660793
--- /dev/null
+++ b/docs/files/softwareUpgrade/DirectNetconfYangInterface.png
Binary files differ
diff --git a/docs/files/softwareUpgrade/OnboardingCsar.png b/docs/files/softwareUpgrade/OnboardingCsar.png
new file mode 100644
index 000000000..0a5ba2bfd
--- /dev/null
+++ b/docs/files/softwareUpgrade/OnboardingCsar.png
Binary files differ
diff --git a/docs/files/softwareUpgrade/SWUPWorkflow.png b/docs/files/softwareUpgrade/SWUPWorkflow.png
new file mode 100644
index 000000000..6455a5ac9
--- /dev/null
+++ b/docs/files/softwareUpgrade/SWUPWorkflow.png
Binary files differ
diff --git a/docs/files/softwareUpgrade/SchemaUpdate.png b/docs/files/softwareUpgrade/SchemaUpdate.png
new file mode 100644
index 000000000..25884bdcb
--- /dev/null
+++ b/docs/files/softwareUpgrade/SchemaUpdate.png
Binary files differ
diff --git a/docs/files/softwareUpgrade/ServiceLevelUpgrade.png b/docs/files/softwareUpgrade/ServiceLevelUpgrade.png
new file mode 100644
index 000000000..86d856765
--- /dev/null
+++ b/docs/files/softwareUpgrade/ServiceLevelUpgrade.png
Binary files differ
diff --git a/docs/files/softwareUpgrade/ServiceLevelWorkflow.png b/docs/files/softwareUpgrade/ServiceLevelWorkflow.png
new file mode 100644
index 000000000..ea37d1603
--- /dev/null
+++ b/docs/files/softwareUpgrade/ServiceLevelWorkflow.png
Binary files differ
diff --git a/docs/files/softwareUpgrade/WorkflowView.png b/docs/files/softwareUpgrade/WorkflowView.png
new file mode 100644
index 000000000..79a28f1db
--- /dev/null
+++ b/docs/files/softwareUpgrade/WorkflowView.png
Binary files differ
diff --git a/docs/files/softwareUpgrade/serviceModelVersions.png b/docs/files/softwareUpgrade/serviceModelVersions.png
new file mode 100644
index 000000000..a918ffa00
--- /dev/null
+++ b/docs/files/softwareUpgrade/serviceModelVersions.png
Binary files differ
diff --git a/docs/files/softwareUpgrade/verifyPNF.png b/docs/files/softwareUpgrade/verifyPNF.png
new file mode 100644
index 000000000..f0aacec9f
--- /dev/null
+++ b/docs/files/softwareUpgrade/verifyPNF.png
Binary files differ
diff --git a/docs/files/softwareUpgrade/workflowList.png b/docs/files/softwareUpgrade/workflowList.png
new file mode 100644
index 000000000..339907ac5
--- /dev/null
+++ b/docs/files/softwareUpgrade/workflowList.png
Binary files differ
diff --git a/docs/files/tests/test-basic-cnf.png b/docs/files/tests/test-basic-cnf.png
new file mode 100644
index 000000000..87701587e
--- /dev/null
+++ b/docs/files/tests/test-basic-cnf.png
Binary files differ
diff --git a/docs/files/tests/test-certif.png b/docs/files/tests/test-certif.png
new file mode 100644
index 000000000..69d6264c2
--- /dev/null
+++ b/docs/files/tests/test-certif.png
Binary files differ
diff --git a/docs/files/tests/test-dashboard.png b/docs/files/tests/test-dashboard.png
new file mode 100644
index 000000000..afd4eee49
--- /dev/null
+++ b/docs/files/tests/test-dashboard.png
Binary files differ
diff --git a/docs/files/tests/test-onap-helm.png b/docs/files/tests/test-onap-helm.png
new file mode 100644
index 000000000..e5f5b5366
--- /dev/null
+++ b/docs/files/tests/test-onap-helm.png
Binary files differ
diff --git a/docs/files/tests/test-onap-k8s.png b/docs/files/tests/test-onap-k8s.png
new file mode 100644
index 000000000..69693f7f5
--- /dev/null
+++ b/docs/files/tests/test-onap-k8s.png
Binary files differ
diff --git a/docs/files/vFW_CNF_CDS/Instantiation_topology.png b/docs/files/vFW_CNF_CDS/Instantiation_topology.png
new file mode 100755
index 000000000..85c50599f
--- /dev/null
+++ b/docs/files/vFW_CNF_CDS/Instantiation_topology.png
Binary files differ
diff --git a/docs/files/vFW_CNF_CDS/Native_Helm_Flow.png b/docs/files/vFW_CNF_CDS/Native_Helm_Flow.png
new file mode 100644
index 000000000..7e896aa8c
--- /dev/null
+++ b/docs/files/vFW_CNF_CDS/Native_Helm_Flow.png
Binary files differ
diff --git a/docs/files/vFW_CNF_CDS/healthcheck.png b/docs/files/vFW_CNF_CDS/healthcheck.png
new file mode 100644
index 000000000..693e9e74c
--- /dev/null
+++ b/docs/files/vFW_CNF_CDS/healthcheck.png
Binary files differ
diff --git a/docs/files/vFW_CNF_CDS/helm-overrides-steps.png b/docs/files/vFW_CNF_CDS/helm-overrides-steps.png
new file mode 100644
index 000000000..ad9355344
--- /dev/null
+++ b/docs/files/vFW_CNF_CDS/helm-overrides-steps.png
Binary files differ
diff --git a/docs/files/vFW_CNF_CDS/helm-overrides.png b/docs/files/vFW_CNF_CDS/helm-overrides.png
new file mode 100644
index 000000000..93a4a34c5
--- /dev/null
+++ b/docs/files/vFW_CNF_CDS/helm-overrides.png
Binary files differ
diff --git a/docs/files/vFW_CNF_CDS/k8s-resources-response.json b/docs/files/vFW_CNF_CDS/k8s-resources-response.json
new file mode 100644
index 000000000..3d0f91344
--- /dev/null
+++ b/docs/files/vFW_CNF_CDS/k8s-resources-response.json
@@ -0,0 +1,843 @@
+{
+ "k8s-resource": [
+ {
+ "id": "e5a4eca381ade9439806cf426eb7a07237fe9a8c9088038bd0c8135f728fabe2",
+ "name": "vfw-1-vfw",
+ "group": "apps",
+ "version": "v1",
+ "kind": "Deployment",
+ "namespace": "vfirewall",
+ "labels": [
+ "chart=vfw",
+ "k8splugin.io/rb-instance-id=brave_brattain",
+ "release=vfw-1-vfw",
+ "vf-module-name=vfw-1-vfw",
+ "vnf-name=test-vnf"
+ ],
+ "selflink": "http://so-cnf-adapter:8090/api/cnf-adapter/v1/instance/brave_brattain/query?ApiVersion=v1&Kind=Deployment&Name=vfw-1-vfw&Namespace=vfirewall",
+ "data-owner": "CnfAdapter",
+ "data-source": "K8sPlugin",
+ "data-source-version": "35940985",
+ "resource-version": "1635943384048",
+ "relationship-list": {
+ "relationship": [
+ {
+ "related-to": "generic-vnf",
+ "relationship-label": "tosca.relationships.HostedOn",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "generic-vnf.vnf-name",
+ "property-value": "VF_vfw_k8s_demo_CNF_LR_1"
+ }
+ ]
+ },
+ {
+ "related-to": "vf-module",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757/vf-modules/vf-module/d56c54b9-40cc-4b7a-abce-50454571e39d",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ },
+ {
+ "relationship-key": "vf-module.vf-module-id",
+ "relationship-value": "d56c54b9-40cc-4b7a-abce-50454571e39d"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "id": "5b43d0c6e6b3ebb3d279dccbdad024a02995d0e66c2858c866ba9295c34cde62",
+ "name": "vfw-1-vsn-5f45887f5f-khqdd",
+ "group": "",
+ "version": "v1",
+ "kind": "Pod",
+ "namespace": "vfirewall",
+ "labels": [
+ "chart=vsn",
+ "k8splugin.io/rb-instance-id=sharp_torvalds",
+ "pod-template-hash=5f45887f5f",
+ "release=vfw-1-vsn",
+ "vf-module-name=vfw-1-vsn",
+ "vnf-name=test-vnf"
+ ],
+ "selflink": "http://so-cnf-adapter:8090/api/cnf-adapter/v1/instance/sharp_torvalds/query?ApiVersion=v1&Kind=Pod&Name=vfw-1-vsn-5f45887f5f-khqdd&Namespace=vfirewall",
+ "data-owner": "CnfAdapter",
+ "data-source": "K8sPlugin",
+ "data-source-version": "35941084",
+ "resource-version": "1635943399747",
+ "relationship-list": {
+ "relationship": [
+ {
+ "related-to": "generic-vnf",
+ "relationship-label": "tosca.relationships.HostedOn",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "generic-vnf.vnf-name",
+ "property-value": "VF_vfw_k8s_demo_CNF_LR_1"
+ }
+ ]
+ },
+ {
+ "related-to": "vf-module",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757/vf-modules/vf-module/56f3d02b-5a32-4a97-9e7b-d3c0094c07e8",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ },
+ {
+ "relationship-key": "vf-module.vf-module-id",
+ "relationship-value": "56f3d02b-5a32-4a97-9e7b-d3c0094c07e8"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "id": "afce111381db5b5e61d12726d96e4d08b5dc1c7fdb5b069a24bb68c36314d45f",
+ "name": "kud-lr-onap-nf-20211103t124217642438z-ovn-nat",
+ "group": "k8s.cni.cncf.io",
+ "version": "v1",
+ "kind": "NetworkAttachmentDefinition",
+ "namespace": "vfirewall",
+ "labels": [
+ "chart=base_template",
+ "k8splugin.io/rb-instance-id=nifty_lichterman",
+ "release=vfw-1-base",
+ "vnf-name=test-vnf"
+ ],
+ "selflink": "http://so-cnf-adapter:8090/api/cnf-adapter/v1/instance/nifty_lichterman/query?ApiVersion=v1&Kind=NetworkAttachmentDefinition&Name=kud-lr-onap-nf-20211103t124217642438z-ovn-nat&Namespace=vfirewall",
+ "data-owner": "CnfAdapter",
+ "data-source": "K8sPlugin",
+ "data-source-version": "35940925",
+ "resource-version": "1635943376139",
+ "relationship-list": {
+ "relationship": [
+ {
+ "related-to": "generic-vnf",
+ "relationship-label": "tosca.relationships.HostedOn",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "generic-vnf.vnf-name",
+ "property-value": "VF_vfw_k8s_demo_CNF_LR_1"
+ }
+ ]
+ },
+ {
+ "related-to": "vf-module",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757/vf-modules/vf-module/abb282c8-c932-45dc-9c62-01938eab32fa",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ },
+ {
+ "relationship-key": "vf-module.vf-module-id",
+ "relationship-value": "abb282c8-c932-45dc-9c62-01938eab32fa"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "id": "eaa4c964f1e0559cb061cd543c381e8067fc19524195e04ca25b539fdde19712",
+ "name": "kud-lr-onap-nf-20211103t124217642438z-unprotected-network",
+ "group": "k8s.plugin.opnfv.org",
+ "version": "v1alpha1",
+ "kind": "Network",
+ "namespace": "vfirewall",
+ "labels": [
+ "chart=base_template",
+ "k8splugin.io/rb-instance-id=nifty_lichterman",
+ "release=vfw-1-base",
+ "vnf-name=test-vnf"
+ ],
+ "selflink": "http://so-cnf-adapter:8090/api/cnf-adapter/v1/instance/nifty_lichterman/query?ApiVersion=v1alpha1&Kind=Network&Name=kud-lr-onap-nf-20211103t124217642438z-unprotected-network&Namespace=vfirewall",
+ "data-owner": "CnfAdapter",
+ "data-source": "K8sPlugin",
+ "data-source-version": "35940922",
+ "resource-version": "1635943376139",
+ "relationship-list": {
+ "relationship": [
+ {
+ "related-to": "generic-vnf",
+ "relationship-label": "tosca.relationships.HostedOn",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "generic-vnf.vnf-name",
+ "property-value": "VF_vfw_k8s_demo_CNF_LR_1"
+ }
+ ]
+ },
+ {
+ "related-to": "vf-module",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757/vf-modules/vf-module/abb282c8-c932-45dc-9c62-01938eab32fa",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ },
+ {
+ "relationship-key": "vf-module.vf-module-id",
+ "relationship-value": "abb282c8-c932-45dc-9c62-01938eab32fa"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "id": "6e5fc95d95eee51f818cd3fbd8fbd40bc29d9fe4c587a1e2840ef4d17fb2fac6",
+ "name": "vfw-1-vsn-configmap",
+ "group": "",
+ "version": "v1",
+ "kind": "ConfigMap",
+ "namespace": "vfirewall",
+ "labels": [
+ "chart=vsn",
+ "k8splugin.io/rb-instance-id=sharp_torvalds",
+ "release=vfw-1-vsn",
+ "vf-module-name=vfw-1-vsn",
+ "vnf-name=test-vnf"
+ ],
+ "selflink": "http://so-cnf-adapter:8090/api/cnf-adapter/v1/instance/sharp_torvalds/query?ApiVersion=v1&Kind=ConfigMap&Name=vfw-1-vsn-configmap&Namespace=vfirewall",
+ "data-owner": "CnfAdapter",
+ "data-source": "K8sPlugin",
+ "data-source-version": "35941065",
+ "resource-version": "1635943399747",
+ "relationship-list": {
+ "relationship": [
+ {
+ "related-to": "generic-vnf",
+ "relationship-label": "tosca.relationships.HostedOn",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "generic-vnf.vnf-name",
+ "property-value": "VF_vfw_k8s_demo_CNF_LR_1"
+ }
+ ]
+ },
+ {
+ "related-to": "vf-module",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757/vf-modules/vf-module/56f3d02b-5a32-4a97-9e7b-d3c0094c07e8",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ },
+ {
+ "relationship-key": "vf-module.vf-module-id",
+ "relationship-value": "56f3d02b-5a32-4a97-9e7b-d3c0094c07e8"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "id": "9e587e69bfe74762f66c59c5cb2ed41ca37bafa59b454b4e1432f4c61f0361f7",
+ "name": "kud-lr-onap-nf-20211103t124217642438z-management-network",
+ "group": "k8s.plugin.opnfv.org",
+ "version": "v1alpha1",
+ "kind": "Network",
+ "namespace": "vfirewall",
+ "labels": [
+ "chart=base_template",
+ "k8splugin.io/rb-instance-id=nifty_lichterman",
+ "release=vfw-1-base",
+ "vnf-name=test-vnf"
+ ],
+ "selflink": "http://so-cnf-adapter:8090/api/cnf-adapter/v1/instance/nifty_lichterman/query?ApiVersion=v1alpha1&Kind=Network&Name=kud-lr-onap-nf-20211103t124217642438z-management-network&Namespace=vfirewall",
+ "data-owner": "CnfAdapter",
+ "data-source": "K8sPlugin",
+ "data-source-version": "35940926",
+ "resource-version": "1635943376139",
+ "relationship-list": {
+ "relationship": [
+ {
+ "related-to": "generic-vnf",
+ "relationship-label": "tosca.relationships.HostedOn",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "generic-vnf.vnf-name",
+ "property-value": "VF_vfw_k8s_demo_CNF_LR_1"
+ }
+ ]
+ },
+ {
+ "related-to": "vf-module",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757/vf-modules/vf-module/abb282c8-c932-45dc-9c62-01938eab32fa",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ },
+ {
+ "relationship-key": "vf-module.vf-module-id",
+ "relationship-value": "abb282c8-c932-45dc-9c62-01938eab32fa"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "id": "20413fe7d44e703f3b498a442184b7e95a1e52afccf68cdc5785bfb1855a70c9",
+ "name": "vfw-1-vfw-configmap",
+ "group": "",
+ "version": "v1",
+ "kind": "ConfigMap",
+ "namespace": "vfirewall",
+ "labels": [
+ "chart=vfw",
+ "k8splugin.io/rb-instance-id=brave_brattain",
+ "release=vfw-1-vfw",
+ "vf-module-name=vfw-1-vfw",
+ "vnf-name=test-vnf"
+ ],
+ "selflink": "http://so-cnf-adapter:8090/api/cnf-adapter/v1/instance/brave_brattain/query?ApiVersion=v1&Kind=ConfigMap&Name=vfw-1-vfw-configmap&Namespace=vfirewall",
+ "data-owner": "CnfAdapter",
+ "data-source": "K8sPlugin",
+ "data-source-version": "35940973",
+ "resource-version": "1635943384048",
+ "relationship-list": {
+ "relationship": [
+ {
+ "related-to": "generic-vnf",
+ "relationship-label": "tosca.relationships.HostedOn",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "generic-vnf.vnf-name",
+ "property-value": "VF_vfw_k8s_demo_CNF_LR_1"
+ }
+ ]
+ },
+ {
+ "related-to": "vf-module",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757/vf-modules/vf-module/d56c54b9-40cc-4b7a-abce-50454571e39d",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ },
+ {
+ "relationship-key": "vf-module.vf-module-id",
+ "relationship-value": "d56c54b9-40cc-4b7a-abce-50454571e39d"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "id": "e73732351195c8c10d28413ddff1d968bd53b0b0e395c24b3b0fcd39f46ea730",
+ "name": "vfw-1-vpkg-mgmt",
+ "group": "",
+ "version": "v1",
+ "kind": "Service",
+ "namespace": "vfirewall",
+ "labels": [
+ "chart=vpkg",
+ "k8splugin.io/rb-instance-id=dazzling_nightingale",
+ "release=vfw-1-vpkg",
+ "vf-module-name=vfw-1-vpkg",
+ "vnf-name=test-vnf"
+ ],
+ "selflink": "http://so-cnf-adapter:8090/api/cnf-adapter/v1/instance/dazzling_nightingale/query?ApiVersion=v1&Kind=Service&Name=vfw-1-vpkg-mgmt&Namespace=vfirewall",
+ "data-owner": "CnfAdapter",
+ "data-source": "K8sPlugin",
+ "data-source-version": "35941017",
+ "resource-version": "1635943391652",
+ "relationship-list": {
+ "relationship": [
+ {
+ "related-to": "generic-vnf",
+ "relationship-label": "tosca.relationships.HostedOn",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "generic-vnf.vnf-name",
+ "property-value": "VF_vfw_k8s_demo_CNF_LR_1"
+ }
+ ]
+ },
+ {
+ "related-to": "vf-module",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757/vf-modules/vf-module/314795d7-6005-4462-a9fe-7006538e3ff9",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ },
+ {
+ "relationship-key": "vf-module.vf-module-id",
+ "relationship-value": "314795d7-6005-4462-a9fe-7006538e3ff9"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "id": "f65235da9cb098588b2db0c9e7da1ccb00954268fba6bd621bb9ef0b48cd717f",
+ "name": "vfw-1-vpkg",
+ "group": "apps",
+ "version": "v1",
+ "kind": "Deployment",
+ "namespace": "vfirewall",
+ "labels": [
+ "chart=vpkg",
+ "k8splugin.io/rb-instance-id=dazzling_nightingale",
+ "release=vfw-1-vpkg",
+ "vf-module-name=vfw-1-vpkg",
+ "vnf-name=test-vnf"
+ ],
+ "selflink": "http://so-cnf-adapter:8090/api/cnf-adapter/v1/instance/dazzling_nightingale/query?ApiVersion=v1&Kind=Deployment&Name=vfw-1-vpkg&Namespace=vfirewall",
+ "data-owner": "CnfAdapter",
+ "data-source": "K8sPlugin",
+ "data-source-version": "35941032",
+ "resource-version": "1635943391652",
+ "relationship-list": {
+ "relationship": [
+ {
+ "related-to": "generic-vnf",
+ "relationship-label": "tosca.relationships.HostedOn",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "generic-vnf.vnf-name",
+ "property-value": "VF_vfw_k8s_demo_CNF_LR_1"
+ }
+ ]
+ },
+ {
+ "related-to": "vf-module",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757/vf-modules/vf-module/314795d7-6005-4462-a9fe-7006538e3ff9",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ },
+ {
+ "relationship-key": "vf-module.vf-module-id",
+ "relationship-value": "314795d7-6005-4462-a9fe-7006538e3ff9"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "id": "87cbdb83bf436703bdb9823e07e1498a7b3ec7fb12ba14193aadd4630649e0ae",
+ "name": "vfw-1-vpkg-c6bdb954c-mlpz9",
+ "group": "",
+ "version": "v1",
+ "kind": "Pod",
+ "namespace": "vfirewall",
+ "labels": [
+ "chart=vpkg",
+ "k8splugin.io/rb-instance-id=dazzling_nightingale",
+ "pod-template-hash=c6bdb954c",
+ "release=vfw-1-vpkg",
+ "vf-module-name=vfw-1-vpkg",
+ "vnf-name=test-vnf"
+ ],
+ "selflink": "http://so-cnf-adapter:8090/api/cnf-adapter/v1/instance/dazzling_nightingale/query?ApiVersion=v1&Kind=Pod&Name=vfw-1-vpkg-c6bdb954c-mlpz9&Namespace=vfirewall",
+ "data-owner": "CnfAdapter",
+ "data-source": "K8sPlugin",
+ "data-source-version": "35941033",
+ "resource-version": "1635943391652",
+ "relationship-list": {
+ "relationship": [
+ {
+ "related-to": "generic-vnf",
+ "relationship-label": "tosca.relationships.HostedOn",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "generic-vnf.vnf-name",
+ "property-value": "VF_vfw_k8s_demo_CNF_LR_1"
+ }
+ ]
+ },
+ {
+ "related-to": "vf-module",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757/vf-modules/vf-module/314795d7-6005-4462-a9fe-7006538e3ff9",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ },
+ {
+ "relationship-key": "vf-module.vf-module-id",
+ "relationship-value": "314795d7-6005-4462-a9fe-7006538e3ff9"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "id": "8c8a65ef11c599eb15a1054ccd590a94305d52d2efa1e72c7581ee2094cace1b",
+ "name": "vfw-1-vsn",
+ "group": "apps",
+ "version": "v1",
+ "kind": "Deployment",
+ "namespace": "vfirewall",
+ "labels": [
+ "chart=vsn",
+ "k8splugin.io/rb-instance-id=sharp_torvalds",
+ "release=vfw-1-vsn",
+ "vf-module-name=vfw-1-vsn",
+ "vnf-name=test-vnf"
+ ],
+ "selflink": "http://so-cnf-adapter:8090/api/cnf-adapter/v1/instance/sharp_torvalds/query?ApiVersion=v1&Kind=Deployment&Name=vfw-1-vsn&Namespace=vfirewall",
+ "data-owner": "CnfAdapter",
+ "data-source": "K8sPlugin",
+ "data-source-version": "35941083",
+ "resource-version": "1635943399747",
+ "relationship-list": {
+ "relationship": [
+ {
+ "related-to": "generic-vnf",
+ "relationship-label": "tosca.relationships.HostedOn",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "generic-vnf.vnf-name",
+ "property-value": "VF_vfw_k8s_demo_CNF_LR_1"
+ }
+ ]
+ },
+ {
+ "related-to": "vf-module",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757/vf-modules/vf-module/56f3d02b-5a32-4a97-9e7b-d3c0094c07e8",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ },
+ {
+ "relationship-key": "vf-module.vf-module-id",
+ "relationship-value": "56f3d02b-5a32-4a97-9e7b-d3c0094c07e8"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "id": "677e61310b562007084bc99c42aefb9106438d4c782afc6504d6a6a062b974a8",
+ "name": "kud-lr-onap-nf-20211103t124217642438z-protected-network",
+ "group": "k8s.plugin.opnfv.org",
+ "version": "v1alpha1",
+ "kind": "Network",
+ "namespace": "vfirewall",
+ "labels": [
+ "chart=base_template",
+ "k8splugin.io/rb-instance-id=nifty_lichterman",
+ "release=vfw-1-base",
+ "vnf-name=test-vnf"
+ ],
+ "selflink": "http://so-cnf-adapter:8090/api/cnf-adapter/v1/instance/nifty_lichterman/query?ApiVersion=v1alpha1&Kind=Network&Name=kud-lr-onap-nf-20211103t124217642438z-protected-network&Namespace=vfirewall",
+ "data-owner": "CnfAdapter",
+ "data-source": "K8sPlugin",
+ "data-source-version": "35940927",
+ "resource-version": "1635943376139",
+ "relationship-list": {
+ "relationship": [
+ {
+ "related-to": "generic-vnf",
+ "relationship-label": "tosca.relationships.HostedOn",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "generic-vnf.vnf-name",
+ "property-value": "VF_vfw_k8s_demo_CNF_LR_1"
+ }
+ ]
+ },
+ {
+ "related-to": "vf-module",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757/vf-modules/vf-module/abb282c8-c932-45dc-9c62-01938eab32fa",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ },
+ {
+ "relationship-key": "vf-module.vf-module-id",
+ "relationship-value": "abb282c8-c932-45dc-9c62-01938eab32fa"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "id": "5538b19871da1fd05b82366c38cbbe88bae4d3444b6a21018f83787327958617",
+ "name": "vfw-1-vpkg-configmap",
+ "group": "",
+ "version": "v1",
+ "kind": "ConfigMap",
+ "namespace": "vfirewall",
+ "labels": [
+ "chart=vpkg",
+ "k8splugin.io/rb-instance-id=dazzling_nightingale",
+ "release=vfw-1-vpkg",
+ "vf-module-name=vfw-1-vpkg",
+ "vnf-name=test-vnf"
+ ],
+ "selflink": "http://so-cnf-adapter:8090/api/cnf-adapter/v1/instance/dazzling_nightingale/query?ApiVersion=v1&Kind=ConfigMap&Name=vfw-1-vpkg-configmap&Namespace=vfirewall",
+ "data-owner": "CnfAdapter",
+ "data-source": "K8sPlugin",
+ "data-source-version": "35941014",
+ "resource-version": "1635943391652",
+ "relationship-list": {
+ "relationship": [
+ {
+ "related-to": "generic-vnf",
+ "relationship-label": "tosca.relationships.HostedOn",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "generic-vnf.vnf-name",
+ "property-value": "VF_vfw_k8s_demo_CNF_LR_1"
+ }
+ ]
+ },
+ {
+ "related-to": "vf-module",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757/vf-modules/vf-module/314795d7-6005-4462-a9fe-7006538e3ff9",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ },
+ {
+ "relationship-key": "vf-module.vf-module-id",
+ "relationship-value": "314795d7-6005-4462-a9fe-7006538e3ff9"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "id": "6134c369330c7398807d550c708890b0bcef2affbe5af1f9acde02c5a58c104a",
+ "name": "vfw-1-vsn-ui",
+ "group": "",
+ "version": "v1",
+ "kind": "Service",
+ "namespace": "vfirewall",
+ "labels": [
+ "chart=vsn",
+ "k8splugin.io/rb-instance-id=sharp_torvalds",
+ "release=vfw-1-vsn",
+ "vf-module-name=vfw-1-vsn",
+ "vnf-name=test-vnf"
+ ],
+ "selflink": "http://so-cnf-adapter:8090/api/cnf-adapter/v1/instance/sharp_torvalds/query?ApiVersion=v1&Kind=Service&Name=vfw-1-vsn-ui&Namespace=vfirewall",
+ "data-owner": "CnfAdapter",
+ "data-source": "K8sPlugin",
+ "data-source-version": "35941068",
+ "resource-version": "1635943399747",
+ "relationship-list": {
+ "relationship": [
+ {
+ "related-to": "generic-vnf",
+ "relationship-label": "tosca.relationships.HostedOn",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "generic-vnf.vnf-name",
+ "property-value": "VF_vfw_k8s_demo_CNF_LR_1"
+ }
+ ]
+ },
+ {
+ "related-to": "vf-module",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757/vf-modules/vf-module/56f3d02b-5a32-4a97-9e7b-d3c0094c07e8",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ },
+ {
+ "relationship-key": "vf-module.vf-module-id",
+ "relationship-value": "56f3d02b-5a32-4a97-9e7b-d3c0094c07e8"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "id": "633a8c14f7df72b14f4d8e7b77cf7a2f33b71d54136e8582f135678c586fcde3",
+ "name": "vfw-1-vfw-89bd4bfdb-mshpq",
+ "group": "",
+ "version": "v1",
+ "kind": "Pod",
+ "namespace": "vfirewall",
+ "labels": [
+ "chart=vfw",
+ "k8splugin.io/rb-instance-id=brave_brattain",
+ "pod-template-hash=89bd4bfdb",
+ "release=vfw-1-vfw",
+ "vf-module-name=vfw-1-vfw",
+ "vnf-name=test-vnf"
+ ],
+ "selflink": "http://so-cnf-adapter:8090/api/cnf-adapter/v1/instance/brave_brattain/query?ApiVersion=v1&Kind=Pod&Name=vfw-1-vfw-89bd4bfdb-mshpq&Namespace=vfirewall",
+ "data-owner": "CnfAdapter",
+ "data-source": "K8sPlugin",
+ "data-source-version": "35940986",
+ "resource-version": "1635943384048",
+ "relationship-list": {
+ "relationship": [
+ {
+ "related-to": "generic-vnf",
+ "relationship-label": "tosca.relationships.HostedOn",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "generic-vnf.vnf-name",
+ "property-value": "VF_vfw_k8s_demo_CNF_LR_1"
+ }
+ ]
+ },
+ {
+ "related-to": "vf-module",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/network/generic-vnfs/generic-vnf/d4af22f0-70e6-47ac-8d5b-4d645fc25757/vf-modules/vf-module/d56c54b9-40cc-4b7a-abce-50454571e39d",
+ "relationship-data": [
+ {
+ "relationship-key": "generic-vnf.vnf-id",
+ "relationship-value": "d4af22f0-70e6-47ac-8d5b-4d645fc25757"
+ },
+ {
+ "relationship-key": "vf-module.vf-module-id",
+ "relationship-value": "d56c54b9-40cc-4b7a-abce-50454571e39d"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/docs/files/vFW_CNF_CDS/postman.zip b/docs/files/vFW_CNF_CDS/postman.zip
new file mode 100644
index 000000000..11d3ef47c
--- /dev/null
+++ b/docs/files/vFW_CNF_CDS/postman.zip
Binary files differ
diff --git a/docs/files/vFW_CNF_CDS/profile-templating.png b/docs/files/vFW_CNF_CDS/profile-templating.png
new file mode 100644
index 000000000..a60b040fe
--- /dev/null
+++ b/docs/files/vFW_CNF_CDS/profile-templating.png
Binary files differ
diff --git a/docs/files/vFW_CNF_CDS/scenarios.png b/docs/files/vFW_CNF_CDS/scenarios.png
new file mode 100644
index 000000000..53da8668d
--- /dev/null
+++ b/docs/files/vFW_CNF_CDS/scenarios.png
Binary files differ
diff --git a/docs/files/vFW_CNF_CDS/status-response.json b/docs/files/vFW_CNF_CDS/status-response.json
new file mode 100644
index 000000000..78b6c836d
--- /dev/null
+++ b/docs/files/vFW_CNF_CDS/status-response.json
@@ -0,0 +1,1213 @@
+{
+ "request": {
+ "rb-name": "vfw",
+ "rb-version": "plugin_test",
+ "profile-name": "test_profile",
+ "release-name": "",
+ "cloud-region": "kud",
+ "labels": {
+ "testCaseName": "plugin_fw.sh"
+ },
+ "override-values": {
+ "global.onapPrivateNetworkName": "onap-private-net-test"
+ }
+ },
+ "ready": false,
+ "resourceCount": 12,
+ "resourcesStatus": [
+ {
+ "name": "sink-configmap",
+ "GVK": {
+ "Group": "",
+ "Version": "v1",
+ "Kind": "ConfigMap"
+ },
+ "status": {
+ "apiVersion": "v1",
+ "data": {
+ "protected_net_gw": "192.168.20.100",
+ "protected_private_net_cidr": "192.168.10.0/24"
+ },
+ "kind": "ConfigMap",
+ "metadata": {
+ "creationTimestamp": "2020-09-29T13:36:25Z",
+ "labels": {
+ "k8splugin.io/rb-instance-id": "practical_nobel"
+ },
+ "name": "sink-configmap",
+ "namespace": "plugin-tests-namespace",
+ "resourceVersion": "10720771",
+ "selfLink": "/api/v1/namespaces/plugin-tests-namespace/configmaps/sink-configmap",
+ "uid": "46c8bec4-980c-455b-9eb0-fb84ac8cc450"
+ }
+ }
+ },
+ {
+ "name": "packetgen-service",
+ "GVK": {
+ "Group": "",
+ "Version": "v1",
+ "Kind": "Service"
+ },
+ "status": {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "creationTimestamp": "2020-09-29T13:36:25Z",
+ "labels": {
+ "app": "packetgen",
+ "chart": "packetgen",
+ "k8splugin.io/rb-instance-id": "practical_nobel",
+ "release": "test-release"
+ },
+ "name": "packetgen-service",
+ "namespace": "plugin-tests-namespace",
+ "resourceVersion": "10720776",
+ "selfLink": "/api/v1/namespaces/plugin-tests-namespace/services/packetgen-service",
+ "uid": "5e1c27c8-1db8-4883-a3a2-6f4e98e2f48b"
+ },
+ "spec": {
+ "clusterIP": "10.244.8.190",
+ "externalTrafficPolicy": "Cluster",
+ "ports": [
+ {
+ "nodePort": 30831,
+ "port": 2831,
+ "protocol": "TCP",
+ "targetPort": 2831
+ }
+ ],
+ "selector": {
+ "app": "packetgen",
+ "release": "test-release"
+ },
+ "sessionAffinity": "None",
+ "type": "NodePort"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ }
+ },
+ {
+ "name": "sink-service",
+ "GVK": {
+ "Group": "",
+ "Version": "v1",
+ "Kind": "Service"
+ },
+ "status": {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "creationTimestamp": "2020-09-29T13:36:25Z",
+ "labels": {
+ "app": "sink",
+ "chart": "sink",
+ "k8splugin.io/rb-instance-id": "practical_nobel",
+ "release": "test-release"
+ },
+ "name": "sink-service",
+ "namespace": "plugin-tests-namespace",
+ "resourceVersion": "10720780",
+ "selfLink": "/api/v1/namespaces/plugin-tests-namespace/services/sink-service",
+ "uid": "789a14fe-1246-4cdd-ba9a-359240ba614f"
+ },
+ "spec": {
+ "clusterIP": "10.244.2.4",
+ "externalTrafficPolicy": "Cluster",
+ "ports": [
+ {
+ "nodePort": 30667,
+ "port": 667,
+ "protocol": "TCP",
+ "targetPort": 667
+ }
+ ],
+ "selector": {
+ "app": "sink",
+ "release": "test-release"
+ },
+ "sessionAffinity": "None",
+ "type": "NodePort"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ }
+ },
+ {
+ "name": "test-release-packetgen",
+ "GVK": {
+ "Group": "apps",
+ "Version": "v1",
+ "Kind": "Deployment"
+ },
+ "status": {
+ "apiVersion": "apps/v1",
+ "kind": "Deployment",
+ "metadata": {
+ "annotations": {
+ "deployment.kubernetes.io/revision": "1"
+ },
+ "creationTimestamp": "2020-09-29T13:36:25Z",
+ "generation": 1,
+ "labels": {
+ "app": "packetgen",
+ "chart": "packetgen",
+ "k8splugin.io/rb-instance-id": "practical_nobel",
+ "release": "test-release"
+ },
+ "name": "test-release-packetgen",
+ "namespace": "plugin-tests-namespace",
+ "resourceVersion": "10720804",
+ "selfLink": "/apis/apps/v1/namespaces/plugin-tests-namespace/deployments/test-release-packetgen",
+ "uid": "42578e9f-7c88-46d6-94f7-a7bcc8e69ec6"
+ },
+ "spec": {
+ "progressDeadlineSeconds": 600,
+ "replicas": 1,
+ "revisionHistoryLimit": 10,
+ "selector": {
+ "matchLabels": {
+ "app": "packetgen",
+ "release": "test-release"
+ }
+ },
+ "strategy": {
+ "rollingUpdate": {
+ "maxSurge": "25%",
+ "maxUnavailable": "25%"
+ },
+ "type": "RollingUpdate"
+ },
+ "template": {
+ "metadata": {
+ "annotations": {
+ "VirtletCloudInitUserData": "ssh_pwauth: True\nusers:\n- name: admin\n gecos: User\n primary-group: admin\n groups: users\n sudo: ALL=(ALL) NOPASSWD:ALL\n lock_passwd: false\n passwd: \"$6$rounds=4096$QA5OCKHTE41$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/\"\nruncmd:\n - export demo_artifacts_version=1.5.0\n - export vfw_private_ip_0=192.168.10.3\n - export vsn_private_ip_0=192.168.20.3\n - export protected_net_cidr=192.168.20.0/24\n - export dcae_collector_ip=10.0.4.1\n - export dcae_collector_port=8081\n - export protected_net_gw=192.168.20.100/24\n - export protected_private_net_cidr=192.168.10.0/24\n - wget -O - https://git.onap.org/multicloud/k8s/plain/kud/tests/vFW/packetgen | sudo -E bash\n",
+ "VirtletLibvirtCPUSetting": "mode: host-model\n",
+ "VirtletRootVolumeSize": "5Gi",
+ "app": "packetgen",
+ "k8s.plugin.opnfv.org/nfn-network": "{ \"type\": \"ovn4nfv\", \"interface\":[ { \"name\": \"unprotected-private-net\", \"ipAddress\": \"192.168.10.2\", \"interface\": \"eth1\" , \"defaultGateway\": \"false\"}, { \"name\": \"onap-private-net-test\", \"ipAddress\": \"10.0.100.2\", \"interface\": \"eth2\" , \"defaultGateway\": \"false\"} ]}",
+ "k8s.v1.cni.cncf.io/networks": "[{\"name\": \"ovn-networkobj\", \"namespace\": \"default\"}]",
+ "kubernetes.io/target-runtime": "virtlet.cloud",
+ "release": "test-release"
+ },
+ "creationTimestamp": null,
+ "labels": {
+ "app": "packetgen",
+ "k8splugin.io/rb-instance-id": "practical_nobel",
+ "release": "test-release"
+ }
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "extraRuntime",
+ "operator": "In",
+ "values": [
+ "virtlet"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "image": "virtlet.cloud/ubuntu/16.04:latest",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "packetgen",
+ "resources": {
+ "limits": {
+ "memory": "4Gi"
+ }
+ },
+ "stdin": true,
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "tty": true
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "terminationGracePeriodSeconds": 30
+ }
+ }
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastTransitionTime": "2020-09-29T13:36:25Z",
+ "lastUpdateTime": "2020-09-29T13:36:25Z",
+ "message": "Deployment does not have minimum availability.",
+ "reason": "MinimumReplicasUnavailable",
+ "status": "False",
+ "type": "Available"
+ },
+ {
+ "lastTransitionTime": "2020-09-29T13:36:25Z",
+ "lastUpdateTime": "2020-09-29T13:36:25Z",
+ "message": "ReplicaSet \"test-release-packetgen-5647bfb56\" is progressing.",
+ "reason": "ReplicaSetUpdated",
+ "status": "True",
+ "type": "Progressing"
+ }
+ ],
+ "observedGeneration": 1,
+ "replicas": 1,
+ "unavailableReplicas": 1,
+ "updatedReplicas": 1
+ }
+ }
+ },
+ {
+ "name": "test-release-sink",
+ "GVK": {
+ "Group": "apps",
+ "Version": "v1",
+ "Kind": "Deployment"
+ },
+ "status": {
+ "apiVersion": "apps/v1",
+ "kind": "Deployment",
+ "metadata": {
+ "annotations": {
+ "deployment.kubernetes.io/revision": "1"
+ },
+ "creationTimestamp": "2020-09-29T13:36:25Z",
+ "generation": 1,
+ "labels": {
+ "app": "sink",
+ "chart": "sink",
+ "k8splugin.io/rb-instance-id": "practical_nobel",
+ "release": "test-release"
+ },
+ "name": "test-release-sink",
+ "namespace": "plugin-tests-namespace",
+ "resourceVersion": "10720857",
+ "selfLink": "/apis/apps/v1/namespaces/plugin-tests-namespace/deployments/test-release-sink",
+ "uid": "1f50eecf-c924-4434-be87-daf7c64b6506"
+ },
+ "spec": {
+ "progressDeadlineSeconds": 600,
+ "replicas": 1,
+ "revisionHistoryLimit": 10,
+ "selector": {
+ "matchLabels": {
+ "app": "sink",
+ "release": "test-release"
+ }
+ },
+ "strategy": {
+ "rollingUpdate": {
+ "maxSurge": "25%",
+ "maxUnavailable": "25%"
+ },
+ "type": "RollingUpdate"
+ },
+ "template": {
+ "metadata": {
+ "annotations": {
+ "k8s.plugin.opnfv.org/nfn-network": "{ \"type\": \"ovn4nfv\", \"interface\": [ { \"name\": \"protected-private-net\", \"ipAddress\": \"192.168.20.3\", \"interface\": \"eth1\", \"defaultGateway\": \"false\" }, { \"name\": \"onap-private-net-test\", \"ipAddress\": \"10.10.100.4\", \"interface\": \"eth2\" , \"defaultGateway\": \"false\"} ]}",
+ "k8s.v1.cni.cncf.io/networks": "[{\"name\": \"ovn-networkobj\", \"namespace\": \"default\"}]"
+ },
+ "creationTimestamp": null,
+ "labels": {
+ "app": "sink",
+ "k8splugin.io/rb-instance-id": "practical_nobel",
+ "release": "test-release"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "envFrom": [
+ {
+ "configMapRef": {
+ "name": "sink-configmap"
+ }
+ }
+ ],
+ "image": "rtsood/onap-vfw-demo-sink:0.2.0",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "sink",
+ "resources": {},
+ "securityContext": {
+ "privileged": true
+ },
+ "stdin": true,
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "tty": true
+ },
+ {
+ "image": "electrocucaracha/darkstat:latest",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "darkstat",
+ "ports": [
+ {
+ "containerPort": 667,
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {},
+ "stdin": true,
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "tty": true
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "terminationGracePeriodSeconds": 30
+ }
+ }
+ },
+ "status": {
+ "availableReplicas": 1,
+ "conditions": [
+ {
+ "lastTransitionTime": "2020-09-29T13:36:33Z",
+ "lastUpdateTime": "2020-09-29T13:36:33Z",
+ "message": "Deployment has minimum availability.",
+ "reason": "MinimumReplicasAvailable",
+ "status": "True",
+ "type": "Available"
+ },
+ {
+ "lastTransitionTime": "2020-09-29T13:36:25Z",
+ "lastUpdateTime": "2020-09-29T13:36:33Z",
+ "message": "ReplicaSet \"test-release-sink-6546c4f698\" has successfully progressed.",
+ "reason": "NewReplicaSetAvailable",
+ "status": "True",
+ "type": "Progressing"
+ }
+ ],
+ "observedGeneration": 1,
+ "readyReplicas": 1,
+ "replicas": 1,
+ "updatedReplicas": 1
+ }
+ }
+ },
+ {
+ "name": "test-release-firewall",
+ "GVK": {
+ "Group": "apps",
+ "Version": "v1",
+ "Kind": "Deployment"
+ },
+ "status": {
+ "apiVersion": "apps/v1",
+ "kind": "Deployment",
+ "metadata": {
+ "annotations": {
+ "deployment.kubernetes.io/revision": "1"
+ },
+ "creationTimestamp": "2020-09-29T13:36:25Z",
+ "generation": 1,
+ "labels": {
+ "app": "firewall",
+ "chart": "firewall",
+ "k8splugin.io/rb-instance-id": "practical_nobel",
+ "release": "test-release"
+ },
+ "name": "test-release-firewall",
+ "namespace": "plugin-tests-namespace",
+ "resourceVersion": "10720823",
+ "selfLink": "/apis/apps/v1/namespaces/plugin-tests-namespace/deployments/test-release-firewall",
+ "uid": "77392f60-7d12-4846-8edb-f4a65a4be098"
+ },
+ "spec": {
+ "progressDeadlineSeconds": 600,
+ "replicas": 1,
+ "revisionHistoryLimit": 10,
+ "selector": {
+ "matchLabels": {
+ "app": "firewall",
+ "release": "test-release"
+ }
+ },
+ "strategy": {
+ "rollingUpdate": {
+ "maxSurge": "25%",
+ "maxUnavailable": "25%"
+ },
+ "type": "RollingUpdate"
+ },
+ "template": {
+ "metadata": {
+ "annotations": {
+ "VirtletCloudInitUserData": "ssh_pwauth: True\nusers:\n- name: admin\n gecos: User\n primary-group: admin\n groups: users\n sudo: ALL=(ALL) NOPASSWD:ALL\n lock_passwd: false\n passwd: \"$6$rounds=4096$QA5OCKHTE41$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/\"\nruncmd:\n - export demo_artifacts_version=1.5.0\n - export vfw_private_ip_0=192.168.10.3\n - export vsn_private_ip_0=192.168.20.3\n - export protected_net_cidr=192.168.20.0/24\n - export dcae_collector_ip=10.0.4.1\n - export dcae_collector_port=8081\n - export protected_net_gw=192.168.20.100/24\n - export protected_private_net_cidr=192.168.10.0/24\n - wget -O - https://git.onap.org/multicloud/k8s/plain/kud/tests/vFW/firewall | sudo -E bash\n",
+ "VirtletLibvirtCPUSetting": "mode: host-model\n",
+ "VirtletRootVolumeSize": "5Gi",
+ "k8s.plugin.opnfv.org/nfn-network": "{ \"type\": \"ovn4nfv\", \"interface\": [ { \"name\": \"unprotected-private-net\", \"ipAddress\": \"192.168.10.3\", \"interface\": \"eth1\" , \"defaultGateway\": \"false\"}, { \"name\": \"protected-private-net\", \"ipAddress\": \"192.168.20.2\", \"interface\": \"eth2\", \"defaultGateway\": \"false\" }, { \"name\": \"onap-private-net-test\", \"ipAddress\": \"10.10.100.3\", \"interface\": \"eth3\" , \"defaultGateway\": \"false\"} ]}",
+ "k8s.v1.cni.cncf.io/networks": "[{\"name\": \"ovn-networkobj\", \"namespace\": \"default\"}]",
+ "kubernetes.io/target-runtime": "virtlet.cloud"
+ },
+ "creationTimestamp": null,
+ "labels": {
+ "app": "firewall",
+ "k8splugin.io/rb-instance-id": "practical_nobel",
+ "release": "test-release"
+ }
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "extraRuntime",
+ "operator": "In",
+ "values": [
+ "virtlet"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "image": "virtlet.cloud/ubuntu/16.04:latest",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "firewall",
+ "resources": {
+ "limits": {
+ "memory": "4Gi"
+ }
+ },
+ "stdin": true,
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "tty": true
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "terminationGracePeriodSeconds": 30
+ }
+ }
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastTransitionTime": "2020-09-29T13:36:25Z",
+ "lastUpdateTime": "2020-09-29T13:36:25Z",
+ "message": "Deployment does not have minimum availability.",
+ "reason": "MinimumReplicasUnavailable",
+ "status": "False",
+ "type": "Available"
+ },
+ {
+ "lastTransitionTime": "2020-09-29T13:36:25Z",
+ "lastUpdateTime": "2020-09-29T13:36:25Z",
+ "message": "ReplicaSet \"test-release-firewall-5bf9995f5f\" is progressing.",
+ "reason": "ReplicaSetUpdated",
+ "status": "True",
+ "type": "Progressing"
+ }
+ ],
+ "observedGeneration": 1,
+ "replicas": 1,
+ "unavailableReplicas": 1,
+ "updatedReplicas": 1
+ }
+ }
+ },
+ {
+ "name": "onap-private-net-test",
+ "GVK": {
+ "Group": "k8s.plugin.opnfv.org",
+ "Version": "v1alpha1",
+ "Kind": "Network"
+ },
+ "status": {
+ "apiVersion": "k8s.plugin.opnfv.org/v1alpha1",
+ "kind": "Network",
+ "metadata": {
+ "creationTimestamp": "2020-09-29T13:36:25Z",
+ "finalizers": [
+ "nfnCleanUpNetwork"
+ ],
+ "generation": 2,
+ "labels": {
+ "k8splugin.io/rb-instance-id": "practical_nobel"
+ },
+ "name": "onap-private-net-test",
+ "namespace": "plugin-tests-namespace",
+ "resourceVersion": "10720825",
+ "selfLink": "/apis/k8s.plugin.opnfv.org/v1alpha1/namespaces/plugin-tests-namespace/networks/onap-private-net-test",
+ "uid": "43d413f1-f222-4d98-9ddd-b209d3ade106"
+ },
+ "spec": {
+ "cniType": "ovn4nfv",
+ "dns": {},
+ "ipv4Subnets": [
+ {
+ "gateway": "10.10.0.1/16",
+ "name": "subnet1",
+ "subnet": "10.10.0.0/16"
+ }
+ ]
+ },
+ "status": {
+ "state": "Created"
+ }
+ }
+ },
+ {
+ "name": "protected-private-net",
+ "GVK": {
+ "Group": "k8s.plugin.opnfv.org",
+ "Version": "v1alpha1",
+ "Kind": "Network"
+ },
+ "status": {
+ "apiVersion": "k8s.plugin.opnfv.org/v1alpha1",
+ "kind": "Network",
+ "metadata": {
+ "creationTimestamp": "2020-09-29T13:36:25Z",
+ "finalizers": [
+ "nfnCleanUpNetwork"
+ ],
+ "generation": 2,
+ "labels": {
+ "k8splugin.io/rb-instance-id": "practical_nobel"
+ },
+ "name": "protected-private-net",
+ "namespace": "plugin-tests-namespace",
+ "resourceVersion": "10720827",
+ "selfLink": "/apis/k8s.plugin.opnfv.org/v1alpha1/namespaces/plugin-tests-namespace/networks/protected-private-net",
+ "uid": "75c98944-80b6-4158-afed-8efa7a1075e2"
+ },
+ "spec": {
+ "cniType": "ovn4nfv",
+ "dns": {},
+ "ipv4Subnets": [
+ {
+ "gateway": "192.168.20.100/24",
+ "name": "subnet1",
+ "subnet": "192.168.20.0/24"
+ }
+ ]
+ },
+ "status": {
+ "state": "Created"
+ }
+ }
+ },
+ {
+ "name": "unprotected-private-net",
+ "GVK": {
+ "Group": "k8s.plugin.opnfv.org",
+ "Version": "v1alpha1",
+ "Kind": "Network"
+ },
+ "status": {
+ "apiVersion": "k8s.plugin.opnfv.org/v1alpha1",
+ "kind": "Network",
+ "metadata": {
+ "creationTimestamp": "2020-09-29T13:36:25Z",
+ "finalizers": [
+ "nfnCleanUpNetwork"
+ ],
+ "generation": 2,
+ "labels": {
+ "k8splugin.io/rb-instance-id": "practical_nobel"
+ },
+ "name": "unprotected-private-net",
+ "namespace": "plugin-tests-namespace",
+ "resourceVersion": "10720829",
+ "selfLink": "/apis/k8s.plugin.opnfv.org/v1alpha1/namespaces/plugin-tests-namespace/networks/unprotected-private-net",
+ "uid": "54995c10-bffd-4bb2-bbab-5de266af9456"
+ },
+ "spec": {
+ "cniType": "ovn4nfv",
+ "dns": {},
+ "ipv4Subnets": [
+ {
+ "gateway": "192.168.10.1/24",
+ "name": "subnet1",
+ "subnet": "192.168.10.0/24"
+ }
+ ]
+ },
+ "status": {
+ "state": "Created"
+ }
+ }
+ },
+ {
+ "name": "test-release-firewall-5bf9995f5f-hnvps",
+ "GVK": {
+ "Group": "",
+ "Version": "",
+ "Kind": ""
+ },
+ "status": {
+ "metadata": {
+ "annotations": {
+ "VirtletCloudInitUserData": "ssh_pwauth: True\nusers:\n- name: admin\n gecos: User\n primary-group: admin\n groups: users\n sudo: ALL=(ALL) NOPASSWD:ALL\n lock_passwd: false\n passwd: \"$6$rounds=4096$QA5OCKHTE41$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/\"\nruncmd:\n - export demo_artifacts_version=1.5.0\n - export vfw_private_ip_0=192.168.10.3\n - export vsn_private_ip_0=192.168.20.3\n - export protected_net_cidr=192.168.20.0/24\n - export dcae_collector_ip=10.0.4.1\n - export dcae_collector_port=8081\n - export protected_net_gw=192.168.20.100/24\n - export protected_private_net_cidr=192.168.10.0/24\n - wget -O - https://git.onap.org/multicloud/k8s/plain/kud/tests/vFW/firewall | sudo -E bash\n",
+ "VirtletLibvirtCPUSetting": "mode: host-model\n",
+ "VirtletRootVolumeSize": "5Gi",
+ "k8s.plugin.opnfv.org/nfn-network": "{ \"type\": \"ovn4nfv\", \"interface\": [ { \"name\": \"unprotected-private-net\", \"ipAddress\": \"192.168.10.3\", \"interface\": \"eth1\" , \"defaultGateway\": \"false\"}, { \"name\": \"protected-private-net\", \"ipAddress\": \"192.168.20.2\", \"interface\": \"eth2\", \"defaultGateway\": \"false\" }, { \"name\": \"onap-private-net-test\", \"ipAddress\": \"10.10.100.3\", \"interface\": \"eth3\" , \"defaultGateway\": \"false\"} ]}",
+ "k8s.plugin.opnfv.org/ovnInterfaces": "[{\"ip_address\":\"192.168.10.3/24\", \"mac_address\":\"00:00:00:2b:62:71\", \"gateway_ip\": \"192.168.10.1\",\"defaultGateway\":\"false\",\"interface\":\"eth1\"},{\"ip_address\":\"192.168.20.2/24\", \"mac_address\":\"00:00:00:43:d6:f3\", \"gateway_ip\": \"192.168.20.100\",\"defaultGateway\":\"false\",\"interface\":\"eth2\"},{\"ip_address\":\"10.10.100.3/16\", \"mac_address\":\"00:00:00:03:4c:34\", \"gateway_ip\": \"10.10.0.1\",\"defaultGateway\":\"false\",\"interface\":\"eth3\"}]",
+ "k8s.v1.cni.cncf.io/networks": "[{\"name\": \"ovn-networkobj\", \"namespace\": \"default\"}]",
+ "k8s.v1.cni.cncf.io/networks-status": "[{\n \"name\": \"cni0\",\n \"interface\": \"virtlet-eth0\",\n \"ips\": [\n \"10.244.64.45\"\n ],\n \"mac\": \"0a:58:0a:f4:40:2d\",\n \"default\": true,\n \"dns\": {}\n},{\n \"name\": \"ovn4nfv-k8s-plugin\",\n \"interface\": \"eth3\",\n \"ips\": [\n \"192.168.10.3\",\n \"192.168.20.2\",\n \"10.10.100.3\"\n ],\n \"mac\": \"00:00:00:03:4c:34\",\n \"dns\": {}\n}]",
+ "kubernetes.io/target-runtime": "virtlet.cloud"
+ },
+ "creationTimestamp": "2020-09-29T13:36:25Z",
+ "generateName": "test-release-firewall-5bf9995f5f-",
+ "labels": {
+ "app": "firewall",
+ "k8splugin.io/rb-instance-id": "practical_nobel",
+ "pod-template-hash": "5bf9995f5f",
+ "release": "test-release"
+ },
+ "name": "test-release-firewall-5bf9995f5f-hnvps",
+ "namespace": "plugin-tests-namespace",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "ReplicaSet",
+ "name": "test-release-firewall-5bf9995f5f",
+ "uid": "8d68ff0c-c6f8-426c-8ebc-0ce5b7fb5132"
+ }
+ ],
+ "resourceVersion": "10720850",
+ "selfLink": "/api/v1/namespaces/plugin-tests-namespace/pods/test-release-firewall-5bf9995f5f-hnvps",
+ "uid": "92b169e3-2d25-449d-b029-d47674eb98e6"
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "extraRuntime",
+ "operator": "In",
+ "values": [
+ "virtlet"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "image": "virtlet.cloud/ubuntu/16.04:latest",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "firewall",
+ "resources": {
+ "limits": {
+ "memory": "4Gi"
+ },
+ "requests": {
+ "memory": "4Gi"
+ }
+ },
+ "stdin": true,
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "tty": true,
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "default-token-gsh95",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "enableServiceLinks": true,
+ "nodeName": "localhost",
+ "priority": 0,
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "default",
+ "serviceAccountName": "default",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ }
+ ],
+ "volumes": [
+ {
+ "name": "default-token-gsh95",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "default-token-gsh95"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2020-09-29T13:36:25Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2020-09-29T13:36:25Z",
+ "message": "containers with unready status: [firewall]",
+ "reason": "ContainersNotReady",
+ "status": "False",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2020-09-29T13:36:25Z",
+ "message": "containers with unready status: [firewall]",
+ "reason": "ContainersNotReady",
+ "status": "False",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2020-09-29T13:36:25Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "image": "virtlet.cloud/ubuntu/16.04:latest",
+ "imageID": "",
+ "lastState": {},
+ "name": "firewall",
+ "ready": false,
+ "restartCount": 0,
+ "state": {
+ "waiting": {
+ "reason": "ContainerCreating"
+ }
+ }
+ }
+ ],
+ "hostIP": "192.168.255.3",
+ "phase": "Pending",
+ "qosClass": "Burstable",
+ "startTime": "2020-09-29T13:36:25Z"
+ }
+ }
+ },
+ {
+ "name": "test-release-packetgen-5647bfb56-ghpbs",
+ "GVK": {
+ "Group": "",
+ "Version": "",
+ "Kind": ""
+ },
+ "status": {
+ "metadata": {
+ "annotations": {
+ "VirtletCloudInitUserData": "ssh_pwauth: True\nusers:\n- name: admin\n gecos: User\n primary-group: admin\n groups: users\n sudo: ALL=(ALL) NOPASSWD:ALL\n lock_passwd: false\n passwd: \"$6$rounds=4096$QA5OCKHTE41$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/\"\nruncmd:\n - export demo_artifacts_version=1.5.0\n - export vfw_private_ip_0=192.168.10.3\n - export vsn_private_ip_0=192.168.20.3\n - export protected_net_cidr=192.168.20.0/24\n - export dcae_collector_ip=10.0.4.1\n - export dcae_collector_port=8081\n - export protected_net_gw=192.168.20.100/24\n - export protected_private_net_cidr=192.168.10.0/24\n - wget -O - https://git.onap.org/multicloud/k8s/plain/kud/tests/vFW/packetgen | sudo -E bash\n",
+ "VirtletLibvirtCPUSetting": "mode: host-model\n",
+ "VirtletRootVolumeSize": "5Gi",
+ "app": "packetgen",
+ "k8s.plugin.opnfv.org/nfn-network": "{ \"type\": \"ovn4nfv\", \"interface\":[ { \"name\": \"unprotected-private-net\", \"ipAddress\": \"192.168.10.2\", \"interface\": \"eth1\" , \"defaultGateway\": \"false\"}, { \"name\": \"onap-private-net-test\", \"ipAddress\": \"10.0.100.2\", \"interface\": \"eth2\" , \"defaultGateway\": \"false\"} ]}",
+ "k8s.plugin.opnfv.org/ovnInterfaces": "[{\"ip_address\":\"192.168.10.2/24\", \"mac_address\":\"00:00:00:ed:8c:d1\", \"gateway_ip\": \"192.168.10.1\",\"defaultGateway\":\"false\",\"interface\":\"eth1\"},{\"ip_address\":\"10.0.100.2/16\", \"mac_address\":\"00:00:00:97:31:3f\", \"gateway_ip\": \"10.10.0.1\",\"defaultGateway\":\"false\",\"interface\":\"eth2\"}]",
+ "k8s.v1.cni.cncf.io/networks": "[{\"name\": \"ovn-networkobj\", \"namespace\": \"default\"}]",
+ "k8s.v1.cni.cncf.io/networks-status": "[{\n \"name\": \"cni0\",\n \"interface\": \"virtlet-eth0\",\n \"ips\": [\n \"10.244.64.44\"\n ],\n \"mac\": \"0a:58:0a:f4:40:2c\",\n \"default\": true,\n \"dns\": {}\n},{\n \"name\": \"ovn4nfv-k8s-plugin\",\n \"interface\": \"eth2\",\n \"ips\": [\n \"192.168.10.2\",\n \"10.0.100.2\"\n ],\n \"mac\": \"00:00:00:97:31:3f\",\n \"dns\": {}\n}]",
+ "kubernetes.io/target-runtime": "virtlet.cloud",
+ "release": "test-release"
+ },
+ "creationTimestamp": "2020-09-29T13:36:25Z",
+ "generateName": "test-release-packetgen-5647bfb56-",
+ "labels": {
+ "app": "packetgen",
+ "k8splugin.io/rb-instance-id": "practical_nobel",
+ "pod-template-hash": "5647bfb56",
+ "release": "test-release"
+ },
+ "name": "test-release-packetgen-5647bfb56-ghpbs",
+ "namespace": "plugin-tests-namespace",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "ReplicaSet",
+ "name": "test-release-packetgen-5647bfb56",
+ "uid": "3c227839-04ad-4d16-b9ea-d8f436426de1"
+ }
+ ],
+ "resourceVersion": "10720852",
+ "selfLink": "/api/v1/namespaces/plugin-tests-namespace/pods/test-release-packetgen-5647bfb56-ghpbs",
+ "uid": "74aad8c5-b881-4881-b634-46ad48ccb857"
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "extraRuntime",
+ "operator": "In",
+ "values": [
+ "virtlet"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "image": "virtlet.cloud/ubuntu/16.04:latest",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "packetgen",
+ "resources": {
+ "limits": {
+ "memory": "4Gi"
+ },
+ "requests": {
+ "memory": "4Gi"
+ }
+ },
+ "stdin": true,
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "tty": true,
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "default-token-gsh95",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "enableServiceLinks": true,
+ "nodeName": "localhost",
+ "priority": 0,
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "default",
+ "serviceAccountName": "default",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ }
+ ],
+ "volumes": [
+ {
+ "name": "default-token-gsh95",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "default-token-gsh95"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2020-09-29T13:36:25Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2020-09-29T13:36:25Z",
+ "message": "containers with unready status: [packetgen]",
+ "reason": "ContainersNotReady",
+ "status": "False",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2020-09-29T13:36:25Z",
+ "message": "containers with unready status: [packetgen]",
+ "reason": "ContainersNotReady",
+ "status": "False",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2020-09-29T13:36:25Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "image": "virtlet.cloud/ubuntu/16.04:latest",
+ "imageID": "",
+ "lastState": {},
+ "name": "packetgen",
+ "ready": false,
+ "restartCount": 0,
+ "state": {
+ "waiting": {
+ "reason": "ContainerCreating"
+ }
+ }
+ }
+ ],
+ "hostIP": "192.168.255.3",
+ "phase": "Pending",
+ "qosClass": "Burstable",
+ "startTime": "2020-09-29T13:36:25Z"
+ }
+ }
+ },
+ {
+ "name": "test-release-sink-6546c4f698-dv529",
+ "GVK": {
+ "Group": "",
+ "Version": "",
+ "Kind": ""
+ },
+ "status": {
+ "metadata": {
+ "annotations": {
+ "k8s.plugin.opnfv.org/nfn-network": "{ \"type\": \"ovn4nfv\", \"interface\": [ { \"name\": \"protected-private-net\", \"ipAddress\": \"192.168.20.3\", \"interface\": \"eth1\", \"defaultGateway\": \"false\" }, { \"name\": \"onap-private-net-test\", \"ipAddress\": \"10.10.100.4\", \"interface\": \"eth2\" , \"defaultGateway\": \"false\"} ]}",
+ "k8s.plugin.opnfv.org/ovnInterfaces": "[{\"ip_address\":\"192.168.20.3/24\", \"mac_address\":\"00:00:00:13:40:87\", \"gateway_ip\": \"192.168.20.100\",\"defaultGateway\":\"false\",\"interface\":\"eth1\"},{\"ip_address\":\"10.10.100.4/16\", \"mac_address\":\"00:00:00:49:de:fc\", \"gateway_ip\": \"10.10.0.1\",\"defaultGateway\":\"false\",\"interface\":\"eth2\"}]",
+ "k8s.v1.cni.cncf.io/networks": "[{\"name\": \"ovn-networkobj\", \"namespace\": \"default\"}]",
+ "k8s.v1.cni.cncf.io/networks-status": "[{\n \"name\": \"cni0\",\n \"interface\": \"eth0\",\n \"ips\": [\n \"10.244.64.46\"\n ],\n \"mac\": \"0a:58:0a:f4:40:2e\",\n \"default\": true,\n \"dns\": {}\n},{\n \"name\": \"ovn4nfv-k8s-plugin\",\n \"interface\": \"eth2\",\n \"ips\": [\n \"192.168.20.3\",\n \"10.10.100.4\"\n ],\n \"mac\": \"00:00:00:49:de:fc\",\n \"dns\": {}\n}]"
+ },
+ "creationTimestamp": "2020-09-29T13:36:25Z",
+ "generateName": "test-release-sink-6546c4f698-",
+ "labels": {
+ "app": "sink",
+ "k8splugin.io/rb-instance-id": "practical_nobel",
+ "pod-template-hash": "6546c4f698",
+ "release": "test-release"
+ },
+ "name": "test-release-sink-6546c4f698-dv529",
+ "namespace": "plugin-tests-namespace",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "ReplicaSet",
+ "name": "test-release-sink-6546c4f698",
+ "uid": "72c9da29-af3b-4b5c-a90b-06285ae83429"
+ }
+ ],
+ "resourceVersion": "10720854",
+ "selfLink": "/api/v1/namespaces/plugin-tests-namespace/pods/test-release-sink-6546c4f698-dv529",
+ "uid": "a4e24041-65c9-4b86-8f10-a27a4dba26bb"
+ },
+ "spec": {
+ "containers": [
+ {
+ "envFrom": [
+ {
+ "configMapRef": {
+ "name": "sink-configmap"
+ }
+ }
+ ],
+ "image": "rtsood/onap-vfw-demo-sink:0.2.0",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "sink",
+ "resources": {},
+ "securityContext": {
+ "privileged": true
+ },
+ "stdin": true,
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "tty": true,
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "default-token-gsh95",
+ "readOnly": true
+ }
+ ]
+ },
+ {
+ "image": "electrocucaracha/darkstat:latest",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "darkstat",
+ "ports": [
+ {
+ "containerPort": 667,
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {},
+ "stdin": true,
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "tty": true,
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "default-token-gsh95",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "enableServiceLinks": true,
+ "nodeName": "localhost",
+ "priority": 0,
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "default",
+ "serviceAccountName": "default",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ }
+ ],
+ "volumes": [
+ {
+ "name": "default-token-gsh95",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "default-token-gsh95"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2020-09-29T13:36:25Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2020-09-29T13:36:33Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2020-09-29T13:36:33Z",
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2020-09-29T13:36:25Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://87c9af78735400606d70ccd9cd85e2545e43cb3be9c30d4b4fe173da0062dda9",
+ "image": "electrocucaracha/darkstat:latest",
+ "imageID": "docker-pullable://electrocucaracha/darkstat@sha256:a6764fcc2e15f6156ac0e56f1d220b98970f2d4da9005bae99fb518cfd2f9c25",
+ "lastState": {},
+ "name": "darkstat",
+ "ready": true,
+ "restartCount": 0,
+ "started": true,
+ "state": {
+ "running": {
+ "startedAt": "2020-09-29T13:36:33Z"
+ }
+ }
+ },
+ {
+ "containerID": "docker://a004f95e7c7a681c7f400852aade096e3ffd75b7efc64e12e65b4ce1fe326577",
+ "image": "rtsood/onap-vfw-demo-sink:0.2.0",
+ "imageID": "docker-pullable://rtsood/onap-vfw-demo-sink@sha256:15b7abb0b67a3804ea5f954254633f996fc99c680b09d86a6cf15c3d7b14ab16",
+ "lastState": {},
+ "name": "sink",
+ "ready": true,
+ "restartCount": 0,
+ "started": true,
+ "state": {
+ "running": {
+ "startedAt": "2020-09-29T13:36:32Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "192.168.255.3",
+ "phase": "Running",
+ "podIP": "10.244.64.46",
+ "podIPs": [
+ {
+ "ip": "10.244.64.46"
+ }
+ ],
+ "qosClass": "BestEffort",
+ "startTime": "2020-09-29T13:36:25Z"
+ }
+ }
+ }
+ ]
+}
diff --git a/docs/files/vFW_CNF_CDS/vFW_CNF_CDS_Flow.png b/docs/files/vFW_CNF_CDS/vFW_CNF_CDS_Flow.png
new file mode 100755
index 000000000..ca2d1239a
--- /dev/null
+++ b/docs/files/vFW_CNF_CDS/vFW_CNF_CDS_Flow.png
Binary files differ
diff --git a/docs/files/vFW_CNF_CDS/vFW_Instance_In_Kubernetes.png b/docs/files/vFW_CNF_CDS/vFW_Instance_In_Kubernetes.png
new file mode 100755
index 000000000..0f6118b1b
--- /dev/null
+++ b/docs/files/vFW_CNF_CDS/vFW_Instance_In_Kubernetes.png
Binary files differ
diff --git a/docs/files/vFW_CNF_CDS/vfw-generic-vnf-aai.json b/docs/files/vFW_CNF_CDS/vfw-generic-vnf-aai.json
new file mode 100644
index 000000000..89b7f7a2d
--- /dev/null
+++ b/docs/files/vFW_CNF_CDS/vfw-generic-vnf-aai.json
@@ -0,0 +1,167 @@
+{
+ "vnf-id": "d4af22f0-70e6-47ac-8d5b-4d645fc25757",
+ "vnf-name": "VF_vfw_k8s_demo_CNF_LR_1",
+ "vnf-type": "vfw_k8s_demo_CNF_LR_1/null",
+ "service-id": "vfw_k8s_demo_CNF_LR_1",
+ "prov-status": "NVTPROV",
+ "orchestration-status": "Active",
+ "in-maint": false,
+ "is-closed-loop-disabled": false,
+ "resource-version": "1635943409675",
+ "model-invariant-id": "a5c188d5-7f0a-44e9-bd92-4a60781cb2cf",
+ "model-version-id": "033d9730-549a-4ff8-b166-1581fb73aa08",
+ "model-customization-id": "f107d24c-0a2f-4eb9-96d3-7a631c973cfd",
+ "relationship-list": {
+ "relationship": [
+ {
+ "related-to": "tenant",
+ "relationship-label": "org.onap.relationships.inventory.BelongsTo",
+ "related-link": "/aai/v21/cloud-infrastructure/cloud-regions/cloud-region/K8sCloudOwner/kud-lr/tenants/tenant/3444a566-2717-4d85-83bd-45c104657173",
+ "relationship-data": [
+ {
+ "relationship-key": "cloud-region.cloud-owner",
+ "relationship-value": "K8sCloudOwner"
+ },
+ {
+ "relationship-key": "cloud-region.cloud-region-id",
+ "relationship-value": "kud-lr"
+ },
+ {
+ "relationship-key": "tenant.tenant-id",
+ "relationship-value": "3444a566-2717-4d85-83bd-45c104657173"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "tenant.tenant-name",
+ "property-value": "kud-lr-tenant"
+ }
+ ]
+ },
+ {
+ "related-to": "cloud-region",
+ "relationship-label": "org.onap.relationships.inventory.LocatedIn",
+ "related-link": "/aai/v21/cloud-infrastructure/cloud-regions/cloud-region/K8sCloudOwner/kud-lr",
+ "relationship-data": [
+ {
+ "relationship-key": "cloud-region.cloud-owner",
+ "relationship-value": "K8sCloudOwner"
+ },
+ {
+ "relationship-key": "cloud-region.cloud-region-id",
+ "relationship-value": "kud-lr"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "cloud-region.owner-defined-type",
+ "property-value": "t1"
+ }
+ ]
+ },
+ {
+ "related-to": "service-instance",
+ "relationship-label": "org.onap.relationships.inventory.ComposedOf",
+ "related-link": "/aai/v21/business/customers/customer/customer_cnf/service-subscriptions/service-subscription/vfw_k8s_demo_CNF_LR_1/service-instances/service-instance/93b89241-104b-40a7-8030-32e3b6eff459",
+ "relationship-data": [
+ {
+ "relationship-key": "customer.global-customer-id",
+ "relationship-value": "customer_cnf"
+ },
+ {
+ "relationship-key": "service-subscription.service-type",
+ "relationship-value": "vfw_k8s_demo_CNF_LR_1"
+ },
+ {
+ "relationship-key": "service-instance.service-instance-id",
+ "relationship-value": "93b89241-104b-40a7-8030-32e3b6eff459"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "service-instance.service-instance-name",
+ "property-value": "INSTANCE_vfw_k8s_demo_CNF_LR_1"
+ }
+ ]
+ },
+ {
+ "related-to": "platform",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v21/business/platforms/platform/%3Conapsdk.vid.vid.Platform%20object%20at%200x7f48eddc2c40%3E",
+ "relationship-data": [
+ {
+ "relationship-key": "platform.platform-name",
+ "relationship-value": "<onapsdk.vid.vid.Platform object at 0x7f48eddc2c40>"
+ }
+ ]
+ },
+ {
+ "related-to": "line-of-business",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v21/business/lines-of-business/line-of-business/%3Conapsdk.vid.vid.LineOfBusiness%20object%20at%200x7f48eddc2040%3E",
+ "relationship-data": [
+ {
+ "relationship-key": "line-of-business.line-of-business-name",
+ "relationship-value": "<onapsdk.vid.vid.LineOfBusiness object at 0x7f48eddc2040>"
+ }
+ ]
+ }
+ ]
+ },
+ "vf-modules": {
+ "vf-module": [
+ {
+ "vf-module-id": "abb282c8-c932-45dc-9c62-01938eab32fa",
+ "vf-module-name": "INSTANCE_vfw_k8s_demo_CNF_LR_1_vf_vfw_k8s_demo_cnf_lr_10..VfVfwK8sDemoCnfLr1..helm_base_template..module-4",
+ "heat-stack-id": "nifty_lichterman",
+ "orchestration-status": "Active",
+ "is-base-vf-module": false,
+ "automated-assignment": false,
+ "resource-version": "1635943380124",
+ "model-invariant-id": "7b0bcafb-6437-461c-bb48-7240f67ee718",
+ "model-version-id": "5cc1eda3-24e2-4e5e-a4a0-cb18477834f6",
+ "model-customization-id": "b80dedcd-902e-4c75-939a-310a68acb440",
+ "module-index": 0
+ },
+ {
+ "vf-module-id": "314795d7-6005-4462-a9fe-7006538e3ff9",
+ "vf-module-name": "INSTANCE_vfw_k8s_demo_CNF_LR_1_vf_vfw_k8s_demo_cnf_lr_10..VfVfwK8sDemoCnfLr1..helm_vpkg..module-2",
+ "heat-stack-id": "dazzling_nightingale",
+ "orchestration-status": "Active",
+ "is-base-vf-module": false,
+ "automated-assignment": false,
+ "resource-version": "1635943396304",
+ "model-invariant-id": "8f3652a6-af23-4d8c-9aa2-3e8d6f1a5b6e",
+ "model-version-id": "f4e54571-7cc7-4a67-b973-1851b8e540a7",
+ "model-customization-id": "5f1445b0-9ef2-4eb3-8051-a445fa35f877",
+ "module-index": 0
+ },
+ {
+ "vf-module-id": "56f3d02b-5a32-4a97-9e7b-d3c0094c07e8",
+ "vf-module-name": "INSTANCE_vfw_k8s_demo_CNF_LR_1_vf_vfw_k8s_demo_cnf_lr_10..VfVfwK8sDemoCnfLr1..helm_vsn..module-1",
+ "heat-stack-id": "sharp_torvalds",
+ "orchestration-status": "Active",
+ "is-base-vf-module": false,
+ "automated-assignment": false,
+ "resource-version": "1635943404667",
+ "model-invariant-id": "46a8e556-6c5f-4acd-9cfc-ea29b51c919e",
+ "model-version-id": "aa1ecbc5-990e-4ed0-a03e-a135f21763d3",
+ "model-customization-id": "0e61ce72-5eef-4fd7-b790-2107b67044f6",
+ "module-index": 0
+ },
+ {
+ "vf-module-id": "d56c54b9-40cc-4b7a-abce-50454571e39d",
+ "vf-module-name": "INSTANCE_vfw_k8s_demo_CNF_LR_1_vf_vfw_k8s_demo_cnf_lr_10..VfVfwK8sDemoCnfLr1..helm_vfw..module-3",
+ "heat-stack-id": "brave_brattain",
+ "orchestration-status": "Active",
+ "is-base-vf-module": false,
+ "automated-assignment": false,
+ "resource-version": "1635943387739",
+ "model-invariant-id": "89f47572-1d25-44b4-a6e0-52d0421a0980",
+ "model-version-id": "12a89df1-9fad-4045-a90e-dcb64264eed4",
+ "model-customization-id": "c81f3c71-3f42-4831-b3b2-7ceffb567795",
+ "module-index": 0
+ }
+ ]
+ }
+}
diff --git a/docs/files/vFW_CNF_CDS/vpkg-vf-module-aai.json b/docs/files/vFW_CNF_CDS/vpkg-vf-module-aai.json
new file mode 100644
index 000000000..84d62ca6c
--- /dev/null
+++ b/docs/files/vFW_CNF_CDS/vpkg-vf-module-aai.json
@@ -0,0 +1,133 @@
+{
+ "vf-module-id": "314795d7-6005-4462-a9fe-7006538e3ff9",
+ "vf-module-name": "INSTANCE_vfw_k8s_demo_CNF_LR_1_vf_vfw_k8s_demo_cnf_lr_10..VfVfwK8sDemoCnfLr1..helm_vpkg..module-2",
+ "heat-stack-id": "dazzling_nightingale",
+ "orchestration-status": "Active",
+ "is-base-vf-module": false,
+ "automated-assignment": false,
+ "resource-version": "1635943396304",
+ "model-invariant-id": "8f3652a6-af23-4d8c-9aa2-3e8d6f1a5b6e",
+ "model-version-id": "f4e54571-7cc7-4a67-b973-1851b8e540a7",
+ "model-customization-id": "5f1445b0-9ef2-4eb3-8051-a445fa35f877",
+ "module-index": 0,
+ "relationship-list": {
+ "relationship": [
+ {
+ "related-to": "k8s-resource",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/cloud-infrastructure/cloud-regions/cloud-region/K8sCloudOwner/kud-lr/tenants/tenant/3444a566-2717-4d85-83bd-45c104657173/k8s-resources/k8s-resource/e73732351195c8c10d28413ddff1d968bd53b0b0e395c24b3b0fcd39f46ea730",
+ "relationship-data": [
+ {
+ "relationship-key": "cloud-region.cloud-owner",
+ "relationship-value": "K8sCloudOwner"
+ },
+ {
+ "relationship-key": "cloud-region.cloud-region-id",
+ "relationship-value": "kud-lr"
+ },
+ {
+ "relationship-key": "tenant.tenant-id",
+ "relationship-value": "3444a566-2717-4d85-83bd-45c104657173"
+ },
+ {
+ "relationship-key": "k8s-resource.id",
+ "relationship-value": "e73732351195c8c10d28413ddff1d968bd53b0b0e395c24b3b0fcd39f46ea730"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "k8s-resource.name",
+ "property-value": "vfw-1-vpkg-mgmt"
+ }
+ ]
+ },
+ {
+ "related-to": "k8s-resource",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/cloud-infrastructure/cloud-regions/cloud-region/K8sCloudOwner/kud-lr/tenants/tenant/3444a566-2717-4d85-83bd-45c104657173/k8s-resources/k8s-resource/f65235da9cb098588b2db0c9e7da1ccb00954268fba6bd621bb9ef0b48cd717f",
+ "relationship-data": [
+ {
+ "relationship-key": "cloud-region.cloud-owner",
+ "relationship-value": "K8sCloudOwner"
+ },
+ {
+ "relationship-key": "cloud-region.cloud-region-id",
+ "relationship-value": "kud-lr"
+ },
+ {
+ "relationship-key": "tenant.tenant-id",
+ "relationship-value": "3444a566-2717-4d85-83bd-45c104657173"
+ },
+ {
+ "relationship-key": "k8s-resource.id",
+ "relationship-value": "f65235da9cb098588b2db0c9e7da1ccb00954268fba6bd621bb9ef0b48cd717f"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "k8s-resource.name",
+ "property-value": "vfw-1-vpkg"
+ }
+ ]
+ },
+ {
+ "related-to": "k8s-resource",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/cloud-infrastructure/cloud-regions/cloud-region/K8sCloudOwner/kud-lr/tenants/tenant/3444a566-2717-4d85-83bd-45c104657173/k8s-resources/k8s-resource/87cbdb83bf436703bdb9823e07e1498a7b3ec7fb12ba14193aadd4630649e0ae",
+ "relationship-data": [
+ {
+ "relationship-key": "cloud-region.cloud-owner",
+ "relationship-value": "K8sCloudOwner"
+ },
+ {
+ "relationship-key": "cloud-region.cloud-region-id",
+ "relationship-value": "kud-lr"
+ },
+ {
+ "relationship-key": "tenant.tenant-id",
+ "relationship-value": "3444a566-2717-4d85-83bd-45c104657173"
+ },
+ {
+ "relationship-key": "k8s-resource.id",
+ "relationship-value": "87cbdb83bf436703bdb9823e07e1498a7b3ec7fb12ba14193aadd4630649e0ae"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "k8s-resource.name",
+ "property-value": "vfw-1-vpkg-c6bdb954c-mlpz9"
+ }
+ ]
+ },
+ {
+ "related-to": "k8s-resource",
+ "relationship-label": "org.onap.relationships.inventory.Uses",
+ "related-link": "/aai/v24/cloud-infrastructure/cloud-regions/cloud-region/K8sCloudOwner/kud-lr/tenants/tenant/3444a566-2717-4d85-83bd-45c104657173/k8s-resources/k8s-resource/5538b19871da1fd05b82366c38cbbe88bae4d3444b6a21018f83787327958617",
+ "relationship-data": [
+ {
+ "relationship-key": "cloud-region.cloud-owner",
+ "relationship-value": "K8sCloudOwner"
+ },
+ {
+ "relationship-key": "cloud-region.cloud-region-id",
+ "relationship-value": "kud-lr"
+ },
+ {
+ "relationship-key": "tenant.tenant-id",
+ "relationship-value": "3444a566-2717-4d85-83bd-45c104657173"
+ },
+ {
+ "relationship-key": "k8s-resource.id",
+ "relationship-value": "5538b19871da1fd05b82366c38cbbe88bae4d3444b6a21018f83787327958617"
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "k8s-resource.name",
+ "property-value": "vfw-1-vpkg-configmap"
+ }
+ ]
+ }
+ ]
+ }
+}
diff --git a/docs/files/vLBMS_report.json b/docs/files/vLBMS_report.json
index b8688bba9..d06f9cf5c 100644
--- a/docs/files/vLBMS_report.json
+++ b/docs/files/vLBMS_report.json
@@ -10118,4 +10118,4 @@
"errors": []
}
]
-} \ No newline at end of file
+}
diff --git a/docs/files/vcpe_tosca/create_image.png b/docs/files/vcpe_tosca/create_image.png
new file mode 100644
index 000000000..44b226231
--- /dev/null
+++ b/docs/files/vcpe_tosca/create_image.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/create_project.png b/docs/files/vcpe_tosca/create_project.png
new file mode 100644
index 000000000..814e4089d
--- /dev/null
+++ b/docs/files/vcpe_tosca/create_project.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/create_user.png b/docs/files/vcpe_tosca/create_user.png
new file mode 100644
index 000000000..800247a94
--- /dev/null
+++ b/docs/files/vcpe_tosca/create_user.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/customer_service.png b/docs/files/vcpe_tosca/customer_service.png
new file mode 100644
index 000000000..0bd69fadd
--- /dev/null
+++ b/docs/files/vcpe_tosca/customer_service.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/image.png b/docs/files/vcpe_tosca/image.png
new file mode 100644
index 000000000..05ac77290
--- /dev/null
+++ b/docs/files/vcpe_tosca/image.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/manage_project_user.png b/docs/files/vcpe_tosca/manage_project_user.png
new file mode 100644
index 000000000..3deb7b05b
--- /dev/null
+++ b/docs/files/vcpe_tosca/manage_project_user.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/ns_active.png b/docs/files/vcpe_tosca/ns_active.png
new file mode 100644
index 000000000..4d24e29a5
--- /dev/null
+++ b/docs/files/vcpe_tosca/ns_active.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/ns_create.png b/docs/files/vcpe_tosca/ns_create.png
new file mode 100644
index 000000000..7fe2fd91b
--- /dev/null
+++ b/docs/files/vcpe_tosca/ns_create.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/ns_create_input.png b/docs/files/vcpe_tosca/ns_create_input.png
new file mode 100644
index 000000000..f6d2c88a8
--- /dev/null
+++ b/docs/files/vcpe_tosca/ns_create_input.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/ns_delete.png b/docs/files/vcpe_tosca/ns_delete.png
new file mode 100644
index 000000000..931334e81
--- /dev/null
+++ b/docs/files/vcpe_tosca/ns_delete.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/ns_deleted.png b/docs/files/vcpe_tosca/ns_deleted.png
new file mode 100644
index 000000000..2663fbe97
--- /dev/null
+++ b/docs/files/vcpe_tosca/ns_deleted.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/ns_deleting.png b/docs/files/vcpe_tosca/ns_deleting.png
new file mode 100644
index 000000000..977d771d0
--- /dev/null
+++ b/docs/files/vcpe_tosca/ns_deleting.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/ns_instance.png b/docs/files/vcpe_tosca/ns_instance.png
new file mode 100644
index 000000000..32e47da91
--- /dev/null
+++ b/docs/files/vcpe_tosca/ns_instance.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/ns_package_list.png b/docs/files/vcpe_tosca/ns_package_list.png
new file mode 100644
index 000000000..77f024fd6
--- /dev/null
+++ b/docs/files/vcpe_tosca/ns_package_list.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/ns_package_onboard.png b/docs/files/vcpe_tosca/ns_package_onboard.png
new file mode 100644
index 000000000..0ecce911e
--- /dev/null
+++ b/docs/files/vcpe_tosca/ns_package_onboard.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/ns_vnf_heal.png b/docs/files/vcpe_tosca/ns_vnf_heal.png
new file mode 100644
index 000000000..aded069f9
--- /dev/null
+++ b/docs/files/vcpe_tosca/ns_vnf_heal.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/ns_vnf_healed.png b/docs/files/vcpe_tosca/ns_vnf_healed.png
new file mode 100644
index 000000000..7fa669ce1
--- /dev/null
+++ b/docs/files/vcpe_tosca/ns_vnf_healed.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/ns_vnf_healing.png b/docs/files/vcpe_tosca/ns_vnf_healing.png
new file mode 100644
index 000000000..140e00b74
--- /dev/null
+++ b/docs/files/vcpe_tosca/ns_vnf_healing.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/ns_vnf_list.png b/docs/files/vcpe_tosca/ns_vnf_list.png
new file mode 100644
index 000000000..77af139da
--- /dev/null
+++ b/docs/files/vcpe_tosca/ns_vnf_list.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/sdc.png b/docs/files/vcpe_tosca/sdc.png
new file mode 100644
index 000000000..98e5ffaa6
--- /dev/null
+++ b/docs/files/vcpe_tosca/sdc.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/vim.png b/docs/files/vcpe_tosca/vim.png
new file mode 100644
index 000000000..e9cb0cc44
--- /dev/null
+++ b/docs/files/vcpe_tosca/vim.png
Binary files differ
diff --git a/docs/files/vcpe_tosca/vnfm.png b/docs/files/vcpe_tosca/vnfm.png
new file mode 100644
index 000000000..6315b9a14
--- /dev/null
+++ b/docs/files/vcpe_tosca/vnfm.png
Binary files differ
diff --git a/docs/files/vfw-1-preload.json b/docs/files/vfw-1-preload.json
index be42a3bd4..f4207257d 100644
--- a/docs/files/vfw-1-preload.json
+++ b/docs/files/vfw-1-preload.json
@@ -1,141 +1,176 @@
{
- "input": {
- "request-information": {
- "notification-url": "openecomp.org",
- "order-number": "1",
- "order-version": "1",
- "request-action": "PreloadVNFRequest",
- "request-id": "robot12"
- },
- "sdnc-request-header": {
- "svc-action": "reserve",
- "svc-notification-url": "http:\/\/openecomp.org:8080\/adapters\/rest\/SDNCNotify",
- "svc-request-id": "robot12"
- },
- "vnf-topology-information": {
- "vnf-assignments": {
- "availability-zones": [],
- "vnf-networks": [],
- "vnf-vms": []
- },
- "vnf-parameters":
- [{
- "vnf-parameter-name": "unprotected_private_net_id",
- "vnf-parameter-value": "unprotected_net_dt"
- }, {
- "vnf-parameter-name": "unprotected_private_subnet_id",
- "vnf-parameter-value": "unprotected_subnet_dt"
- }, {
- "vnf-parameter-name": "unprotected_private_net_cidr",
- "vnf-parameter-value": "192.168.10.0/24"
- }, {
- "vnf-parameter-name": "protected_private_net_id",
- "vnf-parameter-value": "protected_net_dt"
- }, {
- "vnf-parameter-name": "protected_private_net_cidr",
- "vnf-parameter-value": "192.168.20.0/24"
- }, {
- "vnf-parameter-name": "protected_private_subnet_id",
- "vnf-parameter-value": "protected_subnet_dt"
- }, {
- "vnf-parameter-name": "key_name",
- "vnf-parameter-value": "vfw_key"
- }, {
- "vnf-parameter-name": "cloud_env",
- "vnf-parameter-value": "openstack"
- }, {
- "vnf-parameter-name": "vsn_name_0",
- "vnf-parameter-value": "vfw-vsn-1-dt"
- }, {
- "vnf-parameter-name": "onap_private_net_id",
- "vnf-parameter-value": "oam_onap_y3id"
- }, {
- "vnf-parameter-name": "onap_private_subnet_id",
- "vnf-parameter-value": "oam_onap_y3id"
- }, {
- "vnf-parameter-name": "onap_private_net_cidr",
- "vnf-parameter-value": "10.0.0.0/16"
- }, {
- "vnf-parameter-name": "ext_private_net_id",
- "vnf-parameter-value": "onap_oam_ext"
- }, {
- "vnf-parameter-name": "ext_private_subnet_id",
- "vnf-parameter-value": "onap_oam_ext_sub"
- }, {
- "vnf-parameter-name": "ext_private_net_cidr",
- "vnf-parameter-value": "10.100.0.0/16"
- }, {
- "vnf-parameter-name": "nexus_artifact_repo",
- "vnf-parameter-value": "https://nexus.onap.org"
- }, {
- "vnf-parameter-name": "vfw_name_0",
- "vnf-parameter-value": "vfw-vfw-1-dt"
- }, {
- "vnf-parameter-name": "dcae_collector_port",
- "vnf-parameter-value": "8080"
- }, {
- "vnf-parameter-name": "public_net_id",
- "vnf-parameter-value": "external"
- }, {
- "vnf-parameter-name": "image_name",
- "vnf-parameter-value": "ubuntu-14-04-cloud-amd64"
- }, {
- "vnf-parameter-name": "flavor_name",
- "vnf-parameter-value": "m1.medium"
- }, {
- "vnf-parameter-name": "install_script_version",
- "vnf-parameter-value": "1.4.0"
- }, {
- "vnf-parameter-name": "vfw_private_ip_0",
- "vnf-parameter-value": "192.168.10.100"
- }, {
- "vnf-parameter-name": "vfw_private_ip_1",
- "vnf-parameter-value": "192.168.20.100"
- }, {
- "vnf-parameter-name": "vfw_private_ip_2",
- "vnf-parameter-value": "10.0.110.1"
- }, {
- "vnf-parameter-name": "vfw_private_ip_3",
- "vnf-parameter-value": "10.100.100.1"
- }, {
- "vnf-parameter-name": "vpg_private_ip_0",
- "vnf-parameter-value": "192.168.10.200"
- }, {
- "vnf-parameter-name": "vpg_private_ip_1",
- "vnf-parameter-value": "10.0.110.2"
- }, {
- "vnf-parameter-name": "vpg_private_ip_2",
- "vnf-parameter-value": "10.100.100.2"
- }, {
- "vnf-parameter-name": "vsn_private_ip_1",
- "vnf-parameter-value": "10.0.110.3"
- }, {
- "vnf-parameter-name": "vsn_private_ip_0",
- "vnf-parameter-value": "192.168.20.250"
- }, {
- "vnf-parameter-name": "vsn_private_ip_2",
- "vnf-parameter-value": "10.100.100.3"
- }, {
- "vnf-parameter-name": "demo_artifacts_version",
- "vnf-parameter-value": "1.4.0"
- }, {
- "vnf-parameter-name": "pub_key",
- "vnf-parameter-value": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6KbTxfT368DNBzLyfTYJ0INI7mkRXc/xMpx349TXGBTQ06g2toN/p9OM188EGZd/qERRXcdnKY8lYULVIXZEoQbXTus2gTQWXy1zn5nhSkhste3L1uG+sjHySQHBj5NyOC32iNehRNu03bHi8XRBAOHZsXx6JpkltLXgWqQI91+h9i7I/4trpPyODtzz1DLIkbXv9woncwllKf4bmm+Bk6/OJKHuAlBdCLL5diZqODEZlKOyPPC9c07a835sskMgbscPrvBQigZFRNerjQlFXXCHjRwoBMvfS7kD7/R/K1fmSLtH0w4VEp5iwS9SIezgEeO2aDFPucmYG4MQDGzAz"
- }, {
- "vnf-parameter-name": "dcae_collector_ip",
- "vnf-parameter-value": "10.0.4.1"
- }, {
- "vnf-parameter-name": "sec_group",
- "vnf-parameter-value": "onap_sg_y3id"
- }
- ],
- "vnf-topology-identifier": {
- "service-type": "1c38ef10-0f14-4d78-876c-3cc10ff4e535",
- "vnf-type": "VfwsnkDistributetraffic..base_vfw..module-0",
- "generic-vnf-name": "vfw-vsnk-dt-1",
- "generic-vnf-type": "vFWSNK-DistributeTraffic 0",
- "vnf-name": "vfw-dt-module-1"
- }
- }
- }
+ "input": {
+ "request-information": {
+ "notification-url": "openecomp.org",
+ "order-number": "1",
+ "order-version": "1",
+ "request-action": "PreloadVNFRequest",
+ "request-id": "robot12"
+ },
+ "sdnc-request-header": {
+ "svc-action": "reserve",
+ "svc-notification-url": "http://openecomp.org:8080/adapters/rest/SDNCNotify",
+ "svc-request-id": "robot12"
+ },
+ "vnf-topology-information": {
+ "vnf-assignments": {
+ "availability-zones": [],
+ "vnf-networks": [],
+ "vnf-vms": []
+ },
+ "vnf-parameters": [
+ {
+ "vnf-parameter-name": "unprotected_private_net_id",
+ "vnf-parameter-value": "unprotected_net_dt"
+ },
+ {
+ "vnf-parameter-name": "unprotected_private_subnet_id",
+ "vnf-parameter-value": "unprotected_subnet_dt"
+ },
+ {
+ "vnf-parameter-name": "unprotected_private_net_cidr",
+ "vnf-parameter-value": "192.168.10.0/24"
+ },
+ {
+ "vnf-parameter-name": "protected_private_net_id",
+ "vnf-parameter-value": "protected_net_dt"
+ },
+ {
+ "vnf-parameter-name": "protected_private_net_cidr",
+ "vnf-parameter-value": "192.168.20.0/24"
+ },
+ {
+ "vnf-parameter-name": "protected_private_subnet_id",
+ "vnf-parameter-value": "protected_subnet_dt"
+ },
+ {
+ "vnf-parameter-name": "key_name",
+ "vnf-parameter-value": "vfw_key"
+ },
+ {
+ "vnf-parameter-name": "cloud_env",
+ "vnf-parameter-value": "openstack"
+ },
+ {
+ "vnf-parameter-name": "vsn_name_0",
+ "vnf-parameter-value": "vfw-vsn-1-dt"
+ },
+ {
+ "vnf-parameter-name": "onap_private_net_id",
+ "vnf-parameter-value": "oam_onap_y3id"
+ },
+ {
+ "vnf-parameter-name": "onap_private_subnet_id",
+ "vnf-parameter-value": "oam_onap_y3id"
+ },
+ {
+ "vnf-parameter-name": "onap_private_net_cidr",
+ "vnf-parameter-value": "10.0.0.0/16"
+ },
+ {
+ "vnf-parameter-name": "ext_private_net_id",
+ "vnf-parameter-value": "onap_oam_ext"
+ },
+ {
+ "vnf-parameter-name": "ext_private_subnet_id",
+ "vnf-parameter-value": "onap_oam_ext_sub"
+ },
+ {
+ "vnf-parameter-name": "ext_private_net_cidr",
+ "vnf-parameter-value": "10.100.0.0/16"
+ },
+ {
+ "vnf-parameter-name": "nexus_artifact_repo",
+ "vnf-parameter-value": "https://nexus.onap.org"
+ },
+ {
+ "vnf-parameter-name": "vfw_name_0",
+ "vnf-parameter-value": "vfw-vfw-1-dt"
+ },
+ {
+ "vnf-parameter-name": "dcae_collector_port",
+ "vnf-parameter-value": "8080"
+ },
+ {
+ "vnf-parameter-name": "public_net_id",
+ "vnf-parameter-value": "external"
+ },
+ {
+ "vnf-parameter-name": "image_name",
+ "vnf-parameter-value": "ubuntu-14-04-cloud-amd64"
+ },
+ {
+ "vnf-parameter-name": "flavor_name",
+ "vnf-parameter-value": "m1.medium"
+ },
+ {
+ "vnf-parameter-name": "install_script_version",
+ "vnf-parameter-value": "1.4.0"
+ },
+ {
+ "vnf-parameter-name": "vfw_private_ip_0",
+ "vnf-parameter-value": "192.168.10.100"
+ },
+ {
+ "vnf-parameter-name": "vfw_private_ip_1",
+ "vnf-parameter-value": "192.168.20.100"
+ },
+ {
+ "vnf-parameter-name": "vfw_private_ip_2",
+ "vnf-parameter-value": "10.0.110.1"
+ },
+ {
+ "vnf-parameter-name": "vfw_private_ip_3",
+ "vnf-parameter-value": "10.100.100.1"
+ },
+ {
+ "vnf-parameter-name": "vpg_private_ip_0",
+ "vnf-parameter-value": "192.168.10.200"
+ },
+ {
+ "vnf-parameter-name": "vpg_private_ip_1",
+ "vnf-parameter-value": "10.0.110.2"
+ },
+ {
+ "vnf-parameter-name": "vpg_private_ip_2",
+ "vnf-parameter-value": "10.100.100.2"
+ },
+ {
+ "vnf-parameter-name": "vsn_private_ip_1",
+ "vnf-parameter-value": "10.0.110.3"
+ },
+ {
+ "vnf-parameter-name": "vsn_private_ip_0",
+ "vnf-parameter-value": "192.168.20.250"
+ },
+ {
+ "vnf-parameter-name": "vsn_private_ip_2",
+ "vnf-parameter-value": "10.100.100.3"
+ },
+ {
+ "vnf-parameter-name": "demo_artifacts_version",
+ "vnf-parameter-value": "1.4.0"
+ },
+ {
+ "vnf-parameter-name": "pub_key",
+ "vnf-parameter-value": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6KbTxfT368DNBzLyfTYJ0INI7mkRXc/xMpx349TXGBTQ06g2toN/p9OM188EGZd/qERRXcdnKY8lYULVIXZEoQbXTus2gTQWXy1zn5nhSkhste3L1uG+sjHySQHBj5NyOC32iNehRNu03bHi8XRBAOHZsXx6JpkltLXgWqQI91+h9i7I/4trpPyODtzz1DLIkbXv9woncwllKf4bmm+Bk6/OJKHuAlBdCLL5diZqODEZlKOyPPC9c07a835sskMgbscPrvBQigZFRNerjQlFXXCHjRwoBMvfS7kD7/R/K1fmSLtH0w4VEp5iwS9SIezgEeO2aDFPucmYG4MQDGzAz"
+ },
+ {
+ "vnf-parameter-name": "dcae_collector_ip",
+ "vnf-parameter-value": "10.0.4.1"
+ },
+ {
+ "vnf-parameter-name": "sec_group",
+ "vnf-parameter-value": "onap_sg_y3id"
+ }
+ ],
+ "vnf-topology-identifier": {
+ "service-type": "1c38ef10-0f14-4d78-876c-3cc10ff4e535",
+ "vnf-type": "VfwsnkDistributetraffic..base_vfw..module-0",
+ "generic-vnf-name": "vfw-vsnk-dt-1",
+ "generic-vnf-type": "vFWSNK-DistributeTraffic 0",
+ "vnf-name": "vfw-dt-module-1"
+ }
+ }
+ }
}
diff --git a/docs/files/vfw-2-preload.json b/docs/files/vfw-2-preload.json
index 17b473efa..956aec459 100644
--- a/docs/files/vfw-2-preload.json
+++ b/docs/files/vfw-2-preload.json
@@ -1,141 +1,176 @@
{
- "input": {
- "request-information": {
- "notification-url": "openecomp.org",
- "order-number": "1",
- "order-version": "1",
- "request-action": "PreloadVNFRequest",
- "request-id": "robot12"
- },
- "sdnc-request-header": {
- "svc-action": "reserve",
- "svc-notification-url": "http:\/\/openecomp.org:8080\/adapters\/rest\/SDNCNotify",
- "svc-request-id": "robot12"
- },
- "vnf-topology-information": {
- "vnf-assignments": {
- "availability-zones": [],
- "vnf-networks": [],
- "vnf-vms": []
- },
- "vnf-parameters":
- [{
- "vnf-parameter-name": "unprotected_private_net_id",
- "vnf-parameter-value": "unprotected_net_dt"
- }, {
- "vnf-parameter-name": "unprotected_private_subnet_id",
- "vnf-parameter-value": "unprotected_subnet_dt"
- }, {
- "vnf-parameter-name": "unprotected_private_net_cidr",
- "vnf-parameter-value": "192.168.10.0/24"
- }, {
- "vnf-parameter-name": "protected_private_net_id",
- "vnf-parameter-value": "protected_net_dt"
- }, {
- "vnf-parameter-name": "protected_private_net_cidr",
- "vnf-parameter-value": "192.168.20.0/24"
- }, {
- "vnf-parameter-name": "protected_private_subnet_id",
- "vnf-parameter-value": "protected_subnet_dt"
- }, {
- "vnf-parameter-name": "key_name",
- "vnf-parameter-value": "vfw_key"
- }, {
- "vnf-parameter-name": "cloud_env",
- "vnf-parameter-value": "openstack"
- }, {
- "vnf-parameter-name": "vsn_name_0",
- "vnf-parameter-value": "vfw-vsn-2-dt"
- }, {
- "vnf-parameter-name": "onap_private_net_id",
- "vnf-parameter-value": "oam_onap_y3id"
- }, {
- "vnf-parameter-name": "onap_private_subnet_id",
- "vnf-parameter-value": "oam_onap_y3id"
- }, {
- "vnf-parameter-name": "onap_private_net_cidr",
- "vnf-parameter-value": "10.0.0.0/16"
- }, {
- "vnf-parameter-name": "ext_private_net_id",
- "vnf-parameter-value": "onap_oam_ext"
- }, {
- "vnf-parameter-name": "ext_private_subnet_id",
- "vnf-parameter-value": "onap_oam_ext_sub"
- }, {
- "vnf-parameter-name": "ext_private_net_cidr",
- "vnf-parameter-value": "10.100.0.0/16"
- }, {
- "vnf-parameter-name": "nexus_artifact_repo",
- "vnf-parameter-value": "https://nexus.onap.org"
- }, {
- "vnf-parameter-name": "vfw_name_0",
- "vnf-parameter-value": "vfw-vfw-2-dt"
- }, {
- "vnf-parameter-name": "dcae_collector_port",
- "vnf-parameter-value": "8080"
- }, {
- "vnf-parameter-name": "public_net_id",
- "vnf-parameter-value": "external"
- }, {
- "vnf-parameter-name": "image_name",
- "vnf-parameter-value": "ubuntu-14-04-cloud-amd64"
- }, {
- "vnf-parameter-name": "flavor_name",
- "vnf-parameter-value": "m1.medium"
- }, {
- "vnf-parameter-name": "install_script_version",
- "vnf-parameter-value": "1.4.0"
- }, {
- "vnf-parameter-name": "vfw_private_ip_0",
- "vnf-parameter-value": "192.168.10.110"
- }, {
- "vnf-parameter-name": "vfw_private_ip_1",
- "vnf-parameter-value": "192.168.20.110"
- }, {
- "vnf-parameter-name": "vfw_private_ip_2",
- "vnf-parameter-value": "10.0.110.4"
- }, {
- "vnf-parameter-name": "vfw_private_ip_3",
- "vnf-parameter-value": "10.100.100.4"
- }, {
- "vnf-parameter-name": "vpg_private_ip_0",
- "vnf-parameter-value": "192.168.10.200"
- }, {
- "vnf-parameter-name": "vpg_private_ip_1",
- "vnf-parameter-value": "10.0.110.2"
- }, {
- "vnf-parameter-name": "vpg_private_ip_2",
- "vnf-parameter-value": "10.100.100.2"
- }, {
- "vnf-parameter-name": "vsn_private_ip_1",
- "vnf-parameter-value": "10.0.110.5"
- }, {
- "vnf-parameter-name": "vsn_private_ip_0",
- "vnf-parameter-value": "192.168.20.240"
- }, {
- "vnf-parameter-name": "vsn_private_ip_2",
- "vnf-parameter-value": "10.100.100.5"
- }, {
- "vnf-parameter-name": "demo_artifacts_version",
- "vnf-parameter-value": "1.4.0"
- }, {
- "vnf-parameter-name": "pub_key",
- "vnf-parameter-value": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6KbTxfT368DNBzLyfTYJ0INI7mkRXc/xMpx349TXGBTQ06g2toN/p9OM188EGZd/qERRXcdnKY8lYULVIXZEoQbXTus2gTQWXy1zn5nhSkhste3L1uG+sjHySQHBj5NyOC32iNehRNu03bHi8XRBAOHZsXx6JpkltLXgWqQI91+h9i7I/4trpPyODtzz1DLIkbXv9woncwllKf4bmm+Bk6/OJKHuAlBdCLL5diZqODEZlKOyPPC9c07a835sskMgbscPrvBQigZFRNerjQlFXXCHjRwoBMvfS7kD7/R/K1fmSLtH0w4VEp5iwS9SIezgEeO2aDFPucmYG4MQDGzAz"
- }, {
- "vnf-parameter-name": "dcae_collector_ip",
- "vnf-parameter-value": "10.0.4.1"
- }, {
- "vnf-parameter-name": "sec_group",
- "vnf-parameter-value": "onap_sg_y3id"
- }
- ],
- "vnf-topology-identifier": {
- "service-type": "1c38ef10-0f14-4d78-876c-3cc10ff4e535",
- "vnf-type": "VfwsnkDistributetraffic..base_vfw..module-0",
- "generic-vnf-name": "vfw-vsnk-dt-2",
- "generic-vnf-type": "vFWSNK-DistributeTraffic 1",
- "vnf-name": "vfw-dt-module-2"
- }
- }
- }
+ "input": {
+ "request-information": {
+ "notification-url": "openecomp.org",
+ "order-number": "1",
+ "order-version": "1",
+ "request-action": "PreloadVNFRequest",
+ "request-id": "robot12"
+ },
+ "sdnc-request-header": {
+ "svc-action": "reserve",
+ "svc-notification-url": "http://openecomp.org:8080/adapters/rest/SDNCNotify",
+ "svc-request-id": "robot12"
+ },
+ "vnf-topology-information": {
+ "vnf-assignments": {
+ "availability-zones": [],
+ "vnf-networks": [],
+ "vnf-vms": []
+ },
+ "vnf-parameters": [
+ {
+ "vnf-parameter-name": "unprotected_private_net_id",
+ "vnf-parameter-value": "unprotected_net_dt"
+ },
+ {
+ "vnf-parameter-name": "unprotected_private_subnet_id",
+ "vnf-parameter-value": "unprotected_subnet_dt"
+ },
+ {
+ "vnf-parameter-name": "unprotected_private_net_cidr",
+ "vnf-parameter-value": "192.168.10.0/24"
+ },
+ {
+ "vnf-parameter-name": "protected_private_net_id",
+ "vnf-parameter-value": "protected_net_dt"
+ },
+ {
+ "vnf-parameter-name": "protected_private_net_cidr",
+ "vnf-parameter-value": "192.168.20.0/24"
+ },
+ {
+ "vnf-parameter-name": "protected_private_subnet_id",
+ "vnf-parameter-value": "protected_subnet_dt"
+ },
+ {
+ "vnf-parameter-name": "key_name",
+ "vnf-parameter-value": "vfw_key"
+ },
+ {
+ "vnf-parameter-name": "cloud_env",
+ "vnf-parameter-value": "openstack"
+ },
+ {
+ "vnf-parameter-name": "vsn_name_0",
+ "vnf-parameter-value": "vfw-vsn-2-dt"
+ },
+ {
+ "vnf-parameter-name": "onap_private_net_id",
+ "vnf-parameter-value": "oam_onap_y3id"
+ },
+ {
+ "vnf-parameter-name": "onap_private_subnet_id",
+ "vnf-parameter-value": "oam_onap_y3id"
+ },
+ {
+ "vnf-parameter-name": "onap_private_net_cidr",
+ "vnf-parameter-value": "10.0.0.0/16"
+ },
+ {
+ "vnf-parameter-name": "ext_private_net_id",
+ "vnf-parameter-value": "onap_oam_ext"
+ },
+ {
+ "vnf-parameter-name": "ext_private_subnet_id",
+ "vnf-parameter-value": "onap_oam_ext_sub"
+ },
+ {
+ "vnf-parameter-name": "ext_private_net_cidr",
+ "vnf-parameter-value": "10.100.0.0/16"
+ },
+ {
+ "vnf-parameter-name": "nexus_artifact_repo",
+ "vnf-parameter-value": "https://nexus.onap.org"
+ },
+ {
+ "vnf-parameter-name": "vfw_name_0",
+ "vnf-parameter-value": "vfw-vfw-2-dt"
+ },
+ {
+ "vnf-parameter-name": "dcae_collector_port",
+ "vnf-parameter-value": "8080"
+ },
+ {
+ "vnf-parameter-name": "public_net_id",
+ "vnf-parameter-value": "external"
+ },
+ {
+ "vnf-parameter-name": "image_name",
+ "vnf-parameter-value": "ubuntu-14-04-cloud-amd64"
+ },
+ {
+ "vnf-parameter-name": "flavor_name",
+ "vnf-parameter-value": "m1.medium"
+ },
+ {
+ "vnf-parameter-name": "install_script_version",
+ "vnf-parameter-value": "1.4.0"
+ },
+ {
+ "vnf-parameter-name": "vfw_private_ip_0",
+ "vnf-parameter-value": "192.168.10.110"
+ },
+ {
+ "vnf-parameter-name": "vfw_private_ip_1",
+ "vnf-parameter-value": "192.168.20.110"
+ },
+ {
+ "vnf-parameter-name": "vfw_private_ip_2",
+ "vnf-parameter-value": "10.0.110.4"
+ },
+ {
+ "vnf-parameter-name": "vfw_private_ip_3",
+ "vnf-parameter-value": "10.100.100.4"
+ },
+ {
+ "vnf-parameter-name": "vpg_private_ip_0",
+ "vnf-parameter-value": "192.168.10.200"
+ },
+ {
+ "vnf-parameter-name": "vpg_private_ip_1",
+ "vnf-parameter-value": "10.0.110.2"
+ },
+ {
+ "vnf-parameter-name": "vpg_private_ip_2",
+ "vnf-parameter-value": "10.100.100.2"
+ },
+ {
+ "vnf-parameter-name": "vsn_private_ip_1",
+ "vnf-parameter-value": "10.0.110.5"
+ },
+ {
+ "vnf-parameter-name": "vsn_private_ip_0",
+ "vnf-parameter-value": "192.168.20.240"
+ },
+ {
+ "vnf-parameter-name": "vsn_private_ip_2",
+ "vnf-parameter-value": "10.100.100.5"
+ },
+ {
+ "vnf-parameter-name": "demo_artifacts_version",
+ "vnf-parameter-value": "1.4.0"
+ },
+ {
+ "vnf-parameter-name": "pub_key",
+ "vnf-parameter-value": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6KbTxfT368DNBzLyfTYJ0INI7mkRXc/xMpx349TXGBTQ06g2toN/p9OM188EGZd/qERRXcdnKY8lYULVIXZEoQbXTus2gTQWXy1zn5nhSkhste3L1uG+sjHySQHBj5NyOC32iNehRNu03bHi8XRBAOHZsXx6JpkltLXgWqQI91+h9i7I/4trpPyODtzz1DLIkbXv9woncwllKf4bmm+Bk6/OJKHuAlBdCLL5diZqODEZlKOyPPC9c07a835sskMgbscPrvBQigZFRNerjQlFXXCHjRwoBMvfS7kD7/R/K1fmSLtH0w4VEp5iwS9SIezgEeO2aDFPucmYG4MQDGzAz"
+ },
+ {
+ "vnf-parameter-name": "dcae_collector_ip",
+ "vnf-parameter-value": "10.0.4.1"
+ },
+ {
+ "vnf-parameter-name": "sec_group",
+ "vnf-parameter-value": "onap_sg_y3id"
+ }
+ ],
+ "vnf-topology-identifier": {
+ "service-type": "1c38ef10-0f14-4d78-876c-3cc10ff4e535",
+ "vnf-type": "VfwsnkDistributetraffic..base_vfw..module-0",
+ "generic-vnf-name": "vfw-vsnk-dt-2",
+ "generic-vnf-type": "vFWSNK-DistributeTraffic 1",
+ "vnf-name": "vfw-dt-module-2"
+ }
+ }
+ }
}
diff --git a/docs/files/vfwdt-aai-postman.json b/docs/files/vfwdt-aai-postman.json
index a37792217..aaa03a4f8 100644
--- a/docs/files/vfwdt-aai-postman.json
+++ b/docs/files/vfwdt-aai-postman.json
@@ -1,214 +1,214 @@
{
- "info": {
- "_postman_id": "a2db5e8d-6c15-4db1-9a62-9346a414b0be",
- "name": "ONAP-AAI-Test",
- "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json"
- },
- "item": [
- {
- "name": "VServersByName",
- "request": {
- "method": "GET",
- "header": [
- {
- "key": "X-FromAppId",
- "value": "AAI",
- "type": "text"
- },
- {
- "key": "X-TransactionId",
- "value": "get_aai_subscr",
- "type": "text"
- },
- {
- "key": "Accept",
- "value": "application/json",
- "type": "text"
- },
- {
- "key": "Content-Type",
- "value": "application/json",
- "type": "text"
- }
- ],
- "url": {
- "raw": "https://10.12.5.63:30233/aai/v14/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/087050388b204c73a3e418dd2c1fe30b/vservers/?vserver-name=vofwl01vfw4407",
- "protocol": "https",
- "host": [
- "10",
- "12",
- "5",
- "63"
- ],
- "port": "30233",
- "path": [
- "aai",
- "v14",
- "cloud-infrastructure",
- "cloud-regions",
- "cloud-region",
- "CloudOwner",
- "RegionOne",
- "tenants",
- "tenant",
- "087050388b204c73a3e418dd2c1fe30b",
- "vservers",
- ""
- ],
- "query": [
- {
- "key": "vserver-name",
- "value": "vofwl01vfw4407"
- }
- ]
- }
- },
- "response": []
- },
- {
- "name": "Tenants",
- "request": {
- "method": "GET",
- "header": [
- {
- "key": "X-FromAppId",
- "value": "AAI",
- "type": "text"
- },
- {
- "key": "X-TransactionId",
- "value": "get_aai_subscr",
- "type": "text"
- },
- {
- "key": "Accept",
- "value": "application/json",
- "type": "text"
- },
- {
- "key": "Content-Type",
- "value": "application/json",
- "type": "text"
- }
- ],
- "url": {
- "raw": "https://10.12.5.63:30233/aai/v14/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/087050388b204c73a3e418dd2c1fe30b/vservers/?vserver-name=vofwl01vfw4407",
- "protocol": "https",
- "host": [
- "10",
- "12",
- "5",
- "63"
- ],
- "port": "30233",
- "path": [
- "aai",
- "v14",
- "cloud-infrastructure",
- "cloud-regions",
- "cloud-region",
- "CloudOwner",
- "RegionOne",
- "tenants",
- "tenant",
- "087050388b204c73a3e418dd2c1fe30b",
- "vservers",
- ""
- ],
- "query": [
- {
- "key": "vserver-name",
- "value": "vofwl01vfw4407"
- }
- ]
- }
- },
- "response": []
- },
- {
- "name": "GenericVNFById",
- "request": {
- "method": "GET",
- "header": [
- {
- "key": "X-FromAppId",
- "value": "AAI",
- "type": "text"
- },
- {
- "key": "X-TransactionId",
- "value": "get_aai_subscr",
- "type": "text"
- },
- {
- "key": "Accept",
- "value": "application/json",
- "type": "text"
- },
- {
- "key": "Content-Type",
- "value": "application/json",
- "type": "text"
- }
- ],
- "url": {
- "raw": "https://10.12.5.63:30233/aai/v14/network/generic-vnfs/generic-vnf/2d125b4d-e120-4815-a0c7-4c4bec6c89f9",
- "protocol": "https",
- "host": [
- "10",
- "12",
- "5",
- "63"
- ],
- "port": "30233",
- "path": [
- "aai",
- "v14",
- "network",
- "generic-vnfs",
- "generic-vnf",
- "2d125b4d-e120-4815-a0c7-4c4bec6c89f9"
- ]
- }
- },
- "response": []
- }
- ],
- "auth": {
- "type": "basic",
- "basic": [
- {
- "key": "password",
- "value": "AAI",
- "type": "string"
- },
- {
- "key": "username",
- "value": "AAI",
- "type": "string"
- }
- ]
- },
- "event": [
- {
- "listen": "prerequest",
- "script": {
- "id": "e35b8a0f-24b0-4990-88f2-4c83421cb4a4",
- "type": "text/javascript",
- "exec": [
- ""
- ]
- }
- },
- {
- "listen": "test",
- "script": {
- "id": "74b3d3ac-e313-4570-93b0-bd6d64beebfa",
- "type": "text/javascript",
- "exec": [
- ""
- ]
- }
- }
- ]
-} \ No newline at end of file
+ "info": {
+ "_postman_id": "a2db5e8d-6c15-4db1-9a62-9346a414b0be",
+ "name": "ONAP-AAI-Test",
+ "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json"
+ },
+ "item": [
+ {
+ "name": "VServersByName",
+ "request": {
+ "method": "GET",
+ "header": [
+ {
+ "key": "X-FromAppId",
+ "value": "AAI",
+ "type": "text"
+ },
+ {
+ "key": "X-TransactionId",
+ "value": "get_aai_subscr",
+ "type": "text"
+ },
+ {
+ "key": "Accept",
+ "value": "application/json",
+ "type": "text"
+ },
+ {
+ "key": "Content-Type",
+ "value": "application/json",
+ "type": "text"
+ }
+ ],
+ "url": {
+ "raw": "https://10.12.5.63:30233/aai/v14/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/087050388b204c73a3e418dd2c1fe30b/vservers/?vserver-name=vofwl01vfw4407",
+ "protocol": "https",
+ "host": [
+ "10",
+ "12",
+ "5",
+ "63"
+ ],
+ "port": "30233",
+ "path": [
+ "aai",
+ "v14",
+ "cloud-infrastructure",
+ "cloud-regions",
+ "cloud-region",
+ "CloudOwner",
+ "RegionOne",
+ "tenants",
+ "tenant",
+ "087050388b204c73a3e418dd2c1fe30b",
+ "vservers",
+ ""
+ ],
+ "query": [
+ {
+ "key": "vserver-name",
+ "value": "vofwl01vfw4407"
+ }
+ ]
+ }
+ },
+ "response": []
+ },
+ {
+ "name": "Tenants",
+ "request": {
+ "method": "GET",
+ "header": [
+ {
+ "key": "X-FromAppId",
+ "value": "AAI",
+ "type": "text"
+ },
+ {
+ "key": "X-TransactionId",
+ "value": "get_aai_subscr",
+ "type": "text"
+ },
+ {
+ "key": "Accept",
+ "value": "application/json",
+ "type": "text"
+ },
+ {
+ "key": "Content-Type",
+ "value": "application/json",
+ "type": "text"
+ }
+ ],
+ "url": {
+ "raw": "https://10.12.5.63:30233/aai/v14/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/087050388b204c73a3e418dd2c1fe30b/vservers/?vserver-name=vofwl01vfw4407",
+ "protocol": "https",
+ "host": [
+ "10",
+ "12",
+ "5",
+ "63"
+ ],
+ "port": "30233",
+ "path": [
+ "aai",
+ "v14",
+ "cloud-infrastructure",
+ "cloud-regions",
+ "cloud-region",
+ "CloudOwner",
+ "RegionOne",
+ "tenants",
+ "tenant",
+ "087050388b204c73a3e418dd2c1fe30b",
+ "vservers",
+ ""
+ ],
+ "query": [
+ {
+ "key": "vserver-name",
+ "value": "vofwl01vfw4407"
+ }
+ ]
+ }
+ },
+ "response": []
+ },
+ {
+ "name": "GenericVNFById",
+ "request": {
+ "method": "GET",
+ "header": [
+ {
+ "key": "X-FromAppId",
+ "value": "AAI",
+ "type": "text"
+ },
+ {
+ "key": "X-TransactionId",
+ "value": "get_aai_subscr",
+ "type": "text"
+ },
+ {
+ "key": "Accept",
+ "value": "application/json",
+ "type": "text"
+ },
+ {
+ "key": "Content-Type",
+ "value": "application/json",
+ "type": "text"
+ }
+ ],
+ "url": {
+ "raw": "https://10.12.5.63:30233/aai/v14/network/generic-vnfs/generic-vnf/2d125b4d-e120-4815-a0c7-4c4bec6c89f9",
+ "protocol": "https",
+ "host": [
+ "10",
+ "12",
+ "5",
+ "63"
+ ],
+ "port": "30233",
+ "path": [
+ "aai",
+ "v14",
+ "network",
+ "generic-vnfs",
+ "generic-vnf",
+ "2d125b4d-e120-4815-a0c7-4c4bec6c89f9"
+ ]
+ }
+ },
+ "response": []
+ }
+ ],
+ "auth": {
+ "type": "basic",
+ "basic": [
+ {
+ "key": "password",
+ "value": "AAI",
+ "type": "string"
+ },
+ {
+ "key": "username",
+ "value": "AAI",
+ "type": "string"
+ }
+ ]
+ },
+ "event": [
+ {
+ "listen": "prerequest",
+ "script": {
+ "id": "e35b8a0f-24b0-4990-88f2-4c83421cb4a4",
+ "type": "text/javascript",
+ "exec": [
+ ""
+ ]
+ }
+ },
+ {
+ "listen": "test",
+ "script": {
+ "id": "74b3d3ac-e313-4570-93b0-bd6d64beebfa",
+ "type": "text/javascript",
+ "exec": [
+ ""
+ ]
+ }
+ }
+ ]
+}
diff --git a/docs/files/vfwdt-general-workflow-sd.png b/docs/files/vfwdt-general-workflow-sd.png
new file mode 100644
index 000000000..89fa1f4ab
--- /dev/null
+++ b/docs/files/vfwdt-general-workflow-sd.png
Binary files differ
diff --git a/docs/files/vfwdt-identification-workflow-sd.png b/docs/files/vfwdt-identification-workflow-sd.png
new file mode 100644
index 000000000..83310f731
--- /dev/null
+++ b/docs/files/vfwdt-identification-workflow-sd.png
Binary files differ
diff --git a/docs/files/vfwdt-td-workflow-sd.png b/docs/files/vfwdt-td-workflow-sd.png
new file mode 100644
index 000000000..73c6305a0
--- /dev/null
+++ b/docs/files/vfwdt-td-workflow-sd.png
Binary files differ
diff --git a/docs/files/vfwdt-upgrade-workflow-sd.png b/docs/files/vfwdt-upgrade-workflow-sd.png
new file mode 100644
index 000000000..6b2ee5dfa
--- /dev/null
+++ b/docs/files/vfwdt-upgrade-workflow-sd.png
Binary files differ
diff --git a/docs/files/vfwdt-workflow-general.png b/docs/files/vfwdt-workflow-general.png
new file mode 100644
index 000000000..3ffe35db6
--- /dev/null
+++ b/docs/files/vfwdt-workflow-general.png
Binary files differ
diff --git a/docs/files/vfwdt-workflow-traffic.png b/docs/files/vfwdt-workflow-traffic.png
new file mode 100644
index 000000000..8bc6073dd
--- /dev/null
+++ b/docs/files/vfwdt-workflow-traffic.png
Binary files differ
diff --git a/docs/files/vfwdt-workflow-upgrade.png b/docs/files/vfwdt-workflow-upgrade.png
new file mode 100644
index 000000000..6e24c706d
--- /dev/null
+++ b/docs/files/vfwdt-workflow-upgrade.png
Binary files differ
diff --git a/docs/files/vpkg-preload.json b/docs/files/vpkg-preload.json
index 8a303b28d..4b2be182d 100644
--- a/docs/files/vpkg-preload.json
+++ b/docs/files/vpkg-preload.json
@@ -1,141 +1,176 @@
{
- "input": {
- "request-information": {
- "notification-url": "openecomp.org",
- "order-number": "1",
- "order-version": "1",
- "request-action": "PreloadVNFRequest",
- "request-id": "robot12"
- },
- "sdnc-request-header": {
- "svc-action": "reserve",
- "svc-notification-url": "http:\/\/openecomp.org:8080\/adapters\/rest\/SDNCNotify",
- "svc-request-id": "robot12"
- },
- "vnf-topology-information": {
- "vnf-assignments": {
- "availability-zones": [],
- "vnf-networks": [],
- "vnf-vms": []
- },
- "vnf-parameters":
- [{
- "vnf-parameter-name": "unprotected_private_net_id",
- "vnf-parameter-value": "unprotected_net_dt"
- }, {
- "vnf-parameter-name": "unprotected_private_subnet_id",
- "vnf-parameter-value": "unprotected_subnet_dt"
- }, {
- "vnf-parameter-name": "unprotected_private_net_cidr",
- "vnf-parameter-value": "192.168.10.0/24"
- }, {
- "vnf-parameter-name": "protected_private_net_id",
- "vnf-parameter-value": "protected_net_dt"
- }, {
- "vnf-parameter-name": "protected_private_net_cidr",
- "vnf-parameter-value": "192.168.20.0/24"
- }, {
- "vnf-parameter-name": "protected_private_subnet_id",
- "vnf-parameter-value": "protected_subnet_dt"
- }, {
- "vnf-parameter-name": "key_name",
- "vnf-parameter-value": "vfw_key"
- }, {
- "vnf-parameter-name": "cloud_env",
- "vnf-parameter-value": "openstack"
- }, {
- "vnf-parameter-name": "vsn_name_0",
- "vnf-parameter-value": "vfw-vsn-0-dt"
- }, {
- "vnf-parameter-name": "onap_private_net_id",
- "vnf-parameter-value": "oam_onap_y3id"
- }, {
- "vnf-parameter-name": "onap_private_subnet_id",
- "vnf-parameter-value": "oam_onap_y3id"
- }, {
- "vnf-parameter-name": "onap_private_net_cidr",
- "vnf-parameter-value": "10.0.0.0/16"
- }, {
- "vnf-parameter-name": "ext_private_net_id",
- "vnf-parameter-value": "onap_oam_ext"
- }, {
- "vnf-parameter-name": "ext_private_subnet_id",
- "vnf-parameter-value": "onap_oam_ext_sub"
- }, {
- "vnf-parameter-name": "ext_private_net_cidr",
- "vnf-parameter-value": "10.100.0.0/16"
- }, {
- "vnf-parameter-name": "nexus_artifact_repo",
- "vnf-parameter-value": "https://nexus.onap.org"
- }, {
- "vnf-parameter-name": "vfw_name_0",
- "vnf-parameter-value": "vfw-vfw-0-dt"
- }, {
- "vnf-parameter-name": "dcae_collector_port",
- "vnf-parameter-value": "8080"
- }, {
- "vnf-parameter-name": "public_net_id",
- "vnf-parameter-value": "external"
- }, {
- "vnf-parameter-name": "image_name",
- "vnf-parameter-value": "ubuntu-14-04-cloud-amd64"
- }, {
- "vnf-parameter-name": "flavor_name",
- "vnf-parameter-value": "m1.medium"
- }, {
- "vnf-parameter-name": "install_script_version",
- "vnf-parameter-value": "1.4.0"
- }, {
- "vnf-parameter-name": "vfw_private_ip_0",
- "vnf-parameter-value": "192.168.10.100"
- }, {
- "vnf-parameter-name": "vfw_private_ip_1",
- "vnf-parameter-value": "192.168.20.100"
- }, {
- "vnf-parameter-name": "vfw_private_ip_2",
- "vnf-parameter-value": "10.0.110.1"
- }, {
- "vnf-parameter-name": "vfw_private_ip_3",
- "vnf-parameter-value": "10.100.100.1"
- }, {
- "vnf-parameter-name": "vpg_private_ip_0",
- "vnf-parameter-value": "192.168.10.200"
- }, {
- "vnf-parameter-name": "vpg_private_ip_1",
- "vnf-parameter-value": "10.0.110.2"
- }, {
- "vnf-parameter-name": "vpg_private_ip_2",
- "vnf-parameter-value": "10.100.100.2"
- }, {
- "vnf-parameter-name": "vsn_private_ip_1",
- "vnf-parameter-value": "10.0.110.3"
- }, {
- "vnf-parameter-name": "vsn_private_ip_0",
- "vnf-parameter-value": "192.168.20.250"
- }, {
- "vnf-parameter-name": "vsn_private_ip_2",
- "vnf-parameter-value": "10.100.100.3"
- }, {
- "vnf-parameter-name": "demo_artifacts_version",
- "vnf-parameter-value": "1.4.0"
- }, {
- "vnf-parameter-name": "pub_key",
- "vnf-parameter-value": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6KbTxfT368DNBzLyfTYJ0INI7mkRXc/xMpx349TXGBTQ06g2toN/p9OM188EGZd/qERRXcdnKY8lYULVIXZEoQbXTus2gTQWXy1zn5nhSkhste3L1uG+sjHySQHBj5NyOC32iNehRNu03bHi8XRBAOHZsXx6JpkltLXgWqQI91+h9i7I/4trpPyODtzz1DLIkbXv9woncwllKf4bmm+Bk6/OJKHuAlBdCLL5diZqODEZlKOyPPC9c07a835sskMgbscPrvBQigZFRNerjQlFXXCHjRwoBMvfS7kD7/R/K1fmSLtH0w4VEp5iwS9SIezgEeO2aDFPucmYG4MQDGzAz"
- }, {
- "vnf-parameter-name": "dcae_collector_ip",
- "vnf-parameter-value": "10.0.4.1"
- }, {
- "vnf-parameter-name": "sec_group",
- "vnf-parameter-value": "onap_sg_y3id"
- }
- ],
- "vnf-topology-identifier": {
- "service-type": "1c38ef10-0f14-4d78-876c-3cc10ff4e535",
- "vnf-type": "VpkgDistributetraffic..base_vpkg..module-0",
- "generic-vnf-name": "vpkg-dt-1",
- "generic-vnf-type": "vPKG-DistributeTraffic 0",
- "vnf-name": "vpkg-dt-module-1"
- }
- }
- }
+ "input": {
+ "request-information": {
+ "notification-url": "openecomp.org",
+ "order-number": "1",
+ "order-version": "1",
+ "request-action": "PreloadVNFRequest",
+ "request-id": "robot12"
+ },
+ "sdnc-request-header": {
+ "svc-action": "reserve",
+ "svc-notification-url": "http://openecomp.org:8080/adapters/rest/SDNCNotify",
+ "svc-request-id": "robot12"
+ },
+ "vnf-topology-information": {
+ "vnf-assignments": {
+ "availability-zones": [],
+ "vnf-networks": [],
+ "vnf-vms": []
+ },
+ "vnf-parameters": [
+ {
+ "vnf-parameter-name": "unprotected_private_net_id",
+ "vnf-parameter-value": "unprotected_net_dt"
+ },
+ {
+ "vnf-parameter-name": "unprotected_private_subnet_id",
+ "vnf-parameter-value": "unprotected_subnet_dt"
+ },
+ {
+ "vnf-parameter-name": "unprotected_private_net_cidr",
+ "vnf-parameter-value": "192.168.10.0/24"
+ },
+ {
+ "vnf-parameter-name": "protected_private_net_id",
+ "vnf-parameter-value": "protected_net_dt"
+ },
+ {
+ "vnf-parameter-name": "protected_private_net_cidr",
+ "vnf-parameter-value": "192.168.20.0/24"
+ },
+ {
+ "vnf-parameter-name": "protected_private_subnet_id",
+ "vnf-parameter-value": "protected_subnet_dt"
+ },
+ {
+ "vnf-parameter-name": "key_name",
+ "vnf-parameter-value": "vfw_key"
+ },
+ {
+ "vnf-parameter-name": "cloud_env",
+ "vnf-parameter-value": "openstack"
+ },
+ {
+ "vnf-parameter-name": "vsn_name_0",
+ "vnf-parameter-value": "vfw-vsn-0-dt"
+ },
+ {
+ "vnf-parameter-name": "onap_private_net_id",
+ "vnf-parameter-value": "oam_onap_y3id"
+ },
+ {
+ "vnf-parameter-name": "onap_private_subnet_id",
+ "vnf-parameter-value": "oam_onap_y3id"
+ },
+ {
+ "vnf-parameter-name": "onap_private_net_cidr",
+ "vnf-parameter-value": "10.0.0.0/16"
+ },
+ {
+ "vnf-parameter-name": "ext_private_net_id",
+ "vnf-parameter-value": "onap_oam_ext"
+ },
+ {
+ "vnf-parameter-name": "ext_private_subnet_id",
+ "vnf-parameter-value": "onap_oam_ext_sub"
+ },
+ {
+ "vnf-parameter-name": "ext_private_net_cidr",
+ "vnf-parameter-value": "10.100.0.0/16"
+ },
+ {
+ "vnf-parameter-name": "nexus_artifact_repo",
+ "vnf-parameter-value": "https://nexus.onap.org"
+ },
+ {
+ "vnf-parameter-name": "vfw_name_0",
+ "vnf-parameter-value": "vfw-vfw-0-dt"
+ },
+ {
+ "vnf-parameter-name": "dcae_collector_port",
+ "vnf-parameter-value": "8080"
+ },
+ {
+ "vnf-parameter-name": "public_net_id",
+ "vnf-parameter-value": "external"
+ },
+ {
+ "vnf-parameter-name": "image_name",
+ "vnf-parameter-value": "ubuntu-14-04-cloud-amd64"
+ },
+ {
+ "vnf-parameter-name": "flavor_name",
+ "vnf-parameter-value": "m1.medium"
+ },
+ {
+ "vnf-parameter-name": "install_script_version",
+ "vnf-parameter-value": "1.4.0"
+ },
+ {
+ "vnf-parameter-name": "vfw_private_ip_0",
+ "vnf-parameter-value": "192.168.10.100"
+ },
+ {
+ "vnf-parameter-name": "vfw_private_ip_1",
+ "vnf-parameter-value": "192.168.20.100"
+ },
+ {
+ "vnf-parameter-name": "vfw_private_ip_2",
+ "vnf-parameter-value": "10.0.110.1"
+ },
+ {
+ "vnf-parameter-name": "vfw_private_ip_3",
+ "vnf-parameter-value": "10.100.100.1"
+ },
+ {
+ "vnf-parameter-name": "vpg_private_ip_0",
+ "vnf-parameter-value": "192.168.10.200"
+ },
+ {
+ "vnf-parameter-name": "vpg_private_ip_1",
+ "vnf-parameter-value": "10.0.110.2"
+ },
+ {
+ "vnf-parameter-name": "vpg_private_ip_2",
+ "vnf-parameter-value": "10.100.100.2"
+ },
+ {
+ "vnf-parameter-name": "vsn_private_ip_1",
+ "vnf-parameter-value": "10.0.110.3"
+ },
+ {
+ "vnf-parameter-name": "vsn_private_ip_0",
+ "vnf-parameter-value": "192.168.20.250"
+ },
+ {
+ "vnf-parameter-name": "vsn_private_ip_2",
+ "vnf-parameter-value": "10.100.100.3"
+ },
+ {
+ "vnf-parameter-name": "demo_artifacts_version",
+ "vnf-parameter-value": "1.4.0"
+ },
+ {
+ "vnf-parameter-name": "pub_key",
+ "vnf-parameter-value": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6KbTxfT368DNBzLyfTYJ0INI7mkRXc/xMpx349TXGBTQ06g2toN/p9OM188EGZd/qERRXcdnKY8lYULVIXZEoQbXTus2gTQWXy1zn5nhSkhste3L1uG+sjHySQHBj5NyOC32iNehRNu03bHi8XRBAOHZsXx6JpkltLXgWqQI91+h9i7I/4trpPyODtzz1DLIkbXv9woncwllKf4bmm+Bk6/OJKHuAlBdCLL5diZqODEZlKOyPPC9c07a835sskMgbscPrvBQigZFRNerjQlFXXCHjRwoBMvfS7kD7/R/K1fmSLtH0w4VEp5iwS9SIezgEeO2aDFPucmYG4MQDGzAz"
+ },
+ {
+ "vnf-parameter-name": "dcae_collector_ip",
+ "vnf-parameter-value": "10.0.4.1"
+ },
+ {
+ "vnf-parameter-name": "sec_group",
+ "vnf-parameter-value": "onap_sg_y3id"
+ }
+ ],
+ "vnf-topology-identifier": {
+ "service-type": "1c38ef10-0f14-4d78-876c-3cc10ff4e535",
+ "vnf-type": "VpkgDistributetraffic..base_vpkg..module-0",
+ "generic-vnf-name": "vpkg-dt-1",
+ "generic-vnf-type": "vPKG-DistributeTraffic 0",
+ "vnf-name": "vpkg-dt-module-1"
+ }
+ }
+ }
}
diff --git a/docs/files/windriver/windriver_CPU.png b/docs/files/windriver/windriver_CPU.png
new file mode 100644
index 000000000..abf334b81
--- /dev/null
+++ b/docs/files/windriver/windriver_CPU.png
Binary files differ
diff --git a/docs/files/windriver/windriver_RAM.png b/docs/files/windriver/windriver_RAM.png
new file mode 100644
index 000000000..1333f01cb
--- /dev/null
+++ b/docs/files/windriver/windriver_RAM.png
Binary files differ
diff --git a/docs/files/windriver/windriver_disks.png b/docs/files/windriver/windriver_disks.png
new file mode 100644
index 000000000..1f7fc5265
--- /dev/null
+++ b/docs/files/windriver/windriver_disks.png
Binary files differ
diff --git a/docs/files/windriver/windriver_servers.png b/docs/files/windriver/windriver_servers.png
new file mode 100644
index 000000000..39671782f
--- /dev/null
+++ b/docs/files/windriver/windriver_servers.png
Binary files differ
diff --git a/docs/files/windriver/windrivers_servers2.png b/docs/files/windriver/windrivers_servers2.png
new file mode 100644
index 000000000..90d63c35a
--- /dev/null
+++ b/docs/files/windriver/windrivers_servers2.png
Binary files differ
diff --git a/docs/heat.rst b/docs/heat.rst
deleted file mode 100644
index 78796df95..000000000
--- a/docs/heat.rst
+++ /dev/null
@@ -1,236 +0,0 @@
-ONAP HEAT Template
-------------------
-
-Source files
-~~~~~~~~~~~~
-
-- Template file: https://git.onap.org/demo/plain/heat/ONAP/onap_openstack.yaml
-- Environment file: https://git.onap.org/demo/plain/heat/ONAP/onap_openstack.env
-
-Description
-~~~~~~~~~~~
-
-The ONAP HEAT template spins up the entire ONAP platform. The template,
-onap_openstack.yaml, comes with an environment file,
-onap_openstack.env, in which all the default values are defined.
-
-.. note::
- onap_openstack.yaml AND onap_openstack.env ARE THE HEAT TEMPLATE
- AND ENVIRONMENT FILE CURRENTLY SUPPORTED.
- onap_openstack_float.yaml/env AND onap_openstack_nofloat.yaml/env
- AREN'T UPDATED AND THEIR USAGE IS NOT SUGGESTED.
-
-The HEAT template is composed of two sections: (i) parameters, and (ii)
-resources.
-The parameter section contains the declaration and
-description of the parameters that will be used to spin up ONAP, such as
-public network identifier, URLs of code and artifacts repositories, etc.
-The default values of these parameters can be found in the environment
-file.
-
-The resource section contains the definition of:
-
-- ONAP Private Management Network, which ONAP components use to communicate with each other and with VNFs
-- ONAP Virtual Machines (VMs)
-- Public/private key pair used to access ONAP VMs
-- Virtual interfaces towards the ONAP Private Management Network
-- Disk volumes.
-
-Each VM specification includes Operating System image name, VM size
-(i.e. flavor), VM name, etc. Each VM has two virtual network interfaces:
-one towards the public network and one towards the ONAP Private
-Management network, as described above. Furthermore, each VM runs a
-post-instantiation script that downloads and installs software
-dependencies (e.g. Java JDK, gcc, make, Python, ...) and ONAP software
-packages and docker containers from remote repositories.
-
-When the HEAT template is executed, the Openstack HEAT engine creates
-the resources defined in the HEAT template, based on the parameters
-values defined in the environment file.
-
-Environment file
-~~~~~~~~~~~~~~~~
-
-Before running HEAT, it is necessary to customize the environment file.
-Indeed, some parameters, namely public_net_id, pub_key,
-openstack_tenant_id, openstack_username, and openstack_api_key,
-need to be set depending on the user's environment:
-
-**Global parameters**
-
-::
-
- public_net_id: PUT YOUR NETWORK ID/NAME HERE
- pub_key: PUT YOUR PUBLIC KEY HERE
- openstack_tenant_id: PUT YOUR OPENSTACK PROJECT ID HERE
- openstack_username: PUT YOUR OPENSTACK USERNAME HERE
- openstack_api_key: PUT YOUR OPENSTACK PASSWORD HERE
- horizon_url: PUT THE HORIZON URL HERE
- keystone_url: PUT THE KEYSTONE URL HERE (do not include version number)
-
-openstack_region parameter is set to RegionOne (OpenStack default). If
-your OpenStack is using another Region, please modify this parameter.
-
-public_net_id is the unique identifier (UUID) or name of the public
-network of the cloud provider. To get the public_net_id, use the
-following OpenStack CLI command (ext is the name of the external
-network, change it with the name of the external network of your
-installation)
-
-::
-
- openstack network list | grep ext | awk '{print $2}'
-
-pub_key is string value of the public key that will be installed in
-each ONAP VM. To create a public/private key pair in Linux, please
-execute the following instruction:
-
-::
-
- user@ubuntu:~$ ssh-keygen -t rsa
-
-The following operations to create the public/private key pair occur:
-
-::
-
- Generating public/private rsa key pair.
- Enter file in which to save the key (/home/user/.ssh/id_rsa):
- Created directory '/home/user/.ssh'.
- Enter passphrase (empty for no passphrase):
- Enter same passphrase again:
- Your identification has been saved in /home/user/.ssh/id_rsa.
- Your public key has been saved in /home/user/.ssh/id_rsa.pub.
-
-openstack_username, openstack_tenant_id (password), and
-openstack_api_key are user's credentials to access the
-OpenStack-based cloud.
-
-**Images and flavors parameters**
-
-::
-
- ubuntu_1404_image: PUT THE UBUNTU 14.04 IMAGE NAME HERE
- ubuntu_1604_image: PUT THE UBUNTU 16.04 IMAGE NAME HERE
- flavor_small: PUT THE SMALL FLAVOR NAME HERE
- flavor_medium: PUT THE MEDIUM FLAVOR NAME HERE
- flavor_large: PUT THE LARGE FLAVOR NAME HERE
- flavor_xlarge: PUT THE XLARGE FLAVOR NAME HERE
- flavor_xxlarge: PUT THE XXLARGE FLAVOR NAME HERE
-
-To get the images in your OpenStack environment, use the following
-OpenStack CLI command:
-
-::
-
- openstack image list | grep 'ubuntu'
-
-To get the flavor names used in your OpenStack environment, use the
-following OpenStack CLI command:
-
-::
-
- openstack flavor list
-
-**DNS parameters**
-
-::
-
- dns_list: PUT THE ADDRESS OFTHE EXTERNAL DNS HERE (e.g. a comma-separated list of IP addresses in your /etc/resolv.conf in UNIX-based Operating Systems). THIS LIST MUST INCLUDE THE DNS SERVER THAT OFFERS DNS AS AS SERVICE (see DCAE section below for more details)
- external_dns: PUT THE FIRST ADDRESS OF THE EXTERNAL DNS LIST HERE oam_network_cidr: 10.0.0.0/16
-
-You can use the Google Public DNS 8.8.8.8 and 4.4.4.4 address or your internal DNS servers
-
-**DCAE Parameters**
-
-DCAE spins up ONAP's data collection and analytics system in two phases.
-The first is the launching of a bootstrap VM that is specified in the
-ONAP Heat template. This VM requires a number of deployment specific
-conifiguration parameters being provided so that it can subsequently
-bring up the DCAE system. There are two groups of parameters.
-
-The first group relates to the launching of DCAE VMs, including parameters such as
-the keystone URL and additional VM image IDs/names. DCAE VMs are
-connected to the same internal network as the rest of ONAP VMs, but
-dynamically spun up by the DCAE core platform. Hence these parameters
-need to be provided to DCAE. Note that although DCAE VMs will be
-launched in the same tenant as the rest of ONAP, because DCAE may use
-MultiCloud node as the agent for interfacing with the underying cloud,
-it needs a separate keystone URL (which points to MultiCloud node
-instead of the underlying cloud).
-
-The second group of configuration parameters relate to DNS As A Service support (DNSaaS).
-DCAE requires DNSaaS for registering its VMs into organization-wide DNS service. For
-OpenStack, DNSaaS is provided by Designate. Designate support can be
-provided via an integrated service endpoint listed under the service
-catalog of the OpenStack installation; or proxyed by the ONAP MultiCloud
-service. For the latter case, a number of parameters are needed to
-configure MultiCloud to use the correct Designate service. These
-parameters are described below:
-
-::
-
- dcae_keystone_url: PUT THE KEYSTONE URL OF THE OPENSTACK INSTANCE WHERE DCAE IS DEPLOYED (Note: put the MultiCloud proxy URL if the DNSaaS is proxyed by MultiCloud)
- dcae_centos_7_image: PUT THE CENTOS7 IMAGE ID/NAME AVAILABLE AT THE OPENSTACK INSTANCE WHERE DCAE IS DEPLOYED
- dcae_security_group: PUT THE SECURITY GROUP ID/NAME TO BE USED AT THE OPENSTACK INSTANCE WHERE DCAE IS DEPLOYED
- dcae_key_name: PUT THE ACCESS KEY-PAIR NAME REGISTER AT THE OPENSTACK INSTANCE WHERE DCAE IS DEPLOYED
- dcae_public_key: PUT THE PUBLIC KEY OF A KEY-PAIR USED FOR DCAE BOOTSTRAP NODE TO COMMUNICATE WITH DCAE VMS
- dcae_private_key: PUT THE PRIVATE KEY OF A KEY-PAIR USED FOR DCAE BOOTSTRAP NODE TO COMMUNICATE WITH DCAE VMS
-
- dnsaas_config_enabled: true or false FOR WHETHER DNSAAS IS PROXYED
- dnsaas_region: PUT THE REGION OF THE OPENSTACK INSTANCE WHERE DNSAAS IS PROVIDED
- dnsaas_tenant_id: PUT THE TENANT ID/NAME OF THE OPENSTACK INSTANCE WHERE DNSAAS IS PROVIDED
- dnsaas_keystone_url: PUT THE KEYSTONE URL OF THE OPENSTACK INSTANCE WHERE DNSAAS IS PROVIDED
- dnsaas_username: PUT THE USERNAME OF THE OPENSTACK INSTANCE WHERE DNSAAS IS PROVIDED
- dnsaas_password: PUT THE PASSWORD OF THE OPENSTACK INSTANCE WHERE DNSAAS IS PROVIDED
-
-Instantiation
-~~~~~~~~~~~~~
-
-The ONAP platform can be instantiated via Horizon (OpenStack dashboard)
-or Command Line.
-
-**Instantiation via Horizon:**
-
-- Login to Horizon URL with your personal credentials
-- Click "Stacks" from the "Orchestration" menu
-- Click "Launch Stack"
-- Paste or manually upload the HEAT template file (onap_openstack.yaml) in the "Template Source" form
-- Paste or manually upload the HEAT environment file (onap_openstack.env) in the "Environment Source" form
-- Click "Next" - Specify a name in the "Stack Name" form
-- Provide the password in the "Password" form
-- Click "Launch"
-
-**Instantiation via Command Line:**
-
-- Install the HEAT client on your machine, e.g. in Ubuntu (ref. http://docs.openstack.org/user-guide/common/cli-install-openstack-command-line-clients.html):
-
-::
-
- apt-get install python-dev python-pip
- pip install python-heatclient # Install heat client
- pip install python-openstackclient # Install the Openstack client to support multiple services
-
-- Create a file (named i.e. ~/openstack/openrc) that sets all the
- environmental variables required to access Rackspace:
-
-::
-
- export OS_AUTH_URL=INSERT THE AUTH URL HERE
- export OS_USERNAME=INSERT YOUR USERNAME HERE
- export OS_TENANT_ID=INSERT YOUR TENANT ID HERE
- export OS_REGION_NAME=INSERT THE REGION HERE
- export OS_PASSWORD=INSERT YOUR PASSWORD HERE
-
-- Run the script from command line:
-
-::
-
- source ~/openstack/openrc
-
-- In order to install the ONAP platform, type:
-
-::
-
- heat stack-create STACK_NAME -f PATH_TO_HEAT_TEMPLATE(YAML FILE) -e PATH_TO_ENV_FILE # Old HEAT client, OR
- openstack stack create -t PATH_TO_HEAT_TEMPLATE(YAML FILE) -e PATH_TO_ENV_FILE STACK_NAME # New Openstack client
-
diff --git a/docs/index.rst b/docs/index.rst
index 57ac59220..eb10f12a8 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,11 +1,13 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. SPDX-License-Identifier: CC-BY-4.0
+.. _master_index:
INTEGRATION
===========
-The Integration project provides the following artifacts:
-- Heat template to deploy the virtual ressources needed for the ONAP deployment
-- Test suites and tools to check the various ONAP components based on Robot Framework
-- Artifacts and documentation for the use-case deployments
+.. toctree::
+ :maxdepth: 1
-.. include:: onap-oom-heat.rst
+ release-notes.rst
+ docs_usecases_release.rst
+ integration-resources.rst
+ integration-missions.rst
diff --git a/docs/integration-CICD.rst b/docs/integration-CICD.rst
new file mode 100644
index 000000000..5ac342008
--- /dev/null
+++ b/docs/integration-CICD.rst
@@ -0,0 +1,53 @@
+.. This work is licensed under a
+ Creative Commons Attribution 4.0 International License.
+.. integration-CICD:
+
+.. integration_main-doc:
+
+CI/CD
+=====
+
+.. important::
+ Integration team deals with 2 different CI/CD systems.
+
+ - Jenkins CI/CD, CI managed by LF IT and CD by Integration team
+ - GitLab-CI managed by Integration team
+
+Continuous Integration
+----------------------
+
+The CI part provides the following features:
+
+- Repository verification (format of the INFO.yaml)
+- Patchset verification thanks to json/yaml/python/go/rst/md linters. These Jenkins
+ verification jobs are hosted in the ci-management repository. They can vote
+ +1/-1 on patchset submission. Integration team systematically enables linters
+ on any new repository
+- Docker build: Integration team builds testsuite dockers and xtesting dockers.
+ These dockers are built then pushed to Nexus through a jjb also hosted in the
+ ci-management repository.
+
+The different verification chains are defined in https://jenkins.onap.org/:
+
+- CSIT: https://jenkins.onap.org/view/CSIT/
+- testsuite: https://jenkins.onap.org/view/testsuite/
+- integration: https://jenkins.onap.org/view/integration/
+- testsuite-robot-utils: https://jenkins.onap.org/view/testsuite-robot-utils/
+
+The Jenkins jobs (jjb) are hosted in https://git.onap.org/ci-management/.
+
+Continuous Deployment
+---------------------
+
+GitLab CD
+.........
+
+This CD is leveraging public gitlab-ci mechanism and used to deploy several ONAP
+labs:
+
+- Daily Master: daily run using OOM Master
+- Weekly Master: run once a week with longer tests
+- Gating: run on OOM, clamp or SO patchset submission. It means a full ONAP
+ deployment on demand based on new patchset declared in gerrit.
+
+See :ref:`Integration CI guideline <integration-ci>` for details.
diff --git a/docs/integration-labs.rst b/docs/integration-labs.rst
new file mode 100644
index 000000000..49915c846
--- /dev/null
+++ b/docs/integration-labs.rst
@@ -0,0 +1,38 @@
+.. This work is licensed under a
+ Creative Commons Attribution 4.0 International License.
+.. integration-labs:
+
+.. integration_main-doc:
+
+Integration Labs
+================
+
+.. important::
+ The Integration team deals with several community labs:
+
+ - The Azure staging lab
+ - The DT lab
+   - The University New Hampshire lab
+
+Additionally, integration contributors may deal with their own lab, pushing results
+in the integration portal (See DT http://testresults.opnfv.org/onap-integration/dt/dt.html)
+
+Azure staging lab
+-----------------
+
+An additional Azure staging lab has been created for Guilin. It is installed as
+any daily/weekly/gating labs (see CI/CD sections).
+Contact the Integration team to get access.
+
+DT lab
+------
+
+The DT lab reported Master daily results in addition to Istanbul daily results.
+Results are shared with the community in
+`<https://logs.onap.org/onap-integration/daily/onap-master-daily-dell/>`_
+
+University New Hampshire lab
+----------------------------
+
+Lab for community use. See `ONAP UNH lab <https://wiki.onap.org/display/DW/ONAP+UNH-IOL+Lab>`_
+for more information.
diff --git a/docs/integration-missions.rst b/docs/integration-missions.rst
new file mode 100644
index 000000000..421519859
--- /dev/null
+++ b/docs/integration-missions.rst
@@ -0,0 +1,44 @@
+.. This work is licensed under a
+ Creative Commons Attribution 4.0 International License.
+.. _integration-missions:
+
+Integration Missions
+====================
+
+.. important::
+ The Integration project is in charge of:
+
+ - Providing testing environment
+ - Supporting the use case teams
+ - Managing ONAP CI/CD chains
+ - Developing tests
+ - Providing baseline images
+ - Validating the ONAP releases
+
+The different activities may be summarized as follows (proportions are indicative):
+
+- Community support
+- Lab support
+- Use case support
+- Test development
+- Management of daily/weekly CI chains
+- Build baseline images
+- Automate tests
+- Validate the release
+
+For each release, the integration team provides the following artifacts:
+
+- A daily CI chain corresponding to the release
+- Staging labs to perform the pairwise testing (when not automated) and support
+ the use case teams
+- Baseline Java and Python images
+- oparent library to manage Java dependencies
+- Test suites and tools to check the various ONAP components
+- Use-case documentation and artifacts
+- A testsuite docker included in the ONAP cluster to execute the robot based tests
+- Configuration files (scripts, Heat templates, CSAR files) to help installing
+ and testing ONAP
+- Wiki release follow-up tables (blocking points, docker versions,...)
+
+Please see the `integration wiki page <https://wiki.onap.org/display/DW/Integration+Project>`_
+for details.
diff --git a/docs/integration-repositories.rst b/docs/integration-repositories.rst
new file mode 100644
index 000000000..2501c7321
--- /dev/null
+++ b/docs/integration-repositories.rst
@@ -0,0 +1,115 @@
+.. This work is licensed under a
+ Creative Commons Attribution 4.0 International License.
+.. integration-repositories:
+
+Integration repositories
+========================
+
+.. important::
+ The Integration project deals with lots of code repositories.
+
+Integration
+-----------
+
+The integration repository is the historical repository.
+As a consequence it includes several elements in the same repository:
+
+- Deployment scripts (deployment directory)
+- Tests: the first non robot tests (security, vCPE,..)
+- Simulators/emulators (test/mocks)
+- Integration and use cases documentation (docs)
+- Tools (bootstrap, S3Ptools)
+
+Since the Frankfurt version, we have created several smaller repositories, especially
+for the use cases and the simulators.
+It should help improve the maintenance of the different elements.
+It should also help identify, leverage and adopt existing simulators
+rather than systematically re-inventing the wheel.
+
+.. note::
+   There is a new section of repositories - pipelines. These repositories were migrated from the Orange GitLab project.
+   Code in these repositories is planned to be used to run ONAP test GitLab CI/CD pipelines on the `GitLab ONAP integration group <https://gitlab.com/onap/integration>`__
+
+.. csv-table:: Integration Repositories
+ :file: ./files/csv/repo-integration.csv
+ :widths: 30,50,20
+ :delim: ;
+ :header-rows: 1
+
+.. csv-table:: Integration Simulators
+ :file: ./files/csv/repo-simulators.csv
+ :widths: 30,50,20
+ :delim: ;
+ :header-rows: 1
+
+.. csv-table:: Integration Pipelines
+ :file: ./files/csv/repo-pipelines.csv
+ :widths: 30,50,20
+ :delim: ;
+ :header-rows: 1
+
+Testsuite
+---------
+
+The testsuite repository and its sub repositories deal exclusively with tests.
+
+The testsuite repository includes all the robotframework scripts.
+The robot pod that can be installed as part of the ONAP cluster is built from
+this repository.
+
+Several tooling repositories are associated with the robot tests (heatbridge,
+robot-python-testing-utils).
+
+.. csv-table:: Testsuite Repositories
+ :file: ./files/csv/repo-testsuite.csv
+ :widths: 30,50,20
+ :delim: ;
+ :header-rows: 1
+
+Demo
+----
+
+In this repository you will find any artifacts needed for demo, PoC and use cases
+if they do not have their own repository (mainly old use cases).
+
+.. csv-table:: Demo Repository
+ :file: ./files/csv/repo-demo.csv
+ :widths: 30,50,20
+ :delim: ;
+ :header-rows: 1
+
+Oparent
+-------
+
+.. csv-table:: Oparent Repository
+ :file: ./files/csv/repo-oparent.csv
+ :widths: 30,50,20
+ :delim: ;
+ :header-rows: 1
+
+Archived repositories
+---------------------
+
+Some repositories are archived and marked as "read-only" due to the lack of any activity in them.
+
+.. csv-table:: Archived Repositories
+ :file: ./files/csv/repo-archived.csv
+ :widths: 30,50,20
+ :delim: ;
+ :header-rows: 1
+
+
+External repositories
+---------------------
+
+Additionally, the Integration team also deals with external gitlab.com
+repositories.
+
+.. important::
+ All of these repositories should be migrated into ONAP's Gerrit
+
+.. csv-table:: Integration external repositories table
+ :file: ./files/csv/repo-integration-external.csv
+ :widths: 30,50,20
+ :delim: ;
+ :header-rows: 1
diff --git a/docs/integration-resources.rst b/docs/integration-resources.rst
new file mode 100644
index 000000000..4af90c15d
--- /dev/null
+++ b/docs/integration-resources.rst
@@ -0,0 +1,16 @@
+.. This work is licensed under a
+ Creative Commons Attribution 4.0 International License.
+.. _integration-resources:
+
+Integration Resources
+=====================
+
+.. toctree::
+ :glob:
+
+ integration-repositories.rst
+ integration-labs.rst
+ integration-tests.rst
+ integration-CICD.rst
+ integration-simulators.rst
+ integration-tooling.rst
diff --git a/docs/integration-s3p.rst b/docs/integration-s3p.rst
index f42b48911..13e36c17a 100644
--- a/docs/integration-s3p.rst
+++ b/docs/integration-s3p.rst
@@ -1,99 +1,207 @@
+.. This work is licensed under a
+ Creative Commons Attribution 4.0 International License.
.. _integration-s3p:
-ONAP Maturity Testing Notes
----------------------------
+:orphan:
-For the El Alto release, ONAP continues to improve in multiple
-areas of Scalability, Security, Stability and Performance (S3P)
-metrics.
+Stability
+=========
+.. important::
+ The Release stability has been evaluated by:
+ - The daily CI/CD chain
+ - Stability tests
-Stability
-=========
+.. note::
+ The scope of these tests remains limited and does not provide a full set of
+ KPIs to determinate the limits and the dimensioning of the ONAP solution.
+
+CI results
+----------
+
+As usual, a daily CI chain dedicated to the release is created after RC0.
+
+The daily results can be found in `LF DT lab daily results web site <https://logs.onap.org/onap-integration/daily/onap-daily-dt-oom-master/>`_.
+
+.. image:: files/s3p/jakarta-dashboard.png
+ :align: center
+
+
+Infrastructure Healthcheck Tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+These tests deal with the Kubernetes/Helm tests on ONAP cluster.
+
+The global expected criteria is **100%**.
+
+The onap-k8s and onap-k8s-teardown, providing a snapshot of the onap namespace
+in Kubernetes, as well as the onap-helm tests are expected to be PASS.
+
+.. image:: files/s3p/istanbul_daily_infrastructure_healthcheck.png
+ :align: center
-** TODO **
+Healthcheck Tests
+~~~~~~~~~~~~~~~~~
-Integration Stability Testing verifies that the ONAP platform remains fully
-functional after running for an extended amounts of time.
-This is done by repeated running tests against an ONAP instance for a period of
-72 hours.
+These tests are the traditional robot healthcheck tests and additional tests
+dealing with a single component.
-Methodology
+The expectation is **100% OK**.
+
+.. image:: files/s3p/istanbul_daily_healthcheck.png
+ :align: center
+
+Smoke Tests
~~~~~~~~~~~
-The Stability Test has two main components:
+These tests are end to end and automated use case tests.
+See the :ref:`the Integration Test page <integration-tests>` for details.
+
+The expectation is **100% OK**.
+
+.. figure:: files/s3p/istanbul_daily_smoke.png
+ :align: center
+
+Security Tests
+~~~~~~~~~~~~~~
+
+These tests are tests dealing with security.
+See the :ref:`the Integration Test page <integration-tests>` for details.
+
+Waivers have been granted on different projects for the different tests.
+The list of waivers can be found in
+https://git.onap.org/integration/seccom/tree/waivers?h=jakarta.
-- Running "ete stability72hr" Robot suite periodically. This test suite
- verifies that ONAP can instantiate vDNS, vFWCL, and VVG.
-- Set up vFW Closed Loop to remain running, then check periodically that the
- closed loop functionality is still working.
+nodeport_check_certs test is expected to fail. Even though tremendous progress
+has been made in this area, some certificates (unmaintained, upstream or integration
+robot pods) are still not correct due to bad certificate issuers (Root CA
+certificate non valid) or extra long validity. Most of the certificates have
+been installed using cert-manager and will be easily renewable.
+The expectation is **80% OK**. The criteria is met.
-Results: 100% PASS
+.. figure:: files/s3p/istanbul_daily_security.png
+ :align: center
+
+Stability tests
+---------------
+
+Stability tests have been performed on Istanbul release:
+
+- SDC stability test
+- Parallel instantiation test
+
+The results can be found in the weekly backend logs
+https://logs.onap.org/onap-integration/weekly/onap_weekly_pod4_istanbul.
+
+SDC stability test
~~~~~~~~~~~~~~~~~~
-=================== ======== ========== ======== ========= =========
-Test Case Attempts Env Issues Failures Successes Pass Rate
-=================== ======== ========== ======== ========= =========
-Stability 72 hours 72 34 0 38 100%
-vFW Closed Loop 75 7 0 68 100%
-**Total** 147 41 0 106 **100%**
-=================== ======== ========== ======== ========= =========
-
-Detailed results can be found at https://wiki.onap.org/display/DW/Dublin+Release+Stability+Testing+Status .
-
-.. note::
- - Overall results were good. All of the test failures were due to
- issues with the unstable environment and tooling framework.
- - JIRAs were created for readiness/liveness probe issues found while
- testing under the unstable environment. Patches applied to oom and
- testsuite during the testing helped reduce test failures due to
- environment and tooling framework issues.
- - The vFW Closed Loop test was very stable and self recovered from
- environment issues.
-
-
-Resilience
-==========
-
-Integration Resilience Testing verifies that ONAP can automatically recover
-from failures of any of its components.
-This is done by deleting the ONAP pods that are involved in each particular Use
-Case flow and then checking that the Use Case flow can again be executed
-successfully after ONAP recovers.
-
-Methodology
-~~~~~~~~~~~
-For each Use Case, a list of the ONAP components involved is identified.
-The pods of each of those components are systematically deleted one-by-one;
-after each pod deletion, we wait for the pods to recover, then execute the Use
-Case again to verify successful ONAP platform recovery.
-
-
-Results: 99.4% PASS
-~~~~~~~~~~~~~~~~~~~
-=============================== ======== ========== ======== ========= =========
-Use Case Attempts Env Issues Failures Successes Pass Rate
-=============================== ======== ========== ======== ========= =========
-VNF Onboarding and Distribution 49 0 0 49 100%
-VNF Instantiation 64 19 1 44 97.8%
-vFW Closed Loop 66 0 0 66 100%
-**Total** 179 19 1 159 **99.4%**
-=============================== ======== ========== ======== ========= =========
-
-Detailed results can be found at https://wiki.onap.org/display/DW/Dublin+Release+Resilience+Testing+Status .
-
-
-Deployability
-=============
-
-Smaller ONAP container images footprint reduces resource consumption,
-time to deploy, time to heal, as well as scale out resources.
-
-Minimizing the footprint of ONAP container images reduces resource
-consumption, time to deploy, time and time to heal. It also reduces
-the resources needed to scale out and time to scale in. For those
-reasons footprint minimization postively impacts the scalability of
-the ONAP platform. Smaller ONAP container images footprint reduces
-resource consumption, time to deploy, time to heal, as well as scale
-out resources.
+
+In this test, we consider the basic_onboard automated test and we run 5
+simultaneous onboarding procedures in parallel during 24h.
+
+The basic_onboard test consists in the following steps:
+
+- [SDC] VendorOnboardStep: Onboard vendor in SDC.
+- [SDC] YamlTemplateVspOnboardStep: Onboard vsp described in YAML file in SDC.
+- [SDC] YamlTemplateVfOnboardStep: Onboard vf described in YAML file in SDC.
+- [SDC] YamlTemplateServiceOnboardStep: Onboard service described in YAML file
+ in SDC.
+
+The test has been initiated on the Istanbul weekly lab on the 14th of November.
+
+As already observed in daily|weekly|gating chain, we got race conditions on
+some tests (https://jira.onap.org/browse/INT-1918).
+
+The success rate is expected to be above 95% on the 100 first model upload
+and above 80% until we onboard more than 500 models.
+
+We may also notice that the function test_duration=f(time) increases
+continuously. At the beginning the test takes about 200s, 24h later the same
+test will take around 1000s.
+Finally after 36h, the SDC systematically answers with a 500 HTTP answer code
+explaining the linear decrease of the success rate.
+
+The following graph provides a good view of the SDC stability test.
+
+.. image:: files/s3p/istanbul_sdc_stability.png
+ :align: center
+
+.. csv-table:: S3P Onboarding stability results
+ :file: ./files/csv/s3p-sdc.csv
+ :widths: 60,20,20,20
+ :delim: ;
+ :header-rows: 1
+
+.. important::
+ The onboarding duration increases linearly with the number of on-boarded
+ models, which is already reported and may be due to the fact that models
+ cannot be deleted. In fact the test client has to retrieve the list of
+ models, which is continuously increasing. No limit tests have been
+ performed.
+   However 1085 on-boarded models is already a very high figure regarding the
+ possible ONAP usage.
+ Moreover the mean duration time is much lower in Istanbul.
+ It explains why it was possible to run 35% more tests within the same
+ time frame.
+
+Parallel instantiations stability test
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The test is based on the single test (basic_vm) that can be described as follows:
+
+- [SDC] VendorOnboardStep: Onboard vendor in SDC.
+- [SDC] YamlTemplateVspOnboardStep: Onboard vsp described in YAML file in SDC.
+- [SDC] YamlTemplateVfOnboardStep: Onboard vf described in YAML file in SDC.
+- [SDC] YamlTemplateServiceOnboardStep: Onboard service described in YAML file
+ in SDC.
+- [AAI] RegisterCloudRegionStep: Register cloud region.
+- [AAI] ComplexCreateStep: Create complex.
+- [AAI] LinkCloudRegionToComplexStep: Connect cloud region with complex.
+- [AAI] CustomerCreateStep: Create customer.
+- [AAI] CustomerServiceSubscriptionCreateStep: Create customer's service
+ subscription.
+- [AAI] ConnectServiceSubToCloudRegionStep: Connect service subscription with
+ cloud region.
+- [SO] YamlTemplateServiceAlaCarteInstantiateStep: Instantiate service described
+ in YAML using SO a'la carte method.
+- [SO] YamlTemplateVnfAlaCarteInstantiateStep: Instantiate vnf described in YAML
+ using SO a'la carte method.
+- [SO] YamlTemplateVfModuleAlaCarteInstantiateStep: Instantiate VF module
+ described in YAML using SO a'la carte method.
+
+10 instantiation attempts are done simultaneously on the ONAP solution during 24h.
+
+The results can be described as follows:
+
+.. image:: files/s3p/istanbul_instantiation_stability_10.png
+ :align: center
+
+.. csv-table:: S3P Instantiation stability results
+ :file: ./files/csv/s3p-instantiation.csv
+ :widths: 60,20,20,20
+ :delim: ;
+ :header-rows: 1
+
+The results are good with a success rate above 95%. After 24h, more than 1300
+VNFs have been created and deleted.
+
+As for SDC, we can observe a linear increase of the test duration. This issue
+has been reported since Guilin. For SDC as it is not possible to delete the
+models, it is possible to imagine that the duration increases due to the fact
+that the database of models continuously increases. Therefore the client has
+to retrieve an always bigger list of models.
+But for the instantiations, it is not the case as the references
+(module, VNF, service) are cleaned at the end of each test and all the tests
+use the same model. Then the duration of an instantiation test should be
+almost constant, which is not the case. Further investigations are needed.
+
+.. important::
+ The test has been executed with the mariadb-galera replicaset set to 1
+ (3 by default). With this configuration the results during 24h are very
+ good. When set to 3, the error rate is higher and after some hours
+ most of the instantiation are failing.
+ However, even with a replicaset set to 1, a test on Master weekly chain
+ showed that the system is hitting another limit after about 35h
+ (https://jira.onap.org/browse/SO-3791).
diff --git a/docs/integration-simulators.rst b/docs/integration-simulators.rst
new file mode 100644
index 000000000..7f6688f05
--- /dev/null
+++ b/docs/integration-simulators.rst
@@ -0,0 +1,111 @@
+.. This work is licensed under a
+ Creative Commons Attribution 4.0 International License.
+.. integration-tooling:
+
+.. _integration-simulators:
+
+Simulators
+==========
+
+Simulators are regularly created for use cases. The goal of this section is to:
+
+- Highlight the existing Simulators
+- Provide recommendations when starting developing a new simulator
+
+.. important::
+ Before developing a new simulator, check that it does not exist...and
+ refactor/contribute to existing simulators rather than recreating new ones.
+
+
+Existing simulators
+-------------------
+
+.. csv-table:: Simulators
+ :file: ./files/csv/simulators.csv
+ :widths: 10,50,20,20
+ :delim: ;
+ :header-rows: 1
+
+
+Recommendations
+---------------
+
+The simulator code
+..................
+
+We recommend to create a dedicated repository (ask Integration team).
+
+.. csv-table:: Simulator repositories
+ :file: ./files/csv/repo-simulators.csv
+ :widths: 30,50,20
+ :delim: ;
+ :header-rows: 1
+
+
+Dockerization
+.............
+
+From this repository, create a jenkins job to automatically build the dockers.
+
+Helm Chart
+..........
+
+It is recommended to create a helm chart in order to run the simulators.
+
+
+Wrapper for simulators
+......................
+
+1. In order to deploy the Helm release with a simulator, place a YAML file
+describing the Helm release in src/onaptests/templates/helm_charts.
+
+ The structure of the YAML file should be like in the example below.
+ Dependencies contain all the charts that need to be pulled.
+
+ .. code-block:: YAML
+
+ # Helm release information
+ api_version: # API_VERSION
+ app_version: # APP_VERSION
+ chart_name: # SIMULATOR_NAME
+ version: # CHART_VERSION
+
+ # Helm charts that need to be pulled
+ dependencies:
+ - name: # SIMULATOR_NAME
+ version: # CHART_VERSION
+ repository: # URL
+ local_repo_name: # REPO_NAME
+
+2. Install the Helm release:
+
+ .. code-block:: Python
+
+ from onaptests.steps.wrapper.helm_charts import HelmChartStep
+
+ chart = HelmChartStep(
+ cleanup = BOOLEAN,
+ chart_info_file = YAML_FILE_NAME # name, not the path
+ )
+ chart.execute()
+
+3. Start the simulator via an API call:
+
+ .. code-block:: Python
+
+ start = SimulatorStartStep(
+ cleanup = BOOLEAN,
+ https = BOOLEAN,
+ host = HOSTNAME,
+ port = PORT,
+ endpoint = START_ENDPOINT, # if applicable
+ method = REQUEST_METHOD, # GET, POST etc.
+ data = PAYLOAD # {"json": {...}, ...}
+ )
+ start.execute()
+
+4. Undeploy the Helm release:
+
+ .. code-block:: Python
+
+ chart.cleanup()
diff --git a/docs/integration-tests.rst b/docs/integration-tests.rst
new file mode 100644
index 000000000..6453e55ef
--- /dev/null
+++ b/docs/integration-tests.rst
@@ -0,0 +1,159 @@
+.. This work is licensed under a
+ Creative Commons Attribution 4.0 International License.
+.. _integration-tests:
+
+Tests
+=====
+
+.. important::
+ Integration is in charge of several types of tests:
+
+ - Use Cases: developed by use case teams, usually complex, demonstrating high value capabilities of ONAP. They may be partially automated and even
+ integrated in CD.
+ - CSIT Tests: functional tests created by the projects, partially hosted in CSIT repository
+ - Automatic Test Cases: these use cases are usually more simple and aim to validate that ONAP is working properly.
+ These tests have been developed to validate ONAP as a software solution.
+ In theory all the main functions shall be covered by such tests in order to have more robust CI/CD and then avoid regressions.
+ These tests are usually developed and maintained by the integration team.
+
+We may also indicate that the development of the test framework python-onapsdk
+follows standard development quality rules and imposes the creation of
+unit/functional/integration tests.
+As an example, python-onapsdk requires a unit test coverage of 98% before merging
+a new feature, which is far above the project criteria in SonarCloud today.
+
+Use Cases
+---------
+
+The use cases of the last release are described in
+:ref:`Verified Use cases <docs_usecases_release>`.
+
+CSIT Tests
+----------
+
+The CSIT tests are functional tests executed by the projects on mocked
+environment to validate their components.
+Historically they were hosted in a CSIT repository.
+
+The Integration team invited the projects to bring such tests back to their home
+repositories for 2 main reasons:
+
+- integration cannot be a bottleneck: +2/merge from integration needed for each
+ project
+- most of the tests are abandoned and not maintained when hosted in a third party
+ repository leading to CI/CD resource waste and misleading test reporting
+
+Automated Tests
+---------------
+
+These tests are run daily/weekly on each new gate (new patchset in OOM, CLAMP
+or SO). They can be in any language (bash, go, python,...), leveraging any test
+framework (robotframework, MTS, python-onapsdk).
+They are all embedded in `xtesting <https://pypi.org/project/xtesting/>`_ dockers.
+
+.. hint::
+ Automatic tests are currently divided in 4 different categories:
+
+ - infrastructure-healthcheck: tests from OOM checking the ONAP namespace, certificates...
+ - healthcheck: basic tests on components
+ - smoke tests: end to end tests
+ - security tests
+
+A dashboard summarizing the status and providing the links to the test result
+page or the logs is automatically created at the end of the execution of the
+tests.
+
+.. figure:: files/tests/test-dashboard.png
+
+ Test dashboard (Guilin version)
+
+All the pages and artifacts are pushed to LF backend:
+
+- Daily chains: https://logs.onap.org/onap-integration/daily
+- Weekly chains: https://logs.onap.org/onap-integration/weekly
+- Gating chains: the result link is indicated in gerrit
+
+A video has been recorded to help launch some of the automated tests on ONAP Guilin.
+See `Running ONAP tests in Guilin Video <https://www.youtube.com/watch?v=ABvuJfyGDmw>`_
+
+Infrastructure Healthcheck Tests
+................................
+
+.. csv-table:: Infrastructure Healthcheck Tests
+ :file: ./files/csv/tests-infrastructure-healthcheck.csv
+ :widths: 20,40,20,20
+ :delim: ;
+ :header-rows: 1
+
+See `Infrastructure Healthcheck README <https://git.onap.org/integration/xtesting/tree/infra-healthcheck/README.md>`_
+to adapt then run infrastructure healthcheck tests on your own system.
+
+Please note that the onap-k8s is run 2 times in CD chains. It is run just after
+the installation (onap-k8s) and at the end of the test execution (onap-k8s-teardown)
+in order to collect the logs of the different components during the test execution.
+
+.. figure:: files/tests/test-onap-k8s.png
+
+Healthcheck Tests
+.................
+
+.. csv-table:: Healthcheck Tests
+ :file: ./files/csv/tests-healthcheck.csv
+ :widths: 20,40,20,20
+ :delim: ;
+ :header-rows: 1
+
+See `Healthcheck README <https://git.onap.org/integration/xtesting/tree/healthcheck/README.md>`_
+to adapt then run healthcheck tests on your own system.
+
+Smoke Tests
+...........
+
+.. csv-table:: Smoke Tests
+ :file: ./files/csv/tests-smoke.csv
+ :widths: 20,40,20,20
+ :delim: ;
+ :header-rows: 1
+
+There are 2 main families of smoke tests:
+
+* RobotFramework based tests, usually run from inside the cluster as a k8s job
+* Pythonsdk based tests. These tests (also known as onaptests) are consuming
+ several SDKs: the Openstack and Kubernetes SDK for the management of the cloud
+ resources and the python ONAP SDK for the interactions with ONAP
+
+To launch the robot based tests, please see
+`Robot smoke test README <https://git.onap.org/integration/xtesting/tree/smoke-usecases-robot/README.md>`_
+Standard Robot html pages are generated. See :ref:`Robot page <docs_robot>`.
+
+To launch the pythonsdk based tests, please see
+`Python smoke test README <https://git.onap.org/integration/xtesting/tree/smoke-usecases-pythonsdk/README.md>`_
+
+.. note::
+ Please note that the OpenStack minimal version is pike.
+
+An html page is generated by the pythonsdk-test tests.
+
+.. figure:: files/tests/test-basic-cnf.png
+
+Security Tests
+...............
+
+.. csv-table:: Security Tests
+ :file: ./files/csv/tests-security.csv
+ :widths: 20,40,20,20
+ :delim: ;
+ :header-rows: 1
+
+See `Security test README <https://git.onap.org/integration/xtesting/tree/security/README.md>`_
+to adapt then run the security tests on your own system.
+
+Note for security tests, the integration team follows `SECCOM recommendations and
+applies waivers granted by SECCOM if needed through xfail lists <https://git.onap.org/integration/seccom/tree/>`_.
+
+Stability tests
+---------------
+
+Ensuring the stability of ONAP is one of the missions of the Integration team.
+CI chains and stability tests are performed to help stabilising the release.
+See :ref:`Integration stability tests <integration-s3p>` for details.
diff --git a/docs/integration-tooling.rst b/docs/integration-tooling.rst
new file mode 100644
index 000000000..d615e7e27
--- /dev/null
+++ b/docs/integration-tooling.rst
@@ -0,0 +1,214 @@
+.. This work is licensed under a
+ Creative Commons Attribution 4.0 International License.
+.. _integration-tooling:
+
+Tooling
+=======
+
+.. important::
+ Integration team deals with lots of tools to complete its missions. The goal
+ of this section is to highlight some of them and redirect to their official
+ documentation. These tools can be used for CI/CD, Testing or platform management.
+
+ **Upstream tools** are privileged but when needed specific developments can be done.
+
+ Please note that none of these tools are imposed to test developers, in other
+ words, any kind of test is accepted and can be integrated, the list of tools
+ is just indicative.
+
+Integration Project
+-------------------
+
+Integration portal
+~~~~~~~~~~~~~~~~~~
+
+A portal is built to report the status of the different labs collaborating in
+Integration, see http://testresults.opnfv.org/onap-integration/
+
+.. figure:: files/CI/ONAP_CI_3.png
+ :align: center
+ :width: 6.5in
+
+The code of this web site is shared on a public gitlab project.
+
+
+Communication channels
+~~~~~~~~~~~~~~~~~~~~~~
+
+The main communication channel for real time support is the official ONAP
+Slack #integration-team chan (https://onapproject.slack.com/).
+
+You can also send a mail to onap-discuss AT lists.onap.org
+with [ONAP] [Integration] prefix in the title.
+
+Repository management
+~~~~~~~~~~~~~~~~~~~~~
+
+Since the Integration team manages a few dozen different repositories, a tool was provided to aid the process of mass editing the INFO.yaml files. It can be found `here <https://git.onap.org/integration/tree/ptl/edit_committers_info>`__.
+
+Testing
+-------
+
+Test frameworks
+~~~~~~~~~~~~~~~
+
+Robotframework
+..............
+
+`robotframework <https://robotframework.org/>`_ is a well known test framework.
+Lots of ONAP tests are leveraging this framework.
+This framework is fully developed upstream even if some extensions (python
+modules) were created especially to deal with OpenStack (see
+`python-testing-utils project <https://git.onap.org/testsuite/python-testing-utils/>`_).
+
+Some GUI tests (using Robotframework Selenium extension) had been initiated but
+not maintained, as a consequence they are not integrated in CI/CD.
+
+
+Python ONAP SDK
+...............
+
+The OpenStack and Kubernetes python SDKs are references widely adopted by
+developers and the industry. Developing a python ONAP SDK aimed to follow the
+examples of the infrastructure SDKs with the same expectations in terms of code
+quality.
+After an evaluation of the CLI project (JAVA SDK re-exposing primitives through
+python system calls), and a first prototype (onap_tests used until Frankfurt for
+end to end tests) it was decided to develop a new python SDK.
+
+This SDK has been developed in gitlab.com to benefit from the numerous built-in
+options offered by gitlab and ensure the best possible code quality.
+
+- `python SDK repository <https://gerrit.onap.org/r/admin/repos/integration/python-onapsdk>`_
+- `python SDK documentation <https://python-onapsdk.readthedocs.io/en/latest/?badge=develop>`_
+
+The project is fully Open Source, released under the Apache v2 license.
+Integration committers are invited to join the project. The main maintainers are
+ONAP integration and OOM committers.
+
+Any new feature shall respect the code quality criteria:
+
+- unit test coverage > 98%
+- functional tests (several components mock objects have been developed)
+
+.. attention::
+ Python-onapsdk is a **SDK**, it means it is a tool allowing to communicate
+ with ONAP. It is a **middleware** that can be used by test projects but it is
+ **NOT a test**.
+
+A companion project has been created in ONAP:
+`pythonsdk-tests <https://git.onap.org/testsuite/pythonsdk-tests/>`_.
+
+The pythonsdk-test project defines tests based on python-onapsdk.
+
+The tests are hosted in this repository. They consume the different needed SDK:
+python-onapsdk but also the kubernetes, the OpenStack SDK and or any needed
+additional middlewares.
+The project developed the notion of steps that can be combined and reorganized
+as needed to design a test. This project interacts with ONAP only through the
+python-onapsdk library.
+The tests are described in :ref:`The Integration Test page <integration-tests>`.
+
+The available steps are:
+
+- [CLAMP] OnboardClampStep: Onboard a SDC including a TCA blueprint
+- [CDS] ExposeCDSBlueprintprocessorNodePortStep: expose CDS blueprint nodeport (Guilin workaround)
+- [CDS] BootstrapBlueprintprocessor: Bootstrap a blueprint processor
+- [CDS] DataDictionaryUploadStep: Upload a Data Dictionary to CDS
+- [CDZ] CbaEnrichStep: Enrich CBA
+- [K8S plugin] K8SProfileStep: Create K8S profile
+- [SO] YamlTemplateVfModuleAlaCarteInstantiateStep: Instantiate VF module described in YAML using SO a'la carte method
+- [SO] YamlTemplateVlAlaCarteInstantiateStep: Instantiate network link described in YAML using SO a'la carte method.
+- [SO] YamlTemplateVnfAlaCarteInstantiateStep: Instantiate vnf described in YAML using SO a'la carte method
+- [SO] YamlTemplateServiceAlaCarteInstantiateStep: Instantiate service described in YAML using SO a'la carte method
+- [AAI] ConnectServiceSubToCloudRegionStep: Connect service subscription with cloud region
+- [AAI] CustomerServiceSubscriptionCreateStep: Create customer's service subscription
+- [AAI] CustomerCreateStep: Create customer
+- [AAI] LinkCloudRegionToComplexStep: Connect cloud region with complex
+- [AAI] ComplexCreateStep: Create complex
+- [AAI] RegisterCloudRegionStep: Register cloud region
+- [SDC] YamlTemplateServiceOnboardStep: Onboard service described in YAML file in SDC
+- [SDC] YamlTemplateVfOnboardStep: Onboard vf described in YAML file in SDC
+- [SDC] YamlTemplateVspOnboardStep: Onboard vsp described in YAML file in SDC
+- [SDC] VendorOnboardStep: Onboard vendor in SDC
+
+You can reuse the existing steps to compose your test and/or code your own step
+if it is not supported yet.
+
+The procedure to start a test is described in `pythonsdk-test README <https://git.onap.org/testsuite/pythonsdk-tests/tree/README.md>`_
+
+CI/CD
+-----
+
+The CI/CD is key for integration. It consolidates the trustability in the solution
+by the automated verification of the deployment and the execution of tests.
+Integration tests complete the component tests (unit and functional known as
+CSIT tests).
+
+Xtesting
+~~~~~~~~
+
+As the tests can be very heterogeneous (framework, language, outputs), the
+integration team integrates the tests in simple isolated execution context based
+on docker called **xtesting dockers**.
+
+Xtesting is a python library harmonizing the way to setup, run, teardown,
+manage the artifacts, manage the reporting of the tests (automatic push of the
+results on a DB backend). It was developed by
+`OPNFV functest project <https://git.opnfv.org/functest-xtesting/>`_.
+This python library is included in an alpine docker and contains the needed
+tests, their associated libraries as well as a testcases.yaml listing these tests.
+These docker files are built on any change in the integration/xtesting repository
+and daily to take into account the upstream changes.
+
+The integration project manages 5 xtesting dockers, see
+:ref:`Integration Test page <integration-tests>`.
+
+.. important::
+ **xtesting is a CI/CD framework, neither a test nor a test framework**
+
+ Testers can provide tests independently from xtesting.
+ However to be part of the CI/CD chains, an integration of the test in xtesting
+ will be required.
+
+The configuration files are provided as volumes and defined in each docker.
+The use of this CI/CD abstraction for the tests simplifies the integration
+of the test suites in any CI/CD system and harmonizes the inputs and the outputs.
+
+The official documentation can be found on
+`xtesting official web site <https://xtesting.readthedocs.io/en/latest/>`_
+
+Integration Test database
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The integration team shares a Test Result Database with the OPNFV project. All
+the test results of the CD are automatically pushed to this database.
+It is possible to retrieve the results through the Test API associated with this
+test Database.
+
+The following information are available:
+
+- List of pods allowed to push results: http://testresults.opnfv.org/onap/api/v1/pods
+- List of projects that declared test cases for CI/CD: http://testresults.opnfv.org/onap/api/v1/projects
+- List of integration test cases:
+ http://testresults.opnfv.org/onap/api/v1/projects/integration/cases
+- List of security test cases:
+ http://testresults.opnfv.org/onap/api/v1/projects/security/cases
+- Results with lots of possible filter combinations: http://testresults.opnfv.org/onap/api/v1/results?last=3
+
+It is possible to get results according to several criteria (version, case name,
+lab, period, last, CI id,..)
+See the `OPNFV test API documentation
+<https://docs.anuket.io/en/stable-lakelse/testing/ecosystem/overview.html#test-api-description>`_.
+
+Any company running ONAP Integration tests can be referenced to push their results
+to this database.
+This Database is hosted on a LF OPNFV server. Results are backed up daily.
+Integration committers can have access to this server.
+
+VNF demo Artifacts
+~~~~~~~~~~~~~~~~~~
+
+VNF demo artifacts are hosted in the demo repositories and published in
+https://nexus.onap.org/content/repositories/releases/org/onap/demo/vnf/.
diff --git a/docs/onap-integration-ci.rst b/docs/onap-integration-ci.rst
new file mode 100644
index 000000000..150c82b40
--- /dev/null
+++ b/docs/onap-integration-ci.rst
@@ -0,0 +1,131 @@
+.. _integration-ci:
+
+:orphan:
+
+Integration Continuous Integration Guide
+----------------------------------------
+
+.. important::
+ Continuous Integration is key due to the complexity of the ONAP projects.
+ Several chains have been created:
+
+ - Daily stable chain
+ - Daily master chain
+ - Gating: On demand deployment of a full ONAP solution to validate patchsets
+
+They are run on different environments (Orange labs, DT labs, Azure Cloud).
+
+The following document will detail these chains and how you could set up such
+chains and/or provide test results to the community.
+
+Integration CI Ecosystem
+------------------------
+
+Overview
+~~~~~~~~
+
+The global ecosystem can be described as follows:
+
+.. figure:: files/CI/ONAP_CI_10.png
+ :align: center
+
+Several chains are run in ONAP.
+The CI chains are triggered from different CI systems (Jenkins or gitlab-ci) (1)
+on different target environments hosted on community labs (Windriver,
+Orange, DT, E///) or Azure clouds. Jobs (installation, tests) are executed on
+these labs (2). At the end, the results are pushed through the OPNFV test API (3)
+to a test database (4) hosted by Linux Foundation on
+http://testresults.opnfv.org.
+Results can be reported in different web pages hosted on LF or on gitlab.com (5).
+
+
+Daily Chains
+~~~~~~~~~~~~
+
+CI daily chains (Master and last Stable) are run on Orange, DT using gitlab-ci
+jobs and Ericsson using jenkins jobs.
+
+Gating
+~~~~~~
+
+OOM gating has been introduced for El Alto. It consists of a deployment followed
+by a set of tests on patchsets submitted to OOM repository.
+
+The CI part is managed on gitlab.com and the deployment is executed on ONAP
+Orange lab and Azure clouds.
+The goal is to provide a feedback - and ultimately to vote - on code change
+prior to merge to consolidate the OOM Master branch.
+
+The developer can evaluate the consequences of his/her patchset on a fresh
+installation.
+
+The gating is triggered in 2 scenarios:
+
+ - new patchset in OOM
+ - comment with the magic word **oom_redeploy** is posted in the Gerrit's comment
+ section
+
+The procedure to submit new feature in CI is done in 3 steps as described in the
+figure below:
+
+.. figure:: files/CI/ONAP_CI_0.png
+ :align: center
+
+
+Visualization of the CI pipelines
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As the CI chains are triggered from different systems, several web interfaces
+can be used to visualize them.
+
+A web site has been created to centralize the links on http://testresults.opnfv.org/onap-integration/index.html
+
+For Gating and gitlab.com based CI chains, the pipelines consist of pipelines of
+pipelines managed through the chaining of .gitlab-ci.yml file thanks to an Open
+Source deployment called chained-ci (https://gitlab.com/Orange-OpenSource/lfn/ci_cd/chained-ci).
+A visualization tool is available to list all your chains as described in the
+figure below:
+
+.. figure:: files/CI/ONAP_CI_1.png
+ :align: center
+ :width: 10in
+
+If you click on any element of the chain, you will open a new window:
+
+.. figure:: files/CI/ONAP_CI_2.png
+ :align: center
+
+In order to provide the logs to the developer an additional web page has been
+created to summarize the tests and grant access to their associated logs:
+
+.. figure:: files/CI/ONAP_CI_8.png
+ :align: center
+ :width: 6.5in
+
+Additionally, for the daily chain, another page displays the results as time
+series, allowing one to see the evolution of the tests over time.
+
+.. figure:: files/CI/ONAP_CI_9.png
+ :align: center
+ :width: 6.5in
+
+
+Setup Your Own CI Chains
+------------------------
+
+If you want to setup a gitlab.com based CI chain, and want to use chained-ci,
+you can follow the tutorial on https://gitlab.com/Orange-OpenSource/lfn/ci_cd/chained-ci-handson
+
+You should be able to chain your automation projects:
+
+* Create resources
+* Deployment of Kubernetes
+* Test of your Kubernetes (using OPNFV functest-k8s tests)
+* Deployment of your ONAP (you can use your own automatic installation
+ procedure or https://gitlab.com/Orange-OpenSource/lfn/onap/onap_oom_automatic_installation/)
+* Test ONAP thanks to the different ONAP xtesting dockers covering infrastructure
+ healthcheck, components healthcheck tests, end to end tests, security tests.
+
+If you want to report your results to the community, do not hesitate to contact
+the integration team. The Test database is public but the pods must be declared
+to be allowed to report results from third party labs.
diff --git a/docs/onap-oom-heat.rst b/docs/onap-oom-heat.rst
index bb9c1abff..13e6ca6db 100644
--- a/docs/onap-oom-heat.rst
+++ b/docs/onap-oom-heat.rst
@@ -1,6 +1,8 @@
-.. _onap-oom-heat:
+.. _integration-installation:
-Integration Environement Installation
+:orphan:
+
+Integration Environment Installation
-------------------------------------
ONAP is deployed on top of kubernetes through the OOM installer.
@@ -16,16 +18,16 @@ are provided, they correspond to files used on windriver environment.
This environment is used by the integration team to validate the installation,
perform tests and troubleshoot.
-If you intend to deploy your own environement, they can be used as reference but
+If you intend to deploy your own environment, they can be used as reference but
must be adapted according to your context.
Source files
~~~~~~~~~~~~
-- HEAT template files: https://git.onap.org/integration/tree/deployment/heat/onap-rke?h=elalto
-- Sample OpenStack RC file: https://git.onap.org/integration/tree/deployment/heat/onap-rke/env/windriver/Integration-SB-00-openrc?h=elalto
-- Sample environment file: https://git.onap.org/integration/tree/deployment/heat/onap-rke/env/windriver/onap-oom.env?h=elatlo
-- Deployment script: https://git.onap.org/integration/tree/deployment/heat/onap-rke/scripts/deploy.sh?h=elalto
+- HEAT template files: https://git.onap.org/integration/tree/deployment/heat/onap-rke
+- Sample OpenStack RC file: https://git.onap.org/integration/tree/deployment/heat/onap-rke/env/windriver/Integration-SB-00-openrc
+- Sample environment file: https://git.onap.org/integration/tree/deployment/heat/onap-rke/env/windriver/onap-oom.env
+- Deployment script: https://git.onap.org/integration/tree/deployment/heat/onap-rke/scripts/deploy.sh
Heat Template Description
@@ -33,7 +35,7 @@ Heat Template Description
The ONAP Integration Project provides a sample HEAT template that
fully automates the deployment of ONAP using OOM as described in
-:ref:`ONAP Operations Manager (OOM) over Kubernetes<installing-onap>`.
+OOM documentation.
The ONAP OOM HEAT template deploys the entire ONAP platform. It spins
up an HA-enabled Kubernetes cluster, and deploys ONAP using OOM onto
@@ -126,8 +128,7 @@ Exploring the Rancher VM
The Rancher VM that is spun up by this HEAT template serves the
following key roles:
-- Hosts the /dockerdata-nfs/ NFS export shared by all the k8s VMs for persistent
- volumes
+- Hosts the /dockerdata-nfs/ NFS export shared by all the k8s VMs for persistent volumes
- git clones the oom repo into /root/oom
- git clones the integration repo into /root/integration
- Creates the helm override file at /root/integration-override.yaml
diff --git a/docs/release-notes.rst b/docs/release-notes.rst
index 884998fa1..07ba0b9f3 100644
--- a/docs/release-notes.rst
+++ b/docs/release-notes.rst
@@ -1,238 +1,119 @@
+.. _release_notes:
.. This work is licensed under a Creative Commons Attribution 4.0
International License. http://creativecommons.org/licenses/by/4.0
-.. _doc-release-notes:
+Integration Kohn Release Notes
+==============================
-Integration Release Notes
-=========================
+.. csv-table:: Integration Releases
+ :file: ./files/csv/release-integration-ref.csv
+ :widths: 50,50
+ :delim: ;
+ :header-rows: 1
+.. important::
-Integration Repo
-----------------
-
-Version: 4.0.0
-..............
-
-:Release Date: 2019-10-21
-
-**New Features**
-
-* Add new integration labs
-* Introduction of OOM Gating
-* Updated scripts for OOM daily automated deployment tests
-* Refactoring of the Integration wiki home page
-* Automation script for use cases
-* Updated java artifact versions for ONAP El Alto release
-* Cleaning of CSIT jobs
-* Update oparent library to fix security Vulnerabilities
-* Update Postman collection for test
-
-Quick Links:
- - `Integration project page <https://wiki.onap.org/display/DW/Integration+Project>`_
- - ` El Alto use testing status page <https://wiki.onap.org/display/DW/2%3A+El+Alto+Release+Integration+Testing+Status>`
-
-ONAP Maturity Testing Notes
----------------------------
-
-For El Alto release, ONAP continues to improve in multiple areas of
-Scalability, Security, Stability and Performance (S3P) metrics.
-
-In addition of the windriver lab, Master and El Alto use cases have been tested
-on Ericcson (Daily Master CI chain), Orange (Daily Master chain, Gating) and
-windriver labs (use cases, daily, long duration). See `Integration Lab portal
-<http://testresults.opnfv.org/onap-integration>`
-
-
-A gating chain has been setup for OOM. This CI chain provides feedback to the
-integration team. For each OOM change, a full ONAP deployment is triggered then
-several tests are executed (k8s verification, helm chart verification, 61 robot
-healthcheck, healthdist and end to end basic VNF tests).
-For El Alto, more than 1000 pipelines have been executed (gating, daily master
-and stable).
-The results of the tests for the OOM gating can be found ` here
-<https://orange-opensource.gitlab.io/lfn/onap/xtesting-onap-view/index.html>`
-
-Tests dealing with more than 25 test cases are executed on Windriver
-environment.
-
-The Integration team ran the 72 hours stability testing (xx% passing rate) and
-full resilience testing (xx% passing rate) at ONAP OpenLabs.
-More details in :ref:`ONAP Maturity Testing Notes <integration-s3p>`.
-
-
-Verified Use Cases and Functional Requirements
-----------------------------------------------
-
-The Integration team verified 22 use cases and functional requirements.
-The details can be found at
-:ref:`Verified Use Cases and Functional Requirements <docs_usecases>` session.
-
-O-Parent
---------
-
-Version: 2.2.0
-..............
-
-:Release Date: 2019-09-03
-
-**New Features**
-
-* Updated oparent POM files to support LF's new global job template.
-* commons-beanutils 1.9.4
-* tomcat-embed-core 9.0.24
-* jquery 3.4.1
-
-
-Version: 2.0.0
-..............
-
-:Release Date: 2019-03-08
-
-**New Features**
-
-* Updated oparent POM files to support LF's new global job template.
+ - New repositories (see dedicated section)
+ - Bug fixes
-Version: 1.2.3
-..............
+ Quick Links:
-:Release Date: 2019-02-11
+ - `Kohn Integration page <https://wiki.onap.org/display/DW/Integration+Kohn>`_
+ - `Kohn Integration JIRA follow-up <https://wiki.onap.org/display/DW/Kohn+Integration+Blocking+points>`_
+ - `Kohn Integration weather Board <https://wiki.onap.org/display/DW/0%3A+Integration+Weather+Board+for+Kohn+Release>`_
-**Bug Fixes**
+Code changes
+------------
-* Updated various library dependency versions per SECCOM input
-* Fixed Checkstyle configuration issues
-
-
-Version: 1.2.2
-..............
-
-:Release Date: 2018-11-11
-
-**Bug Fixes**
-
-* Updated Spring dependency version to fix CLM issues
-* Remove hard-coding of ONAP nexus repos
-
-
-Version: 1.2.1
-..............
-
-:Release Date: 2018-09-14
-
-**New Features**
-
-* Refactor oparent pom to support running builds against local nexus
- repos without having to modify oparent source code
-* Standardize 3rd party library versions
-
-Version: 1.2.0
-..............
-
-:Release Date: 2018-08-24
-
-**New Features**
-
-* Add depedencyManagement sub-module to declare dependecies
-
-
-Demo Artifacts (HEAT Templates)
--------------------------------
-
-Version: 1.5.0
-..............
-
-:Release Date: 2019-10-11
-
-**New Features**
-
-* vFW DT tutorial improvement
-* Helm chart for visualization operator
-* bug fixes
-* Robot enhancements for various use cases
-
-
-Version: 1.4.0
-..............
-
-:Release Date: 2019-06-13
-
-**New Features**
-
-The main changes for this release are the additional templates and
-other changes to support Use Cases such as vFWCL, vFWDT, vCPE, Scale-out,
-and TOSCA templates.
+Integration Repo
+.................
+:Release Date: 2022-10-27
-Version: 1.3.0
-..............
-:Release Date: 2018-11-15
+Version: 11.0.0 (aka Kohn)
+^^^^^^^^^^^^^^^^^^^^^^^^^^
-**New Features**
+.. csv-table:: Integration Changes
+ :file: ./files/csv/release-integration-features.csv
+ :widths: 30,70
+ :delim: ;
+ :header-rows: 1
-The main changes for this release are the additional templates and
-other changes to support Use Cases such as HPA, vCPE, Scale-out,
-and TOSCA templates.
+Onaptests (pythonsdk_tests)
+...........................
-Robot Test Suites
------------------
+Main changes:
-Version: 1.5.4
+.. csv-table:: pythonsdk_tests Changes
+ :file: ./files/csv/release-pythonsdk-features.csv
+ :widths: 30,70
+ :delim: ;
+ :header-rows: 1
-:Release Date: 2019-10-24
+Robot (Testsuite)
+.................
-**New Features**
+Version: 1.11.0
+^^^^^^^^^^^^^^^
-* bug Fixes(Teardown, control loop, alotteed properties)
-* Add repush Policy
-* CDS support
-* HV-VES SSL support
-* Add testsuite for VNF Lifecycle validation
-* Cleaning (remaining openecomp ref, ocata and lenovo healthcheck, unused or
- redundant variables and scripts)
+Main changes:
+.. csv-table:: Testsuite Changes
+ :file: ./files/csv/release-testsuite-features.csv
+ :widths: 30,70
+ :delim: ;
+ :header-rows: 1
-Version: 1.4.1
-:Release Date: 2019-06-09
+O-Parent
+........
-**New Features**
+Version: 3.3.2
+^^^^^^^^^^^^^^
-* Update vFWCL use case test script
+.. csv-table:: Oparent Changes
+ :file: ./files/csv/release-oparent-features.csv
+ :widths: 30,70
+ :delim: ;
+ :header-rows: 1
+Demo Artifacts (Heat Templates)
+...............................
-Version: 1.4.0
+Version: 1.11.0
+^^^^^^^^^^^^^^^
-:Release Date: 2019-05-24
+.. csv-table:: Demo Changes
+ :file: ./files/csv/release-demo-features.csv
+ :widths: 30,70
+ :delim: ;
+ :header-rows: 1
-**New Features**
+The demo artifacts are pushed to https://nexus.onap.org/content/repositories/releases/org/onap/demo/vnf
-* Update vFWCL use case closed loop policy
-* Fix vCPE use case test bugs
-* Support resource VL type in test script
-* Add test script for new use cases
-* Enhance existing use cases test script
-Version: 1.3.2
-..............
+Use Cases and Requirements
+--------------------------
-:Release Date: 2018-11-20
+See dedicated :ref:`Kohn Use Cases and requirements page <docs_usecases_release>`
-**New Features**
+Maturity Testing Notes
+----------------------
-* Fully automated vFW Closed Loop instantiation and testing
-* Instantiation of 5 new vCPE models
+:ref:`Maturity testing page <integration-s3p>`
+Open JIRAs/Known issues
+-----------------------
-Version: 1.3.1
-..............
+Integration
+...........
-:Release Date: 2018-11-14
+`Integration JIRA page <https://jira.onap.org/issues/?jql=project%20%3D%20Integration%20>`_
-**New Features**
+Testsuite
+.........
-* Additional health checks for new ONAP components in Casablanca
-* New ETE test suite to test Portal functionality
-* Various enhancements to improve stability of Robot tests
+`Testsuite JIRA page <https://jira.onap.org/issues/?jql=project%20%3D%20Test>`_
diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt
new file mode 100644
index 000000000..097282b97
--- /dev/null
+++ b/docs/requirements-docs.txt
@@ -0,0 +1,8 @@
+sphinx>=4.2.0 # BSD
+sphinx-rtd-theme>=1.0.0 # MIT
+sphinxcontrib-blockdiag # BSD
+sphinxcontrib-seqdiag # BSD
+sphinxcontrib-swaggerdoc
+sphinxcontrib-spelling
+sphinxcontrib-plantuml
+six
diff --git a/docs/schema-update-apis.csv b/docs/schema-update-apis.csv
new file mode 100644
index 000000000..391615363
--- /dev/null
+++ b/docs/schema-update-apis.csv
@@ -0,0 +1,49 @@
+API,Service level workflow retrieving API,Service level workflow execution API
+Name,RetrieveServiceLevelWorkflow,ExecuteServiceLevelWorkflow
+Type,Get,Post
+URL,/onap/so/infra/workflowSpecifications/v1/workflows?resourceTarget=service,/onap/so/infra/instanceManagement/v1/serviceInstances/{serviceInstanceId}/workflows/{workflow_UUID}
+Request,"Headers: application/json
+
+Path parameters: resourceTarget=service
+
+Body={
+
+}
+
+
+
+","Headers: application/json
+
+Path parameters: serviceInstances; workflow_UUID
+
+Body={
+
+ ""modelInfo"":{ #targetServiceModelVersionId
+
+ ""modelType"":""service"",
+
+ ""modelInvariantUuid"":""fe41489e-1563-46a3-b90a-1db629e4375b"",
+
+ ""modelVersionId"" : ""cd4decf6-4f27-4775-9561-0e683ed43635"",
+
+ ""modelVersion"":""1.0""
+
+ }
+
+}"
+Response,"200​ – Successful retrieval of workflows
+
+400 - Bad Request
+
+500 - Internal Server Error
+
+
+
+
+","202​ - Request has been accepted for processing
+
+400 - Bad Request
+
+500 - Internal Server Error"
+,,
+,,
diff --git a/docs/simulators/nf_simulator.rst b/docs/simulators/nf_simulator.rst
new file mode 100644
index 000000000..64a6ac3b5
--- /dev/null
+++ b/docs/simulators/nf_simulator.rst
@@ -0,0 +1,148 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License. http://creativecommons.org/licenses/by/4.0
+
+.. _nf_simulator:
+
+:orphan:
+
+NF Simulator
+============
+
+Description
+-----------
+An idea behind NF (Network Function) Simulator is to introduce simulator, which supports ORAN defined O1 interface (reporting of NF events to Service Management Orchestrators).
+Within the use-case, it is expected that an NF configuration change, happening due to multiple reasons (network mechanism triggered change - e.g. discovery of neighbours), is reported to the network management system using ONAP's VES REST events.
+The simulator is expected to cover planned NF behaviour - receive the config change via a NetConf protocol and report that change (and potentially other related changes) to the network management system using ONAP's VES events.
+
+|image1|
+
+**Figure 1. Architecture Overview**
+
+1. NF Simulator code is stored in https://github.com/onap/integration-simulators-nf-simulator and all its sub repos are:
+
+ - for VES Client - https://github.com/onap/integration-simulators-nf-simulator-ves-client
+ - for Netconf Server - https://github.com/onap/integration-simulators-nf-simulator-netconf-server
+ - for AVCN Manager - https://github.com/onap/integration-simulators-nf-simulator-avcn-manager
+ - for PM HTTPS Server - https://github.com/onap/integration-simulators-nf-simulator-pm-https-server
+
+2. For the above components, the following have been prepared:
+
+ - docker images in ONAP Nexus:
+
+ - VES Client image - nexus3.onap.org:10001/onap/org.onap.integration.nfsimulator.vesclient
+ - AVCN Manager image - nexus3.onap.org:10001/onap/org.onap.integration.nfsimulator.avcnmanager
+ - PM HTTPS Server image - nexus3.onap.org:10001/onap/org.onap.integration.nfsimulator.pmhttpsserver
+ - Netconf Server images - nexus3.onap.org:10001/onap/org.onap.integration.nfsimulator.netconfserver
+
+ - example helm charts:
+
+ - `Helm charts <https://github.com/onap/integration-simulators-nf-simulator/tree/master/helm>`_
+
+**VES Client, Netconf Server and PM HTTPS Server can be used and deployed separately depending on needs.**
+
+Only the AVCN Manager connects the VES Client with the Netconf Server in order to support the O1 interface.
+
+1. VES Client:
+--------------
+
+1. Purpose of VES Client
+
+ Application that generates VES events on demand.
+
+2. What does it do?
+
+ - Support both basic auth and TLS CMPv2 method of authentication.
+ - Allow to turn on and turn off hostname verification in SSL.
+ - Allow to send one-time event and periodic events, based on event templates.
+ - Expose API to manage VES Client
+ - Provide template mechanism (Template is a draft event. Merging event with patch will result in valid VES event.
+ Template itself should be a correct VES event as well as valid json object. )
+ - Patching - User is able to provide patch in request, which will be merged into template.
+ - Simulator support corresponding keywords in templates: RandomInteger(start,end), RandomPrimitiveInteger(start,end), RandomInteger,
+ RandomString(length), RandomString, Timestamp, TimestampPrimitive, Increment
+ - In place variables support - Simulator supports dynamic keywords e.g. #dN to automatically substitute selected phrases in defined json schema.
+ - Logging - Every start of simulator will generate new logs that can be found in docker ves-client container.
+ - Swagger - Detailed view of simulator REST API is available via Swagger UI
+ - History - User is able to view events history.
+
+2. Netconf Server:
+------------------
+
+1. Purpose of Netconf Server
+
+ This server uses sysrepo to simulate network configuration.
+ It is based on sysrepo-netopeer2 image.
+
+2. What does it do?
+
+ Server allows to:
+
+ - Install custom configuration models on start up.
+ - Change configuration of those modules at runtime.
+ - Use TLS custom certificates
+ - Configure change subscription for particular YANG modules (Netconf server image run python application on the startup.)
+ - Manage netconf server using REST interface, with endpoints:
+
+ - GET /healthcheck returns 200 "UP" if server is up and running
+ - POST /readiness returns 200 "Ready" if server is ready, if not, returns 503 "Not Ready"
+ - POST /change_config/<path:module_name> changes configuration and returns 202 "Accepted"
+ - GET /change_history returns 200 and change history as json
+ - GET /get_config/<path:module_name> returns 200 and current configuration
+
+3. AVCN Manager:
+----------------
+
+1. Purpose of AVCN Manager
+
+ Manager that fetches changes of configuration from Kafka and sends them to the VES client.
+
+2. What does it do?
+
+ The manager processes notifications from the NETCONF server. It does this by being a subscriber of a Kafka topic that is fed with NETCONF notifications. Incoming notifications are then processed and the output of this processing is sent to the VES client.
+
+4. PM HTTPS Server:
+-------------------
+
+1. Purpose of PM HTTPS Server
+
+ Server that is used in the Bulk PM use case over HTTPS
+
+2. What does it do?
+
+ - Support TLS (CMPv2) method of authentication (used during connection to Data File Collector)
+ - Allow to use custom certificates
+ - Expose REST API in order to manage PM files stored in HTTPS server
+
+
+Guides
+======
+
+User And Developer Guide
+------------------------
+1. User guides:
+
+ - `VES Client user guide. <https://github.com/onap/integration-simulators-nf-simulator-ves-client/blob/master/README.md>`_
+ - `AVCN Manager user guide. <https://github.com/onap/integration-simulators-nf-simulator-avcn-manager/blob/master/README.md>`_
+ - `PM HTTPS Server user guide. <https://github.com/onap/integration-simulators-nf-simulator-pm-https-server/blob/master/README.md>`_
+ - `Netconf Server user guide. <https://github.com/onap/integration-simulators-nf-simulator-netconf-server/blob/master/README.md>`_
+ - `Netconf Notification Application user guide. <https://github.com/onap/integration-simulators-nf-simulator-netconf-server/blob/master/src/python/README.md>`_
+ - `NF Simulator CLI user guide <https://github.com/onap/integration-simulators-nf-simulator/blob/master/simulator-cli/README.md>`_
+
+2. Jenkins builds:
+
+ - `VES Client jenkins builds <https://jenkins.onap.org/view/integration-simulators-nf-simulator-ves-client/>`_
+ - `AVCN Manager jenkins builds <https://jenkins.onap.org/view/integration-simulators-nf-simulator-avcn-manager/>`_
+ - `PM HTTPS Server jenkins builds <https://jenkins.onap.org/view/integration-simulators-nf-simulator-pm-https-server/>`_
+ - `Netconf Server jenkins builds <https://jenkins.onap.org/view/integration-simulators-nf-simulator-netconf-server/>`_
+
+3. NF Simulator CSIT test cases:
+
+ - `Project integration-simulators-nf-simulator-netconf-server-master-verify-csit-testsuites <https://jenkins.onap.org/view/integration-simulators-nf-simulator-netconf-server/job/integration-simulators-nf-simulator-netconf-server-master-verify-csit-testsuites/>`_
+ - `Project integration-simulators-nf-simulator-netconf-server-master-csit-testsuites <https://jenkins.onap.org/view/integration-simulators-nf-simulator-netconf-server/job/integration-simulators-nf-simulator-netconf-server-master-csit-testsuites/>`_
+
+4. NF Simulator sanity checks:
+
+ - https://github.com/onap/integration-simulators-nf-simulator/tree/master/sanitycheck
+
+.. |image1| image:: ../files/simulators/NF-Simulator.png
+ :width: 10in \ No newline at end of file
diff --git a/docs/tox.ini b/docs/tox.ini
new file mode 100644
index 000000000..46075fa6a
--- /dev/null
+++ b/docs/tox.ini
@@ -0,0 +1,31 @@
+[tox]
+minversion = 1.6
+envlist = docs,docs-linkcheck,docs-spellcheck
+skipsdist = true
+
+[testenv:docs]
+basepython = python3.8
+deps =
+ -r{toxinidir}/requirements-docs.txt
+ -chttps://releases.openstack.org/constraints/upper/yoga
+ -chttps://git.onap.org/doc/plain/etc/upper-constraints.onap.txt?h=master
+commands =
+ sphinx-build -W -q -b html -n -d {envtmpdir}/doctrees {toxinidir} {toxinidir}/_build/html
+
+[testenv:docs-linkcheck]
+basepython = python3.8
+deps =
+ -r{toxinidir}/requirements-docs.txt
+ -chttps://releases.openstack.org/constraints/upper/yoga
+ -chttps://git.onap.org/doc/plain/etc/upper-constraints.onap.txt?h=master
+commands =
+ sphinx-build -W -q -b linkcheck -d {envtmpdir}/doctrees {toxinidir} {toxinidir}/_build/linkcheck
+
+[testenv:docs-spellcheck]
+basepython = python3.8
+deps =
+ -r{toxinidir}/requirements-docs.txt
+ -chttps://releases.openstack.org/constraints/upper/yoga
+ -chttps://git.onap.org/doc/plain/etc/upper-constraints.onap.txt?h=master
+commands =
+ sphinx-build -W -q -b spelling -d {envtmpdir}/doctrees {toxinidir} {toxinidir}/_build/spellcheck
diff --git a/docs/usecases/deprecated_usecases.rst b/docs/usecases/deprecated_usecases.rst
new file mode 100644
index 000000000..6d82140db
--- /dev/null
+++ b/docs/usecases/deprecated_usecases.rst
@@ -0,0 +1,28 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+ International License. http://creativecommons.org/licenses/by/4.0
+
+.. _deprecated_usecases:
+
+:orphan:
+
+Deprecated Use Cases and Functional Requirements
+================================================
+
+Each ONAP release deals with lots of use cases and functional requirements.
+When possible, it is strongly recommended to automate the use cases.
+In this case Integration team can take over the maintenance part of the use case.
+If not automated, the use cases are fully under the responsibility of the use
+case team and usually valid for the release the team was involved in.
+However, these use cases and their artifacts remain in the repository.
+Anyone can give them a try even if the use cases are no longer supported.
+
+This section deals with such use cases.
+These use cases have been part of one release but have not been tested on the
+last releases. They might be fully deprecated or usable through minor adaptations.
+The entry points are the use case owners.
+
+.. csv-table:: deprecated use case table
+ :file: ../files/csv/usecases-deprecated.csv
+ :widths: 50,20,10,20
+ :header-rows: 1
+ :delim: ;
diff --git a/docs/usecases/release_automated_usecases.rst b/docs/usecases/release_automated_usecases.rst
new file mode 100644
index 000000000..932a0d4f3
--- /dev/null
+++ b/docs/usecases/release_automated_usecases.rst
@@ -0,0 +1,37 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+ International License. http://creativecommons.org/licenses/by/4.0
+
+.. _release_automated_usecases:
+
+:orphan:
+
+Automated Use Cases
+-------------------
+
+These use cases have been run on the Daily CI chains and are used to
+validate the integration of any new dockers in OOM.
+New tests are indicated in **bold**.
+
+.. csv-table:: Infrastructure Healthcheck Tests
+ :file: ../files/csv/tests-infrastructure-healthcheck.csv
+ :widths: 20,40,20,20
+ :delim: ;
+ :header-rows: 1
+
+.. csv-table:: Healthcheck Tests
+ :file: ../files/csv/tests-healthcheck.csv
+ :widths: 20,40,20,20
+ :delim: ;
+ :header-rows: 1
+
+.. csv-table:: Smoke Tests
+ :file: ../files/csv/tests-smoke.csv
+ :widths: 20,40,20,20
+ :delim: ;
+ :header-rows: 1
+
+.. csv-table:: Security Tests
+ :file: ../files/csv/tests-security.csv
+ :widths: 20,40,20,20
+ :delim: ;
+ :header-rows: 1
diff --git a/docs/usecases/release_non_functional_requirements.rst b/docs/usecases/release_non_functional_requirements.rst
new file mode 100644
index 000000000..b3f5a0b54
--- /dev/null
+++ b/docs/usecases/release_non_functional_requirements.rst
@@ -0,0 +1,15 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+ International License. http://creativecommons.org/licenses/by/4.0
+
+.. _release_non_functional_requirements:
+
+:orphan:
+
+Non Functional Requirements
+----------------------------
+
+.. csv-table:: non functional requirements table
+ :file: ../files/csv/usecases-non-functional-requirements.csv
+ :widths: 5,45,12,38
+ :delim: ;
+ :header-rows: 1
diff --git a/docs/usecases/release_requirements.rst b/docs/usecases/release_requirements.rst
new file mode 100644
index 000000000..0ec4b1b95
--- /dev/null
+++ b/docs/usecases/release_requirements.rst
@@ -0,0 +1,15 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+ International License. http://creativecommons.org/licenses/by/4.0
+
+.. _release_requirements:
+
+:orphan:
+
+Functional Requirements
+-----------------------
+
+.. csv-table:: functional requirements table
+ :file: ../files/csv/usecases-functional-requirements.csv
+ :widths: 5,45,12,38
+ :delim: ;
+ :header-rows: 1
diff --git a/docs/usecases/release_usecases.rst b/docs/usecases/release_usecases.rst
new file mode 100644
index 000000000..749c4483a
--- /dev/null
+++ b/docs/usecases/release_usecases.rst
@@ -0,0 +1,37 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+ International License. http://creativecommons.org/licenses/by/4.0
+
+.. _release_usecases:
+
+:orphan:
+
+Kohn Use Cases
+==============
+
+Description
+-----------
+
+This section includes use cases and functional requirements which have been
+officially verified in Kohn release by the ONAP community.
+
+For each use case or functional requirement, you can find contact names and a
+link to the associated documentation.
+
+This documentation deals with
+
+ 1. What has been implemented
+ 2. Step by step instructions to deploy and execute the tests, including the
+ links to download the related assets and resources
+ 3. Known issues and workarounds
+
+Use cases
+---------
+
+Kohn Official Use Cases
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. csv-table::
+ :file: ../files/csv/usecases.csv
+ :widths: 10,40,20,30
+ :delim: ;
+ :header-rows: 1