From 451a3400b76511393c62a444f588a4ed15f4a549 Mon Sep 17 00:00:00 2001
From: Michael Lando
Date: Sun, 19 Feb 2017 10:28:42 +0200
Subject: Initial OpenECOMP SDC commit

Change-Id: I0924d5a6ae9cdc161ae17c68d3689a30d10f407b
Signed-off-by: Michael Lando
---
 ui-ci-dev/src/main/resources/Files/CP.yml | 65 ++
 .../src/main/resources/Files/CP_LAN - Copy.yml | 13 +
 ui-ci-dev/src/main/resources/Files/CP_LAN.yml | 19 +
 ui-ci-dev/src/main/resources/Files/CP_WAN.yml | 19 +
 .../src/main/resources/Files/Heat-File 1.yaml | 791 +++++++++++++++++++++
 .../src/main/resources/Files/Heat-File 2.yaml | 791 +++++++++++++++++++++
 ui-ci-dev/src/main/resources/Files/Heat-File.yaml | 791 +++++++++++++++++++++
 .../main/resources/Files/InValid_tosca_File .yml | 34 +
 ui-ci-dev/src/main/resources/Files/JDM_vf.yml | 57 ++
 ui-ci-dev/src/main/resources/Files/JDM_vfc.yml | 57 ++
 .../src/main/resources/Files/Sample_CSAR.csar | Bin 0 -> 1094 bytes
 .../src/main/resources/Files/Sample_CSAR2.csar | Bin 0 -> 1085 bytes
 ui-ci-dev/src/main/resources/Files/UCPE_VFC.yml | 65 ++
 ui-ci-dev/src/main/resources/Files/VF.yml | 17 +
 ui-ci-dev/src/main/resources/Files/VFC.yml | 77 ++
 .../src/main/resources/Files/VFCWithAttributes.yml | 43 ++
 ui-ci-dev/src/main/resources/Files/VL.yml | 17 +
 ui-ci-dev/src/main/resources/Files/Valid xml.xml | 4 +
 .../main/resources/Files/Valid_tosca_Mycompute.yml | 35 +
 .../resources/Files/Valid_tosca_ReplaceTest.yml | 35 +
 .../Files/hot-nimbus-oam-volumes_v0.3.env | 6 +
 .../main/resources/Files/hot-nimbus-oam_v0.6.env | 18 +
 .../main/resources/Files/hot-nimbus-oam_v0.6.yaml | 108 +++
 .../main/resources/Files/hot-nimbus-pcm_v0.6.yaml | 80 +++
 ui-ci-dev/src/main/resources/Files/myYang.xml | 8 +
 ui-ci-dev/src/main/resources/Files/mycompute.yml | 18 +
 .../main/resources/Files/service_with_inputs.csar | Bin 0 -> 35388 bytes
 ui-ci-dev/src/main/resources/Files/vADTRAN.zip | Bin 0 -> 2499 bytes
 ui-ci-dev/src/main/resources/Files/vCDN.zip | Bin 0 -> 4547 bytes
 ui-ci-dev/src/main/resources/Files/vFW_VF.yml | 58 ++
 ui-ci-dev/src/main/resources/Files/vFW_VFC.yml | 58 ++
 ui-ci-dev/src/main/resources/Files/vRouter_vfc.yml | 78 ++
 .../main/resources/Files/valid HEAT_ENV files.env | 54 ++
 .../src/main/resources/Files/validHEATfiles.yaml | 787 ++++++++++++++++++++
 ui-ci-dev/src/main/resources/Files/valid_vf.csar | Bin 0 -> 1316 bytes
 .../src/main/resources/Files/vf_with_groups.csar | Bin 0 -> 25253 bytes
 ui-ci-dev/src/main/resources/Files/yamlSample.yml | 5 +
 ui-ci-dev/src/main/resources/Files/yamlSample2.yml | 5 +
 38 files changed, 4213 insertions(+)
 create mode 100644 ui-ci-dev/src/main/resources/Files/CP.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/CP_LAN - Copy.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/CP_LAN.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/CP_WAN.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/Heat-File 1.yaml
 create mode 100644 ui-ci-dev/src/main/resources/Files/Heat-File 2.yaml
 create mode 100644 ui-ci-dev/src/main/resources/Files/Heat-File.yaml
 create mode 100644 ui-ci-dev/src/main/resources/Files/InValid_tosca_File .yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/JDM_vf.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/JDM_vfc.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/Sample_CSAR.csar
 create mode 100644 ui-ci-dev/src/main/resources/Files/Sample_CSAR2.csar
 create mode 100644 ui-ci-dev/src/main/resources/Files/UCPE_VFC.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/VF.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/VFC.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/VFCWithAttributes.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/VL.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/Valid xml.xml
 create mode 100644 ui-ci-dev/src/main/resources/Files/Valid_tosca_Mycompute.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/Valid_tosca_ReplaceTest.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/hot-nimbus-oam-volumes_v0.3.env
 create mode 100644 ui-ci-dev/src/main/resources/Files/hot-nimbus-oam_v0.6.env
 create mode 100644 ui-ci-dev/src/main/resources/Files/hot-nimbus-oam_v0.6.yaml
 create mode 100644 ui-ci-dev/src/main/resources/Files/hot-nimbus-pcm_v0.6.yaml
 create mode 100644 ui-ci-dev/src/main/resources/Files/myYang.xml
 create mode 100644 ui-ci-dev/src/main/resources/Files/mycompute.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/service_with_inputs.csar
 create mode 100644 ui-ci-dev/src/main/resources/Files/vADTRAN.zip
 create mode 100644 ui-ci-dev/src/main/resources/Files/vCDN.zip
 create mode 100644 ui-ci-dev/src/main/resources/Files/vFW_VF.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/vFW_VFC.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/vRouter_vfc.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/valid HEAT_ENV files.env
 create mode 100644 ui-ci-dev/src/main/resources/Files/validHEATfiles.yaml
 create mode 100644 ui-ci-dev/src/main/resources/Files/valid_vf.csar
 create mode 100644 ui-ci-dev/src/main/resources/Files/vf_with_groups.csar
 create mode 100644 ui-ci-dev/src/main/resources/Files/yamlSample.yml
 create mode 100644 ui-ci-dev/src/main/resources/Files/yamlSample2.yml

(limited to 'ui-ci-dev/src/main/resources/Files')

diff --git a/ui-ci-dev/src/main/resources/Files/CP.yml b/ui-ci-dev/src/main/resources/Files/CP.yml
new file mode 100644
index 0000000000..48b592265f
--- /dev/null
+++ b/ui-ci-dev/src/main/resources/Files/CP.yml
@@ -0,0 +1,65 @@
+tosca_definitions_version: tosca_simple_yaml_1_0_0
+
+node_types:
+  org.openecomp.resource.cp.CP:
+    derived_from: tosca.nodes.Root
+    properties:
+      att-ucpe-part-number:
+        type: string
+      vendor-name:
+        type: string
+        required: true
+      vendor-model:
+        type: string
+        required: true
+      total-vcpu:
+        type: integer
+        description: number of vCPUs
+      total-memory:
+        type: integer
+        description: GB
+      total-disk:
+        type: integer
+        description: GB
+      base-system-image-file-name:
+        type: string
+      linux-host-vendor:
+        type: string
+      linux-host-os-version:
+        type: version
+      base-system-software:
+        type: string
+      jdm-vcpu:
+        type: integer
+      jdm-memory:
+        type: integer
+        description: GB
+      jdm-disk:
+        type: integer
+        description: GB
+      jdm-version:
+        type: string
+      jcp-vcpu:
+        type: integer
+      jcp-memory:
+        type: integer
+        description: GB
+      jcp-disk:
+        type: integer
+        description: GB
+      jcp-version:
+        type: version
+    capabilities:
+      vnf_hosting:
+        type: tosca.capabilities.Container
+        description: Provides hosting capability for VNFs
+      WAN_connectivity:
+        type: tosca.capabilities.network.Bindable
+        valid_source_types: [org.openecomp.cp.Wan]
+        description: external WAN1 n/w interface
+        occurrences: [1,2]
+      LAN_connectivity:
+        type: tosca.capabilities.network.Bindable
+        valid_source_types: [org.openecomp.cp.Lan]
+        description: external LAN n/w interface
+        occurrences: [1,8]
\ No newline at end of file
diff --git a/ui-ci-dev/src/main/resources/Files/CP_LAN - Copy.yml b/ui-ci-dev/src/main/resources/Files/CP_LAN - Copy.yml
new file mode 100644
index
0000000000..224d61f2c9 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/CP_LAN - Copy.yml @@ -0,0 +1,13 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.vfc.uCPE: + derived_from: tosca.nodes.Root + properties: + type: + type: string + required: false + requirements: + - virtualLink: + capability: tosca.capabilities.network.Linkable + relationship: tosca.relationships.network.LinksTo diff --git a/ui-ci-dev/src/main/resources/Files/CP_LAN.yml b/ui-ci-dev/src/main/resources/Files/CP_LAN.yml new file mode 100644 index 0000000000..a96084ba34 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/CP_LAN.yml @@ -0,0 +1,19 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.cp.LAN: + derived_from: org.openecomp.resource.cp.CP + properties: + type: + type: string + required: false + requirements: + - virtualLink_in: + capability: tosca.capabilities.network.Linkable + relationship: tosca.relationships.network.LinksTo + - virtualLink_out: + capability: tosca.capabilities.network.Linkable + relationship: tosca.relationships.network.LinksTo + - virtualbinding: + capability: tosca.capabilities.network.Bindable + relationship: tosca.relationships.network.BindsTo \ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/CP_WAN.yml b/ui-ci-dev/src/main/resources/Files/CP_WAN.yml new file mode 100644 index 0000000000..1bce457d43 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/CP_WAN.yml @@ -0,0 +1,19 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.cp.WAN: + derived_from: org.openecomp.resource.cp.CP + properties: + type: + type: string + required: false + requirements: + - virtualLink_in: + capability: tosca.capabilities.network.Linkable + relationship: tosca.relationships.network.LinksTo + - virtualLink_out: + capability: tosca.capabilities.network.Linkable + relationship: tosca.relationships.network.LinksTo + - virtualbinding: + capability: tosca.capabilities.network.Bindable + relationship: tosca.relationships.network.BindsTo \ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/Heat-File 1.yaml b/ui-ci-dev/src/main/resources/Files/Heat-File 1.yaml new file mode 100644 index 0000000000..d332078d35 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/Heat-File 1.yaml @@ -0,0 +1,791 @@ +heat_template_version: 2013-05-23 +################################# +# +# Changes in v0.2: +# - Unique availability zone for each VM +# - LAN8 and SLAN networks removed according to latest Prod/Type I diagram +# - 2 DB VMs added +# - Images corrected +# - VM start-up order: SMP->DB->BE->FE (no error handling yet) +# - Provisioning scripts placeholders +# +################################# + +description: ASC Template + +parameters: +# availability_zone_smp0: +# type: string +# default: nova +# availability_zone_smp1: +# type: string +# default: nova +# availability_zone_fe0: +# type: string +# default: nova +# availability_zone_fe1: +# type: string +# default: nova +# availability_zone_db0: +# type: string +# default: nova +# availability_zone_db1: +# type: string +# default: nova +# availability_zone_be0: +# type: string +# default: nova +# availability_zone_be1: +# type: string +# default: nova +# availability_zone_be2: +# type: string +# default: nova +# availability_zone_be3: +# type: string +# default: nova +# availability_zone_be4: +# type: string +# default: nova + + vnf_name: + type: string + description: Unique name for this 
VNF instance + default: This_is_the_SCP_name + vnf_id: + type: string + description: Unique ID for this VNF instance + default: This_is_ths_SCP_id + + flavor_scp_be_id: + type: string + description: flavor type + default: a1.Small + flavor_scp_fe_id: + type: string + description: flavor type + default: a1.Small + flavor_smp_id: + type: string + description: flavor type + default: a1.Small + flavor_db_id: + type: string + description: flavor type + default: a1.Small + image_scp_be_id: + type: string + description: Image use to boot a server + default: asc_base_image_be + image_scp_fe_id: + type: string + description: Image use to boot a server + default: asc_base_image_fe + image_smp_id: + type: string + description: Image use to boot a server + default: asc_base_image_smp + image_db_id: + type: string + description: Image use to boot a server + default: asc_base_image_db + + int_vscp_fe_cluster_net_id: + type: string + description: LAN2 FE Cluster/KA + int_vscp_fe_cluster_cidr: + type: string + description: Private Network2 Address (CIDR notation) + int_vscp_cluster_net_id: + type: string + description: LAN3 Cluster + int_vscp_cluster_cidr: + type: string + description: Private Network3 Address (CIDR notation) + int_vscp_db_network_net_id: + type: string + description: LAN4 DB + int_vscp_db_network_cidr: + type: string + description: Private Network4 Address (CIDR notation) + SIGNET_vrf_A1_direct_net_id: + type: string + description: Network name for SIGTRAN_A + SIGNET_vrf_B1_direct_net_id: + type: string + description: Network name for SIGTRAN_B + Cricket_OCS_protected_net_id: + type: string + description: Network name for CRICKET_OCS + OAM_direct_net_id: + type: string + description: Network name for OAM + be0_Cricket_OCS_protected_ips: + type: string + label: be0 port 5 OAM ip address + description: be0 port 5 OAM ip address + be1_Cricket_OCS_protected_ips: + type: string + label: be1 port 5 OAM ip address + description: be1 port 5 OAM ip address + be2_Cricket_OCS_protected_ips: + type: string + label: be2 port 5 OAM ip address + description: be2 port 5 OAM ip address + be3_Cricket_OCS_protected_ips: + type: string + label: be3 port 5 OAM ip address + description: be3 port 5 OAM ip address + be4_Cricket_OCS_protected_ips: + type: string + label: be4 port 5 OAM ip address + description: be4 port 5 OAM ip address + be0_OAM_direct_ips: + type: string + label: be0 port 7 OAM ip address + description: be0 port 7 OAM ip address + be1_OAM_direct_ips: + type: string + label: be1 port 7 OAM ip address + description: be1 port 7 OAM ip address + be2_OAM_direct_ips: + type: string + label: be2 port 7 OAM ip address + description: be2 port 7 OAM ip address + be3_OAM_direct_ips: + type: string + label: be3 port 7 OAM ip address + description: be3 port 7 OAM ip address + be4_OAM_direct_ips: + type: string + label: be4 port 7 OAM ip address + description: be4 port 7 OAM ip address + fe0_SIGNET_vrf_A1_direct_ips: + type: string + label: fe0 port 0 SIGTRAN ip address + description: fe0 port 0 SIGTRAN ip address + fe0_OAM_direct_ips: + type: string + label: fe0 port 7 OAM ip address + description: fe0 port 7 OAM ip address + fe1_SIGNET_vrf_B1_direct_ips: + type: string + label: fe1 port 1 SIGTRAN ip address + description: fe1 port 1 SIGTRAN ip address + fe1_OAM_direct_ips: + type: string + label: fe1 port 7 OAM ip address + description: fe1 port 7 OAM ip address + smp0_OAM_direct_ips: + type: string + label: smp0 port 7 OAM ip address + description: smp0 port 7 OAM ip address + smp1_OAM_direct_ips: + 
type: string + label: smp1 port 7 OAM ip address + description: smp1 port 7 OAM ip address + db0_OAM_direct_ips: + type: string + label: db0 port 7 OAM ip address + description: smp0 port 7 OAM ip address + db1_OAM_direct_ips: + type: string + label: smp1 port 7 OAM ip address + description: db1 port 7 OAM ip address + vm_scp_be0_name: + type: string + default: vSCP_BE0 + description: name of VM + vm_scp_be1_name: + type: string + default: vSCP_BE1 + description: name of VM + vm_scp_be2_name: + type: string + default: vSCP_BE2 + description: name of VM + vm_scp_be3_name: + type: string + default: vSCP_BE3 + description: name of VM + vm_scp_be4_name: + type: string + default: vSCP_BE4 + description: name of VM + vm_scp_fe0_name: + type: string + default: vSCP_FE0 + description: name of VM + vm_scp_fe1_name: + type: string + default: vSCP_FE1 + description: name of VM + vm_smp0_name: + type: string + default: vSMP0 + description: name of VM + vm_smp1_name: + type: string + default: vSMP1 + description: name of VM + vm_db0_name: + type: string + default: vDB0 + description: name of VM + vm_db1_name: + type: string + default: vDB1 + description: name of VM + +resources: +# scp_be_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: scp_be_wait_handle } +# count: 5 +# timeout: 300 +# scp_be_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# scp_fe_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: scp_fe_wait_handle } +# count: 2 +# timeout: 300 +# scp_fe_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# smp_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: smp_wait_handle } +# count: 2 +# timeout: 300 +# smp_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# db_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: db_wait_handle } +# count: 2 +# timeout: 300 +# db_wait_handle: +# type: OS::Heat::WaitConditionHandle + + FE_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + BE_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + SMP_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + DB_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + + FE_Clustering_KA: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_fe_cluster_net_id } + + FE_Clustering_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: FE_Clustering_KA } + cidr: { get_param: int_vscp_fe_cluster_cidr } + + Clustering_Network: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_cluster_net_id } + + Clustering_Network_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: Clustering_Network } + cidr: { get_param: int_vscp_cluster_cidr } + + DB_Network: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_db_network_net_id } + + DB_Network_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: DB_Network } + cidr: { get_param: int_vscp_db_network_cidr } + + server_scp_be0: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be0_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be0 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: 
BE_Affinity } } + networks: + - port: { get_resource: be0_port_3 } + - port: { get_resource: be0_port_4 } + - port: { get_resource: be0_port_5 } + - port: { get_resource: be0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be0_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + be0_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be0_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be0_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be0_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be0_Cricket_OCS_protected_ips}}] + + be0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be0_OAM_direct_ips}}] + + server_scp_be1: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be1_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be1 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be1_port_3 } + - port: { get_resource: be1_port_4 } + - port: { get_resource: be1_port_5 } + - port: { get_resource: be1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be1_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be1_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be1_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be1_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be1_Cricket_OCS_protected_ips}}] + + be1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be1_OAM_direct_ips}}] + + server_scp_be2: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be2_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be2 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be2_port_3 } + - port: { get_resource: be2_port_4 } + - port: { get_resource: be2_port_5 } + - port: { get_resource: be2_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be2_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be2_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be2_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be2_port_5: + type: OS::Neutron::Port + properties: + network: { 
get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be2_Cricket_OCS_protected_ips}}] + + be2_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be2_OAM_direct_ips}}] + + server_scp_be3: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be3_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be3 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be3_port_3 } + - port: { get_resource: be3_port_4 } + - port: { get_resource: be3_port_5 } + - port: { get_resource: be3_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be3_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be3_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be3_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be3_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be3_Cricket_OCS_protected_ips}}] + + be3_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be3_OAM_direct_ips}}] + + server_scp_be4: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be4_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be4 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be4_port_3 } + - port: { get_resource: be4_port_4 } + - port: { get_resource: be4_port_5 } + - port: { get_resource: be4_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be4_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be4_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be4_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be4_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be4_Cricket_OCS_protected_ips}}] + + be4_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be4_OAM_direct_ips}}] + + server_scp_fe0: + type: OS::Nova::Server +# depends on: scp_be_wait_condition + properties: + name: { get_param: vm_scp_fe0_name } + image: { get_param: image_scp_fe_id } +# availability_zone: { get_param: availability_zone_fe0 } + flavor: { get_param: flavor_scp_fe_id } + scheduler_hints: { group: { get_resource: FE_Affinity } } + networks: + - port: { get_resource: fe0_port_0 } + - port: { get_resource: fe0_port_2 } + - port: { get_resource: fe0_port_3 } + - port: { get_resource: fe0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + 
template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_fe0_name} +# wc_notify: { get_attr: ['scp_fe_wait_handle', 'curl_cli'] } + + fe0_port_0: + type: OS::Neutron::Port + properties: + network: { get_param: SIGNET_vrf_A1_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe0_SIGNET_vrf_A1_direct_ips}}] + + fe0_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: FE_Clustering_KA } + + fe0_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + fe0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe0_OAM_direct_ips}}] + + server_scp_fe1: + type: OS::Nova::Server +# depends on: scp_be_wait_condition + properties: + name: { get_param: vm_scp_fe1_name } + image: { get_param: image_scp_fe_id } +# availability_zone: { get_param: availability_zone_fe1 } + flavor: { get_param: flavor_scp_fe_id } + scheduler_hints: { group: { get_resource: FE_Affinity } } + networks: + - port: { get_resource: fe1_port_1 } + - port: { get_resource: fe1_port_2 } + - port: { get_resource: fe1_port_3 } + - port: { get_resource: fe1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_fe1_name} +# wc_notify: { get_attr: ['scp_fe_wait_handle', 'curl_cli'] } + + fe1_port_1: + type: OS::Neutron::Port + properties: + network: { get_param: SIGNET_vrf_B1_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe1_SIGNET_vrf_B1_direct_ips}}] + + fe1_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: FE_Clustering_KA } + + fe1_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + fe1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe1_OAM_direct_ips}}] + + server_smp0: + type: OS::Nova::Server + properties: + name: { get_param: vm_smp0_name } + image: { get_param: image_smp_id } +# availability_zone: { get_param: availability_zone_smp0 } + flavor: { get_param: flavor_smp_id } + scheduler_hints: { group: { get_resource: SMP_Affinity } } + networks: + - port: { get_resource: smp0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_smp0_name} +# wc_notify: { get_attr: ['smp_wait_handle', 'curl_cli'] } + + smp0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: smp0_OAM_direct_ips}}] + + server_smp1: + type: OS::Nova::Server + properties: + name: { get_param: vm_smp1_name } + image: { get_param: image_smp_id } +# availability_zone: { get_param: availability_zone_smp1 } + flavor: { get_param: flavor_smp_id } + scheduler_hints: { group: { get_resource: SMP_Affinity } } + networks: + - port: { get_resource: smp1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_smp1_name} +# wc_notify: { get_attr: ['smp_wait_handle', 'curl_cli'] } + + 
smp1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: smp1_OAM_direct_ips}}] + + server_db0: + type: OS::Nova::Server +# depends_on: smp_wait_condition + properties: + name: { get_param: vm_db0_name } + image: { get_param: image_db_id } +# availability_zone: { get_param: availability_zone_db0 } + flavor: { get_param: flavor_db_id } + scheduler_hints: { group: { get_resource: DB_Affinity } } + networks: + - port: { get_resource: db0_port_4 } + - port: { get_resource: db0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_db0_name} +# wc_notify: { get_attr: ['db_wait_handle', 'curl_cli'] } + + db0_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + db0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: db0_OAM_direct_ips}}] + + server_db1: + type: OS::Nova::Server +# depends_on: smp_wait_condition + properties: + name: { get_param: vm_db1_name } + image: { get_param: image_db_id } +# availability_zone: { get_param: availability_zone_db1 } + flavor: { get_param: flavor_db_id } + scheduler_hints: { group: { get_resource: DB_Affinity } } + networks: + - port: { get_resource: db1_port_4 } + - port: { get_resource: db1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_db1_name} +# wc_notify: { get_attr: ['db_wait_handle', 'curl_cli'] } + + db1_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + db1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: db1_OAM_direct_ips}}] \ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/Heat-File 2.yaml b/ui-ci-dev/src/main/resources/Files/Heat-File 2.yaml new file mode 100644 index 0000000000..d332078d35 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/Heat-File 2.yaml @@ -0,0 +1,791 @@ +heat_template_version: 2013-05-23 +################################# +# +# Changes in v0.2: +# - Unique availability zone for each VM +# - LAN8 and SLAN networks removed according to latest Prod/Type I diagram +# - 2 DB VMs added +# - Images corrected +# - VM start-up order: SMP->DB->BE->FE (no error handling yet) +# - Provisioning scripts placeholders +# +################################# + +description: ASC Template + +parameters: +# availability_zone_smp0: +# type: string +# default: nova +# availability_zone_smp1: +# type: string +# default: nova +# availability_zone_fe0: +# type: string +# default: nova +# availability_zone_fe1: +# type: string +# default: nova +# availability_zone_db0: +# type: string +# default: nova +# availability_zone_db1: +# type: string +# default: nova +# availability_zone_be0: +# type: string +# default: nova +# availability_zone_be1: +# type: string +# default: nova +# availability_zone_be2: +# type: string +# default: nova +# availability_zone_be3: +# type: string +# default: nova +# availability_zone_be4: +# type: string +# default: nova + + vnf_name: + type: string + description: Unique name for this VNF instance + default: This_is_the_SCP_name + 
vnf_id: + type: string + description: Unique ID for this VNF instance + default: This_is_ths_SCP_id + + flavor_scp_be_id: + type: string + description: flavor type + default: a1.Small + flavor_scp_fe_id: + type: string + description: flavor type + default: a1.Small + flavor_smp_id: + type: string + description: flavor type + default: a1.Small + flavor_db_id: + type: string + description: flavor type + default: a1.Small + image_scp_be_id: + type: string + description: Image use to boot a server + default: asc_base_image_be + image_scp_fe_id: + type: string + description: Image use to boot a server + default: asc_base_image_fe + image_smp_id: + type: string + description: Image use to boot a server + default: asc_base_image_smp + image_db_id: + type: string + description: Image use to boot a server + default: asc_base_image_db + + int_vscp_fe_cluster_net_id: + type: string + description: LAN2 FE Cluster/KA + int_vscp_fe_cluster_cidr: + type: string + description: Private Network2 Address (CIDR notation) + int_vscp_cluster_net_id: + type: string + description: LAN3 Cluster + int_vscp_cluster_cidr: + type: string + description: Private Network3 Address (CIDR notation) + int_vscp_db_network_net_id: + type: string + description: LAN4 DB + int_vscp_db_network_cidr: + type: string + description: Private Network4 Address (CIDR notation) + SIGNET_vrf_A1_direct_net_id: + type: string + description: Network name for SIGTRAN_A + SIGNET_vrf_B1_direct_net_id: + type: string + description: Network name for SIGTRAN_B + Cricket_OCS_protected_net_id: + type: string + description: Network name for CRICKET_OCS + OAM_direct_net_id: + type: string + description: Network name for OAM + be0_Cricket_OCS_protected_ips: + type: string + label: be0 port 5 OAM ip address + description: be0 port 5 OAM ip address + be1_Cricket_OCS_protected_ips: + type: string + label: be1 port 5 OAM ip address + description: be1 port 5 OAM ip address + be2_Cricket_OCS_protected_ips: + type: string + label: be2 port 5 OAM ip address + description: be2 port 5 OAM ip address + be3_Cricket_OCS_protected_ips: + type: string + label: be3 port 5 OAM ip address + description: be3 port 5 OAM ip address + be4_Cricket_OCS_protected_ips: + type: string + label: be4 port 5 OAM ip address + description: be4 port 5 OAM ip address + be0_OAM_direct_ips: + type: string + label: be0 port 7 OAM ip address + description: be0 port 7 OAM ip address + be1_OAM_direct_ips: + type: string + label: be1 port 7 OAM ip address + description: be1 port 7 OAM ip address + be2_OAM_direct_ips: + type: string + label: be2 port 7 OAM ip address + description: be2 port 7 OAM ip address + be3_OAM_direct_ips: + type: string + label: be3 port 7 OAM ip address + description: be3 port 7 OAM ip address + be4_OAM_direct_ips: + type: string + label: be4 port 7 OAM ip address + description: be4 port 7 OAM ip address + fe0_SIGNET_vrf_A1_direct_ips: + type: string + label: fe0 port 0 SIGTRAN ip address + description: fe0 port 0 SIGTRAN ip address + fe0_OAM_direct_ips: + type: string + label: fe0 port 7 OAM ip address + description: fe0 port 7 OAM ip address + fe1_SIGNET_vrf_B1_direct_ips: + type: string + label: fe1 port 1 SIGTRAN ip address + description: fe1 port 1 SIGTRAN ip address + fe1_OAM_direct_ips: + type: string + label: fe1 port 7 OAM ip address + description: fe1 port 7 OAM ip address + smp0_OAM_direct_ips: + type: string + label: smp0 port 7 OAM ip address + description: smp0 port 7 OAM ip address + smp1_OAM_direct_ips: + type: string + label: smp1 port 7 OAM ip address + 
description: smp1 port 7 OAM ip address + db0_OAM_direct_ips: + type: string + label: db0 port 7 OAM ip address + description: smp0 port 7 OAM ip address + db1_OAM_direct_ips: + type: string + label: smp1 port 7 OAM ip address + description: db1 port 7 OAM ip address + vm_scp_be0_name: + type: string + default: vSCP_BE0 + description: name of VM + vm_scp_be1_name: + type: string + default: vSCP_BE1 + description: name of VM + vm_scp_be2_name: + type: string + default: vSCP_BE2 + description: name of VM + vm_scp_be3_name: + type: string + default: vSCP_BE3 + description: name of VM + vm_scp_be4_name: + type: string + default: vSCP_BE4 + description: name of VM + vm_scp_fe0_name: + type: string + default: vSCP_FE0 + description: name of VM + vm_scp_fe1_name: + type: string + default: vSCP_FE1 + description: name of VM + vm_smp0_name: + type: string + default: vSMP0 + description: name of VM + vm_smp1_name: + type: string + default: vSMP1 + description: name of VM + vm_db0_name: + type: string + default: vDB0 + description: name of VM + vm_db1_name: + type: string + default: vDB1 + description: name of VM + +resources: +# scp_be_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: scp_be_wait_handle } +# count: 5 +# timeout: 300 +# scp_be_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# scp_fe_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: scp_fe_wait_handle } +# count: 2 +# timeout: 300 +# scp_fe_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# smp_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: smp_wait_handle } +# count: 2 +# timeout: 300 +# smp_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# db_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: db_wait_handle } +# count: 2 +# timeout: 300 +# db_wait_handle: +# type: OS::Heat::WaitConditionHandle + + FE_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + BE_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + SMP_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + DB_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + + FE_Clustering_KA: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_fe_cluster_net_id } + + FE_Clustering_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: FE_Clustering_KA } + cidr: { get_param: int_vscp_fe_cluster_cidr } + + Clustering_Network: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_cluster_net_id } + + Clustering_Network_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: Clustering_Network } + cidr: { get_param: int_vscp_cluster_cidr } + + DB_Network: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_db_network_net_id } + + DB_Network_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: DB_Network } + cidr: { get_param: int_vscp_db_network_cidr } + + server_scp_be0: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be0_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be0 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: 
be0_port_3 } + - port: { get_resource: be0_port_4 } + - port: { get_resource: be0_port_5 } + - port: { get_resource: be0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be0_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + be0_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be0_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be0_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be0_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be0_Cricket_OCS_protected_ips}}] + + be0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be0_OAM_direct_ips}}] + + server_scp_be1: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be1_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be1 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be1_port_3 } + - port: { get_resource: be1_port_4 } + - port: { get_resource: be1_port_5 } + - port: { get_resource: be1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be1_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be1_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be1_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be1_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be1_Cricket_OCS_protected_ips}}] + + be1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be1_OAM_direct_ips}}] + + server_scp_be2: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be2_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be2 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be2_port_3 } + - port: { get_resource: be2_port_4 } + - port: { get_resource: be2_port_5 } + - port: { get_resource: be2_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be2_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be2_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be2_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be2_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: 
[{"ip_address": {get_param: be2_Cricket_OCS_protected_ips}}] + + be2_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be2_OAM_direct_ips}}] + + server_scp_be3: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be3_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be3 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be3_port_3 } + - port: { get_resource: be3_port_4 } + - port: { get_resource: be3_port_5 } + - port: { get_resource: be3_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be3_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be3_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be3_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be3_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be3_Cricket_OCS_protected_ips}}] + + be3_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be3_OAM_direct_ips}}] + + server_scp_be4: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be4_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be4 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be4_port_3 } + - port: { get_resource: be4_port_4 } + - port: { get_resource: be4_port_5 } + - port: { get_resource: be4_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be4_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be4_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be4_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be4_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be4_Cricket_OCS_protected_ips}}] + + be4_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be4_OAM_direct_ips}}] + + server_scp_fe0: + type: OS::Nova::Server +# depends on: scp_be_wait_condition + properties: + name: { get_param: vm_scp_fe0_name } + image: { get_param: image_scp_fe_id } +# availability_zone: { get_param: availability_zone_fe0 } + flavor: { get_param: flavor_scp_fe_id } + scheduler_hints: { group: { get_resource: FE_Affinity } } + networks: + - port: { get_resource: fe0_port_0 } + - port: { get_resource: fe0_port_2 } + - port: { get_resource: fe0_port_3 } + - port: { get_resource: fe0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + 
wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_fe0_name} +# wc_notify: { get_attr: ['scp_fe_wait_handle', 'curl_cli'] } + + fe0_port_0: + type: OS::Neutron::Port + properties: + network: { get_param: SIGNET_vrf_A1_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe0_SIGNET_vrf_A1_direct_ips}}] + + fe0_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: FE_Clustering_KA } + + fe0_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + fe0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe0_OAM_direct_ips}}] + + server_scp_fe1: + type: OS::Nova::Server +# depends on: scp_be_wait_condition + properties: + name: { get_param: vm_scp_fe1_name } + image: { get_param: image_scp_fe_id } +# availability_zone: { get_param: availability_zone_fe1 } + flavor: { get_param: flavor_scp_fe_id } + scheduler_hints: { group: { get_resource: FE_Affinity } } + networks: + - port: { get_resource: fe1_port_1 } + - port: { get_resource: fe1_port_2 } + - port: { get_resource: fe1_port_3 } + - port: { get_resource: fe1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_fe1_name} +# wc_notify: { get_attr: ['scp_fe_wait_handle', 'curl_cli'] } + + fe1_port_1: + type: OS::Neutron::Port + properties: + network: { get_param: SIGNET_vrf_B1_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe1_SIGNET_vrf_B1_direct_ips}}] + + fe1_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: FE_Clustering_KA } + + fe1_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + fe1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe1_OAM_direct_ips}}] + + server_smp0: + type: OS::Nova::Server + properties: + name: { get_param: vm_smp0_name } + image: { get_param: image_smp_id } +# availability_zone: { get_param: availability_zone_smp0 } + flavor: { get_param: flavor_smp_id } + scheduler_hints: { group: { get_resource: SMP_Affinity } } + networks: + - port: { get_resource: smp0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_smp0_name} +# wc_notify: { get_attr: ['smp_wait_handle', 'curl_cli'] } + + smp0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: smp0_OAM_direct_ips}}] + + server_smp1: + type: OS::Nova::Server + properties: + name: { get_param: vm_smp1_name } + image: { get_param: image_smp_id } +# availability_zone: { get_param: availability_zone_smp1 } + flavor: { get_param: flavor_smp_id } + scheduler_hints: { group: { get_resource: SMP_Affinity } } + networks: + - port: { get_resource: smp1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_smp1_name} +# wc_notify: { get_attr: ['smp_wait_handle', 'curl_cli'] } + + smp1_port_7: + type: OS::Neutron::Port + properties: + 
network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: smp1_OAM_direct_ips}}] + + server_db0: + type: OS::Nova::Server +# depends_on: smp_wait_condition + properties: + name: { get_param: vm_db0_name } + image: { get_param: image_db_id } +# availability_zone: { get_param: availability_zone_db0 } + flavor: { get_param: flavor_db_id } + scheduler_hints: { group: { get_resource: DB_Affinity } } + networks: + - port: { get_resource: db0_port_4 } + - port: { get_resource: db0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_db0_name} +# wc_notify: { get_attr: ['db_wait_handle', 'curl_cli'] } + + db0_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + db0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: db0_OAM_direct_ips}}] + + server_db1: + type: OS::Nova::Server +# depends_on: smp_wait_condition + properties: + name: { get_param: vm_db1_name } + image: { get_param: image_db_id } +# availability_zone: { get_param: availability_zone_db1 } + flavor: { get_param: flavor_db_id } + scheduler_hints: { group: { get_resource: DB_Affinity } } + networks: + - port: { get_resource: db1_port_4 } + - port: { get_resource: db1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_db1_name} +# wc_notify: { get_attr: ['db_wait_handle', 'curl_cli'] } + + db1_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + db1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: db1_OAM_direct_ips}}] \ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/Heat-File.yaml b/ui-ci-dev/src/main/resources/Files/Heat-File.yaml new file mode 100644 index 0000000000..d332078d35 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/Heat-File.yaml @@ -0,0 +1,791 @@ +heat_template_version: 2013-05-23 +################################# +# +# Changes in v0.2: +# - Unique availability zone for each VM +# - LAN8 and SLAN networks removed according to latest Prod/Type I diagram +# - 2 DB VMs added +# - Images corrected +# - VM start-up order: SMP->DB->BE->FE (no error handling yet) +# - Provisioning scripts placeholders +# +################################# + +description: ASC Template + +parameters: +# availability_zone_smp0: +# type: string +# default: nova +# availability_zone_smp1: +# type: string +# default: nova +# availability_zone_fe0: +# type: string +# default: nova +# availability_zone_fe1: +# type: string +# default: nova +# availability_zone_db0: +# type: string +# default: nova +# availability_zone_db1: +# type: string +# default: nova +# availability_zone_be0: +# type: string +# default: nova +# availability_zone_be1: +# type: string +# default: nova +# availability_zone_be2: +# type: string +# default: nova +# availability_zone_be3: +# type: string +# default: nova +# availability_zone_be4: +# type: string +# default: nova + + vnf_name: + type: string + description: Unique name for this VNF instance + default: This_is_the_SCP_name + vnf_id: + type: string + description: Unique ID for this VNF 
instance + default: This_is_ths_SCP_id + + flavor_scp_be_id: + type: string + description: flavor type + default: a1.Small + flavor_scp_fe_id: + type: string + description: flavor type + default: a1.Small + flavor_smp_id: + type: string + description: flavor type + default: a1.Small + flavor_db_id: + type: string + description: flavor type + default: a1.Small + image_scp_be_id: + type: string + description: Image use to boot a server + default: asc_base_image_be + image_scp_fe_id: + type: string + description: Image use to boot a server + default: asc_base_image_fe + image_smp_id: + type: string + description: Image use to boot a server + default: asc_base_image_smp + image_db_id: + type: string + description: Image use to boot a server + default: asc_base_image_db + + int_vscp_fe_cluster_net_id: + type: string + description: LAN2 FE Cluster/KA + int_vscp_fe_cluster_cidr: + type: string + description: Private Network2 Address (CIDR notation) + int_vscp_cluster_net_id: + type: string + description: LAN3 Cluster + int_vscp_cluster_cidr: + type: string + description: Private Network3 Address (CIDR notation) + int_vscp_db_network_net_id: + type: string + description: LAN4 DB + int_vscp_db_network_cidr: + type: string + description: Private Network4 Address (CIDR notation) + SIGNET_vrf_A1_direct_net_id: + type: string + description: Network name for SIGTRAN_A + SIGNET_vrf_B1_direct_net_id: + type: string + description: Network name for SIGTRAN_B + Cricket_OCS_protected_net_id: + type: string + description: Network name for CRICKET_OCS + OAM_direct_net_id: + type: string + description: Network name for OAM + be0_Cricket_OCS_protected_ips: + type: string + label: be0 port 5 OAM ip address + description: be0 port 5 OAM ip address + be1_Cricket_OCS_protected_ips: + type: string + label: be1 port 5 OAM ip address + description: be1 port 5 OAM ip address + be2_Cricket_OCS_protected_ips: + type: string + label: be2 port 5 OAM ip address + description: be2 port 5 OAM ip address + be3_Cricket_OCS_protected_ips: + type: string + label: be3 port 5 OAM ip address + description: be3 port 5 OAM ip address + be4_Cricket_OCS_protected_ips: + type: string + label: be4 port 5 OAM ip address + description: be4 port 5 OAM ip address + be0_OAM_direct_ips: + type: string + label: be0 port 7 OAM ip address + description: be0 port 7 OAM ip address + be1_OAM_direct_ips: + type: string + label: be1 port 7 OAM ip address + description: be1 port 7 OAM ip address + be2_OAM_direct_ips: + type: string + label: be2 port 7 OAM ip address + description: be2 port 7 OAM ip address + be3_OAM_direct_ips: + type: string + label: be3 port 7 OAM ip address + description: be3 port 7 OAM ip address + be4_OAM_direct_ips: + type: string + label: be4 port 7 OAM ip address + description: be4 port 7 OAM ip address + fe0_SIGNET_vrf_A1_direct_ips: + type: string + label: fe0 port 0 SIGTRAN ip address + description: fe0 port 0 SIGTRAN ip address + fe0_OAM_direct_ips: + type: string + label: fe0 port 7 OAM ip address + description: fe0 port 7 OAM ip address + fe1_SIGNET_vrf_B1_direct_ips: + type: string + label: fe1 port 1 SIGTRAN ip address + description: fe1 port 1 SIGTRAN ip address + fe1_OAM_direct_ips: + type: string + label: fe1 port 7 OAM ip address + description: fe1 port 7 OAM ip address + smp0_OAM_direct_ips: + type: string + label: smp0 port 7 OAM ip address + description: smp0 port 7 OAM ip address + smp1_OAM_direct_ips: + type: string + label: smp1 port 7 OAM ip address + description: smp1 port 7 OAM ip address + db0_OAM_direct_ips: 
+ type: string + label: db0 port 7 OAM ip address + description: smp0 port 7 OAM ip address + db1_OAM_direct_ips: + type: string + label: smp1 port 7 OAM ip address + description: db1 port 7 OAM ip address + vm_scp_be0_name: + type: string + default: vSCP_BE0 + description: name of VM + vm_scp_be1_name: + type: string + default: vSCP_BE1 + description: name of VM + vm_scp_be2_name: + type: string + default: vSCP_BE2 + description: name of VM + vm_scp_be3_name: + type: string + default: vSCP_BE3 + description: name of VM + vm_scp_be4_name: + type: string + default: vSCP_BE4 + description: name of VM + vm_scp_fe0_name: + type: string + default: vSCP_FE0 + description: name of VM + vm_scp_fe1_name: + type: string + default: vSCP_FE1 + description: name of VM + vm_smp0_name: + type: string + default: vSMP0 + description: name of VM + vm_smp1_name: + type: string + default: vSMP1 + description: name of VM + vm_db0_name: + type: string + default: vDB0 + description: name of VM + vm_db1_name: + type: string + default: vDB1 + description: name of VM + +resources: +# scp_be_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: scp_be_wait_handle } +# count: 5 +# timeout: 300 +# scp_be_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# scp_fe_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: scp_fe_wait_handle } +# count: 2 +# timeout: 300 +# scp_fe_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# smp_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: smp_wait_handle } +# count: 2 +# timeout: 300 +# smp_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# db_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: db_wait_handle } +# count: 2 +# timeout: 300 +# db_wait_handle: +# type: OS::Heat::WaitConditionHandle + + FE_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + BE_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + SMP_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + DB_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + + FE_Clustering_KA: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_fe_cluster_net_id } + + FE_Clustering_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: FE_Clustering_KA } + cidr: { get_param: int_vscp_fe_cluster_cidr } + + Clustering_Network: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_cluster_net_id } + + Clustering_Network_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: Clustering_Network } + cidr: { get_param: int_vscp_cluster_cidr } + + DB_Network: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_db_network_net_id } + + DB_Network_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: DB_Network } + cidr: { get_param: int_vscp_db_network_cidr } + + server_scp_be0: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be0_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be0 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be0_port_3 } + - port: { get_resource: be0_port_4 } + - port: { 
get_resource: be0_port_5 } + - port: { get_resource: be0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be0_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + be0_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be0_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be0_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be0_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be0_Cricket_OCS_protected_ips}}] + + be0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be0_OAM_direct_ips}}] + + server_scp_be1: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be1_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be1 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be1_port_3 } + - port: { get_resource: be1_port_4 } + - port: { get_resource: be1_port_5 } + - port: { get_resource: be1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be1_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be1_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be1_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be1_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be1_Cricket_OCS_protected_ips}}] + + be1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be1_OAM_direct_ips}}] + + server_scp_be2: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be2_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be2 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be2_port_3 } + - port: { get_resource: be2_port_4 } + - port: { get_resource: be2_port_5 } + - port: { get_resource: be2_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be2_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be2_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be2_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be2_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be2_Cricket_OCS_protected_ips}}] + + 
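For orientation, every VM in this Heat fixture repeats one pattern: an OS::Nova::ServerGroup enforcing anti-affinity, internal ports wired via get_resource to networks created in the same stack (using the older network_id property), and external ports wired via get_param to pre-existing networks with a pinned IP (using network plus fixed_ips). Below is a minimal, self-contained sketch of that pattern; the parameter and resource names (oam_net_id, oam_ip, example_server) are invented for illustration and do not appear in the commit.

heat_template_version: 2013-05-23

description: minimal sketch of the server / port / anti-affinity pattern used in this fixture

parameters:
  oam_net_id:
    type: string
    description: name or ID of a pre-existing network (external to the stack)
  oam_ip:
    type: string
    description: fixed IP to assign on that network
  image_id:
    type: string
  flavor_id:
    type: string

resources:
  example_affinity:
    type: OS::Nova::ServerGroup
    properties:
      policies: ["anti-affinity"]

  # external network passed in as a parameter, with a pinned IP
  example_port:
    type: OS::Neutron::Port
    properties:
      network: { get_param: oam_net_id }
      fixed_ips: [{"ip_address": { get_param: oam_ip }}]

  example_server:
    type: OS::Nova::Server
    properties:
      name: example_vm
      image: { get_param: image_id }
      flavor: { get_param: flavor_id }
      scheduler_hints: { group: { get_resource: example_affinity } }
      networks:
        - port: { get_resource: example_port }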
be2_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be2_OAM_direct_ips}}] + + server_scp_be3: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be3_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be3 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be3_port_3 } + - port: { get_resource: be3_port_4 } + - port: { get_resource: be3_port_5 } + - port: { get_resource: be3_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be3_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be3_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be3_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be3_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be3_Cricket_OCS_protected_ips}}] + + be3_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be3_OAM_direct_ips}}] + + server_scp_be4: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be4_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be4 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be4_port_3 } + - port: { get_resource: be4_port_4 } + - port: { get_resource: be4_port_5 } + - port: { get_resource: be4_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be4_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be4_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be4_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be4_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be4_Cricket_OCS_protected_ips}}] + + be4_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be4_OAM_direct_ips}}] + + server_scp_fe0: + type: OS::Nova::Server +# depends on: scp_be_wait_condition + properties: + name: { get_param: vm_scp_fe0_name } + image: { get_param: image_scp_fe_id } +# availability_zone: { get_param: availability_zone_fe0 } + flavor: { get_param: flavor_scp_fe_id } + scheduler_hints: { group: { get_resource: FE_Affinity } } + networks: + - port: { get_resource: fe0_port_0 } + - port: { get_resource: fe0_port_2 } + - port: { get_resource: fe0_port_3 } + - port: { get_resource: fe0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: 
{get_param: vm_scp_fe0_name} +# wc_notify: { get_attr: ['scp_fe_wait_handle', 'curl_cli'] } + + fe0_port_0: + type: OS::Neutron::Port + properties: + network: { get_param: SIGNET_vrf_A1_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe0_SIGNET_vrf_A1_direct_ips}}] + + fe0_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: FE_Clustering_KA } + + fe0_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + fe0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe0_OAM_direct_ips}}] + + server_scp_fe1: + type: OS::Nova::Server +# depends on: scp_be_wait_condition + properties: + name: { get_param: vm_scp_fe1_name } + image: { get_param: image_scp_fe_id } +# availability_zone: { get_param: availability_zone_fe1 } + flavor: { get_param: flavor_scp_fe_id } + scheduler_hints: { group: { get_resource: FE_Affinity } } + networks: + - port: { get_resource: fe1_port_1 } + - port: { get_resource: fe1_port_2 } + - port: { get_resource: fe1_port_3 } + - port: { get_resource: fe1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_fe1_name} +# wc_notify: { get_attr: ['scp_fe_wait_handle', 'curl_cli'] } + + fe1_port_1: + type: OS::Neutron::Port + properties: + network: { get_param: SIGNET_vrf_B1_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe1_SIGNET_vrf_B1_direct_ips}}] + + fe1_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: FE_Clustering_KA } + + fe1_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + fe1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe1_OAM_direct_ips}}] + + server_smp0: + type: OS::Nova::Server + properties: + name: { get_param: vm_smp0_name } + image: { get_param: image_smp_id } +# availability_zone: { get_param: availability_zone_smp0 } + flavor: { get_param: flavor_smp_id } + scheduler_hints: { group: { get_resource: SMP_Affinity } } + networks: + - port: { get_resource: smp0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_smp0_name} +# wc_notify: { get_attr: ['smp_wait_handle', 'curl_cli'] } + + smp0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: smp0_OAM_direct_ips}}] + + server_smp1: + type: OS::Nova::Server + properties: + name: { get_param: vm_smp1_name } + image: { get_param: image_smp_id } +# availability_zone: { get_param: availability_zone_smp1 } + flavor: { get_param: flavor_smp_id } + scheduler_hints: { group: { get_resource: SMP_Affinity } } + networks: + - port: { get_resource: smp1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_smp1_name} +# wc_notify: { get_attr: ['smp_wait_handle', 'curl_cli'] } + + smp1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": 
{get_param: smp1_OAM_direct_ips}}] + + server_db0: + type: OS::Nova::Server +# depends_on: smp_wait_condition + properties: + name: { get_param: vm_db0_name } + image: { get_param: image_db_id } +# availability_zone: { get_param: availability_zone_db0 } + flavor: { get_param: flavor_db_id } + scheduler_hints: { group: { get_resource: DB_Affinity } } + networks: + - port: { get_resource: db0_port_4 } + - port: { get_resource: db0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_db0_name} +# wc_notify: { get_attr: ['db_wait_handle', 'curl_cli'] } + + db0_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + db0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: db0_OAM_direct_ips}}] + + server_db1: + type: OS::Nova::Server +# depends_on: smp_wait_condition + properties: + name: { get_param: vm_db1_name } + image: { get_param: image_db_id } +# availability_zone: { get_param: availability_zone_db1 } + flavor: { get_param: flavor_db_id } + scheduler_hints: { group: { get_resource: DB_Affinity } } + networks: + - port: { get_resource: db1_port_4 } + - port: { get_resource: db1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_db1_name} +# wc_notify: { get_attr: ['db_wait_handle', 'curl_cli'] } + + db1_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + db1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: db1_OAM_direct_ips}}] \ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/InValid_tosca_File .yml b/ui-ci-dev/src/main/resources/Files/InValid_tosca_File .yml new file mode 100644 index 0000000000..4eea0a15ac --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/InValid_tosca_File .yml @@ -0,0 +1,34 @@ +node_types: + org.openecomp.resource.MyCompute: + derived_from: tosca.nodes.Root + attributes: + private_address: + type: string + public_address: + type: string + networks: + type: map + entry_schema: + type: tosca.datatypes.network.NetworkInfo + ports: + type: map + entry_schema: + type: tosca.datatypes.network.PortInfo + requirements: + - local_storage: + capability: tosca.capabilities.Attachment + node: tosca.nodes.BlockStorage + relationship: tosca.relationships.AttachesTo + occurrences: [0, UNBOUNDED] + capabilities: + host: + type: tosca.capabilities.Container + valid_source_types: [tosca.nodes.SoftwareComponent] + endpoint : + type: tosca.capabilities.Endpoint.Admin + os: + type: tosca.capabilities.OperatingSystem + scalable: + type: tosca.capabilities.Scalable + binding: + type: tosca.capabilities.network.Bindable diff --git a/ui-ci-dev/src/main/resources/Files/JDM_vf.yml b/ui-ci-dev/src/main/resources/Files/JDM_vf.yml new file mode 100644 index 0000000000..5a7edd4aaf --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/JDM_vf.yml @@ -0,0 +1,57 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.vf.JDM: + derived_from: tosca.nodes.Root + properties: + att-part-number: + type: string + vendor-name: + type: string + 
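Before the JDM node type continues, one observation on the file that just closed: InValid_tosca_File .yml (note the stray space in its name) is, apart from the node type name, the same document as Valid_tosca_Mycompute.yml further down, minus the tosca_definitions_version line; that missing required keyname is presumably what makes it the negative fixture for the import-validation tests. For reference, a valid TOSCA Simple Profile YAML document opens like this:

tosca_definitions_version: tosca_simple_yaml_1_0_0   # required keyname, expected at the top of the document

node_types:
  # ... node type definitions follow, as in Valid_tosca_Mycompute.yml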
vendor-part-number: + type: string + vendor-model: + type: string + vendor-model-description: + type: string + vcpu-default: + type: integer + vcpu-min: + type: integer + vcpu-max: + type: integer + vmemory-default: + type: integer + vmemory-units: + type: string + default: "GB" + vmemory-min: + type: integer + vmemory-max: + type: integer + vdisk-default: + type: integer + vdisk-units: + type: string + default: "GB" + vdisk-min: + type: integer + vdisk-max: + type: integer + vnf-type: + type: string + software-version: + type: string + software-version-state: + type: integer + software-file-name: + type: string + vnf-feature: + type: string + requirements: + - host: + capability: tosca.capabilities.Container + relationship: tosca.relationships.HostedOn + capabilities: + binding: + type: tosca.capabilities.network.Bindable \ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/JDM_vfc.yml b/ui-ci-dev/src/main/resources/Files/JDM_vfc.yml new file mode 100644 index 0000000000..b9c9ca0c4a --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/JDM_vfc.yml @@ -0,0 +1,57 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.vfc.JDM: + derived_from: tosca.nodes.Root + properties: + att-part-number: + type: string + vendor-name: + type: string + vendor-part-number: + type: string + vendor-model: + type: string + vendor-model-description: + type: string + vcpu-default: + type: integer + vcpu-min: + type: integer + vcpu-max: + type: integer + vmemory-default: + type: integer + vmemory-units: + type: string + default: "GB" + vmemory-min: + type: integer + vmemory-max: + type: integer + vdisk-default: + type: integer + vdisk-units: + type: string + default: "GB" + vdisk-min: + type: integer + vdisk-max: + type: integer + vnf-type: + type: string + software-version: + type: string + software-version-state: + type: integer + software-file-name: + type: string + vnf-feature: + type: string + requirements: + - host: + capability: tosca.capabilities.Container + relationship: tosca.relationships.HostedOn + capabilities: + binding: + type: tosca.capabilities.network.Bindable \ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/Sample_CSAR.csar b/ui-ci-dev/src/main/resources/Files/Sample_CSAR.csar new file mode 100644 index 0000000000..fe95e79473 Binary files /dev/null and b/ui-ci-dev/src/main/resources/Files/Sample_CSAR.csar differ diff --git a/ui-ci-dev/src/main/resources/Files/Sample_CSAR2.csar b/ui-ci-dev/src/main/resources/Files/Sample_CSAR2.csar new file mode 100644 index 0000000000..3001fe8222 Binary files /dev/null and b/ui-ci-dev/src/main/resources/Files/Sample_CSAR2.csar differ diff --git a/ui-ci-dev/src/main/resources/Files/UCPE_VFC.yml b/ui-ci-dev/src/main/resources/Files/UCPE_VFC.yml new file mode 100644 index 0000000000..ef3966b68f --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/UCPE_VFC.yml @@ -0,0 +1,65 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.vfc.uCPE: + derived_from: tosca.nodes.Root + properties: + att-ucpe-part-number: + type: string + vendor-name: + type: string + required: true + vendor-model: + type: string + required: true + total-vcpu: + type: integer + description: number of vCPUs + total-memory: + type: integer + description: GB + total-disk: + type: integer + description: GB + base-system-image-file-name: + type: string + linux-host-vendor: + type: string + linux-host-os-version: + type: version + base-system-software: + type: string + 
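The JDM and uCPE node types in this commit only declare types; nothing here shows one being instantiated. As a hedged illustration of how a service template would use org.openecomp.resource.vfc.JDM (the node template name and all property values below are invented for the sketch, and the flat import assumes JDM_vfc.yml sits next to the service template):

tosca_definitions_version: tosca_simple_yaml_1_0_0

imports:
  - JDM_vfc.yml          # assumption: the type definition file is co-located

topology_template:
  node_templates:
    jdm_instance:
      type: org.openecomp.resource.vfc.JDM
      properties:
        vendor-name: ExampleVendor     # illustrative value
        vcpu-default: 2
        vmemory-default: 4             # GB, per the vmemory-units default
        vdisk-default: 20              # GB
        software-version: "1.0.0"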
jdm-vcpu: + type: integer + jdm-memory: + type: integer + description: GB + jdm-disk: + type: integer + description: GB + jdm-version: + type: string + jcp-vcpu: + type: integer + jcp-memory: + type: integer + description: GB + jcp-disk: + type: integer + description: GB + jcp-version: + type: version + capabilities: + vnf_hosting: + type: tosca.capabilities.Container + description: Provides hosting capability for VNFs + WAN_connectivity: + type: tosca.capabilities.network.Bindable + valid_source_types: [org.openecomp.cp.Wan] + description: external WAN1 n/w interface + occurrences: [1,2] + LAN_connectivity: + type: tosca.capabilities.network.Bindable + valid_source_types: [org.openecomp.cp.Lan] + description: external LAN n/w interface + occurrences: [1,8] \ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/VF.yml b/ui-ci-dev/src/main/resources/Files/VF.yml new file mode 100644 index 0000000000..ec089900ad --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/VF.yml @@ -0,0 +1,17 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.vf.VFF: + derived_from: tosca.nodes.Root + properties: + vendor: + type: string + required: false + vl_name: + type: string + required: false + capabilities: + virtual_linkable: + type: tosca.capabilities.network.Linkable + + \ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/VFC.yml b/ui-ci-dev/src/main/resources/Files/VFC.yml new file mode 100644 index 0000000000..853ed35374 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/VFC.yml @@ -0,0 +1,77 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.vfc.vRouter: + derived_from: tosca.nodes.Root + properties: + att-part-number: + type: string + vendor-name: + type: string + vendor-part-number: + type: string + vendor-model: + type: string + vendor-model-description: + type: string + vcpu-default: + type: integer + vcpu-min: + type: integer + vcpu-max: + type: integer + vmemory-default: + type: integer + vmemory-units: + type: string + default: "GB" + vmemory-min: + type: integer + vmemory-max: + type: integer + vdisk-default: + type: integer + vdisk-units: + type: string + default: "GB" + vdisk-min: + type: integer + vdisk-max: + type: integer + vnf-type: + type: string + software-version: + type: string + software-version-state: + type: integer + software-file-name: + type: string + vnf-feature: + type: string + management-v6-address: + type: string + nm-lan-v6-address: + type: string + nm-lan-v6-prefix-length: + type: string + management-v4-address: + type: string + nm-lan-v4-address: + type: string + nm-lan-v4-prefix-length: + type: string + routing-instance-name: + type: string + routing-instances: + type: map + entry_schema: + type: string + requirements: + - host: + capability: tosca.capabilities.Container + relationship: tosca.relationships.HostedOn + capabilities: + binding: + type: tosca.capabilities.network.Bindable + occurrences: [1,UNBOUNDED] + \ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/VFCWithAttributes.yml b/ui-ci-dev/src/main/resources/Files/VFCWithAttributes.yml new file mode 100644 index 0000000000..133fd02cef --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/VFCWithAttributes.yml @@ -0,0 +1,43 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.vfc.VFC: + derived_from: tosca.nodes.Root + properties: + jcp-memory: + type: integer + description: GB + jcp-disk: + type: integer + 
description: GB + jcp-version: + type: version + attributes: + my_attr: + type: integer + private_address: + type: string + public_address: + type: string + networks: + type: map + entry_schema: + type: tosca.datatypes.network.NetworkInfo + ports: + type: map + entry_schema: + type: tosca.datatypes.network.PortInfo + capabilities: + vnf_hosting: + type: tosca.capabilities.Container + description: Provides hosting capability for VNFs + WAN_connectivity: + type: tosca.capabilities.network.Bindable + valid_source_types: [org.openecomp.cp.Wan] + description: external WAN1 n/w interface + occurrences: [1,2] + LAN_connectivity: + type: tosca.capabilities.network.Bindable + valid_source_types: [org.openecomp.cp.Lan] + description: external LAN n/w interface + occurrences: [1,8] \ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/VL.yml b/ui-ci-dev/src/main/resources/Files/VL.yml new file mode 100644 index 0000000000..74a2405af2 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/VL.yml @@ -0,0 +1,17 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.vl.VL1we23: + derived_from: tosca.nodes.Root + properties: + vendor: + type: string + required: false + vl_name: + type: string + required: false + capabilities: + virtual_linkable: + type: tosca.capabilities.network.Linkable + + \ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/Valid xml.xml b/ui-ci-dev/src/main/resources/Files/Valid xml.xml new file mode 100644 index 0000000000..0d67e48340 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/Valid xml.xml @@ -0,0 +1,4 @@ + + + + diff --git a/ui-ci-dev/src/main/resources/Files/Valid_tosca_Mycompute.yml b/ui-ci-dev/src/main/resources/Files/Valid_tosca_Mycompute.yml new file mode 100644 index 0000000000..8fac5e16a8 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/Valid_tosca_Mycompute.yml @@ -0,0 +1,35 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 +node_types: + org.openecomp.resource.vf.Database: + derived_from: tosca.nodes.Root + attributes: + private_address: + type: string + public_address: + type: string + networks: + type: map + entry_schema: + type: tosca.datatypes.network.NetworkInfo + ports: + type: map + entry_schema: + type: tosca.datatypes.network.PortInfo + requirements: + - local_storage: + capability: tosca.capabilities.Attachment + node: tosca.nodes.BlockStorage + relationship: tosca.relationships.AttachesTo + occurrences: [0, UNBOUNDED] + capabilities: + host: + type: tosca.capabilities.Container + valid_source_types: [tosca.nodes.SoftwareComponent] + endpoint : + type: tosca.capabilities.Endpoint.Admin + os: + type: tosca.capabilities.OperatingSystem + scalable: + type: tosca.capabilities.Scalable + binding: + type: tosca.capabilities.network.Bindable diff --git a/ui-ci-dev/src/main/resources/Files/Valid_tosca_ReplaceTest.yml b/ui-ci-dev/src/main/resources/Files/Valid_tosca_ReplaceTest.yml new file mode 100644 index 0000000000..90e771dab1 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/Valid_tosca_ReplaceTest.yml @@ -0,0 +1,35 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 +node_types: + org.openecomp.resource.VF.MyCompute: + derived_from: tosca.nodes.Root + attributes: + private_address: + type: string + public_address: + type: string + networks: + type: map + entry_schema: + type: tosca.datatypes.network.NetworkInfo + ports: + type: map + entry_schema: + type: tosca.datatypes.network.PortInfo + requirements: + - local_storage: + capability: 
tosca.capabilities.Attachment + node: tosca.nodes.BlockStorage + relationship: tosca.relationships.AttachesTo + occurrences: [0, UNBOUNDED] + capabilities: + host: + type: tosca.capabilities.Container + valid_source_types: [tosca.nodes.SoftwareComponent] + endpoint : + type: tosca.capabilities.Endpoint.Admin + os: + type: tosca.capabilities.OperatingSystem + scalable: + type: tosca.capabilities.Scalable + binding: + type: tosca.capabilities.network.Bindable diff --git a/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam-volumes_v0.3.env b/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam-volumes_v0.3.env new file mode 100644 index 0000000000..b494d8c270 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam-volumes_v0.3.env @@ -0,0 +1,6 @@ +parameters: + pcrf_oam_vol_size: 500 + pcrf_oam_volume_silver-1: Silver + pcrf_oam_volume_silver-2: Silver + pcrf_oam_vol_name_1: sde1-pcrfx01-oam001-vol-1 + pcrf_oam_vol_name_2: sde1-pcrfx01-oam001-vol-2 \ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam_v0.6.env b/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam_v0.6.env new file mode 100644 index 0000000000..cf7cf710ce --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam_v0.6.env @@ -0,0 +1,18 @@ +parameters: + pcrf_oam_server_names: ZRDM1PCRF01OAM001,ZRDM1PCRF01OAM002 + pcrf_oam_image_name: PCRF_8.995-ATTM1.0.3.qcow2 + pcrf_oam_flavor_name: lc.4xlarge4 + availabilityzone_name: nova + pcrf_cps_net_name: int_pcrf_net_0 + pcrf_cps_net_ips: 172.26.16.111,172.26.16.112 + pcrf_arbiter_vip: 172.26.16.115 + pcrf_cps_net_mask: 255.255.255.0 + pcrf_oam_net_name: oam_protected_net_0 + pcrf_oam_net_ips: 107.239.64.117,107.239.64.118 + pcrf_oam_net_gw: 107.239.64.1 + pcrf_oam_net_mask: 255.255.248.0 + pcrf_oam_volume_id_1: a4aa05fb-fcdc-457b-8077-6845fdfc3257 + pcrf_oam_volume_id_2: 93d8fc1f-f1c3-4933-86b2-039881ee910f + pcrf_security_group_name: nimbus_security_group + pcrf_vnf_id: 730797234b4a40aa99335157b02871cd + diff --git a/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam_v0.6.yaml b/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam_v0.6.yaml new file mode 100644 index 0000000000..6636eba210 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam_v0.6.yaml @@ -0,0 +1,108 @@ +heat_template_version: 2013-05-23 + +description: heat template that creates multiple PCRF OAM nodes stack + +parameters: + pcrf_oam_server_names: + type: comma_delimited_list + label: PCRF OAM server names + description: name of the PCRF OAM instance + pcrf_oam_image_name: + type: string + label: PCRF OAM image name + description: PCRF OAM image name + pcrf_oam_flavor_name: + type: string + label: PCRF OAM flavor name + description: flavor name of PCRF OAM instance + availabilityzone_name: + type: string + label: availabilityzone name + description: availabilityzone name + pcrf_cps_net_name: + type: string + label: CPS network name + description: CPS network name + pcrf_cps_net_ips: + type: comma_delimited_list + label: CPS network ips + description: CPS network ips + pcrf_cps_net_mask: + type: string + label: CPS network mask + description: CPS network mask + pcrf_arbiter_vip: + type: string + label: OAM Arbiter LB VIP + description: OAM Arbiter LB VIP + pcrf_oam_net_name: + type: string + label: OAM network name + description: OAM network name + pcrf_oam_net_ips: + type: comma_delimited_list + label: OAM network ips + description: OAM network ips + pcrf_oam_net_gw: + type: string + label: CPS network gateway + description: CPS network gateway + 
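hot-nimbus-oam_v0.6.env a few files up supplies concrete values for the parameters this template (hot-nimbus-oam_v0.6.yaml) declares, so the two are meant to be passed to Heat together, for example openstack stack create -t hot-nimbus-oam_v0.6.yaml -e hot-nimbus-oam_v0.6.env <stack-name> (stack name assumed); note also that the resources section further down instantiates nested provider templates by file name (nested-oam_v0.2.yaml), which are not part of this commit and would have to be delivered with it. The slightly non-obvious piece is the comma_delimited_list parameters, which the resources index into with a list-form get_param; a minimal sketch of that lookup, illustrative only:

heat_template_version: 2013-05-23

parameters:
  pcrf_oam_server_names:
    type: comma_delimited_list
    description: e.g. ZRDM1PCRF01OAM001,ZRDM1PCRF01OAM002 as in the .env file

outputs:
  first_oam_server_name:
    description: resolves to the first entry of the list
    value: { get_param: [pcrf_oam_server_names, 0] }
  second_oam_server_name:
    description: resolves to the second entry
    value: { get_param: [pcrf_oam_server_names, 1] }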
pcrf_oam_net_mask: + type: string + label: CPS network mask + description: CPS network mask + pcrf_oam_volume_id_1: + type: string + label: CPS OAM 001 Cinder Volume + description: CPS OAM 001 Cinder Volumes + pcrf_oam_volume_id_2: + type: string + label: CPS OAM 002 Cinder Volume + description: CPS OAM 002 Cinder Volumes + pcrf_security_group_name: + type: string + label: security group name + description: the name of security group + pcrf_vnf_id: + type: string + label: PCRF VNF Id + description: PCRF VNF Id + +resources: + server_pcrf_oam_001: + type: nested-oam_v0.2.yaml + properties: + pcrf_oam_server_name: { get_param: [pcrf_oam_server_names, 0] } + pcrf_oam_image_name: { get_param: pcrf_oam_image_name } + pcrf_oam_flavor_name: { get_param: pcrf_oam_flavor_name } + availabilityzone_name: { get_param: availabilityzone_name } + pcrf_security_group_name: { get_param: pcrf_security_group_name } + pcrf_oam_volume_id: { get_param: pcrf_oam_volume_id_1 } + pcrf_cps_net_name: { get_param: pcrf_cps_net_name } + pcrf_cps_net_ip: { get_param: [pcrf_cps_net_ips, 0] } + pcrf_cps_net_mask: { get_param: pcrf_cps_net_mask } + pcrf_oam_net_name: { get_param: pcrf_oam_net_name } + pcrf_oam_net_ip: { get_param: [pcrf_oam_net_ips, 0] } + pcrf_oam_net_mask: { get_param: pcrf_oam_net_mask } + pcrf_oam_net_gw: { get_param: pcrf_oam_net_gw } + pcrf_arbiter_vip: { get_param: pcrf_arbiter_vip } + pcrf_vnf_id: {get_param: pcrf_vnf_id} + + server_pcrf_oam_002: + type: nested-oam_v0.2.yaml + properties: + pcrf_oam_server_name: { get_param: [pcrf_oam_server_names, 1] } + pcrf_oam_image_name: { get_param: pcrf_oam_image_name } + pcrf_oam_flavor_name: { get_param: pcrf_oam_flavor_name } + availabilityzone_name: { get_param: availabilityzone_name } + pcrf_security_group_name: { get_param: pcrf_security_group_name } + pcrf_oam_volume_id: { get_param: pcrf_oam_volume_id_2 } + pcrf_cps_net_name: { get_param: pcrf_cps_net_name } + pcrf_cps_net_ip: { get_param: [pcrf_cps_net_ips, 1] } + pcrf_cps_net_mask: { get_param: pcrf_cps_net_mask } + pcrf_oam_net_name: { get_param: pcrf_oam_net_name } + pcrf_oam_net_ip: { get_param: [pcrf_oam_net_ips, 1] } + pcrf_oam_net_mask: { get_param: pcrf_oam_net_mask } + pcrf_oam_net_gw: { get_param: pcrf_oam_net_gw } + pcrf_arbiter_vip: { get_param: pcrf_arbiter_vip } + pcrf_vnf_id: {get_param: pcrf_vnf_id} diff --git a/ui-ci-dev/src/main/resources/Files/hot-nimbus-pcm_v0.6.yaml b/ui-ci-dev/src/main/resources/Files/hot-nimbus-pcm_v0.6.yaml new file mode 100644 index 0000000000..564104174a --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/hot-nimbus-pcm_v0.6.yaml @@ -0,0 +1,80 @@ +heat_template_version: 2013-05-23 + +description: heat template that creates PCRF Cluman stack + +parameters: + pcrf_pcm_server_names: + type: comma_delimited_list + label: PCRF CM server names + description: name of the PCRF CM instance + pcrf_pcm_image_name: + type: string + label: PCRF CM image name + description: PCRF CM image name + pcrf_pcm_flavor_name: + type: string + label: PCRF CM flavor name + description: flavor name of PCRF CM instance + availabilityzone_name: + type: string + label: availabilityzone name + description: availabilityzone name + pcrf_cps_net_name: + type: string + label: CPS network name + description: CPS network name + pcrf_cps_net_ips: + type: comma_delimited_list + label: CPS network ips + description: CPS network ips + pcrf_cps_net_mask: + type: string + label: CPS network mask + description: CPS network mask + pcrf_oam_net_name: + type: string + label: OAM network name + 
description: OAM network name + pcrf_oam_net_ips: + type: comma_delimited_list + label: OAM network ips + description: OAM network ips + pcrf_oam_net_gw: + type: string + label: CPS network gateway + description: CPS network gateway + pcrf_oam_net_mask: + type: string + label: CPS network mask + description: CPS network mask + pcrf_pcm_volume_id_1: + type: string + label: CPS Cluman Cinder Volume + description: CPS Cluman Cinder Volume + pcrf_security_group_name: + type: string + label: security group name + description: the name of security group + pcrf_vnf_id: + type: string + label: PCRF VNF Id + description: PCRF VNF Id + +resources: + server_pcrf_pcm_001: + type: nested-pcm_v0.2.yaml + properties: + pcrf_pcm_server_name: { get_param: [pcrf_pcm_server_names, 0] } + pcrf_pcm_image_name: { get_param: pcrf_pcm_image_name } + pcrf_pcm_flavor_name: { get_param: pcrf_pcm_flavor_name } + availabilityzone_name: { get_param: availabilityzone_name } + pcrf_security_group_name: { get_param: pcrf_security_group_name } + pcrf_pcm_volume_id: { get_param: pcrf_pcm_volume_id_1 } + pcrf_cps_net_name: { get_param: pcrf_cps_net_name } + pcrf_cps_net_ip: { get_param: [pcrf_cps_net_ips, 0] } + pcrf_cps_net_mask: { get_param: pcrf_cps_net_mask } + pcrf_oam_net_name: { get_param: pcrf_oam_net_name } + pcrf_oam_net_ip: { get_param: [pcrf_oam_net_ips, 0] } + pcrf_oam_net_mask: { get_param: pcrf_oam_net_mask } + pcrf_oam_net_gw: { get_param: pcrf_oam_net_gw } + pcrf_vnf_id: {get_param: pcrf_vnf_id} diff --git a/ui-ci-dev/src/main/resources/Files/myYang.xml b/ui-ci-dev/src/main/resources/Files/myYang.xml new file mode 100644 index 0000000000..0d86d213e6 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/myYang.xml @@ -0,0 +1,8 @@ + + + + + \ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/mycompute.yml b/ui-ci-dev/src/main/resources/Files/mycompute.yml new file mode 100644 index 0000000000..c8a0c03384 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/mycompute.yml @@ -0,0 +1,18 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 +node_types: + org.openecomp.resource.vfc.mycompute: + derived_from: tosca.nodes.Compute + capabilities: + scalable: + type: tosca.capabilities.Scalable + properties: + propertyForTest: + type: string + description: test + required: true + default: success + # min_instances property should override property from tosca.capabilities.Scalable + min_instances: + type: integer + default: 3 + diff --git a/ui-ci-dev/src/main/resources/Files/service_with_inputs.csar b/ui-ci-dev/src/main/resources/Files/service_with_inputs.csar new file mode 100644 index 0000000000..c4d4881fec Binary files /dev/null and b/ui-ci-dev/src/main/resources/Files/service_with_inputs.csar differ diff --git a/ui-ci-dev/src/main/resources/Files/vADTRAN.zip b/ui-ci-dev/src/main/resources/Files/vADTRAN.zip new file mode 100644 index 0000000000..3ecde2c7ec Binary files /dev/null and b/ui-ci-dev/src/main/resources/Files/vADTRAN.zip differ diff --git a/ui-ci-dev/src/main/resources/Files/vCDN.zip b/ui-ci-dev/src/main/resources/Files/vCDN.zip new file mode 100644 index 0000000000..51e654a841 Binary files /dev/null and b/ui-ci-dev/src/main/resources/Files/vCDN.zip differ diff --git a/ui-ci-dev/src/main/resources/Files/vFW_VF.yml b/ui-ci-dev/src/main/resources/Files/vFW_VF.yml new file mode 100644 index 0000000000..100883e399 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/vFW_VF.yml @@ -0,0 +1,58 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + + 
org.openecomp.resource.vf.vFW: + derived_from: tosca.nodes.Root + properties: + att-part-number: + type: string + vendor-name: + type: string + vendor-part-number: + type: string + vendor-model: + type: string + vendor-model-description: + type: string + vcpu-default: + type: integer + vcpu-min: + type: integer + vcpu-max: + type: integer + vmemory-default: + type: integer + vmemory-units: + type: string + default: "GB" + vmemory-min: + type: integer + vmemory-max: + type: integer + vdisk-default: + type: integer + vdisk-units: + type: string + default: "GB" + vdisk-min: + type: integer + vdisk-max: + type: integer + vnf-type: + type: string + software-version: + type: version + software-version-state: + type: integer + software-file-name: + type: string + vnf-feature: + type: string + requirements: + - host: + capability: tosca.capabilities.Container + relationship: tosca.relationships.HostedOn + capabilities: + binding: + type: tosca.capabilities.network.Bindable \ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/vFW_VFC.yml b/ui-ci-dev/src/main/resources/Files/vFW_VFC.yml new file mode 100644 index 0000000000..d0814c43aa --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/vFW_VFC.yml @@ -0,0 +1,58 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + + org.openecomp.resource.vfc.vFW: + derived_from: tosca.nodes.Root + properties: + att-part-number: + type: string + vendor-name: + type: string + vendor-part-number: + type: string + vendor-model: + type: string + vendor-model-description: + type: string + vcpu-default: + type: integer + vcpu-min: + type: integer + vcpu-max: + type: integer + vmemory-default: + type: integer + vmemory-units: + type: string + default: "GB" + vmemory-min: + type: integer + vmemory-max: + type: integer + vdisk-default: + type: integer + vdisk-units: + type: string + default: "GB" + vdisk-min: + type: integer + vdisk-max: + type: integer + vnf-type: + type: string + software-version: + type: version + software-version-state: + type: integer + software-file-name: + type: string + vnf-feature: + type: string + requirements: + - host: + capability: tosca.capabilities.Container + relationship: tosca.relationships.HostedOn + capabilities: + binding: + type: tosca.capabilities.network.Bindable \ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/vRouter_vfc.yml b/ui-ci-dev/src/main/resources/Files/vRouter_vfc.yml new file mode 100644 index 0000000000..95ffe959de --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/vRouter_vfc.yml @@ -0,0 +1,78 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + + +node_types: + org.openecomp.resource.vfc.vRouter: + derived_from: tosca.nodes.Root + properties: + att-part-number: + type: string + vendor-name: + type: string + vendor-part-number: + type: string + vendor-model: + type: string + vendor-model-description: + type: string + vcpu-default: + type: integer + vcpu-min: + type: integer + vcpu-max: + type: integer + vmemory-default: + type: integer + vmemory-units: + type: string + default: "GB" + vmemory-min: + type: integer + vmemory-max: + type: integer + vdisk-default: + type: integer + vdisk-units: + type: string + default: "GB" + vdisk-min: + type: integer + vdisk-max: + type: integer + vnf-type: + type: string + software-version: + type: string + software-version-state: + type: integer + software-file-name: + type: string + vnf-feature: + type: string + management-v6-address: + type: string + nm-lan-v6-address: + type: string + 
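The vFW VF/VFC types above, like the vRouter type that continues below, declare a host requirement against tosca.capabilities.Container, which is the same capability the uCPE type earlier in this set publishes as vnf_hosting. Nothing in the commit wires them together; as a hedged sketch of what that wiring could look like in a service template (node template names and property values are invented):

tosca_definitions_version: tosca_simple_yaml_1_0_0

imports:
  - UCPE_VFC.yml         # assumption: type definition files are co-located
  - vFW_VFC.yml

topology_template:
  node_templates:
    ucpe_host:
      type: org.openecomp.resource.vfc.uCPE
      properties:
        vendor-name: ExampleVendor     # illustrative
        vendor-model: ExampleModel     # illustrative
    vfw_instance:
      type: org.openecomp.resource.vfc.vFW
      requirements:
        - host:
            node: ucpe_host
            capability: vnf_hosting
            relationship: tosca.relationships.HostedOn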
nm-lan-v6-prefix-length: + type: string + management-v4-address: + type: string + nm-lan-v4-address: + type: string + nm-lan-v4-prefix-length: + type: string + routing-instance-name: + type: string + routing-instances: + type: map + entry_schema: + type: string + requirements: + - host: + capability: tosca.capabilities.Container + relationship: tosca.relationships.HostedOn + capabilities: + binding: + type: tosca.capabilities.network.Bindable + occurrences: [1,UNBOUNDED] + \ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/valid HEAT_ENV files.env b/ui-ci-dev/src/main/resources/Files/valid HEAT_ENV files.env new file mode 100644 index 0000000000..e576c0f67d --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/valid HEAT_ENV files.env @@ -0,0 +1,54 @@ +parameters: + flavor_scp_be_id: m1.small + flavor_scp_fe_id: m1.small + flavor_smp_id: m1.small + flavor_db_id: m1.small + image_scp_be_id: CCLINUX + image_scp_fe_id: CCLINUX + image_smp_id: CCLINUX + image_db_id: CCLINUX + + int_vscp_fe_cluster_net_id: int_vscp_fe_cluster_net + int_vscp_fe_cluster_cidr: 172.26.2.0/24 + int_vscp_cluster_net_id: int_vscp_cluster_net + int_vscp_cluster_cidr: 172.26.3.0/24 + int_vscp_db_network_net_id: int_vscp_db_network_net + int_vscp_db_network_cidr: 172.26.1.0/24 + + SIGNET_vrf_A1_direct_net_id: SIGNET_vrf_A1_direct_net + SIGNET_vrf_B1_direct_net_id: SIGNET_vrf_B1_direct_net + Cricket_OCS_protected_net_id: Cricket_OCS_protected_net +# OAM_direct_net_id: OAM_net +# OAM_direct_net_id: oam-direct-net + OAM_direct_net_id: Marks_OAM_direct_net + + be0_Cricket_OCS_protected_ips: 107.239.15.17 + be1_Cricket_OCS_protected_ips: 107.239.15.18 + be2_Cricket_OCS_protected_ips: 107.239.15.19 + be3_Cricket_OCS_protected_ips: 107.239.15.20 + be4_Cricket_OCS_protected_ips: 107.239.15.21 + be0_OAM_direct_ips: 10.250.10.33 + be1_OAM_direct_ips: 10.250.10.34 + be2_OAM_direct_ips: 10.250.10.35 + be3_OAM_direct_ips: 10.250.10.36 + be4_OAM_direct_ips: 10.250.10.37 + fe0_SIGNET_vrf_A1_direct_ips: 172.26.4.1 + fe0_OAM_direct_ips: 10.250.10.38 + fe1_SIGNET_vrf_B1_direct_ips: 172.26.4.5 + fe1_OAM_direct_ips: 10.250.10.39 + smp0_OAM_direct_ips: 10.250.10.40 + smp1_OAM_direct_ips: 10.250.10.41 + db0_OAM_direct_ips: 10.250.10.42 + db1_OAM_direct_ips: 10.250.10.43 + + vm_scp_be0_name: vSCP_BE0 + vm_scp_be1_name: vSCP_BE1 + vm_scp_be2_name: vSCP_BE2 + vm_scp_be3_name: vSCP_BE3 + vm_scp_be4_name: vSCP_BE4 + vm_scp_fe0_name: vSCP_FE0 + vm_scp_fe1_name: vSCP_FE1 + vm_smp0_name: vSMP0 + vm_smp1_name: vSMP1 + vm_db0_name: vDB0 + vm_db1_name: vDB1 diff --git a/ui-ci-dev/src/main/resources/Files/validHEATfiles.yaml b/ui-ci-dev/src/main/resources/Files/validHEATfiles.yaml new file mode 100644 index 0000000000..6835485ca1 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/validHEATfiles.yaml @@ -0,0 +1,787 @@ +heat_template_version: 2013-05-23 +################################# +# +# Changes in v0.2: +# - Unique availability zone for each VM +# - LAN8 and SLAN networks removed according to latest Prod/Type I diagram +# - 2 DB VMs added +# - Images corrected +# - VM start-up order: SMP->DB->BE->FE (no error handling yet) +# - Provisioning scripts placeholders +# +################################# + +description: ASC Template + +parameters: +# availability_zone_smp0: +# type: string +# default: nova +# availability_zone_smp1: +# type: string +# default: nova +# availability_zone_fe0: +# type: string +# default: nova +# availability_zone_fe1: +# type: string +# default: nova +# availability_zone_db0: +# type: string +# 
default: nova +# availability_zone_db1: +# type: string +# default: nova +# availability_zone_be0: +# type: string +# default: nova +# availability_zone_be1: +# type: string +# default: nova +# availability_zone_be2: +# type: string +# default: nova +# availability_zone_be3: +# type: string +# default: nova +# availability_zone_be4: +# type: string +# default: nova + + vnf_name: + type: string + description: Unique name for this VNF instance + default: This_is_the_SCP_name + vnf_id: + type: string + description: Unique ID for this VNF instance + default: This_is_ths_SCP_id + + flavor_scp_be_id: + type: string + description: flavor type + default: a1.Small + flavor_scp_fe_id: + type: string + description: flavor type + default: a1.Small + flavor_smp_id: + type: string + description: flavor type + default: a1.Small + flavor_db_id: + type: string + description: flavor type + default: a1.Small + image_scp_be_id: + type: string + description: Image use to boot a server + default: asc_base_image_be + image_scp_fe_id: + type: string + description: Image use to boot a server + default: asc_base_image_fe + image_smp_id: + type: string + description: Image use to boot a server + default: asc_base_image_smp + image_db_id: + type: string + description: Image use to boot a server + default: asc_base_image_db + + int_vscp_fe_cluster_net_id: + type: string + description: LAN2 FE Cluster/KA + int_vscp_fe_cluster_cidr: + type: string + description: Private Network2 Address (CIDR notation) + int_vscp_cluster_net_id: + type: string + description: LAN3 Cluster + int_vscp_cluster_cidr: + type: string + description: Private Network3 Address (CIDR notation) + int_vscp_db_network_net_id: + type: string + description: LAN4 DB + int_vscp_db_network_cidr: + type: string + description: Private Network4 Address (CIDR notation) + SIGNET_vrf_A1_direct_net_id: + type: string + description: Network name for SIGTRAN_A + SIGNET_vrf_B1_direct_net_id: + type: string + description: Network name for SIGTRAN_B + Cricket_OCS_protected_net_id: + type: string + description: Network name for CRICKET_OCS + OAM_direct_net_id: + type: string + description: Network name for OAM + be0_Cricket_OCS_protected_ips: + type: string + label: be0 port 5 OAM ip address + description: be0 port 5 OAM ip address + be1_Cricket_OCS_protected_ips: + type: string + label: be1 port 5 OAM ip address + description: be1 port 5 OAM ip address + be2_Cricket_OCS_protected_ips: + type: string + label: be2 port 5 OAM ip address + description: be2 port 5 OAM ip address + be3_Cricket_OCS_protected_ips: + type: string + label: be3 port 5 OAM ip address + description: be3 port 5 OAM ip address + be4_Cricket_OCS_protected_ips: + type: string + label: be4 port 5 OAM ip address + description: be4 port 5 OAM ip address + be0_OAM_direct_ips: + type: string + label: be0 port 7 OAM ip address + description: be0 port 7 OAM ip address + be1_OAM_direct_ips: + type: string + label: be1 port 7 OAM ip address + description: be1 port 7 OAM ip address + be2_OAM_direct_ips: + type: string + label: be2 port 7 OAM ip address + description: be2 port 7 OAM ip address + be3_OAM_direct_ips: + type: string + label: be3 port 7 OAM ip address + description: be3 port 7 OAM ip address + be4_OAM_direct_ips: + type: string + label: be4 port 7 OAM ip address + description: be4 port 7 OAM ip address + fe0_SIGNET_vrf_A1_direct_ips: + type: string + label: fe0 port 0 SIGTRAN ip address + description: fe0 port 0 SIGTRAN ip address + fe0_OAM_direct_ips: + type: string + label: fe0 port 7 OAM ip 
address + description: fe0 port 7 OAM ip address + fe1_SIGNET_vrf_B1_direct_ips: + type: string + label: fe1 port 1 SIGTRAN ip address + description: fe1 port 1 SIGTRAN ip address + fe1_OAM_direct_ips: + type: string + label: fe1 port 7 OAM ip address + description: fe1 port 7 OAM ip address + smp0_OAM_direct_ips: + type: string + label: smp0 port 7 OAM ip address + description: smp0 port 7 OAM ip address + smp1_OAM_direct_ips: + type: string + label: smp1 port 7 OAM ip address + description: smp1 port 7 OAM ip address + db0_OAM_direct_ips: + type: string + label: db0 port 7 OAM ip address + description: smp0 port 7 OAM ip address + db1_OAM_direct_ips: + type: string + label: smp1 port 7 OAM ip address + description: db1 port 7 OAM ip address + vm_scp_be0_name: + type: string + default: vSCP_BE0 + description: name of VM + vm_scp_be1_name: + type: string + default: vSCP_BE1 + description: name of VM + vm_scp_be2_name: + type: string + default: vSCP_BE2 + description: name of VM + vm_scp_be3_name: + type: string + default: vSCP_BE3 + description: name of VM + vm_scp_be4_name: + type: string + default: vSCP_BE4 + description: name of VM + vm_scp_fe0_name: + type: string + default: vSCP_FE0 + description: name of VM + vm_scp_fe1_name: + type: string + default: vSCP_FE1 + description: name of VM + vm_smp0_name: + type: string + default: vSMP0 + description: name of VM + vm_smp1_name: + type: string + default: vSMP1 + description: name of VM + vm_db0_name: + type: string + default: vDB0 + description: name of VM + vm_db1_name: + type: string + default: vDB1 + description: name of VM + +resources: +# scp_be_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: scp_be_wait_handle } +# count: 5 +# timeout: 300 +# scp_be_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# scp_fe_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: scp_fe_wait_handle } +# count: 2 +# timeout: 300 +# scp_fe_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# smp_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: smp_wait_handle } +# count: 2 +# timeout: 300 +# smp_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# db_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: db_wait_handle } +# count: 2 +# timeout: 300 +# db_wait_handle: +# type: OS::Heat::WaitConditionHandle + + FE_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + BE_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + SMP_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + DB_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + + FE_Clustering_KA: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_fe_cluster_net_id } + + FE_Clustering_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: FE_Clustering_KA } + cidr: { get_param: int_vscp_fe_cluster_cidr } + + Clustering_Network: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_cluster_net_id } + + Clustering_Network_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: Clustering_Network } + cidr: { get_param: int_vscp_cluster_cidr } + + DB_Network: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_db_network_net_id } + + DB_Network_subnet: + type: 
OS::Neutron::Subnet + properties: + network_id: { get_resource: DB_Network } + cidr: { get_param: int_vscp_db_network_cidr } + + server_scp_be0: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be0_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be0 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be0_port_3 } + - port: { get_resource: be0_port_4 } + - port: { get_resource: be0_port_5 } + - port: { get_resource: be0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be0_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be0_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be0_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be0_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be0_Cricket_OCS_protected_ips}}] + + be0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be0_OAM_direct_ips}}] + + server_scp_be1: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be1_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be1 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be1_port_3 } + - port: { get_resource: be1_port_4 } + - port: { get_resource: be1_port_5 } + - port: { get_resource: be1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be1_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be1_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be1_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be1_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be1_Cricket_OCS_protected_ips}}] + + be1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be1_OAM_direct_ips}}] + + server_scp_be2: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be2_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be2 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be2_port_3 } + - port: { get_resource: be2_port_4 } + - port: { get_resource: be2_port_5 } + - port: { get_resource: be2_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be2_name} +# 
wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be2_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be2_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be2_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be2_Cricket_OCS_protected_ips}}] + + be2_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be2_OAM_direct_ips}}] + + server_scp_be3: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be3_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be3 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be3_port_3 } + - port: { get_resource: be3_port_4 } + - port: { get_resource: be3_port_5 } + - port: { get_resource: be3_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be3_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be3_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be3_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be3_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be3_Cricket_OCS_protected_ips}}] + + be3_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be3_OAM_direct_ips}}] + + server_scp_be4: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be4_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be4 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be4_port_3 } + - port: { get_resource: be4_port_4 } + - port: { get_resource: be4_port_5 } + - port: { get_resource: be4_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be4_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be4_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be4_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be4_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be4_Cricket_OCS_protected_ips}}] + + be4_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be4_OAM_direct_ips}}] + + server_scp_fe0: + type: OS::Nova::Server +# depends on: scp_be_wait_condition + properties: + name: { get_param: vm_scp_fe0_name } + image: { get_param: image_scp_fe_id } +# availability_zone: { get_param: availability_zone_fe0 } + flavor: { 
get_param: flavor_scp_fe_id }
+      scheduler_hints: { group: { get_resource: FE_Affinity } }
+      networks:
+        - port: { get_resource: fe0_port_0 }
+        - port: { get_resource: fe0_port_2 }
+        - port: { get_resource: fe0_port_3 }
+        - port: { get_resource: fe0_port_7 }
+      metadata:
+        vnf_id: { get_param: vnf_id }
+      user_data:
+        str_replace:
+          template: |
+            #!/bin/bash
+            #todo: provision $vm_name
+            wc_notify --data-binary '{"status": "SUCCESS"}'
+          params:
+            $vm_name: {get_param: vm_scp_fe0_name}
+#            wc_notify: { get_attr: ['scp_fe_wait_handle', 'curl_cli'] }
+
+  fe0_port_0:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_param: SIGNET_vrf_A1_direct_net_id }
+      fixed_ips: [{"ip_address": {get_param: fe0_SIGNET_vrf_A1_direct_ips}}]
+
+  fe0_port_2:
+    type: OS::Neutron::Port
+    properties:
+      network_id: { get_resource: FE_Clustering_KA }
+
+  fe0_port_3:
+    type: OS::Neutron::Port
+    properties:
+      network_id: { get_resource: Clustering_Network }
+
+  fe0_port_7:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_param: OAM_direct_net_id }
+      fixed_ips: [{"ip_address": {get_param: fe0_OAM_direct_ips}}]
+
+  server_scp_fe1:
+    type: OS::Nova::Server
+#    depends on: scp_be_wait_condition
+    properties:
+      name: { get_param: vm_scp_fe1_name }
+      image: { get_param: image_scp_fe_id }
+#      availability_zone: { get_param: availability_zone_fe1 }
+      flavor: { get_param: flavor_scp_fe_id }
+      scheduler_hints: { group: { get_resource: FE_Affinity } }
+      networks:
+        - port: { get_resource: fe1_port_1 }
+        - port: { get_resource: fe1_port_2 }
+        - port: { get_resource: fe1_port_3 }
+        - port: { get_resource: fe1_port_7 }
+      metadata:
+        vnf_id: { get_param: vnf_id }
+      user_data:
+        str_replace:
+          template: |
+            #!/bin/bash
+            #todo: provision $vm_name
+            wc_notify --data-binary '{"status": "SUCCESS"}'
+          params:
+            $vm_name: {get_param: vm_scp_fe1_name}
+#            wc_notify: { get_attr: ['scp_fe_wait_handle', 'curl_cli'] }
+
+  fe1_port_1:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_param: SIGNET_vrf_B1_direct_net_id }
+      fixed_ips: [{"ip_address": {get_param: fe1_SIGNET_vrf_B1_direct_ips}}]
+
+  fe1_port_2:
+    type: OS::Neutron::Port
+    properties:
+      network_id: { get_resource: FE_Clustering_KA }
+
+  fe1_port_3:
+    type: OS::Neutron::Port
+    properties:
+      network_id: { get_resource: Clustering_Network }
+
+  fe1_port_7:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_param: OAM_direct_net_id }
+      fixed_ips: [{"ip_address": {get_param: fe1_OAM_direct_ips}}]
+
+  server_smp0:
+    type: OS::Nova::Server
+    properties:
+      name: { get_param: vm_smp0_name }
+      image: { get_param: image_smp_id }
+#      availability_zone: { get_param: availability_zone_smp0 }
+      flavor: { get_param: flavor_smp_id }
+      scheduler_hints: { group: { get_resource: SMP_Affinity } }
+      networks:
+        - port: { get_resource: smp0_port_7 }
+      metadata:
+        vnf_id: { get_param: vnf_id }
+      user_data:
+        str_replace:
+          template: |
+            #!/bin/bash
+            #todo: provision $vm_name
+            wc_notify --data-binary '{"status": "SUCCESS"}'
+          params:
+            $vm_name: {get_param: vm_smp0_name}
+#            wc_notify: { get_attr: ['smp_wait_handle', 'curl_cli'] }
+
+  smp0_port_7:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_param: OAM_direct_net_id }
+      fixed_ips: [{"ip_address": {get_param: smp0_OAM_direct_ips}}]
+
+  server_smp1:
+    type: OS::Nova::Server
+    properties:
+      name: { get_param: vm_smp1_name }
+      image: { get_param: image_smp_id }
+#      availability_zone: { get_param: availability_zone_smp1 }
+      flavor: { get_param: flavor_smp_id }
+      scheduler_hints: { group: { get_resource: SMP_Affinity } }
+      networks:
+        - port: { get_resource: smp1_port_7 }
+      metadata:
+        vnf_id: { get_param: vnf_id }
+      user_data:
+        str_replace:
+          template: |
+            #!/bin/bash
+            #todo: provision $vm_name
+            wc_notify --data-binary '{"status": "SUCCESS"}'
+          params:
+            $vm_name: {get_param: vm_smp1_name}
+#            wc_notify: { get_attr: ['smp_wait_handle', 'curl_cli'] }
+
+  smp1_port_7:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_param: OAM_direct_net_id }
+      fixed_ips: [{"ip_address": {get_param: smp1_OAM_direct_ips}}]
+
+  server_db0:
+    type: OS::Nova::Server
+#    depends_on: smp_wait_condition
+    properties:
+      name: { get_param: vm_db0_name }
+      image: { get_param: image_db_id }
+#      availability_zone: { get_param: availability_zone_db0 }
+      flavor: { get_param: flavor_db_id }
+      scheduler_hints: { group: { get_resource: DB_Affinity } }
+      networks:
+        - port: { get_resource: db0_port_4 }
+        - port: { get_resource: db0_port_7 }
+      metadata:
+        vnf_id: { get_param: vnf_id }
+      user_data:
+        str_replace:
+          template: |
+            #!/bin/bash
+            #todo: provision $vm_name
+            wc_notify --data-binary '{"status": "SUCCESS"}'
+          params:
+            $vm_name: {get_param: vm_db0_name}
+#            wc_notify: { get_attr: ['db_wait_handle', 'curl_cli'] }
+
+  db0_port_4:
+    type: OS::Neutron::Port
+    properties:
+      network_id: { get_resource: DB_Network }
+
+  db0_port_7:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_param: OAM_direct_net_id }
+      fixed_ips: [{"ip_address": {get_param: db0_OAM_direct_ips}}]
+
+  server_db1:
+    type: OS::Nova::Server
+#    depends_on: smp_wait_condition
+    properties:
+      name: { get_param: vm_db1_name }
+      image: { get_param: image_db_id }
+#      availability_zone: { get_param: availability_zone_db1 }
+      flavor: { get_param: flavor_db_id }
+      scheduler_hints: { group: { get_resource: DB_Affinity } }
+      networks:
+        - port: { get_resource: db1_port_4 }
+        - port: { get_resource: db1_port_7 }
+      metadata:
+        vnf_id: { get_param: vnf_id }
+      user_data:
+        str_replace:
+          template: |
+            #!/bin/bash
+            #todo: provision $vm_name
+            wc_notify --data-binary '{"status": "SUCCESS"}'
+          params:
+            $vm_name: {get_param: vm_db1_name}
+#            wc_notify: { get_attr: ['db_wait_handle', 'curl_cli'] }
+
+  db1_port_4:
+    type: OS::Neutron::Port
+    properties:
+      network_id: { get_resource: DB_Network }
+
+  db1_port_7:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_param: OAM_direct_net_id }
+      fixed_ips: [{"ip_address": {get_param: db1_OAM_direct_ips}}]
\ No newline at end of file
diff --git a/ui-ci-dev/src/main/resources/Files/valid_vf.csar b/ui-ci-dev/src/main/resources/Files/valid_vf.csar
new file mode 100644
index 0000000000..01bf159071
Binary files /dev/null and b/ui-ci-dev/src/main/resources/Files/valid_vf.csar differ
diff --git a/ui-ci-dev/src/main/resources/Files/vf_with_groups.csar b/ui-ci-dev/src/main/resources/Files/vf_with_groups.csar
new file mode 100644
index 0000000000..61ea8cee20
Binary files /dev/null and b/ui-ci-dev/src/main/resources/Files/vf_with_groups.csar differ
diff --git a/ui-ci-dev/src/main/resources/Files/yamlSample.yml b/ui-ci-dev/src/main/resources/Files/yamlSample.yml
new file mode 100644
index 0000000000..10ccf71d51
--- /dev/null
+++ b/ui-ci-dev/src/main/resources/Files/yamlSample.yml
@@ -0,0 +1,5 @@
+tosca_definitions_version: tosca_simple_yaml_1_0_0
+
+node_types:
+  org.openecomp.resource.cp.CP:
+    derived_from: org.openecomp.resource.cp.root
\ No newline at end of file
diff --git a/ui-ci-dev/src/main/resources/Files/yamlSample2.yml b/ui-ci-dev/src/main/resources/Files/yamlSample2.yml
new file mode 100644
index 0000000000..10ccf71d51
--- /dev/null
+++ b/ui-ci-dev/src/main/resources/Files/yamlSample2.yml
@@ -0,0 +1,5 @@
+tosca_definitions_version: tosca_simple_yaml_1_0_0
+
+node_types:
+  org.openecomp.resource.cp.CP:
+    derived_from: org.openecomp.resource.cp.root
\ No newline at end of file
-- cgit 1.2.3-korg
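
Note on the commented-out wc_notify lines in the Heat template above: in a stock HOT template the wc_notify placeholder is normally wired to an OS::Heat::WaitConditionHandle through str_replace, together with an OS::Heat::WaitCondition that holds stack creation until the boot script signals SUCCESS. The fragment below is only a minimal sketch of that wiring, not part of the commit; the handle name scp_fe_wait_handle matches the commented-out get_attr above, while the wait-condition name and the image, flavor, count, and timeout values are placeholders.

heat_template_version: 2013-05-23

resources:
  scp_fe_wait_handle:
    type: OS::Heat::WaitConditionHandle

  scp_fe_wait_condition:
    type: OS::Heat::WaitCondition
    properties:
      handle: { get_resource: scp_fe_wait_handle }
      count: 1          # placeholder: one SUCCESS signal expected
      timeout: 600      # placeholder: seconds before the stack create fails

  server_scp_fe0:
    type: OS::Nova::Server
    properties:
      name: scp_fe0               # placeholder; the committed file takes this from get_param
      image: scp_fe_image         # placeholder
      flavor: m1.small            # placeholder
      user_data_format: RAW       # pass the script to the VM without MIME wrapping
      user_data:
        str_replace:
          template: |
            #!/bin/bash
            # provision the VM, then tell Heat that boot has finished
            wc_notify --data-binary '{"status": "SUCCESS"}'
          params:
            wc_notify: { get_attr: ['scp_fe_wait_handle', 'curl_cli'] }

With this wiring enabled, the stack stays in CREATE_IN_PROGRESS until the script posts its SUCCESS signal; the test templates in this commit presumably keep those lines commented out so the stacks can be created without depending on guest-side scripting.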