Diffstat (limited to 'ui-ci-dev/src/main/resources')
47 files changed, 4588 insertions, 0 deletions
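The diffs below add TOSCA node-type definitions (CP, LAN, WAN, uCPE) and Heat templates used as UI test resources. For orientation only — this is not part of the commit — here is a minimal sketch of how a node type such as org.openecomp.resource.cp.CP from CP.yml could be instantiated in a TOSCA topology template; the template name and property values are illustrative:

    tosca_definitions_version: tosca_simple_yaml_1_0_0

    topology_template:
      node_templates:
        ucpe_cp_0:                             # hypothetical node template name
          type: org.openecomp.resource.cp.CP
          properties:
            vendor-name: example-vendor        # required by the node type
            vendor-model: example-model        # required by the node type
            total-vcpu: 4                      # illustrative values
            total-memory: 16                   # GB
            total-disk: 100                    # GB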
diff --git a/ui-ci-dev/src/main/resources/Files/CP.yml b/ui-ci-dev/src/main/resources/Files/CP.yml
new file mode 100644
index 0000000000..48b592265f
--- /dev/null
+++ b/ui-ci-dev/src/main/resources/Files/CP.yml
@@ -0,0 +1,65 @@
+tosca_definitions_version: tosca_simple_yaml_1_0_0
+
+node_types:
+  org.openecomp.resource.cp.CP:
+    derived_from: tosca.nodes.Root
+    properties:
+      att-ucpe-part-number:
+        type: string
+      vendor-name:
+        type: string
+        required: true
+      vendor-model:
+        type: string
+        required: true
+      total-vcpu:
+        type: integer
+        description: number of vCPUs
+      total-memory:
+        type: integer
+        description: GB
+      total-disk:
+        type: integer
+        description: GB
+      base-system-image-file-name:
+        type: string
+      linux-host-vendor:
+        type: string
+      linux-host-os-version:
+        type: version
+      base-system-software:
+        type: string
+      jdm-vcpu:
+        type: integer
+      jdm-memory:
+        type: integer
+        description: GB
+      jdm-disk:
+        type: integer
+        description: GB
+      jdm-version:
+        type: string
+      jcp-vcpu:
+        type: integer
+      jcp-memory:
+        type: integer
+        description: GB
+      jcp-disk:
+        type: integer
+        description: GB
+      jcp-version:
+        type: version
+    capabilities:
+      vnf_hosting:
+        type: tosca.capabilities.Container
+        description: Provides hosting capability for VNFs
+      WAN_connectivity:
+        type: tosca.capabilities.network.Bindable
+        valid_source_types: [org.openecomp.cp.Wan]
+        description: external WAN1 n/w interface
+        occurrences: [1,2]
+      LAN_connectivity:
+        type: tosca.capabilities.network.Bindable
+        valid_source_types: [org.openecomp.cp.Lan]
+        description: external LAN n/w interface
+        occurrences: [1,8]
\ No newline at end of file
diff --git a/ui-ci-dev/src/main/resources/Files/CP_LAN - Copy.yml b/ui-ci-dev/src/main/resources/Files/CP_LAN - Copy.yml
new file mode 100644
index 0000000000..224d61f2c9
--- /dev/null
+++ b/ui-ci-dev/src/main/resources/Files/CP_LAN - Copy.yml
@@ -0,0 +1,13 @@
+tosca_definitions_version: tosca_simple_yaml_1_0_0
+
+node_types:
+  org.openecomp.resource.vfc.uCPE:
+    derived_from: tosca.nodes.Root
+    properties:
+      type:
+        type: string
+        required: false
+    requirements:
+      - virtualLink:
+          capability: tosca.capabilities.network.Linkable
+          relationship: tosca.relationships.network.LinksTo
diff --git a/ui-ci-dev/src/main/resources/Files/CP_LAN.yml b/ui-ci-dev/src/main/resources/Files/CP_LAN.yml
new file mode 100644
index 0000000000..a96084ba34
--- /dev/null
+++ b/ui-ci-dev/src/main/resources/Files/CP_LAN.yml
@@ -0,0 +1,19 @@
+tosca_definitions_version: tosca_simple_yaml_1_0_0
+
+node_types:
+  org.openecomp.resource.cp.LAN:
+    derived_from: org.openecomp.resource.cp.CP
+    properties:
+      type:
+        type: string
+        required: false
+    requirements:
+      - virtualLink_in:
+          capability: tosca.capabilities.network.Linkable
+          relationship: tosca.relationships.network.LinksTo
+      - virtualLink_out:
+          capability: tosca.capabilities.network.Linkable
+          relationship: tosca.relationships.network.LinksTo
+      - virtualbinding:
+          capability: tosca.capabilities.network.Bindable
+          relationship: tosca.relationships.network.BindsTo
\ No newline at end of file
diff --git a/ui-ci-dev/src/main/resources/Files/CP_WAN.yml b/ui-ci-dev/src/main/resources/Files/CP_WAN.yml
new file mode 100644
index 0000000000..1bce457d43
--- /dev/null
+++ b/ui-ci-dev/src/main/resources/Files/CP_WAN.yml
@@ -0,0 +1,19 @@
+tosca_definitions_version: tosca_simple_yaml_1_0_0
+
+node_types:
+  org.openecomp.resource.cp.WAN:
+    derived_from: org.openecomp.resource.cp.CP
+    properties:
+      type:
+        type: string
+        required: false
+    requirements:
+      - virtualLink_in:
+          capability: tosca.capabilities.network.Linkable
+          relationship: tosca.relationships.network.LinksTo
+      - virtualLink_out:
+          capability: tosca.capabilities.network.Linkable
+          relationship: tosca.relationships.network.LinksTo
+      - virtualbinding:
+          capability: tosca.capabilities.network.Bindable
+          relationship: tosca.relationships.network.BindsTo
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/Heat-File 1.yaml b/ui-ci-dev/src/main/resources/Files/Heat-File 1.yaml new file mode 100644 index 0000000000..d332078d35 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/Heat-File 1.yaml @@ -0,0 +1,791 @@ +heat_template_version: 2013-05-23 +################################# +# +# Changes in v0.2: +# - Unique availability zone for each VM +# - LAN8 and SLAN networks removed according to latest Prod/Type I diagram +# - 2 DB VMs added +# - Images corrected +# - VM start-up order: SMP->DB->BE->FE (no error handling yet) +# - Provisioning scripts placeholders +# +################################# + +description: ASC Template + +parameters: +# availability_zone_smp0: +# type: string +# default: nova +# availability_zone_smp1: +# type: string +# default: nova +# availability_zone_fe0: +# type: string +# default: nova +# availability_zone_fe1: +# type: string +# default: nova +# availability_zone_db0: +# type: string +# default: nova +# availability_zone_db1: +# type: string +# default: nova +# availability_zone_be0: +# type: string +# default: nova +# availability_zone_be1: +# type: string +# default: nova +# availability_zone_be2: +# type: string +# default: nova +# availability_zone_be3: +# type: string +# default: nova +# availability_zone_be4: +# type: string +# default: nova + + vnf_name: + type: string + description: Unique name for this VNF instance + default: This_is_the_SCP_name + vnf_id: + type: string + description: Unique ID for this VNF instance + default: This_is_ths_SCP_id + + flavor_scp_be_id: + type: string + description: flavor type + default: a1.Small + flavor_scp_fe_id: + type: string + description: flavor type + default: a1.Small + flavor_smp_id: + type: string + description: flavor type + default: a1.Small + flavor_db_id: + type: string + description: flavor type + default: a1.Small + image_scp_be_id: + type: string + description: Image use to boot a server + default: asc_base_image_be + image_scp_fe_id: + type: string + description: Image use to boot a server + default: asc_base_image_fe + image_smp_id: + type: string + description: Image use to boot a server + default: asc_base_image_smp + image_db_id: + type: string + description: Image use to boot a server + default: asc_base_image_db + + int_vscp_fe_cluster_net_id: + type: string + description: LAN2 FE Cluster/KA + int_vscp_fe_cluster_cidr: + type: string + description: Private Network2 Address (CIDR notation) + int_vscp_cluster_net_id: + type: string + description: LAN3 Cluster + int_vscp_cluster_cidr: + type: string + description: Private Network3 Address (CIDR notation) + int_vscp_db_network_net_id: + type: string + description: LAN4 DB + int_vscp_db_network_cidr: + type: string + description: Private Network4 Address (CIDR notation) + SIGNET_vrf_A1_direct_net_id: + type: string + description: Network name for SIGTRAN_A + SIGNET_vrf_B1_direct_net_id: + type: string + description: Network name for SIGTRAN_B + Cricket_OCS_protected_net_id: + type: string + description: Network name for CRICKET_OCS + OAM_direct_net_id: + type: string + description: Network name for OAM + be0_Cricket_OCS_protected_ips: + type: string + label: be0 port 5 OAM ip address + description: be0 port 5 OAM ip address + be1_Cricket_OCS_protected_ips: + type: string + label: be1 port 5 OAM ip address + description: be1 port 5 OAM ip address + be2_Cricket_OCS_protected_ips: + type: string + label: be2 port 5 OAM ip address + description: be2 port 5 OAM ip address + 
be3_Cricket_OCS_protected_ips: + type: string + label: be3 port 5 OAM ip address + description: be3 port 5 OAM ip address + be4_Cricket_OCS_protected_ips: + type: string + label: be4 port 5 OAM ip address + description: be4 port 5 OAM ip address + be0_OAM_direct_ips: + type: string + label: be0 port 7 OAM ip address + description: be0 port 7 OAM ip address + be1_OAM_direct_ips: + type: string + label: be1 port 7 OAM ip address + description: be1 port 7 OAM ip address + be2_OAM_direct_ips: + type: string + label: be2 port 7 OAM ip address + description: be2 port 7 OAM ip address + be3_OAM_direct_ips: + type: string + label: be3 port 7 OAM ip address + description: be3 port 7 OAM ip address + be4_OAM_direct_ips: + type: string + label: be4 port 7 OAM ip address + description: be4 port 7 OAM ip address + fe0_SIGNET_vrf_A1_direct_ips: + type: string + label: fe0 port 0 SIGTRAN ip address + description: fe0 port 0 SIGTRAN ip address + fe0_OAM_direct_ips: + type: string + label: fe0 port 7 OAM ip address + description: fe0 port 7 OAM ip address + fe1_SIGNET_vrf_B1_direct_ips: + type: string + label: fe1 port 1 SIGTRAN ip address + description: fe1 port 1 SIGTRAN ip address + fe1_OAM_direct_ips: + type: string + label: fe1 port 7 OAM ip address + description: fe1 port 7 OAM ip address + smp0_OAM_direct_ips: + type: string + label: smp0 port 7 OAM ip address + description: smp0 port 7 OAM ip address + smp1_OAM_direct_ips: + type: string + label: smp1 port 7 OAM ip address + description: smp1 port 7 OAM ip address + db0_OAM_direct_ips: + type: string + label: db0 port 7 OAM ip address + description: smp0 port 7 OAM ip address + db1_OAM_direct_ips: + type: string + label: smp1 port 7 OAM ip address + description: db1 port 7 OAM ip address + vm_scp_be0_name: + type: string + default: vSCP_BE0 + description: name of VM + vm_scp_be1_name: + type: string + default: vSCP_BE1 + description: name of VM + vm_scp_be2_name: + type: string + default: vSCP_BE2 + description: name of VM + vm_scp_be3_name: + type: string + default: vSCP_BE3 + description: name of VM + vm_scp_be4_name: + type: string + default: vSCP_BE4 + description: name of VM + vm_scp_fe0_name: + type: string + default: vSCP_FE0 + description: name of VM + vm_scp_fe1_name: + type: string + default: vSCP_FE1 + description: name of VM + vm_smp0_name: + type: string + default: vSMP0 + description: name of VM + vm_smp1_name: + type: string + default: vSMP1 + description: name of VM + vm_db0_name: + type: string + default: vDB0 + description: name of VM + vm_db1_name: + type: string + default: vDB1 + description: name of VM + +resources: +# scp_be_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: scp_be_wait_handle } +# count: 5 +# timeout: 300 +# scp_be_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# scp_fe_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: scp_fe_wait_handle } +# count: 2 +# timeout: 300 +# scp_fe_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# smp_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: smp_wait_handle } +# count: 2 +# timeout: 300 +# smp_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# db_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: db_wait_handle } +# count: 2 +# timeout: 300 +# db_wait_handle: +# type: OS::Heat::WaitConditionHandle + + FE_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + 
BE_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + SMP_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + DB_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + + FE_Clustering_KA: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_fe_cluster_net_id } + + FE_Clustering_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: FE_Clustering_KA } + cidr: { get_param: int_vscp_fe_cluster_cidr } + + Clustering_Network: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_cluster_net_id } + + Clustering_Network_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: Clustering_Network } + cidr: { get_param: int_vscp_cluster_cidr } + + DB_Network: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_db_network_net_id } + + DB_Network_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: DB_Network } + cidr: { get_param: int_vscp_db_network_cidr } + + server_scp_be0: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be0_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be0 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be0_port_3 } + - port: { get_resource: be0_port_4 } + - port: { get_resource: be0_port_5 } + - port: { get_resource: be0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be0_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + be0_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be0_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be0_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be0_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be0_Cricket_OCS_protected_ips}}] + + be0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be0_OAM_direct_ips}}] + + server_scp_be1: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be1_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be1 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be1_port_3 } + - port: { get_resource: be1_port_4 } + - port: { get_resource: be1_port_5 } + - port: { get_resource: be1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be1_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be1_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be1_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: 
DB_Network } + + be1_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be1_Cricket_OCS_protected_ips}}] + + be1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be1_OAM_direct_ips}}] + + server_scp_be2: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be2_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be2 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be2_port_3 } + - port: { get_resource: be2_port_4 } + - port: { get_resource: be2_port_5 } + - port: { get_resource: be2_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be2_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be2_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be2_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be2_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be2_Cricket_OCS_protected_ips}}] + + be2_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be2_OAM_direct_ips}}] + + server_scp_be3: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be3_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be3 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be3_port_3 } + - port: { get_resource: be3_port_4 } + - port: { get_resource: be3_port_5 } + - port: { get_resource: be3_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be3_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be3_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be3_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be3_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be3_Cricket_OCS_protected_ips}}] + + be3_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be3_OAM_direct_ips}}] + + server_scp_be4: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be4_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be4 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be4_port_3 } + - port: { get_resource: be4_port_4 } + - port: { get_resource: be4_port_5 } + - port: { get_resource: be4_port_7 } + 
metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be4_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be4_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be4_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be4_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be4_Cricket_OCS_protected_ips}}] + + be4_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be4_OAM_direct_ips}}] + + server_scp_fe0: + type: OS::Nova::Server +# depends on: scp_be_wait_condition + properties: + name: { get_param: vm_scp_fe0_name } + image: { get_param: image_scp_fe_id } +# availability_zone: { get_param: availability_zone_fe0 } + flavor: { get_param: flavor_scp_fe_id } + scheduler_hints: { group: { get_resource: FE_Affinity } } + networks: + - port: { get_resource: fe0_port_0 } + - port: { get_resource: fe0_port_2 } + - port: { get_resource: fe0_port_3 } + - port: { get_resource: fe0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_fe0_name} +# wc_notify: { get_attr: ['scp_fe_wait_handle', 'curl_cli'] } + + fe0_port_0: + type: OS::Neutron::Port + properties: + network: { get_param: SIGNET_vrf_A1_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe0_SIGNET_vrf_A1_direct_ips}}] + + fe0_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: FE_Clustering_KA } + + fe0_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + fe0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe0_OAM_direct_ips}}] + + server_scp_fe1: + type: OS::Nova::Server +# depends on: scp_be_wait_condition + properties: + name: { get_param: vm_scp_fe1_name } + image: { get_param: image_scp_fe_id } +# availability_zone: { get_param: availability_zone_fe1 } + flavor: { get_param: flavor_scp_fe_id } + scheduler_hints: { group: { get_resource: FE_Affinity } } + networks: + - port: { get_resource: fe1_port_1 } + - port: { get_resource: fe1_port_2 } + - port: { get_resource: fe1_port_3 } + - port: { get_resource: fe1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_fe1_name} +# wc_notify: { get_attr: ['scp_fe_wait_handle', 'curl_cli'] } + + fe1_port_1: + type: OS::Neutron::Port + properties: + network: { get_param: SIGNET_vrf_B1_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe1_SIGNET_vrf_B1_direct_ips}}] + + fe1_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: FE_Clustering_KA } + + fe1_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + fe1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe1_OAM_direct_ips}}] + 
+ server_smp0: + type: OS::Nova::Server + properties: + name: { get_param: vm_smp0_name } + image: { get_param: image_smp_id } +# availability_zone: { get_param: availability_zone_smp0 } + flavor: { get_param: flavor_smp_id } + scheduler_hints: { group: { get_resource: SMP_Affinity } } + networks: + - port: { get_resource: smp0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_smp0_name} +# wc_notify: { get_attr: ['smp_wait_handle', 'curl_cli'] } + + smp0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: smp0_OAM_direct_ips}}] + + server_smp1: + type: OS::Nova::Server + properties: + name: { get_param: vm_smp1_name } + image: { get_param: image_smp_id } +# availability_zone: { get_param: availability_zone_smp1 } + flavor: { get_param: flavor_smp_id } + scheduler_hints: { group: { get_resource: SMP_Affinity } } + networks: + - port: { get_resource: smp1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_smp1_name} +# wc_notify: { get_attr: ['smp_wait_handle', 'curl_cli'] } + + smp1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: smp1_OAM_direct_ips}}] + + server_db0: + type: OS::Nova::Server +# depends_on: smp_wait_condition + properties: + name: { get_param: vm_db0_name } + image: { get_param: image_db_id } +# availability_zone: { get_param: availability_zone_db0 } + flavor: { get_param: flavor_db_id } + scheduler_hints: { group: { get_resource: DB_Affinity } } + networks: + - port: { get_resource: db0_port_4 } + - port: { get_resource: db0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_db0_name} +# wc_notify: { get_attr: ['db_wait_handle', 'curl_cli'] } + + db0_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + db0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: db0_OAM_direct_ips}}] + + server_db1: + type: OS::Nova::Server +# depends_on: smp_wait_condition + properties: + name: { get_param: vm_db1_name } + image: { get_param: image_db_id } +# availability_zone: { get_param: availability_zone_db1 } + flavor: { get_param: flavor_db_id } + scheduler_hints: { group: { get_resource: DB_Affinity } } + networks: + - port: { get_resource: db1_port_4 } + - port: { get_resource: db1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_db1_name} +# wc_notify: { get_attr: ['db_wait_handle', 'curl_cli'] } + + db1_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + db1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: db1_OAM_direct_ips}}]
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/Heat-File 2.yaml b/ui-ci-dev/src/main/resources/Files/Heat-File 2.yaml new file mode 100644 index 0000000000..d332078d35 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/Heat-File 2.yaml @@ -0,0 +1,791 @@ +heat_template_version: 2013-05-23 +################################# +# +# Changes in v0.2: +# - Unique availability zone for each VM +# - LAN8 and SLAN networks removed according to latest Prod/Type I diagram +# - 2 DB VMs added +# - Images corrected +# - VM start-up order: SMP->DB->BE->FE (no error handling yet) +# - Provisioning scripts placeholders +# +################################# + +description: ASC Template + +parameters: +# availability_zone_smp0: +# type: string +# default: nova +# availability_zone_smp1: +# type: string +# default: nova +# availability_zone_fe0: +# type: string +# default: nova +# availability_zone_fe1: +# type: string +# default: nova +# availability_zone_db0: +# type: string +# default: nova +# availability_zone_db1: +# type: string +# default: nova +# availability_zone_be0: +# type: string +# default: nova +# availability_zone_be1: +# type: string +# default: nova +# availability_zone_be2: +# type: string +# default: nova +# availability_zone_be3: +# type: string +# default: nova +# availability_zone_be4: +# type: string +# default: nova + + vnf_name: + type: string + description: Unique name for this VNF instance + default: This_is_the_SCP_name + vnf_id: + type: string + description: Unique ID for this VNF instance + default: This_is_ths_SCP_id + + flavor_scp_be_id: + type: string + description: flavor type + default: a1.Small + flavor_scp_fe_id: + type: string + description: flavor type + default: a1.Small + flavor_smp_id: + type: string + description: flavor type + default: a1.Small + flavor_db_id: + type: string + description: flavor type + default: a1.Small + image_scp_be_id: + type: string + description: Image use to boot a server + default: asc_base_image_be + image_scp_fe_id: + type: string + description: Image use to boot a server + default: asc_base_image_fe + image_smp_id: + type: string + description: Image use to boot a server + default: asc_base_image_smp + image_db_id: + type: string + description: Image use to boot a server + default: asc_base_image_db + + int_vscp_fe_cluster_net_id: + type: string + description: LAN2 FE Cluster/KA + int_vscp_fe_cluster_cidr: + type: string + description: Private Network2 Address (CIDR notation) + int_vscp_cluster_net_id: + type: string + description: LAN3 Cluster + int_vscp_cluster_cidr: + type: string + description: Private Network3 Address (CIDR notation) + int_vscp_db_network_net_id: + type: string + description: LAN4 DB + int_vscp_db_network_cidr: + type: string + description: Private Network4 Address (CIDR notation) + SIGNET_vrf_A1_direct_net_id: + type: string + description: Network name for SIGTRAN_A + SIGNET_vrf_B1_direct_net_id: + type: string + description: Network name for SIGTRAN_B + Cricket_OCS_protected_net_id: + type: string + description: Network name for CRICKET_OCS + OAM_direct_net_id: + type: string + description: Network name for OAM + be0_Cricket_OCS_protected_ips: + type: string + label: be0 port 5 OAM ip address + description: be0 port 5 OAM ip address + be1_Cricket_OCS_protected_ips: + type: string + label: be1 port 5 OAM ip address + description: be1 port 5 OAM ip address + be2_Cricket_OCS_protected_ips: + type: string + label: be2 port 5 OAM ip address + description: be2 port 5 OAM ip address + 
be3_Cricket_OCS_protected_ips: + type: string + label: be3 port 5 OAM ip address + description: be3 port 5 OAM ip address + be4_Cricket_OCS_protected_ips: + type: string + label: be4 port 5 OAM ip address + description: be4 port 5 OAM ip address + be0_OAM_direct_ips: + type: string + label: be0 port 7 OAM ip address + description: be0 port 7 OAM ip address + be1_OAM_direct_ips: + type: string + label: be1 port 7 OAM ip address + description: be1 port 7 OAM ip address + be2_OAM_direct_ips: + type: string + label: be2 port 7 OAM ip address + description: be2 port 7 OAM ip address + be3_OAM_direct_ips: + type: string + label: be3 port 7 OAM ip address + description: be3 port 7 OAM ip address + be4_OAM_direct_ips: + type: string + label: be4 port 7 OAM ip address + description: be4 port 7 OAM ip address + fe0_SIGNET_vrf_A1_direct_ips: + type: string + label: fe0 port 0 SIGTRAN ip address + description: fe0 port 0 SIGTRAN ip address + fe0_OAM_direct_ips: + type: string + label: fe0 port 7 OAM ip address + description: fe0 port 7 OAM ip address + fe1_SIGNET_vrf_B1_direct_ips: + type: string + label: fe1 port 1 SIGTRAN ip address + description: fe1 port 1 SIGTRAN ip address + fe1_OAM_direct_ips: + type: string + label: fe1 port 7 OAM ip address + description: fe1 port 7 OAM ip address + smp0_OAM_direct_ips: + type: string + label: smp0 port 7 OAM ip address + description: smp0 port 7 OAM ip address + smp1_OAM_direct_ips: + type: string + label: smp1 port 7 OAM ip address + description: smp1 port 7 OAM ip address + db0_OAM_direct_ips: + type: string + label: db0 port 7 OAM ip address + description: smp0 port 7 OAM ip address + db1_OAM_direct_ips: + type: string + label: smp1 port 7 OAM ip address + description: db1 port 7 OAM ip address + vm_scp_be0_name: + type: string + default: vSCP_BE0 + description: name of VM + vm_scp_be1_name: + type: string + default: vSCP_BE1 + description: name of VM + vm_scp_be2_name: + type: string + default: vSCP_BE2 + description: name of VM + vm_scp_be3_name: + type: string + default: vSCP_BE3 + description: name of VM + vm_scp_be4_name: + type: string + default: vSCP_BE4 + description: name of VM + vm_scp_fe0_name: + type: string + default: vSCP_FE0 + description: name of VM + vm_scp_fe1_name: + type: string + default: vSCP_FE1 + description: name of VM + vm_smp0_name: + type: string + default: vSMP0 + description: name of VM + vm_smp1_name: + type: string + default: vSMP1 + description: name of VM + vm_db0_name: + type: string + default: vDB0 + description: name of VM + vm_db1_name: + type: string + default: vDB1 + description: name of VM + +resources: +# scp_be_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: scp_be_wait_handle } +# count: 5 +# timeout: 300 +# scp_be_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# scp_fe_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: scp_fe_wait_handle } +# count: 2 +# timeout: 300 +# scp_fe_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# smp_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: smp_wait_handle } +# count: 2 +# timeout: 300 +# smp_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# db_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: db_wait_handle } +# count: 2 +# timeout: 300 +# db_wait_handle: +# type: OS::Heat::WaitConditionHandle + + FE_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + 
BE_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + SMP_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + DB_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + + FE_Clustering_KA: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_fe_cluster_net_id } + + FE_Clustering_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: FE_Clustering_KA } + cidr: { get_param: int_vscp_fe_cluster_cidr } + + Clustering_Network: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_cluster_net_id } + + Clustering_Network_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: Clustering_Network } + cidr: { get_param: int_vscp_cluster_cidr } + + DB_Network: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_db_network_net_id } + + DB_Network_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: DB_Network } + cidr: { get_param: int_vscp_db_network_cidr } + + server_scp_be0: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be0_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be0 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be0_port_3 } + - port: { get_resource: be0_port_4 } + - port: { get_resource: be0_port_5 } + - port: { get_resource: be0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be0_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + be0_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be0_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be0_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be0_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be0_Cricket_OCS_protected_ips}}] + + be0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be0_OAM_direct_ips}}] + + server_scp_be1: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be1_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be1 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be1_port_3 } + - port: { get_resource: be1_port_4 } + - port: { get_resource: be1_port_5 } + - port: { get_resource: be1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be1_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be1_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be1_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: 
DB_Network } + + be1_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be1_Cricket_OCS_protected_ips}}] + + be1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be1_OAM_direct_ips}}] + + server_scp_be2: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be2_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be2 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be2_port_3 } + - port: { get_resource: be2_port_4 } + - port: { get_resource: be2_port_5 } + - port: { get_resource: be2_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be2_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be2_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be2_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be2_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be2_Cricket_OCS_protected_ips}}] + + be2_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be2_OAM_direct_ips}}] + + server_scp_be3: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be3_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be3 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be3_port_3 } + - port: { get_resource: be3_port_4 } + - port: { get_resource: be3_port_5 } + - port: { get_resource: be3_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be3_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be3_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be3_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be3_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be3_Cricket_OCS_protected_ips}}] + + be3_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be3_OAM_direct_ips}}] + + server_scp_be4: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be4_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be4 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be4_port_3 } + - port: { get_resource: be4_port_4 } + - port: { get_resource: be4_port_5 } + - port: { get_resource: be4_port_7 } + 
metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be4_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be4_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be4_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be4_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be4_Cricket_OCS_protected_ips}}] + + be4_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be4_OAM_direct_ips}}] + + server_scp_fe0: + type: OS::Nova::Server +# depends on: scp_be_wait_condition + properties: + name: { get_param: vm_scp_fe0_name } + image: { get_param: image_scp_fe_id } +# availability_zone: { get_param: availability_zone_fe0 } + flavor: { get_param: flavor_scp_fe_id } + scheduler_hints: { group: { get_resource: FE_Affinity } } + networks: + - port: { get_resource: fe0_port_0 } + - port: { get_resource: fe0_port_2 } + - port: { get_resource: fe0_port_3 } + - port: { get_resource: fe0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_fe0_name} +# wc_notify: { get_attr: ['scp_fe_wait_handle', 'curl_cli'] } + + fe0_port_0: + type: OS::Neutron::Port + properties: + network: { get_param: SIGNET_vrf_A1_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe0_SIGNET_vrf_A1_direct_ips}}] + + fe0_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: FE_Clustering_KA } + + fe0_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + fe0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe0_OAM_direct_ips}}] + + server_scp_fe1: + type: OS::Nova::Server +# depends on: scp_be_wait_condition + properties: + name: { get_param: vm_scp_fe1_name } + image: { get_param: image_scp_fe_id } +# availability_zone: { get_param: availability_zone_fe1 } + flavor: { get_param: flavor_scp_fe_id } + scheduler_hints: { group: { get_resource: FE_Affinity } } + networks: + - port: { get_resource: fe1_port_1 } + - port: { get_resource: fe1_port_2 } + - port: { get_resource: fe1_port_3 } + - port: { get_resource: fe1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_fe1_name} +# wc_notify: { get_attr: ['scp_fe_wait_handle', 'curl_cli'] } + + fe1_port_1: + type: OS::Neutron::Port + properties: + network: { get_param: SIGNET_vrf_B1_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe1_SIGNET_vrf_B1_direct_ips}}] + + fe1_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: FE_Clustering_KA } + + fe1_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + fe1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe1_OAM_direct_ips}}] + 
+ server_smp0: + type: OS::Nova::Server + properties: + name: { get_param: vm_smp0_name } + image: { get_param: image_smp_id } +# availability_zone: { get_param: availability_zone_smp0 } + flavor: { get_param: flavor_smp_id } + scheduler_hints: { group: { get_resource: SMP_Affinity } } + networks: + - port: { get_resource: smp0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_smp0_name} +# wc_notify: { get_attr: ['smp_wait_handle', 'curl_cli'] } + + smp0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: smp0_OAM_direct_ips}}] + + server_smp1: + type: OS::Nova::Server + properties: + name: { get_param: vm_smp1_name } + image: { get_param: image_smp_id } +# availability_zone: { get_param: availability_zone_smp1 } + flavor: { get_param: flavor_smp_id } + scheduler_hints: { group: { get_resource: SMP_Affinity } } + networks: + - port: { get_resource: smp1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_smp1_name} +# wc_notify: { get_attr: ['smp_wait_handle', 'curl_cli'] } + + smp1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: smp1_OAM_direct_ips}}] + + server_db0: + type: OS::Nova::Server +# depends_on: smp_wait_condition + properties: + name: { get_param: vm_db0_name } + image: { get_param: image_db_id } +# availability_zone: { get_param: availability_zone_db0 } + flavor: { get_param: flavor_db_id } + scheduler_hints: { group: { get_resource: DB_Affinity } } + networks: + - port: { get_resource: db0_port_4 } + - port: { get_resource: db0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_db0_name} +# wc_notify: { get_attr: ['db_wait_handle', 'curl_cli'] } + + db0_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + db0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: db0_OAM_direct_ips}}] + + server_db1: + type: OS::Nova::Server +# depends_on: smp_wait_condition + properties: + name: { get_param: vm_db1_name } + image: { get_param: image_db_id } +# availability_zone: { get_param: availability_zone_db1 } + flavor: { get_param: flavor_db_id } + scheduler_hints: { group: { get_resource: DB_Affinity } } + networks: + - port: { get_resource: db1_port_4 } + - port: { get_resource: db1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_db1_name} +# wc_notify: { get_attr: ['db_wait_handle', 'curl_cli'] } + + db1_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + db1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: db1_OAM_direct_ips}}]
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/Heat-File.yaml b/ui-ci-dev/src/main/resources/Files/Heat-File.yaml new file mode 100644 index 0000000000..d332078d35 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/Heat-File.yaml @@ -0,0 +1,791 @@ +heat_template_version: 2013-05-23 +################################# +# +# Changes in v0.2: +# - Unique availability zone for each VM +# - LAN8 and SLAN networks removed according to latest Prod/Type I diagram +# - 2 DB VMs added +# - Images corrected +# - VM start-up order: SMP->DB->BE->FE (no error handling yet) +# - Provisioning scripts placeholders +# +################################# + +description: ASC Template + +parameters: +# availability_zone_smp0: +# type: string +# default: nova +# availability_zone_smp1: +# type: string +# default: nova +# availability_zone_fe0: +# type: string +# default: nova +# availability_zone_fe1: +# type: string +# default: nova +# availability_zone_db0: +# type: string +# default: nova +# availability_zone_db1: +# type: string +# default: nova +# availability_zone_be0: +# type: string +# default: nova +# availability_zone_be1: +# type: string +# default: nova +# availability_zone_be2: +# type: string +# default: nova +# availability_zone_be3: +# type: string +# default: nova +# availability_zone_be4: +# type: string +# default: nova + + vnf_name: + type: string + description: Unique name for this VNF instance + default: This_is_the_SCP_name + vnf_id: + type: string + description: Unique ID for this VNF instance + default: This_is_ths_SCP_id + + flavor_scp_be_id: + type: string + description: flavor type + default: a1.Small + flavor_scp_fe_id: + type: string + description: flavor type + default: a1.Small + flavor_smp_id: + type: string + description: flavor type + default: a1.Small + flavor_db_id: + type: string + description: flavor type + default: a1.Small + image_scp_be_id: + type: string + description: Image use to boot a server + default: asc_base_image_be + image_scp_fe_id: + type: string + description: Image use to boot a server + default: asc_base_image_fe + image_smp_id: + type: string + description: Image use to boot a server + default: asc_base_image_smp + image_db_id: + type: string + description: Image use to boot a server + default: asc_base_image_db + + int_vscp_fe_cluster_net_id: + type: string + description: LAN2 FE Cluster/KA + int_vscp_fe_cluster_cidr: + type: string + description: Private Network2 Address (CIDR notation) + int_vscp_cluster_net_id: + type: string + description: LAN3 Cluster + int_vscp_cluster_cidr: + type: string + description: Private Network3 Address (CIDR notation) + int_vscp_db_network_net_id: + type: string + description: LAN4 DB + int_vscp_db_network_cidr: + type: string + description: Private Network4 Address (CIDR notation) + SIGNET_vrf_A1_direct_net_id: + type: string + description: Network name for SIGTRAN_A + SIGNET_vrf_B1_direct_net_id: + type: string + description: Network name for SIGTRAN_B + Cricket_OCS_protected_net_id: + type: string + description: Network name for CRICKET_OCS + OAM_direct_net_id: + type: string + description: Network name for OAM + be0_Cricket_OCS_protected_ips: + type: string + label: be0 port 5 OAM ip address + description: be0 port 5 OAM ip address + be1_Cricket_OCS_protected_ips: + type: string + label: be1 port 5 OAM ip address + description: be1 port 5 OAM ip address + be2_Cricket_OCS_protected_ips: + type: string + label: be2 port 5 OAM ip address + description: be2 port 5 OAM ip address + 
be3_Cricket_OCS_protected_ips: + type: string + label: be3 port 5 OAM ip address + description: be3 port 5 OAM ip address + be4_Cricket_OCS_protected_ips: + type: string + label: be4 port 5 OAM ip address + description: be4 port 5 OAM ip address + be0_OAM_direct_ips: + type: string + label: be0 port 7 OAM ip address + description: be0 port 7 OAM ip address + be1_OAM_direct_ips: + type: string + label: be1 port 7 OAM ip address + description: be1 port 7 OAM ip address + be2_OAM_direct_ips: + type: string + label: be2 port 7 OAM ip address + description: be2 port 7 OAM ip address + be3_OAM_direct_ips: + type: string + label: be3 port 7 OAM ip address + description: be3 port 7 OAM ip address + be4_OAM_direct_ips: + type: string + label: be4 port 7 OAM ip address + description: be4 port 7 OAM ip address + fe0_SIGNET_vrf_A1_direct_ips: + type: string + label: fe0 port 0 SIGTRAN ip address + description: fe0 port 0 SIGTRAN ip address + fe0_OAM_direct_ips: + type: string + label: fe0 port 7 OAM ip address + description: fe0 port 7 OAM ip address + fe1_SIGNET_vrf_B1_direct_ips: + type: string + label: fe1 port 1 SIGTRAN ip address + description: fe1 port 1 SIGTRAN ip address + fe1_OAM_direct_ips: + type: string + label: fe1 port 7 OAM ip address + description: fe1 port 7 OAM ip address + smp0_OAM_direct_ips: + type: string + label: smp0 port 7 OAM ip address + description: smp0 port 7 OAM ip address + smp1_OAM_direct_ips: + type: string + label: smp1 port 7 OAM ip address + description: smp1 port 7 OAM ip address + db0_OAM_direct_ips: + type: string + label: db0 port 7 OAM ip address + description: smp0 port 7 OAM ip address + db1_OAM_direct_ips: + type: string + label: smp1 port 7 OAM ip address + description: db1 port 7 OAM ip address + vm_scp_be0_name: + type: string + default: vSCP_BE0 + description: name of VM + vm_scp_be1_name: + type: string + default: vSCP_BE1 + description: name of VM + vm_scp_be2_name: + type: string + default: vSCP_BE2 + description: name of VM + vm_scp_be3_name: + type: string + default: vSCP_BE3 + description: name of VM + vm_scp_be4_name: + type: string + default: vSCP_BE4 + description: name of VM + vm_scp_fe0_name: + type: string + default: vSCP_FE0 + description: name of VM + vm_scp_fe1_name: + type: string + default: vSCP_FE1 + description: name of VM + vm_smp0_name: + type: string + default: vSMP0 + description: name of VM + vm_smp1_name: + type: string + default: vSMP1 + description: name of VM + vm_db0_name: + type: string + default: vDB0 + description: name of VM + vm_db1_name: + type: string + default: vDB1 + description: name of VM + +resources: +# scp_be_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: scp_be_wait_handle } +# count: 5 +# timeout: 300 +# scp_be_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# scp_fe_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: scp_fe_wait_handle } +# count: 2 +# timeout: 300 +# scp_fe_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# smp_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: smp_wait_handle } +# count: 2 +# timeout: 300 +# smp_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# db_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: db_wait_handle } +# count: 2 +# timeout: 300 +# db_wait_handle: +# type: OS::Heat::WaitConditionHandle + + FE_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + 
BE_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + SMP_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + DB_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + + FE_Clustering_KA: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_fe_cluster_net_id } + + FE_Clustering_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: FE_Clustering_KA } + cidr: { get_param: int_vscp_fe_cluster_cidr } + + Clustering_Network: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_cluster_net_id } + + Clustering_Network_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: Clustering_Network } + cidr: { get_param: int_vscp_cluster_cidr } + + DB_Network: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_db_network_net_id } + + DB_Network_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: DB_Network } + cidr: { get_param: int_vscp_db_network_cidr } + + server_scp_be0: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be0_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be0 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be0_port_3 } + - port: { get_resource: be0_port_4 } + - port: { get_resource: be0_port_5 } + - port: { get_resource: be0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be0_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + be0_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be0_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be0_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be0_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be0_Cricket_OCS_protected_ips}}] + + be0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be0_OAM_direct_ips}}] + + server_scp_be1: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be1_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be1 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be1_port_3 } + - port: { get_resource: be1_port_4 } + - port: { get_resource: be1_port_5 } + - port: { get_resource: be1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be1_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be1_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be1_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: 
DB_Network } + + be1_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be1_Cricket_OCS_protected_ips}}] + + be1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be1_OAM_direct_ips}}] + + server_scp_be2: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be2_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be2 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be2_port_3 } + - port: { get_resource: be2_port_4 } + - port: { get_resource: be2_port_5 } + - port: { get_resource: be2_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be2_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be2_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be2_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be2_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be2_Cricket_OCS_protected_ips}}] + + be2_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be2_OAM_direct_ips}}] + + server_scp_be3: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be3_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be3 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be3_port_3 } + - port: { get_resource: be3_port_4 } + - port: { get_resource: be3_port_5 } + - port: { get_resource: be3_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be3_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be3_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be3_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be3_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be3_Cricket_OCS_protected_ips}}] + + be3_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be3_OAM_direct_ips}}] + + server_scp_be4: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be4_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be4 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be4_port_3 } + - port: { get_resource: be4_port_4 } + - port: { get_resource: be4_port_5 } + - port: { get_resource: be4_port_7 } + 
metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be4_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be4_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be4_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be4_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be4_Cricket_OCS_protected_ips}}] + + be4_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be4_OAM_direct_ips}}] + + server_scp_fe0: + type: OS::Nova::Server +# depends on: scp_be_wait_condition + properties: + name: { get_param: vm_scp_fe0_name } + image: { get_param: image_scp_fe_id } +# availability_zone: { get_param: availability_zone_fe0 } + flavor: { get_param: flavor_scp_fe_id } + scheduler_hints: { group: { get_resource: FE_Affinity } } + networks: + - port: { get_resource: fe0_port_0 } + - port: { get_resource: fe0_port_2 } + - port: { get_resource: fe0_port_3 } + - port: { get_resource: fe0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_fe0_name} +# wc_notify: { get_attr: ['scp_fe_wait_handle', 'curl_cli'] } + + fe0_port_0: + type: OS::Neutron::Port + properties: + network: { get_param: SIGNET_vrf_A1_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe0_SIGNET_vrf_A1_direct_ips}}] + + fe0_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: FE_Clustering_KA } + + fe0_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + fe0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe0_OAM_direct_ips}}] + + server_scp_fe1: + type: OS::Nova::Server +# depends on: scp_be_wait_condition + properties: + name: { get_param: vm_scp_fe1_name } + image: { get_param: image_scp_fe_id } +# availability_zone: { get_param: availability_zone_fe1 } + flavor: { get_param: flavor_scp_fe_id } + scheduler_hints: { group: { get_resource: FE_Affinity } } + networks: + - port: { get_resource: fe1_port_1 } + - port: { get_resource: fe1_port_2 } + - port: { get_resource: fe1_port_3 } + - port: { get_resource: fe1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_fe1_name} +# wc_notify: { get_attr: ['scp_fe_wait_handle', 'curl_cli'] } + + fe1_port_1: + type: OS::Neutron::Port + properties: + network: { get_param: SIGNET_vrf_B1_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe1_SIGNET_vrf_B1_direct_ips}}] + + fe1_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: FE_Clustering_KA } + + fe1_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + fe1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe1_OAM_direct_ips}}] + 
+ server_smp0: + type: OS::Nova::Server + properties: + name: { get_param: vm_smp0_name } + image: { get_param: image_smp_id } +# availability_zone: { get_param: availability_zone_smp0 } + flavor: { get_param: flavor_smp_id } + scheduler_hints: { group: { get_resource: SMP_Affinity } } + networks: + - port: { get_resource: smp0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_smp0_name} +# wc_notify: { get_attr: ['smp_wait_handle', 'curl_cli'] } + + smp0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: smp0_OAM_direct_ips}}] + + server_smp1: + type: OS::Nova::Server + properties: + name: { get_param: vm_smp1_name } + image: { get_param: image_smp_id } +# availability_zone: { get_param: availability_zone_smp1 } + flavor: { get_param: flavor_smp_id } + scheduler_hints: { group: { get_resource: SMP_Affinity } } + networks: + - port: { get_resource: smp1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_smp1_name} +# wc_notify: { get_attr: ['smp_wait_handle', 'curl_cli'] } + + smp1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: smp1_OAM_direct_ips}}] + + server_db0: + type: OS::Nova::Server +# depends_on: smp_wait_condition + properties: + name: { get_param: vm_db0_name } + image: { get_param: image_db_id } +# availability_zone: { get_param: availability_zone_db0 } + flavor: { get_param: flavor_db_id } + scheduler_hints: { group: { get_resource: DB_Affinity } } + networks: + - port: { get_resource: db0_port_4 } + - port: { get_resource: db0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_db0_name} +# wc_notify: { get_attr: ['db_wait_handle', 'curl_cli'] } + + db0_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + db0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: db0_OAM_direct_ips}}] + + server_db1: + type: OS::Nova::Server +# depends_on: smp_wait_condition + properties: + name: { get_param: vm_db1_name } + image: { get_param: image_db_id } +# availability_zone: { get_param: availability_zone_db1 } + flavor: { get_param: flavor_db_id } + scheduler_hints: { group: { get_resource: DB_Affinity } } + networks: + - port: { get_resource: db1_port_4 } + - port: { get_resource: db1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_db1_name} +# wc_notify: { get_attr: ['db_wait_handle', 'curl_cli'] } + + db1_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + db1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: db1_OAM_direct_ips}}]
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/InValid_tosca_File .yml b/ui-ci-dev/src/main/resources/Files/InValid_tosca_File .yml new file mode 100644 index 0000000000..4eea0a15ac --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/InValid_tosca_File .yml @@ -0,0 +1,34 @@ +node_types: + org.openecomp.resource.MyCompute: + derived_from: tosca.nodes.Root + attributes: + private_address: + type: string + public_address: + type: string + networks: + type: map + entry_schema: + type: tosca.datatypes.network.NetworkInfo + ports: + type: map + entry_schema: + type: tosca.datatypes.network.PortInfo + requirements: + - local_storage: + capability: tosca.capabilities.Attachment + node: tosca.nodes.BlockStorage + relationship: tosca.relationships.AttachesTo + occurrences: [0, UNBOUNDED] + capabilities: + host: + type: tosca.capabilities.Container + valid_source_types: [tosca.nodes.SoftwareComponent] + endpoint : + type: tosca.capabilities.Endpoint.Admin + os: + type: tosca.capabilities.OperatingSystem + scalable: + type: tosca.capabilities.Scalable + binding: + type: tosca.capabilities.network.Bindable diff --git a/ui-ci-dev/src/main/resources/Files/JDM_vf.yml b/ui-ci-dev/src/main/resources/Files/JDM_vf.yml new file mode 100644 index 0000000000..5a7edd4aaf --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/JDM_vf.yml @@ -0,0 +1,57 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.vf.JDM: + derived_from: tosca.nodes.Root + properties: + att-part-number: + type: string + vendor-name: + type: string + vendor-part-number: + type: string + vendor-model: + type: string + vendor-model-description: + type: string + vcpu-default: + type: integer + vcpu-min: + type: integer + vcpu-max: + type: integer + vmemory-default: + type: integer + vmemory-units: + type: string + default: "GB" + vmemory-min: + type: integer + vmemory-max: + type: integer + vdisk-default: + type: integer + vdisk-units: + type: string + default: "GB" + vdisk-min: + type: integer + vdisk-max: + type: integer + vnf-type: + type: string + software-version: + type: string + software-version-state: + type: integer + software-file-name: + type: string + vnf-feature: + type: string + requirements: + - host: + capability: tosca.capabilities.Container + relationship: tosca.relationships.HostedOn + capabilities: + binding: + type: tosca.capabilities.network.Bindable
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/JDM_vfc.yml b/ui-ci-dev/src/main/resources/Files/JDM_vfc.yml new file mode 100644 index 0000000000..b9c9ca0c4a --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/JDM_vfc.yml @@ -0,0 +1,57 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.vfc.JDM: + derived_from: tosca.nodes.Root + properties: + att-part-number: + type: string + vendor-name: + type: string + vendor-part-number: + type: string + vendor-model: + type: string + vendor-model-description: + type: string + vcpu-default: + type: integer + vcpu-min: + type: integer + vcpu-max: + type: integer + vmemory-default: + type: integer + vmemory-units: + type: string + default: "GB" + vmemory-min: + type: integer + vmemory-max: + type: integer + vdisk-default: + type: integer + vdisk-units: + type: string + default: "GB" + vdisk-min: + type: integer + vdisk-max: + type: integer + vnf-type: + type: string + software-version: + type: string + software-version-state: + type: integer + software-file-name: + type: string + vnf-feature: + type: string + requirements: + - host: + capability: tosca.capabilities.Container + relationship: tosca.relationships.HostedOn + capabilities: + binding: + type: tosca.capabilities.network.Bindable
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/Sample_CSAR.csar b/ui-ci-dev/src/main/resources/Files/Sample_CSAR.csar Binary files differnew file mode 100644 index 0000000000..fe95e79473 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/Sample_CSAR.csar diff --git a/ui-ci-dev/src/main/resources/Files/Sample_CSAR2.csar b/ui-ci-dev/src/main/resources/Files/Sample_CSAR2.csar Binary files differnew file mode 100644 index 0000000000..3001fe8222 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/Sample_CSAR2.csar diff --git a/ui-ci-dev/src/main/resources/Files/UCPE_VFC.yml b/ui-ci-dev/src/main/resources/Files/UCPE_VFC.yml new file mode 100644 index 0000000000..ef3966b68f --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/UCPE_VFC.yml @@ -0,0 +1,65 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.vfc.uCPE: + derived_from: tosca.nodes.Root + properties: + att-ucpe-part-number: + type: string + vendor-name: + type: string + required: true + vendor-model: + type: string + required: true + total-vcpu: + type: integer + description: number of vCPUs + total-memory: + type: integer + description: GB + total-disk: + type: integer + description: GB + base-system-image-file-name: + type: string + linux-host-vendor: + type: string + linux-host-os-version: + type: version + base-system-software: + type: string + jdm-vcpu: + type: integer + jdm-memory: + type: integer + description: GB + jdm-disk: + type: integer + description: GB + jdm-version: + type: string + jcp-vcpu: + type: integer + jcp-memory: + type: integer + description: GB + jcp-disk: + type: integer + description: GB + jcp-version: + type: version + capabilities: + vnf_hosting: + type: tosca.capabilities.Container + description: Provides hosting capability for VNFs + WAN_connectivity: + type: tosca.capabilities.network.Bindable + valid_source_types: [org.openecomp.cp.Wan] + description: external WAN1 n/w interface + occurrences: [1,2] + LAN_connectivity: + type: tosca.capabilities.network.Bindable + valid_source_types: [org.openecomp.cp.Lan] + description: external LAN n/w interface + occurrences: [1,8]
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/VF.yml b/ui-ci-dev/src/main/resources/Files/VF.yml new file mode 100644 index 0000000000..ec089900ad --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/VF.yml @@ -0,0 +1,17 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.vf.VFF: + derived_from: tosca.nodes.Root + properties: + vendor: + type: string + required: false + vl_name: + type: string + required: false + capabilities: + virtual_linkable: + type: tosca.capabilities.network.Linkable + +
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/VFC.yml b/ui-ci-dev/src/main/resources/Files/VFC.yml new file mode 100644 index 0000000000..853ed35374 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/VFC.yml @@ -0,0 +1,77 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.vfc.vRouter: + derived_from: tosca.nodes.Root + properties: + att-part-number: + type: string + vendor-name: + type: string + vendor-part-number: + type: string + vendor-model: + type: string + vendor-model-description: + type: string + vcpu-default: + type: integer + vcpu-min: + type: integer + vcpu-max: + type: integer + vmemory-default: + type: integer + vmemory-units: + type: string + default: "GB" + vmemory-min: + type: integer + vmemory-max: + type: integer + vdisk-default: + type: integer + vdisk-units: + type: string + default: "GB" + vdisk-min: + type: integer + vdisk-max: + type: integer + vnf-type: + type: string + software-version: + type: string + software-version-state: + type: integer + software-file-name: + type: string + vnf-feature: + type: string + management-v6-address: + type: string + nm-lan-v6-address: + type: string + nm-lan-v6-prefix-length: + type: string + management-v4-address: + type: string + nm-lan-v4-address: + type: string + nm-lan-v4-prefix-length: + type: string + routing-instance-name: + type: string + routing-instances: + type: map + entry_schema: + type: string + requirements: + - host: + capability: tosca.capabilities.Container + relationship: tosca.relationships.HostedOn + capabilities: + binding: + type: tosca.capabilities.network.Bindable + occurrences: [1,UNBOUNDED] +
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/VFCWithAttributes.yml b/ui-ci-dev/src/main/resources/Files/VFCWithAttributes.yml new file mode 100644 index 0000000000..133fd02cef --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/VFCWithAttributes.yml @@ -0,0 +1,43 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.vfc.VFC: + derived_from: tosca.nodes.Root + properties: + jcp-memory: + type: integer + description: GB + jcp-disk: + type: integer + description: GB + jcp-version: + type: version + attributes: + my_attr: + type: integer + private_address: + type: string + public_address: + type: string + networks: + type: map + entry_schema: + type: tosca.datatypes.network.NetworkInfo + ports: + type: map + entry_schema: + type: tosca.datatypes.network.PortInfo + capabilities: + vnf_hosting: + type: tosca.capabilities.Container + description: Provides hosting capability for VNFs + WAN_connectivity: + type: tosca.capabilities.network.Bindable + valid_source_types: [org.openecomp.cp.Wan] + description: external WAN1 n/w interface + occurrences: [1,2] + LAN_connectivity: + type: tosca.capabilities.network.Bindable + valid_source_types: [org.openecomp.cp.Lan] + description: external LAN n/w interface + occurrences: [1,8]
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/VL.yml b/ui-ci-dev/src/main/resources/Files/VL.yml new file mode 100644 index 0000000000..74a2405af2 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/VL.yml @@ -0,0 +1,17 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.vl.VL1we23: + derived_from: tosca.nodes.Root + properties: + vendor: + type: string + required: false + vl_name: + type: string + required: false + capabilities: + virtual_linkable: + type: tosca.capabilities.network.Linkable + +
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/Valid xml.xml b/ui-ci-dev/src/main/resources/Files/Valid xml.xml new file mode 100644 index 0000000000..0d67e48340 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/Valid xml.xml @@ -0,0 +1,4 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<dirs> +<entry loc="C:\Program Files\Java\jdk1.7.0_79" stamp="1449076618518"/> +</dirs> diff --git a/ui-ci-dev/src/main/resources/Files/Valid_tosca_Mycompute.yml b/ui-ci-dev/src/main/resources/Files/Valid_tosca_Mycompute.yml new file mode 100644 index 0000000000..8fac5e16a8 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/Valid_tosca_Mycompute.yml @@ -0,0 +1,35 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 +node_types: + org.openecomp.resource.vf.Database: + derived_from: tosca.nodes.Root + attributes: + private_address: + type: string + public_address: + type: string + networks: + type: map + entry_schema: + type: tosca.datatypes.network.NetworkInfo + ports: + type: map + entry_schema: + type: tosca.datatypes.network.PortInfo + requirements: + - local_storage: + capability: tosca.capabilities.Attachment + node: tosca.nodes.BlockStorage + relationship: tosca.relationships.AttachesTo + occurrences: [0, UNBOUNDED] + capabilities: + host: + type: tosca.capabilities.Container + valid_source_types: [tosca.nodes.SoftwareComponent] + endpoint : + type: tosca.capabilities.Endpoint.Admin + os: + type: tosca.capabilities.OperatingSystem + scalable: + type: tosca.capabilities.Scalable + binding: + type: tosca.capabilities.network.Bindable diff --git a/ui-ci-dev/src/main/resources/Files/Valid_tosca_ReplaceTest.yml b/ui-ci-dev/src/main/resources/Files/Valid_tosca_ReplaceTest.yml new file mode 100644 index 0000000000..90e771dab1 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/Valid_tosca_ReplaceTest.yml @@ -0,0 +1,35 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 +node_types: + org.openecomp.resource.VF.MyCompute: + derived_from: tosca.nodes.Root + attributes: + private_address: + type: string + public_address: + type: string + networks: + type: map + entry_schema: + type: tosca.datatypes.network.NetworkInfo + ports: + type: map + entry_schema: + type: tosca.datatypes.network.PortInfo + requirements: + - local_storage: + capability: tosca.capabilities.Attachment + node: tosca.nodes.BlockStorage + relationship: tosca.relationships.AttachesTo + occurrences: [0, UNBOUNDED] + capabilities: + host: + type: tosca.capabilities.Container + valid_source_types: [tosca.nodes.SoftwareComponent] + endpoint : + type: tosca.capabilities.Endpoint.Admin + os: + type: tosca.capabilities.OperatingSystem + scalable: + type: tosca.capabilities.Scalable + binding: + type: tosca.capabilities.network.Bindable diff --git a/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam-volumes_v0.3.env b/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam-volumes_v0.3.env new file mode 100644 index 0000000000..b494d8c270 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam-volumes_v0.3.env @@ -0,0 +1,6 @@ +parameters: + pcrf_oam_vol_size: 500 + pcrf_oam_volume_silver-1: Silver + pcrf_oam_volume_silver-2: Silver + pcrf_oam_vol_name_1: sde1-pcrfx01-oam001-vol-1 + pcrf_oam_vol_name_2: sde1-pcrfx01-oam001-vol-2
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam_v0.6.env b/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam_v0.6.env new file mode 100644 index 0000000000..cf7cf710ce --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam_v0.6.env @@ -0,0 +1,18 @@ +parameters: + pcrf_oam_server_names: ZRDM1PCRF01OAM001,ZRDM1PCRF01OAM002 + pcrf_oam_image_name: PCRF_8.995-ATTM1.0.3.qcow2 + pcrf_oam_flavor_name: lc.4xlarge4 + availabilityzone_name: nova + pcrf_cps_net_name: int_pcrf_net_0 + pcrf_cps_net_ips: 172.26.16.111,172.26.16.112 + pcrf_arbiter_vip: 172.26.16.115 + pcrf_cps_net_mask: 255.255.255.0 + pcrf_oam_net_name: oam_protected_net_0 + pcrf_oam_net_ips: 107.239.64.117,107.239.64.118 + pcrf_oam_net_gw: 107.239.64.1 + pcrf_oam_net_mask: 255.255.248.0 + pcrf_oam_volume_id_1: a4aa05fb-fcdc-457b-8077-6845fdfc3257 + pcrf_oam_volume_id_2: 93d8fc1f-f1c3-4933-86b2-039881ee910f + pcrf_security_group_name: nimbus_security_group + pcrf_vnf_id: 730797234b4a40aa99335157b02871cd + diff --git a/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam_v0.6.yaml b/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam_v0.6.yaml new file mode 100644 index 0000000000..6636eba210 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/hot-nimbus-oam_v0.6.yaml @@ -0,0 +1,108 @@ +heat_template_version: 2013-05-23 + +description: heat template that creates multiple PCRF OAM nodes stack + +parameters: + pcrf_oam_server_names: + type: comma_delimited_list + label: PCRF OAM server names + description: name of the PCRF OAM instance + pcrf_oam_image_name: + type: string + label: PCRF OAM image name + description: PCRF OAM image name + pcrf_oam_flavor_name: + type: string + label: PCRF OAM flavor name + description: flavor name of PCRF OAM instance + availabilityzone_name: + type: string + label: availabilityzone name + description: availabilityzone name + pcrf_cps_net_name: + type: string + label: CPS network name + description: CPS network name + pcrf_cps_net_ips: + type: comma_delimited_list + label: CPS network ips + description: CPS network ips + pcrf_cps_net_mask: + type: string + label: CPS network mask + description: CPS network mask + pcrf_arbiter_vip: + type: string + label: OAM Arbiter LB VIP + description: OAM Arbiter LB VIP + pcrf_oam_net_name: + type: string + label: OAM network name + description: OAM network name + pcrf_oam_net_ips: + type: comma_delimited_list + label: OAM network ips + description: OAM network ips + pcrf_oam_net_gw: + type: string + label: CPS network gateway + description: CPS network gateway + pcrf_oam_net_mask: + type: string + label: CPS network mask + description: CPS network mask + pcrf_oam_volume_id_1: + type: string + label: CPS OAM 001 Cinder Volume + description: CPS OAM 001 Cinder Volumes + pcrf_oam_volume_id_2: + type: string + label: CPS OAM 002 Cinder Volume + description: CPS OAM 002 Cinder Volumes + pcrf_security_group_name: + type: string + label: security group name + description: the name of security group + pcrf_vnf_id: + type: string + label: PCRF VNF Id + description: PCRF VNF Id + +resources: + server_pcrf_oam_001: + type: nested-oam_v0.2.yaml + properties: + pcrf_oam_server_name: { get_param: [pcrf_oam_server_names, 0] } + pcrf_oam_image_name: { get_param: pcrf_oam_image_name } + pcrf_oam_flavor_name: { get_param: pcrf_oam_flavor_name } + availabilityzone_name: { get_param: availabilityzone_name } + pcrf_security_group_name: { get_param: pcrf_security_group_name } + pcrf_oam_volume_id: { get_param: pcrf_oam_volume_id_1 } + 
pcrf_cps_net_name: { get_param: pcrf_cps_net_name } + pcrf_cps_net_ip: { get_param: [pcrf_cps_net_ips, 0] } + pcrf_cps_net_mask: { get_param: pcrf_cps_net_mask } + pcrf_oam_net_name: { get_param: pcrf_oam_net_name } + pcrf_oam_net_ip: { get_param: [pcrf_oam_net_ips, 0] } + pcrf_oam_net_mask: { get_param: pcrf_oam_net_mask } + pcrf_oam_net_gw: { get_param: pcrf_oam_net_gw } + pcrf_arbiter_vip: { get_param: pcrf_arbiter_vip } + pcrf_vnf_id: {get_param: pcrf_vnf_id} + + server_pcrf_oam_002: + type: nested-oam_v0.2.yaml + properties: + pcrf_oam_server_name: { get_param: [pcrf_oam_server_names, 1] } + pcrf_oam_image_name: { get_param: pcrf_oam_image_name } + pcrf_oam_flavor_name: { get_param: pcrf_oam_flavor_name } + availabilityzone_name: { get_param: availabilityzone_name } + pcrf_security_group_name: { get_param: pcrf_security_group_name } + pcrf_oam_volume_id: { get_param: pcrf_oam_volume_id_2 } + pcrf_cps_net_name: { get_param: pcrf_cps_net_name } + pcrf_cps_net_ip: { get_param: [pcrf_cps_net_ips, 1] } + pcrf_cps_net_mask: { get_param: pcrf_cps_net_mask } + pcrf_oam_net_name: { get_param: pcrf_oam_net_name } + pcrf_oam_net_ip: { get_param: [pcrf_oam_net_ips, 1] } + pcrf_oam_net_mask: { get_param: pcrf_oam_net_mask } + pcrf_oam_net_gw: { get_param: pcrf_oam_net_gw } + pcrf_arbiter_vip: { get_param: pcrf_arbiter_vip } + pcrf_vnf_id: {get_param: pcrf_vnf_id} diff --git a/ui-ci-dev/src/main/resources/Files/hot-nimbus-pcm_v0.6.yaml b/ui-ci-dev/src/main/resources/Files/hot-nimbus-pcm_v0.6.yaml new file mode 100644 index 0000000000..564104174a --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/hot-nimbus-pcm_v0.6.yaml @@ -0,0 +1,80 @@ +heat_template_version: 2013-05-23 + +description: heat template that creates PCRF Cluman stack + +parameters: + pcrf_pcm_server_names: + type: comma_delimited_list + label: PCRF CM server names + description: name of the PCRF CM instance + pcrf_pcm_image_name: + type: string + label: PCRF CM image name + description: PCRF CM image name + pcrf_pcm_flavor_name: + type: string + label: PCRF CM flavor name + description: flavor name of PCRF CM instance + availabilityzone_name: + type: string + label: availabilityzone name + description: availabilityzone name + pcrf_cps_net_name: + type: string + label: CPS network name + description: CPS network name + pcrf_cps_net_ips: + type: comma_delimited_list + label: CPS network ips + description: CPS network ips + pcrf_cps_net_mask: + type: string + label: CPS network mask + description: CPS network mask + pcrf_oam_net_name: + type: string + label: OAM network name + description: OAM network name + pcrf_oam_net_ips: + type: comma_delimited_list + label: OAM network ips + description: OAM network ips + pcrf_oam_net_gw: + type: string + label: CPS network gateway + description: CPS network gateway + pcrf_oam_net_mask: + type: string + label: CPS network mask + description: CPS network mask + pcrf_pcm_volume_id_1: + type: string + label: CPS Cluman Cinder Volume + description: CPS Cluman Cinder Volume + pcrf_security_group_name: + type: string + label: security group name + description: the name of security group + pcrf_vnf_id: + type: string + label: PCRF VNF Id + description: PCRF VNF Id + +resources: + server_pcrf_pcm_001: + type: nested-pcm_v0.2.yaml + properties: + pcrf_pcm_server_name: { get_param: [pcrf_pcm_server_names, 0] } + pcrf_pcm_image_name: { get_param: pcrf_pcm_image_name } + pcrf_pcm_flavor_name: { get_param: pcrf_pcm_flavor_name } + availabilityzone_name: { get_param: availabilityzone_name } + 
pcrf_security_group_name: { get_param: pcrf_security_group_name } + pcrf_pcm_volume_id: { get_param: pcrf_pcm_volume_id_1 } + pcrf_cps_net_name: { get_param: pcrf_cps_net_name } + pcrf_cps_net_ip: { get_param: [pcrf_cps_net_ips, 0] } + pcrf_cps_net_mask: { get_param: pcrf_cps_net_mask } + pcrf_oam_net_name: { get_param: pcrf_oam_net_name } + pcrf_oam_net_ip: { get_param: [pcrf_oam_net_ips, 0] } + pcrf_oam_net_mask: { get_param: pcrf_oam_net_mask } + pcrf_oam_net_gw: { get_param: pcrf_oam_net_gw } + pcrf_vnf_id: {get_param: pcrf_vnf_id} diff --git a/ui-ci-dev/src/main/resources/Files/myYang.xml b/ui-ci-dev/src/main/resources/Files/myYang.xml new file mode 100644 index 0000000000..0d86d213e6 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/myYang.xml @@ -0,0 +1,8 @@ +<beans xmlns="http://www.springframework.org/schema/beans" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://www.springframework.org/schema/beans + http://www.springframework.org/schema/beans/spring-beans-2.5.xsd"> + + <bean id="muranoLogic" class="org.openecomp.sdc.impl.MuranoLogic"> </bean> + +</beans>
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/mycompute.yml b/ui-ci-dev/src/main/resources/Files/mycompute.yml new file mode 100644 index 0000000000..c8a0c03384 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/mycompute.yml @@ -0,0 +1,18 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 +node_types: + org.openecomp.resource.vfc.mycompute: + derived_from: tosca.nodes.Compute + capabilities: + scalable: + type: tosca.capabilities.Scalable + properties: + propertyForTest: + type: string + description: test + required: true + default: success + # min_instances property should override property from tosca.capabilities.Scalable + min_instances: + type: integer + default: 3 + diff --git a/ui-ci-dev/src/main/resources/Files/service_with_inputs.csar b/ui-ci-dev/src/main/resources/Files/service_with_inputs.csar Binary files differnew file mode 100644 index 0000000000..c4d4881fec --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/service_with_inputs.csar diff --git a/ui-ci-dev/src/main/resources/Files/vADTRAN.zip b/ui-ci-dev/src/main/resources/Files/vADTRAN.zip Binary files differnew file mode 100644 index 0000000000..3ecde2c7ec --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/vADTRAN.zip diff --git a/ui-ci-dev/src/main/resources/Files/vCDN.zip b/ui-ci-dev/src/main/resources/Files/vCDN.zip Binary files differnew file mode 100644 index 0000000000..51e654a841 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/vCDN.zip diff --git a/ui-ci-dev/src/main/resources/Files/vFW_VF.yml b/ui-ci-dev/src/main/resources/Files/vFW_VF.yml new file mode 100644 index 0000000000..100883e399 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/vFW_VF.yml @@ -0,0 +1,58 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + + org.openecomp.resource.vf.vFW: + derived_from: tosca.nodes.Root + properties: + att-part-number: + type: string + vendor-name: + type: string + vendor-part-number: + type: string + vendor-model: + type: string + vendor-model-description: + type: string + vcpu-default: + type: integer + vcpu-min: + type: integer + vcpu-max: + type: integer + vmemory-default: + type: integer + vmemory-units: + type: string + default: "GB" + vmemory-min: + type: integer + vmemory-max: + type: integer + vdisk-default: + type: integer + vdisk-units: + type: string + default: "GB" + vdisk-min: + type: integer + vdisk-max: + type: integer + vnf-type: + type: string + software-version: + type: version + software-version-state: + type: integer + software-file-name: + type: string + vnf-feature: + type: string + requirements: + - host: + capability: tosca.capabilities.Container + relationship: tosca.relationships.HostedOn + capabilities: + binding: + type: tosca.capabilities.network.Bindable
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/vFW_VFC.yml b/ui-ci-dev/src/main/resources/Files/vFW_VFC.yml new file mode 100644 index 0000000000..d0814c43aa --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/vFW_VFC.yml @@ -0,0 +1,58 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + + org.openecomp.resource.vfc.vFW: + derived_from: tosca.nodes.Root + properties: + att-part-number: + type: string + vendor-name: + type: string + vendor-part-number: + type: string + vendor-model: + type: string + vendor-model-description: + type: string + vcpu-default: + type: integer + vcpu-min: + type: integer + vcpu-max: + type: integer + vmemory-default: + type: integer + vmemory-units: + type: string + default: "GB" + vmemory-min: + type: integer + vmemory-max: + type: integer + vdisk-default: + type: integer + vdisk-units: + type: string + default: "GB" + vdisk-min: + type: integer + vdisk-max: + type: integer + vnf-type: + type: string + software-version: + type: version + software-version-state: + type: integer + software-file-name: + type: string + vnf-feature: + type: string + requirements: + - host: + capability: tosca.capabilities.Container + relationship: tosca.relationships.HostedOn + capabilities: + binding: + type: tosca.capabilities.network.Bindable
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/vRouter_vfc.yml b/ui-ci-dev/src/main/resources/Files/vRouter_vfc.yml new file mode 100644 index 0000000000..95ffe959de --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/vRouter_vfc.yml @@ -0,0 +1,78 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + + +node_types: + org.openecomp.resource.vfc.vRouter: + derived_from: tosca.nodes.Root + properties: + att-part-number: + type: string + vendor-name: + type: string + vendor-part-number: + type: string + vendor-model: + type: string + vendor-model-description: + type: string + vcpu-default: + type: integer + vcpu-min: + type: integer + vcpu-max: + type: integer + vmemory-default: + type: integer + vmemory-units: + type: string + default: "GB" + vmemory-min: + type: integer + vmemory-max: + type: integer + vdisk-default: + type: integer + vdisk-units: + type: string + default: "GB" + vdisk-min: + type: integer + vdisk-max: + type: integer + vnf-type: + type: string + software-version: + type: string + software-version-state: + type: integer + software-file-name: + type: string + vnf-feature: + type: string + management-v6-address: + type: string + nm-lan-v6-address: + type: string + nm-lan-v6-prefix-length: + type: string + management-v4-address: + type: string + nm-lan-v4-address: + type: string + nm-lan-v4-prefix-length: + type: string + routing-instance-name: + type: string + routing-instances: + type: map + entry_schema: + type: string + requirements: + - host: + capability: tosca.capabilities.Container + relationship: tosca.relationships.HostedOn + capabilities: + binding: + type: tosca.capabilities.network.Bindable + occurrences: [1,UNBOUNDED] +
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/valid HEAT_ENV files.env b/ui-ci-dev/src/main/resources/Files/valid HEAT_ENV files.env new file mode 100644 index 0000000000..e576c0f67d --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/valid HEAT_ENV files.env @@ -0,0 +1,54 @@ +parameters: + flavor_scp_be_id: m1.small + flavor_scp_fe_id: m1.small + flavor_smp_id: m1.small + flavor_db_id: m1.small + image_scp_be_id: CCLINUX + image_scp_fe_id: CCLINUX + image_smp_id: CCLINUX + image_db_id: CCLINUX + + int_vscp_fe_cluster_net_id: int_vscp_fe_cluster_net + int_vscp_fe_cluster_cidr: 172.26.2.0/24 + int_vscp_cluster_net_id: int_vscp_cluster_net + int_vscp_cluster_cidr: 172.26.3.0/24 + int_vscp_db_network_net_id: int_vscp_db_network_net + int_vscp_db_network_cidr: 172.26.1.0/24 + + SIGNET_vrf_A1_direct_net_id: SIGNET_vrf_A1_direct_net + SIGNET_vrf_B1_direct_net_id: SIGNET_vrf_B1_direct_net + Cricket_OCS_protected_net_id: Cricket_OCS_protected_net +# OAM_direct_net_id: OAM_net +# OAM_direct_net_id: oam-direct-net + OAM_direct_net_id: Marks_OAM_direct_net + + be0_Cricket_OCS_protected_ips: 107.239.15.17 + be1_Cricket_OCS_protected_ips: 107.239.15.18 + be2_Cricket_OCS_protected_ips: 107.239.15.19 + be3_Cricket_OCS_protected_ips: 107.239.15.20 + be4_Cricket_OCS_protected_ips: 107.239.15.21 + be0_OAM_direct_ips: 10.250.10.33 + be1_OAM_direct_ips: 10.250.10.34 + be2_OAM_direct_ips: 10.250.10.35 + be3_OAM_direct_ips: 10.250.10.36 + be4_OAM_direct_ips: 10.250.10.37 + fe0_SIGNET_vrf_A1_direct_ips: 172.26.4.1 + fe0_OAM_direct_ips: 10.250.10.38 + fe1_SIGNET_vrf_B1_direct_ips: 172.26.4.5 + fe1_OAM_direct_ips: 10.250.10.39 + smp0_OAM_direct_ips: 10.250.10.40 + smp1_OAM_direct_ips: 10.250.10.41 + db0_OAM_direct_ips: 10.250.10.42 + db1_OAM_direct_ips: 10.250.10.43 + + vm_scp_be0_name: vSCP_BE0 + vm_scp_be1_name: vSCP_BE1 + vm_scp_be2_name: vSCP_BE2 + vm_scp_be3_name: vSCP_BE3 + vm_scp_be4_name: vSCP_BE4 + vm_scp_fe0_name: vSCP_FE0 + vm_scp_fe1_name: vSCP_FE1 + vm_smp0_name: vSMP0 + vm_smp1_name: vSMP1 + vm_db0_name: vDB0 + vm_db1_name: vDB1 diff --git a/ui-ci-dev/src/main/resources/Files/validHEATfiles.yaml b/ui-ci-dev/src/main/resources/Files/validHEATfiles.yaml new file mode 100644 index 0000000000..6835485ca1 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/validHEATfiles.yaml @@ -0,0 +1,787 @@ +heat_template_version: 2013-05-23 +################################# +# +# Changes in v0.2: +# - Unique availability zone for each VM +# - LAN8 and SLAN networks removed according to latest Prod/Type I diagram +# - 2 DB VMs added +# - Images corrected +# - VM start-up order: SMP->DB->BE->FE (no error handling yet) +# - Provisioning scripts placeholders +# +################################# + +description: ASC Template + +parameters: +# availability_zone_smp0: +# type: string +# default: nova +# availability_zone_smp1: +# type: string +# default: nova +# availability_zone_fe0: +# type: string +# default: nova +# availability_zone_fe1: +# type: string +# default: nova +# availability_zone_db0: +# type: string +# default: nova +# availability_zone_db1: +# type: string +# default: nova +# availability_zone_be0: +# type: string +# default: nova +# availability_zone_be1: +# type: string +# default: nova +# availability_zone_be2: +# type: string +# default: nova +# availability_zone_be3: +# type: string +# default: nova +# availability_zone_be4: +# type: string +# default: nova + + vnf_name: + type: string + description: Unique name for this VNF instance + default: This_is_the_SCP_name 
+ vnf_id: + type: string + description: Unique ID for this VNF instance + default: This_is_the_SCP_id + + flavor_scp_be_id: + type: string + description: flavor type + default: a1.Small + flavor_scp_fe_id: + type: string + description: flavor type + default: a1.Small + flavor_smp_id: + type: string + description: flavor type + default: a1.Small + flavor_db_id: + type: string + description: flavor type + default: a1.Small + image_scp_be_id: + type: string + description: Image used to boot a server + default: asc_base_image_be + image_scp_fe_id: + type: string + description: Image used to boot a server + default: asc_base_image_fe + image_smp_id: + type: string + description: Image used to boot a server + default: asc_base_image_smp + image_db_id: + type: string + description: Image used to boot a server + default: asc_base_image_db + + int_vscp_fe_cluster_net_id: + type: string + description: LAN2 FE Cluster/KA + int_vscp_fe_cluster_cidr: + type: string + description: Private Network2 Address (CIDR notation) + int_vscp_cluster_net_id: + type: string + description: LAN3 Cluster + int_vscp_cluster_cidr: + type: string + description: Private Network3 Address (CIDR notation) + int_vscp_db_network_net_id: + type: string + description: LAN4 DB + int_vscp_db_network_cidr: + type: string + description: Private Network4 Address (CIDR notation) + SIGNET_vrf_A1_direct_net_id: + type: string + description: Network name for SIGTRAN_A + SIGNET_vrf_B1_direct_net_id: + type: string + description: Network name for SIGTRAN_B + Cricket_OCS_protected_net_id: + type: string + description: Network name for CRICKET_OCS + OAM_direct_net_id: + type: string + description: Network name for OAM + be0_Cricket_OCS_protected_ips: + type: string + label: be0 port 5 OAM ip address + description: be0 port 5 OAM ip address + be1_Cricket_OCS_protected_ips: + type: string + label: be1 port 5 OAM ip address + description: be1 port 5 OAM ip address + be2_Cricket_OCS_protected_ips: + type: string + label: be2 port 5 OAM ip address + description: be2 port 5 OAM ip address + be3_Cricket_OCS_protected_ips: + type: string + label: be3 port 5 OAM ip address + description: be3 port 5 OAM ip address + be4_Cricket_OCS_protected_ips: + type: string + label: be4 port 5 OAM ip address + description: be4 port 5 OAM ip address + be0_OAM_direct_ips: + type: string + label: be0 port 7 OAM ip address + description: be0 port 7 OAM ip address + be1_OAM_direct_ips: + type: string + label: be1 port 7 OAM ip address + description: be1 port 7 OAM ip address + be2_OAM_direct_ips: + type: string + label: be2 port 7 OAM ip address + description: be2 port 7 OAM ip address + be3_OAM_direct_ips: + type: string + label: be3 port 7 OAM ip address + description: be3 port 7 OAM ip address + be4_OAM_direct_ips: + type: string + label: be4 port 7 OAM ip address + description: be4 port 7 OAM ip address + fe0_SIGNET_vrf_A1_direct_ips: + type: string + label: fe0 port 0 SIGTRAN ip address + description: fe0 port 0 SIGTRAN ip address + fe0_OAM_direct_ips: + type: string + label: fe0 port 7 OAM ip address + description: fe0 port 7 OAM ip address + fe1_SIGNET_vrf_B1_direct_ips: + type: string + label: fe1 port 1 SIGTRAN ip address + description: fe1 port 1 SIGTRAN ip address + fe1_OAM_direct_ips: + type: string + label: fe1 port 7 OAM ip address + description: fe1 port 7 OAM ip address + smp0_OAM_direct_ips: + type: string + label: smp0 port 7 OAM ip address + description: smp0 port 7 OAM ip address + smp1_OAM_direct_ips: + type: string + label: smp1 port 7 OAM ip address 
+ description: smp1 port 7 OAM ip address + db0_OAM_direct_ips: + type: string + label: db0 port 7 OAM ip address + description: db0 port 7 OAM ip address + db1_OAM_direct_ips: + type: string + label: db1 port 7 OAM ip address + description: db1 port 7 OAM ip address + vm_scp_be0_name: + type: string + default: vSCP_BE0 + description: name of VM + vm_scp_be1_name: + type: string + default: vSCP_BE1 + description: name of VM + vm_scp_be2_name: + type: string + default: vSCP_BE2 + description: name of VM + vm_scp_be3_name: + type: string + default: vSCP_BE3 + description: name of VM + vm_scp_be4_name: + type: string + default: vSCP_BE4 + description: name of VM + vm_scp_fe0_name: + type: string + default: vSCP_FE0 + description: name of VM + vm_scp_fe1_name: + type: string + default: vSCP_FE1 + description: name of VM + vm_smp0_name: + type: string + default: vSMP0 + description: name of VM + vm_smp1_name: + type: string + default: vSMP1 + description: name of VM + vm_db0_name: + type: string + default: vDB0 + description: name of VM + vm_db1_name: + type: string + default: vDB1 + description: name of VM + +resources: +# scp_be_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: scp_be_wait_handle } +# count: 5 +# timeout: 300 +# scp_be_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# scp_fe_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: scp_fe_wait_handle } +# count: 2 +# timeout: 300 +# scp_fe_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# smp_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: smp_wait_handle } +# count: 2 +# timeout: 300 +# smp_wait_handle: +# type: OS::Heat::WaitConditionHandle +# +# db_wait_condition: +# type: OS::Heat::WaitCondition +# properties: +# handle: { get_resource: db_wait_handle } +# count: 2 +# timeout: 300 +# db_wait_handle: +# type: OS::Heat::WaitConditionHandle + + FE_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + BE_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + SMP_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + DB_Affinity: + type: OS::Nova::ServerGroup + properties: + policies: ["anti-affinity"] + + FE_Clustering_KA: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_fe_cluster_net_id } + + FE_Clustering_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: FE_Clustering_KA } + cidr: { get_param: int_vscp_fe_cluster_cidr } + + Clustering_Network: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_cluster_net_id } + + Clustering_Network_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: Clustering_Network } + cidr: { get_param: int_vscp_cluster_cidr } + + DB_Network: + type: OS::Contrail::VirtualNetwork + properties: + name: { get_param: int_vscp_db_network_net_id } + + DB_Network_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: DB_Network } + cidr: { get_param: int_vscp_db_network_cidr } + + server_scp_be0: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be0_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be0 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: 
be0_port_3 } + - port: { get_resource: be0_port_4 } + - port: { get_resource: be0_port_5 } + - port: { get_resource: be0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be0_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be0_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be0_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be0_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be0_Cricket_OCS_protected_ips}}] + + be0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be0_OAM_direct_ips}}] + + server_scp_be1: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be1_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be1 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be1_port_3 } + - port: { get_resource: be1_port_4 } + - port: { get_resource: be1_port_5 } + - port: { get_resource: be1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be1_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be1_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be1_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be1_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be1_Cricket_OCS_protected_ips}}] + + be1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be1_OAM_direct_ips}}] + + server_scp_be2: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be2_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be2 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be2_port_3 } + - port: { get_resource: be2_port_4 } + - port: { get_resource: be2_port_5 } + - port: { get_resource: be2_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be2_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be2_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be2_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be2_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be2_Cricket_OCS_protected_ips}}] + + be2_port_7: + type: OS::Neutron::Port + 
properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be2_OAM_direct_ips}}] + + server_scp_be3: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be3_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be3 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be3_port_3 } + - port: { get_resource: be3_port_4 } + - port: { get_resource: be3_port_5 } + - port: { get_resource: be3_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be3_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be3_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be3_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be3_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be3_Cricket_OCS_protected_ips}}] + + be3_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be3_OAM_direct_ips}}] + + server_scp_be4: + type: OS::Nova::Server +# depends on: db_wait_condition + properties: + name: { get_param: vm_scp_be4_name } + image: { get_param: image_scp_be_id } +# availability_zone: { get_param: availability_zone_be4 } + flavor: { get_param: flavor_scp_be_id } + scheduler_hints: { group: { get_resource: BE_Affinity } } + networks: + - port: { get_resource: be4_port_3 } + - port: { get_resource: be4_port_4 } + - port: { get_resource: be4_port_5 } + - port: { get_resource: be4_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_be4_name} +# wc_notify: { get_attr: ['scp_be_wait_handle', 'curl_cli'] } + + be4_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + be4_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + be4_port_5: + type: OS::Neutron::Port + properties: + network: { get_param: Cricket_OCS_protected_net_id } + fixed_ips: [{"ip_address": {get_param: be4_Cricket_OCS_protected_ips}}] + + be4_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: be4_OAM_direct_ips}}] + + server_scp_fe0: + type: OS::Nova::Server +# depends on: scp_be_wait_condition + properties: + name: { get_param: vm_scp_fe0_name } + image: { get_param: image_scp_fe_id } +# availability_zone: { get_param: availability_zone_fe0 } + flavor: { get_param: flavor_scp_fe_id } + scheduler_hints: { group: { get_resource: FE_Affinity } } + networks: + - port: { get_resource: fe0_port_0 } + - port: { get_resource: fe0_port_2 } + - port: { get_resource: fe0_port_3 } + - port: { get_resource: fe0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_fe0_name} +# 
wc_notify: { get_attr: ['scp_fe_wait_handle', 'curl_cli'] } + + fe0_port_0: + type: OS::Neutron::Port + properties: + network: { get_param: SIGNET_vrf_A1_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe0_SIGNET_vrf_A1_direct_ips}}] + + fe0_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: FE_Clustering_KA } + + fe0_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + fe0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe0_OAM_direct_ips}}] + + server_scp_fe1: + type: OS::Nova::Server +# depends on: scp_be_wait_condition + properties: + name: { get_param: vm_scp_fe1_name } + image: { get_param: image_scp_fe_id } +# availability_zone: { get_param: availability_zone_fe1 } + flavor: { get_param: flavor_scp_fe_id } + scheduler_hints: { group: { get_resource: FE_Affinity } } + networks: + - port: { get_resource: fe1_port_1 } + - port: { get_resource: fe1_port_2 } + - port: { get_resource: fe1_port_3 } + - port: { get_resource: fe1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_scp_fe1_name} +# wc_notify: { get_attr: ['scp_fe_wait_handle', 'curl_cli'] } + + fe1_port_1: + type: OS::Neutron::Port + properties: + network: { get_param: SIGNET_vrf_B1_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe1_SIGNET_vrf_B1_direct_ips}}] + + fe1_port_2: + type: OS::Neutron::Port + properties: + network_id: { get_resource: FE_Clustering_KA } + + fe1_port_3: + type: OS::Neutron::Port + properties: + network_id: { get_resource: Clustering_Network } + + fe1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: fe1_OAM_direct_ips}}] + + server_smp0: + type: OS::Nova::Server + properties: + name: { get_param: vm_smp0_name } + image: { get_param: image_smp_id } +# availability_zone: { get_param: availability_zone_smp0 } + flavor: { get_param: flavor_smp_id } + scheduler_hints: { group: { get_resource: SMP_Affinity } } + networks: + - port: { get_resource: smp0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_smp0_name} +# wc_notify: { get_attr: ['smp_wait_handle', 'curl_cli'] } + + smp0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: smp0_OAM_direct_ips}}] + + server_smp1: + type: OS::Nova::Server + properties: + name: { get_param: vm_smp1_name } + image: { get_param: image_smp_id } +# availability_zone: { get_param: availability_zone_smp1 } + flavor: { get_param: flavor_smp_id } + scheduler_hints: { group: { get_resource: SMP_Affinity } } + networks: + - port: { get_resource: smp1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_smp1_name} +# wc_notify: { get_attr: ['smp_wait_handle', 'curl_cli'] } + + smp1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: 
smp1_OAM_direct_ips}}] + + server_db0: + type: OS::Nova::Server +# depends_on: smp_wait_condition + properties: + name: { get_param: vm_db0_name } + image: { get_param: image_db_id } +# availability_zone: { get_param: availability_zone_db0 } + flavor: { get_param: flavor_db_id } + scheduler_hints: { group: { get_resource: DB_Affinity } } + networks: + - port: { get_resource: db0_port_4 } + - port: { get_resource: db0_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_db0_name} +# wc_notify: { get_attr: ['db_wait_handle', 'curl_cli'] } + + db0_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + db0_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: db0_OAM_direct_ips}}] + + server_db1: + type: OS::Nova::Server +# depends_on: smp_wait_condition + properties: + name: { get_param: vm_db1_name } + image: { get_param: image_db_id } +# availability_zone: { get_param: availability_zone_db1 } + flavor: { get_param: flavor_db_id } + scheduler_hints: { group: { get_resource: DB_Affinity } } + networks: + - port: { get_resource: db1_port_4 } + - port: { get_resource: db1_port_7 } + metadata: + vnf_id: { get_param: vnf_id } + user_data: + str_replace: + template: | + #!/bin/bash + #todo: provision $vm_name + wc_notify --data-binary '{"status": "SUCCESS"}' + params: + $vm_name: {get_param: vm_db1_name} +# wc_notify: { get_attr: ['db_wait_handle', 'curl_cli'] } + + db1_port_4: + type: OS::Neutron::Port + properties: + network_id: { get_resource: DB_Network } + + db1_port_7: + type: OS::Neutron::Port + properties: + network: { get_param: OAM_direct_net_id } + fixed_ips: [{"ip_address": {get_param: db1_OAM_direct_ips}}]
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/valid_vf.csar b/ui-ci-dev/src/main/resources/Files/valid_vf.csar Binary files differ new file mode 100644 index 0000000000..01bf159071 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/valid_vf.csar diff --git a/ui-ci-dev/src/main/resources/Files/vf_with_groups.csar b/ui-ci-dev/src/main/resources/Files/vf_with_groups.csar Binary files differ new file mode 100644 index 0000000000..61ea8cee20 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/vf_with_groups.csar diff --git a/ui-ci-dev/src/main/resources/Files/yamlSample.yml b/ui-ci-dev/src/main/resources/Files/yamlSample.yml new file mode 100644 index 0000000000..10ccf71d51 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/yamlSample.yml @@ -0,0 +1,5 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.cp.CP: + derived_from: org.openecomp.resource.cp.root
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/Files/yamlSample2.yml b/ui-ci-dev/src/main/resources/Files/yamlSample2.yml new file mode 100644 index 0000000000..10ccf71d51 --- /dev/null +++ b/ui-ci-dev/src/main/resources/Files/yamlSample2.yml @@ -0,0 +1,5 @@ +tosca_definitions_version: tosca_simple_yaml_1_0_0 + +node_types: + org.openecomp.resource.cp.CP: + derived_from: org.openecomp.resource.cp.root
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/ci/conf/credentials.yaml b/ui-ci-dev/src/main/resources/ci/conf/credentials.yaml new file mode 100644 index 0000000000..63a4280264 --- /dev/null +++ b/ui-ci-dev/src/main/resources/ci/conf/credentials.yaml @@ -0,0 +1,48 @@ + designer: { + username: m99121, + password: 66-Percent, + firstname: ASDC, + lastname: KASPIN + } + admin: { + username: m99122, + password: 98-Degrees, + firstname: ASDC, + lastname: KASPIN + } + ops: { + username: m99123, + password: 17-Diameter, + firstname: ASDC, + lastname: KASPIN + } + tester: { + username: m99124, + password: 802-NotaGroup, + firstname: ASDC, + lastname: KASPIN + } + governor: { + username: m99125, + password: 142-Officiant, + firstname: ASDC, + lastname: KASPIN + } + product_strategist: { + username: m99126, + password: 1910-FruitGum, + firstname: ASDC, + lastname: KASPIN + } + product_manager: { + username: m99127, + password: 747-Airplane, + firstname: ASDC, + lastname: KASPIN + } + product_local: { + username: pm0001, + password: 123123a, + firstname: ASDC, + lastname: KASPIN + }
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/ci/conf/log4j.properties b/ui-ci-dev/src/main/resources/ci/conf/log4j.properties new file mode 100644 index 0000000000..3e159ec8df --- /dev/null +++ b/ui-ci-dev/src/main/resources/ci/conf/log4j.properties @@ -0,0 +1,34 @@ +# Define the root logger with appender file +log4j.rootLogger = DEBUG, FILE, stdout + +# Define the file appender +log4j.appender.FILE=org.apache.log4j.RollingFileAppender +log4j.appender.FILE.File=${targetlog}logs/ci-log.out + +# Define the layout for file appender +log4j.appender.FILE.layout=org.apache.log4j.PatternLayout +log4j.appender.FILE.layout.conversionPattern=%d{yyyy-MM-dd HH:mm:ss} %5p [%10c] : %m%n + +# Set the maximum file size before rollover +log4j.appender.FILE.maxFileSize=5MB + +# Set the backup index +log4j.appender.FILE.maxBackupIndex=10 + + +############################################################# + +# Direct log messages to stdout +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Target=System.out +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +#log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n +log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %5p %10c:%L - %m%n + +log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG +log4j.logger.com.thinkaurelius.titan.diskstorage.cassandra.CassandraTransaction=INFO, FILE, stdout + +log4j.logger.org.openecomp.sdc.ci.tests.utils=TRACE, FILE, stdout +log4j.additivity.org.openecomp.sdc.ci.tests.utils=false + + diff --git a/ui-ci-dev/src/main/resources/ci/conf/sdc-packages.yaml b/ui-ci-dev/src/main/resources/ci/conf/sdc-packages.yaml new file mode 100644 index 0000000000..dcb78eefc1 --- /dev/null +++ b/ui-ci-dev/src/main/resources/ci/conf/sdc-packages.yaml @@ -0,0 +1,2 @@ +packages: + - org.openecomp.sdc.ci.tests.execute.resourceui
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/ci/conf/sdc.yaml b/ui-ci-dev/src/main/resources/ci/conf/sdc.yaml new file mode 100644 index 0000000000..bdca2ab972 --- /dev/null +++ b/ui-ci-dev/src/main/resources/ci/conf/sdc.yaml @@ -0,0 +1,80 @@ +outputFolder: target +reportName: index.html +catalogBeHost: behost +catalogFeHost: fehost +esHost: eshost +disributionClientHost: disClient +catalogFePort: 8181 +catalogBePort: 8080 +disributionClientPort: 8181 +esPort: 9200 +neoHost: neoHost +neoPort: 7474 +neoDBusername: neo4j +neoDBpassword: 123456 +#url: http://localhost:9000/#/dashboard +url: http://localhost:8181/sdc1/proxy-designer1#/dashboard + +webSealSimulatorUrl: http://localhost:8285/sdc1 +remoteTestingMachineIP: 1.2.3.4 +remoteTestingMachinePort: 5555 +remoteTesting: false + +resourceConfigDir: src/test/resources/CI/tests +componentsConfigDir: src/test/resources/CI/components +importResourceConfigDir: ../catalog-be/src/main/resources/import/tosca/capability-types +importResourceTestsConfigDir: src/test/resources/CI/importResourceTests +errorConfigurationFile: ../catalog-be/src/main/resources/config/error-configuration.yaml +configurationFile: ../catalog-be/src/main/resources/config/configuration.yaml +importTypesConfigDir: src/test/resources/CI/importTypesTest + + +titanPropertiesFile: src/main/resources/ci/conf/titan.properties +cassandraHost: 127.0.0.1 +cassandraAuthenticate: false +cassandraUsername: koko +cassandraPassword: bobo +cassandraSsl: false +cassandraTruststorePath : /path/path +cassandraTruststorePassword : 123123 +cassandraAuditKeySpace: sdcAudit +cassandraArtifactKeySpace: sdcArtifact + +stopOnClassFailure: false + +#List of non-abstract resources to keep during titan cleanup between tests +#Only 1.0 version will be kept +resourcesNotToDelete: + - Compute + - Database + - ObjectStorage + - BlockStorage + - LoadBalancer + - Port + - Network + - Root + - ContainerApplication + - ContainerRuntime + - DBMS + - SoftwareComponent + - WebApplication + - WebServer + - CinderVolume + - ContrailVirtualNetwork + - NeutronNet + - NeutronPort + - NovaServer +#Resource categories to keep (including all their subcategories) +resourceCategoriesNotToDelete: + - Generic + - Network L2-3 + - Network L4+ + - Application L4+ + - Network Connectivity + +#Service categories to keep +serviceCategoriesNotToDelete: + - Mobility + - Network L1-3 + - Network L4 + - VoIP Call Control
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/ci/conf/titan.properties b/ui-ci-dev/src/main/resources/ci/conf/titan.properties new file mode 100644 index 0000000000..94d12cfba0 --- /dev/null +++ b/ui-ci-dev/src/main/resources/ci/conf/titan.properties @@ -0,0 +1,7 @@ +storage.backend=cassandra +storage.hostname=cassandrahost +storage.port=9160 + +cache.db-cache-clean-wait = 20 +cache.db-cache-time = 180000 +cache.db-cache-size = 0.5
\ No newline at end of file diff --git a/ui-ci-dev/src/main/resources/ci/scripts/startTest.sh b/ui-ci-dev/src/main/resources/ci/scripts/startTest.sh new file mode 100644 index 0000000000..27933699a5 --- /dev/null +++ b/ui-ci-dev/src/main/resources/ci/scripts/startTest.sh @@ -0,0 +1,123 @@ +#!/bin/bash + +TOMCAT_DIR=/home/apache-tomcat-7.0.41/webapps/sdc-ci + +function usage { + echo "Usage: $0 <jar file>" +} + +function exitOnError() { + if [ $1 -ne 0 ] + then + echo "Failed running task $2" + exit 2 + fi +} + +if [ $# -lt 1 ] +then + usage + exit 2 +fi + +CURRENT_DIR=`pwd` +BASEDIR=$(dirname $0) + +if [ ${BASEDIR:0:1} = "/" ] +then + FULL_PATH=$BASEDIR +else + FULL_PATH=$CURRENT_DIR/$BASEDIR +fi + +LOGS_PROP_FILE=file:${FULL_PATH}/../conf/log4j.properties +############################################# +TARGET_DIR=${FULL_PATH}/../target +TARGET_LOG_DIR="${TARGET_DIR}/" +CONF_FILE=${FULL_PATH}/../conf/sdc.yaml +DEBUG=true +MainClass=org.openecomp.sdc.ci.tests.run.StartTest + +JAR_FILE=$1 + +#TARGET_DIR=`echo ${TARGET_DIR} | sed 's/\//\//g'` +#echo $TARGET_DIR + +TESTS_DIR=/opt/app/sdc/ci/resources/tests +COMPONENTS_DIR=/opt/app/sdc/ci/resources/components + + +sed -i 's#\(outputFolder:\).*#\1 '${TARGET_DIR}'#g' $CONF_FILE +sed -i 's#\(resourceConfigDir:\).*#\1 '${TESTS_DIR}'#g' $CONF_FILE +sed -i 's#\(componentsConfigDir:\).*#\1 '${COMPONENTS_DIR}'#g' $CONF_FILE + + + +mkdir -p ${TARGET_DIR} +if [ -d ${TARGET_DIR} ] +then + rm -rf ${TARGET_DIR}/* + exitOnError $? "Failed_to_delete_target_dir" +fi + + +debug_port=8800 +#JAVA_OPTION="-javaagent:/var/tmp/jacoco/lib/jacocoagent.jar=destfile=jacoco-it.exec" +JAVA_OPTION="" +case "$2" in + -debug) echo "Debug mode, Listen on port $debug_port"; JAVA_OPTION="-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=${debug_port}" ;; + "") echo "Standard mode";; + *) echo "USAGE: startTest.sh [-debug]";; +esac + +#cmd="java $JAVA_OPTION -Dconfig.resource=attodlit.conf -Dlog4j.configuration=file:./conf/log4j.properties -cp #att-odl-it_0.0.1-SNAPSHOT-jar-with-dependencies.jar org.openecomp.d2.it.StartTest" + +#cmd="java $JAVA_OPTION -Dconfig.resource=attsdc.conf -Ddebug=true -Dlog4j.configuration=file:./conf/log4j.properties -cp uber-ci-1.0.0-SNAPSHOT.jar org.openecomp.sdc.ci.tests.run.StartTest" + + +cmd="java $JAVA_OPTION -DdisplayException=true -Dtargetlog=${TARGET_LOG_DIR} -Dconfig.resource=${CONF_FILE} -Ddebug=${DEBUG} -Dlog4j.configuration=${LOGS_PROP_FILE} -cp $JAR_FILE ${MainClass}" + +#echo $cmd +#console=`$cmd` + + + +if [ $DEBUG == "true" ] +then + $cmd +else + $cmd >> /dev/null +fi +status=`echo $?` + +#echo "console=$console" +#echo "status=$status" +#tomcat=`ps -ef | grep tomcat | grep java | wc -l` + +#if [ $tomcat == 0 ]; then +# echo "Bring tomcat up" +# apache-tomcat-7.0.41/bin/startup.sh +#fi + +#`rm -rf ./html/*.html` +#`mv *.html ./html/` + + +if [ -d ${TOMCAT_DIR} ] +then + + cp ${TARGET_DIR}/*.html ${TOMCAT_DIR} + mv ${TOMCAT_DIR}/SDC-testReport.html ${TOMCAT_DIR}/index.html +fi + +#echo "tomcat=$tomcat" +#ip=`ifconfig | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | grep 172.20` + +#echo "Report url: http://$ip:8090/att-odl-it/" + +echo "##################################################" +echo "################# status is $status " +echo "##################################################" + +exit $status + diff --git a/ui-ci-dev/src/main/resources/ci/testSuites/fullTests.xml b/ui-ci-dev/src/main/resources/ci/testSuites/fullTests.xml new file mode 100644 index 0000000000..9f912e58b8 --- 
/dev/null +++ b/ui-ci-dev/src/main/resources/ci/testSuites/fullTests.xml @@ -0,0 +1,26 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE suite SYSTEM "http://testng.org/testng-1.0.dtd"> +<suite name="uiFullTests" configfailurepolicy="continue" verbose="2"> + <parameter name="clean-type" value="FULL" /> <!--Valid Values are: PARTIAL, FULL, NONE --> + <test name="Vfc Tests"> + <classes> + <class name="org.openecomp.sdc.uici.tests.execute.vfc.VfcBasicTests"></class> + </classes> + </test> + + <test name="VF Tests"> + <classes> + <class name="org.openecomp.sdc.uici.tests.execute.vf.VfBasicTests" /> + <class name="org.openecomp.sdc.uici.tests.execute.vf.VfCanvasTests" /> + <class name="org.openecomp.sdc.uici.tests.execute.vf.VfOnboardingTests" /> + <class name="org.openecomp.sdc.uici.tests.execute.vf.VfDeploymentTests"/> + </classes> + </test> + + <test name="Service Tests"> + <classes> + <class name="org.openecomp.sdc.uici.tests.execute.service.ServiceBasicTests"></class> + <class name="org.openecomp.sdc.uici.tests.execute.service.ServiceInputsTests"></class> + </classes> + </test> +</suite> diff --git a/ui-ci-dev/src/main/resources/ci/testSuites/sanity.xml b/ui-ci-dev/src/main/resources/ci/testSuites/sanity.xml new file mode 100644 index 0000000000..99e8765038 --- /dev/null +++ b/ui-ci-dev/src/main/resources/ci/testSuites/sanity.xml @@ -0,0 +1,55 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE suite SYSTEM "http://testng.org/testng-1.0.dtd"> +<suite name="uiSanity" configfailurepolicy="continue" verbose="2"> + <parameter name="clean-type" value="FULL" /> <!--Valid Values are: PARTIAL, FULL, NONE --> + <test name="Vfc Tests"> + <classes> + <class name="org.openecomp.sdc.uici.tests.execute.vfc.VfcBasicTests"> + <methods> + <include name="testImportVfc" /> + <include name="testUpdateTypeForAttributeOfVfc" /> + </methods> + </class> + </classes> + </test> + + <test name="VF Tests"> + <classes> + <class name="org.openecomp.sdc.uici.tests.execute.vf.VfBasicTests"> + <methods> + <include name="testImportVf" /> + <include name="testUpdateInstanceAttributeValue" /> + <include name="testVfCertification" /> + </methods> + </class> + <class name="org.openecomp.sdc.uici.tests.execute.vf.VfCanvasTests"> + <methods> + <include name="testCanvasVFSanity" /> + </methods> + </class> + <class name="org.openecomp.sdc.uici.tests.execute.vf.VfDeploymentTests"> + <methods> + <include name="testUpdateModuleNameSanity" /> + <include name="testTabsViewSanity"/> + </methods> + </class> + </classes> + </test> + + <test name="Service Tests"> + <classes> + <class name="org.openecomp.sdc.uici.tests.execute.service.ServiceBasicTests"> + <methods> + <include name="testLinkTwoRI" /> + <include name="testBuildServiceForDistribution" /> + </methods> + </class> + <class name="org.openecomp.sdc.uici.tests.execute.service.ServiceInputsTests"> + <methods> + <include name="testInputsSanity" /> + </methods> + </class> + </classes> + </test> + +</suite> diff --git a/ui-ci-dev/src/main/resources/images/gizmorambo.jpg b/ui-ci-dev/src/main/resources/images/gizmorambo.jpg Binary files differ new file mode 100644 index 0000000000..c9a8fe8a64 --- /dev/null +++ b/ui-ci-dev/src/main/resources/images/gizmorambo.jpg