-rw-r--r--  README.md | 126
-rw-r--r--  heat/ONAP/cloud-config/.env | 17
-rw-r--r--  heat/ONAP/cloud-config/docker-compose.yml | 41
-rw-r--r--  heat/ONAP/cloud-config/sdc_install.sh | 13
-rw-r--r--  heat/ONAP/cloud-config/sdc_wfd_vm_init.sh | 18
-rw-r--r--  heat/ONAP/onap_openstack.yaml | 3
-rw-r--r--  heat/vFW/base_vfw.env | 3
-rw-r--r--  heat/vFW/base_vfw.yaml | 6
-rw-r--r--  heat/vFW_HPA/vFW/base_vfw.env | 3
-rw-r--r--  heat/vFW_HPA/vFW/base_vfw.yaml | 13
-rw-r--r--  heat/vFW_HPA/vFWCL/vFWSNK/base_vfw.yaml | 5
-rw-r--r--  heat/vFW_HPA/vFWCL/vPKG/base_vpkg.yaml | 2
-rw-r--r--  tosca/vCPE/Artifacts/Deployment/OTHER/authorized_keys (renamed from tosca/vCPE/Artifacts/Deployment/Other/authorized_keys) | 0
-rw-r--r--  tosca/vCPE/Artifacts/Deployment/OTHER/id_rsa (renamed from tosca/vCPE/Artifacts/Deployment/Other/id_rsa) | 0
-rw-r--r--  tosca/vCPE/Artifacts/Deployment/OTHER/id_rsa.pub (renamed from tosca/vCPE/Artifacts/Deployment/Other/id_rsa.pub) | 0
-rw-r--r--  tosca/vCPE/Artifacts/Deployment/OTHER/image (renamed from tosca/vCPE/Artifacts/Deployment/Other/image) | 0
-rw-r--r--  tosca/vCPE/infra/MainServiceTemplate.yaml | 20
-rw-r--r--  tosca/vCPE/infra/MainServiceTemplate_sriov.yaml | 20
-rw-r--r--  tosca/vCPE/vbng/MainServiceTemplate.yaml | 8
-rw-r--r--  tosca/vCPE/vbng/MainServiceTemplate_sriov.yaml | 8
-rw-r--r--  tosca/vCPE/vbrgemu/MainServiceTemplate.yaml | 4
-rw-r--r--  tosca/vCPE/vbrgemu/MainServiceTemplate_sriov.yaml | 2
-rw-r--r--  tosca/vCPE/vgmux/MainServiceTemplate.yaml | 8
-rw-r--r--  tosca/vCPE/vgmux/MainServiceTemplate_sriov.yaml | 8
-rw-r--r--  tosca/vCPE/vgw/MainServiceTemplate.yaml | 12
-rw-r--r--  tosca/vCPE/vgw/MainServiceTemplate_sriov.yaml | 12
26 files changed, 215 insertions, 137 deletions
diff --git a/README.md b/README.md
index 945e3d34..9049995c 100644
--- a/README.md
+++ b/README.md
@@ -9,26 +9,30 @@ The Demo repository contains the HEAT templates and scripts for the instantiatio
- pom.xml: POM file used to build the software hosted in this repository.
- - version.properties: current version number of the Demo repository. Format: MAJOR.MINOR.PATCH (e.g. 1.1.0)
+ - version.properties: current version number of the Demo repository. Format: MAJOR.MINOR.PATCH (e.g. 1.3.0)
- - The "boot" directory contains the scripts that install and configure ONAP:
+ - The "boot" and "heat/ONAP/cloud-config" directories contain the scripts that install and configure ONAP. This separation is due to size limits imposed by importing files into a VM for cloud-init. The two directories contain:
- install.sh: sets up the host VM for specific components. This script runs only once, soon after the VM is created.
- vm\_init.sh: contains component-specific configuration, downloads and runs docker containers. For some components, this script may either call a component-specific script (cloned from Gerrit repository) or call docker-compose.
- serv.sh: it is installed in /etc/init.d, calls vm\_init.sh at each VM (re)boot.
- configuration files for the Bind DNS Server installed with ONAP. Currently, both simpledemo.openecomp.org and simpledemo.onap.org domains are supported.
- sdc\_ext\_volume_partitions.txt: file that contains external volume partitions for SDC.
+
+ - The "boot" directory also contains a "robot" sub-directory that includes scripts to run Robot from the VM.
- - The "docker\_update\_scripts" directory contains scripts that update all the docker containers of an ONAP instance.
+ - The "docker\_update\_scripts" directory contains scripts that update all the docker containers of an ONAP instance (NOT UPDATED SINCE AMSTERDAM RELEASE).
- The "heat" directory contains the following sub-directories:
+
+ - OAM-Network: contains the Heat files for creating the ONAP private management network. This is required only if that network needs to be created outside of the ONAP Heat stack.
- - ONAP: contains the HEAT files for the installation of the ONAP platform. NOTE: onap\_openstack.yaml AND onap\_openstack.env ARE THE HEAT TEMPLATE AND ENVIRONMENT FILE CURRENTLY SUPPORTED. onap\_openstack\_float.yaml, onap\_openstack\_float.env, onap\_openstack\_nofloat.yaml, onap\_openstack\_nofloat.env AND onap\_rackspace.yaml, onap\_rackspace.env AREN'T UPDATED AND THEIR USAGE IS DEPRECATED.
+ - ONAP: contains the HEAT files for the installation of the ONAP platform.
- vCPE: contains sub-directories with HEAT templates for the installation of vCPE Infrastructure (Radius Server, DHCP, DNS, Web Server), vBNG, vBRG Emulator, vGMUX, and vGW.
- - vFW: contains the HEAT template for the instantiation of the vFirewall VNF (base\_vfw.yaml) and the environment file (base\_vfw.env) For Amsterdam release, this template is used for testing and demonstrating VNF instantiation only (no closed-loop).
+ - vFW: contains the HEAT template for the instantiation of the vFirewall VNF (base\_vfw.yaml) and the environment file (base\_vfw.env). This template is used for testing and demonstrating VNF instantiation only (no closed-loop).
- - vFWCL: contains two sub-directories, one that hosts the HEAT template for the vFirewall and vSink (vFWSNK/base\_vfw.yaml), and one that hosts the HEAT template for the vPacketGenerator (vPKG/base\_vpkg.yaml). For Amsterdam release, these templates are used for testing and demonstrating VNF instantiation and closed-loop.
+ - vFWCL: contains two sub-directories, one that hosts the HEAT template for the vFirewall and vSink (vFWSNK/base\_vfw.yaml), and one that hosts the HEAT template for the vPacketGenerator (vPKG/base\_vpkg.yaml). These templates are used for testing and demonstrating VNF instantiation and closed-loop.
- vLB: contains the HEAT template for the instantiation of the vPacketGenerator/vLoadBalancer/vDNS VNF (base\_vlb.yaml) and the environment file (base\_vlb.env). The directory also contains the HEAT template for the DNS scaling-up scenario (dnsscaling.yaml) with its environment file (dnsscaling.env).
@@ -40,7 +44,7 @@ The Demo repository contains the HEAT templates and scripts for the instantiatio
- The "tutorials" directory contains tutorials for Clearwater\_IMS and for creating a Netconf mount point in APPC. The "VoLTE" sub-directory is currently not used.
- - The "vagrant" directory contains the scripts that install ONAP using Vagrant.
+ - The "vagrant" directory contains the scripts that install ONAP using Vagrant (NOT UPDATED SINCE AMSTERDAM RELEASE).
- The "vnfs" directory: contains the following directories:
@@ -54,17 +58,17 @@ The Demo repository contains the HEAT templates and scripts for the instantiatio
- VESreporting_vLB: VES client for vLoadBalancer/vDNS demo application. (DEPRECATED SINCE AMSTERDAM RELEASE)
- - VES5.0: source code of the ONAP Vendor Event Listener (VES) Library, version 5.0. (SUPPORTED FOR AMSTERDAM AND BEIJING RELEASES)
+ - VES5.0: source code of the ONAP Vendor Event Listener (VES) Library, version 5.0. (CURRENTLY SUPPORTED)
- - VESreporting_vFW5.0: VES v5.0 client for vFirewall demo application. (SUPPORTED FOR AMSTERDAM AND BEIJING RELEASES)
+ - VESreporting_vFW5.0: VES v5.0 client for vFirewall demo application. (CURRENTLY SUPPORTED)
- - VESreporting_vLB5.0: VES v5.0 client for vLoadBalancer/vDNS demo application. (SUPPORTED FOR AMSTERDAM AND BEIJING RELEASES)
+ - VESreporting_vLB5.0: VES v5.0 client for vLoadBalancer/vDNS demo application. (CURRENTLY SUPPORTED)
- vFW: scripts that download, install and run packages for the vFirewall use case.
- vLB: scripts that download, install and run packages for the vLoadBalancer/vDNS use case.
- - vLBMS: scripts that download, install and run packages for the vLoadBalancer/vDNS used for Manual Scale Out use case.
+ - vLBMS: scripts that download, install and run packages for the vLoadBalancer/vDNS used for the Scale Out use case.
ONAP Installation in OpenStack Clouds via HEAT Template
@@ -72,8 +76,6 @@ ONAP Installation in OpenStack Clouds via HEAT Template
The ONAP HEAT template spins up the entire ONAP platform in OpenStack-based clouds. The template, onap\_openstack.yaml, comes with an environment file, onap\_openstack.env, in which all the default values are defined.
-NOTE: onap\_openstack.yaml AND onap\_openstack.env ARE THE HEAT TEMPLATE AND ENVIRONMENT FILE CURRENTLY SUPPORTED. onap\_openstack\_float.yaml, onap\_openstack\_float.env, onap\_openstack\_nofloat.yaml, onap\_openstack\_nofloat.env AND onap\_rackspace.yaml, onap\_rackspace.env AREN'T UPDATED AND THEIR USAGE IS DEPRECATED. As such, the following description refers to onap\_openstack.yaml and onap\_openstack.env.
-
The HEAT template is composed of two sections: (i) parameters, and (ii) resources.
- The "parameters" section contains the declarations and descriptions of the parameters that will be used to spin up ONAP, such as public network identifier, URLs of code and artifacts repositories, etc. The default values of these parameters can be found in the environment file.
@@ -147,28 +149,14 @@ Some network parameters must be configured:
dns_forwarder: PUT THE IP OF DNS FORWARDER FOR ONAP DEPLOYMENT'S OWN DNS SERVER
oam_network_cidr: 10.0.0.0/16
-ONAP installs a DNS server used to resolve IP addresses in the ONAP OAM private network. Unlike Amsterdam Release, ONAP Beijing does not requires OpenStack Designate DNS support for the DCAE platform. For Beijing Release, in fact, all the DCAE containers are installed in a single VM that has access to the OAM network. Originally, dns\_list and external\_dns were both used to circumvent some limitations of older OpenStack versions. In future releases, the DNS settings and parameters in HEAT will be consolidated.
-
-Due to the new DCAE installation methodology, the following parameters are deprecated and no longer needed for DCAE instantiation:
-
- dcae_keystone_url: PUT THE MULTIVIM PROVIDED KEYSTONE API URL HERE
- dcae_centos_7_image: PUT THE CENTOS7 VM IMAGE NAME HERE FOR DCAE LAUNCHED CENTOS7 VM
- dcae_domain: PUT THE NAME OF DOMAIN THAT DCAE VMS REGISTER UNDER
- dcae_public_key: PUT THE PUBLIC KEY OF A KEYPAIR HERE TO BE USED BETWEEN DCAE LAUNCHED VMS
- dcae_private_key: PUT THE SECRET KEY OF A KEYPAIR HERE TO BE USED BETWEEN DCAE LAUNCHED VMS
- dnsaas_config_enabled: PUT WHETHER TO USE PROXYED DESIGNATE
- dnsaas_region: PUT THE DESIGNATE PROVIDING OPENSTACK'S REGION HERE
- dnsaas_keystone_url: PUT THE DESIGNATE PROVIDING OPENSTACK'S KEYSTONE URL HERE
- dnsaas_tenant_name: PUT THE TENANT NAME IN THE DESIGNATE PROVIDING OPENSTACK HERE (FOR R1 USE THE SAME AS openstack_tenant_name)
- dnsaas_username: PUT THE DESIGNATE PROVIDING OPENSTACK'S USERNAME HERE
- dnsaas_password: PUT THE DESIGNATE PROVIDING OPENSTACK'S PASSWORD HERE
+ONAP installs a DNS server used to resolve IP addresses in the ONAP OAM private network. Originally, dns\_list and external\_dns were both used to circumvent some limitations of older OpenStack versions.
-For Beijing Release, DCAE requires a new parameter called dcae\_deployment\_profile. It accepts one of the following values:
- - R2MVP: Installs only the basic DCAE functionalities that will support the vFW/vDNS, vCPE and vVoLTE use cases;
- - R2: Full DCAE installation;
- - R2PLUS: This profile deploys the DCAE R2 stretch goal service components.
+DCAE requires a parameter called dcae\_deployment\_profile. It accepts one of the following values:
+ - R3MVP: Installs only the basic DCAE functionalities that will support the vFW/vDNS, vCPE and vVoLTE use cases;
+ - R3: Full DCAE installation;
+ - R3PLUS: This profile deploys the DCAE R3 stretch goal service components.
-The recommended DCAE profile for Beijing Release is R2. For more information about DCAE deployment with HEAT, please refer to the ONAP documentation: https://onap.readthedocs.io/en/latest/submodules/dcaegen2.git/docs/sections/installation_heat.html
+The recommended DCAE profile for Casablanca Release is R3. For more information about DCAE deployment with HEAT, please refer to the ONAP documentation: https://onap.readthedocs.io/en/latest/submodules/dcaegen2.git/docs/sections/installation_heat.html
The ONAP platform can be instantiated via Horizon (OpenStack dashboard) or Command Line.
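A minimal sketch of the Command Line path, assuming onap_openstack.env has already been customized (including dcae_deployment_profile, e.g. R3) and OpenStack credentials have been sourced; the stack name "ONAP" is illustrative:

    source openrc                                                       # credentials for the target tenant
    openstack stack create -t onap_openstack.yaml -e onap_openstack.env ONAP
    openstack stack list                                                # wait for the stack to reach CREATE_COMPLETE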
@@ -242,7 +230,7 @@ To adjust the traffic volume produced by the packet generator, run the following
The command above enables 5 streams.
-vLoadBalancer/vDNS Use Case
+vLoadBalancer/vDNS Use Case (old scale out use case)
---
The use case is composed of three VFs: packet generator, load balancer, and DNS server. These VFs run in three separate VMs. The packet generator issues DNS lookup queries that reach the DNS server via the load balancer. DNS replies reach the packet generator via the load balancer as well. The load balancer reports the average amount of traffic per DNS over a time interval to the DCAE collector. When the average amount of traffic per DNS server crosses a predefined threshold, the closed-loop is triggered and a new DNS server is instantiated.
@@ -316,17 +304,21 @@ The HEAT environment file contains two parameters:
volume\_size is the size (in gigabytes) of the volume group. nova\_instance is the name or UUID of the VM to which the volume group will be attached. This parameter should be changed appropriately.
-VNF component Auto Scale Out with Manual Trigger use case via VID and APPC
+VF Module Scale Out Use Case
---
-The Auto Scale Out with Manual Trigger use case shows how users/network operators can add capacity to an existing VNF. ONAP Beijing release supports scale out of VNF components in two ways, so as to demonstrate flexibility of the ONAP platform and the use case itself. One way involves triggering the scale out operations via the Virtual Infrastructure Deployment (VID) GUI, and uses the Application Controller (APPC) as a generic VNF Manager. This is demonstrated against the vLB/vDNS VNFs. The second example involves triggering scale out operations from the Use case UI (UUI) and uses Virtual Function Controller (VF-C) as generic VNF Manager. This is demonstrated against VoLTE VNFs (MME, SAE-GW, CSCF, TAS). Both scale out blueprints use the Service Orchestrator (SO) as workflow execution engine.
+The Scale Out use case shows how users/network operators can add capacity to an existing VNF. ONAP Casablanca release supports scale out with manual trigger from VID and closed-loop enabled automation from Policy. This is demonstrated against the vLB/vDNS VNFs. For Casablanca, both APPC and SDNC controllers are supported. APPC is the official controller used for this use case and it can be used to scale multiple VNF types. SDNC is experimental for now and it can scale only the vDNS VNF developed for ONAP.
-This repository hosts the source code and scripts that implement the vLB/vDNS VNFs for the scale out blueprint that uses VID, SO, and APPC. At high level, the use case works as follows:
- - The user/network operator triggers the scale out operation from the VID portal. VID translates the operation into a call to SO;
- - SO instantiates a new VNF component and sends APPC a request for reconfiguring the VNF;
- - APPC reconfigures the VNF, without interrupting the service.
+This repository hosts the source code and scripts that implement the vLB/vDNS VNFs. The remainder of this section describes the use case at high level, using APPC as VNF controller.
+
+Scaling VF modules manually requires the user/network operator to trigger the scale out operation from the VID portal. VID translates the operation into a call to SO. Scaling VF modules in an automated manner instead requires the user/network operator to design and deploy a closed loop for scale out that includes policies (e.g. threshold-crossing conditions), guard policies that determine when it's safe to scale out, and microservices that analyze events coming from the network in order to detect the conditions that trigger scale out. Both manual and automated scale out activate the scale out workflow in the Service Orchestrator (SO). The workflow runs as follows:
+
+ - SO sends a request to APPC to run health check against the VNF;
+ - If the VNF is healthy, SO instantiates a new VF module and sends a request to APPC to reconfigure the VNF;
+ - APPC reconfigures the VNF, without interrupting the service;
+ - SO sends a request to APPC to run health check against the VNF again, to validate that the scale out operation didn't impact the running VNF.
-For this use case, we created a modified version of the vLB/vDNS, contained in vnfs/VLBMS. Unlike the vLB/vDNS VNF described before, in this modified version the vLB and the vDNS do not run any automated discovery service. Instead, the vLB has a Northbound API that allows an upstream system (e.g. ONAP) to change the internal configuration by updating the list of active vDNS instances. The Northbound API framework has been built using FD.io-based Honeycomb 1707, and supports both RESTconf and NETCONF protocols. Below is an example of vDNS instances contained in the vLB, in JSON format:
+For this use case, we created a new version of the vLB/vDNS, contained in vnfs/VLBMS. Unlike the vLB/vDNS VNF described before, in this modified version the vLB and the vDNS do not run any automated discovery service. Instead, the vLB has a Northbound API that allows an upstream system (e.g. ONAP) to change the internal configuration by updating the list of active vDNS instances (i.e. VNF reconfiguration). The Northbound API framework has been built using FD.io-based Honeycomb 1707, and supports both RESTconf and NETCONF protocols. Below is an example of vDNS instances contained in the vLB, in JSON format:
{
"vlb-business-vnf-onap-plugin": {
@@ -346,14 +338,22 @@ For this use case, we created a modified version of the vLB/vDNS, contained in v
}
}
-According to the flow described above, during an execution of the use case against the vLB/vDNS VNF:
- - The user/network operator triggers the instantiation of a new vDNS from the VID GUI;
- - VID sends the request to SO, which spins up a new vDNS and sends APPC the details about the new vDNS (i.e. ip-addr, oam-ip-addr, enabled);
- - APPC runs a NETCONF operation against the vLB to update the list of vDNS instances with the vDNS just created.
+The parameters required for VNF reconfiguration (i.e. "ip-addr", "oam-ip-addr", and "enabled" in case of vLB/vDNS) can be specified in the VID GUI when triggering the workflow manually or in CLAMP when designing a closed loop for the automated case. In both cases, the format used for specifying the parameters and their values is a JSON path. SO will use the provided paths to access the parameters' names and values in the VF module preload received from SDNC before instantiating a new VF module.
+
+VID accepts a JSON array in the "Configuration Parameter" box (see later), for example:
+
+ [{"ip-addr":"$.vf-module-topology.vf-module-parameters.param[10].value","oam-ip-addr":"$.vf-module-topology.vf-module-parameters.param[15].value","enabled":"$.vf-module-topology.vf-module-parameters.param[22].value"}]
-Although the VNF supports the update of multiple vDNS records in the same call, for Beijing release APPC updates a single vDNS instance at a time.
+CLAMP, instead, accepts a YAML file in the "Payload" box in the Policy Creation form, for example:
+
+ requestParameters: '{"usePreload":true,"userParams":[]}'
+ configurationParameters: '[{"ip-addr":"$.vf-module-topology.vf-module-parameters.param[10].value","oam-ip-addr":"$.vf-module-topology.vf-module-parameters.param[15].value","enabled":"$.vf-module-topology.vf-module-parameters.param[22].value"}]'
-The use case includes design-time and run-time operations. For Beijing release, APPC has a new component called Controller Design Tool (CDT), a design-time tool that allows users to create and on-board VNF templates into the APPC. The template describes which control operation can be executed against the VNF (e.g. scale out, health check, modify configuration, etc.), the protocols that the VNF supports, port numbers, VNF APIs, and credentials for authentication. Being VNF agnostic, APPC uses these templates to "learn" about specific VNFs and the supported operations.
+Note that Policy requires an additional object, called "requestParameters", in which "usePreload" should be set to true and the "userParams" array should be left empty.
+
+The JSON path to the parameters used for VNF reconfiguration, including array locations, should be set as described above. Finally, although the VNF supports updating multiple vDNS records in the same call, for Casablanca release APPC updates a single vDNS instance at a time.
+
+When using APPC, before running scale out, the user needs to create a VNF template using the Controller Design Tool (CDT), a design-time tool that allows users to create and on-board VNF templates into the APPC. The template describes which control operation can be executed against the VNF (e.g. scale out, health check, modify configuration, etc.), the protocols that the VNF supports, port numbers, VNF APIs, and credentials for authentication. Being VNF agnostic, APPC uses these templates to "learn" about specific VNFs and the supported operations.
CDT requires two inputs: 1) the list of parameters that APPC will receive (ip-addr, oam-ip-addr, enabled in the example above); 2) the VNF API that APPC will use to reconfigure the VNF.
@@ -389,7 +389,7 @@ Below is an example of the parameters file (yaml format), which we call paramete
request-keys: null
response-keys: null
-Here is an example of API for the vLB VNF used for this use case. We name the file after the vnf-type contained in SDNC (i.e. Vloadbalancerms..base_vlb..module-0.xml):
+Here is an example of API for the vLB VNF used for this use case. We name the file after the vnf-type contained in SDNC (i.e. Vloadbalancerms..dnsscaling..module-1):
<vlb-business-vnf-onap-plugin xmlns="urn:opendaylight:params:xml:ns:yang:vlb-business-vnf-onap-plugin">
<vdns-instances>
@@ -406,17 +406,39 @@ To create the VNF template in CDT, the following steps are required:
- Click "My VNF" Tab. Create your user ID, if necessary
- Click "Create new VNF" entering the VNF type as reported in VID or AAI, e.g. vLoadBalancerMS/vLoadBalancerMS 0
- Select "ConfigScaleOut" action
- - Create a new template identifier using the vnf-type name in SDNC as template name, e.g. Vloadbalancerms..base_vlb..module-0
+ - Create a new template identifier using the vnf-type name in SDNC as template name, e.g. Vloadbalancerms..dnsscaling..module-1
- Select protocol (Netconf-XML), VNF username (admin), and VNF port number (2831 for NETCONF)
- Click "Parameter Definition" Tab and upload the parameters (.yaml) file
- Click "Template Tab" and upload API template (.yaml) file
- Click "Reference Data" Tab
- Click "Save All to APPC"
-Finally, log into the APPC controller container and set the VNF password (ConfigScaleOut.password) in /opt/onap/appc/data/properties/appc_southbound.properties to admin. Note that in an ONAP instance created with OOM, APPC may use redundancy to make the controller resilient to failures. For Beijing, CDT only updates one replica of APPC. As such, in a multi-replica environment, the property file should be copied over to the other replicas. If redundancy is used, APPC has 3 replicas. CDT typically updates APPC-0 only, so the property file should be copied over to APPC-1 and APPC-2. This will be addressed in future ONAP releases.
+For the health check operation, we just need to specify the protocol, the port number and username of the VNF (REST, 8183, and "admin" respectively, in the case of vLB/vDNS) and the API. For the vLB/vDNS, the API is:
+
+ restconf/operational/health-vnf-onap-plugin:health-vnf-onap-plugin-state/health-check
+
+Note that we don't need to create a VNF template for health check, so the "Template" flag can be set to "N". Again, the user has to click "Save All to APPC" to update the APPC database.
+
+At this time, CDT doesn't allow users to provide the VNF password from the GUI. To update the VNF password we need to log into the APPC MariaDB container and change the password manually:
-To trigger the scale out workflow, the user/network operator can log into VID from the ONAP Portal (demo/demo123456! as username/password), select "VNF Changes" and then the "New (+)" button. The user/network operator needs to fill in the "VNF Change Form" by selecting Subscriber, Service Type, NF Role, Model Version, VNF, Scale Out from the Workflow drop down window, and APPC from the Controller drop down window. After clicking "Next", in the following window the user/network operator has to select the VF Module to scale by clicking on the VNF and then on the appropriate VF Module checkbox. Finally, by clicking on the "Schedule" button, the scale out use case will run as described above.
+ mysql -u sdnctl -p (type "gamma" when password is prompted)
+ use sdnctl;
+ UPDATE DEVICE_AUTHENTICATION SET PASSWORD='admin' WHERE VNF_TYPE='vLoadBalancerMS/vLoadBalancerMS 0'; (use your VNF type)
+To trigger the scale out workflow manually, the user/network operator can log into VID from the ONAP Portal (demo/demo123456! as username/password), select "VNF Changes" and then the "New (+)" button. The user/network operator needs to fill in the "VNF Change Form" by selecting Subscriber, Service Type, NF Role, Model Version, VNF, Scale Out from the Workflow dropdown window, and insert the JSON path array described above in the "Configuration Parameter" box. After clicking "Next", in the following window the user/network operator has to select the VF Module to scale by clicking on the VNF and then on the appropriate VF Module checkbox. Finally, by clicking on the "Schedule" button, the scale out use case will run as described above.
+
+Automated scale out requires the user to onboard a DCAE blueprint in SDC when creating the service. To design a closed loop for scale out, the user needs to access the CLAMP GUI (https://clamp.api.simpledemo.onap.org:30258/designer/index.html) and execute the following operations:
+- Click the "Closed loop" dropdown window and select "Open CL"
+- Select the closed loop model and click "OK"
+- In the next screen, click the "Policy" box to create a policy for closed loop, including guard policies
+- After creating the policies, click "TCA" and review the blueprint uploaded during service creation and distributed by SDC to CLAMP
+- Click the "Manage" dropdown and then "Submit" to push the policies to the Policy Engine
+- From the same "Manage" dropdown, click "Deploy" to deploy the TCA blueprint to DCAE
+
+The vLB/vDNS VNF generates traffic and reports metrics to the VES collector in the DCAE platform. The number of incoming packets to the vLB is used to evaluate the policy defined for closed loop. If the provided threshold is crossed, DCAE generates an event that reaches the Policy Engine, which in turn activates the scale out closed loop described above.
+
+For more information about scale out, known issues and resolution, and material used for running the use case, please look at the wiki page: https://wiki.onap.org/display/DW/Running+Scale+Out+Use+Case+for+Casablanca
+
ONAP Use Cases HEAT Templates
---
@@ -459,7 +481,7 @@ The change management workflow is defined as a composition of building blocks th
- The CM workflow for the in-place software upgrade is defined and executed by the service orchestrator (SO).
- A&AI is used to lock/unlock the NF instance
- - The pre/post health checks and software upgrade execution are implemented in App-C (L4+ NFs) and SDN-C (L1-L3 NFs) by leveraging Ansible services to communicate with the NF instances.
+ - The pre/post health checks and software upgrade execution are implemented in APPC (L4+ NFs) and SDNC (L1-L3 NFs) by leveraging Ansible services to communicate with the NF instances.
- The user (or, operator) interfaces with the CM workflow using ONAP's VID. SO communicates with A&AI using a REST API and with the controllers SDNC/APPC via DMaaP.
We set up the use case demonstration for the software upgrade on the virtual gateway (vGW) as part of the vCPE use case in ONAP's Beijing release.
diff --git a/heat/ONAP/cloud-config/.env b/heat/ONAP/cloud-config/.env
new file mode 100644
index 00000000..cbf25f8d
--- /dev/null
+++ b/heat/ONAP/cloud-config/.env
@@ -0,0 +1,17 @@
+TAG=latest
+REGISTRY=nexus3.onap.org:10001/
+CS_HOST=yyy
+CS_AUTHENTICATE=true
+CS_USER=asdc_user
+CS_PASSWORD=Aa1234%^!
+WF_BE_INIT_CS_PORT=9160
+WF_BE_JAVA_OPTIONS=-Xdebug -agentlib:jdwp=transport=dt_socket,address=7001,server=y,suspend=n -Xmx1536m -Xms1536m
+WF_BE_CS_PORT=9042
+WF_BE_DEBUG_PORT=7001
+SDC_PROTOCOL=HTTP
+SDC_HOST=yyy
+SDC_USER=workflow
+SDC_PASSWORD=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U
+WF_FE_JAVA_OPTIONS=-Xdebug -agentlib:jdwp=transport=dt_socket,address=7000,server=y,suspend=n -Xmx1536m -Xms1536m
+WF_FE_HOST_PORT=8184
+WF_FE_DEBUG_PORT=7000
\ No newline at end of file
diff --git a/heat/ONAP/cloud-config/docker-compose.yml b/heat/ONAP/cloud-config/docker-compose.yml
new file mode 100644
index 00000000..4aedf61a
--- /dev/null
+++ b/heat/ONAP/cloud-config/docker-compose.yml
@@ -0,0 +1,41 @@
+version: '3'
+services:
+
+ workflow-cassandra-init:
+ image: "${REGISTRY}onap/workflow-init:${TAG}"
+ environment:
+ - CS_HOST=${CS_HOST}
+ - CS_AUTHENTICATE
+ - CS_USER
+ - CS_PASSWORD
+ - CS_PORT=${WF_BE_INIT_CS_PORT}
+
+ workflow-backend:
+ depends_on:
+ - workflow-cassandra-init
+ image: "${REGISTRY}onap/workflow-backend:${TAG}"
+ ports:
+ - "5603:5603"
+ - "${WF_BE_DEBUG_PORT}:${WF_BE_DEBUG_PORT}"
+ environment:
+ - JAVA_OPTIONS=${WF_BE_JAVA_OPTIONS}
+ - CS_HOSTS=${CS_HOST}
+ - CS_USER
+ - CS_PASSWORD
+ - CS_PORT=${WF_BE_CS_PORT}
+ - SDC_PROTOCOL
+ - SDC_ENDPOINT="${SDC_HOST}:8080"
+ - SDC_USER
+ - SDC_PASSWORD
+
+ workflow-frontend:
+ depends_on:
+ - workflow-cassandra-init
+ - workflow-backend
+ image: "${REGISTRY}onap/workflow-frontend:${TAG}"
+ ports:
+ - "${WF_FE_HOST_PORT}:8080"
+ - "${WF_FE_DEBUG_PORT}:${WF_FE_DEBUG_PORT}"
+ environment:
+ - JAVA_OPTIONS=${WF_FE_JAVA_OPTIONS}
+ - BACKEND=http://workflow-backend:8080
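The compose file above resolves its ${...} references from the .env file added earlier. A quick sketch for inspecting the resolved configuration before bringing the services up, assuming docker-compose is on the PATH (the install script below places it under /opt/docker) and the command is run from heat/ONAP/cloud-config, where .env and docker-compose.yml live:

    docker-compose config              # print the compose file with .env variables substituted
    docker-compose config --services   # list workflow-cassandra-init, workflow-backend and workflow-frontend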
diff --git a/heat/ONAP/cloud-config/sdc_install.sh b/heat/ONAP/cloud-config/sdc_install.sh
index caeff2f4..bf6b5929 100644
--- a/heat/ONAP/cloud-config/sdc_install.sh
+++ b/heat/ONAP/cloud-config/sdc_install.sh
@@ -52,4 +52,15 @@ EOF
# Run docker containers. For openstack Ubuntu 16.04 images this will run as a service after the VM has restarted
./sdc_vm_init.sh
-./sdc_wfd_vm_init.sh
+
+#Install docker-compose for workflow installation
+mkdir /opt/docker
+curl -L https://github.com/docker/compose/releases/download/1.23.1/docker-compose-`uname -s`-`uname -m` > /opt/docker/docker-compose
+chmod +x /opt/docker/docker-compose
+
+#Prepare env for docker compose
+IP_ADDRESS=$(cat /opt/config/private_ip.txt)
+sed -i "s/yyy/$IP_ADDRESS/g" .env
+
+#Setup sdc workflow using docker compose
+docker-compose up -d
\ No newline at end of file
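A short sketch for verifying the result once sdc_install.sh has run; the container name filter is an assumption based on the compose service names above, and the commands are run from the directory holding docker-compose.yml and .env:

    /opt/docker/docker-compose ps            # status of the three workflow services
    docker ps --filter "name=workflow"       # confirm the backend and frontend containers stay up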
diff --git a/heat/ONAP/cloud-config/sdc_wfd_vm_init.sh b/heat/ONAP/cloud-config/sdc_wfd_vm_init.sh
deleted file mode 100644
index bcbcb4ac..00000000
--- a/heat/ONAP/cloud-config/sdc_wfd_vm_init.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-NEXUS_USERNAME=$(cat /opt/config/nexus_username.txt)
-NEXUS_PASSWD=$(cat /opt/config/nexus_password.txt)
-NEXUS_DOCKER_REPO=$(cat /opt/config/nexus_docker_repo.txt)
-RELEASE=$(cat /opt/config/sdc_wfd_docker.txt)
-
-source /opt/config/onap_ips.txt
-
-# pull sdc-workflow-designer docker image
-docker login -u $NEXUS_USERNAME -p $NEXUS_PASSWD $NEXUS_DOCKER_REPO
-docker pull $NEXUS_DOCKER_REPO/onap/sdc/sdc-workflow-designer:$RELEASE
-
-docker rm -f sdc-workflow-designer
-
-
-# setup sdc-workflow-designer docker image
-docker run --detach --name sdc-workflow-designer --ulimit memlock=-1:-1 --memory 1g --memory-swap=1g --ulimit nofile=4096:100000 --publish 9527:8080 $NEXUS_DOCKER_REPO/onap/sdc/sdc-workflow-designer:$RELEASE
diff --git a/heat/ONAP/onap_openstack.yaml b/heat/ONAP/onap_openstack.yaml
index decb41c6..3cc01e76 100644
--- a/heat/ONAP/onap_openstack.yaml
+++ b/heat/ONAP/onap_openstack.yaml
@@ -1494,9 +1494,6 @@ resources:
- path: /opt/sdc_vm_init.sh
permissions: '0755'
content: { get_file: cloud-config/sdc_vm_init.sh }
- - path: /opt/sdc_wfd_vm_init.sh
- permissions: '0755'
- content: { get_file: cloud-config/sdc_wfd_vm_init.sh }
- path: /etc/init.d/serv.sh
permissions: '0755'
content:
diff --git a/heat/vFW/base_vfw.env b/heat/vFW/base_vfw.env
index 269bb097..83ae97b4 100644
--- a/heat/vFW/base_vfw.env
+++ b/heat/vFW/base_vfw.env
@@ -29,6 +29,3 @@ parameters:
pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQXYJYYi3/OUZXUiCYWdtc7K0m5C0dJKVxPG0eI8EWZrEHYdfYe6WoTSDJCww+1qlBSpA5ac/Ba4Wn9vh+lR1vtUKkyIC/nrYb90ReUd385Glkgzrfh5HdR5y5S2cL/Frh86lAn9r6b3iWTJD8wBwXFyoe1S2nMTOIuG4RPNvfmyCTYVh8XTCCE8HPvh3xv2r4egawG1P4Q4UDwk+hDBXThY2KS8M5/8EMyxHV0ImpLbpYCTBA6KYDIRtqmgS6iKyy8v2D1aSY5mc9J0T5t9S2Gv+VZQNWQDDKNFnxqYaAo1uEoq/i1q63XC5AD3ckXb2VT6dp23BQMdDfbHyUWfJN
cloud_env: openstack
sec_group: PUT THE ONAP SECURITY GROUP HERE
- sdnc_model_name: vFW_spinup
- sdnc_model_version: 1.0.0
- sdnc_artifact_name: vFW_vNF_Artifact
diff --git a/heat/vFW/base_vfw.yaml b/heat/vFW/base_vfw.yaml
index 8df04956..00e03ce4 100644
--- a/heat/vFW/base_vfw.yaml
+++ b/heat/vFW/base_vfw.yaml
@@ -250,7 +250,7 @@ resources:
- port: { get_resource: vfw_private_0_port }
- port: { get_resource: vfw_private_1_port }
- port: { get_resource: vfw_private_2_port }
- metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }, sdnc_model_name: { get_param: sdnc_model_name }, sdnc_model_version: { get_param: sdnc_model_version }, sdnc_artifact_name: { get_param: sdnc_artifact_name }}
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
user_data_format: RAW
user_data:
str_replace:
@@ -324,7 +324,7 @@ resources:
- network: { get_param: public_net_id }
- port: { get_resource: vpg_private_0_port }
- port: { get_resource: vpg_private_1_port }
- metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }, sdnc_model_name: { get_param: sdnc_model_name }, sdnc_model_version: { get_param: sdnc_model_version }, sdnc_artifact_name: { get_param: sdnc_artifact_name }}
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
user_data_format: RAW
user_data:
str_replace:
@@ -396,7 +396,7 @@ resources:
- network: { get_param: public_net_id }
- port: { get_resource: vsn_private_0_port }
- port: { get_resource: vsn_private_1_port }
- metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }, sdnc_model_name: { get_param: sdnc_model_name }, sdnc_model_version: { get_param: sdnc_model_version }, sdnc_artifact_name: { get_param: sdnc_artifact_name }}
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
user_data_format: RAW
user_data:
str_replace:
diff --git a/heat/vFW_HPA/vFW/base_vfw.env b/heat/vFW_HPA/vFW/base_vfw.env
index cc53e413..01fd24a0 100644
--- a/heat/vFW_HPA/vFW/base_vfw.env
+++ b/heat/vFW_HPA/vFW/base_vfw.env
@@ -38,6 +38,3 @@ parameters:
pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQXYJYYi3/OUZXUiCYWdtc7K0m5C0dJKVxPG0eI8EWZrEHYdfYe6WoTSDJCww+1qlBSpA5ac/Ba4Wn9vh+lR1vtUKkyIC/nrYb90ReUd385Glkgzrfh5HdR5y5S2cL/Frh86lAn9r6b3iWTJD8wBwXFyoe1S2nMTOIuG4RPNvfmyCTYVh8XTCCE8HPvh3xv2r4egawG1P4Q4UDwk+hDBXThY2KS8M5/8EMyxHV0ImpLbpYCTBA6KYDIRtqmgS6iKyy8v2D1aSY5mc9J0T5t9S2Gv+VZQNWQDDKNFnxqYaAo1uEoq/i1q63XC5AD3ckXb2VT6dp23BQMdDfbHyUWfJN
cloud_env: openstack
sec_group: PUT THE ONAP SECURITY GROUP HERE
- sdnc_model_name: vFW_spinup
- sdnc_model_version: 1.0.0
- sdnc_artifact_name: vFW_vNF_Artifact
diff --git a/heat/vFW_HPA/vFW/base_vfw.yaml b/heat/vFW_HPA/vFW/base_vfw.yaml
index df3765ef..4b6d6488 100644
--- a/heat/vFW_HPA/vFW/base_vfw.yaml
+++ b/heat/vFW_HPA/vFW/base_vfw.yaml
@@ -112,24 +112,31 @@ parameters:
vfw_private_0_port_vnic_type:
type: string
description: vfw port 0 vnic type (normal, direct)
+ default: normal
vfw_private_1_port_vnic_type:
type: string
description: vfw port 1 vnic type (normal, direct)
+ default: normal
vfw_private_2_port_vnic_type:
type: string
description: vfw port 2 vnic type (normal, direct)
+ default: normal
vsn_private_0_port_vnic_type:
type: string
description: vsn port 0 vnic type (normal, direct)
+ default: normal
vsn_private_1_port_vnic_type:
type: string
description: vsn port 1 vnic type (normal, direct)
+ default: normal
vpg_private_0_port_vnic_type:
type: string
description: vpg port 0 vnic type (normal, direct)
+ default: normal
vpg_private_1_port_vnic_type:
type: string
description: vpg port 1 vnic type (normal, direct)
+ default: normal
vfw_name_0:
type: string
label: vFirewall name
@@ -282,7 +289,7 @@ resources:
- port: { get_resource: vfw_private_0_port }
- port: { get_resource: vfw_private_1_port }
- port: { get_resource: vfw_private_2_port }
- metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }, sdnc_model_name: { get_param: sdnc_model_name }, sdnc_model_version: { get_param: sdnc_model_version }, sdnc_artifact_name: { get_param: sdnc_artifact_name }}
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
user_data_format: RAW
user_data:
str_replace:
@@ -358,7 +365,7 @@ resources:
- network: { get_param: public_net_id }
- port: { get_resource: vpg_private_0_port }
- port: { get_resource: vpg_private_1_port }
- metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }, sdnc_model_name: { get_param: sdnc_model_name }, sdnc_model_version: { get_param: sdnc_model_version }, sdnc_artifact_name: { get_param: sdnc_artifact_name }}
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
user_data_format: RAW
user_data:
str_replace:
@@ -432,7 +439,7 @@ resources:
- network: { get_param: public_net_id }
- port: { get_resource: vsn_private_0_port }
- port: { get_resource: vsn_private_1_port }
- metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }, sdnc_model_name: { get_param: sdnc_model_name }, sdnc_model_version: { get_param: sdnc_model_version }, sdnc_artifact_name: { get_param: sdnc_artifact_name }}
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
user_data_format: RAW
user_data:
str_replace:
diff --git a/heat/vFW_HPA/vFWCL/vFWSNK/base_vfw.yaml b/heat/vFW_HPA/vFWCL/vFWSNK/base_vfw.yaml
index 13045840..c46d8996 100644
--- a/heat/vFW_HPA/vFWCL/vFWSNK/base_vfw.yaml
+++ b/heat/vFW_HPA/vFWCL/vFWSNK/base_vfw.yaml
@@ -112,18 +112,23 @@ parameters:
vfw_private_0_port_vnic_type:
type: string
description: vfw port 0 vnic type (normal, direct)
+ default: normal
vfw_private_1_port_vnic_type:
type: string
description: vfw port 1 vnic type (normal, direct)
+ default: normal
vfw_private_2_port_vnic_type:
type: string
description: vfw port 2 vnic type (normal, direct)
+ default: normal
vsn_private_0_port_vnic_type:
type: string
description: vsn port 0 vnic type (normal, direct)
+ default: normal
vsn_private_1_port_vnic_type:
type: string
description: vsn port 1 vnic type (normal, direct)
+ default: normal
vfw_name_0:
type: string
label: vFirewall name
diff --git a/heat/vFW_HPA/vFWCL/vPKG/base_vpkg.yaml b/heat/vFW_HPA/vFWCL/vPKG/base_vpkg.yaml
index 20d76a28..8ce1225b 100644
--- a/heat/vFW_HPA/vFWCL/vPKG/base_vpkg.yaml
+++ b/heat/vFW_HPA/vFWCL/vPKG/base_vpkg.yaml
@@ -92,9 +92,11 @@ parameters:
vpg_private_0_port_vnic_type:
type: string
description: vpg port 0 vnic type (normal, direct)
+ default: normal
vpg_private_1_port_vnic_type:
type: string
description: vpg port 1 vnic type (normal, direct)
+ default: normal
vpg_name_0:
type: string
label: vPacketGenerator name
diff --git a/tosca/vCPE/Artifacts/Deployment/Other/authorized_keys b/tosca/vCPE/Artifacts/Deployment/OTHER/authorized_keys
index 444ca79c..444ca79c 100644
--- a/tosca/vCPE/Artifacts/Deployment/Other/authorized_keys
+++ b/tosca/vCPE/Artifacts/Deployment/OTHER/authorized_keys
diff --git a/tosca/vCPE/Artifacts/Deployment/Other/id_rsa b/tosca/vCPE/Artifacts/Deployment/OTHER/id_rsa
index 60597577..60597577 100644
--- a/tosca/vCPE/Artifacts/Deployment/Other/id_rsa
+++ b/tosca/vCPE/Artifacts/Deployment/OTHER/id_rsa
diff --git a/tosca/vCPE/Artifacts/Deployment/Other/id_rsa.pub b/tosca/vCPE/Artifacts/Deployment/OTHER/id_rsa.pub
index 444ca79c..444ca79c 100644
--- a/tosca/vCPE/Artifacts/Deployment/Other/id_rsa.pub
+++ b/tosca/vCPE/Artifacts/Deployment/OTHER/id_rsa.pub
diff --git a/tosca/vCPE/Artifacts/Deployment/Other/image b/tosca/vCPE/Artifacts/Deployment/OTHER/image
index f31d15df..f31d15df 100644
--- a/tosca/vCPE/Artifacts/Deployment/Other/image
+++ b/tosca/vCPE/Artifacts/Deployment/OTHER/image
diff --git a/tosca/vCPE/infra/MainServiceTemplate.yaml b/tosca/vCPE/infra/MainServiceTemplate.yaml
index 728cb21a..f2c63618 100644
--- a/tosca/vCPE/infra/MainServiceTemplate.yaml
+++ b/tosca/vCPE/infra/MainServiceTemplate.yaml
@@ -52,9 +52,9 @@ topology_template:
description: dcae collector ip
default: 10.0.4.102
dcae_collector_port:
- type: integer
+ type: string
description: dcae collector port
- default: 8080
+ default: "8080"
nexus_artifact_repo:
type: string
description: Root URL for the Nexus repository for Maven artifacts
@@ -299,7 +299,7 @@ topology_template:
max_number_of_instances: 1
watchdog: none
inject_files: #TODO SDC Bug
- source_path: ../Artifacts/Deployment/Other/authorized_keys #SSH authorized_keys
+ source_path: ../Artifacts/Deployment/OTHER/authorized_keys #SSH authorized_keys
dest_path: /home/ubuntu/.ssh/authorized_keys
meta_data:
vnf_id: { get_input: vnf_id }
@@ -359,7 +359,7 @@ topology_template:
artifacts:
sw_image: #TODO need to put glance image name here
#file: { get_input: vcpe_image_name }
- file: ../Artifacts/Deployment/Other/image
+ file: ../Artifacts/Deployment/OTHER/image
type: tosca.artifacts.nfv.SwImage
# vdns related
@@ -427,7 +427,7 @@ topology_template:
max_number_of_instances: 1
watchdog: none
inject_files: # TODO SDC Bug
- source_path: ../Artifacts/Deployment/Other/authorized_keys #SSH authorized_keys
+ source_path: ../Artifacts/Deployment/OTHER/authorized_keys #SSH authorized_keys
dest_path: /home/ubuntu/.ssh/authorized_keys
meta_data:
vnf_id: { get_input: vnf_id }
@@ -483,7 +483,7 @@ topology_template:
artifacts:
sw_image: #TODO need to put glance image name here
#file: { get_input: vcpe_image_name }
- file: ../Artifacts/Deployment/Other/image
+ file: ../Artifacts/Deployment/OTHER/image
type: tosca.artifacts.nfv.SwImage
# vdhcp related
@@ -551,7 +551,7 @@ topology_template:
max_number_of_instances: 1
watchdog: none
inject_files: #SDC Bug
- source_path: ../Artifacts/Deployment/Other/authorized_keys #SSH authorized_keys
+ source_path: ../Artifacts/Deployment/OTHER/authorized_keys #SSH authorized_keys
dest_path: /home/ubuntu/.ssh/authorized_keys
meta_data:
vnf_id: { get_input: vnf_id }
@@ -611,7 +611,7 @@ topology_template:
artifacts:
sw_image: #TODO need to put glance image name here
#file: { get_input: vcpe_image_name }
- file: ../Artifacts/Deployment/Other/image
+ file: ../Artifacts/Deployment/OTHER/image
type: tosca.artifacts.nfv.SwImage
# vweb related
@@ -679,7 +679,7 @@ topology_template:
max_number_of_instances: 1
watchdog: none
inject_files: # TODO SDC bug
- source_path: ../Artifacts/Deployment/Other/authorized_keys #SSH authorized_keys
+ source_path: ../Artifacts/Deployment/OTHER/authorized_keys #SSH authorized_keys
dest_path: /home/ubuntu/.ssh/authorized_keys
meta_data:
vnf_id: { get_input: vnf_id }
@@ -735,6 +735,6 @@ topology_template:
artifacts:
sw_image: #TODO need to put glance image name here
#file: { get_input: vcpe_image_name }
- file: ../Artifacts/Deployment/Other/image
+ file: ../Artifacts/Deployment/OTHER/image
type: tosca.artifacts.nfv.SwImage
diff --git a/tosca/vCPE/infra/MainServiceTemplate_sriov.yaml b/tosca/vCPE/infra/MainServiceTemplate_sriov.yaml
index b362d4c2..31c18f5c 100644
--- a/tosca/vCPE/infra/MainServiceTemplate_sriov.yaml
+++ b/tosca/vCPE/infra/MainServiceTemplate_sriov.yaml
@@ -51,9 +51,9 @@ topology_template:
description: dcae collector ip
default: 10.0.4.102
dcae_collector_port:
- type: integer
+ type: string
description: dcae collector port
- default: 8080
+ default: "8080"
nexus_artifact_repo:
type: string
description: Root URL for the Nexus repository for Maven artifacts
@@ -298,7 +298,7 @@ topology_template:
max_number_of_instances: 1
watchdog: none
inject_files: #TODO SDC Bug
- source_path: ../Artifacts/Deployment/Other/authorized_keys #SSH authorized_keys
+ source_path: ../Artifacts/Deployment/OTHER/authorized_keys #SSH authorized_keys
dest_path: /home/ubuntu/.ssh/authorized_keys
meta_data:
vnf_id: { get_input: vnf_id }
@@ -358,7 +358,7 @@ topology_template:
artifacts:
sw_image: #TODO need to put glance image name here
#file: { get_input: vcpe_image_name }
- file: ../Artifacts/Deployment/Other/image
+ file: ../Artifacts/Deployment/OTHER/image
type: tosca.artifacts.nfv.SwImage
# vdns related
@@ -426,7 +426,7 @@ topology_template:
max_number_of_instances: 1
watchdog: none
inject_files: # TODO SDC Bug
- source_path: ../Artifacts/Deployment/Other/authorized_keys #SSH authorized_keys
+ source_path: ../Artifacts/Deployment/OTHER/authorized_keys #SSH authorized_keys
dest_path: /home/ubuntu/.ssh/authorized_keys
meta_data:
vnf_id: { get_input: vnf_id }
@@ -482,7 +482,7 @@ topology_template:
artifacts:
sw_image: #TODO need to put glance image name here
#file: { get_input: vcpe_image_name }
- file: ../Artifacts/Deployment/Other/image
+ file: ../Artifacts/Deployment/OTHER/image
type: tosca.artifacts.nfv.SwImage
# vdhcp related
@@ -550,7 +550,7 @@ topology_template:
max_number_of_instances: 1
watchdog: none
inject_files: #SDC Bug
- source_path: ../Artifacts/Deployment/Other/authorized_keys #SSH authorized_keys
+ source_path: ../Artifacts/Deployment/OTHER/authorized_keys #SSH authorized_keys
dest_path: /home/ubuntu/.ssh/authorized_keys
meta_data:
vnf_id: { get_input: vnf_id }
@@ -610,7 +610,7 @@ topology_template:
artifacts:
sw_image: #TODO need to put glance image name here
#file: { get_input: vcpe_image_name }
- file: ../Artifacts/Deployment/Other/image
+ file: ../Artifacts/Deployment/OTHER/image
type: tosca.artifacts.nfv.SwImage
# vweb related
@@ -678,7 +678,7 @@ topology_template:
max_number_of_instances: 1
watchdog: none
inject_files: # TODO SDC bug
- source_path: ../Artifacts/Deployment/Other/authorized_keys #SSH authorized_keys
+ source_path: ../Artifacts/Deployment/OTHER/authorized_keys #SSH authorized_keys
dest_path: /home/ubuntu/.ssh/authorized_keys
meta_data:
vnf_id: { get_input: vnf_id }
@@ -734,6 +734,6 @@ topology_template:
artifacts:
sw_image: #TODO need to put glance image name here
#file: { get_input: vcpe_image_name }
- file: ../Artifacts/Deployment/Other/image
+ file: ../Artifacts/Deployment/OTHER/image
type: tosca.artifacts.nfv.SwImage
diff --git a/tosca/vCPE/vbng/MainServiceTemplate.yaml b/tosca/vCPE/vbng/MainServiceTemplate.yaml
index a4666149..3bff4a38 100644
--- a/tosca/vCPE/vbng/MainServiceTemplate.yaml
+++ b/tosca/vCPE/vbng/MainServiceTemplate.yaml
@@ -89,9 +89,9 @@ topology_template:
description: dcae collector ip
default: 10.0.4.102
dcae_collector_port:
- type: integer
+ type: string
description: dcae collector port
- default: 8080
+ default: "8080"
pub_key:
type: string
description: ssh public key
@@ -333,7 +333,7 @@ topology_template:
max_number_of_instances: 1
watchdog: none
inject_files: #TODO SDC bug
- source_path: ../Artifacts/Deployment/Other/authorized_keys #SSH authorized_keys
+ source_path: ../Artifacts/Deployment/OTHER/authorized_keys #SSH authorized_keys
dest_path: /home/ubuntu/.ssh/authorized_keys
meta_data:
vnf_id: { get_input: vnf_id }
@@ -408,5 +408,5 @@ topology_template:
artifacts:
sw_image: #TODO need to put glance image name here
#file: { get_input: vcpe_image_name }
- file: ../Artifacts/Deployment/Other/image
+ file: ../Artifacts/Deployment/OTHER/image
type: tosca.artifacts.nfv.SwImage
diff --git a/tosca/vCPE/vbng/MainServiceTemplate_sriov.yaml b/tosca/vCPE/vbng/MainServiceTemplate_sriov.yaml
index cc81addf..f04cd98a 100644
--- a/tosca/vCPE/vbng/MainServiceTemplate_sriov.yaml
+++ b/tosca/vCPE/vbng/MainServiceTemplate_sriov.yaml
@@ -87,9 +87,9 @@ topology_template:
description: dcae collector ip
default: 10.0.4.102
dcae_collector_port:
- type: integer
+ type: string
description: dcae collector port
- default: 8080
+ default: "8080"
pub_key:
type: string
description: ssh public key
@@ -343,7 +343,7 @@ topology_template:
max_number_of_instances: 1
watchdog: none
inject_files: #TODO SDC bug
- source_path: ../Artifacts/Deployment/Other/authorized_keys #SSH authorized_keys
+ source_path: ../Artifacts/Deployment/OTHER/authorized_keys #SSH authorized_keys
dest_path: /home/ubuntu/.ssh/authorized_keys
meta_data:
vnf_id: { get_input: vnf_id }
@@ -418,5 +418,5 @@ topology_template:
artifacts:
sw_image: #TODO need to put glance image name here
#file: { get_input: vcpe_image_name }
- file: ../Artifacts/Deployment/Other/image
+ file: ../Artifacts/Deployment/OTHER/image
type: tosca.artifacts.nfv.SwImage
diff --git a/tosca/vCPE/vbrgemu/MainServiceTemplate.yaml b/tosca/vCPE/vbrgemu/MainServiceTemplate.yaml
index 3f132b2b..ba6a14ec 100644
--- a/tosca/vCPE/vbrgemu/MainServiceTemplate.yaml
+++ b/tosca/vCPE/vbrgemu/MainServiceTemplate.yaml
@@ -206,7 +206,7 @@ topology_template:
max_number_of_instances: 1
watchdog: none
inject_files: #TODO SDC bug
- source_path: ../Artifacts/Deployment/Other/authorized_keys #SSH authorized_keys
+ source_path: ../Artifacts/Deployment/OTHER/authorized_keys #SSH authorized_keys
dest_path: /home/ubuntu/.ssh/authorized_keys
meta_data:
vnf_id: { get_input: vnf_id }
@@ -262,5 +262,5 @@ topology_template:
artifacts:
sw_image: #TODO need to put glance image name here
#file: { get_input: vcpe_image_name }
- file: ../Artifacts/Deployment/Other/image
+ file: ../Artifacts/Deployment/OTHER/image
type: tosca.artifacts.nfv.SwImage
diff --git a/tosca/vCPE/vbrgemu/MainServiceTemplate_sriov.yaml b/tosca/vCPE/vbrgemu/MainServiceTemplate_sriov.yaml
index 241ace07..60161908 100644
--- a/tosca/vCPE/vbrgemu/MainServiceTemplate_sriov.yaml
+++ b/tosca/vCPE/vbrgemu/MainServiceTemplate_sriov.yaml
@@ -265,5 +265,5 @@ topology_template:
artifacts:
sw_image: #TODO need to put glance image name here
#file: { get_input: vcpe_image_name }
- file: ../Artifacts/Deployment/Other/image
+ file: ../Artifacts/Deployment/OTHER/image
type: tosca.artifacts.nfv.SwImage
diff --git a/tosca/vCPE/vgmux/MainServiceTemplate.yaml b/tosca/vCPE/vgmux/MainServiceTemplate.yaml
index b028db5f..fb30b005 100644
--- a/tosca/vCPE/vgmux/MainServiceTemplate.yaml
+++ b/tosca/vCPE/vgmux/MainServiceTemplate.yaml
@@ -85,9 +85,9 @@ topology_template:
description: dcae collector ip
default: 10.0.4.102
dcae_collector_port:
- type: integer
+ type: string
description: dcae collector port
- default: 8080
+ default: "8080"
pub_key:
type: string
description: ssh public key
@@ -286,7 +286,7 @@ topology_template:
max_number_of_instances: 1
watchdog: none
inject_files: #TODO SDC bug
- source_path: ../Artifacts/Deployment/Other/authorized_keys #SSH authorized_keys
+ source_path: ../Artifacts/Deployment/OTHER/authorized_keys #SSH authorized_keys
dest_path: /home/ubuntu/.ssh/authorized_keys
meta_data:
vnf_id: { get_input: vnf_id }
@@ -352,6 +352,6 @@ topology_template:
artifacts:
sw_image: #TODO need to put glance image name here
#file: { get_input: vcpe_image_name }
- file: ../Artifacts/Deployment/Other/image
+ file: ../Artifacts/Deployment/OTHER/image
type: tosca.artifacts.nfv.SwImage
diff --git a/tosca/vCPE/vgmux/MainServiceTemplate_sriov.yaml b/tosca/vCPE/vgmux/MainServiceTemplate_sriov.yaml
index 392f1051..ee4f56f8 100644
--- a/tosca/vCPE/vgmux/MainServiceTemplate_sriov.yaml
+++ b/tosca/vCPE/vgmux/MainServiceTemplate_sriov.yaml
@@ -84,9 +84,9 @@ topology_template:
description: dcae collector ip
default: 10.0.4.102
dcae_collector_port:
- type: integer
+ type: string
description: dcae collector port
- default: 8080
+ default: "8080"
pub_key:
type: string
description: ssh public key
@@ -297,7 +297,7 @@ topology_template:
max_number_of_instances: 1
watchdog: none
inject_files: #TODO SDC bug
- source_path: ../Artifacts/Deployment/Other/authorized_keys #SSH authorized_keys
+ source_path: ../Artifacts/Deployment/OTHER/authorized_keys #SSH authorized_keys
dest_path: /home/ubuntu/.ssh/authorized_keys
meta_data:
vnf_id: { get_input: vnf_id }
@@ -363,6 +363,6 @@ topology_template:
artifacts:
sw_image: #TODO need to put glance image name here
#file: { get_input: vcpe_image_name }
- file: ../Artifacts/Deployment/Other/image
+ file: ../Artifacts/Deployment/OTHER/image
type: tosca.artifacts.nfv.SwImage
diff --git a/tosca/vCPE/vgw/MainServiceTemplate.yaml b/tosca/vCPE/vgw/MainServiceTemplate.yaml
index 4925f388..2676007f 100644
--- a/tosca/vCPE/vgw/MainServiceTemplate.yaml
+++ b/tosca/vCPE/vgw/MainServiceTemplate.yaml
@@ -69,9 +69,9 @@ topology_template:
description: dcae collector ip
default: 10.0.4.102
dcae_collector_port:
- type: integer
+ type: string
description: dcae collector port
- default: 8080
+ default: "8080"
pub_key:
type: string
description: ssh public key
@@ -97,9 +97,9 @@ topology_template:
description: IP address of vGMUX
default: 10.5.0.20
vg_vgmux_tunnel_vni:
- type: integer
+ type: string
description: vni value of vG-vGMUX vxlan tunnel
- default: 100
+ default: "100"
vnf_id:
type: string
description: The VNF ID is provided by ONAP
@@ -284,7 +284,7 @@ topology_template:
max_number_of_instances: 1
watchdog: none
inject_files: # TODO SDC BUG
- source_path: ../Artifacts/Deployment/Other/authorized_keys #SSH authorized_keys
+ source_path: ../Artifacts/Deployment/OTHER/authorized_keys #SSH authorized_keys
dest_path: /home/ubuntu/.ssh/authorized_keys
meta_data:
vnf_id: { get_input: vnf_id }
@@ -349,5 +349,5 @@ topology_template:
artifacts:
sw_image: #TODO need to put glance image name here
#file: { get_input: vcpe_image_name }
- file: ../Artifacts/Deployment/Other/image
+ file: ../Artifacts/Deployment/OTHER/image
type: tosca.artifacts.nfv.SwImage
diff --git a/tosca/vCPE/vgw/MainServiceTemplate_sriov.yaml b/tosca/vCPE/vgw/MainServiceTemplate_sriov.yaml
index 804e2c4b..13d2487f 100644
--- a/tosca/vCPE/vgw/MainServiceTemplate_sriov.yaml
+++ b/tosca/vCPE/vgw/MainServiceTemplate_sriov.yaml
@@ -67,9 +67,9 @@ topology_template:
description: dcae collector ip
default: 10.0.4.102
dcae_collector_port:
- type: integer
+ type: string
description: dcae collector port
- default: 8080
+ default: "8080"
pub_key:
type: string
description: ssh public key
@@ -95,9 +95,9 @@ topology_template:
description: IP address of vGMUX
default: 10.5.0.20
vg_vgmux_tunnel_vni:
- type: integer
+ type: string
description: vni value of vG-vGMUX vxlan tunnel
- default: 100
+ default: "100"
vnf_id:
type: string
description: The VNF ID is provided by ONAP
@@ -286,7 +286,7 @@ topology_template:
max_number_of_instances: 1
watchdog: none
inject_files: # TODO SDC BUG
- source_path: ../Artifacts/Deployment/Other/authorized_keys #SSH authorized_keys
+ source_path: ../Artifacts/Deployment/OTHER/authorized_keys #SSH authorized_keys
dest_path: /home/ubuntu/.ssh/authorized_keys
meta_data:
vnf_id: { get_input: vnf_id }
@@ -351,5 +351,5 @@ topology_template:
artifacts:
sw_image: #TODO need to put glance image name here
#file: { get_input: vcpe_image_name }
- file: ../Artifacts/Deployment/Other/image
+ file: ../Artifacts/Deployment/OTHER/image
type: tosca.artifacts.nfv.SwImage