aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--deployments/kubernetes/onap4k8s.yaml4
-rw-r--r--kud/demo/composite-firewall/firewall/.helmignore22
-rw-r--r--kud/demo/composite-firewall/firewall/Chart.yaml5
-rw-r--r--kud/demo/composite-firewall/firewall/templates/_helpers.tpl32
-rw-r--r--kud/demo/composite-firewall/firewall/templates/deployment.yaml63
-rw-r--r--kud/demo/composite-firewall/firewall/values.yaml50
-rw-r--r--kud/demo/composite-firewall/manifest.yaml4
-rw-r--r--kud/demo/composite-firewall/networks/emco-private-net.yaml18
-rw-r--r--kud/demo/composite-firewall/networks/onap-private-net-fwsink.yaml19
-rw-r--r--kud/demo/composite-firewall/networks/onap-private-net-pktgen.yaml19
-rw-r--r--kud/demo/composite-firewall/networks/protected-private-net-fwsink.yaml19
-rw-r--r--kud/demo/composite-firewall/networks/protected-private-net-pktgen.yaml19
-rw-r--r--kud/demo/composite-firewall/networks/protected-private-net.yaml18
-rw-r--r--kud/demo/composite-firewall/networks/unprotected-private-net-fwsink.yaml19
-rw-r--r--kud/demo/composite-firewall/networks/unprotected-private-net-pktgen.yaml19
-rw-r--r--kud/demo/composite-firewall/networks/unprotected-private-net.yaml18
-rw-r--r--kud/demo/composite-firewall/override_values.yaml1
-rw-r--r--kud/demo/composite-firewall/packetgen/.helmignore22
-rw-r--r--kud/demo/composite-firewall/packetgen/Chart.yaml5
-rw-r--r--kud/demo/composite-firewall/packetgen/templates/_helpers.tpl32
-rw-r--r--kud/demo/composite-firewall/packetgen/templates/deployment.yaml65
-rw-r--r--kud/demo/composite-firewall/packetgen/templates/service.yaml16
-rw-r--r--kud/demo/composite-firewall/packetgen/values.yaml57
-rw-r--r--kud/demo/composite-firewall/sink/.helmignore22
-rw-r--r--kud/demo/composite-firewall/sink/Chart.yaml5
-rw-r--r--kud/demo/composite-firewall/sink/templates/_helpers.tpl32
-rw-r--r--kud/demo/composite-firewall/sink/templates/configmap.yaml7
-rw-r--r--kud/demo/composite-firewall/sink/templates/deployment.yaml38
-rw-r--r--kud/demo/composite-firewall/sink/templates/service.yaml16
-rw-r--r--kud/demo/composite-firewall/sink/values.yaml61
-rw-r--r--kud/demo/firewall/values.yaml4
-rw-r--r--kud/tests/README-composite-vfw.txt122
-rwxr-xr-xkud/tests/_functions.sh31
-rw-r--r--kud/tests/cleanup-composite-vfw.sh21
-rwxr-xr-xkud/tests/ncm-test.sh53
-rw-r--r--kud/tests/vfw-test-clean-cluster.sh6
-rw-r--r--kud/tests/vfw-test-setenv.sh7
-rwxr-xr-xkud/tests/vfw-test.sh1054
38 files changed, 2013 insertions, 12 deletions
diff --git a/deployments/kubernetes/onap4k8s.yaml b/deployments/kubernetes/onap4k8s.yaml
index 1bd4ce94..c27b9042 100644
--- a/deployments/kubernetes/onap4k8s.yaml
+++ b/deployments/kubernetes/onap4k8s.yaml
@@ -31,6 +31,7 @@ spec:
ports:
- name: http
port: 9015
+ nodePort: 31298
protocol: TCP
targetPort: 9015
@@ -102,6 +103,7 @@ spec:
ports:
- name: http
port: 9031
+ nodePort: 32737
protocol: TCP
targetPort: 9031
@@ -246,6 +248,7 @@ spec:
targetPort: 9053
- name: http
port: 9051
+ nodePort: 31181
protocol: TCP
targetPort: 9051
@@ -318,6 +321,7 @@ spec:
ports:
- name: http
port: 9061
+ nodePort: 31856
protocol: TCP
targetPort: 9061
diff --git a/kud/demo/composite-firewall/firewall/.helmignore b/kud/demo/composite-firewall/firewall/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/kud/demo/composite-firewall/firewall/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/demo/composite-firewall/firewall/Chart.yaml b/kud/demo/composite-firewall/firewall/Chart.yaml
new file mode 100644
index 00000000..18201ddd
--- /dev/null
+++ b/kud/demo/composite-firewall/firewall/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart to deploy Firewall app for vFirewall
+name: firewall
+version: 0.1.0
diff --git a/kud/demo/composite-firewall/firewall/templates/_helpers.tpl b/kud/demo/composite-firewall/firewall/templates/_helpers.tpl
new file mode 100644
index 00000000..7593e779
--- /dev/null
+++ b/kud/demo/composite-firewall/firewall/templates/_helpers.tpl
@@ -0,0 +1,32 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "firewall.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "firewall.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "firewall.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/kud/demo/composite-firewall/firewall/templates/deployment.yaml b/kud/demo/composite-firewall/firewall/templates/deployment.yaml
new file mode 100644
index 00000000..632a50bf
--- /dev/null
+++ b/kud/demo/composite-firewall/firewall/templates/deployment.yaml
@@ -0,0 +1,63 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "firewall.fullname" . }}
+ labels:
+ release: {{ .Release.Name }}
+ app: {{ include "firewall.name" . }}
+ chart: {{ .Chart.Name }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ include "firewall.name" . }}
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: {{ include "firewall.name" . }}
+ release: {{ .Release.Name }}
+ annotations:
+ VirtletLibvirtCPUSetting: |
+ mode: host-model
+ VirtletCloudInitUserData: |
+ ssh_pwauth: True
+ users:
+ - name: admin
+ gecos: User
+ primary-group: admin
+ groups: users
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ lock_passwd: false
+ passwd: "$6$rounds=4096$QA5OCKHTE41$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
+ runcmd:
+ - export demo_artifacts_version={{ .Values.global.demoArtifactsVersion }}
+ - export vfw_private_ip_0={{ .Values.global.vfwPrivateIp0 }}
+ - export vsn_private_ip_0={{ .Values.global.vsnPrivateIp0 }}
+ - export protected_net_cidr={{ .Values.global.protectedNetCidr }}
+ - export dcae_collector_ip={{ .Values.global.dcaeCollectorIp }}
+ - export dcae_collector_port={{ .Values.global.dcaeCollectorPort }}
+ - export protected_net_gw={{ .Values.global.protectedNetGw }}
+ - export protected_private_net_cidr={{ .Values.global.protectedPrivateNetCidr }}
+ - wget -O - https://git.onap.org/multicloud/k8s/plain/kud/tests/vFW/firewall | sudo -E bash
+ VirtletRootVolumeSize: 5Gi
+ kubernetes.io/target-runtime: virtlet.cloud
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: {{ .Values.resources.memory }}
diff --git a/kud/demo/composite-firewall/firewall/values.yaml b/kud/demo/composite-firewall/firewall/values.yaml
new file mode 100644
index 00000000..3a6c8983
--- /dev/null
+++ b/kud/demo/composite-firewall/firewall/values.yaml
@@ -0,0 +1,50 @@
+# Default values for firewall.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+ repository: virtlet.cloud/ubuntu/16.04
+ tag: latest
+ pullPolicy: IfNotPresent
+
+nameOverride: ""
+fullnameOverride: ""
+
+resources:
+ memory: 4Gi
+
+#global vars for parent and subcharts.
+global:
+
+ #Networks
+ unprotectedNetworkName: unprotected-private-net
+ protectedPrivateNetCidr: 192.168.10.0/24
+
+ emcoPrivateNetworkName: emco-private-net
+
+ protectedNetworkName: protected-private-net
+ protectedNetCidr: 192.168.20.0/24
+ protectedNetGwIp: 192.168.20.100
+ protectedNetGw: 192.168.20.100/24
+
+ #vFirewall container
+ vfwPrivateIp0: 192.168.10.3
+ vfwPrivateIp1: 192.168.20.2
+ vfwPrivateIp2: 10.10.20.3
+
+ #Packetgen container
+ vpgPrivateIp0: 192.168.10.200
+ vpgPrivateIp1: 10.10.20.200
+
+ #Sink container
+ vsnPrivateIp0: 192.168.20.3
+ vsnPrivateIp1: 10.10.20.4
+
+ #########
+ ovnMultusNetworkName: ovn-networkobj
+ demoArtifactsVersion: 1.5.0
+ dcaeCollectorIp: 10.0.4.1
+ dcaeCollectorPort: 8081
+
diff --git a/kud/demo/composite-firewall/manifest.yaml b/kud/demo/composite-firewall/manifest.yaml
new file mode 100644
index 00000000..4d381d02
--- /dev/null
+++ b/kud/demo/composite-firewall/manifest.yaml
@@ -0,0 +1,4 @@
+---
+version: v1
+type:
+ values: "override_values.yaml"
diff --git a/kud/demo/composite-firewall/networks/emco-private-net.yaml b/kud/demo/composite-firewall/networks/emco-private-net.yaml
new file mode 100644
index 00000000..701ef54d
--- /dev/null
+++ b/kud/demo/composite-firewall/networks/emco-private-net.yaml
@@ -0,0 +1,18 @@
+apiVersion: k8s.plugin.opnfv.org/v1alpha1
+kind: ProviderNetwork
+metadata:
+ name: emco-private-net
+spec:
+ cniType : ovn4nfv
+ ipv4Subnets:
+ - name: subnet1
+ subnet: 10.10.20.0/24
+ gateway: 10.10.20.1/24
+ providerNetType: VLAN
+ vlan:
+ vlanId: "102"
+ providerInterfaceName: eth1
+ logicalInterfaceName: eth1.102
+ vlanNodeSelector: specific
+ nodeLabelList:
+ - kubernetes.io/hostname=localhost
diff --git a/kud/demo/composite-firewall/networks/onap-private-net-fwsink.yaml b/kud/demo/composite-firewall/networks/onap-private-net-fwsink.yaml
new file mode 100644
index 00000000..c5135e93
--- /dev/null
+++ b/kud/demo/composite-firewall/networks/onap-private-net-fwsink.yaml
@@ -0,0 +1,19 @@
+apiVersion: k8s.plugin.opnfv.org/v1alpha1
+kind: ProviderNetwork
+metadata:
+ name: emco-private-net
+spec:
+ cniType : ovn4nfv
+ ipv4Subnets:
+ - name: subnet1
+ subnet: 10.10.20.0/24
+ gateway: 10.10.20.1/24
+ excludeIps: 10.10.20.100..10.10.20.255
+ providerNetType: VLAN
+ vlan:
+ vlanId: "102"
+ providerInterfaceName: eth1
+ logicalInterfaceName: eth1.102
+ vlanNodeSelector: specific
+ nodeLabelList:
+ - kubernetes.io/hostname=localhost
diff --git a/kud/demo/composite-firewall/networks/onap-private-net-pktgen.yaml b/kud/demo/composite-firewall/networks/onap-private-net-pktgen.yaml
new file mode 100644
index 00000000..18fafcc7
--- /dev/null
+++ b/kud/demo/composite-firewall/networks/onap-private-net-pktgen.yaml
@@ -0,0 +1,19 @@
+apiVersion: k8s.plugin.opnfv.org/v1alpha1
+kind: ProviderNetwork
+metadata:
+ name: emco-private-net
+spec:
+ cniType : ovn4nfv
+ ipv4Subnets:
+ - name: subnet1
+ subnet: 10.10.20.0/24
+ gateway: 10.10.20.1/24
+ excludeIps: 10.10.20.2..10.10.20.99
+ providerNetType: VLAN
+ vlan:
+ vlanId: "102"
+ providerInterfaceName: eth1
+ logicalInterfaceName: eth1.102
+ vlanNodeSelector: specific
+ nodeLabelList:
+ - kubernetes.io/hostname=localhost
diff --git a/kud/demo/composite-firewall/networks/protected-private-net-fwsink.yaml b/kud/demo/composite-firewall/networks/protected-private-net-fwsink.yaml
new file mode 100644
index 00000000..fce66313
--- /dev/null
+++ b/kud/demo/composite-firewall/networks/protected-private-net-fwsink.yaml
@@ -0,0 +1,19 @@
+apiVersion: k8s.plugin.opnfv.org/v1alpha1
+kind: ProviderNetwork
+metadata:
+ name: protected-private-net
+spec:
+ cniType : ovn4nfv
+ ipv4Subnets:
+ - name: subnet1
+ subnet: 192.168.20.0/24
+ gateway: 192.168.20.100/24
+ excludeIps: 192.168.20.101..192.168.20.255
+ providerNetType: VLAN
+ vlan:
+ vlanId: "101"
+ providerInterfaceName: eth1
+ logicalInterfaceName: eth1.101
+ vlanNodeSelector: specific
+ nodeLabelList:
+ - kubernetes.io/hostname=localhost
diff --git a/kud/demo/composite-firewall/networks/protected-private-net-pktgen.yaml b/kud/demo/composite-firewall/networks/protected-private-net-pktgen.yaml
new file mode 100644
index 00000000..58909de1
--- /dev/null
+++ b/kud/demo/composite-firewall/networks/protected-private-net-pktgen.yaml
@@ -0,0 +1,19 @@
+apiVersion: k8s.plugin.opnfv.org/v1alpha1
+kind: ProviderNetwork
+metadata:
+ name: protected-private-net
+spec:
+ cniType : ovn4nfv
+ ipv4Subnets:
+ - name: subnet1
+ subnet: 192.168.20.0/24
+ gateway: 192.168.20.100/24
+ excludeIps: 192.168.20.1..192.168.20.99
+ providerNetType: VLAN
+ vlan:
+ vlanId: "101"
+ providerInterfaceName: eth1
+ logicalInterfaceName: eth1.101
+ vlanNodeSelector: specific
+ nodeLabelList:
+ - kubernetes.io/hostname=localhost
diff --git a/kud/demo/composite-firewall/networks/protected-private-net.yaml b/kud/demo/composite-firewall/networks/protected-private-net.yaml
new file mode 100644
index 00000000..213b3541
--- /dev/null
+++ b/kud/demo/composite-firewall/networks/protected-private-net.yaml
@@ -0,0 +1,18 @@
+apiVersion: k8s.plugin.opnfv.org/v1alpha1
+kind: ProviderNetwork
+metadata:
+ name: protected-private-net
+spec:
+ cniType : ovn4nfv
+ ipv4Subnets:
+ - name: subnet1
+ subnet: 192.168.20.0/24
+ gateway: 192.168.20.100/24
+ providerNetType: VLAN
+ vlan:
+ vlanId: "101"
+ providerInterfaceName: eth1
+ logicalInterfaceName: eth1.101
+ vlanNodeSelector: specific
+ nodeLabelList:
+ - kubernetes.io/hostname=localhost
diff --git a/kud/demo/composite-firewall/networks/unprotected-private-net-fwsink.yaml b/kud/demo/composite-firewall/networks/unprotected-private-net-fwsink.yaml
new file mode 100644
index 00000000..5ab730b5
--- /dev/null
+++ b/kud/demo/composite-firewall/networks/unprotected-private-net-fwsink.yaml
@@ -0,0 +1,19 @@
+apiVersion: k8s.plugin.opnfv.org/v1alpha1
+kind: ProviderNetwork
+metadata:
+ name: unprotected-private-net
+spec:
+ cniType : ovn4nfv
+ ipv4Subnets:
+ - name: subnet1
+ subnet: 192.168.10.0/24
+ gateway: 192.168.10.1/24
+ excludeIps: 192.168.10.101..192.168.10.255
+ providerNetType: VLAN
+ vlan:
+ vlanId: "100"
+ providerInterfaceName: eth1
+ logicalInterfaceName: eth1.100
+ vlanNodeSelector: specific
+ nodeLabelList:
+ - kubernetes.io/hostname=localhost
diff --git a/kud/demo/composite-firewall/networks/unprotected-private-net-pktgen.yaml b/kud/demo/composite-firewall/networks/unprotected-private-net-pktgen.yaml
new file mode 100644
index 00000000..388eeb0d
--- /dev/null
+++ b/kud/demo/composite-firewall/networks/unprotected-private-net-pktgen.yaml
@@ -0,0 +1,19 @@
+apiVersion: k8s.plugin.opnfv.org/v1alpha1
+kind: ProviderNetwork
+metadata:
+ name: unprotected-private-net
+spec:
+ cniType : ovn4nfv
+ ipv4Subnets:
+ - name: subnet1
+ subnet: 192.168.10.0/24
+ gateway: 192.168.10.1/24
+ excludeIps: 192.168.10.2..192.168.10.100
+ providerNetType: VLAN
+ vlan:
+ vlanId: "100"
+ providerInterfaceName: eth1
+ logicalInterfaceName: eth1.100
+ vlanNodeSelector: specific
+ nodeLabelList:
+ - kubernetes.io/hostname=localhost
diff --git a/kud/demo/composite-firewall/networks/unprotected-private-net.yaml b/kud/demo/composite-firewall/networks/unprotected-private-net.yaml
new file mode 100644
index 00000000..f09f7608
--- /dev/null
+++ b/kud/demo/composite-firewall/networks/unprotected-private-net.yaml
@@ -0,0 +1,18 @@
+apiVersion: k8s.plugin.opnfv.org/v1alpha1
+kind: ProviderNetwork
+metadata:
+ name: unprotected-private-net
+spec:
+ cniType : ovn4nfv
+ ipv4Subnets:
+ - name: subnet1
+ subnet: 192.168.10.0/24
+ gateway: 192.168.10.1/24
+ providerNetType: VLAN
+ vlan:
+ vlanId: "100"
+ providerInterfaceName: eth1
+ logicalInterfaceName: eth1.100
+ vlanNodeSelector: specific
+ nodeLabelList:
+ - kubernetes.io/hostname=localhost
diff --git a/kud/demo/composite-firewall/override_values.yaml b/kud/demo/composite-firewall/override_values.yaml
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/kud/demo/composite-firewall/override_values.yaml
@@ -0,0 +1 @@
+
diff --git a/kud/demo/composite-firewall/packetgen/.helmignore b/kud/demo/composite-firewall/packetgen/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/kud/demo/composite-firewall/packetgen/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/demo/composite-firewall/packetgen/Chart.yaml b/kud/demo/composite-firewall/packetgen/Chart.yaml
new file mode 100644
index 00000000..d21cadec
--- /dev/null
+++ b/kud/demo/composite-firewall/packetgen/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart to deploy packet generator for vFirewall
+name: packetgen
+version: 0.1.0
diff --git a/kud/demo/composite-firewall/packetgen/templates/_helpers.tpl b/kud/demo/composite-firewall/packetgen/templates/_helpers.tpl
new file mode 100644
index 00000000..322b7c68
--- /dev/null
+++ b/kud/demo/composite-firewall/packetgen/templates/_helpers.tpl
@@ -0,0 +1,32 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "packetgen.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "packetgen.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "packetgen.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/kud/demo/composite-firewall/packetgen/templates/deployment.yaml b/kud/demo/composite-firewall/packetgen/templates/deployment.yaml
new file mode 100644
index 00000000..827d2838
--- /dev/null
+++ b/kud/demo/composite-firewall/packetgen/templates/deployment.yaml
@@ -0,0 +1,65 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "packetgen.fullname" . }}
+ labels:
+ release: {{ .Release.Name }}
+ app: {{ include "packetgen.name" . }}
+ chart: {{ .Chart.Name }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ include "packetgen.name" .}}
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: {{ include "packetgen.name" .}}
+ release: {{ .Release.Name }}
+ annotations:
+ app: {{ include "packetgen.name" . }}
+ release: {{ .Release.Name }}
+ VirtletLibvirtCPUSetting: |
+ mode: host-model
+ VirtletCloudInitUserData: |
+ ssh_pwauth: True
+ users:
+ - name: admin
+ gecos: User
+ primary-group: admin
+ groups: users
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ lock_passwd: false
+ passwd: "$6$rounds=4096$QA5OCKHTE41$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
+ runcmd:
+ - export demo_artifacts_version={{ .Values.global.demoArtifactsVersion }}
+ - export vfw_private_ip_0={{ .Values.global.vfwPrivateIp0 }}
+ - export vsn_private_ip_0={{ .Values.global.vsnPrivateIp0 }}
+ - export protected_net_cidr={{ .Values.global.protectedNetCidr }}
+ - export dcae_collector_ip={{ .Values.global.dcaeCollectorIp }}
+ - export dcae_collector_port={{ .Values.global.dcaeCollectorPort }}
+ - export protected_net_gw={{ .Values.global.protectedNetGw }}
+ - export protected_private_net_cidr={{ .Values.global.protectedPrivateNetCidr }}
+ - wget -O - https://git.onap.org/multicloud/k8s/plain/kud/tests/vFW/packetgen | sudo -E bash
+ VirtletRootVolumeSize: 5Gi
+ kubernetes.io/target-runtime: virtlet.cloud
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: {{ .Values.resources.limits.memory }}
diff --git a/kud/demo/composite-firewall/packetgen/templates/service.yaml b/kud/demo/composite-firewall/packetgen/templates/service.yaml
new file mode 100644
index 00000000..7b8fd9db
--- /dev/null
+++ b/kud/demo/composite-firewall/packetgen/templates/service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: packetgen-service
+ labels:
+ app: {{ include "packetgen.name" . }}
+ release: {{ .Release.Name }}
+ chart: {{ .Chart.Name }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.ports.port }}
+ nodePort: {{ .Values.service.ports.nodePort }}
+ selector:
+ app: {{ include "packetgen.name" . }}
+ release: {{ .Release.Name }}
diff --git a/kud/demo/composite-firewall/packetgen/values.yaml b/kud/demo/composite-firewall/packetgen/values.yaml
new file mode 100644
index 00000000..300947d5
--- /dev/null
+++ b/kud/demo/composite-firewall/packetgen/values.yaml
@@ -0,0 +1,57 @@
+# Default values for packetgen.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+ repository: virtlet.cloud/ubuntu/16.04
+ tag: latest
+ pullPolicy: IfNotPresent
+
+nameOverride: ""
+fullnameOverride: ""
+
+service:
+#service port value for the packetgen service
+ type: NodePort
+ ports:
+ port: 2831
+ nodePort: 30831
+
+resources:
+ limits:
+ memory: 4Gi
+
+#global vars for parent and subcharts.
+global:
+
+ #Networks
+ unprotectedNetworkName: unprotected-private-net
+ protectedPrivateNetCidr: 192.168.10.0/24
+
+ emcoPrivateNetworkName: emco-private-net
+
+ protectedNetworkName: protected-private-net
+ protectedNetCidr: 192.168.20.0/24
+ protectedNetGwIp: 192.168.20.100
+ protectedNetGw: 192.168.20.100/24
+
+ #vFirewall container
+ vfwPrivateIp0: 192.168.10.3
+ vfwPrivateIp1: 192.168.20.2
+ vfwPrivateIp2: 10.10.20.3
+
+ #Packetgen container
+ vpgPrivateIp0: 192.168.10.200
+ vpgPrivateIp1: 10.10.20.200
+
+ #Sink container
+ vsnPrivateIp0: 192.168.20.3
+ vsnPrivateIp1: 10.10.20.4
+
+ #########
+ ovnMultusNetworkName: ovn-networkobj
+ demoArtifactsVersion: 1.5.0
+ dcaeCollectorIp: 10.0.4.1
+ dcaeCollectorPort: 8081
diff --git a/kud/demo/composite-firewall/sink/.helmignore b/kud/demo/composite-firewall/sink/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/kud/demo/composite-firewall/sink/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/demo/composite-firewall/sink/Chart.yaml b/kud/demo/composite-firewall/sink/Chart.yaml
new file mode 100644
index 00000000..f83182e5
--- /dev/null
+++ b/kud/demo/composite-firewall/sink/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart to deploy sink for vFirewall
+name: sink
+version: 0.1.0
diff --git a/kud/demo/composite-firewall/sink/templates/_helpers.tpl b/kud/demo/composite-firewall/sink/templates/_helpers.tpl
new file mode 100644
index 00000000..7d82d08d
--- /dev/null
+++ b/kud/demo/composite-firewall/sink/templates/_helpers.tpl
@@ -0,0 +1,32 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "sink.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "sink.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "sink.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/kud/demo/composite-firewall/sink/templates/configmap.yaml b/kud/demo/composite-firewall/sink/templates/configmap.yaml
new file mode 100644
index 00000000..89be1f77
--- /dev/null
+++ b/kud/demo/composite-firewall/sink/templates/configmap.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "sink.name" .}}-configmap
+data:
+ protected_net_gw: {{ .Values.global.protectedNetGwIp }}
+ protected_private_net_cidr: {{ .Values.global.protectedPrivateNetCidr }}
diff --git a/kud/demo/composite-firewall/sink/templates/deployment.yaml b/kud/demo/composite-firewall/sink/templates/deployment.yaml
new file mode 100644
index 00000000..f1f56b28
--- /dev/null
+++ b/kud/demo/composite-firewall/sink/templates/deployment.yaml
@@ -0,0 +1,38 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "sink.fullname" . }}
+ labels:
+ release: {{ .Release.Name }}
+ app: {{ include "sink.name" . }}
+ chart: {{ .Chart.Name }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ include "sink.name" . }}
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: {{ include "sink.name" . }}
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.sinkrepo }}:{{ .Values.image.sinktag }}"
+ envFrom:
+ - configMapRef:
+ name: {{ include "sink.name" . }}-configmap
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ tty: true
+ stdin: true
+ securityContext:
+ privileged: true
+ - name: darkstat
+ image: "{{ .Values.image.darkstatrepo }}:{{ .Values.image.darkstattag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ tty: true
+ stdin: true
+ ports:
+ - containerPort: {{ .Values.service.ports.port }}
diff --git a/kud/demo/composite-firewall/sink/templates/service.yaml b/kud/demo/composite-firewall/sink/templates/service.yaml
new file mode 100644
index 00000000..99da7de7
--- /dev/null
+++ b/kud/demo/composite-firewall/sink/templates/service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: sink-service
+ labels:
+ app: {{ include "sink.name" . }}
+ release: {{ .Release.Name }}
+ chart: {{ .Chart.Name }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.ports.port }}
+ nodePort: {{ .Values.service.ports.nodePort }}
+ selector:
+ app: {{ include "sink.name" . }}
+ release: {{ .Release.Name }}
diff --git a/kud/demo/composite-firewall/sink/values.yaml b/kud/demo/composite-firewall/sink/values.yaml
new file mode 100644
index 00000000..a6fa1c46
--- /dev/null
+++ b/kud/demo/composite-firewall/sink/values.yaml
@@ -0,0 +1,61 @@
+# Default values for sink.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+ sinkrepo: rtsood/onap-vfw-demo-sink
+ sinktag: 0.2.0
+ pullPolicy: IfNotPresent
+ darkstatrepo: electrocucaracha/darkstat
+ darkstattag: latest
+
+nameOverride: ""
+fullnameOverride: ""
+
+service:
+#service port value for the sink service
+ type: NodePort
+ ports:
+ port: 667
+ nodePort: 30667
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+
+#global vars for parent and subcharts.
+global:
+
+ #Networks
+ unprotectedNetworkName: unprotected-private-net
+ protectedPrivateNetCidr: 192.168.10.0/24
+
+ emcoPrivateNetworkName: emco-private-net
+
+ protectedNetworkName: protected-private-net
+ protectedNetCidr: 192.168.20.0/24
+ protectedNetGwIp: 192.168.20.100
+ protectedNetGw: 192.168.20.100/24
+
+ #vFirewall container
+ vfwPrivateIp0: 192.168.10.3
+ vfwPrivateIp1: 192.168.20.2
+ vfwPrivateIp2: 10.10.20.3
+
+ #Packetgen container
+ vpgPrivateIp0: 192.168.10.200
+ vpgPrivateIp1: 10.10.20.200
+
+ #Sink container
+ vsnPrivateIp0: 192.168.20.3
+ vsnPrivateIp1: 10.10.20.4
+
+ #########
+ ovnMultusNetworkName: ovn-networkobj
+ demoArtifactsVersion: 1.5.0
+ dcaeCollectorIp: 10.0.4.1
+ dcaeCollectorPort: 8081
diff --git a/kud/demo/firewall/values.yaml b/kud/demo/firewall/values.yaml
index abc39f01..f589760f 100644
--- a/kud/demo/firewall/values.yaml
+++ b/kud/demo/firewall/values.yaml
@@ -24,8 +24,8 @@ global:
protectedPrivateNetGw: 192.168.10.1/24
onapPrivateNetworkName: onap-private-net
- onapPrivateNetCidr: 10.10.0.0/16
- onapPrivateNetGw: 10.10.0.1/16
+ onapPrivateNetCidr: 10.10.20.0/24
+ onapPrivateNetGw: 10.10.20.1/24
protectedNetworkName: protected-private-net
protectedNetCidr: 192.168.20.0/24
diff --git a/kud/tests/README-composite-vfw.txt b/kud/tests/README-composite-vfw.txt
new file mode 100644
index 00000000..d2018c09
--- /dev/null
+++ b/kud/tests/README-composite-vfw.txt
@@ -0,0 +1,122 @@
+# Notes on running the composite vFW test case
+
+# Infrastructure
+As written, the vfw-test.sh script assumes 3 clusters:
+- one cluster in which the EMCO microservices are running
+- two edge clusters in which the vFW will be instantiated
+
+The edge clusters in which the vFW will be instantiated should be KUD clusters.
+
+# Preparations
+
+## Prepare the Composite vFW Application Charts and Profiles
+
+1. In the multicloud-k8s/kud/demo/composite-vfw directory, prepare the 3 helm
+ charts for the vfw.
+
+ tar cvf packetgen.tar packetgen
+ tar cvf firewall.tar firewall
+ tar cvf sink.tar sink
+ gzip *.tar
+
+2. Prepare the profile file (same one will be used for all profiles in this demo)
+
+ tar cvf profile.tar manifest.yaml override_values.yaml
+ gzip profile.tar
+
+## Set up environment variables for the vfw-test.sh script
+
+The vfw-test.sh script expects a number of files to be provided via environment
+variables.
+
+Change directory to multicloud-k8s/kud/tests
+
+1. Edge cluster kubeconfig files - the script expects 2 of these
+
+ export kubeconfigfile=<path to first cluster kube config file>
+ export kubeconfigfile2=<path to second cluster kube config file>
+
+ for example: export kubeconfigfile=/home/vagrant/multicloud-k8s/cluster-configs/config-edge01
+
+
+2. Composite app helm chart files (as prepared above)
+
+ export packetgen_helm_chart=../demo/composite-firewall/packetgen.tar.gz
+ export firewall_helm_chart=../demo/composite-firewall/firewall.tar.gz
+ export sink_helm_chart=../demo/composite-firewall/sink.tar.gz
+
+3. Composite profile application profiles (as prepared above)
+
+ export packetgen_profile_file=../demo/composite-firewall/profile.tar.gz
+ export firewall_profile_file=../demo/composite-firewall/profile.tar.gz
+ export sink_profile_file=../demo/composite-firewall/profile.tar.gz
+
+4. Modify the script to address the EMCO cluster
+
+    Modify the urls at the top part of the script to point to the
+ cluster IP address of the EMCO cluster.
+
+ That is, modify the IP address 10.10.10.6 to the correct value for
+ your environment.
+
+ Note also that the node ports used in the following are based on the values
+ defined in multicloud-k8s/deployments/kubernetes/onap4k8s.yaml
+
+ base_url_clm=${base_url_clm:-"http://10.10.10.6:31856/v2"}
+ base_url_ncm=${base_url_ncm:-"http://10.10.10.6:32737/v2"}
+ base_url_orchestrator=${base_url_orchestrator:-"http://10.10.10.6:31298/v2"}
+ base_url_ovnaction=${base_url_ovnaction:-"http://10.10.10.6:31181/v2"}
+
+
+# Run the vfw-test.sh
+
+The rest of the data needed for the test is present in the script.
+
+1. Invoke API calls to create the data
+
+ vfw-test.sh create
+
+ This does all of the data setup
+ - registers clusters
+ - registers controllers
+ - sets up the composite app and profile
+ - sets up all of the intents
+
+2. Query results (optional)
+
+ vfw-test.sh get
+
+3. Apply the network intents
+
+ For the vFW test, the 3 networks used by the vFW are created by using network intents.
+ Both virtual and provider networks are used.
+
+ vfw-test.sh apply
+
+ On the edge clusters, check to see the networks were created:
+
+ kubectl get network
+ kubectl get providernetwork
+
+4. Instantiate the vFW
+
+ vfw-test.sh instantiate
+
+ This will instantiate the vFW on the two edge clusters (as defined by the generic
+ placement intent).
+
+
+# Removing resources (until termination sequences are completed)
+
+1. Run the cleanup script (or equivalent) in the edge clusters.
+ (once the terminate flow via EMCO is complete, this step will not be necessary)
+
+ bash cleanup-composite-vfw.sh
+
+2. Terminate the network intents
+
+ vfw-test.sh terminate
+
+3. Delete everything from the Mongo DB
+
+ vfw-test.sh delete
diff --git a/kud/tests/_functions.sh b/kud/tests/_functions.sh
index 7687f3fa..8e715ef4 100755
--- a/kud/tests/_functions.sh
+++ b/kud/tests/_functions.sh
@@ -88,6 +88,37 @@ function call_api {
fi
}
+function call_api_nox {
+ # this version doesn't exit the script if there's
+ # an error.
+
+ #Runs curl with passed flags and provides
+ #additional error handling and debug information
+
+ #Function outputs server response body
+ #and performs validation of http_code
+
+ local status
+ local curl_response_file="$(mktemp -p /tmp)"
+ local curl_common_flags=(-s -w "%{http_code}" -o "${curl_response_file}")
+ local command=(curl "${curl_common_flags[@]}" "$@")
+
+ echo "[INFO] Running '${command[@]}'" >&2
+ if ! status="$("${command[@]}")"; then
+ echo "[ERROR] Internal curl error! '$status'" >&2
+ cat "${curl_response_file}"
+ rm "${curl_response_file}"
+ else
+ echo "[INFO] Server replied with status: ${status}" >&2
+ if [[ "${status:0:1}" =~ [45] ]]; then
+ cat "${curl_response_file}"
+ else
+ cat "${curl_response_file}" | jq .
+ fi
+ rm "${curl_response_file}"
+ fi
+}
+
function delete_resource {
#Issues DELETE http call to provided endpoint
#and further validates by following GET request
diff --git a/kud/tests/cleanup-composite-vfw.sh b/kud/tests/cleanup-composite-vfw.sh
new file mode 100644
index 00000000..7f96e8ac
--- /dev/null
+++ b/kud/tests/cleanup-composite-vfw.sh
@@ -0,0 +1,21 @@
+# To clean up composite vfw demo resources in a cluster
+kubectl -n onap4k8s delete deployment clm
+kubectl -n onap4k8s delete deployment orchestrator
+kubectl -n onap4k8s delete deployment ncm
+kubectl -n onap4k8s delete deployment ovnaction
+kubectl -n onap4k8s delete deployment rsync
+kubectl -n onap4k8s delete service clm
+kubectl -n onap4k8s delete service orchestrator
+kubectl -n onap4k8s delete service ncm
+kubectl -n onap4k8s delete service ovnaction
+kubectl -n onap4k8s delete service rsync
+kubectl -n onap4k8s delete configmap clm
+kubectl -n onap4k8s delete configmap orchestrator
+kubectl -n onap4k8s delete configmap ncm
+kubectl -n onap4k8s delete configmap ovnaction
+kubectl -n onap4k8s delete configmap rsync
+
+# delete the networks
+kubectl delete network protected-private-net
+kubectl delete providernetwork emco-private-net
+kubectl delete providernetwork unprotected-private-net
diff --git a/kud/tests/ncm-test.sh b/kud/tests/ncm-test.sh
index 7eb83dfc..74f46979 100755
--- a/kud/tests/ncm-test.sh
+++ b/kud/tests/ncm-test.sh
@@ -5,8 +5,27 @@ set -o pipefail
source _functions.sh
-base_url_clm=${base_url:-"http://localhost:9019/v2"}
-base_url_ncm=${base_url:-"http://localhost:9016/v2"}
+base_url_clm=${base_url:-"http://10.10.10.6:31044/v2"}
+base_url_ncm=${base_url:-"http://10.10.10.6:31983/v2"}
+base_url_orchestrator=${base_url:-"http://10.10.10.6:30186/v2"}
+
+# add the rsync controller entry
+rsynccontrollername="rsync"
+rsynccontrollerdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "rsync",
+ "description": "description of $rsynccontrollername controller",
+ "userData1": "$rsynccontrollername user data 1",
+ "userData2": "$rsynccontrollername user data 2"
+ },
+ "spec": {
+ "host": "${rsynccontrollername}",
+ "port": 9041
+ }
+}
+EOF
+)"
# ncm data samples
clusterprovidername="cluster-provider-a"
@@ -22,7 +41,7 @@ clusterproviderdata="$(cat<<EOF
EOF
)"
-clustername="cluster-a"
+clustername="edge01"
clusterdata="$(cat<<EOF
{
"metadata": {
@@ -143,6 +162,9 @@ providernetworkdata="$(cat<<EOF
EOF
)"
+function createOrchData {
+ call_api -d "${rsynccontrollerdata}" "${base_url_orchestrator}/controllers"
+}
function createNcmData {
call_api -d "${clusterproviderdata}" "${base_url_clm}/cluster-providers"
@@ -163,14 +185,22 @@ function terminateNcmData {
call_api -d "{ }" "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/terminate"
}
+function getOrchData {
+ call_api_nox "${base_url_orchestrator}/controllers"
+}
+
function getNcmData {
- call_api "${base_url_clm}/cluster-providers/${clusterprovidername}" | jq .
- call_api -H "Accept: application/json" "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}" | jq .
- call_api "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters?label=${labelname}" | jq .
- call_api "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/kv-pairs/${kvname}" | jq .
- call_api "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/networks/${networkname}" | jq .
- call_api "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/provider-networks/${providernetworkname}" | jq .
+ call_api_nox "${base_url_clm}/cluster-providers/${clusterprovidername}"
+ call_api_nox -H "Accept: application/json" "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}"
+ call_api_nox "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters?label=${labelname}"
+ call_api_nox "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/kv-pairs/${kvname}"
+ call_api_nox "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/networks/${networkname}"
+ call_api_nox "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/provider-networks/${providernetworkname}"
+
+}
+function deleteOrchData {
+ call_api -X DELETE "${base_url_orchestrator}/controllers/${rsynccontrollername}" | jq .
}
function deleteNcmData {
@@ -183,7 +213,7 @@ function deleteNcmData {
}
function usage {
- echo "Usage: $0 create|apply|get|terminate|delete"
+ echo "Usage: $0 create|creatersync|apply|get|getrsync|terminate|delete|deletersync"
exit
}
@@ -193,10 +223,13 @@ if [ "$#" -ne 1 ] ; then
fi
case "$1" in
+ "creatersync" ) createOrchData ;;
"create" ) createNcmData ;;
"apply" ) applyNcmData ;;
"terminate" ) terminateNcmData ;;
"get" ) getNcmData ;;
+ "getrsync" ) getOrchData ;;
"delete" ) deleteNcmData ;;
+ "deletersync" ) deleteOrchData ;;
*) usage ;;
esac
diff --git a/kud/tests/vfw-test-clean-cluster.sh b/kud/tests/vfw-test-clean-cluster.sh
new file mode 100644
index 00000000..3d386466
--- /dev/null
+++ b/kud/tests/vfw-test-clean-cluster.sh
@@ -0,0 +1,6 @@
+kubectl delete deploy fw0-packetgen
+kubectl delete deploy fw0-firewall
+kubectl delete deploy fw0-sink
+kubectl delete service packetgen-service
+kubectl delete service sink-service
+kubectl delete configmap sink-configmap
diff --git a/kud/tests/vfw-test-setenv.sh b/kud/tests/vfw-test-setenv.sh
new file mode 100644
index 00000000..77031f9f
--- /dev/null
+++ b/kud/tests/vfw-test-setenv.sh
@@ -0,0 +1,7 @@
+export packetgen_helm_path=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/packetgen.tar.gz
+export firewall_helm_path=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/firewall.tar.gz
+export sink_helm_path=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/sink.tar.gz
+export kubeconfigfile=/home/vagrant/multicloud-k8s/cluster-configs/config-edge02
+export packetgen_profile_targz=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/profile.tar.gz
+export firewall_profile_targz=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/profile.tar.gz
+export sink_profile_targz=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/profile.tar.gz
diff --git a/kud/tests/vfw-test.sh b/kud/tests/vfw-test.sh
new file mode 100755
index 00000000..2bdddcd7
--- /dev/null
+++ b/kud/tests/vfw-test.sh
@@ -0,0 +1,1054 @@
+#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source _functions.sh
+
+base_url_clm=${base_url_clm:-"http://10.10.10.6:31856/v2"}
+base_url_ncm=${base_url_ncm:-"http://10.10.10.6:32737/v2"}
+base_url_orchestrator=${base_url_orchestrator:-"http://10.10.10.6:31298/v2"}
+base_url_ovnaction=${base_url_ovnaction:-"http://10.10.10.6:31181/v2"}
+
+# add clusters to clm
+# TODO one is added by default, add more if vfw demo is
+# extended to multiple clusters
+clusterprovidername="vfw-cluster-provider"
+clusterproviderdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "$clusterprovidername",
+ "description": "description of $clusterprovidername",
+ "userData1": "$clusterprovidername user data 1",
+ "userData2": "$clusterprovidername user data 2"
+ }
+}
+EOF
+)"
+
+clustername="edge01"
+clusterdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "$clustername",
+ "description": "description of $clustername",
+ "userData1": "$clustername user data 1",
+ "userData2": "$clustername user data 2"
+ }
+}
+EOF
+)"
+
+# set $kubeconfigfile before running script to point to the desired config file
+kubeconfigfile=${kubeconfigfile:-"oops"}
+
+# TODO consider demo of cluster label based placement
+# could use to onboard multiple clusters for vfw
+# but still deploy to just 1 cluster based on label
+labelname="LabelA"
+labeldata="$(cat<<EOF
+{"label-name": "$labelname"}
+EOF
+)"
+
+clustername2="edge02"
+clusterdata2="$(cat<<EOF
+{
+ "metadata": {
+ "name": "$clustername2",
+ "description": "description of $clustername2",
+ "userData1": "$clustername2 user data 1",
+ "userData2": "$clustername2 user data 2"
+ }
+}
+EOF
+)"
+
+# set $kubeconfigfile2 before running script to point to the desired config file
+kubeconfigfile2=${kubeconfigfile2:-"oops"}
+
+# TODO consider demo of cluster label based placement
+# could use to onboard multiple clusters for vfw
+# but still deploy to just 1 cluster based on label
+labelname2="LabelA"
+labeldata2="$(cat<<EOF
+{"label-name": "$labelname"}
+EOF
+)"
+
+# add the rsync controller entry
+rsynccontrollername="rsync"
+rsynccontrollerdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "rsync",
+ "description": "description of $rsynccontrollername controller",
+ "userData1": "user data 1 for $rsynccontrollername",
+ "userData2": "user data 2 for $rsynccontrollername"
+ },
+ "spec": {
+ "host": "${rsynccontrollername}",
+ "port": 9041
+ }
+}
+EOF
+)"
+
+# add the ovnaction controller entry
+ovnactioncontrollername="ovnaction"
+ovnactioncontrollerdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "$ovnactioncontrollername",
+ "description": "description of $ovnactioncontrollername controller",
+ "userData1": "user data 2 for $ovnactioncontrollername",
+ "userData2": "user data 2 for $ovnactioncontrollername"
+ },
+ "spec": {
+ "host": "${ovnactioncontrollername}",
+ "type": "action",
+ "priority": 1,
+ "port": 9053
+ }
+}
+EOF
+)"
+
+
+# define networks and providernetworks intents to ncm for the clusters
+# define emco-private-net and unprotected-private-net as provider networks
+
+emcoprovidernetworkname="emco-private-net"
+emcoprovidernetworkdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "$emcoprovidernetworkname",
+ "description": "description of $emcoprovidernetworkname",
+ "userData1": "user data 1 for $emcoprovidernetworkname",
+ "userData2": "user data 2 for $emcoprovidernetworkname"
+ },
+ "spec": {
+ "cniType": "ovn4nfv",
+ "ipv4Subnets": [
+ {
+ "subnet": "10.10.20.0/24",
+ "name": "subnet1",
+ "gateway": "10.10.20.1/24"
+ }
+ ],
+ "providerNetType": "VLAN",
+ "vlan": {
+ "vlanId": "102",
+ "providerInterfaceName": "eth1",
+ "logicalInterfaceName": "eth1.102",
+ "vlanNodeSelector": "specific",
+ "nodeLabelList": [
+ "kubernetes.io/hostname=localhost"
+ ]
+ }
+ }
+}
+EOF
+)"
+
+unprotectedprovidernetworkname="unprotected-private-net"
+unprotectedprovidernetworkdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "$unprotectedprovidernetworkname",
+ "description": "description of $unprotectedprovidernetworkname",
+ "userData1": "user data 2 for $unprotectedprovidernetworkname",
+ "userData2": "user data 2 for $unprotectedprovidernetworkname"
+ },
+ "spec": {
+ "cniType": "ovn4nfv",
+ "ipv4Subnets": [
+ {
+ "subnet": "192.168.10.0/24",
+ "name": "subnet1",
+ "gateway": "192.168.10.1/24"
+ }
+ ],
+ "providerNetType": "VLAN",
+ "vlan": {
+ "vlanId": "100",
+ "providerInterfaceName": "eth1",
+ "logicalInterfaceName": "eth1.100",
+ "vlanNodeSelector": "specific",
+ "nodeLabelList": [
+ "kubernetes.io/hostname=localhost"
+ ]
+ }
+ }
+}
+EOF
+)"
+
+protectednetworkname="protected-private-net"
+protectednetworkdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "$protectednetworkname",
+ "description": "description of $protectednetworkname",
+ "userData1": "user data 1 for $protectednetworkname",
+ "userData2": "user data 1 for $protectednetworkname"
+ },
+ "spec": {
+ "cniType": "ovn4nfv",
+ "ipv4Subnets": [
+ {
+ "subnet": "192.168.20.0/24",
+ "name": "subnet1",
+ "gateway": "192.168.20.100/32"
+ }
+ ]
+ }
+}
+EOF
+)"
+
+# define a project
+projectname="testvfw"
+projectdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "$projectname",
+ "description": "description of $projectname controller",
+ "userData1": "$projectname user data 1",
+ "userData2": "$projectname user data 2"
+ }
+}
+EOF
+)"
+
+# define a composite application
+vfw_compositeapp_name="compositevfw"
+vfw_compositeapp_version="v1"
+vfw_compositeapp_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${vfw_compositeapp_name}",
+ "description": "description of ${vfw_compositeapp_name}",
+ "userData1": "user data 1 for ${vfw_compositeapp_name}",
+ "userData2": "user data 2 for ${vfw_compositeapp_name}"
+ },
+ "spec":{
+ "version":"${vfw_compositeapp_version}"
+ }
+}
+EOF
+)"
+
+# define app entries for the composite application
+# includes the multipart tgz of the helm chart for vfw
+# BEGIN: Create entries for app1&app2 in the database
+packetgen_app_name="packetgen"
+packetgen_helm_chart=${packetgen_helm_path:-"oops"}
+packetgen_app_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${packetgen_app_name}",
+ "description": "description for app ${packetgen_app_name}",
+ "userData1": "user data 2 for ${packetgen_app_name}",
+ "userData2": "user data 2 for ${packetgen_app_name}"
+ }
+}
+EOF
+)"
+
+firewall_app_name="firewall"
+firewall_helm_chart=${firewall_helm_path:-"oops"}
+firewall_app_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${firewall_app_name}",
+ "description": "description for app ${firewall_app_name}",
+ "userData1": "user data 2 for ${firewall_app_name}",
+ "userData2": "user data 2 for ${firewall_app_name}"
+ }
+}
+EOF
+)"
+
+sink_app_name="sink"
+sink_helm_chart=${sink_helm_path:-"oops"}
+sink_app_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${sink_app_name}",
+ "description": "description for app ${sink_app_name}",
+ "userData1": "user data 2 for ${sink_app_name}",
+ "userData2": "user data 2 for ${sink_app_name}"
+ }
+}
+EOF
+)"
+
+
+# Add the composite profile
+vfw_composite_profile_name="vfw_composite-profile"
+vfw_composite_profile_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${vfw_composite_profile_name}",
+ "description":"description of ${vfw_composite_profile_name}",
+ "userData1":"user data 1 for ${vfw_composite_profile_name}",
+ "userData2":"user data 2 for ${vfw_composite_profile_name}"
+ }
+}
+EOF
+)"
+
+
+# define the packetgen profile data
+packetgen_profile_name="packetgen-profile"
+packetgen_profile_file=${packetgen_profile_targz:-"oops"}
+packetgen_profile_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${packetgen_profile_name}",
+ "description":"description of ${packetgen_profile_name}",
+ "userData1":"user data 1 for ${packetgen_profile_name}",
+ "userData2":"user data 2 for ${packetgen_profile_name}"
+ },
+ "spec":{
+ "app-name": "${packetgen_app_name}"
+ }
+}
+EOF
+)"
+
+# define the firewall profile data
+firewall_profile_name="firewall-profile"
+firewall_profile_file=${firewall_profile_targz:-"oops"}
+firewall_profile_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${firewall_profile_name}",
+ "description":"description of ${firewall_profile_name}",
+ "userData1":"user data 1 for ${firewall_profile_name}",
+ "userData2":"user data 2 for ${firewall_profile_name}"
+ },
+ "spec":{
+ "app-name": "${firewall_app_name}"
+ }
+}
+EOF
+)"
+
+# define the sink profile data
+sink_profile_name="sink-profile"
+sink_profile_file=${sink_profile_targz:-"oops"}
+sink_profile_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${sink_profile_name}",
+ "description":"description of ${sink_profile_name}",
+ "userData1":"user data 1 for ${sink_profile_name}",
+ "userData2":"user data 2 for ${sink_profile_name}"
+ },
+ "spec":{
+ "app-name": "${sink_app_name}"
+ }
+}
+EOF
+)"
+
+
+# define the generic placement intent
+generic_placement_intent_name="generic-placement-intent"
+generic_placement_intent_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${generic_placement_intent_name}",
+ "description":"${generic_placement_intent_name}",
+ "userData1":"${generic_placement_intent_name}",
+ "userData2":"${generic_placement_intent_name}"
+ },
+ "spec":{
+ "logical-cloud":"unused_logical_cloud"
+ }
+}
+EOF
+)"
+
+
+# define app placement intent for packetgen
+packetgen_placement_intent_name="packetgen-placement-intent"
+packetgen_placement_intent_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${packetgen_placement_intent_name}",
+ "description":"description of ${packetgen_placement_intent_name}",
+ "userData1":"user data 1 for ${packetgen_placement_intent_name}",
+ "userData2":"user data 2 for ${packetgen_placement_intent_name}"
+ },
+ "spec":{
+ "app-name":"${packetgen_app_name}",
+ "intent":{
+ "allOf":[
+ { "provider-name":"${clusterprovidername}",
+ "cluster-label-name":"${labelname}"
+ }
+ ]
+ }
+ }
+}
+EOF
+)"
+
+# define app placement intent for firewall
+firewall_placement_intent_name="firewall-placement-intent"
+firewall_placement_intent_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${firewall_placement_intent_name}",
+ "description":"description of ${firewall_placement_intent_name}",
+ "userData1":"user data 1 for ${firewall_placement_intent_name}",
+ "userData2":"user data 2 for ${firewall_placement_intent_name}"
+ },
+ "spec":{
+ "app-name":"${firewall_app_name}",
+ "intent":{
+ "allOf":[
+ { "provider-name":"${clusterprovidername}",
+ "cluster-label-name":"${labelname}"
+ }
+ ]
+ }
+ }
+}
+EOF
+)"
+
+# define app placement intent for sink
+sink_placement_intent_name="sink-placement-intent"
+sink_placement_intent_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${sink_placement_intent_name}",
+ "description":"description of ${sink_placement_intent_name}",
+ "userData1":"user data 1 for ${sink_placement_intent_name}",
+ "userData2":"user data 2 for ${sink_placement_intent_name}"
+ },
+ "spec":{
+ "app-name":"${sink_app_name}",
+ "intent":{
+ "allOf":[
+ { "provider-name":"${clusterprovidername}",
+ "cluster-label-name":"${labelname}"
+ }
+ ]
+ }
+ }
+}
+EOF
+)"
+
+# define a deployment intent group
+release="fw0"
+deployment_intent_group_name="vfw_deployment_intent_group"
+deployment_intent_group_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${deployment_intent_group_name}",
+ "description":"descriptiont of ${deployment_intent_group_name}",
+ "userData1":"user data 1 for ${deployment_intent_group_name}",
+ "userData2":"user data 2 for ${deployment_intent_group_name}"
+ },
+ "spec":{
+ "profile":"${vfw_composite_profile_name}",
+ "version":"${release}",
+ "override-values":[
+ {
+ "app-name":"${packetgen_app_name}",
+ "values": {
+ ".Values.service.ports.nodePort":"30888"
+ }
+ },
+ {
+ "app-name":"${firewall_app_name}",
+ "values": {
+ ".Values.global.dcaeCollectorIp":"1.2.3.4",
+ ".Values.global.dcaeCollectorPort":"8888"
+ }
+ },
+ {
+ "app-name":"${sink_app_name}",
+ "values": {
+ ".Values.service.ports.nodePort":"30677"
+ }
+ }
+ ]
+ }
+}
+EOF
+)"
+
+# define the network-control-intent for the vfw composite app
+vfw_ovnaction_intent_name="vfw_ovnaction_intent"
+vfw_ovnaction_intent_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${vfw_ovnaction_intent_name}",
+ "description":"descriptionf of ${vfw_ovnaction_intent_name}",
+ "userData1":"user data 1 for ${vfw_ovnaction_intent_name}",
+ "userData2":"user data 2 for ${vfw_ovnaction_intent_name}"
+ }
+}
+EOF
+)"
+
+# define the network workload intent for packetgen app
+packetgen_workload_intent_name="packetgen_workload_intent"
+packetgen_workload_intent_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${packetgen_workload_intent_name}",
+ "description": "description of ${packetgen_workload_intent_name}",
+ "userData1": "useer data 2 for ${packetgen_workload_intent_name}",
+ "userData2": "useer data 2 for ${packetgen_workload_intent_name}"
+ },
+ "spec": {
+ "application-name": "${packetgen_app_name}",
+ "workload-resource": "${release}-${packetgen_app_name}",
+ "type": "Deployment"
+ }
+}
+EOF
+)"
+
+# define the network workload intent for firewall app
+firewall_workload_intent_name="firewall_workload_intent"
+firewall_workload_intent_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${firewall_workload_intent_name}",
+ "description": "description of ${firewall_workload_intent_name}",
+ "userData1": "useer data 2 for ${firewall_workload_intent_name}",
+ "userData2": "useer data 2 for ${firewall_workload_intent_name}"
+ },
+ "spec": {
+ "application-name": "${firewall_app_name}",
+ "workload-resource": "${release}-${firewall_app_name}",
+ "type": "Deployment"
+ }
+}
+EOF
+)"
+
+# define the network workload intent for sink app
+sink_workload_intent_name="sink_workload_intent"
+sink_workload_intent_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${sink_workload_intent_name}",
+ "description": "description of ${sink_workload_intent_name}",
+ "userData1": "useer data 2 for ${sink_workload_intent_name}",
+ "userData2": "useer data 2 for ${sink_workload_intent_name}"
+ },
+ "spec": {
+ "application-name": "${sink_app_name}",
+ "workload-resource": "${release}-${sink_app_name}",
+ "type": "Deployment"
+ }
+}
+EOF
+)"
+
+# define the network interface intents for the packetgen workload intent
+packetgen_unprotected_interface_name="packetgen_unprotected_if"
+packetgen_unprotected_interface_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${packetgen_unprotected_interface_name}",
+ "description": "description of ${packetgen_unprotected_interface_name}",
+ "userData1": "useer data 2 for ${packetgen_unprotected_interface_name}",
+ "userData2": "useer data 2 for ${packetgen_unprotected_interface_name}"
+ },
+ "spec": {
+ "interface": "eth1",
+ "name": "${unprotectedprovidernetworkname}",
+ "defaultGateway": "false",
+ "ipAddress": "192.168.10.2"
+ }
+}
+EOF
+)"
+
+packetgen_emco_interface_name="packetgen_emco_if"
+packetgen_emco_interface_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${packetgen_emco_interface_name}",
+ "description": "description of ${packetgen_emco_interface_name}",
+ "userData1": "useer data 2 for ${packetgen_emco_interface_name}",
+ "userData2": "useer data 2 for ${packetgen_emco_interface_name}"
+ },
+ "spec": {
+ "interface": "eth2",
+ "name": "${emcoprovidernetworkname}",
+ "defaultGateway": "false",
+ "ipAddress": "10.10.20.2"
+ }
+}
+EOF
+)"
+
+# define the network interface intents for the firewall workload intent
+firewall_unprotected_interface_name="firewall_unprotected_if"
+firewall_unprotected_interface_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${firewall_unprotected_interface_name}",
+ "description": "description of ${firewall_unprotected_interface_name}",
+ "userData1": "useer data 2 for ${firewall_unprotected_interface_name}",
+ "userData2": "useer data 2 for ${firewall_unprotected_interface_name}"
+ },
+ "spec": {
+ "interface": "eth1",
+ "name": "${unprotectedprovidernetworkname}",
+ "defaultGateway": "false",
+ "ipAddress": "192.168.10.3"
+ }
+}
+EOF
+)"
+
+firewall_protected_interface_name="firewall_protected_if"
+firewall_protected_interface_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${firewall_protected_interface_name}",
+ "description": "description of ${firewall_protected_interface_name}",
+ "userData1": "useer data 2 for ${firewall_protected_interface_name}",
+ "userData2": "useer data 2 for ${firewall_protected_interface_name}"
+ },
+ "spec": {
+ "interface": "eth2",
+ "name": "${protectednetworkname}",
+ "defaultGateway": "false",
+ "ipAddress": "192.168.20.2"
+ }
+}
+EOF
+)"
+
+firewall_emco_interface_name="firewall_emco_if"
+firewall_emco_interface_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${firewall_emco_interface_name}",
+ "description": "description of ${firewall_emco_interface_name}",
+ "userData1": "useer data 2 for ${firewall_emco_interface_name}",
+ "userData2": "useer data 2 for ${firewall_emco_interface_name}"
+ },
+ "spec": {
+ "interface": "eth3",
+ "name": "${emcoprovidernetworkname}",
+ "defaultGateway": "false",
+ "ipAddress": "10.10.20.3"
+ }
+}
+EOF
+)"
+
+# define the network interface intents for the sink workload intent
+sink_protected_interface_name="sink_protected_if"
+sink_protected_interface_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${sink_protected_interface_name}",
+ "description": "description of ${sink_protected_interface_name}",
+ "userData1": "useer data 2 for ${sink_protected_interface_name}",
+ "userData2": "useer data 2 for ${sink_protected_interface_name}"
+ },
+ "spec": {
+ "interface": "eth1",
+ "name": "${protectednetworkname}",
+ "defaultGateway": "false",
+ "ipAddress": "192.168.20.3"
+ }
+}
+EOF
+)"
+
+sink_emco_interface_name="sink_emco_if"
+sink_emco_interface_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${sink_emco_interface_name}",
+ "description": "description of ${sink_emco_interface_name}",
+ "userData1": "useer data 2 for ${sink_emco_interface_name}",
+ "userData2": "useer data 2 for ${sink_emco_interface_name}"
+ },
+ "spec": {
+ "interface": "eth2",
+ "name": "${emcoprovidernetworkname}",
+ "defaultGateway": "false",
+ "ipAddress": "10.10.20.4"
+ }
+}
+EOF
+)"
+
+# define the intents to be used by the group
+deployment_intents_in_group_name="vfw_deploy_intents"
+deployment_intents_in_group_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${deployment_intents_in_group_name}",
+ "description":"descriptionf of ${deployment_intents_in_group_name}",
+ "userData1":"user data 1 for ${deployment_intents_in_group_name}",
+ "userData2":"user data 2 for ${deployment_intents_in_group_name}"
+ },
+ "spec":{
+ "intent":{
+ "genericPlacementIntent":"${generic_placement_intent_name}",
+ "ovnaction" : "${vfw_ovnaction_intent_name}"
+ }
+ }
+}
+EOF
+)"
+
+
+function createOvnactionData {
+ call_api -d "${vfw_ovnaction_intent_data}" \
+ "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/network-controller-intent"
+
+ call_api -d "${packetgen_workload_intent_data}" \
+ "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents"
+ call_api -d "${firewall_workload_intent_data}" \
+ "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents"
+ call_api -d "${sink_workload_intent_data}" \
+ "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents"
+
+ call_api -d "${packetgen_emco_interface_data}" \
+ "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${packetgen_workload_intent_name}/interfaces"
+ call_api -d "${packetgen_unprotected_interface_data}" \
+ "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${packetgen_workload_intent_name}/interfaces"
+
+ call_api -d "${firewall_emco_interface_data}" \
+ "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${firewall_workload_intent_name}/interfaces"
+ call_api -d "${firewall_unprotected_interface_data}" \
+ "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${firewall_workload_intent_name}/interfaces"
+ call_api -d "${firewall_protected_interface_data}" \
+ "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${firewall_workload_intent_name}/interfaces"
+
+ call_api -d "${sink_emco_interface_data}" \
+ "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${sink_workload_intent_name}/interfaces"
+ call_api -d "${sink_protected_interface_data}" \
+ "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${sink_workload_intent_name}/interfaces"
+}
+
+function createOrchData {
+ # Register with the orchestrator: controllers, the project, the vFW
+ # composite app with its three apps and profiles, the placement intents
+ # and the deployment intent group.  Network (ovnaction) intents are
+ # created via createOvnactionData before the deployment intent group.
+ local capp="${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}"
+
+ print_msg "creating controller entries"
+ call_api -d "${rsynccontrollerdata}" "${base_url_orchestrator}/controllers"
+ call_api -d "${ovnactioncontrollerdata}" "${base_url_orchestrator}/controllers"
+
+ print_msg "creating project entry"
+ call_api -d "${projectdata}" "${base_url_orchestrator}/projects"
+
+ print_msg "creating vfw composite app entry"
+ call_api -d "${vfw_compositeapp_data}" "${base_url_orchestrator}/projects/${projectname}/composite-apps"
+
+ print_msg "adding vfw apps to the composite app"
+ call_api -F "metadata=${packetgen_app_data}" -F "file=@${packetgen_helm_chart}" "${capp}/apps"
+ call_api -F "metadata=${firewall_app_data}" -F "file=@${firewall_helm_chart}" "${capp}/apps"
+ call_api -F "metadata=${sink_app_data}" -F "file=@${sink_helm_chart}" "${capp}/apps"
+
+ print_msg "creating vfw composite profile entry"
+ call_api -d "${vfw_composite_profile_data}" "${capp}/composite-profiles"
+
+ print_msg "adding vfw app profiles to the composite profile"
+ call_api -F "metadata=${packetgen_profile_data}" -F "file=@${packetgen_profile_file}" "${capp}/composite-profiles/${vfw_composite_profile_name}/profiles"
+ call_api -F "metadata=${firewall_profile_data}" -F "file=@${firewall_profile_file}" "${capp}/composite-profiles/${vfw_composite_profile_name}/profiles"
+ call_api -F "metadata=${sink_profile_data}" -F "file=@${sink_profile_file}" "${capp}/composite-profiles/${vfw_composite_profile_name}/profiles"
+
+ print_msg "create the generic placement intent"
+ call_api -d "${generic_placement_intent_data}" "${capp}/generic-placement-intents"
+
+ print_msg "add the vfw app placement intents to the generic placement intent"
+ call_api -d "${packetgen_placement_intent_data}" "${capp}/generic-placement-intents/${generic_placement_intent_name}/app-intents"
+ call_api -d "${firewall_placement_intent_data}" "${capp}/generic-placement-intents/${generic_placement_intent_name}/app-intents"
+ call_api -d "${sink_placement_intent_data}" "${capp}/generic-placement-intents/${generic_placement_intent_name}/app-intents"
+
+ createOvnactionData
+
+ print_msg "create the deployment intent group"
+ call_api -d "${deployment_intent_group_data}" "${capp}/deployment-intent-groups"
+ call_api -d "${deployment_intents_in_group_data}" "${capp}/deployment-intent-groups/${deployment_intent_group_name}/intents"
+}
+
+function createNcmData {
+ # Register the cluster provider and both clusters (with kubeconfigs and
+ # a common label) in clm, then define the two provider networks and the
+ # protected network on each cluster in ncm.
+ local clm="${base_url_clm}/cluster-providers/${clusterprovidername}"
+ local net1="${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${clustername}"
+ local net2="${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${clustername2}"
+
+ print_msg "Creating cluster provider and cluster"
+ call_api -d "${clusterproviderdata}" "${base_url_clm}/cluster-providers"
+ call_api -H "Content-Type: multipart/form-data" -F "metadata=${clusterdata}" -F "file=@${kubeconfigfile}" "${clm}/clusters"
+ call_api -d "${labeldata}" "${clm}/clusters/${clustername}/labels"
+ call_api -H "Content-Type: multipart/form-data" -F "metadata=${clusterdata2}" -F "file=@${kubeconfigfile2}" "${clm}/clusters"
+ call_api -d "${labeldata}" "${clm}/clusters/${clustername2}/labels"
+
+ print_msg "Creating provider network and network intents"
+ call_api -d "${emcoprovidernetworkdata}" "${net1}/provider-networks"
+ call_api -d "${unprotectedprovidernetworkdata}" "${net1}/provider-networks"
+ call_api -d "${protectednetworkdata}" "${net1}/networks"
+
+ call_api -d "${emcoprovidernetworkdata}" "${net2}/provider-networks"
+ call_api -d "${unprotectedprovidernetworkdata}" "${net2}/provider-networks"
+ call_api -d "${protectednetworkdata}" "${net2}/networks"
+}
+
+
+function createData {
+ createNcmData
+ createOrchData # this will call createOvnactionData
+}
+
+function getOvnactionData {
+ # Read back every ovnaction resource created for the vFW app: the
+ # network controller intent, its three workload intents, and each of
+ # their interface entries.
+ local nci="${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/network-controller-intent/${vfw_ovnaction_intent_name}"
+
+ call_api_nox "${nci}"
+
+ call_api_nox "${nci}/workload-intents/${packetgen_workload_intent_name}"
+ call_api_nox "${nci}/workload-intents/${firewall_workload_intent_name}"
+ call_api_nox "${nci}/workload-intents/${sink_workload_intent_name}"
+
+ call_api_nox "${nci}/workload-intents/${packetgen_workload_intent_name}/interfaces/${packetgen_emco_interface_name}"
+ call_api_nox "${nci}/workload-intents/${packetgen_workload_intent_name}/interfaces/${packetgen_unprotected_interface_name}"
+
+ call_api_nox "${nci}/workload-intents/${firewall_workload_intent_name}/interfaces/${firewall_emco_interface_name}"
+ call_api_nox "${nci}/workload-intents/${firewall_workload_intent_name}/interfaces/${firewall_unprotected_interface_name}"
+ call_api_nox "${nci}/workload-intents/${firewall_workload_intent_name}/interfaces/${firewall_protected_interface_name}"
+
+ call_api_nox "${nci}/workload-intents/${sink_workload_intent_name}/interfaces/${sink_emco_interface_name}"
+ call_api_nox "${nci}/workload-intents/${sink_workload_intent_name}/interfaces/${sink_protected_interface_name}"
+}
+
+function getOrchData {
+ # Read back every orchestrator resource created for the vFW app.
+ # JSON Accept headers are used where the resource would otherwise be
+ # returned as a file (apps and profiles).
+ local capp="${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}"
+
+ call_api_nox "${base_url_orchestrator}/controllers/${rsynccontrollername}"
+ call_api_nox "${base_url_orchestrator}/controllers/${ovnactioncontrollername}"
+
+ call_api_nox "${base_url_orchestrator}/projects/${projectname}"
+
+ call_api_nox "${capp}"
+
+ call_api_nox -H "Accept: application/json" "${capp}/apps/${packetgen_app_name}"
+ call_api_nox -H "Accept: application/json" "${capp}/apps/${firewall_app_name}"
+ call_api_nox -H "Accept: application/json" "${capp}/apps/${sink_app_name}"
+
+ call_api_nox "${capp}/composite-profiles/${vfw_composite_profile_name}"
+
+ call_api_nox -H "Accept: application/json" "${capp}/composite-profiles/${vfw_composite_profile_name}/profiles/${packetgen_profile_name}"
+ call_api_nox -H "Accept: application/json" "${capp}/composite-profiles/${vfw_composite_profile_name}/profiles/${firewall_profile_name}"
+ call_api_nox -H "Accept: application/json" "${capp}/composite-profiles/${vfw_composite_profile_name}/profiles/${sink_profile_name}"
+
+ call_api_nox "${capp}/generic-placement-intents/${generic_placement_intent_name}"
+
+ call_api_nox "${capp}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${packetgen_placement_intent_name}"
+ call_api_nox "${capp}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${firewall_placement_intent_name}"
+ call_api_nox "${capp}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${sink_placement_intent_name}"
+
+ call_api_nox "${capp}/deployment-intent-groups/${deployment_intent_group_name}"
+ call_api_nox "${capp}/deployment-intent-groups/${deployment_intent_group_name}/intents/${deployment_intents_in_group_name}"
+}
+
+function getNcmData {
+ # Query clm for the provider, clusters, label and label-based cluster
+ # query, then ncm for the provider networks and networks on both clusters.
+ local clm="${base_url_clm}/cluster-providers/${clusterprovidername}"
+ local net1="${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${clustername}"
+ local net2="${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${clustername2}"
+
+ call_api_nox "${clm}"
+ call_api_nox -H "Accept: application/json" "${clm}/clusters/${clustername}"
+ call_api_nox -H "Accept: application/json" "${clm}/clusters/${clustername2}"
+ call_api_nox "${clm}/clusters/${clustername}/labels/${labelname}"
+ call_api_nox "${clm}/clusters?label=${labelname}"
+
+ call_api_nox "${net1}/provider-networks/${emcoprovidernetworkname}"
+ call_api_nox "${net1}/provider-networks/${unprotectedprovidernetworkname}"
+ call_api_nox "${net1}/networks/${protectednetworkname}"
+
+ call_api_nox "${net2}/provider-networks/${emcoprovidernetworkname}"
+ call_api_nox "${net2}/provider-networks/${unprotectedprovidernetworkname}"
+ call_api_nox "${net2}/networks/${protectednetworkname}"
+}
+
+function getData {
+ getNcmData
+ getOrchData
+ getOvnactionData
+}
+
+function deleteOvnactionData {
+ # Delete interfaces first, then workload intents, then the network
+ # controller intent itself (children before parents).
+ local nci="${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/network-controller-intent/${vfw_ovnaction_intent_name}"
+
+ delete_resource "${nci}/workload-intents/${sink_workload_intent_name}/interfaces/${sink_protected_interface_name}"
+ delete_resource "${nci}/workload-intents/${sink_workload_intent_name}/interfaces/${sink_emco_interface_name}"
+ delete_resource "${nci}/workload-intents/${firewall_workload_intent_name}/interfaces/${firewall_protected_interface_name}"
+ delete_resource "${nci}/workload-intents/${firewall_workload_intent_name}/interfaces/${firewall_unprotected_interface_name}"
+ delete_resource "${nci}/workload-intents/${firewall_workload_intent_name}/interfaces/${firewall_emco_interface_name}"
+ delete_resource "${nci}/workload-intents/${packetgen_workload_intent_name}/interfaces/${packetgen_unprotected_interface_name}"
+ delete_resource "${nci}/workload-intents/${packetgen_workload_intent_name}/interfaces/${packetgen_emco_interface_name}"
+ delete_resource "${nci}/workload-intents/${sink_workload_intent_name}"
+ delete_resource "${nci}/workload-intents/${firewall_workload_intent_name}"
+ delete_resource "${nci}/workload-intents/${packetgen_workload_intent_name}"
+ delete_resource "${nci}"
+}
+
+function deleteOrchData {
+ # Delete orchestrator resources in reverse creation order so children
+ # are removed before their parents; ovnaction resources are removed
+ # before the composite app they hang off of.
+ local capp="${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}"
+
+ delete_resource "${base_url_orchestrator}/controllers/${rsynccontrollername}"
+ delete_resource "${base_url_orchestrator}/controllers/${ovnactioncontrollername}"
+
+ delete_resource "${capp}/deployment-intent-groups/${deployment_intent_group_name}/intents/${deployment_intents_in_group_name}"
+ delete_resource "${capp}/deployment-intent-groups/${deployment_intent_group_name}"
+
+ delete_resource "${capp}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${sink_placement_intent_name}"
+ delete_resource "${capp}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${firewall_placement_intent_name}"
+ delete_resource "${capp}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${packetgen_placement_intent_name}"
+ delete_resource "${capp}/generic-placement-intents/${generic_placement_intent_name}"
+
+ delete_resource "${capp}/composite-profiles/${vfw_composite_profile_name}/profiles/${sink_profile_name}"
+ delete_resource "${capp}/composite-profiles/${vfw_composite_profile_name}/profiles/${firewall_profile_name}"
+ delete_resource "${capp}/composite-profiles/${vfw_composite_profile_name}/profiles/${packetgen_profile_name}"
+ delete_resource "${capp}/composite-profiles/${vfw_composite_profile_name}"
+
+ delete_resource "${capp}/apps/${sink_app_name}"
+ delete_resource "${capp}/apps/${firewall_app_name}"
+ delete_resource "${capp}/apps/${packetgen_app_name}"
+
+ deleteOvnactionData
+
+ delete_resource "${capp}"
+ delete_resource "${base_url_orchestrator}/projects/${projectname}"
+}
+
+function deleteNcmData {
+ # Per cluster: networks before provider networks, then the label and
+ # cluster entries in clm; the cluster provider is removed last.
+ local clm="${base_url_clm}/cluster-providers/${clusterprovidername}"
+ local net1="${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${clustername}"
+ local net2="${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${clustername2}"
+
+ delete_resource "${net1}/networks/${protectednetworkname}"
+ delete_resource "${net1}/provider-networks/${unprotectedprovidernetworkname}"
+ delete_resource "${net1}/provider-networks/${emcoprovidernetworkname}"
+ delete_resource "${clm}/clusters/${clustername}/labels/${labelname}"
+ delete_resource "${clm}/clusters/${clustername}"
+ delete_resource "${net2}/networks/${protectednetworkname}"
+ delete_resource "${net2}/provider-networks/${unprotectedprovidernetworkname}"
+ delete_resource "${net2}/provider-networks/${emcoprovidernetworkname}"
+ delete_resource "${clm}/clusters/${clustername2}/labels/${labelname}"
+ delete_resource "${clm}/clusters/${clustername2}"
+ delete_resource "${clm}"
+}
+
+function deleteData {
+ deleteNcmData
+ deleteOrchData
+}
+
+# apply the network and providernetwork to an appcontext and instantiate with rsync
+function applyNcmData {
+ call_api -d "{ }" "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/apply"
+ call_api -d "{ }" "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${clustername2}/apply"
+}
+
+# deletes the appcontext (eventually will terminate from resource synchronizer when that functionality is ready)
+function terminateNcmData {
+ call_api -d "{ }" "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/terminate"
+ call_api -d "{ }" "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${clustername2}/terminate"
+}
+
+function instantiateVfw {
+ # call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/approve"
+ call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/instantiate"
+}
+
+
+function usage {
+ # Print the command help text and exit.  Also serves as the error exit
+ # path when arguments or required environment variables are missing.
+ # Fixed typos in the user-facing text: "applys" -> "applies",
+ # "inents" -> "intents".
+ echo "Usage: $0 create|get|delete|apply|terminate|instantiate"
+ echo " create - creates all ncm, ovnaction, clm resources needed for vfw"
+ echo " following env variables need to be set for create:"
+ echo " kubeconfigfile=<path of kubeconfig file for destination cluster>"
+ echo " kubeconfigfile2=<path of kubeconfig file for second destination cluster>"
+ echo " packetgen_helm_path=<path to helm chart file for the packet generator>"
+ echo " firewall_helm_path=<path to helm chart file for the firewall>"
+ echo " sink_helm_path=<path to helm chart file for the sink>"
+ echo " packetgen_profile_targz=<path to profile tar.gz file for the packet generator>"
+ echo " firewall_profile_targz=<path to profile tar.gz file for the firewall>"
+ echo " sink_profile_targz=<path to profile tar.gz file for the sink>"
+ echo " get - queries all resources in ncm, ovnaction, clm resources created for vfw"
+ echo " delete - deletes all resources in ncm, ovnaction, clm resources created for vfw"
+ echo " apply - applies the network intents - e.g. networks created in ncm"
+ echo " instantiate - approves and instantiates the composite app via the generic deployment intent"
+ echo " terminate - remove the network intents created by ncm"
+ echo ""
+ echo " a reasonable test sequence:"
+ echo " 1. create"
+ echo " 2. apply"
+ echo " 3. instantiate"
+
+ exit
+}
+
+# Verify that the caller provided every setting required by 'create'.
+# Each variable is assumed to default to the sentinel string "oops"
+# earlier in the script (the defaulting is outside this view -- confirm),
+# so a value of "oops" means the caller never set it.  One error line is
+# printed per missing setting, then usage (which exits) is shown if any
+# were missing.
+# NOTE(review): the messages name the user-facing env vars (e.g.
+# packetgen_helm_path) while the checks read internal names (e.g.
+# packetgen_helm_chart); presumably the mapping happens where the
+# defaults are assigned -- verify against the top of the script.
+function check_for_env_settings {
+ ok=""
+ if [ "${kubeconfigfile}" == "oops" ] ; then
+ echo -e "ERROR - kubeconfigfile environment variable needs to be set"
+ ok="no"
+ fi
+ if [ "${kubeconfigfile2}" == "oops" ] ; then
+ echo -e "ERROR - kubeconfigfile2 environment variable needs to be set"
+ ok="no"
+ fi
+ if [ "${packetgen_helm_chart}" == "oops" ] ; then
+ echo -e "ERROR - packetgen_helm_path environment variable needs to be set"
+ ok="no"
+ fi
+ if [ "${firewall_helm_chart}" == "oops" ] ; then
+ echo -e "ERROR - firewall_helm_path environment variable needs to be set"
+ ok="no"
+ fi
+ if [ "${sink_helm_chart}" == "oops" ] ; then
+ echo -e "ERROR - sink_helm_path environment variable needs to be set"
+ ok="no"
+ fi
+ if [ "${packetgen_profile_file}" == "oops" ] ; then
+ echo -e "ERROR - packetgen_profile_targz environment variable needs to be set"
+ ok="no"
+ fi
+ if [ "${firewall_profile_file}" == "oops" ] ; then
+ echo -e "ERROR - firewall_profile_targz environment variable needs to be set"
+ ok="no"
+ fi
+ if [ "${sink_profile_file}" == "oops" ] ; then
+ echo -e "ERROR - sink_profile_targz environment variable needs to be set"
+ ok="no"
+ fi
+ # Any missing variable falls through to the help text, which exits.
+ if [ "${ok}" == "no" ] ; then
+ echo ""
+ usage
+ fi
+}
+
+# Require exactly one subcommand argument.
+if [ "$#" -ne 1 ] ; then
+ usage
+fi
+
+# Dispatch on the subcommand; anything unrecognized prints usage and exits.
+case "$1" in
+ "create" )
+ check_for_env_settings
+ createData
+ ;;
+ "get" ) getData ;;
+ "delete" ) deleteData ;;
+ "apply" ) applyNcmData ;;
+ "terminate" ) terminateNcmData ;;
+ "instantiate" ) instantiateVfw ;;
+ *) usage ;;
+esac