Diffstat (limited to 'vnfs/vCPE/scripts')
-rw-r--r--  vnfs/vCPE/scripts/v_bng_init.sh       |   3
-rw-r--r--  vnfs/vCPE/scripts/v_bng_install.sh    | 273
-rw-r--r--  vnfs/vCPE/scripts/v_brgemu_init.sh    |   5
-rw-r--r--  vnfs/vCPE/scripts/v_brgemu_install.sh | 342
-rw-r--r--  vnfs/vCPE/scripts/v_gmux_init.sh      |   5
-rw-r--r--  vnfs/vCPE/scripts/v_gmux_install.sh   | 471
-rw-r--r--  vnfs/vCPE/scripts/v_gw_init.sh        |   4
-rw-r--r--  vnfs/vCPE/scripts/v_gw_install.sh     | 308
8 files changed, 1407 insertions(+), 4 deletions(-)
diff --git a/vnfs/vCPE/scripts/v_bng_init.sh b/vnfs/vCPE/scripts/v_bng_init.sh
index a9bf588e..6fb2eadc 100644
--- a/vnfs/vCPE/scripts/v_bng_init.sh
+++ b/vnfs/vCPE/scripts/v_bng_init.sh
@@ -1 +1,4 @@
#!/bin/bash
+
+systemctl start vpp
+
diff --git a/vnfs/vCPE/scripts/v_bng_install.sh b/vnfs/vCPE/scripts/v_bng_install.sh
index 8f035b44..02025b2e 100644
--- a/vnfs/vCPE/scripts/v_bng_install.sh
+++ b/vnfs/vCPE/scripts/v_bng_install.sh
@@ -4,6 +4,9 @@ REPO_URL_BLOB=$(cat /opt/config/repo_url_blob.txt)
REPO_URL_ARTIFACTS=$(cat /opt/config/repo_url_artifacts.txt)
DEMO_ARTIFACTS_VERSION=$(cat /opt/config/demo_artifacts_version.txt)
INSTALL_SCRIPT_VERSION=$(cat /opt/config/install_script_version.txt)
+VPP_SOURCE_REPO_URL=$(cat /opt/config/vpp_source_repo_url.txt)
+VPP_SOURCE_REPO_BRANCH=$(cat /opt/config/vpp_source_repo_branch.txt)
+VPP_PATCH_URL=$(cat /opt/config/vpp_patch_url.txt)
CLOUD_ENV=$(cat /opt/config/cloud_env.txt)
# Convert Network CIDR to Netmask
@@ -74,6 +77,274 @@ apt-get update
apt-get install --allow-unauthenticated -y wget openjdk-8-jdk apt-transport-https ca-certificates g++ libcurl4-gnutls-dev
sleep 1
+# Install the tools required to download and patch the code
+apt-get install -y expect git patch
+
+# Download and build the VPP code
+cd /opt
+git clone ${VPP_SOURCE_REPO_URL} -b ${VPP_SOURCE_REPO_BRANCH} vpp
+wget -O Vpp-Integrate-FreeRADIUS-Client-for-vBNG.patch ${VPP_PATCH_URL}
+
+cd vpp
+patch -p1 < ../Vpp-Integrate-FreeRADIUS-Client-for-vBNG.patch
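+# "make install-dep" may prompt before installing packages; the expect wrapper answers "Y" automatically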
+expect -c "
+ set timeout 60;
+ spawn make install-dep;
+ expect {
+ \"Do you want to continue?*\" {send \"Y\r\"; interact}
+ }
+"
+
+cd build-root
+./bootstrap.sh
+make V=0 PLATFORM=vpp TAG=vpp install-deb
+
+# Install the FreeRADIUS client since we need the lib
+cd /opt
+git clone https://github.com/FreeRADIUS/freeradius-client.git
+cd freeradius-client
+./configure
+make && make install
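+# The client library installs as libfreeradius-client.so.2.0.0; create the
+# libfreeradiusclient.so name (presumably what the patched vBNG code links against)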
+cd /usr/local/lib && ln -s -f libfreeradius-client.so.2.0.0 libfreeradiusclient.so
+ldconfig
+
+# Install the VPP package
+cd /opt/vpp/build-root
+dpkg -i *.deb
+systemctl stop vpp
+
+# Auto-start configuration for the VPP
+cat > /etc/vpp/startup.conf << EOF
+
+unix {
+ nodaemon
+ log /tmp/vpp.log
+ full-coredump
+ cli-listen localhost:5002
+ startup-config /etc/vpp/setup.gate
+}
+
+api-trace {
+ on
+}
+
+api-segment {
+ gid vpp
+}
+
+cpu {
+ ## In the VPP there is one main thread and optionally the user can create worker(s)
+ ## The main thread and worker thread(s) can be pinned to CPU core(s) manually or automatically
+
+ ## Manual pinning of thread(s) to CPU core(s)
+
+ ## Set logical CPU core where main thread runs
+ # main-core 1
+
+ ## Set logical CPU core(s) where worker threads are running
+ # corelist-workers 2-3,18-19
+
+ ## Automatic pinning of thread(s) to CPU core(s)
+
+ ## Sets number of CPU core(s) to be skipped (1 ... N-1)
+ ## Skipped CPU core(s) are not used for pinning main thread and working thread(s).
+ ## The main thread is automatically pinned to the first available CPU core and worker(s)
+ ## are pinned to next free CPU core(s) after core assigned to main thread
+ # skip-cores 4
+
+ ## Specify a number of workers to be created
+ ## Workers are pinned to N consecutive CPU cores while skipping "skip-cores" CPU core(s)
+ ## and main thread's CPU core
+ # workers 2
+
+ ## Set scheduling policy and priority of main and worker threads
+
+ ## Scheduling policy options are: other (SCHED_OTHER), batch (SCHED_BATCH)
+ ## idle (SCHED_IDLE), fifo (SCHED_FIFO), rr (SCHED_RR)
+ # scheduler-policy fifo
+
+	## Scheduling priority is used only for "real-time" policies (fifo and rr),
+ ## and has to be in the range of priorities supported for a particular policy
+ # scheduler-priority 50
+}
+
+# dpdk {
+	## Change default settings for all interfaces
+ # dev default {
+ ## Number of receive queues, enables RSS
+ ## Default is 1
+ # num-rx-queues 3
+
+	## Number of transmit queues. Default is equal
+	## to the number of worker threads, or 1 if no worker threads
+ # num-tx-queues 3
+
+ ## Number of descriptors in transmit and receive rings
+ ## increasing or reducing number can impact performance
+ ## Default is 1024 for both rx and tx
+ # num-rx-desc 512
+ # num-tx-desc 512
+
+ ## VLAN strip offload mode for interface
+ ## Default is off
+ # vlan-strip-offload on
+ # }
+
+ ## Whitelist specific interface by specifying PCI address
+ # dev 0000:02:00.0
+
+ ## Whitelist specific interface by specifying PCI address and in
+ ## addition specify custom parameters for this interface
+ # dev 0000:02:00.1 {
+ # num-rx-queues 2
+ # }
+
+ ## Change UIO driver used by VPP, Options are: igb_uio, vfio-pci
+ ## and uio_pci_generic (default)
+ # uio-driver vfio-pci
+
+	## Disable multi-segment buffers, improves performance but
+ ## disables Jumbo MTU support
+ # no-multi-seg
+
+ ## Increase number of buffers allocated, needed only in scenarios with
+ ## large number of interfaces and worker threads. Value is per CPU socket.
+ ## Default is 16384
+ # num-mbufs 128000
+
+ ## Change hugepages allocation per-socket, needed only if there is need for
+ ## larger number of mbufs. Default is 256M on each detected CPU socket
+ # socket-mem 2048,2048
+# }
+
+EOF
+
+cat > /etc/vpp/setup.gate << EOF
+set int state GigabitEthernet0/8/0 up
+set interface ip address GigabitEthernet0/8/0 10.4.0.4/24
+
+set int state GigabitEthernet0/9/0 up
+set interface ip address GigabitEthernet0/9/0 10.4.0.3/24
+
+set vbng dhcp4 remote 10.4.0.1 local 10.4.0.3
+set vbng aaa config /etc/vpp/vbng-aaa.cfg nas-port 5060
+EOF
+
+cat > /etc/vpp/vbng-aaa.cfg << EOF
+# General settings
+
+# specify which authentication comes first, i.e. which
+# authentication is used. possible values are: "radius" and "local".
+# if you specify "radius,local" the RADIUS server is asked
+# first, then the local one. if only one keyword is specified, only
+# that server is asked.
+auth_order radius,local
+
+# maximum login tries a user has
+login_tries 2
+
+# timeout for all login tries
+# if this time is exceeded the user is kicked out
+login_timeout 5
+
+# name of the nologin file which when it exists disables logins.
+# it may be extended by the ttyname which will result in
+# a terminal specific lock (e.g. /etc/nologin.ttyS2 will disable
+# logins on /dev/ttyS2)
+nologin /etc/nologin
+
+# name of the issue file. it's only displayed when no username is passed
+# on the radlogin command line
+issue /usr/local/etc/radiusclient/issue
+
+# RADIUS settings
+
+# RADIUS server to use for authentication requests. this config
+# item can appear more than once. if multiple servers are
+# defined, they are tried in a round-robin fashion when one
+# server is not answering.
+# optionally you can specify the port number on which the remote
+# RADIUS server listens, separated from the hostname by a colon. if
+# no port is specified, /etc/services is consulted for the radius
+# service. if this also fails, a compiled-in default is used.
+#authserver 10.4.0.2
+authserver localhost
+
+# RADIUS server to use for accounting requests. Everything said
+# for authserver applies here, too.
+#
+#acctserver 10.4.0.2
+acctserver localhost
+
+# file holding shared secrets used for the communication
+# between the RADIUS client and server
+servers /usr/local/etc/radiusclient/servers
+
+# dictionary of allowed attributes and values
+# just like in the normal RADIUS distributions
+dictionary /usr/local/etc/radiusclient/dictionary
+
+# program to call for a RADIUS authenticated login
+login_radius /usr/local/sbin/login.radius
+
+# file which holds sequence number for communication with the
+# RADIUS server
+seqfile /var/run/radius.seq
+
+# file which specifies mapping between ttyname and NAS-Port attribute
+mapfile /usr/local/etc/radiusclient/port-id-map
+
+# default authentication realm to append to all usernames if no
+# realm was explicitly specified by the user
+# the radiusd directly from Livingston doesn't use any realms, so leave
+# it blank in that case
+default_realm
+
+# time to wait for a reply from the RADIUS server
+radius_timeout 10
+
+# resend request this many times before trying the next server
+radius_retries 3
+
+# The length of time in seconds that we skip a nonresponsive RADIUS
+# server for transaction requests. Server(s) in the "dead" state
+# are tried only after all other non-dead servers have been tried and
+# have failed or timed out. The deadtime interval starts when the server
+# does not respond to authentication/accounting request transmissions.
+# When the interval expires, the "dead" server is retried, and if it is
+# still down it is considered "dead" for another such interval, and so on.
+# This option is a no-op if there is only one server in the list.
+# Set to 0 in order to disable the feature.
+radius_deadtime 0
+
+# local address from which radius packets have to be sent
+bindaddr *
+
+# LOCAL settings
+
+# program to execute for local login
+# it must support the -f flag for preauthenticated login
+login_local /bin/login
+EOF
+
+cat >> /usr/local/etc/radiusclient/dictionary << EOF
+
+#
+# DHCP Proxy/Relay attributes
+#
+ATTRIBUTE DHCP-Agent-Circuit-Id 82.1 integer
+ATTRIBUTE DHCP-Agent-Remote-Id 82.2 string
+ATTRIBUTE DHCP-Relay-Circuit-Id 82.1 integer
+ATTRIBUTE DHCP-Relay-Remote-Id 82.2 string
+
+EOF
+
+cat >> /usr/local/etc/radiusclient/servers << EOF
+10.4.0.2 testing123
+localhost/localhost testing123
+
+EOF
+
# Download DHCP config files
cd /opt
wget $REPO_URL_BLOB/org.onap.demo/vnfs/vcpe/$INSTALL_SCRIPT_VERSION/v_bng_init.sh
@@ -95,4 +366,4 @@ then
reboot
fi
-./v_bng_init.sh \ No newline at end of file
+./v_bng_init.sh
diff --git a/vnfs/vCPE/scripts/v_brgemu_init.sh b/vnfs/vCPE/scripts/v_brgemu_init.sh
index a9bf588e..1bf8a500 100644
--- a/vnfs/vCPE/scripts/v_brgemu_init.sh
+++ b/vnfs/vCPE/scripts/v_brgemu_init.sh
@@ -1 +1,6 @@
#!/bin/bash
+
+systemctl start vpp
+systemctl start honeycomb
+
+/opt/set_nat.sh
diff --git a/vnfs/vCPE/scripts/v_brgemu_install.sh b/vnfs/vCPE/scripts/v_brgemu_install.sh
index 71d9dffb..c4626a41 100644
--- a/vnfs/vCPE/scripts/v_brgemu_install.sh
+++ b/vnfs/vCPE/scripts/v_brgemu_install.sh
@@ -4,6 +4,11 @@ REPO_URL_BLOB=$(cat /opt/config/repo_url_blob.txt)
REPO_URL_ARTIFACTS=$(cat /opt/config/repo_url_artifacts.txt)
DEMO_ARTIFACTS_VERSION=$(cat /opt/config/demo_artifacts_version.txt)
INSTALL_SCRIPT_VERSION=$(cat /opt/config/install_script_version.txt)
+VPP_SOURCE_REPO_URL=$(cat /opt/config/vpp_source_repo_url.txt)
+VPP_SOURCE_REPO_BRANCH=$(cat /opt/config/vpp_source_repo_branch.txt)
+VPP_PATCH_URL=$(cat /opt/config/vpp_patch_url.txt)
+HC2VPP_SOURCE_REPO_URL=$(cat /opt/config/hc2vpp_source_repo_url.txt)
+HC2VPP_SOURCE_REPO_BRANCH=$(cat /opt/config/hc2vpp_source_repo_branch.txt)
CLOUD_ENV=$(cat /opt/config/cloud_env.txt)
# Convert Network CIDR to Netmask
@@ -44,6 +49,341 @@ apt-get update
apt-get install --allow-unauthenticated -y wget openjdk-8-jdk apt-transport-https ca-certificates g++ libcurl4-gnutls-dev
sleep 1
+# Install the tools required to download and patch the code
+apt-get install -y expect git patch
+
+# Download and build the VPP code
+cd /opt
+git clone ${VPP_SOURCE_REPO_URL} -b ${VPP_SOURCE_REPO_BRANCH} vpp
+wget -O VPP-Add-Option82-Nat-Filter-For-vBRG.patch ${VPP_PATCH_URL}
+
+cd vpp
+patch -p1 < ../VPP-Add-Option82-Nat-Filter-For-vBRG.patch
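+# "make install-dep" may prompt before installing packages; the expect wrapper answers "Y" automatically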
+expect -c "
+ set timeout 60;
+ spawn make install-dep;
+ expect {
+ \"Do you want to continue?*\" {send \"Y\r\"; interact}
+ }
+"
+
+cd build-root
+./bootstrap.sh
+make V=0 PLATFORM=vpp TAG=vpp install-deb
+
+# Install the VPP package
+dpkg -i *.deb
+systemctl stop vpp
+
+# Auto-start configuration for the VPP
+cat > /etc/vpp/startup.conf << EOF
+
+unix {
+ nodaemon
+ log /tmp/vpp.log
+ full-coredump
+ cli-listen localhost:5002
+ startup-config /etc/vpp/setup.gate
+}
+
+api-trace {
+ on
+}
+
+api-segment {
+ gid vpp
+}
+
+cpu {
+ ## In the VPP there is one main thread and optionally the user can create worker(s)
+ ## The main thread and worker thread(s) can be pinned to CPU core(s) manually or automatically
+
+ ## Manual pinning of thread(s) to CPU core(s)
+
+ ## Set logical CPU core where main thread runs
+ # main-core 1
+
+ ## Set logical CPU core(s) where worker threads are running
+ # corelist-workers 2-3,18-19
+
+ ## Automatic pinning of thread(s) to CPU core(s)
+
+ ## Sets number of CPU core(s) to be skipped (1 ... N-1)
+ ## Skipped CPU core(s) are not used for pinning main thread and working thread(s).
+ ## The main thread is automatically pinned to the first available CPU core and worker(s)
+ ## are pinned to next free CPU core(s) after core assigned to main thread
+ # skip-cores 4
+
+ ## Specify a number of workers to be created
+ ## Workers are pinned to N consecutive CPU cores while skipping "skip-cores" CPU core(s)
+ ## and main thread's CPU core
+ # workers 2
+
+ ## Set scheduling policy and priority of main and worker threads
+
+ ## Scheduling policy options are: other (SCHED_OTHER), batch (SCHED_BATCH)
+ ## idle (SCHED_IDLE), fifo (SCHED_FIFO), rr (SCHED_RR)
+ # scheduler-policy fifo
+
+	## Scheduling priority is used only for "real-time" policies (fifo and rr),
+ ## and has to be in the range of priorities supported for a particular policy
+ # scheduler-priority 50
+}
+
+# dpdk {
+	## Change default settings for all interfaces
+ # dev default {
+ ## Number of receive queues, enables RSS
+ ## Default is 1
+ # num-rx-queues 3
+
+	## Number of transmit queues. Default is equal
+	## to the number of worker threads, or 1 if no worker threads
+ # num-tx-queues 3
+
+ ## Number of descriptors in transmit and receive rings
+ ## increasing or reducing number can impact performance
+ ## Default is 1024 for both rx and tx
+ # num-rx-desc 512
+ # num-tx-desc 512
+
+ ## VLAN strip offload mode for interface
+ ## Default is off
+ # vlan-strip-offload on
+ # }
+
+ ## Whitelist specific interface by specifying PCI address
+ # dev 0000:02:00.0
+
+ ## Whitelist specific interface by specifying PCI address and in
+ ## addition specify custom parameters for this interface
+ # dev 0000:02:00.1 {
+ # num-rx-queues 2
+ # }
+
+ ## Change UIO driver used by VPP, Options are: igb_uio, vfio-pci
+ ## and uio_pci_generic (default)
+ # uio-driver vfio-pci
+
+	## Disable multi-segment buffers, improves performance but
+ ## disables Jumbo MTU support
+ # no-multi-seg
+
+ ## Increase number of buffers allocated, needed only in scenarios with
+ ## large number of interfaces and worker threads. Value is per CPU socket.
+ ## Default is 16384
+ # num-mbufs 128000
+
+ ## Change hugepages allocation per-socket, needed only if there is need for
+ ## larger number of mbufs. Default is 256M on each detected CPU socket
+ # socket-mem 2048,2048
+# }
+
+EOF
+
+cat > /etc/vpp/setup.gate << EOF
+set int state GigabitEthernet0/8/0 up
+set dhcp client intfc GigabitEthernet0/8/0 hostname brg-emulator
+
+tap connect lstack
+set int state tap-0 up
+
+set interface l2 bridge tap-0 10 0
+set bridge-domain arp term 10
+EOF
+
+cat >> /opt/config/ip.txt << EOF
+hcip: 192.168.1.20
+EOF
+
+# Generate the script that sets the NAT rule at boot
+cat > /opt/set_nat.sh << 'EOF'
+#! /bin/bash
+
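+# Wait until VPP is running, find the IPv4 address assigned to its
+# GigabitEthernet interface, and add a static SNAT mapping from the
+# Honeycomb tap address (the "hcip" entry in /opt/config/ip.txt) to
+# that external address.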
+while :
+do
+ if [[ ! $(ps -aux | grep [[:alnum:]]*/vpp/startup.conf | wc -l) = 2 ]]; then
+ #echo "vpp not running"
+ continue
+ fi
+ flag=0
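+    # flag=1 means the previous line named an interface (e.g. GigabitEthernet0/8/0),
+    # so the next line read should carry its IP address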
+ while read -r line
+ do
+        if [ $flag = 0 ]; then
+ re=${line#*/[0-9]/[0-9]}
+ if [ "$line" != "$re" ]; then
+ flag=1
+ else
+ flag=0
+ continue
+ fi
+ else
+ ip=${line%/*}
+ if [[ $ip = *\.*\.*\.* ]]; then
+ #echo "ip address is $ip"
+ if [ ! -f /opt/config/ip.txt ]; then
+ echo "file /opt/config/ip.txt doesn't exists"
+ continue
+ fi
+ while read -r tap_ip
+ do
+ if [[ $tap_ip = hcip* ]]; then
+ tap_ip=${tap_ip#*" "}
+ echo "hc tap ip address is $tap_ip"
+ vppctl snat add static mapping local $tap_ip external $ip
+ exit 0
+ fi
+ done < /opt/config/ip.txt
+ else
+ if [[ ! $ip = */[0-9] ]]; then
+ flag=0
+ #echo "not correct"
+ fi
+ fi
+ fi
+ done < <(vppctl show int addr)
+ sleep 1
+done
+EOF
+# Download and install HC2VPP from source
+cd /opt
+git clone ${HC2VPP_SOURCE_REPO_URL} -b ${HC2VPP_SOURCE_REPO_BRANCH} hc2vpp
+
+apt-get install -y maven
+mkdir -p ~/.m2
+cat > ~/.m2/settings.xml << EOF
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=2 tabstop=2: -->
+<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
+
+ <profiles>
+ <profile>
+ <id>fd.io-release</id>
+ <repositories>
+ <repository>
+ <id>fd.io-mirror</id>
+ <name>fd.io-mirror</name>
+ <url>https://nexus.fd.io/content/groups/public/</url>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>never</updatePolicy>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>fd.io-mirror</id>
+ <name>fd.io-mirror</name>
+ <url>https://nexus.fd.io/content/repositories/public/</url>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>never</updatePolicy>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+
+ <profile>
+ <id>fd.io-snapshots</id>
+ <repositories>
+ <repository>
+ <id>fd.io-snapshot</id>
+ <name>fd.io-snapshot</name>
+ <url>https://nexus.fd.io/content/repositories/fd.io.snapshot/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>fd.io-snapshot</id>
+ <name>fd.io-snapshot</name>
+ <url>https://nexus.fd.io/content/repositories/fd.io.snapshot/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+ <profile>
+ <id>opendaylight-snapshots</id>
+ <repositories>
+ <repository>
+ <id>opendaylight-snapshot</id>
+ <name>opendaylight-snapshot</name>
+ <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+          <id>opendaylight-snapshot</id>
+ <name>opendaylight-snapshot</name>
+ <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+ </profiles>
+
+ <activeProfiles>
+ <activeProfile>fd.io-release</activeProfile>
+ <activeProfile>fd.io-snapshots</activeProfile>
+ <activeProfile>opendaylight-snapshots</activeProfile>
+ </activeProfiles>
+</settings>
+EOF
+
+cd hc2vpp
+mvn clean install
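+# Extract the hc2vpp version from the first <version> tag in pom.xml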
+l_version=$(cat pom.xml | grep "<version>" | head -1)
+l_version=$(echo "${l_version%<*}")
+l_version=$(echo "${l_version#*>}")
+mv vpp-integration/minimal-distribution/target/vpp-integration-distribution-${l_version}-hc/vpp-integration-distribution-${l_version} /opt/honeycomb
+sed -i 's/127.0.0.1/0.0.0.0/g' /opt/honeycomb/config/honeycomb.json
+
+# Create systemctl service for Honeycomb
+cat > /etc/systemd/system/honeycomb.service << EOF
+[Unit]
+Description=Honeycomb Agent for the VPP control plane
+Documentation=https://wiki.fd.io/view/Honeycomb
+Requires=vpp.service
+After=vpp.service
+
+[Service]
+ExecStart=/opt/honeycomb/honeycomb
+Restart=always
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable /etc/systemd/system/honeycomb.service
+
# Download DHCP config files
cd /opt
wget $REPO_URL_BLOB/org.onap.demo/vnfs/vcpe/$INSTALL_SCRIPT_VERSION/v_brgemu_init.sh
@@ -65,4 +405,4 @@ then
reboot
fi
-./v_brgemu_init.sh \ No newline at end of file
+./v_brgemu_init.sh
diff --git a/vnfs/vCPE/scripts/v_gmux_init.sh b/vnfs/vCPE/scripts/v_gmux_init.sh
index a9bf588e..41730d3a 100644
--- a/vnfs/vCPE/scripts/v_gmux_init.sh
+++ b/vnfs/vCPE/scripts/v_gmux_init.sh
@@ -1 +1,6 @@
#!/bin/bash
+
+systemctl start vpp
+systemctl start honeycomb
+systemctl start autosave
+
diff --git a/vnfs/vCPE/scripts/v_gmux_install.sh b/vnfs/vCPE/scripts/v_gmux_install.sh
index 23eaacba..e7d39377 100644
--- a/vnfs/vCPE/scripts/v_gmux_install.sh
+++ b/vnfs/vCPE/scripts/v_gmux_install.sh
@@ -4,6 +4,12 @@ REPO_URL_BLOB=$(cat /opt/config/repo_url_blob.txt)
REPO_URL_ARTIFACTS=$(cat /opt/config/repo_url_artifacts.txt)
DEMO_ARTIFACTS_VERSION=$(cat /opt/config/demo_artifacts_version.txt)
INSTALL_SCRIPT_VERSION=$(cat /opt/config/install_script_version.txt)
+VPP_SOURCE_REPO_URL=$(cat /opt/config/vpp_source_repo_url.txt)
+VPP_SOURCE_REPO_BRANCH=$(cat /opt/config/vpp_source_repo_branch.txt)
+VPP_PATCH_URL=$(cat /opt/config/vpp_patch_url.txt)
+HC2VPP_SOURCE_REPO_URL=$(cat /opt/config/hc2vpp_source_repo_url.txt)
+HC2VPP_SOURCE_REPO_BRANCH=$(cat /opt/config/hc2vpp_source_repo_branch.txt)
+HC2VPP_PATCH_URL=$(cat /opt/config/hc2vpp_patch_url.txt)
CLOUD_ENV=$(cat /opt/config/cloud_env.txt)
# Convert Network CIDR to Netmask
@@ -64,6 +70,469 @@ apt-get update
apt-get install --allow-unauthenticated -y wget openjdk-8-jdk apt-transport-https ca-certificates g++ libcurl4-gnutls-dev
sleep 1
+# Install the tools required to download and patch the code
+apt-get install -y expect git patch
+
+# Download and build the VPP code
+cd /opt
+git clone ${VPP_SOURCE_REPO_URL} -b ${VPP_SOURCE_REPO_BRANCH} vpp
+wget -O Vpp-Add-VES-agent-for-vG-MUX.patch ${VPP_PATCH_URL}
+
+cd vpp
+patch -p1 < ../Vpp-Add-VES-agent-for-vG-MUX.patch
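+# "make install-dep" may prompt before installing packages; the expect wrapper answers "Y" automatically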
+expect -c "
+ set timeout 60;
+ spawn make install-dep;
+ expect {
+ \"Do you want to continue?*\" {send \"Y\r\"; interact}
+ }
+"
+
+cd build-root
+./bootstrap.sh
+make V=0 PLATFORM=vpp TAG=vpp install-deb
+
+# Install the evel-library first since we need the lib
+cd /opt
+apt-get install -y libcurl4-openssl-dev
+git clone https://github.com/att/evel-library.git
+cd evel-library/bldjobs
+make
+cp /opt/evel-library/libs/libevel.so /usr/lib
+ldconfig
+
+# Install the VPP package
+cd /opt/vpp/build-root
+dpkg -i *.deb
+systemctl stop vpp
+
+# Auto-start configuration for the VPP
+cat > /etc/vpp/startup.conf << EOF
+
+unix {
+ nodaemon
+ log /tmp/vpp.log
+ full-coredump
+ cli-listen localhost:5002
+ startup-config /etc/vpp/setup.gate
+}
+
+api-trace {
+ on
+}
+
+api-segment {
+ gid vpp
+}
+
+cpu {
+ ## In the VPP there is one main thread and optionally the user can create worker(s)
+ ## The main thread and worker thread(s) can be pinned to CPU core(s) manually or automatically
+
+ ## Manual pinning of thread(s) to CPU core(s)
+
+ ## Set logical CPU core where main thread runs
+ # main-core 1
+
+ ## Set logical CPU core(s) where worker threads are running
+ # corelist-workers 2-3,18-19
+
+ ## Automatic pinning of thread(s) to CPU core(s)
+
+ ## Sets number of CPU core(s) to be skipped (1 ... N-1)
+ ## Skipped CPU core(s) are not used for pinning main thread and working thread(s).
+ ## The main thread is automatically pinned to the first available CPU core and worker(s)
+ ## are pinned to next free CPU core(s) after core assigned to main thread
+ # skip-cores 4
+
+ ## Specify a number of workers to be created
+ ## Workers are pinned to N consecutive CPU cores while skipping "skip-cores" CPU core(s)
+ ## and main thread's CPU core
+ # workers 2
+
+ ## Set scheduling policy and priority of main and worker threads
+
+ ## Scheduling policy options are: other (SCHED_OTHER), batch (SCHED_BATCH)
+ ## idle (SCHED_IDLE), fifo (SCHED_FIFO), rr (SCHED_RR)
+ # scheduler-policy fifo
+
+	## Scheduling priority is used only for "real-time" policies (fifo and rr),
+ ## and has to be in the range of priorities supported for a particular policy
+ # scheduler-priority 50
+}
+
+# dpdk {
+	## Change default settings for all interfaces
+ # dev default {
+ ## Number of receive queues, enables RSS
+ ## Default is 1
+ # num-rx-queues 3
+
+	## Number of transmit queues. Default is equal
+	## to the number of worker threads, or 1 if no worker threads
+ # num-tx-queues 3
+
+ ## Number of descriptors in transmit and receive rings
+ ## increasing or reducing number can impact performance
+ ## Default is 1024 for both rx and tx
+ # num-rx-desc 512
+ # num-tx-desc 512
+
+ ## VLAN strip offload mode for interface
+ ## Default is off
+ # vlan-strip-offload on
+ # }
+
+ ## Whitelist specific interface by specifying PCI address
+ # dev 0000:02:00.0
+
+ ## Whitelist specific interface by specifying PCI address and in
+ ## addition specify custom parameters for this interface
+ # dev 0000:02:00.1 {
+ # num-rx-queues 2
+ # }
+
+ ## Change UIO driver used by VPP, Options are: igb_uio, vfio-pci
+ ## and uio_pci_generic (default)
+ # uio-driver vfio-pci
+
+	## Disable multi-segment buffers, improves performance but
+ ## disables Jumbo MTU support
+ # no-multi-seg
+
+ ## Increase number of buffers allocated, needed only in scenarios with
+ ## large number of interfaces and worker threads. Value is per CPU socket.
+ ## Default is 16384
+ # num-mbufs 128000
+
+ ## Change hugepages allocation per-socket, needed only if there is need for
+ ## larger number of mbufs. Default is 256M on each detected CPU socket
+ # socket-mem 2048,2048
+# }
+
+EOF
+
+cat > /etc/vpp/setup.gate << EOF
+set int state GigabitEthernet0/8/0 up
+set int ip address GigabitEthernet0/8/0 10.1.0.20/24
+
+set int state GigabitEthernet0/9/0 up
+set int ip address GigabitEthernet0/9/0 10.5.0.20/24
+
+create vxlan tunnel src 10.5.0.20 dst 10.5.0.21 vni 100
+EOF
+
+# Download and install HC2VPP from source
+cd /opt
+git clone ${HC2VPP_SOURCE_REPO_URL} -b ${HC2VPP_SOURCE_REPO_BRANCH} hc2vpp
+wget -O Hc2vpp-Add-VES-agent-for-vG-MUX.patch ${HC2VPP_PATCH_URL}
+
+apt-get install -y maven
+mkdir -p ~/.m2
+cat > ~/.m2/settings.xml << EOF
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=2 tabstop=2: -->
+<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
+
+ <profiles>
+ <profile>
+ <id>fd.io-release</id>
+ <repositories>
+ <repository>
+ <id>fd.io-mirror</id>
+ <name>fd.io-mirror</name>
+ <url>https://nexus.fd.io/content/groups/public/</url>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>never</updatePolicy>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>fd.io-mirror</id>
+ <name>fd.io-mirror</name>
+ <url>https://nexus.fd.io/content/repositories/public/</url>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>never</updatePolicy>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+
+ <profile>
+ <id>fd.io-snapshots</id>
+ <repositories>
+ <repository>
+ <id>fd.io-snapshot</id>
+ <name>fd.io-snapshot</name>
+ <url>https://nexus.fd.io/content/repositories/fd.io.snapshot/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>fd.io-snapshot</id>
+ <name>fd.io-snapshot</name>
+ <url>https://nexus.fd.io/content/repositories/fd.io.snapshot/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+ <profile>
+ <id>opendaylight-snapshots</id>
+ <repositories>
+ <repository>
+ <id>opendaylight-snapshot</id>
+ <name>opendaylight-snapshot</name>
+ <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+          <id>opendaylight-snapshot</id>
+ <name>opendaylight-snapshot</name>
+ <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+ </profiles>
+
+ <activeProfiles>
+ <activeProfile>fd.io-release</activeProfile>
+ <activeProfile>fd.io-snapshots</activeProfile>
+ <activeProfile>opendaylight-snapshots</activeProfile>
+ </activeProfiles>
+</settings>
+EOF
+
+cd hc2vpp
+patch -p1 < ../Hc2vpp-Add-VES-agent-for-vG-MUX.patch
+mvn clean install
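+# Extract the hc2vpp version from the first <version> tag in pom.xml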
+l_version=$(cat pom.xml | grep "<version>" | head -1)
+l_version=$(echo "${l_version%<*}")
+l_version=$(echo "${l_version#*>}")
+mv vpp-integration/minimal-distribution/target/vpp-integration-distribution-${l_version}-hc/vpp-integration-distribution-${l_version} /opt/honeycomb
+sed -i 's/127.0.0.1/0.0.0.0/g' /opt/honeycomb/config/honeycomb.json
+
+# Create systemctl service for Honeycomb
+cat > /etc/systemd/system/honeycomb.service << EOF
+[Unit]
+Description=Honeycomb Agent for the VPP control plane
+Documentation=https://wiki.fd.io/view/Honeycomb
+Requires=vpp.service
+After=vpp.service
+
+[Service]
+ExecStart=/opt/honeycomb/honeycomb
+Restart=always
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable /etc/systemd/system/honeycomb.service
+
+# Create a systemd service for auto-save
+cat > /usr/bin/save_config << 'EOF'
+#!/bin/bash
+
+#########################################################################
+#
+# Copyright (c) 2017 Intel and/or its affiliates.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+##########################################################################
+
+############################### Variables ################################
+VPP_SETUP_GATE=/etc/vpp/setup.gate
+
+############################### Functions ################################
+
+# Write the commands to the startup scripts.
+#
+# VPP configuration could be put directly into startup.conf, but
+# appending it to the startup script lets us restore the
+# configuration after a reboot.
+#
+write_startup_scripts()
+{
+ local cmd=${2}
+ local is_add=${1}
+
+ if [[ ${is_add} == add ]] ;then
+ while read -r line
+ do
+ if [[ ${line} == ${cmd} ]] ;then
+ return 0
+ fi
+ done < ${VPP_SETUP_GATE}
+
+ echo "${cmd}" >> ${VPP_SETUP_GATE}
+ else
+ while read -r line
+ do
+ if [[ ${line} == ${cmd} ]] ;then
+ sed -i "/${line}/d" ${VPP_SETUP_GATE}
+ return 0
+ fi
+ done < ${VPP_SETUP_GATE}
+ fi
+}
+
+# Saves the VES agent configuration to the startup script.
+#
+# Get the current VES agent configuration from the bash command:
+# $vppctl show ves agent
+# Server Addr Server Port Interval Enabled
+# 127.0.0.1 8080 10 True
+# Set the VES agent configuration with the bash command:
+# $vppctl set ves agent server 127.0.0.1 port 8080 intval 10
+#
+save_ves_config()
+{
+ local server=""
+ local port=""
+ local intval=""
+
+ local ves_config=`vppctl show ves agent | head -2 | tail -1`
+ if [ "${ves_config}" != "" ] ;then
+ server=`echo ${ves_config} | awk '{ print $1 }'`
+ port=`echo ${ves_config} | awk '{ print $2 }'`
+ intval=`echo ${ves_config} | awk '{ print $3 }'`
+ write_startup_scripts add "set ves agent server ${server} port ${port} intval ${intval}"
+ fi
+}
+
+# Save the VxLAN Tunnel Configuration to the startup script.
+#
+# Get the current VxLAN tunnel configuration with bash command:
+# $vppctl show vxlan tunnel
+# [0] src 10.3.0.2 dst 10.1.0.20 vni 100 sw_if_index 1 encap_fib_index 0 fib_entry_index 7 decap_next l2
+# [1] src 10.5.0.20 dst 10.5.0.21 vni 100 sw_if_index 2 encap_fib_index 0 fib_entry_index 8 decap_next l2
+# Set the VxLAN Tunnel with the bash command:
+# $vppctl create vxlan tunnel src 10.3.0.2 dst 10.1.0.20 vni 100
+# vxlan_tunnel0
+save_vxlan_tunnel()
+{
+ local src=""
+ local dst=""
+ local vni=""
+
+ vppctl show vxlan tunnel | while read line
+ do
+ if [ "${line}" != "" ] ;then
+ src=`echo ${line} | awk '{ print $3 }'`
+ dst=`echo ${line} | awk '{ print $5 }'`
+ vni=`echo ${line} | awk '{ print $7 }'`
+
+ write_startup_scripts add "create vxlan tunnel src ${src} dst ${dst} vni ${vni}"
+ fi
+ done
+}
+
+# Save the VxLAN tunnel L2 xconnect configuration to the startup script.
+#
+# Get the Current L2 Address configuration with bash command:
+# $vppctl show int addr
+# local0 (dn):
+# vxlan_tunnel0 (up):
+# l2 xconnect vxlan_tunnel1
+# vxlan_tunnel1 (up):
+# l2 xconnect vxlan_tunnel0
+# Save the VxLAN tunnel L2 xconnect configuration with bash command:
+# $vppctl set interface l2 xconnect vxlan_tunnel0 vxlan_tunnel1
+#
+save_vxlan_xconnect()
+{
+ local ingress=""
+ local egress=""
+
+ vppctl show int addr | while read line
+ do
+ if [[ ${line} == vxlan_tunnel* ]] ;then
+ read next
+ while [[ ${next} != l2* ]] || [[ ${next} == "" ]]
+ do
+ line=`echo ${next}`
+ read next
+ done
+ if [[ ${next} == l2* ]] ;then
+ ingress=`echo ${line} | awk '{ print $1 }'`
+ egress=`echo ${next} | awk '{ print $3 }'`
+ write_startup_scripts add "set interface l2 xconnect ${ingress} ${egress}"
+ fi
+ fi
+ done
+}
+
+################################# MAIN ###################################
+
+save_ves_config
+
+save_vxlan_tunnel
+
+save_vxlan_xconnect
+
+EOF
+chmod a+x /usr/bin/save_config
+cat > /etc/systemd/system/autosave.service << EOF
+[Unit]
+Description=Save VPP configuration at shutdown
+Requires=vpp.service
+After=vpp.service
+
+[Service]
+Type=oneshot
+RemainAfterExit=true
+ExecStop=/usr/bin/save_config
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable /etc/systemd/system/autosave.service
+
# Download DHCP config files
cd /opt
wget $REPO_URL_BLOB/org.onap.demo/vnfs/vcpe/$INSTALL_SCRIPT_VERSION/v_gmux_init.sh
@@ -85,4 +554,4 @@ then
reboot
fi
-./v_gmux_init.sh \ No newline at end of file
+./v_gmux_init.sh
diff --git a/vnfs/vCPE/scripts/v_gw_init.sh b/vnfs/vCPE/scripts/v_gw_init.sh
index a9bf588e..344374d3 100644
--- a/vnfs/vCPE/scripts/v_gw_init.sh
+++ b/vnfs/vCPE/scripts/v_gw_init.sh
@@ -1 +1,5 @@
#!/bin/bash
+
+systemctl start vpp
+systemctl start honeycomb
+
diff --git a/vnfs/vCPE/scripts/v_gw_install.sh b/vnfs/vCPE/scripts/v_gw_install.sh
index 688bce13..3a12d7ed 100644
--- a/vnfs/vCPE/scripts/v_gw_install.sh
+++ b/vnfs/vCPE/scripts/v_gw_install.sh
@@ -4,6 +4,10 @@ REPO_URL_BLOB=$(cat /opt/config/repo_url_blob.txt)
REPO_URL_ARTIFACTS=$(cat /opt/config/repo_url_artifacts.txt)
DEMO_ARTIFACTS_VERSION=$(cat /opt/config/demo_artifacts_version.txt)
INSTALL_SCRIPT_VERSION=$(cat /opt/config/install_script_version.txt)
+VPP_SOURCE_REPO_URL=$(cat /opt/config/vpp_source_repo_url.txt)
+VPP_SOURCE_REPO_BRANCH=$(cat /opt/config/vpp_source_repo_branch.txt)
+HC2VPP_SOURCE_REPO_URL=$(cat /opt/config/hc2vpp_source_repo_url.txt)
+HC2VPP_SOURCE_REPO_BRANCH=$(cat /opt/config/hc2vpp_source_repo_branch.txt)
CLOUD_ENV=$(cat /opt/config/cloud_env.txt)
# Convert Network CIDR to Netmask
@@ -54,6 +58,308 @@ apt-get update
apt-get install --allow-unauthenticated -y wget openjdk-8-jdk apt-transport-https ca-certificates g++ libcurl4-gnutls-dev
sleep 1
+# Install the tools required to download and build the code
+apt-get install -y expect git
+
+# Download and build the VPP code
+cd /opt
+git clone ${VPP_SOURCE_REPO_URL} -b ${VPP_SOURCE_REPO_BRANCH} vpp
+
+cd vpp
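+# "make install-dep" may prompt before installing packages; the expect wrapper answers "Y" automatically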
+expect -c "
+ set timeout 60;
+ spawn make install-dep;
+ expect {
+ \"Do you want to continue?*\" {send \"Y\r\"; interact}
+ }
+"
+
+cd build-root
+./bootstrap.sh
+make V=0 PLATFORM=vpp TAG=vpp install-deb
+
+# Install the VPP package
+dpkg -i *.deb
+systemctl stop vpp
+
+# Auto-start configuration for the VPP
+cat > /etc/vpp/startup.conf << EOF
+
+unix {
+ nodaemon
+ log /tmp/vpp.log
+ full-coredump
+ cli-listen localhost:5002
+ startup-config /etc/vpp/setup.gate
+}
+
+api-trace {
+ on
+}
+
+api-segment {
+ gid vpp
+}
+
+cpu {
+ ## In the VPP there is one main thread and optionally the user can create worker(s)
+ ## The main thread and worker thread(s) can be pinned to CPU core(s) manually or automatically
+
+ ## Manual pinning of thread(s) to CPU core(s)
+
+ ## Set logical CPU core where main thread runs
+ # main-core 1
+
+ ## Set logical CPU core(s) where worker threads are running
+ # corelist-workers 2-3,18-19
+
+ ## Automatic pinning of thread(s) to CPU core(s)
+
+ ## Sets number of CPU core(s) to be skipped (1 ... N-1)
+ ## Skipped CPU core(s) are not used for pinning main thread and working thread(s).
+ ## The main thread is automatically pinned to the first available CPU core and worker(s)
+ ## are pinned to next free CPU core(s) after core assigned to main thread
+ # skip-cores 4
+
+ ## Specify a number of workers to be created
+ ## Workers are pinned to N consecutive CPU cores while skipping "skip-cores" CPU core(s)
+ ## and main thread's CPU core
+ # workers 2
+
+ ## Set scheduling policy and priority of main and worker threads
+
+ ## Scheduling policy options are: other (SCHED_OTHER), batch (SCHED_BATCH)
+ ## idle (SCHED_IDLE), fifo (SCHED_FIFO), rr (SCHED_RR)
+ # scheduler-policy fifo
+
+	## Scheduling priority is used only for "real-time" policies (fifo and rr),
+ ## and has to be in the range of priorities supported for a particular policy
+ # scheduler-priority 50
+}
+
+# dpdk {
+	## Change default settings for all interfaces
+ # dev default {
+ ## Number of receive queues, enables RSS
+ ## Default is 1
+ # num-rx-queues 3
+
+	## Number of transmit queues. Default is equal
+	## to the number of worker threads, or 1 if no worker threads
+ # num-tx-queues 3
+
+ ## Number of descriptors in transmit and receive rings
+ ## increasing or reducing number can impact performance
+ ## Default is 1024 for both rx and tx
+ # num-rx-desc 512
+ # num-tx-desc 512
+
+ ## VLAN strip offload mode for interface
+ ## Default is off
+ # vlan-strip-offload on
+ # }
+
+ ## Whitelist specific interface by specifying PCI address
+ # dev 0000:02:00.0
+
+ ## Whitelist specific interface by specifying PCI address and in
+ ## addition specify custom parameters for this interface
+ # dev 0000:02:00.1 {
+ # num-rx-queues 2
+ # }
+
+ ## Change UIO driver used by VPP, Options are: igb_uio, vfio-pci
+ ## and uio_pci_generic (default)
+ # uio-driver vfio-pci
+
+	## Disable multi-segment buffers, improves performance but
+ ## disables Jumbo MTU support
+ # no-multi-seg
+
+ ## Increase number of buffers allocated, needed only in scenarios with
+ ## large number of interfaces and worker threads. Value is per CPU socket.
+ ## Default is 16384
+ # num-mbufs 128000
+
+ ## Change hugepages allocation per-socket, needed only if there is need for
+ ## larger number of mbufs. Default is 256M on each detected CPU socket
+ # socket-mem 2048,2048
+# }
+
+EOF
+
+cat > /etc/vpp/setup.gate << EOF
+set int state GigabitEthernet0/8/0 up
+set int ip address GigabitEthernet0/8/0 10.5.0.21/24
+
+set int state GigabitEthernet0/9/0 up
+set dhcp client intfc GigabitEthernet0/9/0 hostname vg-1
+
+tap connect lstack address 192.168.1.1/24
+set int state tap-0 up
+
+create vxlan tunnel src 10.5.0.21 dst 10.5.0.20 vni 100
+
+set interface l2 bridge tap-0 10 0
+set interface l2 bridge vxlan_tunnel0 10 1
+set bridge-domain arp term 10
+
+set int ip address vxlan_tunnel0 192.168.1.254/24
+set interface snat in vxlan_tunnel0 out GigabitEthernet0/9/0
+EOF
+
+# Download and install HC2VPP from source
+cd /opt
+git clone ${HC2VPP_SOURCE_REPO_URL} -b ${HC2VPP_SOURCE_REPO_BRANCH} hc2vpp
+
+apt-get install -y maven
+mkdir -p ~/.m2
+cat > ~/.m2/settings.xml << EOF
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=2 tabstop=2: -->
+<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
+
+ <profiles>
+ <profile>
+ <id>fd.io-release</id>
+ <repositories>
+ <repository>
+ <id>fd.io-mirror</id>
+ <name>fd.io-mirror</name>
+ <url>https://nexus.fd.io/content/groups/public/</url>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>never</updatePolicy>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>fd.io-mirror</id>
+ <name>fd.io-mirror</name>
+ <url>https://nexus.fd.io/content/repositories/public/</url>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>never</updatePolicy>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+
+ <profile>
+ <id>fd.io-snapshots</id>
+ <repositories>
+ <repository>
+ <id>fd.io-snapshot</id>
+ <name>fd.io-snapshot</name>
+ <url>https://nexus.fd.io/content/repositories/fd.io.snapshot/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>fd.io-snapshot</id>
+ <name>fd.io-snapshot</name>
+ <url>https://nexus.fd.io/content/repositories/fd.io.snapshot/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+ <profile>
+ <id>opendaylight-snapshots</id>
+ <repositories>
+ <repository>
+ <id>opendaylight-snapshot</id>
+ <name>opendaylight-snapshot</name>
+ <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+          <id>opendaylight-snapshot</id>
+ <name>opendaylight-snapshot</name>
+ <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+ </profiles>
+
+ <activeProfiles>
+ <activeProfile>fd.io-release</activeProfile>
+ <activeProfile>fd.io-snapshots</activeProfile>
+ <activeProfile>opendaylight-snapshots</activeProfile>
+ </activeProfiles>
+</settings>
+EOF
+
+cd hc2vpp
+mvn clean install
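+# Extract the hc2vpp version from the first <version> tag in pom.xml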
+l_version=$(cat pom.xml | grep "<version>" | head -1)
+l_version=$(echo "${l_version%<*}")
+l_version=$(echo "${l_version#*>}")
+mv vpp-integration/minimal-distribution/target/vpp-integration-distribution-${l_version}-hc/vpp-integration-distribution-${l_version} /opt/honeycomb
+sed -i 's/127.0.0.1/0.0.0.0/g' /opt/honeycomb/config/honeycomb.json
+
+# Create systemctl service for Honeycomb
+cat > /etc/systemd/system/honeycomb.service << EOF
+[Unit]
+Description=Honeycomb Agent for the VPP control plane
+Documentation=https://wiki.fd.io/view/Honeycomb
+Requires=vpp.service
+After=vpp.service
+
+[Service]
+ExecStart=/opt/honeycomb/honeycomb
+Restart=always
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
+EOF
+systemctl enable /etc/systemd/system/honeycomb.service
+
+# Install the DHCP server and config
+apt-get install -y isc-dhcp-server
+cat >> /etc/dhcp/dhcpd.conf << EOF
+subnet 192.168.1.0 netmask 255.255.255.0 {
+ range 192.168.1.2 192.168.1.253;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.1.254;
+ option broadcast-address 192.168.1.255;
+ default-lease-time 600;
+ max-lease-time 7200;
+}
+EOF
+
# Download DHCP config files
cd /opt
wget $REPO_URL_BLOB/org.onap.demo/vnfs/vcpe/$INSTALL_SCRIPT_VERSION/v_gw_init.sh
@@ -75,4 +381,4 @@ then
reboot
fi
-./v_gw_init.sh \ No newline at end of file
+./v_gw_init.sh