author    Sudhakar Reddy <Sudhakar.Reddy@amdocs.com>  2018-08-16 17:40:56 +0530
committer Sudhakar Reddy <Sudhakar.Reddy@amdocs.com>  2018-08-19 15:09:48 +0530
commit    d10ae125d70e91e88c1c8bbf67d8da10fb0da5f5 (patch)
tree      6b673c88ca24b11cbbeed6d0761e1cd955d680b4 /deployment/Azure_ARM_Template
parent    acbbe67aa4fd06c3c445d2a93b6700ae0ecddc23 (diff)
ARM template to create a k8s cluster on Azure
Also, updated the Helm version to 2.9.1

Change-Id: I8ac57c19c135370d9189b83947b4e67216e63740
Issue-ID: INT-626
Signed-off-by: Sudhakar Reddy <Sudhakar.Reddy@amdocs.com>
Diffstat (limited to 'deployment/Azure_ARM_Template')
-rw-r--r--  deployment/Azure_ARM_Template/arm_cluster_deploy_beijing.json      422
-rw-r--r--  deployment/Azure_ARM_Template/arm_cluster_deploy_parameters.json    11
-rw-r--r--  deployment/Azure_ARM_Template/scripts/azure-k8s-node.sh             54
-rw-r--r--  deployment/Azure_ARM_Template/scripts/azure-rancher-server.sh      190
4 files changed, 677 insertions, 0 deletions
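
A minimal usage sketch, assuming the Azure CLI (az) is installed and logged in; the resource group name "onap-k8s-rg" and region "eastus" are placeholders, not part of the commit:

# create a resource group, then deploy the template with the checked-in parameters file
az group create --name onap-k8s-rg --location eastus
az group deployment create \
  --resource-group onap-k8s-rg \
  --template-file arm_cluster_deploy_beijing.json \
  --parameters @arm_cluster_deploy_parameters.json
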
diff --git a/deployment/Azure_ARM_Template/arm_cluster_deploy_beijing.json b/deployment/Azure_ARM_Template/arm_cluster_deploy_beijing.json
new file mode 100644
index 000000000..79dda59e8
--- /dev/null
+++ b/deployment/Azure_ARM_Template/arm_cluster_deploy_beijing.json
@@ -0,0 +1,422 @@
+{
+ "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
+ "contentVersion": "1.0.0.0",
+ "parameters": {
+ "centosOSVersion": {
+ "type": "string",
+ "defaultValue": "16.04.0-LTS",
+ "allowedValues": [
+ "12.04.5-LTS","14.04.5-LTS","15.10","16.04.0-LTS"
+ ],
+ "metadata": {
+ "description": "The OS"
+ }
+ },
+ "numberOfVms": {
+ "type": "int",
+ "defaultValue": 5,
+ "minValue": 1,
+ "maxValue": 15,
+ "metadata": {
+ "description": "Number of VMs to provision"
+ }
+ },
+ "privateIPAddress": {
+ "type": "string",
+ "defaultValue": "10.0.0.2",
+ "metadata": {
+ "description": "Static Private IP will be assigned to the machine"
+ }
+ },
+ "masterScriptName": {
+ "type": "string",
+ "metadata": {
+ "description": "entrypoint script name for k8s master"
+ }
+ },
+ "nodeScriptName": {
+ "type": "string",
+ "metadata": {
+ "description": "entrypoint script name for k8s node"
+ }
+ },
+ "vmSize": {
+ "type": "string",
+ "defaultValue": "Standard_D14_v2",
+ "allowedValues": [
+ "Standard_E8_v3",
+ "Standard_E2_v3",
+ "Standard_D1",
+ "Standard_D16s_v3",
+ "Standard_D4_v2",
+ "Standard_D32s_v3",
+ "Standard_E16_v3",
+ "Standard_D14_v2",
+ "Standard_D13_v2",
+ "Standard_E64_v3"
+ ],
+ "metadata": {
+ "description": "VM size"
+ }
+ },
+ "dnslabel": {
+ "type": "string",
+ "metadata": {
+ "description": "Unique DNS label to assign DNS name"
+ }
+ }
+
+ },
+ "variables": {
+ "dnsLabelPrefix": "[concat('dns-',uniquestring(resourceGroup().id))]",
+ "customData": [ "userdata.txt" ],
+ "vmName": "[concat('k8s-host-', substring(uniquestring(resourceGroup().id),0,4))]",
+ "adminUsername": "[concat('ubuntu')]",
+ "adminPassword": "Qwertyuiop@@1",
+ "sshKeyData": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD5zrmH1dHgXbNwP2qbNVySScnFVcEP25HBd2VJu2PiJLDhwgHj44Lj9ZvLyRFCetqd8CAKnLV5qy37rwaCtlH/t8Qb36cUGPhegxpF2++uTY0b6K7Zb6hEMBNw3J1z+GU7OoVwZJhsNAw4t8/7WWmJA4Owo99TJkEKvhCYjBCLoC5sIvG/lJsaFIG8A5MjnBlwgSZ3FsUU+aY1KYZUztodkyv7laDMOinwSvJggKrCugsqZdVo5bhmcSFbqrZa/a/wgqeok+79W0/DLh5Tlf7By46ASDKGnFlwDshPu++I3KMU3eRz0rJLOKeIUCz7k80X0WJ6BrSS7l+IrpDXV1M5 ubuntu@aria",
+ "storageAccountName": "[concat('salinuxvm', substring(uniquestring(resourceGroup().id),0,4))]",
+ "imagePublisher": "Canonical",
+ "imageOffer": "UbuntuServer",
+ "nicName": "[concat('VMNic-',variables('vmName'))]",
+ "dnsPrefix": "[concat(variables('dnsLabelPrefix'),'-',substring(uniquestring(resourceGroup().id),0,4))]",
+ "addressPrefix": "10.0.0.0/16",
+ "subnetName": "Subnet",
+ "subnetPrefix": "10.0.0.0/24",
+ "storageAccountType": "Standard_LRS",
+ "publicIPAddressType": "Dynamic",
+ "publicIPAddressName": "nicLoop100",
+ "virtualNetworkName": "[concat('VNET-',variables('vmName'))]",
+ "subnetRef": "[resourceId('Microsoft.Network/virtualNetworks/subnets/', variables('virtualNetworkName'), variables('subnetName'))]",
+ "networkSecurityGroupName": "[concat(variables('vmName'), '_obrien_local_nsg')]",
+ "sshKeyPathRoot": "[concat('/root/','/.ssh/authorized_keys')]",
+ "sshKeyPath": "[concat('/home/',variables('adminUsername'),'/.ssh/authorized_keys')]",
+ "availabilitySetName": "[concat('availabilitySet-',substring(uniquestring(resourceGroup().id),0,4))]"
+ },
+ "resources": [
+ {
+ "type": "Microsoft.Compute/availabilitySets",
+ "name": "[variables('availabilitySetName')]",
+ "apiVersion": "2016-04-30-preview",
+ "location": "[resourceGroup().location]",
+ "properties": {
+ "managed": true,
+ "platformFaultDomainCount": 3,
+ "platformUpdateDomainCount": 3
+ }
+ },
+ {
+ "type": "Microsoft.Storage/storageAccounts",
+ "name": "[variables('storageAccountName')]",
+ "apiVersion": "2017-06-01",
+ "location": "[resourceGroup().location]",
+ "sku": {
+ "name": "[variables('storageAccountType')]"
+ },
+ "kind": "Storage",
+ "properties": {}
+ },
+ {
+ "apiVersion": "2017-03-01",
+ "type": "Microsoft.Network/networkSecurityGroups",
+ "name": "[variables('networkSecurityGroupName')]",
+ "location": "[resourceGroup().location]",
+ "tags": {
+ "displayName": "NSG"
+ },
+ "properties": {
+ "securityRules": [
+ {
+ "name": "SSHAllowAny",
+ "properties": {
+ "description": "SSHAllowAny",
+ "protocol": "TCP",
+ "sourcePortRange": "*",
+ "destinationPortRange": "22",
+ "sourceAddressPrefix": "*",
+ "destinationAddressPrefix": "*",
+ "access": "Allow",
+ "priority": 100,
+ "direction": "Inbound"
+ }
+ },
+ {
+ "name": "DockerAllowAny",
+ "properties": {
+ "description": "DockerAllowAny",
+ "protocol": "TCP",
+ "sourcePortRange": "*",
+ "destinationPortRange": "2376",
+ "sourceAddressPrefix": "*",
+ "destinationAddressPrefix": "*",
+ "access": "Allow",
+ "priority": 110,
+ "direction": "Inbound"
+ }
+ },
+ {
+ "name": "port500-UdpAllowAny",
+ "properties": {
+ "description": "port500-udpAllowAny",
+ "protocol": "UDP",
+ "sourcePortRange": "*",
+ "destinationPortRange": "500",
+ "sourceAddressPrefix": "*",
+ "destinationAddressPrefix": "*",
+ "access": "Allow",
+ "priority": 120,
+ "direction": "Inbound"
+ }
+ },
+ {
+ "name": "port4500-UdpAllowAny",
+ "properties": {
+ "description": "port4500-udpAllowAny",
+ "protocol": "UDP",
+ "sourcePortRange": "*",
+ "destinationPortRange": "4500",
+ "sourceAddressPrefix": "*",
+ "destinationAddressPrefix": "*",
+ "access": "Allow",
+ "priority": 130,
+ "direction": "Inbound"
+ }
+ },
+
+ {
+ "name": "port_10249-10255_172",
+ "properties": {
+ "description": "port_10249-10255_172",
+ "protocol": "*",
+ "sourcePortRange": "*",
+ "destinationPortRange": "10249-10255",
+ "sourceAddressPrefix": "*",
+ "destinationAddressPrefix": "*",
+ "access": "Allow",
+ "priority": 140,
+ "direction": "Inbound"
+ }
+ },
+ {
+ "name": "in-rule",
+ "properties": {
+ "description": "All in",
+ "protocol": "Tcp",
+ "sourcePortRange": "*",
+ "destinationPortRange": "*",
+ "sourceAddressPrefix": "Internet",
+ "destinationAddressPrefix": "*",
+ "access": "Allow",
+ "priority": 170,
+ "direction": "Inbound"
+ }
+ },
+ {
+ "name": "block-8080",
+ "properties": {
+ "description": "block-8080",
+ "protocol": "Tcp",
+ "sourcePortRange": "8080",
+ "destinationPortRange": "*",
+ "sourceAddressPrefix": "Internet",
+ "destinationAddressPrefix": "*",
+ "access": "Deny",
+ "priority": 104,
+ "direction": "Outbound"
+ }
+ },
+ {
+ "name": "out-rule",
+ "properties": {
+ "description": "All out",
+ "protocol": "Tcp",
+ "sourcePortRange": "*",
+ "destinationPortRange": "*",
+ "sourceAddressPrefix": "Internet",
+ "destinationAddressPrefix": "*",
+ "access": "Allow",
+ "priority": 110,
+ "direction": "Outbound"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "apiVersion": "2017-04-01",
+ "type": "Microsoft.Network/virtualNetworks",
+ "name": "[variables('virtualNetworkName')]",
+ "location": "[resourceGroup().location]",
+ "dependson": [
+ "[concat('Microsoft.Network/networkSecurityGroups/', variables('networkSecurityGroupName'))]"
+ ],
+ "properties": {
+ "addressSpace": {
+ "addressPrefixes": [
+ "[variables('addressPrefix')]"
+ ]
+ },
+ "subnets": [
+ {
+ "name": "[variables('subnetName')]",
+ "properties": {
+ "addressPrefix": "[variables('subnetPrefix')]",
+ "networkSecurityGroup": {
+ "id": "[resourceId('Microsoft.Network/networkSecurityGroups', variables('networkSecurityGroupName'))]"
+ }
+ }
+ }
+ ]
+ }
+ },
+ {
+ "apiVersion": "2017-08-01",
+ "type": "Microsoft.Network/networkInterfaces",
+ "name": "[concat(variables('nicName'), copyindex())]",
+ "location": "[resourceGroup().location]",
+ "copy": {
+ "name": "nicLoop",
+ "count": "[parameters('numberOfVms')]"
+ },
+ "dependsOn": [
+ "[resourceId('Microsoft.Network/publicIPAddresses/', concat('nicLoop',copyIndex(100)))]",
+ "[resourceId('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]"
+ ],
+ "properties": {
+ "ipConfigurations": [
+ {
+ "name": "ipconfig1",
+ "properties": {
+ "privateIPAllocationMethod": "Static",
+ "privateIPAddress": "[concat(parameters('privateIPAddress'),copyindex())]",
+ "publicIPAddress": {
+ "id": "[resourceId('Microsoft.Network/publicIPAddresses', concat('nicLoop',copyIndex(100)))]"
+ },
+ "subnet": {
+ "id": "[variables('subnetRef')]"
+ }
+ }
+ }
+ ]
+ }
+ },
+ {
+ "apiVersion": "2017-04-01",
+ "type": "Microsoft.Network/publicIPAddresses",
+ "name": "[concat('nicLoop',copyIndex(100))]",
+ "location": "[resourceGroup().location]",
+ "copy": {
+ "name": "nicLoop",
+ "count": "[parameters('numberOfVms')]"
+ },
+ "properties": {
+ "publicIPAllocationMethod": "Dynamic",
+ "dnsSettings": {
+ "domainNameLabel": "[concat(variables('vmName'),parameters('dnslabel'), copyIndex(1000))]"
+ }
+ }
+ },
+ {
+ "apiVersion": "2017-03-30",
+ "type": "Microsoft.Compute/virtualMachines",
+ "name": "[concat(variables('vmName'), copyindex())]",
+ "copy": {
+ "name": "virtualMachineLoop",
+ "count": "[parameters('numberOfVms')]"
+ },
+ "location": "[resourceGroup().location]",
+ "dependsOn": [
+ "nicLoop",
+ "[concat('Microsoft.Compute/availabilitySets/',variables('availabilitySetName'))]"
+ ],
+ "properties": {
+ "hardwareProfile": {
+ "vmSize": "[parameters('vmSize')]"
+ },
+ "availabilitySet": {
+ "id": "[resourceId('Microsoft.Compute/availabilitySets',variables('availabilitySetName'))]"
+ },
+ "osProfile": {
+ "computerName": "[concat(variables('vmName'), copyindex())]",
+ "adminUsername": "[variables('adminUsername')]",
+ "adminPassword": "[variables('adminPassword')]",
+ "linuxConfiguration": {
+ "disablePasswordAuthentication": false,
+ "ssh": {
+ "publicKeys": [
+ {
+ "path": "[variables('sshKeyPath')]",
+ "keyData": "[variables('sshKeyData')]"
+ }
+ ]
+ }
+ }
+ },
+ "storageProfile": {
+ "imageReference": {
+ "publisher": "[variables('imagePublisher')]",
+ "offer": "[variables('imageOffer')]",
+ "sku": "[parameters('centosOSVersion')]",
+ "version": "latest"
+ },
+ "osDisk": {
+ "diskSizeGB": 127,
+ "createOption": "FromImage"
+ }
+ },
+ "networkProfile": {
+ "networkInterfaces": [
+ {
+ "id": "[resourceId('Microsoft.Network/networkInterfaces',concat(variables('nicName'),copyindex()))]"
+ }
+ ]
+ },
+ "diagnosticsProfile": {
+ "bootDiagnostics": {
+ "enabled": true,
+ "storageUri": "[concat(reference(concat('Microsoft.Storage/storageAccounts/', variables('storageAccountName')), '2016-01-01').primaryEndpoints.blob)]"
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "2015-06-15",
+ "type": "Microsoft.Compute/virtualMachines/extensions",
+ "name": "[concat(variables('vmName'), '0','/onap')]",
+ "location": "[resourceGroup().location]",
+ "dependsOn": ["virtualMachineLoop"],
+ "properties": {
+ "publisher": "Microsoft.Azure.Extensions",
+ "type": "CustomScript",
+ "typeHandlerVersion": "2.0",
+ "autoUpgradeMinorVersion": true,
+ "settings": {
+ "fileUris": [ "https://gerrit.onap.org/r/gitweb?p=integration.git;a=blob_plain;f=Azure_ARM_Template/scripts/azure-rancher-server.sh;hb=refs/heads/master" ],
+ "commandToExecute": "[concat('./' , parameters('masterScriptName'),' ',reference(variables('publicIPAddressName')).dnsSettings.fqdn,' ',parameters('privateIPAddress'),' ',parameters('numberOfVms'))]"
+ }
+ }
+ },
+ {
+ "apiVersion": "2015-06-15",
+ "type": "Microsoft.Compute/virtualMachines/extensions",
+ "name": "[concat(variables('vmName'), copyindex(1),'/onap')]",
+ "copy": {
+ "name": "virtualMachineExtnLoop",
+ "count": "[sub(parameters('numberOfVms'),1)]"
+ },
+ "location": "[resourceGroup().location]",
+ "dependsOn": [
+ "virtualMachineLoop"
+ ],
+ "properties": {
+ "publisher": "Microsoft.Azure.Extensions",
+ "type": "CustomScript",
+ "typeHandlerVersion": "2.0",
+ "autoUpgradeMinorVersion": true,
+ "settings": {
+ "fileUris": [ "https://gerrit.onap.org/r/gitweb?p=integration.git;a=blob_plain;f=Azure_ARM_Template/scripts/azure-k8s-node.sh;hb=refs/heads/master" ],
+ "commandToExecute": "[concat('./' , parameters('nodeScriptName'),' ',concat(parameters('privateIPAddress'),'0'))]"
+ }
+ }
+ }
+ ]
+}
diff --git a/deployment/Azure_ARM_Template/arm_cluster_deploy_parameters.json b/deployment/Azure_ARM_Template/arm_cluster_deploy_parameters.json
new file mode 100644
index 000000000..081ef7ffc
--- /dev/null
+++ b/deployment/Azure_ARM_Template/arm_cluster_deploy_parameters.json
@@ -0,0 +1,11 @@
+{
+ "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#",
+ "contentVersion": "1.0.0.0",
+ "parameters": {
+ "numberOfVms": { "value": 12 },
+ "vmSize": { "value": "Standard_D4_v2" },
+ "masterScriptName": { "value": "azure-rancher-server.sh" },
+ "nodeScriptName": { "value": "azure-k8s-node.sh" },
+ "dnslabel": { "value": "ranchercluster" }
+ }
+}
diff --git a/deployment/Azure_ARM_Template/scripts/azure-k8s-node.sh b/deployment/Azure_ARM_Template/scripts/azure-k8s-node.sh
new file mode 100644
index 000000000..919d14806
--- /dev/null
+++ b/deployment/Azure_ARM_Template/scripts/azure-k8s-node.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+DOCKER_VERSION=17.03
+KUBECTL_VERSION=1.8.10
+HELM_VERSION=2.9.1
+
+# setup root access - default login: oom/oom - comment out to restrict access to ssh key only
+sed -i 's/PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
+sed -i 's/PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config
+service sshd restart
+echo -e "oom\noom" | passwd root
+
+apt-get update
+curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
+mkdir -p /etc/systemd/system/docker.service.d/
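+# systemd drop-in so dockerd trusts the ONAP nexus3 docker registry (insecure-registry)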
+cat > /etc/systemd/system/docker.service.d/docker.conf << EOF
+[Service]
+ExecStart=
+ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001
+EOF
+systemctl daemon-reload
+systemctl restart docker
+apt-mark hold docker-ce
+
+#IP_ADDY=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'`
+#HOSTNAME=`hostname`
+
+#echo "$IP_ADDY $HOSTNAME" >> /etc/hosts
+
+docker login -u docker -p docker nexus3.onap.org:10001
+
+sudo apt-get install make -y
+
+sudo curl -LO https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
+sudo chmod +x ./kubectl
+sudo mv ./kubectl /usr/local/bin/kubectl
+sudo mkdir ~/.kube
+wget http://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz
+sudo tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
+sudo mv linux-amd64/helm /usr/local/bin/helm
+
+# install nfs
+sudo apt-get install nfs-common -y
+
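+# arg 1 (from the template's CustomScript commandToExecute): private IP of the rancher/NFS master, i.e. the privateIPAddress prefix with "0" appended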
+MASTER_IP=$1
+
+#Create NFS directory
+sudo mkdir -p /dockerdata-nfs
+
+#Mount the remote NFS directory to the local one
+sudo mount $MASTER_IP:/dockerdata-nfs /dockerdata-nfs/
+echo "$MASTER_IP:/dockerdata-nfsĀ /dockerdata-nfs nfs auto,nofail,noatime,nolock,intr,tcp,actimeo=1800 0 0" | sudo tee -a /etc/fstab
+
+exit 0
diff --git a/deployment/Azure_ARM_Template/scripts/azure-rancher-server.sh b/deployment/Azure_ARM_Template/scripts/azure-rancher-server.sh
new file mode 100644
index 000000000..1c4373c4d
--- /dev/null
+++ b/deployment/Azure_ARM_Template/scripts/azure-rancher-server.sh
@@ -0,0 +1,190 @@
+#!/bin/bash
+
+set -x
+
+DOCKER_VERSION=17.03
+RANCHER_VERSION=1.6.14
+KUBECTL_VERSION=1.8.10
+HELM_VERSION=2.9.1
+
+# setup root access - default login: oom/oom - comment out to restrict access to ssh key only
+sed -i 's/PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
+sed -i 's/PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config
+service sshd restart
+echo -e "oom\noom" | passwd root
+
+apt-get update
+curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
+mkdir -p /etc/systemd/system/docker.service.d/
+cat > /etc/systemd/system/docker.service.d/docker.conf << EOF
+[Service]
+ExecStart=
+ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001
+EOF
+systemctl daemon-reload
+systemctl restart docker
+apt-mark hold docker-ce
+
+#IP_ADDY=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'`
+#HOSTNAME=`hostname`
+
+#echo "$IP_ADDY $HOSTNAME" >> /etc/hosts
+
+docker login -u docker -p docker nexus3.onap.org:10001
+
+sudo apt-get install make -y
+
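+# start the Rancher server container (UI/API on port 8080); the worker nodes are registered against it below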
+sudo docker run -d --restart=unless-stopped -p 8080:8080 --name rancher_server rancher/server:v$RANCHER_VERSION
+sudo curl -LO https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
+sudo chmod +x ./kubectl
+sudo mv ./kubectl /usr/local/bin/kubectl
+sudo mkdir ~/.kube
+wget http://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz
+sudo tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
+sudo mv linux-amd64/helm /usr/local/bin/helm
+
+# nfs server
+sudo apt-get install nfs-kernel-server -y
+
+sudo mkdir -p /nfs_share
+sudo chown nobody:nogroup /nfs_share/
+
+
+sudo mkdir -p /dockerdata-nfs
+sudo chmod 777 -R /dockerdata-nfs
+sudo chown nobody:nogroup /dockerdata-nfs/
+
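+# export /dockerdata-nfs to all hosts; the worker nodes mount it in azure-k8s-node.sh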
+NFS_EXP="*(rw,sync,no_root_squash,no_subtree_check) "
+
+echo "/dockerdata-nfs "$NFS_EXP | sudo tee -a /etc/exports
+
+#Restart the NFS service
+sudo exportfs -a
+sudo systemctl restart nfs-kernel-server
+
+echo "wait before installing rancher server"
+sleep 60
+
+# Create ONAP environment on rancher and register the nodes...
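+# args passed by the template's CustomScript commandToExecute: public FQDN of this host, private IP prefix, number of VMs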
+SERVER=$1
+PRIVATE_IP=$2
+NODE_COUNT=$3
+
+echo "SERVER: ${SERVER}"
+echo "PRIVATE_IP: ${PRIVATE_IP}"
+echo "NODE_COUNT: ${NODE_COUNT}"
+#install sshpass to login to the k8s nodes to run rancher agent
+sudo apt-get install sshpass -y
+
+# create kubernetes environment on rancher using cli
+RANCHER_CLI_VER=0.6.7
+KUBE_ENV_NAME='onap'
+wget https://releases.rancher.com/cli/v${RANCHER_CLI_VER}/rancher-linux-amd64-v${RANCHER_CLI_VER}.tar.gz
+sudo tar -zxvf rancher-linux-amd64-v${RANCHER_CLI_VER}.tar.gz
+sudo cp rancher-v${RANCHER_CLI_VER}/rancher .
+sudo chmod +x ./rancher
+
+sudo apt install jq -y
+echo "wait for rancher server container to finish - 3 min"
+sleep 60
+echo "2 more min"
+sleep 60
+echo "1 min left"
+sleep 60
+echo "get public and private tokens back to the rancher server so we can register the client later"
+API_RESPONSE=`curl -s "http://$SERVER:8080/v2-beta/apikey" -d '{"type":"apikey","accountId":"1a1","name":"autoinstall","description":"autoinstall","created":null,"kind":null,"removeTime":null,"removed":null,"uuid":null}'`
+# Extract and store token
+echo "API_RESPONSE: $API_RESPONSE"
+KEY_PUBLIC=`echo $API_RESPONSE | jq -r .publicValue`
+KEY_SECRET=`echo $API_RESPONSE | jq -r .secretValue`
+echo "publicValue: $KEY_PUBLIC secretValue: $KEY_SECRET"
+
+export RANCHER_URL=http://${SERVER}:8080
+export RANCHER_ACCESS_KEY=$KEY_PUBLIC
+export RANCHER_SECRET_KEY=$KEY_SECRET
+./rancher env ls
+echo "wait 60 sec for rancher environments can settle before we create the onap kubernetes one"
+sleep 60
+
+echo "Creating kubernetes environment named ${KUBE_ENV_NAME}"
+./rancher env create -t kubernetes $KUBE_ENV_NAME > kube_env_id.json
+PROJECT_ID=$(<kube_env_id.json)
+echo "env id: $PROJECT_ID"
+export RANCHER_HOST_URL=http://${SERVER}:8080/v1/projects/$PROJECT_ID
+echo "you should see an additional kubernetes environment usually with id 1a7"
+./rancher env ls
+# optionally disable cattle env
+
+# add host registration url
+# https://github.com/rancher/rancher/issues/2599
+# wait for REGISTERING to ACTIVE
+echo "sleep 60 to wait for REG to ACTIVE"
+./rancher env ls
+sleep 30
+echo "check on environments again before registering the URL response"
+./rancher env ls
+sleep 30
+REG_URL_RESPONSE=`curl -X POST -u $KEY_PUBLIC:$KEY_SECRET -H 'Accept: application/json' -H 'Content-Type: application/json' -d "{\"name\":\"$SERVER\"}" "http://$SERVER:8080/v1/projects/$PROJECT_ID/registrationtokens"`
+echo "REG_URL_RESPONSE: $REG_URL_RESPONSE"
+echo "wait for server to finish url configuration - 2 min"
+sleep 60
+echo "60 more sec"
+sleep 60
+
+# see registrationUrl in
+REGISTRATION_TOKENS=`curl http://$SERVER:8080/v2-beta/registrationtokens`
+echo "REGISTRATION_TOKENS: $REGISTRATION_TOKENS"
+REGISTRATION_URL=`echo $REGISTRATION_TOKENS | jq -r .data[0].registrationUrl`
+REGISTRATION_DOCKER=`echo $REGISTRATION_TOKENS | jq -r .data[0].image`
+REGISTRATION_TOKEN=`echo $REGISTRATION_TOKENS | jq -r .data[0].token`
+echo "Registering host for image: $REGISTRATION_DOCKER url: $REGISTRATION_URL registrationToken: $REGISTRATION_TOKEN"
+HOST_REG_COMMAND=`echo $REGISTRATION_TOKENS | jq -r .data[0].command`
+
+#Loop using the private IP and the no of VMS to SSH into each machine
+for i in `seq 1 $((${NODE_COUNT}-1))`;
+do
+ NODE_IP=${PRIVATE_IP}$i
+ sshpass -p "oom" ssh -o StrictHostKeyChecking=no root@${NODE_IP} "hostnamectl set-hostname node$i && docker run --rm --privileged -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/racher:/var/lib/rancher $REGISTRATION_DOCKER $RANCHER_URL/v1/scripts/$REGISTRATION_TOKEN"
+done
+
+echo "waiting 10 min for host registration to finish"
+sleep 540
+echo "1 more min"
+sleep 60
+#read -p "wait for host registration to complete before generating the client token....."
+
+# base64 encode the kubectl token from the auth pair
+# generate this after the host is registered
+KUBECTL_TOKEN=$(echo -n 'Basic '$(echo -n "$RANCHER_ACCESS_KEY:$RANCHER_SECRET_KEY" | base64 -w 0) | base64 -w 0)
+echo "KUBECTL_TOKEN base64 encoded: ${KUBECTL_TOKEN}"
+# add kubectl config - NOTE: the following spacing has to be "exact" or kubectl will not connect - with a localhost:8080 error
+# name the kubeconfig cluster/context after the rancher kubernetes environment created above
+ENVIRON=$KUBE_ENV_NAME
+cat > ~/.kube/config <<EOF
+apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+ api-version: v1
+ insecure-skip-tls-verify: true
+ server: "https://$SERVER:8080/r/projects/$PROJECT_ID/kubernetes:6443"
+ name: "${ENVIRON}"
+contexts:
+- context:
+ cluster: "${ENVIRON}"
+ user: "${ENVIRON}"
+ name: "${ENVIRON}"
+current-context: "${ENVIRON}"
+users:
+- name: "${ENVIRON}"
+ user:
+ token: "$KUBECTL_TOKEN"
+
+EOF
+
+echo "run the following if you installed a higher kubectl version than the server"
+echo "helm init --upgrade"
+echo "Verify all pods up on the kubernetes system - will return localhost:8080 until a host is added"
+echo "kubectl get pods --all-namespaces"
+kubectl get pods --all-namespaces
+
+
+exit 0