author    Gary Wu <gary.i.wu@huawei.com>  2018-03-09 13:38:28 -0800
committer Gary Wu <gary.i.wu@huawei.com>  2018-03-12 15:48:30 -0700
commit    57abb1a50bd5d08d9df687210af913f7b5d48a09 (patch)
tree      44fc62ccb894a4ee7fe4aecf97b44caf0adad103 /deployment
parent    5adbe338af8ab9334525ebd1ded241302fed8a6b (diff)
Switch to OOM master branch
Change-Id: Id22337bd61425c69be88fd80f86bc501f46d7e78
Issue-ID: INT-439
Signed-off-by: Gary Wu <gary.i.wu@huawei.com>
Diffstat (limited to 'deployment')
-rw-r--r--  deployment/heat/onap-oom/k8s_vm_entrypoint.sh      178
-rw-r--r--  deployment/heat/onap-oom/onap-oom.yaml                1
-rw-r--r--  deployment/heat/onap-oom/rancher_vm_entrypoint.sh     2
-rwxr-xr-x  deployment/heat/onap-oom/scripts/deploy.sh            2
4 files changed, 21 insertions(+), 162 deletions(-)
diff --git a/deployment/heat/onap-oom/k8s_vm_entrypoint.sh b/deployment/heat/onap-oom/k8s_vm_entrypoint.sh
index 9118c8e24..119e40a04 100644
--- a/deployment/heat/onap-oom/k8s_vm_entrypoint.sh
+++ b/deployment/heat/onap-oom/k8s_vm_entrypoint.sh
@@ -23,8 +23,8 @@ apt-get -y install linux-image-extra-$(uname -r) jq
cd ~
-# install docker 1.12
-curl -s https://releases.rancher.com/install-docker/1.12.sh | sh
+# install docker 17.03
+curl -s https://releases.rancher.com/install-docker/17.03.sh | sh
usermod -aG docker ubuntu
# install kubernetes 1.8.6
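(As a sanity check, not part of the committed script, the engine version installed by the Rancher helper can be confirmed before the host is registered; this sketch assumes the stock docker CLI and systemd unit.)
docker version --format '{{.Server.Version}}'   # expect a 17.03.x release
systemctl is-active docker                      # engine should be running before Rancher attaches the host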
@@ -33,10 +33,14 @@ chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
mkdir ~/.kube
-# install helm 2.3
-wget -q http://storage.googleapis.com/kubernetes-helm/helm-v2.3.0-linux-amd64.tar.gz
-tar -zxvf helm-v2.3.0-linux-amd64.tar.gz
+# install helm
+wget -q http://storage.googleapis.com/kubernetes-helm/helm-v2.6.1-linux-amd64.tar.gz
+tar -zxvf helm-v2.6.1-linux-amd64.tar.gz
sudo mv linux-amd64/helm /usr/local/bin/helm
+# verify version
+helm version
+# Rancher 1.6.14 installs helm/tiller 2.6.1 on the server side; if you upgrade the client to 2.8.0, you must also upgrade the server-side helm to match the client version
+helm init --upgrade
# Fix virtual memory allocation for onap-log:elasticsearch:
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
@@ -91,7 +95,7 @@ tar xvf rancher-linux-amd64-v0.6.7.tar
# Clone OOM:
cd ~
-git clone -b amsterdam http://gerrit.onap.org/r/oom
+git clone -b master http://gerrit.onap.org/r/oom
# Update values.yaml to point to docker-proxy instead of nexus3:
cd ~/oom/kubernetes
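(The repointing step referenced above is outside this hunk; a hypothetical sketch, assuming the registry host appears literally as nexus3.onap.org:10001 in the charts' values.yaml files and that a __docker_proxy__ token is substituted by Heat like the other __...__ placeholders in this script.)
find . -name values.yaml -exec sed -i 's|nexus3.onap.org:10001|__docker_proxy__|g' {} +
grep -rl 'nexus3.onap.org:10001' --include=values.yaml . || echo "all charts repointed to the proxy"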
@@ -131,165 +135,21 @@ sed -i "s/kubeMasterAuthToken:.*/kubeMasterAuthToken: $KUBETOKEN/" ~/oom/kuberne
# Create or edit ~/oom/kubernetes/config/onap-parameters.yaml
cat > ~/oom/kubernetes/config/onap-parameters.yaml <<EOF
-# For information regarding those parameters, please visit http://onap.readthedocs.io/en/latest/submodules/dcaegen2.git/docs/sections/installation_heat.html
-
-#################
-# COMMON CONFIG #
-#################
-
-# NEXUS
-NEXUS_HTTP_REPO: https://nexus.onap.org/content/sites/raw
-NEXUS_DOCKER_REPO: nexus3.onap.org:10001
-NEXUS_USERNAME: docker
-NEXUS_PASSWORD: docker
-
-# ONAP config
-# Do not change unless you know what you're doing
-DMAAP_TOPIC: "AUTO"
-DEMO_ARTIFACTS_VERSION: "1.1.1"
-
-# ------------------------------------------------#
-# OpenStack Config on which VNFs will be deployed #
-# ------------------------------------------------#
-
-# The four below parameters are only used by Robot.
-# As Robot is able to perform some automated actions,
-# e.g. onboard/distribute/instantiate, it has to be
-# configured with four below parameters (in addition
-# to the OPENSTACK ones).
-# If you don't intend to use Robot for those actions,
-# you can put dummy values, but you will have to provide
-# those values when deploying VNF anyway.
-# --------------------------------------------------
-# This is the OAM Network ID used for internal network by VNFs.
-# You could create 10.10.10.0/24 (256 IPs should be enough) in your cloud instance.
+OPENSTACK_UBUNTU_14_IMAGE: "__ubuntu_1404_image__"
+OPENSTACK_PUBLIC_NET_ID: "__public_net_id__"
OPENSTACK_OAM_NETWORK_ID: "__oam_network_id__"
-# This is the public Network ID. Public = external network in OpenStack.
-# Floating IPs will be created and assigned to VNFs from this network,
-# to provide external reachability.
-OPENSTACK_PUBLIC_NETWORK_ID: "__public_net_id__"
-# VM Flavor to be used by VNF.
-OPENSTACK_FLAVOR: "m1.medium"
-# VM image to be used by VNF. Here ubuntu 14.04 is provided.
-OPENSTACK_IMAGE: "__ubuntu_1604_image__"
-
+OPENSTACK_OAM_SUBNET_ID: "__oam_subnet_id__"
+OPENSTACK_OAM_NETWORK_CIDR: "__oam_network_cidr__"
OPENSTACK_USERNAME: "__openstack_username__"
-OPENSTACK_PASSWORD: "__openstack_api_key__"
+OPENSTACK_API_KEY: "__openstack_api_key__"
OPENSTACK_TENANT_NAME: "__openstack_tenant_name__"
OPENSTACK_TENANT_ID: "__openstack_tenant_id__"
OPENSTACK_REGION: "RegionOne"
-# Either v2.0 or v3
-OPENSTACK_API_VERSION: "v2.0"
OPENSTACK_KEYSTONE_URL: "__keystone_url__"
-# Don't change this if you don't know what it is
+OPENSTACK_FLAVOUR_MEDIUM: "m1.medium"
OPENSTACK_SERVICE_TENANT_NAME: "service"
-
-########
-# DCAE #
-########
-
-# Whether or not to deploy DCAE
-# If set to false, all the parameters below can be left empty or removed
-# If set to false, update ../dcaegen2/values.yaml disableDcae value to true,
-# this is to avoid deploying the DCAE deployments and services.
-DEPLOY_DCAE: "true"
-
-# DCAE Config
-DCAE_DOCKER_VERSION: v1.1.1
-DCAE_VM_BASE_NAME: "dcae"
-
-# ------------------------------------------------#
-# OpenStack Config on which DCAE will be deployed #
-# ------------------------------------------------#
-
-# Whether to have DCAE deployed on the same OpenStack instance on which VNF will be deployed.
-# (e.g. re-use the same config as defined above)
-# If set to true, discard the next config block, else provide the values.
-IS_SAME_OPENSTACK_AS_VNF: "true"
-
-# Fill in the values in below block only if IS_SAME_OPENSTACK_AS_VNF set to "false"
-# ---
-# Either v2.0 or v3
-DCAE_OS_API_VERSION: "v2.0"
-DCAE_OS_KEYSTONE_URL: "__keystone_url__"
-DCAE_OS_USERNAME: ""
-DCAE_OS_PASSWORD: ""
-DCAE_OS_TENANT_NAME: ""
-DCAE_OS_TENANT_ID: ""
-DCAE_OS_REGION: ""
-# ---
-
-# We need to provide the config of the public network here, because the DCAE VMs will be
-# assigned a floating IP on this network so one can access them, to debug for instance.
-# The ID of the public network.
-DCAE_OS_PUBLIC_NET_ID: "__public_net_id__"
-# The name of the public network.
-DCAE_OS_PUBLIC_NET_NAME: "__public_net_name__"
-# This is the private network that will be used by DCAE VMs. The network will be created during the DCAE boostrap process,
-# and will the subnet created will use this CIDR. (/28 provides 16 IPs, DCAE requires 15.)
-DCAE_OS_OAM_NETWORK_CIDR: "10.99.0.0/16"
-# This will be the private ip of the DCAE boostrap VM. This VM is responsible for spinning up the whole DCAE stack (14 VMs total)
-DCAE_IP_ADDR: "10.99.4.1"
-
-# The flavors' name to be used by DCAE VMs
-DCAE_OS_FLAVOR_SMALL: "m1.small"
-DCAE_OS_FLAVOR_MEDIUM: "m1.medium"
-DCAE_OS_FLAVOR_LARGE: "m1.large"
-# The images' name to be used by DCAE VMs
-DCAE_OS_UBUNTU_14_IMAGE: "__ubuntu_1404_image__"
-DCAE_OS_UBUNTU_16_IMAGE: "__ubuntu_1604_image__"
-DCAE_OS_CENTOS_7_IMAGE: "__centos_7_image__"
-
-# This is the keypair that will be created in OpenStack, and that one can use to access DCAE VMs using ssh.
-# The private key needs to be in a specific format so at the end of the process, it's formatted properly
-# when ending up in the DCAE HEAT stack. The best way is to do the following:
-# - copy paste your key
-# - surround it with quote
-# - add \n at the end of each line
-# - escape the result using https://www.freeformatter.com/java-dotnet-escape.html#ad-output
-DCAE_OS_KEY_NAME: "onap_key"
-DCAE_OS_PUB_KEY: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
-DCAE_OS_PRIVATE_KEY: \"-----BEGIN RSA PRIVATE KEY-----\\n\r\nMIIEpQIBAAKCAQEAylw4KKN/ljqnFBvP+blG5PNfsnM4MAuGPMsE5rkKmzcZWNaE\\n\r\nNGMXTFKlJ4YrUl7OUv8kbgFTmB8BoNpgrNtKACDaz/psQSOeOADCG/YrT4wrYKrR\\n\r\nNhFqOjJpxRmxweEsd14qBOxeFT7Ie42qbCMMzo260HvjLmtUxkOXeJ3xDkGmoJVy\\n\r\nyzxX7nO1m4WyWyukO6x6mX0XDsADF4A6AapcqinoisJ7pnXaNkcjU/JY2Jrwem7s\\n\r\n+ypzIp86O6gdLpLVU9ORR/UYNAk1h+Z6K5Rual4D9mrpC9IJNaYfIgLe7mC39ZLa\\n\r\nfiySNoGhei9P6pYvRJlQki69bid/EPAgX5YZIQIDAQABAoIBAQClDekkhI9ZqseC\\n\r\nqFjPuKaxsizZMg+faJb6WSHLSxzyk1OSWY6F6FklgLeC8HW/fuLNYZyGOYDEsG20\\n\r\nlMqL02Wdiy7OutS3oOS5iyzIf9a90HfFJi706el6RIpvINETcaXCS0T8tQrcS1Rd\\n\r\nKqTaBRC6HXJGAPbBcvw3pwQSdskatU6a/Kt2a3x6DsqqinQcgEB/SbrDaJCUX9sb\\n\r\nF2HVUwdq7aZK1Lk0ozr1FID9mrhjwWuQ6XC+vjG0FqtyXeMpR5iaQ73hex3FXQ8z\\n\r\nOjkFbMwuHWSh1DSx70r5yFrrBqwQKnMsBqx4QDRf3fIENUnWviaL+n+gwcXA07af\\n\r\n4kaNUFUtAoGBAPuNNRAGhZnyZ9zguns9PM56nmeMUikV5dPN2DTbQb79cpfV+7pC\\n\r\n6PeSH/dTKFLz62d6qAM2EsNXQvewf8fipBVBRPsRqKOv+uepd01dHNy62I5B+zRm\\n\r\nbe9Kbe+EN60qdzvyPM+2hV6CnvGv1dirimS9pu6RrxD2Rmz1ectnJE+rAoGBAM3w\\n\r\nUbSEemyZ6EKjck2RfdipzY0MNBnIZ2cUqHh8mmPXjdTLzpXb9vmPbHb01Qwo8MP+\\n\r\ngMnTbTBOzyNAaHdIrCO9FHW6C85j3ot5Yzcr+EcBVcua+7KHU0Sgn44JNH8DisJ7\\n\r\nY63UP/1Xb4d1/QvHfxYy3WOvvRdVZ7pPo8JNX95jAoGAIe5CIg8/JizUZa7KeKUh\\n\r\n9pgDleQPkQsrHQ6/AyIwFBsLwf9THSS5V+uV9D57SfUs46Bf2U8J6N90YQSlt8iS\\n\r\naWuManFPVgT+yxDIzt6obf2mCEpOIBtQ6N4ZRh2HhQwdWTCrkzkDdGQaHG+jYL6C\\n\r\nxGPwiG2ON7OAfGIAM7eN5lECgYEAhoRLWlaOgRGnHKAWsYQvZ67CjTdDcPPuVu6v\\n\r\nfMQnNMA/7JeTwV+E205L0wfpgZ/cZKmBBlQMJlnUA3q2wfO+PTnse1mjDJU/cGtB\\n\r\n22/lJLxChlQdxGeQhGtGzUhF+hEeOhrO6WSSx7CtMRZoy6Dr6lwfMFZCdVNcBd6v\\n\r\nYOOZk3ECgYEAseUKGb6E80XTVVNziyuiVbQCsI0ZJuRfqMZ2IIDQJU9u6AnGAway\\n\r\nitqHbkGsmDT+4HUz01+1JKnnw42RdSrHdU/LaOonD+RIGqe2x800QXzqASKLdCXr\\n\r\ny7RoiFqJtkdFQykzJemA+xOXvHLgKi/MXFsU90PCD0VJKLj8vwpX78Y=\\n\r\n-----END RSA PRIVATE KEY-----\\n\r\n\"
-
-# This below settings allows one to configure the /etc/resolv.conf nameserver resolution for all the DCAE VMs.
-# -
-# In the HEAT setup, it's meant to be a DNS list, as the HEAT setup deploys a DNS Server VM in addition to DNS Designate
-# and this DNS Server is setup to forward request to the DNS Designate backend when it cannot resolve, hence the
-# DNS_FORWARDER config here. The DCAE Boostrap requires both inputs, even though they are now similar, we have to pass
-# them.
-# -
-# ATTENTION: Assumption is made the DNS Designate backend is configure to forward request to a public DNS (e.g. 8.8.8.8)
-# -
-# Put the IP of the DNS Designate backend (e.g. the OpenStack IP supporting DNS Designate)
-DNS_IP: "__dns_forwarder__"
-DNS_FORWARDER: "__dns_forwarder__"
-
-# Public DNS - not used but required by the DCAE boostrap container
-EXTERNAL_DNS: "__external_dns__"
-
-# DNS domain for the DCAE VMs
-DCAE_DOMAIN: "dcaeg2.onap.org"
-
-# Proxy DNS Designate. This means DCAE will run in an instance not support Designate, and Designate will be provided by another instance.
-# Set to true if you wish to use it
-DNSAAS_PROXY_ENABLE: "__dnsaas_proxy_enable__"
-# Provide this only if DNSAAS_PROXY_ENABLE set to true. The IP has to be the IP of one of the K8S hosts.
-# e.g. http://10.195.197.164/api/multicloud-titanium_cloud/v0/pod25_RegionOne/identity/v2.0
-DCAE_PROXIED_KEYSTONE_URL: "http://__k8s_ip_addr__/__dnsaas_proxied_keystone_url_path__"
-
-# -----------------------------------------------------#
-# OpenStack Config on which DNS Designate is supported #
-# -----------------------------------------------------#
-
-# If this is the same OpenStack used for the VNF or DCAE, please re-enter the values here.
-
-DNSAAS_API_VERSION: "v3"
-DNSAAS_REGION: "RegionOne"
-DNSAAS_KEYSTONE_URL: "__dnsaas_keystone_url__"
-DNSAAS_TENANT_ID: "__dnsaas_tenant_id__"
-DNSAAS_TENANT_NAME: "__dnsaas_tenant_name__"
-DNSAAS_USERNAME: "__dnsaas_username__"
-DNSAAS_PASSWORD: "__dnsaas_password__"
+DMAAP_TOPIC: "AUTO"
+DEMO_ARTIFACTS_VERSION: "1.1.1"
EOF
cat ~/oom/kubernetes/config/onap-parameters.yaml
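(Before continuing, it can be worth confirming the generated file is valid YAML and that no __...__ placeholder was left unsubstituted; a small check along these lines, assuming python with PyYAML is available on the VM.)
python -c 'import sys,yaml; yaml.safe_load(open(sys.argv[1]))' ~/oom/kubernetes/config/onap-parameters.yaml \
    && echo "onap-parameters.yaml parses cleanly"
grep -n '__' ~/oom/kubernetes/config/onap-parameters.yaml && echo "WARNING: unsubstituted placeholders remain"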
@@ -322,8 +182,6 @@ git config user.name "root"
git add -A
git commit -m "initial commit"
-cat /dockerdata-nfs/onap/dcaegen2/heat/onap_dcae.env
-
# Run ONAP:
cd ~/oom/kubernetes/oneclick/
./createAll.bash -n onap
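(createAll.bash only submits the resources; a hedged sketch of how rollout could be watched afterwards, assuming the per-component onap-* namespaces created by the oneclick scripts.)
kubectl get namespaces | grep ^onap                                      # one namespace per component
kubectl get pods --all-namespaces | grep -v Running | grep -v Completed  # anything still pending or failing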
diff --git a/deployment/heat/onap-oom/onap-oom.yaml b/deployment/heat/onap-oom/onap-oom.yaml
index 760a9575d..0a40ee224 100644
--- a/deployment/heat/onap-oom/onap-oom.yaml
+++ b/deployment/heat/onap-oom/onap-oom.yaml
@@ -238,6 +238,7 @@ resources:
__public_net_id__: { get_param: public_net_id }
__public_net_name__: { get_param: public_net_name }
__oam_network_id__: { get_resource: oam_network }
+ __oam_subnet_id__: { get_resource: oam_subnet }
__oam_network_cidr__: { get_param: oam_network_cidr }
__ubuntu_1404_image__: { get_param: ubuntu_1404_image }
__ubuntu_1604_image__: { get_param: ubuntu_1604_image }
diff --git a/deployment/heat/onap-oom/rancher_vm_entrypoint.sh b/deployment/heat/onap-oom/rancher_vm_entrypoint.sh
index ac704f106..12ed51f13 100644
--- a/deployment/heat/onap-oom/rancher_vm_entrypoint.sh
+++ b/deployment/heat/onap-oom/rancher_vm_entrypoint.sh
@@ -19,4 +19,4 @@ fi
apt-get -y update
apt-get -y install docker.io
usermod -aG docker ubuntu
-docker run --restart unless-stopped -d -p 8080:8080 rancher/server:v1.6.10
+docker run --restart unless-stopped -d -p 8080:8080 rancher/server:v1.6.14
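(After the image bump it can take a minute or two for the server to come up; a hypothetical readiness probe, assuming the Rancher UI keeps answering on port 8080.)
until curl -sf -o /dev/null http://localhost:8080; do
    echo "waiting for rancher/server:v1.6.14 to come up..."
    sleep 10
done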
diff --git a/deployment/heat/onap-oom/scripts/deploy.sh b/deployment/heat/onap-oom/scripts/deploy.sh
index de33b08e0..60ceab663 100755
--- a/deployment/heat/onap-oom/scripts/deploy.sh
+++ b/deployment/heat/onap-oom/scripts/deploy.sh
@@ -38,7 +38,7 @@ fi
ssh-keygen -R $K8S_IP
for n in $(seq 1 10); do
- timeout 2m ssh -o StrictHostKeychecking=no -i ~/.ssh/onap_key ubuntu@$K8S_IP 'sudo su -l root -c "/root/oom/kubernetes/robot/ete-k8s.sh health"'
+ timeout 2m ssh -o StrictHostKeychecking=no -i ~/.ssh/onap_key ubuntu@$K8S_IP 'sudo su -l root -c "/root/oom/kubernetes/robot/ete-k8s.sh onap health"'
RESULT=$?
if [ $RESULT -eq 0 ]; then
break