author     Victor Morales <victor.morales@intel.com>  2018-04-23 01:05:02 -0700
committer  Victor Morales <victor.morales@intel.com>  2018-04-23 01:05:02 -0700
commit     53c52795b9008893dbf5739dfdafbaf100fa81ef (patch)
tree       96702e7650d0b1f9099e350bebbbdfa888681438
parent     8be83219fd8fc9983ac90a700b25c35555f45c49 (diff)
Update OOM script
The instructions for deploying ONAP through the OOM tool have changed: OOM now uses Helm charts to perform the deployment, and this change reflects that. Because Rancher adds an extra layer to the deployment, an alternative method to deploy Kubernetes via kubespray is also provided; kubespray delivers a production-ready k8s deployment.

Change-Id: Ied3f5fc9e5c97b4c0f8e623b9d6f3c4f52fc822e
Signed-off-by: Victor Morales <victor.morales@intel.com>
Issue-ID: INT-478
-rw-r--r--  Vagrantfile                     3
-rw-r--r--  docs/source/index.rst          13
-rwxr-xr-x  lib/_installers               179
-rw-r--r--  lib/files/aio_inventory.cfg    20
-rw-r--r--  lib/files/dev.yaml            135
-rw-r--r--  lib/files/k8s-cluster.yml     292
-rwxr-xr-x  lib/oom                       149
-rwxr-xr-x  vagrant_utils/postinstall.sh    7
8 files changed, 664 insertions, 134 deletions
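
For context, the Helm-based flow that this patch wires into install_oom (lib/oom) reduces to the steps below. This is a simplified sketch; the full logic, including repository cloning, image pre-pulling and proxy handling, lives in lib/_installers and lib/oom further down.

    # Simplified sketch of the new deployment flow (commands as used in this patch)
    install_kubernetes kubespray     # deploy a production-ready k8s cluster via kubespray
    install_helm                     # install kubectl and helm, set up the tiller service account
    pushd ${src_folders[oom]}/kubernetes
    make repo                        # set up the local helm chart repository
    make all                         # build the ONAP helm charts
    helm install local/onap -n beijing -f /var/onap/files/dev.yaml
    popd
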
diff --git a/Vagrantfile b/Vagrantfile
index 9ff0340..7f379ed 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -171,6 +171,7 @@ nodes = [
:fwds => [
{ :guest => 8880, :host => 8880, :guest_ip => '192.168.50.21' },
{ :guest => 8989, :host => 8989, :guest_ip => '192.168.50.21' },
+ { :guest => 8080, :host => 8888, :guest_ip => '192.168.50.21' },
]
},
{
@@ -487,7 +488,7 @@ Vagrant.configure("2") do |config|
end
# Set Box type
- if "openstack" == node[:name]
+ if ["openstack", "oom"].include? node[:name]
box = {
:virtualbox => 'ubuntu/xenial64',
:libvirt => 'elastic/ubuntu-16.04-x86_64'
diff --git a/docs/source/index.rst b/docs/source/index.rst
index af5777c..6e812f8 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -1,8 +1,13 @@
-ONAP on Vagrant tool
-====================
+Devtool
+=======
+
+This project provides an automated tool for provisioning an ONAP
+development environment. It covers common development tasks such as
+cloning source code repositories, compiling Java artifacts and
+building Docker images. It serves as a vehicle to standardize the
+development process and the dependencies associated with each
+component through an automated provisioning mechanism.
-This project collects instructions to setup a development environment
-using different providers like VirtualBox, Libvirt or OpenStack.
.. seealso::
diff --git a/lib/_installers b/lib/_installers
index 45fbd84..e01b15c 100755
--- a/lib/_installers
+++ b/lib/_installers
@@ -3,6 +3,11 @@
source /var/onap/_commons
source /var/onap/_onap_functions
+RANCHER_PORT=8880
+export RANCHER_URL=http://localhost:$RANCHER_PORT
+export RANCHER_ACCESS_KEY='access_key'
+export RANCHER_SECRET_KEY='secret_key'
+
# _install_bind() - Install bind utils
function _install_bind {
install_packages bind9 bind9utils
@@ -83,6 +88,7 @@ function _install_pip {
if ! which pip; then
curl -sL https://bootstrap.pypa.io/get-pip.py | python
fi
+ pip install --upgrade pip
}
# install_python_package() - Install python modules
@@ -297,3 +303,176 @@ function install_haproxy {
;;
esac
}
+# _install_rancher() - Function that installs Rancher CLI and container
+function _install_rancher {
+ local rancher_version=v0.6.5
+ local rancher_server_version=v1.6.10
+ local rancher_server=rancher/server:$rancher_server_version
+
+ if [ ! -d /opt/rancher/current ]; then
+ mkdir -p /opt/rancher/current
+ wget https://github.com/rancher/cli/releases/download/$rancher_version/rancher-linux-amd64-$rancher_version.tar.gz
+ tar -xzf rancher-linux-amd64-$rancher_version.tar.gz -C /tmp
+ mv /tmp/rancher-$rancher_version/rancher /opt/rancher/current/
+ fi
+
+ if ! $(docker version &>/dev/null); then
+ curl https://releases.rancher.com/install-docker/1.12.sh | sh
+ _configure_docker_settings 15
+ fi
+ pull_docker_image $rancher_server
+ run_docker_image -d --restart=unless-stopped -p $RANCHER_PORT:8080 $rancher_server
+ while true; do
+ if curl --fail -X GET $RANCHER_URL; then
+ break
+ fi
+ echo "waiting for rancher"
+ sleep $oom_delay
+ done
+}
+
+# install_kubernetes() - Function that deploys kubernetes
+function install_kubernetes {
+ local installer_k8s_type=${1:-rancher}
+
+ case $installer_k8s_type in
+ kubespray)
+ _install_kubespray_k8s
+ ;;
+ rancher)
+ _install_rancher_k8s
+ ;;
+ esac
+}
+
+# _install_kubespray_k8s() - Function that installs Kubernetes using kubespray tool
+function _install_kubespray_k8s {
+ local src_folder=/opt/kubespray
+
+ clone_repo kubernetes-incubator/kubespray $src_folder https://github.com/
+ install_python_package netaddr
+ install_python_package ansible==2.4.0.0
+ pushd $src_folder
+ rm -rf inventory/*
+ mkdir -p inventory/group_vars
+ cp /var/onap/files/aio_inventory.cfg ./inventory/inventory.cfg
+ cp /var/onap/files/k8s-cluster.yml ./inventory/group_vars/
+
+ swapoff -a
+ if [ $http_proxy ]; then
+ sed -i "s|#http_proxy: \"\"|http_proxy: \"$http_proxy\"|g" ./inventory/group_vars/k8s-cluster.yml
+ fi
+ if [ $https_proxy ]; then
+ sed -i "s|#https_proxy: \"\"|https_proxy: \"$https_proxy\"|g" ./inventory/group_vars/k8s-cluster.yml
+ fi
+ if [ $no_proxy ]; then
+ sed -i "s|#no_proxy: \"\"|no_proxy: \"$no_proxy\"|g" ./inventory/group_vars/k8s-cluster.yml
+ fi
+
+ echo " type: NodePort" >> roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
+ ansible-playbook -vvv -i inventory/inventory.cfg cluster.yml -b | tee setup-kubernetes.log
+ popd
+}
+
+# _pull_rancher_images() - Function that retrieves Rancher images required for k8s
+function _pull_rancher_images {
+ for image in "net:v0.13.5" "k8s:v1.8.5-rancher3" \
+"lb-service-rancher:v0.7.17" "network-manager:v0.7.18" "metadata:v0.9.5" \
+"kubectld:v0.8.5" "kubernetes-agent:v0.6.6" "dns:v0.15.3" \
+"kubernetes-auth:v0.0.8" "healthcheck:v0.3.3" "etcd:v2.3.7-13" \
+"etc-host-updater:v0.0.3" "net:holder"; do
+ pull_docker_image rancher/$image &
+ done
+}
+
+# _pull_k8s_images() - Function that retrieves Google k8s images
+function _pull_k8s_images {
+ for image in "kubernetes-dashboard-amd64:v1.7.1" \
+"k8s-dns-sidecar-amd64:1.14.5" "k8s-dns-kube-dns-amd64:1.14.5" \
+"k8s-dns-dnsmasq-nanny-amd64:1.14.5" "heapster-influxdb-amd64:v1.3.3" \
+"heapster-grafana-amd64:v4.4.3" "heapster-amd64:v1.4.0" "pause-amd64:3.0"; do
+ pull_docker_image gcr.io/google_containers/$image &
+ done
+}
+
+# _install_rancher_k8s() - Function that installs Kubernetes thru Rancher Container
+function _install_rancher_k8s {
+ local rancher_agent_version=v1.2.7
+ local rancher_agent=rancher/agent:$rancher_agent_version
+
+ _install_rancher
+
+ _pull_rancher_images
+ _pull_k8s_images
+ pull_docker_image $rancher_agent
+ wait_docker_pull
+
+ pushd /opt/rancher/current/
+ export RANCHER_ENVIRONMENT=`./rancher env create -t kubernetes onap_on_kubernetes`
+ popd
+
+ install_python_package rancher-agent-registration
+ export no_proxy=$no_proxy,$IP_ADDRESS
+ rancher-agent-registration --host-ip $IP_ADDRESS --url http://$IP_ADDRESS:$RANCHER_PORT --environment $RANCHER_ENVIRONMENT --key $RANCHER_ACCESS_KEY --secret $RANCHER_SECRET_KEY
+}
+
+# _install_kubectl() - Function that installs kubectl as client for kubernetes
+function _install_kubectl {
+ local version=${1:-$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)}
+
+ if ! $(kubectl version &>/dev/null); then
+ rm -rf ~/.kube
+ curl -LO https://storage.googleapis.com/kubernetes-release/release/$version/bin/linux/amd64/kubectl
+ chmod +x ./kubectl
+ mv ./kubectl /usr/local/bin/kubectl
+ mkdir ~/.kube
+ pushd ~/.kube
+ python /var/onap/files/kubectl_config_generator.py
+ popd
+ fi
+}
+
+# install_helm() - Function that install Kubernetes Package Manager
+function install_helm {
+ local helm_version=v2.7.2
+ local helm_tarball=helm-${helm_version}-linux-amd64.tar.gz
+
+ if ! $(helm version &>/dev/null); then
+ wget http://storage.googleapis.com/kubernetes-helm/$helm_tarball
+ tar -zxvf $helm_tarball -C /tmp
+ rm $helm_tarball
+ mv /tmp/linux-amd64/helm /usr/local/bin/helm
+ _install_kubectl
+ kubectl create serviceaccount --namespace kube-system tiller
+ kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
+ kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
+ helm init --service-account tiller
+ helm repo update
+ fi
+}
+
+# _install_openstack() - Function that installs OpenStack services thru OpenStack-Helm project
+function _install_openstack {
+ local src_folder=/opt/openstack-helm
+
+ clone_repo openstack/openstack-helm $src_folder https://github.com/
+ install_python_package python-openstackclient python-heatclient
+ mkdir -p /etc/openstack
+ chown -R $(id -un): /etc/openstack
+ tee /etc/openstack/clouds.yaml << EOF
+clouds:
+ openstack_helm:
+ region_name: RegionOne
+ identity_api_version: 3
+ auth:
+ username: 'admin'
+ password: 'password'
+ project_name: 'admin'
+ project_domain_name: 'default'
+ user_domain_name: 'default'
+ auth_url: 'http://keystone.openstack.svc.cluster.local/v3'
+EOF
+ pushd $src_folder
+ make all
+ popd
+}
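
Assuming the lib/ directory is mounted at /var/onap/ on the target VM, as the other paths in this patch suggest, the new installer entry points could be exercised by hand roughly as follows (a hypothetical invocation, not part of the patch):

    # Hypothetical manual run of the installers added above
    source /var/onap/_installers
    install_kubernetes kubespray    # kubespray-based, production-ready cluster
    # install_kubernetes rancher    # or the Rancher host-registration path (default)
    install_helm                    # helm v2.7.2 plus kubectl and the tiller RBAC setup
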
diff --git a/lib/files/aio_inventory.cfg b/lib/files/aio_inventory.cfg
new file mode 100644
index 0000000..d7ab09b
--- /dev/null
+++ b/lib/files/aio_inventory.cfg
@@ -0,0 +1,20 @@
+[all]
+localhost ansible_connection=local
+
+[kube-master]
+localhost
+
+[kube-node]
+localhost
+
+[etcd]
+localhost
+
+[k8s-cluster:children]
+kube-node
+kube-master
+
+[calico-rr]
+
+[vault]
+localhost
diff --git a/lib/files/dev.yaml b/lib/files/dev.yaml
new file mode 100644
index 0000000..a8dd524
--- /dev/null
+++ b/lib/files/dev.yaml
@@ -0,0 +1,135 @@
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# Global configuration overrides.
+#
+# These overrides will affect all helm charts (ie. applications)
+# that are listed below and are 'enabled'.
+#################################################################
+global:
+ # Change to an unused port prefix range to prevent port conflicts
+ # with other instances running within the same k8s cluster
+ nodePortPrefix: 302
+
+ # ONAP Repository
+ # Uncomment the following to enable the use of a single docker
+ # repository but ONLY if your repository mirrors all ONAP
+ # docker images. This includes all images from dockerhub and
+ # any other repository that hosts images for ONAP components.
+ #repository: nexus3.onap.org:10001
+ #repositorySecret: eyJuZXh1czMub25hcC5vcmc6MTAwMDEiOnsidXNlcm5hbWUiOiJkb2NrZXIiLCJwYXNzd29yZCI6ImRvY2tlciIsImVtYWlsIjoiQCIsImF1dGgiOiJaRzlqYTJWeU9tUnZZMnRsY2c9PSJ9fQ==
+
+ # readiness check - temporary repo until images migrated to nexus3
+ readinessRepository: oomk8s
+ # logging agent - temporary repo until images migrated to nexus3
+ loggingRepository: docker.elastic.co
+
+ # image pull policy
+ pullPolicy: IfNotPresent
+
+ # override default mount path root directory
+ # referenced by persistent volumes and log files
+ persistence:
+ mountPath: /dockerdata
+
+ # flag to enable debugging - application support required
+ debugEnabled: true
+
+#################################################################
+# Enable/disable and configure helm charts (ie. applications)
+# to customize the ONAP deployment.
+#################################################################
+aaf:
+ enabled: false
+aai:
+ enabled: false
+appc:
+ enabled: false
+clamp:
+ enabled: true
+cli:
+ enabled: false
+consul:
+ enabled: false
+dcaegen2:
+ enabled: false
+esr:
+ enabled: false
+log:
+ enabled: false
+message-router:
+ enabled: false
+mock:
+ enabled: false
+msb:
+ enabled: false
+multicloud:
+ enabled: false
+policy:
+ enabled: false
+portal:
+ enabled: false
+robot:
+ enabled: true
+sdc:
+ enabled: false
+sdnc:
+ enabled: false
+
+ replicaCount: 1
+
+ config:
+ enableClustering: false
+
+ dmaap-listener:
+ config:
+ dmaapPort: 3904
+
+ mysql:
+ disableNfsProvisioner: false
+ replicaCount: 1
+so:
+ enabled: true
+
+ replicaCount: 1
+
+ liveness:
+ # necessary to disable liveness probe when setting breakpoints
+ # in debugger so K8s doesn't restart unresponsive container
+ enabled: true
+
+ # so server configuration
+ config:
+ # message router configuration
+ dmaapTopic: "AUTO"
+ # openstack configuration
+ openStackUserName: "vnf_user"
+ openStackRegion: "RegionOne"
+ openStackKeyStoneUrl: "http://1.2.3.4:5000"
+ openStackServiceTenantName: "service"
+ openStackEncryptedPasswordHere: "c124921a3a0efbe579782cde8227681e"
+
+ # configure embedded mariadb
+ mariadb:
+ config:
+ mariadbRootPassword: password
+uui:
+ enabled: false
+vfc:
+ enabled: false
+vid:
+ enabled: false
+vnfsdk:
+ enabled: false \ No newline at end of file
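
The dev.yaml overrides above enable only a minimal set of components (clamp, robot and so) for development. Components can also be toggled without editing the file by layering Helm's --set flag on top of the overrides file; for example (a hypothetical invocation reusing the release name from this patch):

    helm install local/onap -n beijing -f /var/onap/files/dev.yaml --set portal.enabled=true
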
diff --git a/lib/files/k8s-cluster.yml b/lib/files/k8s-cluster.yml
new file mode 100644
index 0000000..0e6f6c0
--- /dev/null
+++ b/lib/files/k8s-cluster.yml
@@ -0,0 +1,292 @@
+# Valid bootstrap options (required): ubuntu, coreos, centos, none
+bootstrap_os: none
+
+#Directory where etcd data stored
+etcd_data_dir: /var/lib/etcd
+
+# Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+## The access_ip variable is used to define how other nodes should access
+## the node. This is used in flannel to allow other flannel nodes to see
+## this node for example. The access_ip is really useful AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+#access_ip: 1.1.1.1
+
+### LOADBALANCING AND ACCESS MODES
+## Enable multiaccess to configure etcd clients to access all of the etcd members directly
+## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
+## This may be the case if clients support and loadbalance multiple etcd servers natively.
+#etcd_multiaccess: true
+
+## Internal loadbalancers for apiservers
+#loadbalancer_apiserver_localhost: true
+
+## Local loadbalancer should use this port instead, if defined.
+## Defaults to kube_apiserver_port (6443)
+#nginx_kube_apiserver_port: 8443
+
+### OTHER OPTIONAL VARIABLES
+## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
+## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
+## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
+## modules.
+# kubelet_load_modules: false
+
+## Internal network total size. This is the prefix of the
+## entire network. Must be unused in your environment.
+#kube_network_prefix: 18
+
+## With calico it is possible to distributed routes with border routers of the datacenter.
+## Warning : enabling router peering will disable calico's default behavior ('node mesh').
+## The subnets of each nodes will be distributed by the datacenter router
+#peer_with_router: false
+
+## Upstream dns servers used by dnsmasq
+#upstream_dns_servers:
+# - 8.8.8.8
+# - 8.8.4.4
+
+## There are some changes specific to the cloud providers
+## for instance we need to encapsulate packets with some network plugins
+## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
+## When openstack is used make sure to source in the openstack credentials
+## like you would do when using nova-client before starting the playbook.
+#cloud_provider:
+
+## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
+#openstack_blockstorage_version: "v1/v2/auto (default)"
+## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
+#openstack_lbaas_enabled: True
+#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
+#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
+#openstack_lbaas_create_monitor: "yes"
+#openstack_lbaas_monitor_delay: "1m"
+#openstack_lbaas_monitor_timeout: "30s"
+#openstack_lbaas_monitor_max_retries: "3"
+
+## Uncomment to enable experimental kubeadm deployment mode
+#kubeadm_enabled: false
+#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
+#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
+#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
+#
+## Set these proxy values in order to update package manager and docker daemon to use proxies
+#http_proxy: ""
+#https_proxy: ""
+## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
+#no_proxy: ""
+
+## Uncomment this if you want to force overlay/overlay2 as docker storage driver
+## Please note that overlay2 is only supported on newer kernels
+#docker_storage_options: -s overlay2
+
+# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
+docker_dns_servers_strict: false
+
+## Default packages to install within the cluster, f.e:
+#kpm_packages:
+# - name: kube-system/grafana
+
+## Certificate Management
+## This setting determines whether certs are generated via scripts or whether a
+## cluster of Hashicorp's Vault is started to issue certificates (using etcd
+## as a backend). Options are "script" or "vault"
+#cert_management: script
+
+# Set to true to allow pre-checks to fail and continue deployment
+#ignore_assert_errors: false
+
+## Etcd auto compaction retention for mvcc key value store in hour
+#etcd_compaction_retention: 0
+
+## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
+#etcd_metrics: basic
+
+
+# Kubernetes configuration dirs and system namespace.
+# Those are where all the additional config stuff goes
+# kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing those values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+system_namespace: kube-system
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+# This is where to save basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"
+
+kube_api_anonymous_auth: false
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+#kube_version: v1.9.0
+
+# Where the binaries will be downloaded.
+# Note: ensure that you've enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+# This is the group that the cert creation scripts chgrp the
+# cert files to. Not really changable...
+kube_cert_group: kube-cert
+
+# Cluster Loglevel configuration
+kube_log_level: 2
+
+# Users to create for basic auth in Kubernetes API via HTTP
+# Optionally add groups for user
+kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+kube_users:
+ kube:
+ pass: "{{kube_api_pwd}}"
+ role: admin
+ groups:
+ - system:masters
+
+## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
+#kube_oidc_auth: false
+kube_basic_auth: true
+#kube_token_auth: false
+
+
+## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
+## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...)
+
+# kube_oidc_url: https:// ...
+# kube_oidc_client_id: kubernetes
+## Optional settings for OIDC
+# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
+# kube_oidc_username_claim: sub
+# kube_oidc_groups_claim: groups
+
+
+# Choose network plugin (calico, contiv, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: cloud
+
+# weave's network password for encryption
+# if null then no network encryption
+# you can use --extra-vars to pass the password in command line
+weave_password: EnterPasswordHere
+
+# Weave uses consensus mode by default
+# Enabling seed mode allow to dynamically add or remove hosts
+# https://www.weave.works/docs/net/latest/ipam/
+weave_mode_seed: false
+
+# This two variable are automatically changed by the weave's role, do not manually change these values
+# To reset values :
+# weave_seed: uninitialized
+# weave_peers: uninitialized
+weave_seed: uninitialized
+weave_peers: uninitialized
+
+# Enable kubernetes network policies
+enable_network_policy: false
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+kube_pods_subnet: 10.233.64.0/18
+
+# internal network node size allocation (optional). This is the size allocated
+# to each node on your network. With these defaults you should have
+# room for 4096 nodes with 254 pods per node.
+kube_network_node_prefix: 24
+
+# The port the API Server will be listening on.
+kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+kube_apiserver_port: 6443 # (https)
+kube_apiserver_insecure_port: 8080 # (http)
+
+# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# Can be dnsmasq_kubedns, kubedns or none
+dns_mode: kubedns
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+# Deploy netchecker app to verify DNS resolve as an HTTP service
+deploy_netchecker: false
+# Ip address of the kubernetes skydns service
+skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
+
+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+## An obvious use case is allowing insecure-registry access
+## to self hosted registries like so:
+
+docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
+docker_bin_dir: "/usr/bin"
+
+# Settings for containerized control plane (etcd/kubelet/secrets)
+etcd_deployment_type: docker
+kubelet_deployment_type: host
+vault_deployment_type: docker
+helm_deployment_type: host
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+
+# Kubernetes dashboard
+# RBAC required. see docs/getting-started.md for access details.
+dashboard_enabled: true
+
+# Monitoring apps for k8s
+efk_enabled: false
+
+# Helm deployment
+helm_enabled: false
+
+# Istio deployment
+istio_enabled: false
+
+# Local volume provisioner deployment
+local_volumes_enabled: false
+
+# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
+persistent_volumes_enabled: false
+
+# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+kubeconfig_localhost: true
+# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
+kubectl_localhost: true
+artifacts_dir: "{{ ansible_env.HOME }}"
+
+# dnsmasq
+# dnsmasq_upstream_dns_servers:
+# - /resolvethiszone.with/10.0.4.250
+# - 8.8.8.8
+
+# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
+# kubelet_cgroups_per_qos: true
+
+# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
+# Acceptible options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
+# kubelet_enforce_node_allocatable: pods
+
+## Supplementary addresses that can be added in kubernetes ssl keys.
+## That can be usefull for example to setup a keepalived virtual IP
+# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
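
The commented proxy placeholders in k8s-cluster.yml above are the lines rewritten by the sed calls in _install_kubespray_k8s. As an illustration, with a hypothetical http_proxy=http://proxy.example.com:8080 exported, the copy placed under inventory/group_vars/ would end up with:

    # before (as shipped):
    #http_proxy: ""
    # after _install_kubespray_k8s runs:
    http_proxy: "http://proxy.example.com:8080"
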
diff --git a/lib/oom b/lib/oom
index d52c029..988c574 100755
--- a/lib/oom
+++ b/lib/oom
@@ -2,112 +2,7 @@
source /var/onap/functions
-RANCHER_PORT=8880
oom_delay=30
-export RANCHER_URL=http://localhost:$RANCHER_PORT
-export RANCHER_ACCESS_KEY='access_key'
-export RANCHER_SECRET_KEY='secret_key'
-
-# _install_docker() - Function that installs Docker version for Rancher
-function _install_docker {
- if ! $(docker version &>/dev/null); then
- curl https://releases.rancher.com/install-docker/1.12.sh | sh
- _configure_docker_settings 15
- fi
-}
-
-# _pull_rancher_images() - Function that retrieves Rancher images required for k8s
-function _pull_rancher_images {
- for image in "net:v0.13.5" "k8s:v1.8.5-rancher3" \
-"lb-service-rancher:v0.7.17" "network-manager:v0.7.18" "metadata:v0.9.5" \
-"kubectld:v0.8.5" "kubernetes-agent:v0.6.6" "dns:v0.15.3" \
-"kubernetes-auth:v0.0.8" "healthcheck:v0.3.3" "etcd:v2.3.7-13" \
-"etc-host-updater:v0.0.3" "net:holder"; do
- pull_docker_image rancher/$image &
- done
-}
-
-# _pull_k8s_images() - Function that retrieves Google k8s images
-function _pull_k8s_images {
- for image in "kubernetes-dashboard-amd64:v1.7.1" \
-"k8s-dns-sidecar-amd64:1.14.5" "k8s-dns-kube-dns-amd64:1.14.5" \
-"k8s-dns-dnsmasq-nanny-amd64:1.14.5" "heapster-influxdb-amd64:v1.3.3" \
-"heapster-grafana-amd64:v4.4.3" "heapster-amd64:v1.4.0" "pause-amd64:3.0"; do
- pull_docker_image gcr.io/google_containers/$image &
- done
-}
-
-# _install_rancher() - Function that installs Rancher CLI and container
-function _install_rancher {
- local rancher_version=v0.6.5
- local rancher_server_version=v1.6.10
- local rancher_server=rancher/server:$rancher_server_version
-
- if [ ! -d /opt/rancher/current ]; then
- mkdir -p /opt/rancher/current
- wget https://github.com/rancher/cli/releases/download/$rancher_version/rancher-linux-amd64-$rancher_version.tar.gz
- tar -xzf rancher-linux-amd64-$rancher_version.tar.gz -C /tmp
- mv /tmp/rancher-$rancher_version/rancher /opt/rancher/current/
- fi
-
- _install_docker
- pull_docker_image $rancher_server
- run_docker_image -d --restart=unless-stopped -p $RANCHER_PORT:8080 $rancher_server
- while true; do
- if curl --fail -X GET $RANCHER_URL; then
- break
- fi
- echo "waiting for racher"
- sleep $oom_delay
- done
-}
-
-# _install_kubernetes() - Function that deploys kubernetes via RancherOS host registration
-function _install_kubernetes {
- local rancher_agent_version=v1.2.7
- local rancher_agent=rancher/agent:$rancher_agent_version
-
- _install_rancher
-
- _pull_rancher_images
- _pull_k8s_images
- pull_docker_image $rancher_agent
- wait_docker_pull
-
- pushd /opt/rancher/current/
- export RANCHER_ENVIRONMENT=`./rancher env create -t kubernetes onap_on_kubernetes`
- popd
-
- install_python_package rancher-agent-registration
- export no_proxy=$no_proxy,$IP_ADDRESS
- rancher-agent-registration --host-ip $IP_ADDRESS --url http://$IP_ADDRESS:$RANCHER_PORT --environment $RANCHER_ENVIRONMENT --key $RANCHER_ACCESS_KEY --secret $RANCHER_SECRET_KEY
-}
-
-# _install_kubectl() - Function that installs kubectl as client for kubernetes
-function _install_kubectl {
- if ! $(kubectl version &>/dev/null); then
- rm -rf ~/.kube
- curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
- chmod +x ./kubectl
- mv ./kubectl /usr/local/bin/kubectl
- mkdir ~/.kube
- pushd ~/.kube
- python /var/onap/files/kubectl_config_generator.py
- popd
- fi
-}
-
-# _install_helm() - Function that install Kubernetes Package Manager
-function _install_helm {
- local helm_version=v2.3.0
-
- if ! $(helm version &>/dev/null); then
- wget http://storage.googleapis.com/kubernetes-helm/helm-${helm_version}-linux-amd64.tar.gz
- tar -zxvf helm-${helm_version}-linux-amd64.tar.gz -C /tmp
- mv /tmp/linux-amd64/helm /usr/local/bin/helm
- helm init
- fi
-}
# _pull_images_from_yaml() - Function that parses a yaml file and pull their images
function _pull_images_from_yaml_file {
@@ -165,43 +60,39 @@ function get_oom_images {
fi
}
-# _install_oom() - Function that clones OOM and deploys ONAP
+# install_oom() - Function that clones OOM and deploys ONAP
function install_oom {
+ mount_external_partition sda /var/lib/docker/
+ install_kubernetes kubespray
+ install_helm
+ until kubectl cluster-info; do
+ echo "waiting for kubernetes host"
+ sleep $oom_delay
+ done
+ printf "Kubernetes Info\n===============\n" > k8s_info.log
+ echo "Dashboard URL: http://$IP_ADDRESS:$(kubectl get service -n kube-system |grep kubernetes-dashboard | awk '{print $5}' |awk -F "[:/]" '{print $2}')" >> k8s_info.log
+ echo "Admin user: $(cat /etc/kubernetes/users/known_users.csv |awk -F ',' '{print $2}')" >> k8s_info.log
+ echo "Admin password: $(cat /etc/kubernetes/users/known_users.csv |awk -F ',' '{print $1}')" >> k8s_info.log
+
if [[ "$clone_repo" != "True" ]]; then
clone_repos "oom"
fi
- pushd ${src_folders[oom]}/kubernetes/oneclick
- source setenv.bash
-
- pushd ${src_folders[oom]}/kubernetes/config
- cp onap-parameters-sample.yaml onap-parameters.yaml
- ./createConfig.sh -n onap
- popd
-
- for app in consul msb mso message-router sdnc vid robot portal policy appc aai sdc dcaegen2 log cli multicloud clamp vnfsdk uui aaf vfc kube2msb; do
- ./createAll.bash -n onap -a $app
- done
- popd
+ pushd ${src_folders[oom]}/kubernetes
+ make repo
+ make all
+ helm install local/onap -n beijing -f /var/onap/files/dev.yaml
}
# init_oom() - Function that deploys ONAP using OOM
function init_oom {
- mount_external_partition sda /var/lib/docker/
- _install_kubernetes
- _install_kubectl
- _install_helm
if [[ "$clone_repo" == "True" ]]; then
clone_repos "oom"
fi
if [[ "$skip_get_images" == "False" ]]; then
get_oom_images
- if [[ "$skip_install" == "False" ]]; then
- until kubectl cluster-info; do
- echo "waiting for kubernetes host"
- sleep $oom_delay
- done
- install_oom
- fi
+ fi
+ if [[ "$skip_install" == "False" ]]; then
+ install_oom
fi
}
diff --git a/vagrant_utils/postinstall.sh b/vagrant_utils/postinstall.sh
index 3b5017a..ea13cc5 100755
--- a/vagrant_utils/postinstall.sh
+++ b/vagrant_utils/postinstall.sh
@@ -12,6 +12,13 @@ fi
source /var/onap/functions
+if [[ $no_proxy && $no_proxy != *$IP_ADDRESS* ]]; then
+ export no_proxy=$no_proxy,$IP_ADDRESS
+fi
+if [[ $NO_PROXY && $NO_PROXY != *$IP_ADDRESS* ]]; then
+ export NO_PROXY=$NO_PROXY,$IP_ADDRESS
+fi
+
update_repos
create_configuration_files
configure_bind