author     James MacNider <James.MacNider@amdocs.com>   2019-05-09 00:39:57 +0000
committer  Gerrit Code Review <gerrit@onap.org>         2019-05-09 00:39:57 +0000
commit     2fcb335690c76df13d694d1bfe42ef546884f7c7 (patch)
tree       da08d72fcd4b97008e9ee87d1ecdfb50e9f8d777
parent     69fba2e235a317be5597de7a5875cee96478ddb5 (diff)
parent     f137b2c51fadc73dd167477c626473c3a80e93e2 (diff)
Merge "Add HA k8s documentation"
40 files changed, 537 insertions, 416 deletions
diff --git a/docs/cluster.yml b/docs/cluster.yml
new file mode 100644
index 0000000000..d4962d3478
--- /dev/null
+++ b/docs/cluster.yml
@@ -0,0 +1,156 @@
+# An example of an HA Kubernetes cluster for ONAP
+nodes:
+- address: 10.12.6.85
+  port: "22"
+  internal_address: 10.0.0.8
+  role:
+  - controlplane
+  - etcd
+  hostname_override: "onap-control-1"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.90
+  port: "22"
+  internal_address: 10.0.0.11
+  role:
+  - controlplane
+  - etcd
+  hostname_override: "onap-control-2"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.89
+  port: "22"
+  internal_address: 10.0.0.12
+  role:
+  - controlplane
+  - etcd
+  hostname_override: "onap-control-3"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.5.165
+  port: "22"
+  internal_address: 10.0.0.14
+  role:
+  - worker
+  hostname_override: "onap-k8s-1"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.238
+  port: "22"
+  internal_address: 10.0.0.26
+  role:
+  - worker
+  hostname_override: "onap-k8s-2"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.126
+  port: "22"
+  internal_address: 10.0.0.5
+  role:
+  - worker
+  hostname_override: "onap-k8s-3"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.5.11
+  port: "22"
+  internal_address: 10.0.0.6
+  role:
+  - worker
+  hostname_override: "onap-k8s-4"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.244
+  port: "22"
+  internal_address: 10.0.0.9
+  role:
+  - worker
+  hostname_override: "onap-k8s-5"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.249
+  port: "22"
+  internal_address: 10.0.0.17
+  role:
+  - worker
+  hostname_override: "onap-k8s-6"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.5.191
+  port: "22"
+  internal_address: 10.0.0.20
+  role:
+  - worker
+  hostname_override: "onap-k8s-7"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.111
+  port: "22"
+  internal_address: 10.0.0.10
+  role:
+  - worker
+  hostname_override: "onap-k8s-8"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.195
+  port: "22"
+  internal_address: 10.0.0.4
+  role:
+  - worker
+  hostname_override: "onap-k8s-9"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.5.160
+  port: "22"
+  internal_address: 10.0.0.16
+  role:
+  - worker
+  hostname_override: "onap-k8s-10"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.74
+  port: "22"
+  internal_address: 10.0.0.18
+  role:
+  - worker
+  hostname_override: "onap-k8s-11"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.82
+  port: "22"
+  internal_address: 10.0.0.7
+  role:
+  - worker
+  hostname_override: "onap-k8s-12"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+services:
+  kube-api:
+    service_cluster_ip_range: 10.43.0.0/16
+    pod_security_policy: false
+    always_pull_images: false
+  kube-controller:
+    cluster_cidr: 10.42.0.0/16
+    service_cluster_ip_range: 10.43.0.0/16
+  kubelet:
+    cluster_domain: cluster.local
+    cluster_dns_server: 10.43.0.10
+    fail_swap_on: false
+network:
+  plugin: canal
+authentication:
+  strategy: x509
+ssh_key_path: "~/.ssh/onap-key"
+ssh_agent_auth: false
+authorization:
+  mode: rbac
+ignore_docker_version: false
+kubernetes_version: "v1.13.5-rancher1-2"
+private_registries:
+- url: nexus3.onap.org:10001
+  user: docker
+  password: docker
+  is_default: true
+cluster_name: "onap"
+restore:
+  restore: false
+  snapshot_name: ""
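+# Optional sanity check (a sketch; assumes the addresses and key path used
+# in this example file): before handing this file to RKE, confirm each node
+# is reachable over SSH with the configured key, e.g. for the control plane:
+#
+#   for ip in 10.12.6.85 10.12.6.90 10.12.6.89; do
+#     ssh -i ~/.ssh/onap-key -o StrictHostKeyChecking=no ubuntu@"$ip" hostname
+#   done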
diff --git a/docs/images/cp_vms/control_plane_1.png b/docs/images/cp_vms/control_plane_1.png
new file mode 100644
index 0000000000..d59b9863b7
--- /dev/null
+++ b/docs/images/cp_vms/control_plane_1.png
Binary files differ
diff --git a/docs/images/cp_vms/control_plane_2.png b/docs/images/cp_vms/control_plane_2.png
new file mode 100644
index 0000000000..9a7d72f8a5
--- /dev/null
+++ b/docs/images/cp_vms/control_plane_2.png
Binary files differ
diff --git a/docs/images/cp_vms/control_plane_3.png b/docs/images/cp_vms/control_plane_3.png
new file mode 100644
index 0000000000..da329f20b5
--- /dev/null
+++ b/docs/images/cp_vms/control_plane_3.png
Binary files differ
diff --git a/docs/images/cp_vms/control_plane_4.png b/docs/images/cp_vms/control_plane_4.png
new file mode 100644
index 0000000000..817355a99e
--- /dev/null
+++ b/docs/images/cp_vms/control_plane_4.png
Binary files differ
diff --git a/docs/images/cp_vms/control_plane_5.png b/docs/images/cp_vms/control_plane_5.png
new file mode 100644
index 0000000000..33805c50dd
--- /dev/null
+++ b/docs/images/cp_vms/control_plane_5.png
Binary files differ
diff --git a/docs/images/cp_vms/control_plane_6.png b/docs/images/cp_vms/control_plane_6.png
new file mode 100644
index 0000000000..9e8ab638bc
--- /dev/null
+++ b/docs/images/cp_vms/control_plane_6.png
Binary files differ
diff --git a/docs/images/cp_vms/control_plane_7.png b/docs/images/cp_vms/control_plane_7.png
new file mode 100644
index 0000000000..f0db6d3f3f
--- /dev/null
+++ b/docs/images/cp_vms/control_plane_7.png
Binary files differ
diff --git a/docs/images/cp_vms/control_plane_8.png b/docs/images/cp_vms/control_plane_8.png
new file mode 100644
index 0000000000..e20f631e60
--- /dev/null
+++ b/docs/images/cp_vms/control_plane_8.png
Binary files differ
diff --git a/docs/images/floating_ips/floating_1.png b/docs/images/floating_ips/floating_1.png
new file mode 100644
index 0000000000..9f413164ab
--- /dev/null
+++ b/docs/images/floating_ips/floating_1.png
Binary files differ
diff --git a/docs/images/floating_ips/floating_2.png b/docs/images/floating_ips/floating_2.png
new file mode 100644
index 0000000000..0001ef068c
--- /dev/null
+++ b/docs/images/floating_ips/floating_2.png
Binary files differ
diff --git a/docs/images/keys/key_pair_1.png b/docs/images/keys/key_pair_1.png
new file mode 100644
index 0000000000..1135c93320
--- /dev/null
+++ b/docs/images/keys/key_pair_1.png
Binary files differ
diff --git a/docs/images/keys/key_pair_2.png b/docs/images/keys/key_pair_2.png
new file mode 100644
index 0000000000..ac3bfc5ca2
--- /dev/null
+++ b/docs/images/keys/key_pair_2.png
Binary files differ
diff --git a/docs/images/keys/key_pair_3.png b/docs/images/keys/key_pair_3.png
new file mode 100644
index 0000000000..1e0c0200f8
--- /dev/null
+++ b/docs/images/keys/key_pair_3.png
Binary files differ
diff --git a/docs/images/keys/key_pair_4.png b/docs/images/keys/key_pair_4.png
new file mode 100644
index 0000000000..031a9ba785
--- /dev/null
+++ b/docs/images/keys/key_pair_4.png
Binary files differ
diff --git a/docs/images/nfs_server/nfs_server_1.png b/docs/images/nfs_server/nfs_server_1.png
new file mode 100644
index 0000000000..912a10f055
--- /dev/null
+++ b/docs/images/nfs_server/nfs_server_1.png
Binary files differ
diff --git a/docs/images/nfs_server/nfs_server_10.png b/docs/images/nfs_server/nfs_server_10.png
new file mode 100644
index 0000000000..7d87d1ca56
--- /dev/null
+++ b/docs/images/nfs_server/nfs_server_10.png
Binary files differ
diff --git a/docs/images/nfs_server/nfs_server_2.png b/docs/images/nfs_server/nfs_server_2.png
new file mode 100644
index 0000000000..d59b9863b7
--- /dev/null
+++ b/docs/images/nfs_server/nfs_server_2.png
Binary files differ
diff --git a/docs/images/nfs_server/nfs_server_3.png b/docs/images/nfs_server/nfs_server_3.png
new file mode 100644
index 0000000000..9a7d72f8a5
--- /dev/null
+++ b/docs/images/nfs_server/nfs_server_3.png
Binary files differ
diff --git a/docs/images/nfs_server/nfs_server_4.png b/docs/images/nfs_server/nfs_server_4.png
new file mode 100644
index 0000000000..da329f20b5
--- /dev/null
+++ b/docs/images/nfs_server/nfs_server_4.png
Binary files differ
diff --git a/docs/images/nfs_server/nfs_server_5.png b/docs/images/nfs_server/nfs_server_5.png
new file mode 100644
index 0000000000..817355a99e
--- /dev/null
+++ b/docs/images/nfs_server/nfs_server_5.png
Binary files differ
diff --git a/docs/images/nfs_server/nfs_server_6.png b/docs/images/nfs_server/nfs_server_6.png
new file mode 100644
index 0000000000..33805c50dd
--- /dev/null
+++ b/docs/images/nfs_server/nfs_server_6.png
Binary files differ
diff --git a/docs/images/nfs_server/nfs_server_7.png b/docs/images/nfs_server/nfs_server_7.png
new file mode 100644
index 0000000000..9e8ab638bc
--- /dev/null
+++ b/docs/images/nfs_server/nfs_server_7.png
Binary files differ
diff --git a/docs/images/nfs_server/nfs_server_8.png b/docs/images/nfs_server/nfs_server_8.png
new file mode 100644
index 0000000000..14103fb9c3
--- /dev/null
+++ b/docs/images/nfs_server/nfs_server_8.png
Binary files differ
diff --git a/docs/images/nfs_server/nfs_server_9.png b/docs/images/nfs_server/nfs_server_9.png
new file mode 100644
index 0000000000..aa8bc140e1
--- /dev/null
+++ b/docs/images/nfs_server/nfs_server_9.png
Binary files differ
diff --git a/docs/images/rke/rke_1.png b/docs/images/rke/rke_1.png
new file mode 100644
index 0000000000..b27fc517df
--- /dev/null
+++ b/docs/images/rke/rke_1.png
Binary files differ
diff --git a/docs/images/wk_vms/worker_1.png b/docs/images/wk_vms/worker_1.png
new file mode 100644
index 0000000000..01314d1557
--- /dev/null
+++ b/docs/images/wk_vms/worker_1.png
Binary files differ
diff --git a/docs/images/wk_vms/worker_2.png b/docs/images/wk_vms/worker_2.png
new file mode 100644
index 0000000000..9a7d72f8a5
--- /dev/null
+++ b/docs/images/wk_vms/worker_2.png
Binary files differ
diff --git a/docs/images/wk_vms/worker_3.png b/docs/images/wk_vms/worker_3.png
new file mode 100644
index 0000000000..93d5e28cf2
--- /dev/null
+++ b/docs/images/wk_vms/worker_3.png
Binary files differ
diff --git a/docs/images/wk_vms/worker_4.png b/docs/images/wk_vms/worker_4.png
new file mode 100644
index 0000000000..817355a99e
--- /dev/null
+++ b/docs/images/wk_vms/worker_4.png
Binary files differ
diff --git a/docs/images/wk_vms/worker_5.png b/docs/images/wk_vms/worker_5.png
new file mode 100644
index 0000000000..33805c50dd
--- /dev/null
+++ b/docs/images/wk_vms/worker_5.png
Binary files differ
diff --git a/docs/images/wk_vms/worker_6.png b/docs/images/wk_vms/worker_6.png
new file mode 100644
index 0000000000..c71c122217
--- /dev/null
+++ b/docs/images/wk_vms/worker_6.png
Binary files differ
diff --git a/docs/images/wk_vms/worker_7.png b/docs/images/wk_vms/worker_7.png
new file mode 100644
index 0000000000..ecb13c1809
--- /dev/null
+++ b/docs/images/wk_vms/worker_7.png
Binary files differ
diff --git a/docs/oom_quickstart_guide.rst b/docs/oom_quickstart_guide.rst
index 0e1d3591f7..20eb8fab79 100644
--- a/docs/oom_quickstart_guide.rst
+++ b/docs/oom_quickstart_guide.rst
@@ -1,7 +1,7 @@
 .. This work is licensed under a
 .. Creative Commons Attribution 4.0 International License.
 .. http://creativecommons.org/licenses/by/4.0
-.. Copyright 2018 Amdocs, Bell Canada
+.. Copyright 2019 Amdocs, Bell Canada
 
 .. _quick-start-label:
 
@@ -17,10 +17,10 @@ available), follow the following instructions to deploy ONAP.
 
 **Step 1.** Clone the OOM repository from ONAP gerrit::
 
-  > git clone -b 4.0.0-ONAP http://gerrit.onap.org/r/oom
+  > git clone -b 4.0.0-ONAP http://gerrit.onap.org/r/oom --recurse-submodules
   > cd oom/kubernetes
 
-**Step 2.** Install Helm Plugins required to deploy the ONAP Casablanca release::
+**Step 2.** Install Helm Plugins required to deploy ONAP::
 
   > sudo cp -R ~/oom/kubernetes/helm/plugins/ ~/.helm
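+
+To confirm the copied plugins are visible to the Helm client, they can be
+listed (a sanity-check sketch, not a required step)::
+
+  > helm plugin list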
 
@@ -49,8 +49,8 @@ OpenStack tenant information.
 
 a. Enabling/Disabling Components:
 
-Here is an example of the nominal entries that need to be provided. We have different
-values file available for different contexts.
+Here is an example of the nominal entries that need to be provided.
+We have different values files available for different contexts.
 
 .. literalinclude:: onap-values.yaml
   :language: yaml
diff --git a/docs/oom_setup_kubernetes_rancher.rst b/docs/oom_setup_kubernetes_rancher.rst
index 5159377386..ebc44e6a96 100644
--- a/docs/oom_setup_kubernetes_rancher.rst
+++ b/docs/oom_setup_kubernetes_rancher.rst
@@ -16,456 +16,435 @@
 
 .. _onap-on-kubernetes-with-rancher:
 
-ONAP on Kubernetes with Rancher
-###############################
+ONAP on HA Kubernetes Cluster
+#############################
 
-The following instructions will step you through the installation of Kubernetes
-on an OpenStack environment with Rancher. The development lab used for this
-installation is the ONAP Windriver lab.
+This guide provides instructions on how to set up a Highly-Available Kubernetes
+Cluster. For this, we are hosting our cluster on OpenStack VMs and using the
+Rancher Kubernetes Engine (RKE) to deploy and manage our Kubernetes Cluster.
 
-This guide does not cover all of the steps required to setup your OpenStack
-environment: e.g. OAM networks and security groups but there is a wealth of
-OpenStack information on the web.
+The result at the end of this tutorial will be:
 
-Rancher Installation
-====================
+*1.* Creation of a Key Pair to use with OpenStack and RKE
 
-The following instructions describe how to create an Openstack VM running
-Rancher. This node will not be used to host ONAP itself, it will be used
-exclusively by Rancher.
+*2.* Creation of OpenStack VMs to host the Kubernetes Control Plane
 
-Launch new VM instance to host the Rancher Server
--------------------------------------------------
+*3.* Creation of OpenStack VMs to host the Kubernetes Workers
 
-.. image:: Rancher-Launch_new_VM_instance_to_host_the_Rancher_Server.jpeg
+*4.* Installation and configuration of RKE to set up an HA Kubernetes cluster
 
-Select Ubuntu 16.04 as base image
+*5.* Installation and configuration of kubectl
+
+*6.* Installation and configuration of helm
+
+*7.* Creation of an NFS Server to be used by ONAP as shared persistence
+
+There are many ways one can execute the above steps, including automating the
+creation of the OpenStack VMs through the use of HEAT. To better illustrate
+the steps involved, we have captured the manual creation of such an
+environment using the ONAP Wind River Open Lab.
+
+.. contents::
+   :depth: 1
+   :local:
+..
+
+Create Key Pair
+===============
+A Key Pair is required to access the created OpenStack VMs and will be used by
+RKE to configure the VMs for Kubernetes.
+
+Use an existing key pair, import one or create a new one to assign.
+
+.. image:: images/keys/key_pair_1.png
+
+.. Note::
+  If you're creating a new Key Pair, ensure to create a local copy of the
+  Private Key through the use of "Copy Private Key to Clipboard".
+
+For the purpose of this guide, we will assume a new local key called "onap-key"
+has been downloaded and is copied into **~/.ssh/**, from which it can be
+referenced.
+
+Example:
+  $ mv onap-key ~/.ssh
+
+  $ chmod 600 ~/.ssh/onap-key
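+
+Alternatively, with the OpenStack CLI available, a key pair can be created
+and saved locally in one step (a sketch; "onap-key" is simply the naming
+convention used by this guide)::
+
+  $ openstack keypair create onap-key > ~/.ssh/onap-key
+
+  $ chmod 600 ~/.ssh/onap-key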
+
+
+Create Kubernetes Control Plane VMs
+===================================
+
+The following instructions describe how to create 3 OpenStack VMs to host the
+Highly-Available Kubernetes Control Plane.
+ONAP workloads will not be scheduled on these Control Plane nodes.
+
+Launch new VM instances
+-----------------------
+
+.. image:: images/cp_vms/control_plane_1.png
+
+Select Ubuntu 18.04 as base image
 ---------------------------------
-Select "No" on "Create New Volume"
+Select "No" for "Create New Volume"
 
-.. image:: Rancher-Select_Ubuntu_16.04_as_base_image.jpeg
+.. image:: images/cp_vms/control_plane_2.png
 
 Select Flavor
 -------------
-Known issues exist if flavor is too small for Rancher. Please select a flavor
-with at least 4 vCPU and 8GB ram. A size of 8 vCPU and 16GB ram is recommended.
+The recommended flavor is at least 4 vCPU and 8GB ram.
 
-.. image:: Rancher-Select_Flavor.jpeg
+.. image:: images/cp_vms/control_plane_3.png
 
 Networking
 ----------
 
-.. image:: Rancher-Networking.jpeg
+.. image:: images/cp_vms/control_plane_4.png
 
 Security Groups
 ---------------
 
-.. image:: Rancher-Security_Groups.jpeg
+.. image:: images/cp_vms/control_plane_5.png
 
 Key Pair
 --------
-Use an existing key pair (e.g. onap_key), import an existing one or create a
-new one to assign.
+Assign the key pair that was created/selected previously (e.g. onap_key).
 
-.. image:: Rancher-Key_Pair.jpeg
+.. image:: images/cp_vms/control_plane_6.png
 
-Apply customization script for the Rancher VM
----------------------------------------------
+Apply customization script for Control Plane VMs
+------------------------------------------------
 
-Click :download:`openstack-rancher.sh <openstack-rancher.sh>` to download the
-script.
+Click :download:`openstack-k8s-controlnode.sh <openstack-k8s-controlnode.sh>`
+to download the script.
 
-.. literalinclude:: openstack-rancher.sh
+.. literalinclude:: openstack-k8s-controlnode.sh
   :language: bash
 
 This customization script will:
 
-* setup root access to the VM (comment out if you wish to disable this
-  capability and restrict access to ssh access only)
+* update ubuntu
 * install docker
-* install rancher
-* install kubectl
-* install helm
-* install nfs server
-
-.. note::
-  The Casablanca release of OOM only supports Helm 2.9.1 not the 2.7.2 shown in
-  the screen capture below. The supported versions of all the software components
-  are listed in the :ref:`cloud-setup-guide-label`.
 
-.. image:: Apply_customization_script_for_the_Rancher_VM.jpeg
+.. image:: images/cp_vms/control_plane_7.png
 
 Launch Instance
 ---------------
 
-.. image:: Rancher-Launch_Instance.jpeg
+.. image:: images/cp_vms/control_plane_8.png
 
-Assign Floating IP for external access
---------------------------------------
-.. image:: Rancher-Allocate_Floating_IP.jpeg
-.. image:: Rancher-Manage_Floating_IP_Associations.jpeg
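+
+Once a Control Plane VM is up, the effect of the customization script can be
+spot checked over SSH (a sketch; substitute the floating IP of the VM)::
+
+  $ ssh -i ~/.ssh/onap-key ubuntu@<control-plane-floating-ip> sudo docker version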
-.. image:: Rancher-Launch_Instance.jpeg
-
-Kubernetes Installation
-=======================
+Create Kubernetes Worker VMs
+============================
+The following instructions describe how to create OpenStack VMs to host the
+Highly-Available Kubernetes Workers. ONAP workloads will only be scheduled
+on these nodes.
 
-Launch new VM instance(s) to create a Kubernetes single host or cluster
------------------------------------------------------------------------
+Launch new VM instances
+-----------------------
 
-To create a cluster:
+The number and size of Worker VMs are dependent on the size of the ONAP
+deployment. By default, all ONAP applications are deployed. It's possible to
+customize the deployment and enable a subset of the ONAP applications. For
+the purpose of this guide, however, we will deploy 12 Kubernetes Workers that
+have been sized to handle the entire ONAP application workload.
 
-.. note::
-  #. do not append a '-1' suffix (e.g. sb4-k8s)
-  #. increase count to the # of of kubernetes worker nodes you want (eg. 3)
-
-.. image:: K8s-Launch_new_VM_instance_to_create_a_Kubernetes_single_host_or_cluster.jpeg
+.. image:: images/wk_vms/worker_1.png
 
 Select Ubuntu 18.04 as base image
 ---------------------------------
 Select "No" on "Create New Volume"
 
-.. image:: K8s-Select_Ubuntu_16.04_as_base_image.jpeg
+.. image:: images/wk_vms/worker_2.png
 
 Select Flavor
 -------------
-The size of a Kubernetes host depends on the size of the ONAP deployment that
-will be installed.
-
-As of the Casablanca release a minimum 224GB will be needed to run a
-full ONAP deployment (all components). It is recommended that more hosts are
-used with fewer resources instead of only a few large hosts. For example 14 x
-16GB hosts.
+The size of Kubernetes hosts depends on the size of the ONAP deployment
+being installed.
 
-If a small subset of ONAP components are being deployed for testing purposes,
-then a single 16GB or 32GB host should suffice.
+If a small subset of ONAP applications are being deployed
+(i.e. for testing purposes), then 16GB or 32GB may be sufficient.
 
-.. image:: K8s-Select_Flavor.jpeg
+.. image:: images/wk_vms/worker_3.png
 
 Networking
 -----------
 
-.. image:: K8s-Networking.jpeg
+.. image:: images/wk_vms/worker_4.png
 
 Security Group
 ---------------
 
-.. image:: K8s-Security_Group.jpeg
+.. image:: images/wk_vms/worker_5.png
 
 Key Pair
 --------
-Use an existing key pair (e.g. onap_key), import an existing one or create a
-new one to assign.
+Assign the key pair that was created/selected previously (e.g. onap_key).
 
-.. image:: K8s-Key_Pair.jpeg
+.. image:: images/wk_vms/worker_6.png
 
 Apply customization script for Kubernetes VM(s)
 -----------------------------------------------
 
-Click :download:`openstack-k8s-node.sh <openstack-k8s-node.sh>` to
-download the script.
+Click :download:`openstack-k8s-workernode.sh <openstack-k8s-workernode.sh>`
+to download the script.
 
-.. literalinclude:: openstack-k8s-node.sh
+.. literalinclude:: openstack-k8s-workernode.sh
   :language: bash
 
 This customization script will:
 
-* setup root access to the VM (comment out if you wish to disable this
-  capability and restrict access to ssh access only)
+* update ubuntu
 * install docker
-* install kubectl
-* install helm
-* install nfs common (see configuration step here)
+* install nfs common
 
-.. note::
-  Ensure you are using the correct versions as described in the
-  :ref:`cloud-setup-guide-label`
 
 Launch Instance
 ---------------
 
-.. image:: K8s-Launch_Instance.jpeg
+.. image:: images/wk_vms/worker_7.png
 
-Assign Floating IP for external access
---------------------------------------
-.. image:: K8s-Manage_Floating_IP_Associations.jpeg
-.. image:: K8s-Launch_Instance.jpeg
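+
+The worker script also configures the Docker daemon to trust the ONAP Nexus
+registry; once a worker is up, this can be confirmed (a sketch, assuming the
+daemon flags shown in the script)::
+
+  $ ssh -i ~/.ssh/onap-key ubuntu@<worker-floating-ip> sudo docker info | grep -A1 "Insecure Registries"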
-Setting up an NFS share for Multinode Kubernetes Clusters
-=========================================================
-The figure below illustrates a possible topology of a multinode Kubernetes
-cluster.
+Assign Floating IP addresses
+----------------------------
+Assign Floating IPs to all Control Plane and Worker VMs.
+These addresses provide external access to the VMs and will be used by RKE
+to configure kubernetes onto the VMs.
 
-.. image:: k8s-topology.jpg
+Repeat the following for each VM previously created:
 
-One node, the Master Node, runs Rancher and Helm clients and connects to all
-the Kubernetes nodes in the cluster. Kubernetes nodes, in turn, run Rancher,
-Kubernetes and Tiller (Helm) agents, which receive, execute, and respond to
-commands issued by the Master Node (e.g. kubectl or helm operations). Note that
-the Master Node can be either a remote machine that the user can log in to or a
-local machine (e.g. laptop, desktop) that has access to the Kubernetes cluster.
+.. image:: images/floating_ips/floating_1.png
 
-Deploying applications to a Kubernetes cluster requires Kubernetes nodes to
-share a common, distributed filesystem. One node in the cluster plays the role
-of NFS Master (not to confuse with the Master Node that runs Rancher and Helm
-clients, which is located outside the cluster), while all the other cluster
-nodes play the role of NFS slaves. In the figure above, the left-most cluster
-node plays the role of NFS Master (indicated by the crown symbol). To properly
-set up an NFS share on Master and Slave nodes, the user can run the scripts
-below.
+Resulting floating IP assignments in this example.
 
-Click :download:`master_nfs_node.sh <master_nfs_node.sh>` to download the
-script.
+.. image:: images/floating_ips/floating_2.png
 
-.. literalinclude:: master_nfs_node.sh
-  :language: bash
 
-Click :download:`slave_nfs_node.sh <slave_nfs_node.sh>` to download the script.
 
-.. literalinclude:: slave_nfs_node.sh
-  :language: bash
 
-The master_nfs_node.sh script runs in the NFS Master node and needs the list of
-NFS Slave nodes as input, e.g.::
+Configure Rancher Kubernetes Engine (RKE)
+=========================================
 
-  > sudo ./master_nfs_node.sh node1_ip node2_ip ... nodeN_ip
+Install RKE
+-----------
+Download and install RKE on a VM, desktop or laptop.
+Binaries can be found here for Linux and Mac:
+https://github.com/rancher/rke/releases/tag/v0.2.1
 
-The slave_nfs_node.sh script runs in each NFS Slave node and needs the IP of
-the NFS Master node as input, e.g.::
+RKE requires a *cluster.yml* as input. An example file is shown below that
+describes a Kubernetes cluster that will be mapped onto the OpenStack VMs
+created earlier in this guide.
 
-  > sudo ./slave_nfs_node.sh master_node_ip
+Example: **cluster.yml**
 
-Configuration (Rancher and Kubernetes)
-======================================
+.. image:: images/rke/rke_1.png
 
-Access Rancher server via web browser
--------------------------------------
-(e.g. http://10.12.6.16:8080/env/1a5/apps/stacks)
+Click :download:`cluster.yml <cluster.yml>` to download the
+configuration file.
 
-.. image:: Access_Rancher_server_via_web_browser.jpeg
+.. literalinclude:: cluster.yml
+  :language: yaml
 
-Add Kubernetes Environment to Rancher
--------------------------------------
+Prepare cluster.yml
+-------------------
+Before this configuration file can be used, the external **address**
+and the **internal_address** must be mapped for each control and worker node
+in this file.
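+
+A quick way to review the resulting mappings before running RKE (a sketch)::
+
+  $ grep -E "^- address|internal_address" cluster.yml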
 
+Run RKE
+-------
+From within the same directory as the cluster.yml file, simply execute:
+
+  $ rke up
+
+The output will look something like:
+
+.. code-block::
+
+  INFO[0000] Initiating Kubernetes cluster
+  INFO[0000] [certificates] Generating admin certificates and kubeconfig
+  INFO[0000] Successfully Deployed state file at [./cluster.rkestate]
+  INFO[0000] Building Kubernetes cluster
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.82]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.249]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.74]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.85]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.238]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.89]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.5.11]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.90]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.244]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.5.165]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.126]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.111]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.5.160]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.5.191]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.195]
+  INFO[0002] [network] Deploying port listener containers
+  INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.85]
+  INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.89]
+  INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.90]
+  INFO[0011] [network] Successfully pulled image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.89]
+  . . . .
+  INFO[0309] [addons] Setting up Metrics Server
+  INFO[0309] [addons] Saving ConfigMap for addon rke-metrics-addon to Kubernetes
+  INFO[0309] [addons] Successfully saved ConfigMap for addon rke-metrics-addon to Kubernetes
+  INFO[0309] [addons] Executing deploy job rke-metrics-addon
+  INFO[0315] [addons] Metrics Server deployed successfully
+  INFO[0315] [ingress] Setting up nginx ingress controller
+  INFO[0315] [addons] Saving ConfigMap for addon rke-ingress-controller to Kubernetes
+  INFO[0316] [addons] Successfully saved ConfigMap for addon rke-ingress-controller to Kubernetes
+  INFO[0316] [addons] Executing deploy job rke-ingress-controller
+  INFO[0322] [ingress] ingress controller nginx deployed successfully
+  INFO[0322] [addons] Setting up user addons
+  INFO[0322] [addons] no user addons defined
+  INFO[0322] Finished building Kubernetes cluster successfully
+
+Install Kubectl
+===============
+
+Download and install kubectl. Binaries can be found here for Linux and Mac:
+
+https://storage.googleapis.com/kubernetes-release/release/v1.13.5/bin/linux/amd64/kubectl
+https://storage.googleapis.com/kubernetes-release/release/v1.13.5/bin/darwin/amd64/kubectl
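+
+After downloading, the binary must be made executable and placed on the PATH.
+A minimal sketch for Linux, mirroring the steps previously performed by the
+per-VM install scripts::
+
+  $ chmod +x ./kubectl
+
+  $ sudo mv ./kubectl /usr/local/bin/kubectl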
+
+Validate deployment
+-------------------
+ $ cp kube_config_cluster.yml ~/.kube/config.onap
-1. Select “Manage Environments”
+
+ $ export KUBECONFIG=~/.kube/config.onap
-.. image:: Add_Kubernetes_Environment_to_Rancher.png
+
+ $ kubectl config use-context onap
-2. Select “Add Environment”
+
+ $ kubectl get nodes -o=wide
-.. image:: Select_Add_Environment.png
+
+.. code-block::
+
+  NAME            STATUS  ROLES              AGE    VERSION  INTERNAL-IP  EXTERNAL-IP  OS-IMAGE          KERNEL-VERSION     CONTAINER-RUNTIME
+  onap-control-1  Ready   controlplane,etcd  3h53m  v1.13.5  10.0.0.8     <none>       Ubuntu 18.04 LTS  4.15.0-22-generic  docker://18.9.5
+  onap-control-2  Ready   controlplane,etcd  3h53m  v1.13.5  10.0.0.11    <none>       Ubuntu 18.04 LTS  4.15.0-22-generic  docker://18.9.5
+  onap-control-3  Ready   controlplane,etcd  3h53m  v1.13.5  10.0.0.12    <none>       Ubuntu 18.04 LTS  4.15.0-22-generic  docker://18.9.5
+  onap-k8s-1      Ready   worker             3h53m  v1.13.5  10.0.0.14    <none>       Ubuntu 18.04 LTS  4.15.0-22-generic  docker://18.9.5
+  onap-k8s-10     Ready   worker             3h53m  v1.13.5  10.0.0.16    <none>       Ubuntu 18.04 LTS  4.15.0-22-generic  docker://18.9.5
+  onap-k8s-11     Ready   worker             3h53m  v1.13.5  10.0.0.18    <none>       Ubuntu 18.04 LTS  4.15.0-22-generic  docker://18.9.5
+  onap-k8s-12     Ready   worker             3h53m  v1.13.5  10.0.0.7     <none>       Ubuntu 18.04 LTS  4.15.0-22-generic  docker://18.9.5
+  onap-k8s-2      Ready   worker             3h53m  v1.13.5  10.0.0.26    <none>       Ubuntu 18.04 LTS  4.15.0-22-generic  docker://18.9.5
+  onap-k8s-3      Ready   worker             3h53m  v1.13.5  10.0.0.5     <none>       Ubuntu 18.04 LTS  4.15.0-22-generic  docker://18.9.5
+  onap-k8s-4      Ready   worker             3h53m  v1.13.5  10.0.0.6     <none>       Ubuntu 18.04 LTS  4.15.0-22-generic  docker://18.9.5
+  onap-k8s-5      Ready   worker             3h53m  v1.13.5  10.0.0.9     <none>       Ubuntu 18.04 LTS  4.15.0-22-generic  docker://18.9.5
+  onap-k8s-6      Ready   worker             3h53m  v1.13.5  10.0.0.17    <none>       Ubuntu 18.04 LTS  4.15.0-22-generic  docker://18.9.5
+  onap-k8s-7      Ready   worker             3h53m  v1.13.5  10.0.0.20    <none>       Ubuntu 18.04 LTS  4.15.0-22-generic  docker://18.9.5
+  onap-k8s-8      Ready   worker             3h53m  v1.13.5  10.0.0.10    <none>       Ubuntu 18.04 LTS  4.15.0-22-generic  docker://18.9.5
+  onap-k8s-9      Ready   worker             3h53m  v1.13.5  10.0.0.4     <none>       Ubuntu 18.04 LTS  4.15.0-22-generic  docker://18.9.5
-3. Add unique name for your new Rancher environment
-4. Select the Kubernetes template
-5. Click "create"
-.. image:: Click_create.jpeg
-6. Select the new named environment (ie. SB4) from the dropdown list (top
-   left).
-Rancher is now waiting for a Kubernetes Host to be added.
-.. image:: K8s-Assign_Floating_IP_for_external_access.jpeg
+
+Install Helm
+============
+
+ $ kubectl -n kube-system create serviceaccount tiller
+
+ $ kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
+
+ $ helm init --service-account tiller
+
+ $ kubectl -n kube-system rollout status deploy/tiller-deploy
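+
+If the helm client is not yet installed locally, it can be fetched from the
+Helm release archive (a sketch, assuming Helm 2.x; the client version should
+match the tiller version deployed by *helm init*)::
+
+ $ wget http://storage.googleapis.com/kubernetes-helm/helm-v2.9.1-linux-amd64.tar.gz
+
+ $ tar -zxvf helm-v2.9.1-linux-amd64.tar.gz
+
+ $ sudo mv linux-amd64/helm /usr/local/bin/helm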
-Add Kubernetes Host
--------------------
-1. If this is the first (or only) host being added - click on the "Add a host"
-   link
-.. image:: K8s-Assign_Floating_IP_for_external_access.jpeg
-and click on "Save" (accept defaults).
-.. image:: and_click_on_Save_accept_defaults.jpeg
-otherwise select INFRASTRUCTURE→ Hosts and click on "Add Host"
-.. image:: otherwise_select_INFRASTRUCTURE_Hosts_and_click_on_Add_Host.jpg
-2. Enter the management IP for the k8s VM (e.g. 10.0.0.4) that was just
-   created.
-3. Click on “Copy to Clipboard” button
-4. Click on “Close” button
-.. image:: Click_on_Close_button.jpeg
-Without the 10.0.0.4 IP - the CATTLE_AGENT will be derived on the host - but it
-may not be a routable IP.
+Setting up an NFS share for Multinode Kubernetes Clusters
+=========================================================
+Deploying applications to a Kubernetes cluster requires Kubernetes nodes to
+share a common, distributed filesystem. In this tutorial, we will set up an
+NFS Master and configure all Worker nodes of a Kubernetes cluster to play
+the role of NFS slaves.
+
+It is recommended that a separate VM, outside of the Kubernetes
+cluster, be used. This is to ensure that the NFS Master does not compete for
+resources with Kubernetes Control Plane or Worker Nodes.
 
-Configure Kubernetes Host
--------------------------
-
-1. Login to the new Kubernetes Host::
-
-    > ssh -i ~/oom-key.pem ubuntu@10.12.5.1
-    The authenticity of host '10.12.5.172 (10.12.5.172)' can't be established.
-    ECDSA key fingerprint is SHA256:tqxayN58nCJKOJcWrEZzImkc0qKQHDDfUTHqk4WMcEI.
-    Are you sure you want to continue connecting (yes/no)? yes
-    Warning: Permanently added '10.12.5.172' (ECDSA) to the list of known hosts.
-    Welcome to Ubuntu 16.04.2 LTS (GNU/Linux 4.4.0-64-generic x86_64)
-
-    * Documentation: https://help.ubuntu.com
-    * Management: https://landscape.canonical.com
-    * Support: https://ubuntu.com/advantage
-
-    Get cloud support with Ubuntu Advantage Cloud Guest:
-    http://www.ubuntu.com/business/services/cloud
-
-    180 packages can be updated.
-    100 updates are security updates.
-
-    The programs included with the Ubuntu system are free software;
-    the exact distribution terms for each program are described in the
-    individual files in /usr/share/doc/*/copyright.
-
-    Ubuntu comes with ABSOLUTELY NO WARRANTY, to the extent permitted by
-    applicable law.
-
-    To run a command as administrator (user "root"), use "sudo <command>".
-    See "man sudo_root" for details.
-    ubuntu@sb4-k8s-1:~$
+Launch new NFS Server VM instance
+---------------------------------
+.. image:: images/nfs_server/nfs_server_1.png
+
+Select Ubuntu 18.04 as base image
+---------------------------------
+Select "No" on "Create New Volume"
+
+.. image:: images/nfs_server/nfs_server_2.png
+
+Select Flavor
+-------------
+
+.. image:: images/nfs_server/nfs_server_3.png
+
+Networking
+-----------
+
+.. image:: images/nfs_server/nfs_server_4.png
+
+Security Group
+---------------
+
+.. image:: images/nfs_server/nfs_server_5.png
+
+Key Pair
+--------
+Assign the key pair that was created/selected previously (e.g. onap_key).
+
+.. image:: images/nfs_server/nfs_server_6.png
+
+Apply customization script for NFS Server VM
+--------------------------------------------
+
+Click :download:`openstack-nfs-server.sh <openstack-nfs-server.sh>` to
+download the script.
+
+.. literalinclude:: openstack-nfs-server.sh
+  :language: bash
+
+This customization script will:
+
+* update ubuntu
+* install nfs server
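+
+For reference, the export that *master_nfs_node.sh* (shown below) configures
+amounts to an */etc/exports* entry along these lines (a sketch; the exact
+export options are those chosen by the script and may differ)::
+
+  /nfs_share <worker_ip>(rw,sync,no_root_squash,no_subtree_check)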
+
+
+Launch Instance
+---------------
+
+.. image:: images/nfs_server/nfs_server_7.png
+
+Assign Floating IP addresses
+----------------------------
+
+.. image:: images/nfs_server/nfs_server_8.png
+
+Resulting floating IP assignments in this example.
+
+.. image:: images/nfs_server/nfs_server_9.png
+
+
+To properly set up an NFS share on Master and Slave nodes, the user can run
+the scripts below.
+
+Click :download:`master_nfs_node.sh <master_nfs_node.sh>` to download the
+script.
+
+.. literalinclude:: master_nfs_node.sh
+  :language: bash
+
+Click :download:`slave_nfs_node.sh <slave_nfs_node.sh>` to download the script.
+
+.. literalinclude:: slave_nfs_node.sh
+  :language: bash
+
+The master_nfs_node.sh script runs in the NFS Master node and needs the list
+of NFS Slave nodes as input, e.g.::
+
+ $ sudo ./master_nfs_node.sh node1_ip node2_ip ... nodeN_ip
+
+The slave_nfs_node.sh script runs in each NFS Slave node and needs the IP of
+the NFS Master node as input, e.g.::
+
+ $ sudo ./slave_nfs_node.sh master_node_ip
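+
+After both scripts have run, the share can be verified from any worker node
+(a sketch, assuming the scripts export and mount the */nfs_share* directory
+created by *openstack-nfs-server.sh*)::
+
+ $ mount | grep nfs_share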
-2. Paste Clipboard content and hit enter to install Rancher Agent::
-
-    ubuntu@sb4-k8s-1:~$ sudo docker run -e CATTLE_AGENT_IP="10.0.0.4“ --rm --privileged -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/rancher:/var/lib/rancher rancher/agent:v1.2.9 http://10.12.6.16:8080/v1/scripts/5D757C68BD0A2125602A:1514678400000:yKW9xHGJDLvq6drz2eDzR2mjato
-    Unable to find image 'rancher/agent:v1.2.9' locally
-    v1.2.9: Pulling From rancher/agent
-    b3e1c725a85f: Pull complete
-    6071086409fc: Pull complete
-    d0ac3b234321: Pull complete
-    87f567b5cf58: Pull complete
-    a63e24b217c4: Pull complete
-    d0a3f58caef0: Pull complete
-    16914729cfd3: Pull complete
-    dc5c21984c5b: Pull complete
-    d7e8f9784b20: Pull complete
-    Digest: sha256:c21255ac4d94ffbc7b523F870F20ea5189b68Fa3d642800adb4774aab4748e66
-    Status: Downloaded newer image for rancher/agent:v1.2.9
-
-    INFO: Running Agent Registration Process, CATTLE_URL=http://10.12.6.16:8080/v1
-    INFO: Attempting to connect to: http://10.12.6.16:8080/v1
-    INFO: http://10.12.6.16:8080/v1 is accessible
-    INFO: Inspecting host capabilities
-    INFO: Boot2Docker: false
-    INFO: Host writable: true
-    INFO: Token: xxxxxxxx
-    INFO: Running registration
-    INFO: Printing Environment
-    INFO: ENV: CATTLE_ACCESS_KEY=98B35AC484FBF820E0AD
-    INFO: ENV: CATTLE_AGENT_IP=10.0.9.4
-    INFO: ENV: CATTLE_HOME=/var/lib/cattle
-    INFO: ENV: CATTLE_REGISTRATION_ACCESS_KEY=registrationToken
-    INFO: ENV: CATTLE_REGISTRATION_SECRET_KEY=xxxxxxx
-    INFO: ENV: CATTLE_SECRET_KEY=xxxxxxx
-    INFO: ENV: CATTLE_URL=http://10.12.6.16:8080/v1
-    INFO: ENV: DETECTED_CATTLE_AGENT_IP=10.12.5.172
-    INFO: ENV: RANCHER_AGENT_IMAGE=rancher/agent:v1.2.9
-    INFO: Launched Rancher Agent: c27ee0f3dc4c783b0db647ea1f73c35b3843a4b8d60b96375b1a05aa77d83136
-    ubuntu@sb4-k8s-1:~$
-
-3. Return to Rancher environment (e.g. SB4) and wait for services to complete
-   (~ 10-15 mins)
-
-.. image:: Return_to_Rancher_environment_eg_SB4_and_wait_for_services_to_complete_10-15_mins.jpeg
-
-Configure kubectl and helm
-==========================
-In this example we are configuring kubectl and helm that have been installed
-(as a convenience) onto the rancher and kubernetes hosts. Typically you would
-install them both on your PC and remotely connect to the cluster. The following
-procedure would remain the same.
-
-1. Click on CLI and then click on “Generate Config”
-
-.. image:: Click_on_CLI_and_then_click_on_Generate_Config.jpeg
-
-2. Click on “Copy to Clipboard” - wait until you see a "token" - do not copy
-   user+password - the server is not ready at that point
-
-.. image:: Click_on_Copy_to_Clipboard-wait_until_you_see_a_token-do_not_copy_user+password-the_server_is_not_ready_at_that_point.jpeg
-
-3. Create a .kube directory in user directory (if one does not exist)::
-
-    ubuntu@sb4-kSs-1:~$ mkdir .kube
-    ubuntu@sb4-kSs-1:~$ vi .kube/config
-
-4. Paste contents of Clipboard into a file called “config” and save the file::
-
-    apiVersion: v1
-    kind : Config
-    clusters:
-    - cluster:
-        api-version: v1
-        insecure-skip-tls-verify: true
-        server: "https://10.12.6.16:8080/r/projects/1a7/kubernetes:6443"
-      name: "SB4"
-    contexts:
-    - context:
-        cluster: "SB4"
-        user: "SB4"
-      name: "SB4"
-    current-context: "SB4"
-    users:
-    - name: "SB4"
-      user:
-        token: "QmFzaWMgTlRBd01qZzBOemc)TkRrMk1UWkNOMFpDTlVFNlExcHdSa1JhVZreE5XSm1TRGhWU2t0Vk1sQjVhalZaY0dWaFVtZGFVMHQzWW1WWVJtVmpSQT09"
-
-5. Validate that kubectl is able to connect to the kubernetes cluster::
-
-    ubuntu@sb4-k8s-1:~$ kubectl config get-contexts
-    CURRENT NAME CLUSTER AUTHINFO NAMESPACE
-    * SB4 SB4 SB4
-    ubuntu@sb4-kSs-1:~$
-
-and show running pods::
-
-    ubuntu@sb4-k8s-1:~$ kubectl get pods --all-namespaces -o=wide
-    NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE
-    kube-system heapster—7Gb8cd7b5-q7p42 1/1 Running 0 13m 10.42.213.49 sb4-k8s-1
-    kube-system kube-dns-5d7bM87c9-c6f67 3/3 Running 0 13m 10.42.181.110 sb4-k8s-1
-    kube-system kubernetes-dashboard-f9577fffd-kswjg 1/1 Running 0 13m 10.42.105.113 sb4-k8s-1
-    kube-system monitoring-grafana-997796fcf-vg9h9 1/1 Running 0 13m 10.42,141.58 sb4-k8s-1
-    kube-system monitoring-influxdb-56chd96b-hk66b 1/1 Running 0 13m 10.4Z.246.90 sb4-k8s-1
-    kube-system tiller-deploy-cc96d4f6b-v29k9 1/1 Running 0 13m 10.42.147.248 sb4-k8s-1
-    ubuntu@sb4-k8s-1:~$
-
-6. Validate helm is running at the right version. If not, an error like this
-   will be displayed::
-
-    ubuntu@sb4-k8s-1:~$ helm list
-    Error: incompatible versions c1ient[v2.9.1] server[v2.6.1]
-    ubuntu@sb4-k8s-1:~$
-
-7. Upgrade the server-side component of helm (tiller) via `helm init --upgrade`::
-
-    ubuntu@sb4-k8s-1:~$ helm init --upgrade
-    Creating /home/ubuntu/.helm
-    Creating /home/ubuntu/.helm/repository
-    Creating /home/ubuntu/.helm/repository/cache
-    Creating /home/ubuntu/.helm/repository/local
-    Creating /home/ubuntu/.helm/plugins
-    Creating /home/ubuntu/.helm/starters
-    Creating /home/ubuntu/.helm/cache/archive
-    Creating /home/ubuntu/.helm/repository/repositories.yaml
-    Adding stable repo with URL: https://kubernetes-charts.storage.googleapis.com
-    Adding local repo with URL: http://127.0.0.1:8879/charts
-    $HELM_HOME has been configured at /home/ubuntu/.helm.
-
-    Tiller (the Helm server-side component) has been upgraded to the current version.
-    Happy Helming!
-    ubuntu@sb4-k8s-1:~$
 
 ONAP Deployment via OOM
 =======================
diff --git a/docs/openstack-k8s-controlnode.sh b/docs/openstack-k8s-controlnode.sh
new file mode 100644
index 0000000000..1d230c2da4
--- /dev/null
+++ b/docs/openstack-k8s-controlnode.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+DOCKER_VERSION=18.09.5
+
+apt-get update
+
+curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
+mkdir -p /etc/systemd/system/docker.service.d/
+cat > /etc/systemd/system/docker.service.d/docker.conf << EOF
+[Service]
+ExecStart=
+ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001
+EOF
+
+sudo usermod -aG docker ubuntu
+
+systemctl daemon-reload
+systemctl restart docker
+apt-mark hold docker-ce
+
+IP_ADDR=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'`
+HOSTNAME=`hostname`
+
+echo "$IP_ADDR $HOSTNAME" >> /etc/hosts
+
+docker login -u docker -p docker nexus3.onap.org:10001
+
+sudo apt-get install make -y
+
+
+exit 0
diff --git a/docs/openstack-k8s-node.sh b/docs/openstack-k8s-node.sh
deleted file mode 100644
index 308f2204ff..0000000000
--- a/docs/openstack-k8s-node.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-DOCKER_VERSION=17.03
-KUBECTL_VERSION=1.11.5
-HELM_VERSION=2.9.1
-
-# setup root access - default login: oom/oom - comment out to restrict access too ssh key only
-sed -i 's/PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
-sed -i 's/PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config
-service sshd restart
-echo -e "oom\noom" | passwd root
-
-apt-get update
-curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
-mkdir -p /etc/systemd/system/docker.service.d/
-cat > /etc/systemd/system/docker.service.d/docker.conf << EOF
-[Service]
-ExecStart=
-ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001
-EOF
-systemctl daemon-reload
-systemctl restart docker
-apt-mark hold docker-ce
-
-IP_ADDY=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'`
-HOSTNAME=`hostname`
-
-echo "$IP_ADDY $HOSTNAME" >> /etc/hosts
-
-docker login -u docker -p docker nexus3.onap.org:10001
-
-sudo apt-get install make -y
-
-sudo curl -LO https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
-sudo chmod +x ./kubectl
-sudo mv ./kubectl /usr/local/bin/kubectl
-sudo mkdir ~/.kube
-wget http://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz
-sudo tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
-sudo mv linux-amd64/helm /usr/local/bin/helm
-
-# install nfs
-sudo apt-get install nfs-common -y
-
-
-exit 0
diff --git a/docs/openstack-k8s-workernode.sh b/docs/openstack-k8s-workernode.sh
new file mode 100644
index 0000000000..3f32d050a9
--- /dev/null
+++ b/docs/openstack-k8s-workernode.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+DOCKER_VERSION=18.09.5
+
+apt-get update
+
+curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
+mkdir -p /etc/systemd/system/docker.service.d/
+cat > /etc/systemd/system/docker.service.d/docker.conf << EOF
+[Service]
+ExecStart=
+ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001
+EOF
+
+sudo usermod -aG docker ubuntu
+
+systemctl daemon-reload
+systemctl restart docker
+apt-mark hold docker-ce
+
+IP_ADDR=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'`
+HOSTNAME=`hostname`
+
+echo "$IP_ADDR $HOSTNAME" >> /etc/hosts
+
+docker login -u docker -p docker nexus3.onap.org:10001
+
+sudo apt-get install make -y
+
+# install nfs
+sudo apt-get install nfs-common -y
+
+
+exit 0
diff --git a/docs/openstack-nfs-server.sh b/docs/openstack-nfs-server.sh
new file mode 100644
index 0000000000..1db04eaff6
--- /dev/null
+++ b/docs/openstack-nfs-server.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+apt-get update
+
+IP_ADDR=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'`
+HOSTNAME=`hostname`
+
+echo "$IP_ADDR $HOSTNAME" >> /etc/hosts
+
+sudo apt-get install make -y
+
+# nfs server
+sudo apt-get install nfs-kernel-server -y
+
+sudo mkdir -p /nfs_share
+sudo chown nobody:nogroup /nfs_share/
+
+exit 0
diff --git a/docs/openstack-rancher.sh b/docs/openstack-rancher.sh
deleted file mode 100644
index ac91ff5566..0000000000
--- a/docs/openstack-rancher.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-
-DOCKER_VERSION=17.03
-RANCHER_VERSION=1.6.22
-KUBECTL_VERSION=1.11.5
-HELM_VERSION=2.9.1
-
-# setup root access - default login: oom/oom - comment out to restrict access too ssh key only
-sed -i 's/PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
-sed -i 's/PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config
-service sshd restart
-echo -e "oom\noom" | passwd root
-
-apt-get update
-curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
-mkdir -p /etc/systemd/system/docker.service.d/
-cat > /etc/systemd/system/docker.service.d/docker.conf << EOF
-[Service]
-ExecStart=
-ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001
-EOF
-systemctl daemon-reload
-systemctl restart docker
-apt-mark hold docker-ce
-
-IP_ADDY=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'`
-HOSTNAME=`hostname`
-
-echo "$IP_ADDY $HOSTNAME" >> /etc/hosts
-
-docker login -u docker -p docker nexus3.onap.org:10001
-
-sudo apt-get install make -y
-
-sudo docker run -d --restart=unless-stopped -p 8080:8080 --name rancher_server rancher/server:v$RANCHER_VERSION
-sudo curl -LO https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
-sudo chmod +x ./kubectl
-sudo mv ./kubectl /usr/local/bin/kubectl
-sudo mkdir ~/.kube
-wget http://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz
-sudo tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
-sudo mv linux-amd64/helm /usr/local/bin/helm
-
-# nfs server
-sudo apt-get install nfs-kernel-server -y
-
-sudo mkdir -p /nfs_share
-sudo chown nobody:nogroup /nfs_share/
-
-
-exit 0