Diffstat (limited to 'docs')
43 files changed, 642 insertions, 481 deletions
diff --git a/docs/cluster.yml b/docs/cluster.yml
new file mode 100644
index 0000000000..d4962d3478
--- /dev/null
+++ b/docs/cluster.yml
@@ -0,0 +1,156 @@
+# An example of an HA Kubernetes cluster for ONAP
+nodes:
+- address: 10.12.6.85
+  port: "22"
+  internal_address: 10.0.0.8
+  role:
+  - controlplane
+  - etcd
+  hostname_override: "onap-control-1"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.90
+  port: "22"
+  internal_address: 10.0.0.11
+  role:
+  - controlplane
+  - etcd
+  hostname_override: "onap-control-2"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.89
+  port: "22"
+  internal_address: 10.0.0.12
+  role:
+  - controlplane
+  - etcd
+  hostname_override: "onap-control-3"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.5.165
+  port: "22"
+  internal_address: 10.0.0.14
+  role:
+  - worker
+  hostname_override: "onap-k8s-1"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.238
+  port: "22"
+  internal_address: 10.0.0.26
+  role:
+  - worker
+  hostname_override: "onap-k8s-2"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.126
+  port: "22"
+  internal_address: 10.0.0.5
+  role:
+  - worker
+  hostname_override: "onap-k8s-3"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.5.11
+  port: "22"
+  internal_address: 10.0.0.6
+  role:
+  - worker
+  hostname_override: "onap-k8s-4"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.244
+  port: "22"
+  internal_address: 10.0.0.9
+  role:
+  - worker
+  hostname_override: "onap-k8s-5"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.249
+  port: "22"
+  internal_address: 10.0.0.17
+  role:
+  - worker
+  hostname_override: "onap-k8s-6"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.5.191
+  port: "22"
+  internal_address: 10.0.0.20
+  role:
+  - worker
+  hostname_override: "onap-k8s-7"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.111
+  port: "22"
+  internal_address: 10.0.0.10
+  role:
+  - worker
+  hostname_override: "onap-k8s-8"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.195
+  port: "22"
+  internal_address: 10.0.0.4
+  role:
+  - worker
+  hostname_override: "onap-k8s-9"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.5.160
+  port: "22"
+  internal_address: 10.0.0.16
+  role:
+  - worker
+  hostname_override: "onap-k8s-10"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.74
+  port: "22"
+  internal_address: 10.0.0.18
+  role:
+  - worker
+  hostname_override: "onap-k8s-11"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.82
+  port: "22"
+  internal_address: 10.0.0.7
+  role:
+  - worker
+  hostname_override: "onap-k8s-12"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+services:
+  kube-api:
+    service_cluster_ip_range: 10.43.0.0/16
+    pod_security_policy: false
+    always_pull_images: false
+  kube-controller:
+    cluster_cidr: 10.42.0.0/16
+    service_cluster_ip_range: 10.43.0.0/16
+  kubelet:
+    cluster_domain: cluster.local
+    cluster_dns_server: 10.43.0.10
+    fail_swap_on: false
+network:
+  plugin: canal
+authentication:
+  strategy: x509
+ssh_key_path: "~/.ssh/onap-key"
+ssh_agent_auth: false
+authorization:
+  mode: rbac
+ignore_docker_version: false
+kubernetes_version: "v1.13.5-rancher1-2"
+private_registries:
+- url: nexus3.onap.org:10001
+  user: docker
+  password: docker
+  is_default: true
+cluster_name: "onap"
+restore:
+  restore: false
+  snapshot_name: ""
diff --git a/docs/example-integration-override.yaml
b/docs/example-integration-override.yaml index 9c336d69ce..56699d921c 100644 --- a/docs/example-integration-override.yaml +++ b/docs/example-integration-override.yaml @@ -1,36 +1,46 @@ global: repository: 10.12.5.2:5000 pullPolicy: IfNotPresent +################################################################# +# This override file configures openstack parameters for ONAP +################################################################# +appc: + config: + enableClustering: false + openStackType: "OpenStackProvider" + openStackName: "OpenStack" + openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0" + openStackServiceTenantName: "OPENSTACK_TENANTNAME_HERE" + openStackDomain: "Default" + openStackUserName: "OPENSTACK_USERNAME_HERE" + openStackEncryptedPassword: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_PASSWORD_HERE_XXXXXXXXXXXXXXXX" robot: - enabled: true - flavor: large appcUsername: "appc@appc.onap.org" - appcPassword: "APPC_PASSWORD_HERE" + appcPassword: "demo123456!" openStackKeyStoneUrl: "http://10.12.25.2:5000" openStackPublicNetId: "971040b2-7059-49dc-b220-4fab50cb2ad4" openStackTenantId: "09d8566ea45e43aa974cf447ed591d77" openStackUserName: "OPENSTACK_USERNAME_HERE" ubuntu14Image: "ubuntu-14-04-cloud-amd64" ubuntu16Image: "ubuntu-16-04-cloud-amd64" - openStackPrivateNetId: "d4ab89ff-c735-4ce4-93f6-cff445157b98" - openStackPrivateSubnetId: "46c2391c-ed98-4fb0-8ab7-88678bc55b9f" + openStackPrivateNetId: "c7824f00-bef7-4864-81b9-f6c3afabd313" + openStackPrivateSubnetId: "2a0e8888-f93e-4615-8d28-fc3d4d087fc3" openStackPrivateNetCidr: "10.0.0.0/16" - openStackSecurityGroup: "3914301b-2996-414f-ba0a-da4b2275a753" + openStackSecurityGroup: "3a7a1e7e-6d15-4264-835d-fab1ae81e8b0" openStackOamNetworkCidrPrefix: "10.0" - dcaeCollectorIp: "10.12.5.46" + dcaeCollectorIp: "10.12.6.88" vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh" - demoArtifactsVersion: "1.3.0" + demoArtifactsVersion: "1.4.0-SNAPSHOT" demoArtifactsRepoUrl: "https://nexus.onap.org/content/repositories/releases" - scriptVersion: "1.3.0" - rancherIpAddress: "10.12.6.38" + scriptVersion: "1.4.0-SNAPSHOT" + rancherIpAddress: "10.12.5.127" config: - openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HEREXXXXXXXXXXXXXXXX" + # openStackEncryptedPasswordHere should match the encrypted string used in SO and APPC and overridden per environment + openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HERE_XXXXXXXXXXXXXXXX" so: - enabled: true + # so server configuration so-catalog-db-adapter: config: openStackUserName: "OPENSTACK_USERNAME_HERE" openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0" - openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HEREXXXXXXXXXXXXXXXX" - - + openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
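The ``openStackEncryptedPassword`` / ``openStackEncryptedPasswordHere`` placeholders in the override file above are produced by the encryption steps described in the quickstart guide further down. A minimal sketch of the openssl flow used for the robot/SO values, assuming the repository is cloned at ``~/oom`` and using an obviously fake tenant password (APPC and SO also provide their own java-based utilities, per the guide)::

    cd ~/oom/kubernetes/so/resources/config/mso/
    TENANT_PASSWORD='example-password'   # placeholder -- substitute your real OpenStack tenant password
    echo -n "$TENANT_PASSWORD" \
      | openssl aes-128-ecb -e -K "$(cat encryption.key)" -nosalt \
      | xxd -c 256 -p                    # hex string to paste into the override file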
\ No newline at end of file diff --git a/docs/helm-search.txt b/docs/helm-search.txt index db95e4f7d7..036ad036f2 100644 --- a/docs/helm-search.txt +++ b/docs/helm-search.txt @@ -1,31 +1,42 @@ NAME CHART VERSION APP VERSION DESCRIPTION -local/onap 4.0.0 Dublin Open Network Automation Platform (ONAP) -local/aaf 4.0.0 ONAP Application Authorization Framework -local/aai 4.0.0 ONAP Active and Available Inventory -local/cassandra 4.0.0 ONAP cassandra -local/cds 4.0.0 ONAP Common Design Studio -local/clamp 4.0.0 ONAP Clamp -local/cli 4.0.0 ONAP Command Line Interface -local/consul 4.0.0 ONAP Consul Agent -local/contrib 4.0.0 ONAP optional tools -local/dcaegen2 4.0.0 ONAP DCAE Gen2 -local/dmaap 4.0.1 ONAP DMaaP components -local/esr 4.0.0 ONAP External System Register -local/log 4.0.0 ONAP Logging ElasticStack -local/msb 4.0.0 ONAP MicroServices Bus -local/multicloud 4.0.0 ONAP multicloud broker -local/nbi 4.0.0 ONAP Northbound Interface -local/oof 4.0.0 ONAP Optimization Framework -local/pnda 4.0.0 ONAP DCAE PNDA -local/policy 4.0.0 ONAP Policy Administration Point -local/pomba 4.0.0 ONAP Post Orchestration Model Based Audit -local/portal 4.0.0 ONAP Web Portal -local/postgres 4.0.0 ONAP Postgres Server -local/robot 4.0.0 A helm Chart for kubernetes-ONAP Robot -local/sdnc-prom 4.0.0 ONAP SDNC Policy Driven Ownership Management -local/sniro-emulator 4.0.0 ONAP Mock Sniro Emulator -local/so 4.0.0 ONAP Service Orchestrator -local/uui 4.0.0 ONAP uui -local/vfc 4.0.0 ONAP Virtual Function Controller (VF-C) -local/vid 4.0.0 ONAP Virtual Infrastructure Deployment -local/vnfsdk 4.0.0 ONAP VNF SDK +local/onap 4.0.0 Dublin Open Network Automation Platform (ONAP) +local/aaf 4.0.0 ONAP Application Authorization Framework +local/aai 4.0.0 ONAP Active and Available Inventory +local/appc 4.0.0 Application Controller +local/cassandra 4.0.0 ONAP cassandra +local/cds 4.0.0 ONAP Controller Design Studio (CDS) +local/clamp 4.0.0 ONAP Clamp +local/cli 4.0.0 ONAP Command Line Interface +local/common 4.0.0 Common templates for inclusion in other charts +local/consul 4.0.0 ONAP Consul Agent +local/contrib 4.0.0 ONAP optional tools +local/dcaegen2 4.0.0 ONAP DCAE Gen2 +local/dgbuilder 4.0.0 D.G. Builder application +local/dmaap 4.0.1 ONAP DMaaP components +local/esr 4.0.0 ONAP External System Register +local/log 4.0.0 ONAP Logging ElasticStack +local/mariadb-galera 4.0.0 Chart for MariaDB Galera cluster +local/mongo 4.0.0 MongoDB Server +local/msb 4.0.0 ONAP MicroServices Bus +local/multicloud 4.0.0 ONAP multicloud broker +local/music 4.0.0 MUSIC - Multi-site State Coordination Service +local/mysql 4.0.0 MySQL Server +local/nbi 4.0.0 ONAP Northbound Interface +local/network-name-gen 4.0.0 Name Generation Micro Service +local/nfs-provisioner 4.0.0 NFS provisioner +local/oof 4.0.0 ONAP Optimization Framework +local/pnda 4.0.0 ONAP DCAE PNDA +local/policy 4.0.0 ONAP Policy Administration Point +local/pomba 4.0.0 ONAP Post Orchestration Model Based Audit +local/portal 4.0.0 ONAP Web Portal +local/postgres 4.0.0 ONAP Postgres Server +local/robot 4.0.0 A helm Chart for kubernetes-ONAP Robot +local/sdc 4.0.0 Service Design and Creation Umbrella Helm charts +local/sdnc 4.0.0 SDN Controller +local/sdnc-prom 4.0.0 ONAP SDNC Policy Driven Ownership Management +local/sniro-emulator 4.0.0 ONAP Mock Sniro Emulator +local/so 4.0.0 ONAP Service Orchestrator +local/uui 4.0.0 ONAP uui +local/vfc 4.0.0 ONAP Virtual Function Controller (VF-C) +local/vid 4.0.0 ONAP Virtual Infrastructure Deployment +local/vnfsdk 4.0.0 ONAP VNF SDK
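The helm-search.txt listing above is captured CLI output rather than hand-maintained text; one way to regenerate it after building the charts (a sketch -- the exact search argument may vary with your Helm 2.x setup)::

    cd oom/kubernetes
    make all                        # package all charts into the local repo
    helm search local/ > ~/helm-search.txt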
\ No newline at end of file diff --git a/docs/images/cp_vms/control_plane_1.png b/docs/images/cp_vms/control_plane_1.png Binary files differnew file mode 100644 index 0000000000..d59b9863b7 --- /dev/null +++ b/docs/images/cp_vms/control_plane_1.png diff --git a/docs/images/cp_vms/control_plane_2.png b/docs/images/cp_vms/control_plane_2.png Binary files differnew file mode 100644 index 0000000000..9a7d72f8a5 --- /dev/null +++ b/docs/images/cp_vms/control_plane_2.png diff --git a/docs/images/cp_vms/control_plane_3.png b/docs/images/cp_vms/control_plane_3.png Binary files differnew file mode 100644 index 0000000000..da329f20b5 --- /dev/null +++ b/docs/images/cp_vms/control_plane_3.png diff --git a/docs/images/cp_vms/control_plane_4.png b/docs/images/cp_vms/control_plane_4.png Binary files differnew file mode 100644 index 0000000000..817355a99e --- /dev/null +++ b/docs/images/cp_vms/control_plane_4.png diff --git a/docs/images/cp_vms/control_plane_5.png b/docs/images/cp_vms/control_plane_5.png Binary files differnew file mode 100644 index 0000000000..33805c50dd --- /dev/null +++ b/docs/images/cp_vms/control_plane_5.png diff --git a/docs/images/cp_vms/control_plane_6.png b/docs/images/cp_vms/control_plane_6.png Binary files differnew file mode 100644 index 0000000000..9e8ab638bc --- /dev/null +++ b/docs/images/cp_vms/control_plane_6.png diff --git a/docs/images/cp_vms/control_plane_7.png b/docs/images/cp_vms/control_plane_7.png Binary files differnew file mode 100644 index 0000000000..f0db6d3f3f --- /dev/null +++ b/docs/images/cp_vms/control_plane_7.png diff --git a/docs/images/cp_vms/control_plane_8.png b/docs/images/cp_vms/control_plane_8.png Binary files differnew file mode 100644 index 0000000000..e20f631e60 --- /dev/null +++ b/docs/images/cp_vms/control_plane_8.png diff --git a/docs/images/floating_ips/floating_1.png b/docs/images/floating_ips/floating_1.png Binary files differnew file mode 100644 index 0000000000..9f413164ab --- /dev/null +++ b/docs/images/floating_ips/floating_1.png diff --git a/docs/images/floating_ips/floating_2.png b/docs/images/floating_ips/floating_2.png Binary files differnew file mode 100644 index 0000000000..0001ef068c --- /dev/null +++ b/docs/images/floating_ips/floating_2.png diff --git a/docs/images/keys/key_pair_1.png b/docs/images/keys/key_pair_1.png Binary files differnew file mode 100644 index 0000000000..1135c93320 --- /dev/null +++ b/docs/images/keys/key_pair_1.png diff --git a/docs/images/keys/key_pair_2.png b/docs/images/keys/key_pair_2.png Binary files differnew file mode 100644 index 0000000000..ac3bfc5ca2 --- /dev/null +++ b/docs/images/keys/key_pair_2.png diff --git a/docs/images/keys/key_pair_3.png b/docs/images/keys/key_pair_3.png Binary files differnew file mode 100644 index 0000000000..1e0c0200f8 --- /dev/null +++ b/docs/images/keys/key_pair_3.png diff --git a/docs/images/keys/key_pair_4.png b/docs/images/keys/key_pair_4.png Binary files differnew file mode 100644 index 0000000000..031a9ba785 --- /dev/null +++ b/docs/images/keys/key_pair_4.png diff --git a/docs/images/nfs_server/nfs_server_1.png b/docs/images/nfs_server/nfs_server_1.png Binary files differnew file mode 100644 index 0000000000..912a10f055 --- /dev/null +++ b/docs/images/nfs_server/nfs_server_1.png diff --git a/docs/images/nfs_server/nfs_server_10.png b/docs/images/nfs_server/nfs_server_10.png Binary files differnew file mode 100644 index 0000000000..7d87d1ca56 --- /dev/null +++ b/docs/images/nfs_server/nfs_server_10.png diff --git a/docs/images/nfs_server/nfs_server_2.png 
b/docs/images/nfs_server/nfs_server_2.png Binary files differnew file mode 100644 index 0000000000..d59b9863b7 --- /dev/null +++ b/docs/images/nfs_server/nfs_server_2.png diff --git a/docs/images/nfs_server/nfs_server_3.png b/docs/images/nfs_server/nfs_server_3.png Binary files differnew file mode 100644 index 0000000000..9a7d72f8a5 --- /dev/null +++ b/docs/images/nfs_server/nfs_server_3.png diff --git a/docs/images/nfs_server/nfs_server_4.png b/docs/images/nfs_server/nfs_server_4.png Binary files differnew file mode 100644 index 0000000000..da329f20b5 --- /dev/null +++ b/docs/images/nfs_server/nfs_server_4.png diff --git a/docs/images/nfs_server/nfs_server_5.png b/docs/images/nfs_server/nfs_server_5.png Binary files differnew file mode 100644 index 0000000000..817355a99e --- /dev/null +++ b/docs/images/nfs_server/nfs_server_5.png diff --git a/docs/images/nfs_server/nfs_server_6.png b/docs/images/nfs_server/nfs_server_6.png Binary files differnew file mode 100644 index 0000000000..33805c50dd --- /dev/null +++ b/docs/images/nfs_server/nfs_server_6.png diff --git a/docs/images/nfs_server/nfs_server_7.png b/docs/images/nfs_server/nfs_server_7.png Binary files differnew file mode 100644 index 0000000000..9e8ab638bc --- /dev/null +++ b/docs/images/nfs_server/nfs_server_7.png diff --git a/docs/images/nfs_server/nfs_server_8.png b/docs/images/nfs_server/nfs_server_8.png Binary files differnew file mode 100644 index 0000000000..14103fb9c3 --- /dev/null +++ b/docs/images/nfs_server/nfs_server_8.png diff --git a/docs/images/nfs_server/nfs_server_9.png b/docs/images/nfs_server/nfs_server_9.png Binary files differnew file mode 100644 index 0000000000..aa8bc140e1 --- /dev/null +++ b/docs/images/nfs_server/nfs_server_9.png diff --git a/docs/images/rke/rke_1.png b/docs/images/rke/rke_1.png Binary files differnew file mode 100644 index 0000000000..b27fc517df --- /dev/null +++ b/docs/images/rke/rke_1.png diff --git a/docs/images/wk_vms/worker_1.png b/docs/images/wk_vms/worker_1.png Binary files differnew file mode 100644 index 0000000000..01314d1557 --- /dev/null +++ b/docs/images/wk_vms/worker_1.png diff --git a/docs/images/wk_vms/worker_2.png b/docs/images/wk_vms/worker_2.png Binary files differnew file mode 100644 index 0000000000..9a7d72f8a5 --- /dev/null +++ b/docs/images/wk_vms/worker_2.png diff --git a/docs/images/wk_vms/worker_3.png b/docs/images/wk_vms/worker_3.png Binary files differnew file mode 100644 index 0000000000..93d5e28cf2 --- /dev/null +++ b/docs/images/wk_vms/worker_3.png diff --git a/docs/images/wk_vms/worker_4.png b/docs/images/wk_vms/worker_4.png Binary files differnew file mode 100644 index 0000000000..817355a99e --- /dev/null +++ b/docs/images/wk_vms/worker_4.png diff --git a/docs/images/wk_vms/worker_5.png b/docs/images/wk_vms/worker_5.png Binary files differnew file mode 100644 index 0000000000..33805c50dd --- /dev/null +++ b/docs/images/wk_vms/worker_5.png diff --git a/docs/images/wk_vms/worker_6.png b/docs/images/wk_vms/worker_6.png Binary files differnew file mode 100644 index 0000000000..c71c122217 --- /dev/null +++ b/docs/images/wk_vms/worker_6.png diff --git a/docs/images/wk_vms/worker_7.png b/docs/images/wk_vms/worker_7.png Binary files differnew file mode 100644 index 0000000000..ecb13c1809 --- /dev/null +++ b/docs/images/wk_vms/worker_7.png diff --git a/docs/oom_cloud_setup_guide.rst b/docs/oom_cloud_setup_guide.rst index 7a5074f4ce..1ce260efbf 100644 --- a/docs/oom_cloud_setup_guide.rst +++ b/docs/oom_cloud_setup_guide.rst @@ -45,14 +45,14 @@ The versions of Kubernetes 
that are supported by OOM are as follows: .. table:: OOM Software Requirements - ============== =========== ===== ======== ======== - Release Kubernetes Helm kubectl Docker - ============== =========== ===== ======== ======== - amsterdam 1.7.x 2.3.x 1.7.x 1.12.x - beijing 1.8.10 2.8.2 1.8.10 17.03.x - casablanca 1.11.5 2.9.1 1.11.5 17.03.x - dublin 1.13.5 2.12.3 1.13.5 18.09.5 - ============== =========== ===== ======== ======== + ============== =========== ====== ======== ======== + Release Kubernetes Helm kubectl Docker + ============== =========== ====== ======== ======== + amsterdam 1.7.x 2.3.x 1.7.x 1.12.x + beijing 1.8.10 2.8.2 1.8.10 17.03.x + casablanca 1.11.5 2.9.1 1.11.5 17.03.x + dublin 1.13.5 2.12.3 1.13.5 18.09.5 + ============== =========== ====== ======== ======== Minimum Hardware Configuration ============================== diff --git a/docs/oom_quickstart_guide.rst b/docs/oom_quickstart_guide.rst index 0e1d3591f7..501deda7e4 100644 --- a/docs/oom_quickstart_guide.rst +++ b/docs/oom_quickstart_guide.rst @@ -1,7 +1,7 @@ .. This work is licensed under a .. Creative Commons Attribution 4.0 International License. .. http://creativecommons.org/licenses/by/4.0 -.. Copyright 2018 Amdocs, Bell Canada +.. Copyright 2019 Amdocs, Bell Canada .. _quick-start-label: @@ -17,29 +17,33 @@ available), follow the following instructions to deploy ONAP. **Step 1.** Clone the OOM repository from ONAP gerrit:: - > git clone -b 4.0.0-ONAP http://gerrit.onap.org/r/oom + > git clone -b 4.0.0-ONAP http://gerrit.onap.org/r/oom --recurse-submodules > cd oom/kubernetes -**Step 2.** Install Helm Plugins required to deploy the ONAP Casablanca release:: +**Step 2.** Install Helm Plugins required to deploy ONAP:: > sudo cp -R ~/oom/kubernetes/helm/plugins/ ~/.helm -**Step 3.** Customize the helm charts like onap.values.yaml or an override.yaml -like integration-override.yaml file to suit your deployment with items like the +**Step 3.** Customize the helm charts like oom/kubernetes/onap/values.yaml or an override +file like onap-all.yaml, onap-vfw.yaml or openstack.yaml to suit your deployment with items like the OpenStack tenant information. +.. note:: + Standard and example override files (e.g. onap-all.yaml, openstack.yaml) can be found in + the oom/kubernetes/onap/resources/overrides/ directory. + a. You may want to selectively enable or disable ONAP components by changing the `enabled: true/false` flags. b. Encrypt the OpenStack password using the shell tool for robot and put it in - the robot helm charts or robot section of integration-override.yaml + the robot helm charts or robot section of openstack.yaml c. Encrypt the OpenStack password using the java based script for SO helm charts - or SO section of integration-override.yaml. + or SO section of openstack.yaml. d. Update the OpenStack parameters that will be used by robot, SO and APPC helm @@ -49,8 +53,8 @@ OpenStack tenant information. a. Enabling/Disabling Components: -Here is an example of the nominal entries that need to be provided. We have different -values file available for different contexts. +Here is an example of the nominal entries that need to be provided. +We have different values files available for different contexts. .. literalinclude:: onap-values.yaml :language: yaml @@ -63,9 +67,9 @@ openssl algorithm that works with the python based Robot Framework. ..
note:: To generate ROBOT openStackEncryptedPasswordHere : - ``root@olc-rancher:~# cd so/resources/config/mso/`` + ``cd so/resources/config/mso/`` - ``root@olc-rancher:~/oom/kubernetes/so/resources/config/mso# echo -n "<openstack tenant password>" | openssl aes-128-ecb -e -K `cat encryption.key` -nosalt | xxd -c 256 -p`` + ``/oom/kubernetes/so/resources/config/mso# echo -n "<openstack tenant password>" | openssl aes-128-ecb -e -K `cat encryption.key` -nosalt | xxd -c 256 -p`` c. Generating SO Encrypted Password: The SO Encrypted Password uses a java based encryption utility since the @@ -120,20 +124,24 @@ follows:: **Step 8.** Once the repo is setup, installation of ONAP can be done with a single command - a. If you updated the values directly use this command:: +.. note:: + The --timeout 900 is currently required in Dublin to address long running initialization tasks + for DMaaP and SO. Without this timeout value both applications may fail to deploy. - > helm deploy dev local/onap --namespace onap + a. To deploy all ONAP applications use this command:: + > cd oom/kubernetes + > helm deploy dev local/onap --namespace onap -f onap/resources/overrides/onap-all.yaml -f onap/resources/overrides/openstack.yaml --timeout 900 - b. If you are using an integration-override.yaml file use this command:: + b. If you are using a custom override (e.g. integration-override.yaml) use this command:: - > helm deploy dev local/onap -f /root/integration-override.yaml --namespace onap + > helm deploy dev local/onap -f /root/integration-override.yaml --namespace onap --timeout 900 c. If you have a slower cloud environment you may want to use the public-cloud.yaml which has longer delay intervals on database updates.:: - > helm deploy dev local/onap -f /root/oom/kubernetes/onap/resources/environments/public-cloud.yaml -f /root/integration-override.yaml --namespace onap + > helm deploy dev local/onap -f /root/oom/kubernetes/onap/resources/environments/public-cloud.yaml -f /root/integration-override.yaml --namespace onap --timeout 900 **Step 9.** Commands to interact with the OOM installation @@ -141,7 +149,7 @@ single command Use the following to monitor your deployment and determine when ONAP is ready for use:: - > kubectl get pods --all-namespaces -o=wide + > kubectl get pods -n onap -o=wide Undeploying onap can be done using the following command:: diff --git a/docs/oom_setup_kubernetes_rancher.rst b/docs/oom_setup_kubernetes_rancher.rst index 5159377386..3ccde8d418 100644 --- a/docs/oom_setup_kubernetes_rancher.rst +++ b/docs/oom_setup_kubernetes_rancher.rst @@ -16,456 +16,446 @@ .. _onap-on-kubernetes-with-rancher: -ONAP on Kubernetes with Rancher -############################### +ONAP on HA Kubernetes Cluster +############################# -The following instructions will step you through the installation of Kubernetes -on an OpenStack environment with Rancher. The development lab used for this -installation is the ONAP Windriver lab. +This guide provides instructions on how to setup a Highly-Available Kubernetes Cluster. +For this, we are hosting our cluster on OpenStack VMs and using the Rancher Kubernetes Engine (RKE) +to deploy and manage our Kubernetes Cluster. -This guide does not cover all of the steps required to setup your OpenStack -environment: e.g. OAM networks and security groups but there is a wealth of -OpenStack information on the web. +.. contents:: + :depth: 1 + :local: +.. 
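The ``kubectl get pods -n onap -o=wide`` check from Step 9 can be wrapped in a small loop while the long-running (``--timeout 900``) deploys settle; a convenience sketch, not part of the official guide::

    # Refresh every 30s, listing only pods that are not yet Running/Completed.
    watch -n 30 "kubectl get pods -n onap | grep -Ev 'Running|Completed'"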
-Rancher Installation -==================== +The result at the end of this tutorial will be: -The following instructions describe how to create an Openstack VM running -Rancher. This node will not be used to host ONAP itself, it will be used -exclusively by Rancher. +*1.* Creation of a Key Pair to use with OpenStack and RKE -Launch new VM instance to host the Rancher Server ------------------------------------------------- +*2.* Creation of OpenStack VMs to host Kubernetes Control Plane -.. image:: Rancher-Launch_new_VM_instance_to_host_the_Rancher_Server.jpeg +*3.* Creation of OpenStack VMs to host Kubernetes Workers -Select Ubuntu 16.04 as base image +*4.* Installation and configuration of RKE to set up an HA Kubernetes cluster + +*5.* Installation and configuration of kubectl + +*6.* Installation and configuration of helm + +*7.* Creation of an NFS Server to be used by ONAP as shared persistence + +There are many ways one can execute the above steps, including automation through the use of HEAT to set up the OpenStack VMs. +To better illustrate the steps involved, we have captured the manual creation of such an environment using the ONAP Wind River Open Lab. + +Create Key Pair +=============== +A Key Pair is required to access the created OpenStack VMs and will be used by +RKE to configure the VMs for Kubernetes. + +Use an existing key pair, import one or create a new one to assign. + +.. image:: images/keys/key_pair_1.png + +.. Note:: + If you're creating a new Key Pair, ensure you create a local copy of the Private Key through the use of "Copy Private Key to Clipboard". + +For the purpose of this guide, we will assume a new local key called "onap-key" +has been downloaded and is copied into **~/.ssh/**, from which it can be referenced. + +Example: + > mv onap-key ~/.ssh + + > chmod 600 ~/.ssh/onap-key + + +Create Kubernetes Control Plane VMs +=================================== + +The following instructions describe how to create 3 OpenStack VMs to host the +Highly-Available Kubernetes Control Plane. +ONAP workloads will not be scheduled on these Control Plane nodes. + +Launch new VM instances +----------------------- + +.. image:: images/cp_vms/control_plane_1.png + +Select Ubuntu 18.04 as base image --------------------------------- Select "No" for "Create New Volume" -.. image:: Rancher-Select_Ubuntu_16.04_as_base_image.jpeg +.. image:: images/cp_vms/control_plane_2.png Select Flavor ------------- -Known issues exist if flavor is too small for Rancher. Please select a flavor -with at least 4 vCPU and 8GB ram. A size of 8 vCPU and 16GB ram is recommended. +The recommended flavor is at least 4 vCPU and 8GB ram. -.. image:: Rancher-Select_Flavor.jpeg +.. image:: images/cp_vms/control_plane_3.png Networking ---------- -.. image:: Rancher-Networking.jpeg +.. image:: images/cp_vms/control_plane_4.png Security Groups --------------- -.. image:: Rancher-Security_Groups.jpeg +.. image:: images/cp_vms/control_plane_5.png Key Pair -------- -Use an existing key pair (e.g. onap_key), import an existing one or create a -new one to assign. +Assign the key pair that was created/selected previously (e.g. onap_key). -.. image:: Rancher-Key_Pair.jpeg +.. image:: images/cp_vms/control_plane_6.png -Apply customization script for the Rancher VM ---------------------------------------------- +Apply customization script for Control Plane VMs +------------------------------------------------ -Click :download:`openstack-rancher.sh <openstack-rancher.sh>` to download the script.
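Because RKE will later drive every node over SSH with this key, it can save time to confirm key-based access to each control plane VM before continuing. A quick check, assuming the floating IPs used in this example (substitute your own)::

    for ip in 10.12.6.85 10.12.6.90 10.12.6.89; do
      ssh -i ~/.ssh/onap-key -o StrictHostKeyChecking=no ubuntu@"$ip" hostname
    done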
+Click :download:`openstack-k8s-controlnode.sh <openstack-k8s-controlnode.sh>` +to download the script. -.. literalinclude:: openstack-rancher.sh +.. literalinclude:: openstack-k8s-controlnode.sh :language: bash This customization script will: -* setup root access to the VM (comment out if you wish to disable this - capability and restrict access to ssh access only) +* update ubuntu * install docker -* install rancher -* install kubectl -* install helm -* install nfs server - -.. note:: - The Casablanca release of OOM only supports Helm 2.9.1 not the 2.7.2 shown in - the screen capture below. The supported versions of all the software components - are listed in the :ref:`cloud-setup-guide-label`. -.. image:: Apply_customization_script_for_the_Rancher_VM.jpeg +.. image:: images/cp_vms/control_plane_7.png Launch Instance --------------- -.. image:: Rancher-Launch_Instance.jpeg - -Assign Floating IP for external access -------------------------------------- - -.. image:: Rancher-Allocate_Floating_IP.jpeg - -.. image:: Rancher-Manage_Floating_IP_Associations.jpeg +.. image:: images/cp_vms/control_plane_8.png -.. image:: Rancher-Launch_Instance.jpeg -Kubernetes Installation -======================= -Launch new VM instance(s) to create a Kubernetes single host or cluster ----------------------------------------------------------------------- +Create Kubernetes Worker VMs +============================ +The following instructions describe how to create OpenStack VMs to host the +Highly-Available Kubernetes Workers. ONAP workloads will only be scheduled on these nodes. -To create a cluster: +Launch new VM instances +----------------------- -.. note:: - #. do not append a '-1' suffix (e.g. sb4-k8s) - #. increase count to the # of of kubernetes worker nodes you want (eg. 3) +The number and size of Worker VMs are dependent on the size of the ONAP deployment. +By default, all ONAP applications are deployed. It's possible to customize the deployment +and enable a subset of the ONAP applications. For the purpose of this guide, however, +we will deploy 12 Kubernetes Workers that have been sized to handle the entire ONAP +application workload. -.. image:: K8s-Launch_new_VM_instance_to_create_a_Kubernetes_single_host_or_cluster.jpeg +.. image:: images/wk_vms/worker_1.png -Select Ubuntu 16.04 as base image +Select Ubuntu 18.04 as base image --------------------------------- Select "No" on "Create New Volume" -.. image:: K8s-Select_Ubuntu_16.04_as_base_image.jpeg +.. image:: images/wk_vms/worker_2.png Select Flavor ------------- -The size of a Kubernetes host depends on the size of the ONAP deployment that -will be installed. - -As of the Casablanca release a minimum 224GB will be needed to run a -full ONAP deployment (all components). It is recommended that more hosts are -used with fewer resources instead of only a few large hosts. For example 14 x -16GB hosts. +The size of Kubernetes hosts depends on the size of the ONAP deployment +being installed. -If a small subset of ONAP components are being deployed for testing purposes, -then a single 16GB or 32GB host should suffice. +If a small subset of ONAP applications are being deployed +(i.e. for testing purposes), then 16GB or 32GB may be sufficient. -.. image:: K8s-Select_Flavor.jpeg +.. image:: images/wk_vms/worker_3.png Networking ----------- -.. image:: K8s-Networking.jpeg +.. image:: images/wk_vms/worker_4.png Security Group --------------- -.. image:: K8s-Security_Group.jpeg +..
image:: images/wk_vms/worker_5.png Key Pair -------- -Use an existing key pair (e.g. onap_key), import an existing one or create a -new one to assign. +Assign the key pair that was created/selected previously (e.g. onap_key). -.. image:: K8s-Key_Pair.jpeg +.. image:: images/wk_vms/worker_6.png Apply customization script for Kubernetes VM(s) ----------------------------------------------- -Click :download:`openstack-k8s-node.sh <openstack-k8s-node.sh>` to -download the script. +Click :download:`openstack-k8s-workernode.sh <openstack-k8s-workernode.sh>` to download the +script. -.. literalinclude:: openstack-k8s-node.sh +.. literalinclude:: openstack-k8s-workernode.sh :language: bash This customization script will: -* setup root access to the VM (comment out if you wish to disable this - capability and restrict access to ssh access only) +* update ubuntu * install docker -* install kubectl -* install helm -* install nfs common (see configuration step here) +* install nfs common -.. note:: - Ensure you are using the correct versions as described in the - :ref:`cloud-setup-guide-label` Launch Instance --------------- -.. image:: K8s-Launch_Instance.jpeg +.. image:: images/wk_vms/worker_7.png -Assign Floating IP for external access --------------------------------------- -.. image:: K8s-Manage_Floating_IP_Associations.jpeg -.. image:: K8s-Launch_Instance.jpeg -Setting up an NFS share for Multinode Kubernetes Clusters -========================================================= -The figure below illustrates a possible topology of a multinode Kubernetes -cluster. +Assign Floating IP addresses +---------------------------- +Assign Floating IPs to all Control Plane and Worker VMs. +These addresses provide external access to the VMs and will be used by RKE +to configure kubernetes on to the VMs. -.. image:: k8s-topology.jpg +Repeat the following for each VM previously created: -One node, the Master Node, runs Rancher and Helm clients and connects to all -the Kubernetes nodes in the cluster. Kubernetes nodes, in turn, run Rancher, -Kubernetes and Tiller (Helm) agents, which receive, execute, and respond to -commands issued by the Master Node (e.g. kubectl or helm operations). Note that -the Master Node can be either a remote machine that the user can log in to or a -local machine (e.g. laptop, desktop) that has access to the Kubernetes cluster. +.. image:: images/floating_ips/floating_1.png -Deploying applications to a Kubernetes cluster requires Kubernetes nodes to -share a common, distributed filesystem. One node in the cluster plays the role -of NFS Master (not to confuse with the Master Node that runs Rancher and Helm -clients, which is located outside the cluster), while all the other cluster -nodes play the role of NFS slaves. In the figure above, the left-most cluster -node plays the role of NFS Master (indicated by the crown symbol). To properly -set up an NFS share on Master and Slave nodes, the user can run the scripts -below. +Resulting floating IP assignments in this example. -Click :download:`master_nfs_node.sh <master_nfs_node.sh>` to download the -script. +.. image:: images/floating_ips/floating_2.png -.. literalinclude:: master_nfs_node.sh - :language: bash -Click :download:`slave_nfs_node.sh <slave_nfs_node.sh>` to download the script. -.. 
literalinclude:: slave_nfs_node.sh - :language: bash -The master_nfs_node.sh script runs in the NFS Master node and needs the list of -NFS Slave nodes as input, e.g.:: +Configure Rancher Kubernetes Engine (RKE) +========================================= - > sudo ./master_nfs_node.sh node1_ip node2_ip ... nodeN_ip +Install RKE +----------- +Download and install RKE on a VM, desktop or laptop. +Binaries can be found here for Linux and Mac: https://github.com/rancher/rke/releases/tag/v0.2.1 -The slave_nfs_node.sh script runs in each NFS Slave node and needs the IP of -the NFS Master node as input, e.g.:: +RKE requires a *cluster.yml* as input. An example file is show below that +describes a Kubernetes cluster that will be mapped onto the OpenStack VMs +created earlier in this guide. - > sudo ./slave_nfs_node.sh master_node_ip +Example: **cluster.yml** + +.. image:: images/rke/rke_1.png -Configuration (Rancher and Kubernetes) -====================================== +Click :download:`cluster.yml <cluster.yml>` to download the +configuration file. -Access Rancher server via web browser -------------------------------------- -(e.g. http://10.12.6.16:8080/env/1a5/apps/stacks) +.. literalinclude:: cluster.yml + :language: yaml -.. image:: Access_Rancher_server_via_web_browser.jpeg +Prepare cluster.yml +------------------- +Before this configuration file can be used the external **address** +and the **internal_address** must be mapped for each control and worker node +in this file. + +Run RKE +------- +From within the same directory as the cluster.yml file, simply execute: + + > rke up + +The output will look something like: + +.. code-block:: + + INFO[0000] Initiating Kubernetes cluster + INFO[0000] [certificates] Generating admin certificates and kubeconfig + INFO[0000] Successfully Deployed state file at [./cluster.rkestate] + INFO[0000] Building Kubernetes cluster + INFO[0000] [dialer] Setup tunnel for host [10.12.6.82] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.249] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.74] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.85] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.238] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.89] + INFO[0000] [dialer] Setup tunnel for host [10.12.5.11] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.90] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.244] + INFO[0000] [dialer] Setup tunnel for host [10.12.5.165] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.126] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.111] + INFO[0000] [dialer] Setup tunnel for host [10.12.5.160] + INFO[0000] [dialer] Setup tunnel for host [10.12.5.191] + INFO[0000] [dialer] Setup tunnel for host [10.12.6.195] + INFO[0002] [network] Deploying port listener containers + INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.85] + INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.89] + INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.90] + INFO[0011] [network] Successfully pulled image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.89] + . . . . 
+ INFO[0309] [addons] Setting up Metrics Server + INFO[0309] [addons] Saving ConfigMap for addon rke-metrics-addon to Kubernetes + INFO[0309] [addons] Successfully saved ConfigMap for addon rke-metrics-addon to Kubernetes + INFO[0309] [addons] Executing deploy job rke-metrics-addon + INFO[0315] [addons] Metrics Server deployed successfully + INFO[0315] [ingress] Setting up nginx ingress controller + INFO[0315] [addons] Saving ConfigMap for addon rke-ingress-controller to Kubernetes + INFO[0316] [addons] Successfully saved ConfigMap for addon rke-ingress-controller to Kubernetes + INFO[0316] [addons] Executing deploy job rke-ingress-controller + INFO[0322] [ingress] ingress controller nginx deployed successfully + INFO[0322] [addons] Setting up user addons + INFO[0322] [addons] no user addons defined + INFO[0322] Finished building Kubernetes cluster successfully + +Install Kubectl +=============== + +Download and install kubectl. Binaries can be found here for Linux and Mac: + +https://storage.googleapis.com/kubernetes-release/release/v1.13.5/bin/linux/amd64/kubectl +https://storage.googleapis.com/kubernetes-release/release/v1.13.5/bin/darwin/amd64/kubectl + +Validate deployment +------------------- + > cp kube_config_cluster.yml ~/.kube/config.onap -Add Kubernetes Environment to Rancher -------------------------------------- + > export KUBECONFIG=~/.kube/config.onap -1. Select “Manage Environments” + > kubectl config use-context onap -.. image:: Add_Kubernetes_Environment_to_Rancher.png + > kubectl get nodes -o=wide -2. Select “Add Environment” +.. code-block:: -.. image:: Select_Add_Environment.png + NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME + onap-control-1 Ready controlplane,etcd 3h53m v1.13.5 10.0.0.8 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-control-2 Ready controlplane,etcd 3h53m v1.13.5 10.0.0.11 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-control-3 Ready controlplane,etcd 3h53m v1.13.5 10.0.0.12 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-1 Ready worker 3h53m v1.13.5 10.0.0.14 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-10 Ready worker 3h53m v1.13.5 10.0.0.16 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-11 Ready worker 3h53m v1.13.5 10.0.0.18 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-12 Ready worker 3h53m v1.13.5 10.0.0.7 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-2 Ready worker 3h53m v1.13.5 10.0.0.26 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-3 Ready worker 3h53m v1.13.5 10.0.0.5 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-4 Ready worker 3h53m v1.13.5 10.0.0.6 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-5 Ready worker 3h53m v1.13.5 10.0.0.9 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-6 Ready worker 3h53m v1.13.5 10.0.0.17 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-7 Ready worker 3h53m v1.13.5 10.0.0.20 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-8 Ready worker 3h53m v1.13.5 10.0.0.10 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 + onap-k8s-9 Ready worker 3h53m v1.13.5 10.0.0.4 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5 -3. Add unique name for your new Rancher environment -4. Select the Kubernetes template +Install Helm +============ -5. 
Click "create" +Example Helm client install on Linux: + > wget http://storage.googleapis.com/kubernetes-helm/helm-v2.12.3-linux-amd64.tar.gz -.. image:: Click_create.jpeg + > tar -zxvf helm-v2.12.3-linux-amd64.tar.gz -6. Select the new named environment (ie. SB4) from the dropdown list (top - left). + > sudo mv linux-amd64/helm /usr/local/bin/helm -Rancher is now waiting for a Kubernetes Host to be added. +Initialize Kubernetes Cluster for use by Helm +--------------------------------------------- + > kubectl -n kube-system create serviceaccount tiller -.. image:: K8s-Assign_Floating_IP_for_external_access.jpeg + > kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller -Add Kubernetes Host -------------------- + > helm init --service-account tiller -1. If this is the first (or only) host being added - click on the "Add a host" - link + > kubectl -n kube-system rollout status deploy/tiller-deploy -.. image:: K8s-Assign_Floating_IP_for_external_access.jpeg -and click on "Save" (accept defaults). -.. image:: and_click_on_Save_accept_defaults.jpeg +Setting up an NFS share for Multinode Kubernetes Clusters +========================================================= +Deploying applications to a Kubernetes cluster requires Kubernetes nodes to +share a common, distributed filesystem. In this tutorial, we will setup an +NFS Master, and configure all Worker nodes a Kubernetes cluster to play +the role of NFS slaves. + +It is recommneded that a separate VM, outside of the kubernetes +cluster, be used. This is to ensure that the NFS Master does not compete for +resources with Kubernetes Control Plane or Worker Nodes. + + +Launch new NFS Server VM instance +--------------------------------- +.. image:: images/nfs_server/nfs_server_1.png + +Select Ubuntu 18.04 as base image +--------------------------------- +Select "No" on "Create New Volume" + +.. image:: images/nfs_server/nfs_server_2.png + +Select Flavor +------------- + +.. image:: images/nfs_server/nfs_server_3.png + +Networking +----------- + +.. image:: images/nfs_server/nfs_server_4.png + +Security Group +--------------- + +.. image:: images/nfs_server/nfs_server_5.png + +Key Pair +-------- +Assign the key pair that was created/selected previously (e.g. onap_key). -otherwise select INFRASTRUCTURE→ Hosts and click on "Add Host" +.. image:: images/nfs_server/nfs_server_6.png -.. image:: otherwise_select_INFRASTRUCTURE_Hosts_and_click_on_Add_Host.jpg +Apply customization script for NFS Server VM +-------------------------------------------- -2. Enter the management IP for the k8s VM (e.g. 10.0.0.4) that was just - created. +Click :download:`openstack-nfs-server.sh <openstack-nfs-server.sh>` to download the +script. -3. Click on “Copy to Clipboard” button +.. literalinclude:: openstack-k8s-workernode.sh + :language: bash -4. Click on “Close” button +This customization script will: -.. image:: Click_on_Close_button.jpeg +* update ubuntu +* install nfs server -Without the 10.0.0.4 IP - the CATTLE_AGENT will be derived on the host - but it -may not be a routable IP. -Configure Kubernetes Host -------------------------- +Launch Instance +--------------- -1. Login to the new Kubernetes Host:: +.. image:: images/nfs_server/nfs_server_7.png - > ssh -i ~/oom-key.pem ubuntu@10.12.5.1 - The authenticity of host '10.12.5.172 (10.12.5.172)' can't be established. - ECDSA key fingerprint is SHA256:tqxayN58nCJKOJcWrEZzImkc0qKQHDDfUTHqk4WMcEI. - Are you sure you want to continue connecting (yes/no)? 
yes - Warning: Permanently added '10.12.5.172' (ECDSA) to the list of known hosts. - Welcome to Ubuntu 16.04.2 LTS (GNU/Linux 4.4.0-64-generic x86_64) - * Documentation: https://help.ubuntu.com - * Management: https://landscape.canonical.com - * Support: https://ubuntu.com/advantage - Get cloud support with Ubuntu Advantage Cloud Guest: - http://www.ubuntu.com/business/services/cloud +Assign Floating IP addresses +---------------------------- - 180 packages can be updated. - 100 updates are security updates. +.. image:: images/nfs_server/nfs_server_8.png - The programs included with the Ubuntu system are free software; - the exact distribution terms for each program are described in the - individual files in /usr/share/doc/*/copyright. +Resulting floating IP assignments in this example. - Ubuntu comes with ABSOLUTELY NO WARRANTY, to the extent permitted by - applicable law. +.. image:: images/nfs_server/nfs_server_9.png - To run a command as administrator (user "root"), use "sudo <command>". - See "man sudo_root" for details. - ubuntu@sb4-k8s-1:~$ +To properly set up an NFS share on Master and Slave nodes, the user can run the +scripts below. +Click :download:`master_nfs_node.sh <master_nfs_node.sh>` to download the +script. + +.. literalinclude:: master_nfs_node.sh + :language: bash -2. Paste Clipboard content and hit enter to install Rancher Agent:: +Click :download:`slave_nfs_node.sh <slave_nfs_node.sh>` to download the script. - ubuntu@sb4-k8s-1:~$ sudo docker run -e CATTLE_AGENT_IP="10.0.0.4“ --rm --privileged -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/rancher:/var/lib/rancher rancher/agent:v1.2.9 http://10.12.6.16:8080/v1/scripts/5D757C68BD0A2125602A:1514678400000:yKW9xHGJDLvq6drz2eDzR2mjato - Unable to find image 'rancher/agent:v1.2.9' locally - v1.2.9: Pulling From rancher/agent - b3e1c725a85f: Pull complete - 6071086409fc: Pull complete - d0ac3b234321: Pull complete - 87f567b5cf58: Pull complete - a63e24b217c4: Pull complete - d0a3f58caef0: Pull complete - 16914729cfd3: Pull complete - dc5c21984c5b: Pull complete - d7e8f9784b20: Pull complete - Digest: sha256:c21255ac4d94ffbc7b523F870F20ea5189b68Fa3d642800adb4774aab4748e66 - Status: Downloaded newer image for rancher/agent:v1.2.9 +.. literalinclude:: slave_nfs_node.sh + :language: bash + +The master_nfs_node.sh script runs in the NFS Master node and needs the list of +NFS Slave nodes as input, e.g.:: + + > sudo ./master_nfs_node.sh node1_ip node2_ip ... 
nodeN_ip + +The slave_nfs_node.sh script runs in each NFS Slave node and needs the IP of +the NFS Master node as input, e.g.:: + + > sudo ./slave_nfs_node.sh master_node_ip - INFO: Running Agent Registration Process, CATTLE_URL=http://10.12.6.16:8080/v1 - INFO: Attempting to connect to: http://10.12.6.16:8080/v1 - INFO: http://10.12.6.16:8080/v1 is accessible - INFO: Inspecting host capabilities - INFO: Boot2Docker: false - INFO: Host writable: true - INFO: Token: xxxxxxxx - INFO: Running registration - INFO: Printing Environment - INFO: ENV: CATTLE_ACCESS_KEY=98B35AC484FBF820E0AD - INFO: ENV: CATTLE_AGENT_IP=10.0.9.4 - INFO: ENV: CATTLE_HOME=/var/lib/cattle - INFO: ENV: CATTLE_REGISTRATION_ACCESS_KEY=registrationToken - INFO: ENV: CATTLE_REGISTRATION_SECRET_KEY=xxxxxxx - INFO: ENV: CATTLE_SECRET_KEY=xxxxxxx - INFO: ENV: CATTLE_URL=http://10.12.6.16:8080/v1 - INFO: ENV: DETECTED_CATTLE_AGENT_IP=10.12.5.172 - INFO: ENV: RANCHER_AGENT_IMAGE=rancher/agent:v1.2.9 - INFO: Launched Rancher Agent: c27ee0f3dc4c783b0db647ea1f73c35b3843a4b8d60b96375b1a05aa77d83136 - ubuntu@sb4-k8s-1:~$ - -3. Return to Rancher environment (e.g. SB4) and wait for services to complete - (~ 10-15 mins) - -.. image:: Return_to_Rancher_environment_eg_SB4_and_wait_for_services_to_complete_10-15_mins.jpeg - -Configure kubectl and helm -========================== -In this example we are configuring kubectl and helm that have been installed -(as a convenience) onto the rancher and kubernetes hosts. Typically you would -install them both on your PC and remotely connect to the cluster. The following -procedure would remain the same. - -1. Click on CLI and then click on “Generate Config” - -.. image:: Click_on_CLI_and_then_click_on_Generate_Config.jpeg - -2. Click on “Copy to Clipboard” - wait until you see a "token" - do not copy - user+password - the server is not ready at that point - -.. image:: Click_on_Copy_to_Clipboard-wait_until_you_see_a_token-do_not_copy_user+password-the_server_is_not_ready_at_that_point.jpeg - -3. Create a .kube directory in user directory (if one does not exist):: - - ubuntu@sb4-kSs-1:~$ mkdir .kube - ubuntu@sb4-kSs-1:~$ vi .kube/config - -4. Paste contents of Clipboard into a file called “config” and save the file:: - - apiVersion: v1 - kind : Config - clusters: - - cluster: - api-version: v1 - insecure-skip-tls-verify: true - server: "https://10.12.6.16:8080/r/projects/1a7/kubernetes:6443" - name: "SB4" - contexts: - - context: - cluster: "SB4" - user: "SB4" - name: "SB4" - current-context: "SB4" - users: - - name: "SB4" - user: - token: "QmFzaWMgTlRBd01qZzBOemc)TkRrMk1UWkNOMFpDTlVFNlExcHdSa1JhVZreE5XSm1TRGhWU2t0Vk1sQjVhalZaY0dWaFVtZGFVMHQzWW1WWVJtVmpSQT09" - -5. 
Validate that kubectl is able to connect to the kubernetes cluster:: - - ubuntu@sb4-k8s-1:~$ kubectl config get-contexts - CURRENT NAME CLUSTER AUTHINFO NAMESPACE - * SB4 SB4 SB4 - ubuntu@sb4-kSs-1:~$ - -and show running pods:: - - ubuntu@sb4-k8s-1:~$ kubectl get pods --all-namespaces -o=wide - NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE - kube-system heapster—7Gb8cd7b5 -q7p42 1/1 Running 0 13m 10.42.213.49 sb4-k8s-1 - kube-system kube-dns-5d7bM87c9-c6f67 3/3 Running 0 13m 10.42.181.110 sb4-k8s-1 - kube-system kubernetes-dashboard-f9577fffd-kswjg 1/1 Running 0 13m 10.42.105.113 sb4-k8s-1 - kube-system monitoring-grafana-997796fcf-vg9h9 1/1 Running 0 13m 10.42,141.58 sb4-k8s-1 - kube-system monitoring-influxdb-56chd96b-hk66b 1/1 Running 0 13m 10.4Z.246.90 sb4-k8s-1 - kube-system tiller-deploy-cc96d4f6b-v29k9 1/1 Running 0 13m 10.42.147.248 sb4-k8s-1 - ubuntu@sb4-k8s-1:~$ - -6. Validate helm is running at the right version. If not, an error like this - will be displayed:: - - ubuntu@sb4-k8s-1:~$ helm list - Error: incompatible versions c1ient[v2.9.1] server[v2.6.1] - ubuntu@sb4-k8s-1:~$ - -7. Upgrade the server-side component of helm (tiller) via `helm init --upgrade`:: - - ubuntu@sb4-k8s-1:~$ helm init --upgrade - Creating /home/ubuntu/.helm - Creating /home/ubuntu/.helm/repository - Creating /home/ubuntu/.helm/repository/cache - Creating /home/ubuntu/.helm/repository/local - Creating /home/ubuntu/.helm/plugins - Creating /home/ubuntu/.helm/starters - Creating /home/ubuntu/.helm/cache/archive - Creating /home/ubuntu/.helm/repository/repositories.yaml - Adding stable repo with URL: https://kubernetes-charts.storage.googleapis.com - Adding local repo with URL: http://127.0.0.1:8879/charts - $HELM_HOME has been configured at /home/ubuntu/.helm. - - Tiller (the Helm server-side component) has been upgraded to the current version. - Happy Helming! 
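Before moving on to the ONAP deployment itself, a quick sanity check that the kubectl and Helm installs described in the new sections above line up with the Dublin versions might look like this (a sketch)::

    kubectl version --short                                 # expect v1.13.5 client and server
    helm version --short                                    # expect v2.12.3 client and tiller
    kubectl -n kube-system get pods | grep tiller-deploy    # should be Running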
- ubuntu@sb4-k8s-1:~$ ONAP Deployment via OOM ======================= diff --git a/docs/openstack-k8s-controlnode.sh b/docs/openstack-k8s-controlnode.sh new file mode 100644 index 0000000000..1d230c2da4 --- /dev/null +++ b/docs/openstack-k8s-controlnode.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +DOCKER_VERSION=18.09.5 + +apt-get update + +curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh +mkdir -p /etc/systemd/system/docker.service.d/ +cat > /etc/systemd/system/docker.service.d/docker.conf << EOF +[Service] +ExecStart= +ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001 +EOF + +sudo usermod -aG docker ubuntu + +systemctl daemon-reload +systemctl restart docker +apt-mark hold docker-ce + +IP_ADDR=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'` +HOSTNAME=`hostname` + +echo "$IP_ADDR $HOSTNAME" >> /etc/hosts + +docker login -u docker -p docker nexus3.onap.org:10001 + +sudo apt-get install make -y + + +exit 0 diff --git a/docs/openstack-k8s-node.sh b/docs/openstack-k8s-node.sh deleted file mode 100644 index 308f2204ff..0000000000 --- a/docs/openstack-k8s-node.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash - -DOCKER_VERSION=17.03 -KUBECTL_VERSION=1.11.5 -HELM_VERSION=2.9.1 - -# setup root access - default login: oom/oom - comment out to restrict access too ssh key only -sed -i 's/PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config -sed -i 's/PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config -service sshd restart -echo -e "oom\noom" | passwd root - -apt-get update -curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh -mkdir -p /etc/systemd/system/docker.service.d/ -cat > /etc/systemd/system/docker.service.d/docker.conf << EOF -[Service] -ExecStart= -ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001 -EOF -systemctl daemon-reload -systemctl restart docker -apt-mark hold docker-ce - -IP_ADDY=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'` -HOSTNAME=`hostname` - -echo "$IP_ADDY $HOSTNAME" >> /etc/hosts - -docker login -u docker -p docker nexus3.onap.org:10001 - -sudo apt-get install make -y - -sudo curl -LO https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl -sudo chmod +x ./kubectl -sudo mv ./kubectl /usr/local/bin/kubectl -sudo mkdir ~/.kube -wget http://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz -sudo tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz -sudo mv linux-amd64/helm /usr/local/bin/helm - -# install nfs -sudo apt-get install nfs-common -y - - -exit 0 diff --git a/docs/openstack-k8s-workernode.sh b/docs/openstack-k8s-workernode.sh new file mode 100644 index 0000000000..3f32d050a9 --- /dev/null +++ b/docs/openstack-k8s-workernode.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +DOCKER_VERSION=18.09.5 + +apt-get update + +curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh +mkdir -p /etc/systemd/system/docker.service.d/ +cat > /etc/systemd/system/docker.service.d/docker.conf << EOF +[Service] +ExecStart= +ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001 +EOF + +sudo usermod -aG docker ubuntu + +systemctl daemon-reload +systemctl restart docker +apt-mark hold docker-ce + +IP_ADDR=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'` +HOSTNAME=`hostname` + +echo "$IP_ADDR $HOSTNAME" >> /etc/hosts + +docker login -u docker -p docker nexus3.onap.org:10001 + +sudo apt-get install make 
-y + +# install nfs +sudo apt-get install nfs-common -y + + +exit 0 diff --git a/docs/openstack-nfs-server.sh b/docs/openstack-nfs-server.sh new file mode 100644 index 0000000000..1db04eaff6 --- /dev/null +++ b/docs/openstack-nfs-server.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +apt-get update + +IP_ADDR=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'` +HOSTNAME=`hostname` + +echo "$IP_ADDR $HOSTNAME" >> /etc/hosts + +sudo apt-get install make -y + +# nfs server +sudo apt-get install nfs-kernel-server -y + +sudo mkdir -p /nfs_share +sudo chown nobody:nogroup /nfs_share/ + +exit 0 diff --git a/docs/openstack-rancher.sh b/docs/openstack-rancher.sh deleted file mode 100644 index ac91ff5566..0000000000 --- a/docs/openstack-rancher.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -DOCKER_VERSION=17.03 -RANCHER_VERSION=1.6.22 -KUBECTL_VERSION=1.11.5 -HELM_VERSION=2.9.1 - -# setup root access - default login: oom/oom - comment out to restrict access too ssh key only -sed -i 's/PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config -sed -i 's/PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config -service sshd restart -echo -e "oom\noom" | passwd root - -apt-get update -curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh -mkdir -p /etc/systemd/system/docker.service.d/ -cat > /etc/systemd/system/docker.service.d/docker.conf << EOF -[Service] -ExecStart= -ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001 -EOF -systemctl daemon-reload -systemctl restart docker -apt-mark hold docker-ce - -IP_ADDY=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'` -HOSTNAME=`hostname` - -echo "$IP_ADDY $HOSTNAME" >> /etc/hosts - -docker login -u docker -p docker nexus3.onap.org:10001 - -sudo apt-get install make -y - -sudo docker run -d --restart=unless-stopped -p 8080:8080 --name rancher_server rancher/server:v$RANCHER_VERSION -sudo curl -LO https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl -sudo chmod +x ./kubectl -sudo mv ./kubectl /usr/local/bin/kubectl -sudo mkdir ~/.kube -wget http://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz -sudo tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz -sudo mv linux-amd64/helm /usr/local/bin/helm - -# nfs server -sudo apt-get install nfs-kernel-server -y - -sudo mkdir -p /nfs_share -sudo chown nobody:nogroup /nfs_share/ - - -exit 0
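Once the NFS server and the master/slave scripts above have run, a quick way to confirm the share is wired up (a sketch -- the exported path depends on which script created it, e.g. /nfs_share from openstack-nfs-server.sh)::

    # On the NFS master: list the active exports.
    sudo exportfs -v

    # On each worker (substitute your master's address):
    showmount -e <master_node_ip>
    mount | grep -i nfs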