# -*- mode: ruby -*- # vi: set ft=ruby : configuration = { # Generic parameters used across all ONAP components 'key_name' => 'ecomp_key', 'pub_key' => '', 'nexus_repo' => 'https://nexus.onap.org/content/sites/raw', 'nexus_repo_root' => 'https://nexus.onap.org', 'nexus_url_snapshot' => 'https://nexus.onap.org/content/repositories/snapshots', 'nexus_docker_repo' => 'nexus3.onap.org:10001', 'nexus_username' => 'docker', 'nexus_password' => 'docker', 'dmaap_topic' => 'AUTO', 'artifacts_version' => '1.0.0', 'docker_version' => '1.0-STAGING-latest', # Parameters for DCAE instantiation 'dcae_zone' => 'iad4', 'dcae_state' => 'vi', 'openstack_tenant_id' => '', 'openstack_username' => '', 'openstack_api_key' => '', 'openstack_password' => '', 'odl_version' => '0.5.3-Boron-SR3', # Parameters for enabling features 'debug' => 'True', 'build_image' => 'True', 'clone_repo' => 'True', 'compile_repo' => 'False', 'enable_oparent' => 'True', 'skip_get_images' => 'False', 'skip_install' => 'True' } box = { :virtualbox => 'ubuntu/trusty64', :libvirt => 'sputnik13/trusty64', :openstack => nil } nodes = [ { :name => "aai", :ips => ['10.252.0.6', "192.168.50.6"], :macs => [], :cpus => 2, :cpu => "50", :ram => 4 * 1024, :groups => ["individual"], :args => ["aai"] }, { :name => "all-in-one", :ips => ['10.252.0.3', "192.168.50.3"], :macs => [], :cpus => 2, :cpu => "50", :ram => 12 * 1024, :groups => ["all-in-one"], :flavor => 'm1.xlarge', :args => ['mr', 'sdc', 'aai', 'mso', 'robot', 'vid', 'sdnc', 'portal', 'dcae', 'policy', 'appc', 'vfc', 'ccsdk'], }, { :name => "appc", :ips => ['10.252.0.14', "192.168.50.14"], :macs => [], :cpus => 2, :cpu => "50", :ram => 4 * 1024, :groups => ["individual"], :args => ["appc"], }, { :name => "ccsdk", :ips => ['10.252.0.14', "192.168.50.17"], :macs => [], :cpus => 2, :cpu => "50", :ram => 4 * 1024, :groups => ["individual"], :args => ["ccsdk"], }, { :name => "dcae", :ips => ['10.252.0.12', "192.168.50.12"], :macs => [], :cpus => 2, :cpu => "50", :ram => 4 * 1024, :groups => ["individual"], :args => ["dcae"], }, { :name => "dns", :ips => ['10.252.0.3', "192.168.50.3"], :macs => [], :cpus => 2, :cpu => "50", :ram => 1 * 1024, :groups => ["individual"], :flavor => 'm1.small', :args => [" "] }, { :name => "message-router", :ips => ['10.252.0.4', "192.168.50.4"], :macs => [], :cpus => 2, :cpu => "50", :ram => 4 * 1024, :groups => ["individual"], :args => ["mr"], }, { :name => "mso", :ips => ['10.252.0.7', "192.168.50.7"], :macs => [], :cpus => 2, :cpu => "50", :ram => 4 * 1024, :groups => ["individual"], :args => ["mso"], }, { :name => "multicloud", :ips => ['10.252.0.16', "192.168.50.16"], :macs => [], :cpus => 2, :cpu => "50", :ram => 4 * 1024, :groups => ["individual"], :args => ["multicloud"], }, { :name => "policy", :ips => ['10.252.0.13', "192.168.50.13"], :macs => [], :cpus => 2, :cpu => "50", :ram => 4 * 1024, :groups => ["individual"], :args => ["policy"], }, { :name => "portal", :ips => ['10.252.0.11', "192.168.50.11"], :macs => [], :cpus => 2, :cpu => "50", :ram => 4 * 1024, :groups => ["individual"], :args => ["portal"], }, { :name => "robot", :ips => ['10.252.0.8', "192.168.50.8"], :macs => [], :cpus => 2, :cpu => "50", :ram => 4 * 1024, :groups => ["individual"], :args => ["robot"], }, { :name => "sdc", :ips => ['10.252.0.5', "192.168.50.5"], :macs => [], :cpus => 2, :cpu => "50", :ram => 8 * 1024, :groups => ["individual"], :args => ["sdc"], }, { :name => "sdnc", :ips => ['10.252.0.10', "192.168.50.10"], :macs => [], :cpus => 2, :cpu => "50", :ram => 4 * 1024, :groups => ["individual"], :args => ["sdnc"], }, { :name => "testing", :ips => ['10.252.0.3', "192.168.50.3"], :macs => [], :cpus => 2, :cpu => "50", :ram => 4 * 1024, :groups => ["testing"], :flavor => 'm1.small', :args => [""], }, { :name => "vfc", :ips => ['10.252.0.15', "192.168.50.15"], :macs => [], :cpus => 2, :cpu => "50", :ram => 4 * 1024, :groups => ["individual"], :args => ['vfc'], }, { :name => "vid", :ips => ['10.252.0.9', "192.168.50.9"], :macs => [], :cpus => 2, :cpu => "50", :ram => 4 * 1024, :groups => ["individual"], :args => ['vid'], }, { :name => "vnfsdk", :ips => ['10.252.0.16', "192.168.50.16"], :macs => [], :cpus => 2, :cpu => "50", :ram => 4 * 1024, :groups => ["individual"], :args => ['vnfsdk'], }, { :name => "vvp", :ips => ['10.252.0.17', "192.168.50.17"], :macs => [], :cpus => 2, :cpu => "50", :ram => 4 * 1024, :groups => ["individual"], :args => ['vvp'], } ] run_path = 'vagrant_utils/postinstall.sh' sdc_volume = 'vol1-sdc-data.vdi' Vagrant.require_version ">= 1.8.6" # Determine the provider used provider = (ENV['VAGRANT_DEFAULT_PROVIDER'] || :virtualbox).to_sym puts "[INFO] Provider: #{provider} " vd_conf = ENV.fetch('VD_CONF', 'etc/settings.yaml') if File.exist?(vd_conf) require 'yaml' user_conf = YAML.load_file(vd_conf) configuration.update(user_conf) end # Set network interface net_interface = 'vboxnet0' is_windows = Gem.win_platform? if is_windows net_interface = 'VirtualBox Host-Only Ethernet Adapter #2' end puts "[INFO] Net interface: #{net_interface}" # If argument is given use it. Otherwise use Env: DEPLOY_MODE else use default requested_machine = ARGV[1] deploy_mode = ENV.fetch('DEPLOY_MODE', 'individual') if requested_machine != nil if requested_machine.include?("all-in-one") || requested_machine.include?("testing") deploy_mode = requested_machine end end # Catch the status of all machines if ARGV[0] == 'status' || ARGV[0] == 'destroy' deploy_mode = 'NA' end puts "[INFO] Deploy Mode: #{deploy_mode}" # In case of all-in-one or testing clean the nodes list case deploy_mode when 'all-in-one' nodes.select! do |node| if node[:name].include?("all-in-one") true if node[:name] end end when 'individual' nodes.select! do |node| if node[:groups][0].include?("individual") true if node[:name] end end when 'testing' nodes.select! do |node| if node[:name].include?("testing") true if node[:name] end end end Vagrant.configure("2") do |config| # PROXY definitions if ENV['http_proxy'] != nil and ENV['https_proxy'] != nil and ENV['no_proxy'] != nil if not Vagrant.has_plugin?('vagrant-proxyconf') system 'vagrant plugin install vagrant-proxyconf' raise 'vagrant-proxyconf was installed but it requires to execute again' end config.proxy.http = ENV['http_proxy'] config.proxy.https = ENV['https_proxy'] config.proxy.no_proxy = ENV['no_proxy'] configuration['socks_proxy'] = ENV['socks_proxy'] end if Vagrant.has_plugin?('vagrant-vbguest') puts 'vagrant-vbguest auto_update feature will be disable to avoid sharing conflicts' config.vbguest.auto_update = false end if provider == :libvirt if not Vagrant.has_plugin?('vagrant-libvirt') system 'vagrant plugin install vagrant-libvirt' raise 'vagrant-libvirt was installed but it requires to execute again' end end if provider == :openstack config.ssh.username = 'ubuntu' if not Vagrant.has_plugin?('vagrant-openstack-provider') system 'vagrant plugin install vagrant-openstack-provider' raise 'vagrant-openstack-provider was installed but it requires to execute again' end end nodes.each do |node| config.vm.define node[:name] do |nodeconfig| # Common Settings: nodeconfig.vm.provider "virtualbox" do |vbox| vbox.customize ['modifyvm', :id, '--nictype1', 'virtio'] vbox.customize ['modifyvm', :id, '--audio', 'none'] vbox.customize ['modifyvm', :id, '--vram', '1'] vbox.customize ['modifyvm', :id, "--cpuhotplug", "off"] vbox.customize ['modifyvm', :id, "--cpuexecutioncap", node[:cpu]] vbox.customize ['modifyvm', :id, "--cpus", node[:cpus]] vbox.customize ["modifyvm", :id, "--memory", node[:ram]] end nodeconfig.vm.provider "libvirt" do |lbox| lbox.memory = node[:ram] lbox.nested = true end nodeconfig.vm.provider :openstack do |obox| obox.openstack_auth_url = ENV.fetch('OS_AUTH_URL', '') obox.tenant_name = ENV.fetch('OS_TENANT_NAME', '') obox.username = ENV.fetch('OS_USERNAME', '') obox.password = ENV.fetch('OS_PASSWORD', '') obox.region = ENV.fetch('OS_REGION_NAME', '') obox.identity_api_version = ENV.fetch('OS_IDENTITY_API_VERSION', '') obox.domain_name = ENV.fetch('OS_PROJECT_DOMAIN_ID', '') obox.project_name = ENV.fetch('OS_PROJECT_NAME', '') obox.floating_ip_pool = ENV.fetch('OS_FLOATING_IP_POOL', '') obox.floating_ip_pool_always_allocate = (ENV['OS_FLOATING_IP_ALWAYS_ALLOCATE'] == 'true') obox.image = ENV.fetch('OS_IMAGE', '') obox.security_groups = [ENV.fetch('OS_SEC_GROUP', '')] obox.networks = ENV.fetch('OS_NETWORK', '') obox.flavor = node[:flavor] obox.server_name = node[:name] end # Set Box type nodeconfig.vm.box = box[provider] # Set Node name nodeconfig.vm.hostname = node[:name] # Set Sync Folder nodeconfig.vm.synced_folder ".", "/vagrant", disabled: true nodeconfig.vm.synced_folder './opt', '/opt/', create: true nodeconfig.vm.synced_folder './lib', '/var/onap/', create: true if !is_windows nodeconfig.vm.synced_folder '~/.m2', '/root/.m2/', create: true end # Set Network nodeconfig.vm.network :private_network, :adapter => 2, :name => net_interface, :ip => node[:ips][0] nodeconfig.vm.network :private_network, :adapter => 3, :ip => node[:ips][1], :type => :static # Specific settings: # Set Storage (For SDC or All-in-one) if node[:name].include?("all-in-one") || node[:name].include?("sdc") nodeconfig.vm.provider "virtualbox" do |v| unless File.exist?(sdc_volume) v.customize ['createhd', '--filename', sdc_volume, '--size', 20 * 1024] end v.customize ['storageattach', :id, '--storagectl', 'SATAController', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', sdc_volume] end nodeconfig.vm.provider "libvirt" do |v| v.storage :file, path: sdc_volume, bus: 'sata', device: 'vdb', size: '2G' end end if node[:name].include? "testing" nodeconfig.vm.synced_folder './tests', '/var/onap_tests/', create: true test_suite = ENV.fetch('TEST_SUITE', '*') test_case = ENV.fetch('TEST_CASE', '*') # Override variables run_path = 'vagrant_utils/unit_testing.sh' node[:args] = [test_suite, test_case] else configuration['skip_get_images'] = ENV.fetch('SKIP_GET_IMAGES', configuration['skip_get_images']) configuration['skip_install'] = ENV.fetch('SKIP_INSTALL', configuration['skip_install']) end if node[:name].include? "vfc" nodeconfig.vm.provision 'docker' end nodeconfig.vm.provision 'shell' do |s| s.path = run_path s.args = node[:args] s.env = configuration end end #nodeconfig end #node end #config