author    Ethan Lynn <ethanlynnl@vmware.com>    2019-03-25 16:31:59 +0800
committer Ethan Lynn <ethanlynnl@vmware.com>    2019-04-04 18:54:53 +0800
commit    7a02c59adf4fdf4fe79ad34c248679850311a784 (patch)
tree      90f31ee6c261629a2c8ff4dafac6cd579ecf0093 /vio
parent    04ad05f0302fa1e059b3c3ab3470759dc333e33e (diff)
Add more functions to vsphere plugin
Add esxi ability to exec shell command.
Add ovf upload function.
Add vmdk validation function.

Change-Id: I41e658e8d35f9ec2ae8c9238954b705997fa1b3b
Issue-ID: MULTICLOUD-488
Signed-off-by: Ethan Lynn <ethanlynnl@vmware.com>
Diffstat (limited to 'vio')
-rw-r--r--  vio/requirements.txt                      1
-rw-r--r--  vio/vio/vsphere/ovf.py                  173
-rw-r--r--  vio/vio/vsphere/templates/__init__.py     0
-rw-r--r--  vio/vio/vsphere/templates/template.ovf   82
-rw-r--r--  vio/vio/vsphere/utils.py                 57
-rw-r--r--  vio/vio/vsphere/vc.py                   312
6 files changed, 597 insertions, 28 deletions
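
A minimal usage sketch for the entry points added below, assuming the VSPHERE_* / ESXI_* environment variables read by new_vc() and new_esxi() are set; the image path, datacenter, cluster, datastore, and VM names are placeholders, not part of the change:

# Placeholder inventory names and paths; adjust to the target environment.
from vio.vsphere import utils, vc

# Inspect a local VMDK descriptor (createType, adapterType, size, ...).
meta = utils.vmdk_metadata("/tmp/test-image.vmdk")
print(meta.get("createType"), meta.get("size"))

# Run a shell command on an ESXi host over SSH (reads ESXI_HOST/ESXI_USER/ESXI_PASS).
esxi = vc.new_esxi()
print(esxi.exec_cmd("ls /vmfs/volumes")["stdout"])

# Deploy the VMDK as a new VM through the bundled OVF template
# (reads VSPHERE_HOST/VSPHERE_USER/VSPHERE_PASS).
vcenter = vc.new_vc()
vcenter.deploy_ovf("/tmp/test-image.vmdk",
                   datacenter="DC1", cluster="Cluster1", datastore="ds1")

# Or validate the image end to end against an existing helper VM: deploy the
# disk, attach it, power-cycle the VM, then clean up.
vcenter.validate_image("/tmp/test-image.vmdk", "validation-vm")
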
diff --git a/vio/requirements.txt b/vio/requirements.txt
index e7f743c..f218ec5 100644
--- a/vio/requirements.txt
+++ b/vio/requirements.txt
@@ -36,3 +36,4 @@ pyvmomi
pyvim
fire
requests
+paramiko
diff --git a/vio/vio/vsphere/ovf.py b/vio/vio/vsphere/ovf.py
new file mode 100644
index 0000000..d37db24
--- /dev/null
+++ b/vio/vio/vsphere/ovf.py
@@ -0,0 +1,173 @@
+# Copyright (c) 2019 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+
+from os import path
+# from sys import exit
+from threading import Thread
+from time import sleep
+# from argparse import ArgumentParser
+# from getpass import getpass
+import requests
+
+# from pyVim import connect
+from pyVmomi import vim
+from pyVmomi.VmomiSupport import long
+
+from vio.vsphere import utils
+
+
+def get_ovf_descriptor(ovf_path):
+ """
+ Read in the OVF descriptor.
+ """
+ if path.exists(ovf_path):
+ with open(ovf_path, 'r') as f:
+ try:
+ ovfd = f.read()
+ f.close()
+ return ovfd
+ except Exception as ex:
+ raise Exception("Could not read file %s: %s" % (
+ ovf_path, str(ex)))
+
+
+def get_obj_in_list(obj_name, obj_list):
+ """
+    Gets an object out of a list (obj_list) whose name matches obj_name.
+ """
+ for o in obj_list:
+ if o.name == obj_name:
+ return o
+    raise Exception("Unable to find object by the name of %s in list: %s" %
+                    (obj_name, [o.name for o in obj_list]))
+
+
+def get_objects(si, datacenter_name=None, datastore_name=None,
+ cluster_name=None):
+ """
+ Return a dict containing the necessary objects for deployment.
+ """
+ # Get datacenter object.
+ datacenter_list = si.content.rootFolder.childEntity
+ if datacenter_name:
+ datacenter_obj = get_obj_in_list(datacenter_name, datacenter_list)
+ else:
+ datacenter_obj = datacenter_list[0]
+
+ # Get datastore object.
+ datastore_list = datacenter_obj.datastoreFolder.childEntity
+ if datastore_name:
+ datastore_obj = get_obj_in_list(datastore_name, datastore_list)
+ elif len(datastore_list) > 0:
+ datastore_obj = datastore_list[0]
+ else:
+ print("No datastores found in DC (%s)." % datacenter_obj.name)
+
+ # Get cluster object.
+ cluster_list = datacenter_obj.hostFolder.childEntity
+ if cluster_name:
+ cluster_obj = get_obj_in_list(cluster_name, cluster_list)
+ elif len(cluster_list) > 0:
+ cluster_obj = cluster_list[0]
+ else:
+ print("No clusters found in DC (%s)." % datacenter_obj.name)
+
+ # Generate resource pool.
+ resource_pool_obj = cluster_obj.resourcePool
+
+ return {"datacenter": datacenter_obj,
+ "datastore": datastore_obj,
+ "resource pool": resource_pool_obj}
+
+
+def keep_lease_alive(lease):
+ """
+ Keeps the lease alive while POSTing the VMDK.
+ """
+ while(True):
+ sleep(5)
+ try:
+ # Choosing arbitrary percentage to keep the lease alive.
+ lease.HttpNfcLeaseProgress(50)
+ if (lease.state == vim.HttpNfcLease.State.done):
+ return
+ # If the lease is released, we get an exception.
+ # Returning to kill the thread.
+ except Exception:
+ return
+
+
+def deploy_ovf(si, vmdk_path, ovf_path, datacenter, cluster, datastore):
+ default_ovf = False
+ if ovf_path is None:
+ default_ovf = True
+ cpath = path.dirname(path.realpath(__file__))
+ ovf_path = cpath + "/templates/template.ovf"
+ # import ipdb; ipdb.set_trace()
+ vmdk_meta = utils.vmdk_metadata(vmdk_path)
+ # vmdk_size = path.getsize(vmdk_path)
+ ovfd = get_ovf_descriptor(ovf_path)
+ objs = get_objects(si, datacenter, datastore, cluster)
+ manager = si.content.ovfManager
+ spec_params = vim.OvfManager.CreateImportSpecParams()
+ print("Creating import ovf spec")
+ import_spec = manager.CreateImportSpec(ovfd,
+ objs["resource pool"],
+ objs["datastore"],
+ spec_params)
+ if default_ovf:
+ import_spec.importSpec.configSpec.deviceChange[
+ 1].device.capacityInKB = long(vmdk_meta['size'])
+ lease = objs["resource pool"].ImportVApp(import_spec.importSpec,
+ objs["datacenter"].vmFolder)
+ while(True):
+ if (lease.state == vim.HttpNfcLease.State.ready):
+ # Assuming single VMDK.
+ # url = lease.info.deviceUrl[0].url.replace('*', host)
+ url = lease.info.deviceUrl[0].url
+            # Spawn a daemon thread to keep the lease active while POSTing
+ # VMDK.
+ keepalive_thread = Thread(target=keep_lease_alive, args=(lease,))
+ keepalive_thread.start()
+ print("Uploading %s to %s" % (vmdk_path, url))
+ # POST the VMDK to the host via curl. Requests library would work
+ # too.
+ # curl_cmd = (
+ # "curl -Ss -X POST --insecure -T %s -H 'Content-Type: \
+ # application/x-vnd.vmware-streamVmdk' %s" %
+ # (vmdk_path, url))
+ # system(curl_cmd)
+ headers = {'Content-Type': 'application/x-vnd.vmware-streamVmdk'}
+ client_cookie = si._stub.cookie
+ cookie_name = client_cookie.split("=", 1)[0]
+ cookie_value = client_cookie.split("=", 1)[1].split(";", 1)[0]
+ cookie_path = client_cookie.split("=", 1)[1].split(
+ ";", 1)[1].split(";", 1)[0].lstrip()
+ cookie_text = " " + cookie_value + "; $" + cookie_path
+ # Make a cookie
+ cookie = dict()
+ cookie[cookie_name] = cookie_text
+ with open(vmdk_path, "rb") as f:
+ resp = requests.post(url,
+ # params=params,
+ data=f,
+ # files={"file": f},
+ headers=headers,
+ cookies=cookie,
+ verify=False)
+ print("Upload results %s: %s" % (
+ resp.status_code, resp.content))
+ lease.HttpNfcLeaseComplete()
+ keepalive_thread.join()
+ return
+ elif (lease.state == vim.HttpNfcLease.State.error):
+ raise Exception("Lease error: " + lease.error.msg)
diff --git a/vio/vio/vsphere/templates/__init__.py b/vio/vio/vsphere/templates/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vio/vio/vsphere/templates/__init__.py
diff --git a/vio/vio/vsphere/templates/template.ovf b/vio/vio/vsphere/templates/template.ovf
new file mode 100644
index 0000000..caa86ea
--- /dev/null
+++ b/vio/vio/vsphere/templates/template.ovf
@@ -0,0 +1,82 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<Envelope xmlns="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:vmw="http://www.vmware.com/schema/ovf" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData">
+ <References>
+ <File ovf:id="file1" ovf:href="vmdisk1.vmdk"/>
+ </References>
+ <DiskSection>
+ <Info>List of the virtual disks</Info>
+ <Disk ovf:capacityAllocationUnits="byte" ovf:format="http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" ovf:diskId="vmdisk1" ovf:capacity="171798691840" ovf:fileRef="file1"/>
+ </DiskSection>
+ <VirtualSystem ovf:id="vmtest-template">
+ <Info>A Virtual system</Info>
+ <Name>vmtest-template</Name>
+ <OperatingSystemSection ovf:id="94" vmw:osType="ubuntu64Guest">
+ <Info>The operating system installed</Info>
+ <Description>Ubuntu Linux (64-bit)</Description>
+ </OperatingSystemSection>
+ <VirtualHardwareSection>
+ <Info>Virtual hardware requirements</Info>
+ <System>
+ <vssd:ElementName>Virtual Hardware Family</vssd:ElementName>
+ <vssd:InstanceID>0</vssd:InstanceID>
+ <vssd:VirtualSystemType>vmx-11</vssd:VirtualSystemType>
+ </System>
+ <Item>
+ <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
+ <rasd:Description>Number of Virtual CPUs</rasd:Description>
+ <rasd:ElementName>1 virtual CPU(s)</rasd:ElementName>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceType>3</rasd:ResourceType>
+ <rasd:VirtualQuantity>1</rasd:VirtualQuantity>
+ <vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>
+ </Item>
+ <Item>
+ <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
+ <rasd:Description>Memory Size</rasd:Description>
+ <rasd:ElementName>2048MB of memory</rasd:ElementName>
+ <rasd:InstanceID>2</rasd:InstanceID>
+ <rasd:ResourceType>4</rasd:ResourceType>
+ <rasd:VirtualQuantity>2048</rasd:VirtualQuantity>
+ </Item>
+ <Item>
+ <rasd:Address>0</rasd:Address>
+ <rasd:Description>SCSI Controller</rasd:Description>
+ <rasd:ElementName>SCSI Controller 1</rasd:ElementName>
+ <rasd:InstanceID>3</rasd:InstanceID>
+ <rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>
+ <rasd:ResourceType>6</rasd:ResourceType>
+ <vmw:Config ovf:required="false" vmw:key="slotInfo.pciSlotNumber" vmw:value="16"/>
+ </Item>
+ <Item>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ <rasd:ElementName>Hard Disk 1</rasd:ElementName>
+ <rasd:HostResource>ovf:/disk/vmdisk1</rasd:HostResource>
+ <rasd:InstanceID>4</rasd:InstanceID>
+ <rasd:Parent>3</rasd:Parent>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ </Item>
+ <Item ovf:required="false">
+ <rasd:ElementName>Video card</rasd:ElementName>
+ <rasd:InstanceID>5</rasd:InstanceID>
+ <rasd:ResourceType>24</rasd:ResourceType>
+ <vmw:Config ovf:required="false" vmw:key="numDisplays" vmw:value="1"/>
+ <vmw:Config ovf:required="false" vmw:key="enable3DSupport" vmw:value="false"/>
+ <vmw:Config ovf:required="false" vmw:key="useAutoDetect" vmw:value="true"/>
+ <vmw:Config ovf:required="false" vmw:key="videoRamSizeInKB" vmw:value="4096"/>
+ <vmw:Config ovf:required="false" vmw:key="graphicsMemorySizeInKB" vmw:value="262144"/>
+ <vmw:Config ovf:required="false" vmw:key="use3dRenderer" vmw:value="automatic"/>
+ </Item>
+ <vmw:Config ovf:required="false" vmw:key="flags.vbsEnabled" vmw:value="false"/>
+ <vmw:Config ovf:required="false" vmw:key="cpuHotAddEnabled" vmw:value="false"/>
+ <vmw:Config ovf:required="false" vmw:key="nestedHVEnabled" vmw:value="false"/>
+ <vmw:Config ovf:required="false" vmw:key="virtualSMCPresent" vmw:value="false"/>
+ <vmw:Config ovf:required="false" vmw:key="flags.vvtdEnabled" vmw:value="false"/>
+ <vmw:Config ovf:required="false" vmw:key="cpuHotRemoveEnabled" vmw:value="false"/>
+ <vmw:Config ovf:required="false" vmw:key="memoryHotAddEnabled" vmw:value="false"/>
+ <vmw:Config ovf:required="false" vmw:key="bootOptions.efiSecureBootEnabled" vmw:value="false"/>
+ <vmw:Config ovf:required="false" vmw:key="firmware" vmw:value="bios"/>
+ <vmw:Config ovf:required="false" vmw:key="virtualICH7MPresent" vmw:value="false"/>
+ <vmw:ExtraConfig ovf:required="false" vmw:key="svga.autodetect" vmw:value="TRUE"/>
+ </VirtualHardwareSection>
+ </VirtualSystem>
+</Envelope>
\ No newline at end of file
diff --git a/vio/vio/vsphere/utils.py b/vio/vio/vsphere/utils.py
index 8d2a648..77bcccd 100644
--- a/vio/vio/vsphere/utils.py
+++ b/vio/vio/vsphere/utils.py
@@ -1,3 +1,15 @@
+# Copyright (c) 2019 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+
from pyVim import connect
from pyVmomi import vim
@@ -6,6 +18,7 @@ import os
import yaml
vcontent = None
+service_instance = None
def get_obj(content, vimtype, name):
@@ -54,6 +67,9 @@ def wait_for_task(task):
def GetClient():
global vcontent
+ # if vcontent is not None:
+ # return vcontent
+ global service_instance
if vcontent is not None:
return vcontent
vsphere_conf_path = os.getenv("VSPHERE_CONF", "/opt/etc/vsphere.yml")
@@ -92,3 +108,44 @@ def CloneVM(src, dst, power_on=False, wait=True):
def DeployOVA(src, datacenter, resource_pool, datastore):
pass
+
+
+def vmdk_metadata(vmdk_path):
+ ret = {}
+ with open(vmdk_path, "rb") as f:
+ # import ipdb; ipdb.set_trace()
+ for i in range(30):
+ try:
+ line = f.readline()
+ if not line:
+ break
+ text = line.decode()
+ text = text.strip("\n")
+ for k in ["version", "CID", "parentCID"]:
+ if text.startswith(k):
+ ret[k] = text.split("=")[-1]
+ if text.startswith("ddb.adapterType"):
+ ret["adapterType"] = text.split('"')[1]
+ elif text.startswith("createType"):
+ ret["createType"] = text.split('"')[1]
+ elif text.startswith("ddb.virtualHWVersion"):
+ ret["virtualHWVersion"] = text.split('"')[1]
+ elif text.startswith("ddb.thinProvisioned"):
+ ret["thinProvisioned"] = text.split('"')[1]
+ elif text.startswith("ddb.deletable"):
+ ret["deletable"] = text.split('"')[1]
+ elif text.startswith("ddb.longContentID"):
+ ret["longContentID"] = text.split('"')[1]
+ elif text.startswith("R"):
+ splits = text.split(" ")
+ ret["rwMode"] = splits[0]
+ ret["size"] = splits[1]
+ ret["diskType"] = splits[2]
+ # print(text)
+ # print(ret)
+ except UnicodeDecodeError:
+ continue
+ except Exception as ex:
+ print(i, str(ex))
+ # import ipdb; ipdb.set_trace()
+ return ret
diff --git a/vio/vio/vsphere/vc.py b/vio/vio/vsphere/vc.py
index 7cc6859..deb09a0 100644
--- a/vio/vio/vsphere/vc.py
+++ b/vio/vio/vsphere/vc.py
@@ -1,12 +1,96 @@
-from vmtest import utils
+# Copyright (c) 2019 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+
+from vio.vsphere import utils
+from vio.vsphere import ovf
from pyVmomi import vim
-from pyVmomi.vim.vm.device import VirtualEthernetCard
+# from pyVmomi.vim.vm.device import VirtualEthernetCard
+from pyVim import connect
+import paramiko
+
+import os
+# import yaml
+import requests
+
+
+def new_vc():
+ host = os.getenv("VSPHERE_HOST")
+ username = os.getenv("VSPHERE_USER")
+ password = os.getenv("VSPHERE_PASS")
+ nvc = VCenter(host, username, password)
+ return nvc
+
+
+def new_esxi():
+ host = os.getenv("ESXI_HOST")
+ username = os.getenv("ESXI_USER")
+ password = os.getenv("ESXI_PASS")
+ nesxi = Esxi(host, username, password)
+ return nesxi
+
+
+class Esxi(object):
+ def __init__(self, host=None, username=None, password=None):
+ if not (host and username and password):
+ raise Exception("host or username or password not specified")
+ self.host = host
+ self.username = username
+ self.password = password
+ self.ssh = self.get_ssh_session()
+
+ def get_ssh_session(self):
+ ssh = paramiko.SSHClient()
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ ssh.connect(self.host, username=self.username, password=self.password)
+ return ssh
+
+ def exec_cmd(self, cmd):
+ ssh_stdin, ssh_stdout, ssh_stderr = self.ssh.exec_command(cmd)
+ # import ipdb; ipdb.set_trace()
+ return {
+ "stdin": ssh_stdin.read() if ssh_stdin.readable() else "",
+ "stdout": ssh_stdout.read() if ssh_stdout.readable() else "",
+ "stderr": ssh_stderr.read() if ssh_stderr.readable() else "",
+ }
+
+ def flat_vmdk(self, datastore, vmdk):
+        target_vmdk = (vmdk[:-len(".vmdk")] if vmdk.endswith(".vmdk") else vmdk) + "-new.vmdk"
+ print("Extending %s to %s" % (vmdk, target_vmdk))
+ vmdk_path = "/vmfs/volumes/%s/%s" % (datastore, vmdk)
+ target_path = "/vmfs/volumes/%s/%s" % (datastore, target_vmdk)
+ cmd = "vmkfstools -i %s %s" % (vmdk_path, target_path)
+ output = self.exec_cmd(cmd)
+ if output["stderr"]:
+ raise Exception(output["stderr"])
+ output["target_vmdk"] = target_vmdk
+ return output
class VCenter(object):
- def __init__(self):
- self.vcontent = utils.GetClient()
+ def __init__(self, host=None, username=None, password=None):
+ if not (host and username and password):
+ raise Exception("host or username or password not specified")
+ self.host = host
+ self.username = username
+ self.password = password
+ self.datacenter = ""
+ self.cluster = ""
+ self.datastore = ""
+ self.insecure = True
+ self.service_instance = connect.SmartConnectNoSSL(
+ host=self.host, user=self.username, pwd=self.password)
+ self.vcontent = self.service_instance.RetrieveContent()
+ # self.GetClient()
def clone_vm(self, src, dst, power_on=False, wait=True):
pass
@@ -22,26 +106,186 @@ class VCenter(object):
# print("there was an error")
# task_done = True
+ def find_datastore(self, ds_name):
+ datacenters_object_view = \
+ self.vcontent.viewManager.CreateContainerView(
+ self.vcontent.rootFolder,
+ [vim.Datacenter],
+ True)
+ datacenter = None
+ datastore = None
+ for dc in datacenters_object_view.view:
+ datastores_object_view = \
+ self.vcontent.viewManager.CreateContainerView(
+ dc,
+ [vim.Datastore],
+ True)
+ for ds in datastores_object_view.view:
+ if ds.info.name == ds_name:
+ datacenter = dc
+ datastore = ds
+ return datacenter, datastore
+ return datacenter, datastore
+
+ def upload_file(self, filepath, datastore, folder="onap-test"):
+ if not os.path.exists(filepath):
+ raise Exception("%s not exists" % filepath)
+ print("Getting datastore %s" % datastore)
+ dc, ds = self.find_datastore(datastore)
+        files = [filepath, (filepath[:-len(".vmdk")] if filepath.endswith(".vmdk") else filepath) + "-flat.vmdk"]
+ upload_count = 0
+ for fp in files:
+ if not os.path.exists(fp):
+ continue
+ upload_count += 1
+ file_name = fp.split("/")[-1]
+ remote_file = "/" + folder + "/" + file_name
+ resource = "/folder" + remote_file
+ params = {"dsName": ds.info.name,
+ "dcPath": dc.name}
+ http_url = "https://" + self.host + ":443" + resource
+ # print(http_url)
+ # si, vconetnt = self.GetClient()
+ # get cookies
+ client_cookie = self.service_instance._stub.cookie
+ cookie_name = client_cookie.split("=", 1)[0]
+ cookie_value = client_cookie.split("=", 1)[1].split(";", 1)[0]
+ cookie_path = client_cookie.split("=", 1)[1].split(
+ ";", 1)[1].split(";", 1)[0].lstrip()
+ cookie_text = " " + cookie_value + "; $" + cookie_path
+ # Make a cookie
+ cookie = dict()
+ cookie[cookie_name] = cookie_text
+
+ # Get the request headers set up
+ headers = {'Content-Type': 'application/octet-stream'}
+
+ with open(fp, "rb") as f:
+ # Connect and upload the file
+ print("Uploading file %s" % filepath)
+ resp = requests.put(http_url,
+ params=params,
+ data=f,
+ # files={"file": f},
+ headers=headers,
+ cookies=cookie,
+ verify=False)
+ # import ipdb; ipdb.set_trace()
+ if resp.status_code not in [200, 201]:
+ raise Exception("failed to upload %s to %s: %s" % (
+ filepath, datastore, resp.content))
+ print(resp)
+ print("upload success")
+ return upload_count
-class VM(VCenter):
- def __init__(self, name):
- super(VM, self).__init__()
- # self.vcontent = GetClient()
- vm = utils.get_obj(self.vcontent, [vim.VirtualMachine], name)
+ def deploy_ovf(self, vmdk_path, ovf_path=None, datacenter=None,
+ cluster=None, datastore=None):
+        if not datacenter and not self.datacenter:
+            raise Exception("datacenter not set")
+        if not cluster and not self.cluster:
+            raise Exception("cluster not set")
+        if not datastore and not self.datastore:
+            raise Exception("datastore not set")
+ # if not ovf_path:
+ # raise Exception("not set ovf_path")
+ ovf.deploy_ovf(self.service_instance, vmdk_path, ovf_path,
+ datacenter, cluster, datastore)
+ print("Deploy success.")
+
+ def deploy_ova(self, ova_path, datacenter=None, cluster=None,
+ datastore=None):
+ pass
+
+ def validate_image(self, vmdk_path, vm_name):
+ # import ipdb; ipdb.set_trace()
+ print("finding vm %s" % vm_name)
+ vmdk_name = vmdk_path.split("/")[-1]
+ vm = self.find_vm(vm_name)
+ dc = vm.datacenter
+ cluster = vm.cluster
+ ds = vm.status()['datastores'][0]
+ # vmdk_name = filepath.split("/")[-1]
+ print("uploading vmdk file %s" % vmdk_name)
+ ovf.deploy_ovf(self.service_instance, vmdk_path, ovf_path=None,
+ datacenter=dc.name, cluster=cluster.name,
+ datastore=ds.name)
+ tmp_vm = self.find_vm("vmtest-template")
+ print("attaching disk to vm %s" % vm_name)
+ # dsfilepath = "[%s] %s/%s" % (
+ # ds.name, "vmtest-template", "vmdisk1.vmdk")
+ dsfilepath = tmp_vm.disks()[0].backing.fileName
+ print("dsfilepath=%s" % dsfilepath)
+ vm.add_disk(filepath=dsfilepath)
+ print("power on vm %s" % vm_name)
+ ret = vm.power_on()
+ if ret is not None:
+ raise Exception("error to poweron vm: %s", ret)
+ print("power off vm %s" % vm_name)
+ vm.power_off()
+ print("Cleaning")
+ vm.remove_disk("Hard disk 2", retain_file=True)
+ tmp_vm.delete()
+
+ def find_ds(self, ds_name):
+ ds = utils.get_obj(self.vcontent, [vim.Datastore], ds_name)
+ return ds
+
+ def find_dc(self, dc_name):
+ dc = utils.get_obj(self.vcontent, [vim.Datacenter], dc_name)
+ return dc
+
+ # def find_cluster(self, cluster):
+ # cluster = utils.get_obj(self.vcontent, [vim.C], name)
+
+ def find_vm(self, vm_name):
+ return VM(self, vm_name)
+
+
+class VM(object):
+ def __init__(self, vc, name):
+ self.vc = vc
+ vm = utils.get_obj(self.vc.vcontent, [vim.VirtualMachine], name)
if vm is None:
raise Exception("VM %s not found" % name)
self.vm = vm
self.name = name
+ @property
+ def datacenter(self):
+ res = self.vm.resourcePool
+ while True:
+ if res is None:
+ break
+ if str(res).startswith("'vim.Datacenter"):
+ return res
+ res = res.parent
+ return None
+
+ @property
+ def cluster(self):
+ res = self.vm.resourcePool
+ while True:
+ if res is None:
+ break
+ if str(res).startswith("'vim.ClusterComputeResource"):
+ return res
+ res = res.parent
+ return None
+
def status(self):
- return {
+ ret = {
"name": self.vm.name,
- "resource_pool": self.vm.resourcePool.config.entity,
+ "resource_pool": self.vm.resourcePool,
+ "cluster": self.cluster,
+ "datacenter": self.datacenter,
"datastores": [ds for ds in self.vm.datastore],
- "networks": [net for net in self.vm.network],
- "snapshots": [ss for ss in self.vm.snapshots],
"power_state": self.vm.runtime.powerState,
}
+ if self.vm.network:
+ ret["networks"] = [net for net in self.vm.network]
+ if self.vm.snapshot:
+ ret["snapshots"] = [ss for ss in self.vm.snapshot]
+ return ret
@property
def power_state(self):
@@ -53,7 +297,7 @@ class VM(VCenter):
# print("power_on task:", task)
# result = wait_for_task(task)
# print("power_on result:", result)
- result = self.wait_for_task(self.vm.PowerOn())
+ result = self.vc.wait_for_task(self.vm.PowerOn())
return result
def power_off(self):
@@ -62,12 +306,12 @@ class VM(VCenter):
# print("power_off task:", task)
# result = wait_for_task(task)
# print("power_off result:", result)
- result = self.wait_for_task(self.vm.PowerOff())
+ result = self.vc.wait_for_task(self.vm.PowerOff())
return result
def delete(self):
self.power_off()
- result = self.wait_for_task(self.vm.Destroy())
+ result = self.vc.wait_for_task(self.vm.Destroy())
return result
def clone(self, dst, wait=True):
@@ -79,9 +323,9 @@ class VM(VCenter):
print("clone task:", task)
if wait:
print("wait for task:", task)
- result = self.wait_for_task(task)
+ result = self.vc.wait_for_task(task)
print("task result:", result)
- return VM(result.name)
+ return VM(self.vc, result.name)
return task
def add_nic(self, network_name, nic_type="vmxnet3", mac=None):
@@ -102,7 +346,8 @@ class VM(VCenter):
else:
raise Exception("not supported nic type %s" % nic_type)
- network = utils.get_obj(self.vcontent, [vim.Network], network_name)
+ network = utils.get_obj(self.vc.vcontent, [vim.Network], network_name)
+ VirtualEthernetCard = vim.vm.device.VirtualEthernetCard
if isinstance(network, vim.OpaqueNetwork):
nic_spec.device.backing = \
VirtualEthernetCard.OpaqueNetworkBackingInfo()
@@ -140,7 +385,7 @@ class VM(VCenter):
spec.deviceChange = [nic_spec]
task = self.vm.ReconfigVM_Task(spec=spec)
- result = self.wait_for_task(task)
+ result = self.vc.wait_for_task(task)
# result == None
return result
@@ -158,11 +403,12 @@ class VM(VCenter):
virtual_nic_spec.device = dev
spec.deviceChange.append(virtual_nic_spec)
task = self.vm.ReconfigVM_Task(spec=spec)
- result = self.wait_for_task(task)
+ result = self.vc.wait_for_task(task)
# result == None
return result
- def add_disk(self, disk_size, disk_type="thin", wait=True):
+ def add_disk(self, disk_size=0, disk_type="thin", filepath=None,
+ wait=True):
spec = vim.vm.ConfigSpec()
unit_number = 0
for dev in self.vm.config.hardware.device:
@@ -174,29 +420,39 @@ class VM(VCenter):
if isinstance(dev, vim.vm.device.VirtualSCSIController):
controller = dev
dev_changes = []
- new_disk_kb = int(disk_size) * 1024 * 1024
+ if disk_size <= 0 and not filepath:
+ raise Exception("Neither disk_size nor filepath specified")
disk_spec = vim.vm.device.VirtualDeviceSpec()
- disk_spec.fileOperation = "create"
+ if not filepath:
+ disk_spec.fileOperation = "create"
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
disk_spec.device = vim.vm.device.VirtualDisk()
disk_spec.device.backing = \
vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
if disk_type == 'thin':
disk_spec.device.backing.thinProvisioned = True
+ if filepath:
+ disk_spec.device.backing.fileName = filepath
disk_spec.device.backing.diskMode = 'persistent'
disk_spec.device.unitNumber = unit_number
- disk_spec.device.capacityInKB = new_disk_kb
+ if not filepath:
+ new_disk_kb = int(disk_size) * 1024 * 1024
+ disk_spec.device.capacityInKB = new_disk_kb
disk_spec.device.controllerKey = controller.key
dev_changes.append(disk_spec)
spec.deviceChange = dev_changes
task = self.vm.ReconfigVM_Task(spec=spec)
- print("Adding a %sGB disk to vm %s" % (disk_size, self.name))
+ if disk_size:
+ print("Adding a %sGB disk to vm %s" % (disk_size, self.name))
+ else:
+ print("Attaching %s disk to vm %s" % (filepath, self.name))
if wait:
- ret = self.wait_for_task(task)
+ ret = self.vc.wait_for_task(task)
return ret
return task
def remove_disk(self, disk_label, retain_file=False, wait=True):
+ print("Attempt to remove %s from %s" % (disk_label, self.name))
virtual_hdd_device = None
if isinstance(disk_label, int):
if disk_label < 1:
@@ -228,7 +484,7 @@ class VM(VCenter):
spec.deviceChange = [virtual_hdd_spec]
task = self.vm.ReconfigVM_Task(spec=spec)
if wait:
- ret = self.wait_for_task(task)
+ ret = self.vc.wait_for_task(task)
return ret
return task