Using VC UUID for attach/detach where possible (#1526)
* Added proper return on protocol version mismatch

* Using VC UUID for attach/detach where possible

When communication over the VMCI vSocket is established, we find out the VM ID
from the socket. There are two IDs - the VC UUID and the BIOS UUID. When a .vmx
file is copied (e.g. by VCD or other products), the BIOS UUID can be duplicated.
However, the VC UUID is unique on creation of the VM, and stays unique as long
as the ESXi host is part of a VC. So it is a much better candidate for use as a
unique ID.

This change tries to use the VC UUID first, and falls back to the BIOS UUID
next. The change also checks that the VM name (for a VM found by ID) matches
the one we see from the vSocket, and adds a few other minor log messages.

* Try to use VC UUID first for locating VMs, then fall back to BIOS UUID

When communication over the VMCI vSocket is established in vmdk_ops.py, we find
out the VM ID from the socket, and then locate the VM ManagedObject by this ID.

There are two IDs - the VC UUID and the BIOS UUID. When a .vmx file is copied
(e.g. by VCD or other products), the BIOS UUID can be duplicated, which causes
failures like #1373.

However, the VC UUID is unique on creation of the VM, and stays unique as long
as the ESXi host is part of a VC. So it is a much better candidate for use as a
unique ID.

This change tries to use the VC UUID first, and falls back to the BIOS UUID
next (see the sketch after these notes). The change also checks that the VM
name (for a VM found by ID) matches the one we see from the vSocket, and adds
a few other minor log messages.

* Unbundled detachVMDK and attachVMDK to address review comments

* fixed the VM instance ID
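
A minimal sketch of the lookup order this change introduces, condensed from the apply_action_VMDK helper in the diff below. findVmByUuid is the existing helper in vmdk_ops.py; the wrapper name find_vm is only for illustration:

import logging

def find_vm(vm_name, bios_uuid, vc_uuid):
    """Locate a VM by VC UUID first, falling back to BIOS UUID (sketch)."""
    vm = None
    if vc_uuid:
        vm = findVmByUuid(vc_uuid)    # VC (instance) UUID is unique within a VC
    if not vm:
        logging.warning("Failed to find VM by VC UUID %s, trying BIOS UUID %s",
                        vc_uuid, bios_uuid)
        vm = findVmByUuid(bios_uuid)  # BIOS UUID may be duplicated if the .vmx was copied
    if not vm:
        return None                   # caller reports the error
    if vm.config.name != vm_name:     # sanity check against the name seen on the vSocket
        logging.warning("vm_name from vSocket '%s' does not match VM object '%s'",
                        vm_name, vm.config.name)
    return vm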
Mark Sterin authored and shuklanirdesh82 committed Jun 30, 2017
1 parent 95ce352 commit b93c186
Showing 1 changed file with 67 additions and 40 deletions.
107 changes: 67 additions & 40 deletions esx_service/vmdk_ops.py
@@ -164,8 +164,8 @@ def RunCommand(cmd):
# for now we care about size and (maybe) policy
def createVMDK(vmdk_path, vm_name, vol_name,
opts={}, vm_uuid=None, tenant_uuid=None, datastore_url=None):
logging.info("*** createVMDK: %s opts = %s vm_name=%s vm_uuid=%s tenant_uuid=%s datastore_url=%s",
vmdk_path, opts, vm_name, vm_uuid, tenant_uuid, datastore_url)
logging.info("*** createVMDK: %s opts=%s vm_name=%s vm_uuid=%s tenant_uuid=%s datastore_url=%s",
vmdk_path, opts, vm_name, vm_uuid, tenant_uuid, datastore_url)

if os.path.isfile(vmdk_path):
# We are mostly here due to race or Plugin VMCI retry #1076
@@ -244,7 +244,8 @@ def createVMDK(vmdk_path, vm_name, vol_name,


def cloneVMDK(vm_name, vmdk_path, opts={}, vm_uuid=None, datastore_url=None):
logging.info("*** cloneVMDK: %s opts = %s vm_uuid=%s datastore_url=%s", vmdk_path, opts, vm_uuid, datastore_url)
logging.info("*** cloneVMDK: %s opts = %s vm_uuid=%s datastore_url=%s",
vmdk_path, opts, vm_uuid, datastore_url)

# Get source volume path for cloning
error_info, tenant_uuid, tenant_name = auth.get_tenant(vm_uuid)
@@ -656,7 +657,8 @@ def listVMDK(tenant):
for x in vmdks]


# Return VM managed object, reconnect if needed. Throws if fails twice.
# Return VM managed object, reconnect if needed. Throws if connection fails twice.
# returns None if the uuid is not found
def findVmByUuid(vm_uuid):
si = get_si()
vm = si.content.searchIndex.FindByUuid(None, vm_uuid, True, False)
@@ -668,20 +670,36 @@ def vm_uuid2name(vm_uuid):
return None
return vm.config.name

# Return error, or None for OK.
def attachVMDK(vmdk_path, vm_uuid):
vm = findVmByUuid(vm_uuid)
logging.info("*** attachVMDK: %s to %s VM uuid = %s",
vmdk_path, vm.config.name, vm_uuid)
return disk_attach(vmdk_path, vm)

def attachVMDK(vmdk_path, vm_name, bios_uuid, vc_uuid):
return apply_action_VMDK(disk_attach, vmdk_path, vm_name, bios_uuid, vc_uuid)

def detachVMDK(vmdk_path, vm_name, bios_uuid, vc_uuid):
return apply_action_VMDK(disk_detach, vmdk_path, vm_name, bios_uuid, vc_uuid)

def apply_action_VMDK(action, vmdk_path, vm_name, bios_uuid, vc_uuid):
# note: vc_uuid is the last one to avoid reworking tests which use positional args and
# are not aware of vc_uuid
"""Finds the VM and applies action(path,vm_MO) to it.
Returns json reply from action to pass upstairs, or json with 'err'"""

logging.info("*** %s: VMDK %s to VM '%s' , bios uuid = %s, VC uuid=%s)",
action.__name__, vmdk_path, vm_name, bios_uuid, vc_uuid)
vm = None
if vc_uuid:
vm = findVmByUuid(vc_uuid)
if not vm: # either vc_uuid is not even passed, or we failed to find the VM by VC uuid:
logging.warning("Failed to find VM by VC UUID %s, trying BIOS UUID %s", vc_uuid, bios_uuid)
vm = findVmByUuid(bios_uuid)
if not vm: # can't find VM by VC or BIOS uuid
msg = "Failed to find VM object for %s (bios %s vc %s)" % (vm_name, bios_uuid, vc_uuid)
logging.error(msg)
return err(msg)

# Return error, or None for OK.
def detachVMDK(vmdk_path, vm_uuid):
vm = findVmByUuid(vm_uuid)
logging.info("*** detachVMDK: %s from %s VM uuid = %s",
vmdk_path, vm.config.name, vm_uuid)
return disk_detach(vmdk_path, vm)
if vm.config.name != vm_name:
logging.warning("vm_name from vSocket '%s' does not match VM object '%s' ", vm_name, vm.config.name)

return action(vmdk_path, vm)


# Check existence (and creates if needed) the path for docker volume VMDKs
@@ -831,7 +849,7 @@ def authorize_check(vm_uuid, datastore_url, cmd, opts, use_default_ds, datastore


# gets the requests, calculates path for volumes, and calls the relevant handler
def executeRequest(vm_uuid, vm_name, config_path, cmd, full_vol_name, opts):
def executeRequest(vm_uuid, vm_name, config_path, cmd, full_vol_name, opts, vc_uuid=None):
"""
Executes a <cmd> request issused from a VM.
The request is about volume <full_volume_name> in format volume@datastore.
@@ -871,8 +889,8 @@ def executeRequest(vm_uuid, vm_name, config_path, cmd, full_vol_name, opts):
# default_datastore could be a real datastore name or a hard coded one "_VM_DS"
default_datastore = get_datastore_name(default_datastore_url)

logging.debug("executeRequest: vm uuid=%s name=%s, tenant_name=%s, default_datastore=%s",
vm_uuid, vm_name, tenant_name, default_datastore)
logging.debug("executeRequest: vm uuid=%s VC uuid=%s name=%s, tenant_name=%s, default_datastore=%s",
vm_uuid, vc_uuid, vm_name, tenant_name, default_datastore)

if cmd == "list":
threadutils.set_thread_name("{0}-nolock-{1}".format(vm_name, cmd))
@@ -902,7 +920,8 @@ def executeRequest(vm_uuid, vm_name, config_path, cmd, full_vol_name, opts):
"default_datastore_url=%s datastore_url=%s",
vm_uuid, vm_name, tenant_uuid, tenant_name, default_datastore_url, datastore_url)

error_info = authorize_check(vm_uuid, datastore_url, cmd, opts, use_default_ds, datastore, vm_datastore)
error_info = authorize_check(vm_uuid, datastore_url, cmd, opts, use_default_ds, datastore,
vm_datastore)
if error_info:
return err(error_info)

@@ -952,10 +971,12 @@ def executeRequest(vm_uuid, vm_name, config_path, cmd, full_vol_name, opts):
# For attach/detach reconfigure tasks, hold a per vm lock.
elif cmd == "attach":
with lockManager.get_lock(vm_uuid):
response = attachVMDK(vmdk_path, vm_uuid)
response = attachVMDK(vmdk_path=vmdk_path, vm_name=vm_name,
bios_uuid=vm_uuid, vc_uuid=vc_uuid)
elif cmd == "detach":
with lockManager.get_lock(vm_uuid):
response = detachVMDK(vmdk_path, vm_uuid)
response = detachVMDK(vmdk_path=vmdk_path, vm_name=vm_name,
bios_uuid=vm_uuid, vc_uuid=vc_uuid)
else:
return err("Unknown command:" + cmd)

@@ -973,7 +994,7 @@ def connectLocalSi():

# Connect to local server as user "dcui" since this is the Admin that does not lose its
# Admin permissions even when the host is in lockdown mode. User "dcui" does not have a
# password - it is used by the ESXi local application DCUI (Direct Console User Interface)
# password - it is used by local application DCUI (Direct Console User Interface)
# Version must be set to access newer features, such as VSAN.
_service_instance = pyVim.connect.Connect(
host='localhost',
@@ -1100,7 +1121,7 @@ def setStatusAttached(vmdk_path, vm, vm_dev_info=None):
if not vol_meta:
vol_meta = {}
vol_meta[kv.STATUS] = kv.ATTACHED
vol_meta[kv.ATTACHED_VM_UUID] = vm.config.uuid
vol_meta[kv.ATTACHED_VM_UUID] = vm.config.instanceUuid
vol_meta[kv.ATTACHED_VM_NAME] = vm.config.name
if vm_dev_info:
vol_meta[kv.ATTACHED_VM_DEV] = vm_dev_info
@@ -1155,19 +1176,19 @@ def getStatusAttached(vmdk_path):
return attached, uuid, attach_as, vm_name

def log_attached_volume(vmdk_path, kv_uuid, vol_name):
'''
Log appropriate message for volume thats already attached.
'''
cur_vm = findVmByUuid(kv_uuid)

if cur_vm:
msg = "Disk {0} is already attached to VM {1}".format(vmdk_path,
cur_vm.config.name)
else:
msg = "Failed to find VM {0}({1}), disk {2} is already attached".format(vol_name,
kv_uuid,
vmdk_path)
logging.warning(msg)
'''
Log appropriate message for volume thats already attached.
'''
cur_vm = findVmByUuid(kv_uuid)

if cur_vm:
msg = "Disk {0} is already attached to VM {1}".format(vmdk_path,
cur_vm.config.name)
else:
msg = "Failed to find VM {0}({1}), disk {2} is already attached".format(vol_name,
kv_uuid,
vmdk_path)
logging.warning(msg)

def add_pvscsi_controller(vm, controllers, max_scsi_controllers, offset_from_bus_number):
'''
@@ -1535,11 +1556,13 @@ def execRequestThread(client_socket, cartel, request):
group_info = vsi.get("/vm/%s/vmmGroupInfo" % vmm_leader)
vm_name = group_info["displayName"]
cfg_path = group_info["cfgPath"]
uuid = group_info["uuid"]
uuid = group_info["uuid"] # BIOS UUID, see http://www.virtu-al.net/2015/12/04/a-quick-reference-of-vsphere-ids/
vc_uuid = group_info["vcUuid"] # VC UUID
# pyVmomi expects uuid like this one: 564dac12-b1a0-f735-0df3-bceb00b30340
# to get it from uuid in VSI vms/<id>/vmmGroup, we use the following format:
UUID_FORMAT = "{0}{1}{2}{3}-{4}{5}-{6}{7}-{8}{9}-{10}{11}{12}{13}{14}{15}"
vm_uuid = UUID_FORMAT.format(*uuid.replace("-", " ").split())
vc_uuid = UUID_FORMAT.format(*vc_uuid.replace("-", " ").split())

try:
req = json.loads(request.decode('utf-8'))
@@ -1551,7 +1574,7 @@ def execRequestThread(client_socket, cartel, request):
# If req from client does not include version number, set the version to
# SERVER_PROTOCOL_VERSION by default to make backward compatible
client_protocol_version = int(req["version"]) if "version" in req else SERVER_PROTOCOL_VERSION
logging.debug("execRequestThread: version=%d", client_protocol_version)
logging.debug("execRequestThread: client protocol version=%d", client_protocol_version)
if client_protocol_version != SERVER_PROTOCOL_VERSION:
if client_protocol_version < SERVER_PROTOCOL_VERSION:
reply_string = err("vSphere Docker Volume Service client version ({}) is older than server version ({}), "
@@ -1560,9 +1583,13 @@ def execRequestThread(client_socket, cartel, request):
reply_string = err("vSphere Docker Volume Service client version ({}) is newer than server version ({}), "
"please update the server.".format(client_protocol_version, SERVER_PROTOCOL_VERSION))
send_vmci_reply(client_socket, reply_string)
logging.warning("executeRequest '%s' failed: %s", req["cmd"], reply_string)
return

opts = req["details"]["Opts"] if "Opts" in req["details"] else {}
reply_string = executeRequest(vm_uuid=vm_uuid,
reply_string = executeRequest(
vm_uuid=vm_uuid,
vc_uuid=vc_uuid,
vm_name=vm_name,
config_path=cfg_path,
cmd=req["cmd"],
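For reference, the UUID reformatting that the updated execRequestThread applies to both the BIOS and VC UUIDs can be exercised on its own. A small sketch, assuming the VSI string follows the space- and dash-separated byte format implied by the comment in the diff:

# Reassemble a VSI-style UUID string into the canonical form pyVmomi expects,
# e.g. "56 4d ac 12 b1 a0 f7 35-0d f3 bc eb 00 b3 03 40"
#   -> "564dac12-b1a0-f735-0df3-bceb00b30340"
UUID_FORMAT = "{0}{1}{2}{3}-{4}{5}-{6}{7}-{8}{9}-{10}{11}{12}{13}{14}{15}"

def canonical_uuid(vsi_uuid):
    return UUID_FORMAT.format(*vsi_uuid.replace("-", " ").split())

print(canonical_uuid("56 4d ac 12 b1 a0 f7 35-0d f3 bc eb 00 b3 03 40"))
# prints: 564dac12-b1a0-f735-0df3-bceb00b30340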
