/* libvirt/src/vmware/vmware_driver.c */
/*---------------------------------------------------------------------------*/
/*
* Copyright (C) 2011-2015 Red Hat, Inc.
* Copyright 2010, diateam (www.diateam.net)
* Copyright (C) 2013. Doug Goldstein <cardoe@cardoe.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see
* <http://www.gnu.org/licenses/>.
*/
/*---------------------------------------------------------------------------*/
#include <config.h>
#include <fcntl.h>
#include "internal.h"
#include "virerror.h"
#include "datatypes.h"
#include "virfile.h"
#include "viralloc.h"
#include "viruuid.h"
#include "vircommand.h"
#include "vmx.h"
#include "vmware_conf.h"
#include "vmware_driver.h"
#include "virstring.h"
/* Various places we may find the "vmrun" binary; entries without a
 * leading '/' are looked up in PATH.
 */
static const char * const vmrun_candidates[] = {
"vmrun",
#ifdef __APPLE__
"/Applications/VMware Fusion.app/Contents/Library/vmrun",
"/Library/Application Support/VMware Fusion/vmrun",
#endif /* __APPLE__ */
};
static void
vmwareDriverLock(struct vmware_driver *driver)
{
virMutexLock(&driver->lock);
}
static void
vmwareDriverUnlock(struct vmware_driver *driver)
{
virMutexUnlock(&driver->lock);
}
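/*
 * Look up a domain object by UUID.  This variant expects the caller to
 * already hold the driver lock; vmwareDomObjFromDomain() below wraps it
 * with the lock for convenience.  Reports VIR_ERR_NO_DOMAIN on failure.
 */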
static virDomainObjPtr
vmwareDomObjFromDomainLocked(struct vmware_driver *driver,
const unsigned char *uuid)
{
virDomainObjPtr vm;
char uuidstr[VIR_UUID_STRING_BUFLEN];
if (!(vm = virDomainObjListFindByUUID(driver->domains, uuid))) {
virUUIDFormat(uuid, uuidstr);
virReportError(VIR_ERR_NO_DOMAIN,
_("no domain with matching uuid '%s'"), uuidstr);
return NULL;
}
return vm;
}
static virDomainObjPtr
vmwareDomObjFromDomain(struct vmware_driver *driver,
const unsigned char *uuid)
{
virDomainObjPtr vm;
vmwareDriverLock(driver);
vm = vmwareDomObjFromDomainLocked(driver, uuid);
vmwareDriverUnlock(driver);
return vm;
}
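/*
 * Private-data callbacks for virDomainObj: allocate and free the
 * per-domain vmwareDomain structure, which records the path to the
 * domain's .vmx file and whether its GUI should be shown.
 */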
static void *
vmwareDataAllocFunc(void *opaque G_GNUC_UNUSED)
{
vmwareDomainPtr dom;
if (VIR_ALLOC(dom) < 0)
return NULL;
dom->vmxPath = NULL;
dom->gui = true;
return dom;
}
static void
vmwareDataFreeFunc(void *data)
{
vmwareDomainPtr dom = data;
VIR_FREE(dom->vmxPath);
VIR_FREE(dom);
}
static int
vmwareDomainDefPostParse(virDomainDefPtr def,
unsigned int parseFlags G_GNUC_UNUSED,
void *opaque,
void *parseOpaque G_GNUC_UNUSED)
{
struct vmware_driver *driver = opaque;
if (!virCapabilitiesDomainSupported(driver->caps, def->os.type,
def->os.arch,
def->virtType))
return -1;
return 0;
}
static int
vmwareDomainDeviceDefPostParse(virDomainDeviceDefPtr dev G_GNUC_UNUSED,
const virDomainDef *def G_GNUC_UNUSED,
unsigned int parseFlags G_GNUC_UNUSED,
void *opaque G_GNUC_UNUSED,
void *parseOpaque G_GNUC_UNUSED)
{
return 0;
}
virDomainDefParserConfig vmwareDomainDefParserConfig = {
.devicesPostParseCallback = vmwareDomainDeviceDefPostParse,
.domainPostParseCallback = vmwareDomainDefPostParse,
.defArch = VIR_ARCH_I686,
};
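/*
 * Build the virDomainXMLOption used for parsing and formatting domain
 * XML, wiring in the private-data callbacks and the parser config above.
 */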
static virDomainXMLOptionPtr
vmwareDomainXMLConfigInit(struct vmware_driver *driver)
{
virDomainXMLPrivateDataCallbacks priv = { .alloc = vmwareDataAllocFunc,
.free = vmwareDataFreeFunc };
vmwareDomainDefParserConfig.priv = driver;
return virDomainXMLOptionNew(&vmwareDomainDefParserConfig, &priv,
NULL, NULL, NULL);
}
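/*
 * Open a connection to one of the vmware*:///session URIs.  The backend
 * (Workstation, Player or Fusion) is derived from the part of the URI
 * scheme following "vmware", and every subsequent operation is driven
 * through the "vmrun" utility located below.
 */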
static virDrvOpenStatus
vmwareConnectOpen(virConnectPtr conn,
virConnectAuthPtr auth G_GNUC_UNUSED,
virConfPtr conf G_GNUC_UNUSED,
unsigned int flags)
{
struct vmware_driver *driver;
size_t i;
char *tmp;
char *vmrun = NULL;
virCheckFlags(VIR_CONNECT_RO, VIR_DRV_OPEN_ERROR);
/* If the path isn't /session, the URI was mistyped, so report the correct paths */
if (STRNEQ(conn->uri->path, "/session")) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("unexpected VMware URI path '%s', try vmwareplayer:///session, vmwarews:///session or vmwarefusion:///session"),
NULLSTR(conn->uri->path));
return VIR_DRV_OPEN_ERROR;
}
/* We now know the URI is definitely for this driver, so beyond
* here, don't return DECLINED, always use ERROR */
if (VIR_ALLOC(driver) < 0)
return VIR_DRV_OPEN_ERROR;
/* Find vmrun, which is what this driver uses to communicate with
 * the VMware hypervisor. We look it up first since it is also used
 * for auto detection of the backend.
 */
for (i = 0; i < G_N_ELEMENTS(vmrun_candidates); i++) {
vmrun = virFindFileInPath(vmrun_candidates[i]);
if (vmrun == NULL)
continue;
if (virFileResolveLink(vmrun, &driver->vmrun) < 0) {
virReportSystemError(errno, _("unable to resolve symlink '%s'"), vmrun);
goto cleanup;
}
VIR_FREE(vmrun);
/* If we found one, we can stop looking */
if (driver->vmrun)
break;
}
if (driver->vmrun == NULL) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("vmrun utility is missing"));
goto cleanup;
}
if (virMutexInit(&driver->lock) < 0)
goto cleanup;
if ((tmp = STRSKIP(conn->uri->scheme, "vmware")) == NULL) {
virReportError(VIR_ERR_INTERNAL_ERROR, _("unable to parse URI "
"scheme '%s'"), conn->uri->scheme);
goto cleanup;
}
/* Match the non-'vmware' part of the scheme as the driver backend */
driver->type = vmwareDriverTypeFromString(tmp);
if (driver->type == -1) {
virReportError(VIR_ERR_INTERNAL_ERROR, _("unable to find valid "
"requested VMware backend '%s'"), tmp);
goto cleanup;
}
if (vmwareExtractVersion(driver) < 0)
goto cleanup;
if (!(driver->domains = virDomainObjListNew()))
goto cleanup;
if (!(driver->caps = vmwareCapsInit()))
goto cleanup;
if (!(driver->xmlopt = vmwareDomainXMLConfigInit(driver)))
goto cleanup;
if (vmwareLoadDomains(driver) < 0)
goto cleanup;
conn->privateData = driver;
return VIR_DRV_OPEN_SUCCESS;
cleanup:
vmwareFreeDriver(driver);
VIR_FREE(vmrun);
return VIR_DRV_OPEN_ERROR;
}
static int
vmwareConnectClose(virConnectPtr conn)
{
struct vmware_driver *driver = conn->privateData;
vmwareFreeDriver(driver);
conn->privateData = NULL;
return 0;
}
static const char *
vmwareConnectGetType(virConnectPtr conn G_GNUC_UNUSED)
{
return "VMware";
}
static int
vmwareConnectGetVersion(virConnectPtr conn, unsigned long *version)
{
struct vmware_driver *driver = conn->privateData;
vmwareDriverLock(driver);
*version = driver->version;
vmwareDriverUnlock(driver);
return 0;
}
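/*
 * Refresh the cached state of @vm by running "vmrun -T <type> list" and
 * checking whether the domain's fully resolved .vmx path appears in the
 * output.  The output looks roughly like this (illustrative example):
 *
 *   Total running VMs: 1
 *   /path/to/guest/guest.vmx
 *
 * Only lines beginning with '/' are treated as vmx paths.  If the path is
 * found the domain is running (or still paused, which "vmrun list" cannot
 * distinguish); otherwise it is marked shut off and its id reset to -1.
 */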
static int
vmwareUpdateVMStatus(struct vmware_driver *driver, virDomainObjPtr vm)
{
virCommandPtr cmd;
char *outbuf = NULL;
char *vmxAbsolutePath = NULL;
char *parsedVmxPath = NULL;
char *str;
char *saveptr = NULL;
bool found = false;
int oldState = virDomainObjGetState(vm, NULL);
int newState;
int ret = -1;
cmd = virCommandNewArgList(driver->vmrun, "-T",
vmwareDriverTypeToString(driver->type),
"list", NULL);
virCommandSetOutputBuffer(cmd, &outbuf);
if (virCommandRun(cmd, NULL) < 0)
goto cleanup;
if (virFileResolveAllLinks(((vmwareDomainPtr) vm->privateData)->vmxPath,
&vmxAbsolutePath) < 0)
goto cleanup;
for (str = outbuf; (parsedVmxPath = strtok_r(str, "\n", &saveptr)) != NULL;
str = NULL) {
if (parsedVmxPath[0] != '/')
continue;
if (STREQ(parsedVmxPath, vmxAbsolutePath)) {
found = true;
/* If the vmx path is in the output, the domain is either running or
 * paused; "vmrun list" cannot tell us which, so keep any previously
 * recorded paused state. */
if (oldState == VIR_DOMAIN_PAUSED)
newState = oldState;
else
newState = VIR_DOMAIN_RUNNING;
break;
}
}
if (!found) {
vm->def->id = -1;
newState = VIR_DOMAIN_SHUTOFF;
}
virDomainObjSetState(vm, newState, 0);
ret = 0;
cleanup:
virCommandFree(cmd);
VIR_FREE(outbuf);
VIR_FREE(vmxAbsolutePath);
return ret;
}
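/*
 * Stop a running domain with "vmrun ... stop <vmx> soft" and record the
 * new shutoff state using the given @reason.
 */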
static int
vmwareStopVM(struct vmware_driver *driver,
virDomainObjPtr vm,
virDomainShutoffReason reason)
{
const char *cmd[] = {
driver->vmrun, "-T", PROGRAM_SENTINEL, "stop",
PROGRAM_SENTINEL, "soft", NULL
};
vmwareSetSentinal(cmd, vmwareDriverTypeToString(driver->type));
vmwareSetSentinal(cmd, ((vmwareDomainPtr) vm->privateData)->vmxPath);
if (virRun(cmd, NULL) < 0)
return -1;
vm->def->id = -1;
virDomainObjSetState(vm, VIR_DOMAIN_SHUTOFF, reason);
return 0;
}
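/*
 * Start a defined domain with "vmrun ... start <vmx> [nogui]".  The
 * domain id is set from the process id returned by vmwareExtractPid();
 * if that lookup fails the domain is stopped again and the start fails.
 */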
static int
vmwareStartVM(struct vmware_driver *driver, virDomainObjPtr vm)
{
const char *cmd[] = {
driver->vmrun, "-T", PROGRAM_SENTINEL, "start",
PROGRAM_SENTINEL, PROGRAM_SENTINEL, NULL
};
const char *vmxPath = ((vmwareDomainPtr) vm->privateData)->vmxPath;
if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_SHUTOFF) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("domain is not in shutoff state"));
return -1;
}
vmwareSetSentinal(cmd, vmwareDriverTypeToString(driver->type));
vmwareSetSentinal(cmd, vmxPath);
if (!((vmwareDomainPtr) vm->privateData)->gui)
vmwareSetSentinal(cmd, NOGUI);
else
vmwareSetSentinal(cmd, NULL);
if (virRun(cmd, NULL) < 0)
return -1;
if ((vm->def->id = vmwareExtractPid(vmxPath)) < 0) {
vmwareStopVM(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED);
return -1;
}
virDomainObjSetState(vm, VIR_DOMAIN_RUNNING, VIR_DOMAIN_RUNNING_BOOTED);
return 0;
}
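/*
 * Define a persistent domain: parse the XML, format it as a VMware VMX
 * config (virtual HW version 7), write the .vmx file to disk and add the
 * domain to the driver's list without starting it.
 */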
static virDomainPtr
vmwareDomainDefineXMLFlags(virConnectPtr conn, const char *xml, unsigned int flags)
{
struct vmware_driver *driver = conn->privateData;
virDomainDefPtr vmdef = NULL;
virDomainObjPtr vm = NULL;
virDomainPtr dom = NULL;
char *vmx = NULL;
char *directoryName = NULL;
char *fileName = NULL;
char *vmxPath = NULL;
vmwareDomainPtr pDomain = NULL;
virVMXContext ctx;
unsigned int parse_flags = VIR_DOMAIN_DEF_PARSE_INACTIVE;
virCheckFlags(VIR_DOMAIN_DEFINE_VALIDATE, NULL);
if (flags & VIR_DOMAIN_DEFINE_VALIDATE)
parse_flags |= VIR_DOMAIN_DEF_PARSE_VALIDATE_SCHEMA;
ctx.parseFileName = NULL;
ctx.formatFileName = vmwareCopyVMXFileName;
ctx.autodetectSCSIControllerModel = NULL;
ctx.datacenterPath = NULL;
vmwareDriverLock(driver);
if ((vmdef = virDomainDefParseString(xml, driver->caps, driver->xmlopt,
NULL, parse_flags)) == NULL)
goto cleanup;
if (virXMLCheckIllegalChars("name", vmdef->name, "\n") < 0)
goto cleanup;
/* generate vmx file */
vmx = virVMXFormatConfig(&ctx, driver->xmlopt, vmdef, 7);
if (vmx == NULL)
goto cleanup;
if (vmwareVmxPath(vmdef, &vmxPath) < 0)
goto cleanup;
/* create vmx file */
if (virFileWriteStr(vmxPath, vmx, S_IRUSR|S_IWUSR) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Failed to write vmx file '%s'"), vmxPath);
goto cleanup;
}
/* assign def */
if (!(vm = virDomainObjListAdd(driver->domains,
vmdef,
driver->xmlopt,
VIR_DOMAIN_OBJ_LIST_ADD_CHECK_LIVE,
NULL)))
goto cleanup;
pDomain = vm->privateData;
pDomain->vmxPath = g_strdup(vmxPath);
vmwareDomainConfigDisplay(pDomain, vmdef);
vmdef = NULL;
vm->persistent = 1;
dom = virGetDomain(conn, vm->def->name, vm->def->uuid, -1);
cleanup:
virDomainDefFree(vmdef);
VIR_FREE(vmx);
VIR_FREE(directoryName);
VIR_FREE(fileName);
VIR_FREE(vmxPath);
if (vm)
virObjectUnlock(vm);
vmwareDriverUnlock(driver);
return dom;
}
static virDomainPtr
vmwareDomainDefineXML(virConnectPtr conn, const char *xml)
{
return vmwareDomainDefineXMLFlags(conn, xml, 0);
}
static int
vmwareDomainShutdownFlags(virDomainPtr dom,
unsigned int flags)
{
struct vmware_driver *driver = dom->conn->privateData;
virDomainObjPtr vm;
int ret = -1;
virCheckFlags(0, -1);
vmwareDriverLock(driver);
if (!(vm = vmwareDomObjFromDomainLocked(driver, dom->uuid)))
goto cleanup;
if (vmwareUpdateVMStatus(driver, vm) < 0)
goto cleanup;
if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("domain is not in running state"));
goto cleanup;
}
if (vmwareStopVM(driver, vm, VIR_DOMAIN_SHUTOFF_SHUTDOWN) < 0)
goto cleanup;
if (!vm->persistent)
virDomainObjListRemove(driver->domains, vm);
ret = 0;
cleanup:
virDomainObjEndAPI(&vm);
vmwareDriverUnlock(driver);
return ret;
}
static int
vmwareDomainShutdown(virDomainPtr dom)
{
return vmwareDomainShutdownFlags(dom, 0);
}
static int
vmwareDomainDestroy(virDomainPtr dom)
{
return vmwareDomainShutdownFlags(dom, 0);
}
static int
vmwareDomainDestroyFlags(virDomainPtr dom,
unsigned int flags)
{
return vmwareDomainShutdownFlags(dom, flags);
}
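/*
 * Suspend maps onto "vmrun pause" and resume onto "vmrun unpause"; both
 * are rejected for the Player backend, which does not implement them.
 */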
static int
vmwareDomainSuspend(virDomainPtr dom)
{
struct vmware_driver *driver = dom->conn->privateData;
virDomainObjPtr vm;
const char *cmd[] = {
driver->vmrun, "-T", PROGRAM_SENTINEL, "pause",
PROGRAM_SENTINEL, NULL
};
int ret = -1;
if (driver->type == VMWARE_DRIVER_PLAYER) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("vmplayer does not support libvirt suspend/resume"
" (vmware pause/unpause) operation "));
return ret;
}
if (!(vm = vmwareDomObjFromDomain(driver, dom->uuid)))
return -1;
vmwareSetSentinal(cmd, vmwareDriverTypeToString(driver->type));
vmwareSetSentinal(cmd, ((vmwareDomainPtr) vm->privateData)->vmxPath);
if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("domain is not in running state"));
goto cleanup;
}
if (virRun(cmd, NULL) < 0)
goto cleanup;
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER);
ret = 0;
cleanup:
virDomainObjEndAPI(&vm);
return ret;
}
static int
vmwareDomainResume(virDomainPtr dom)
{
struct vmware_driver *driver = dom->conn->privateData;
virDomainObjPtr vm;
const char *cmd[] = {
driver->vmrun, "-T", PROGRAM_SENTINEL, "unpause", PROGRAM_SENTINEL,
NULL
};
int ret = -1;
if (driver->type == VMWARE_DRIVER_PLAYER) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("vmplayer does not support libvirt suspend/resume "
"(vmware pause/unpause) operation "));
return ret;
}
if (!(vm = vmwareDomObjFromDomain(driver, dom->uuid)))
return -1;
vmwareSetSentinal(cmd, vmwareDriverTypeToString(driver->type));
vmwareSetSentinal(cmd, ((vmwareDomainPtr) vm->privateData)->vmxPath);
if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_PAUSED) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("domain is not in suspend state"));
goto cleanup;
}
if (virRun(cmd, NULL) < 0)
goto cleanup;
virDomainObjSetState(vm, VIR_DOMAIN_RUNNING, VIR_DOMAIN_RUNNING_UNPAUSED);
ret = 0;
cleanup:
virDomainObjEndAPI(&vm);
return ret;
}
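/* Reboot a running domain with "vmrun ... reset <vmx> soft". */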
static int
vmwareDomainReboot(virDomainPtr dom, unsigned int flags)
{
struct vmware_driver *driver = dom->conn->privateData;
const char * vmxPath = NULL;
virDomainObjPtr vm;
const char *cmd[] = {
driver->vmrun, "-T", PROGRAM_SENTINEL,
"reset", PROGRAM_SENTINEL, "soft", NULL
};
int ret = -1;
virCheckFlags(0, -1);
if (!(vm = vmwareDomObjFromDomain(driver, dom->uuid)))
return -1;
vmxPath = ((vmwareDomainPtr) vm->privateData)->vmxPath;
vmwareSetSentinal(cmd, vmwareDriverTypeToString(driver->type));
vmwareSetSentinal(cmd, vmxPath);
if (vmwareUpdateVMStatus(driver, vm) < 0)
goto cleanup;
if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("domain is not in running state"));
goto cleanup;
}
if (virRun(cmd, NULL) < 0)
goto cleanup;
ret = 0;
cleanup:
virDomainObjEndAPI(&vm);
return ret;
}
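/*
 * Create and start a transient domain from XML: write the generated .vmx
 * file, add the domain to the list as live and boot it with
 * vmwareStartVM().  On failure a non-persistent domain is removed again.
 */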
static virDomainPtr
vmwareDomainCreateXML(virConnectPtr conn, const char *xml,
unsigned int flags)
{
struct vmware_driver *driver = conn->privateData;
virDomainDefPtr vmdef = NULL;
virDomainObjPtr vm = NULL;
virDomainPtr dom = NULL;
char *vmx = NULL;
char *vmxPath = NULL;
vmwareDomainPtr pDomain = NULL;
virVMXContext ctx;
unsigned int parse_flags = VIR_DOMAIN_DEF_PARSE_INACTIVE;
virCheckFlags(VIR_DOMAIN_START_VALIDATE, NULL);
if (flags & VIR_DOMAIN_START_VALIDATE)
parse_flags |= VIR_DOMAIN_DEF_PARSE_VALIDATE_SCHEMA;
ctx.parseFileName = NULL;
ctx.formatFileName = vmwareCopyVMXFileName;
ctx.autodetectSCSIControllerModel = NULL;
ctx.datacenterPath = NULL;
vmwareDriverLock(driver);
if ((vmdef = virDomainDefParseString(xml, driver->caps, driver->xmlopt,
NULL, parse_flags)) == NULL)
goto cleanup;
/* generate vmx file */
vmx = virVMXFormatConfig(&ctx, driver->xmlopt, vmdef, 7);
if (vmx == NULL)
goto cleanup;
if (vmwareVmxPath(vmdef, &vmxPath) < 0)
goto cleanup;
/* create vmx file */
if (virFileWriteStr(vmxPath, vmx, S_IRUSR|S_IWUSR) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Failed to write vmx file '%s'"), vmxPath);
goto cleanup;
}
/* assign def */
if (!(vm = virDomainObjListAdd(driver->domains,
vmdef,
driver->xmlopt,
VIR_DOMAIN_OBJ_LIST_ADD_LIVE |
VIR_DOMAIN_OBJ_LIST_ADD_CHECK_LIVE,
NULL)))
goto cleanup;
pDomain = vm->privateData;
pDomain->vmxPath = g_strdup(vmxPath);
vmwareDomainConfigDisplay(pDomain, vmdef);
vmdef = NULL;
if (vmwareStartVM(driver, vm) < 0) {
if (!vm->persistent)
virDomainObjListRemove(driver->domains, vm);
goto cleanup;
}
dom = virGetDomain(conn, vm->def->name, vm->def->uuid, vm->def->id);
cleanup:
virDomainDefFree(vmdef);
VIR_FREE(vmx);
VIR_FREE(vmxPath);
virDomainObjEndAPI(&vm);
vmwareDriverUnlock(driver);
return dom;
}
static int
vmwareDomainCreateWithFlags(virDomainPtr dom,
unsigned int flags)
{
struct vmware_driver *driver = dom->conn->privateData;
virDomainObjPtr vm;
int ret = -1;
virCheckFlags(0, -1);
vmwareDriverLock(driver);
if (!(vm = vmwareDomObjFromDomainLocked(driver, dom->uuid)))
goto cleanup;
if (vmwareUpdateVMStatus(driver, vm) < 0)
goto cleanup;
if (virDomainObjIsActive(vm)) {
virReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("Domain is already running"));
goto cleanup;
}
ret = vmwareStartVM(driver, vm);
cleanup:
virDomainObjEndAPI(&vm);
vmwareDriverUnlock(driver);
return ret;
}
static int
vmwareDomainCreate(virDomainPtr dom)
{
return vmwareDomainCreateWithFlags(dom, 0);
}
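/*
 * Undefine a persistent domain.  A running domain merely becomes
 * transient; an inactive one is removed from the domain list.  The .vmx
 * file on disk is left untouched.
 */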
static int
vmwareDomainUndefineFlags(virDomainPtr dom,
unsigned int flags)
{
struct vmware_driver *driver = dom->conn->privateData;
virDomainObjPtr vm;
int ret = -1;
virCheckFlags(0, -1);
vmwareDriverLock(driver);
if (!(vm = vmwareDomObjFromDomainLocked(driver, dom->uuid)))
goto cleanup;
if (!vm->persistent) {
virReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("cannot undefine transient domain"));
goto cleanup;
}
if (vmwareUpdateVMStatus(driver, vm) < 0)
goto cleanup;
if (virDomainObjIsActive(vm))
vm->persistent = 0;
else
virDomainObjListRemove(driver->domains, vm);
ret = 0;
cleanup:
virDomainObjEndAPI(&vm);
vmwareDriverUnlock(driver);
return ret;
}
static int
vmwareDomainUndefine(virDomainPtr dom)
{
return vmwareDomainUndefineFlags(dom, 0);
}
static virDomainPtr
vmwareDomainLookupByID(virConnectPtr conn, int id)
{
struct vmware_driver *driver = conn->privateData;
virDomainObjPtr vm;
virDomainPtr dom = NULL;
vmwareDriverLock(driver);
vm = virDomainObjListFindByID(driver->domains, id);
vmwareDriverUnlock(driver);
if (!vm) {
virReportError(VIR_ERR_NO_DOMAIN,
_("no domain with matching id '%d'"), id);
goto cleanup;
}
dom = virGetDomain(conn, vm->def->name, vm->def->uuid, vm->def->id);
cleanup:
virDomainObjEndAPI(&vm);
return dom;
}
static char *
vmwareDomainGetOSType(virDomainPtr dom)
{
struct vmware_driver *driver = dom->conn->privateData;
virDomainObjPtr vm;
char *ret = NULL;
if (!(vm = vmwareDomObjFromDomain(driver, dom->uuid)))
return NULL;
ret = g_strdup(virDomainOSTypeToString(vm->def->os.type));
virDomainObjEndAPI(&vm);
return ret;
}
static virDomainPtr
vmwareDomainLookupByUUID(virConnectPtr conn, const unsigned char *uuid)
{
struct vmware_driver *driver = conn->privateData;
virDomainObjPtr vm;
virDomainPtr dom = NULL;
if (!(vm = vmwareDomObjFromDomain(driver, uuid)))
return NULL;
dom = virGetDomain(conn, vm->def->name, vm->def->uuid, vm->def->id);
virDomainObjEndAPI(&vm);
return dom;
}
static virDomainPtr
vmwareDomainLookupByName(virConnectPtr conn, const char *name)
{
struct vmware_driver *driver = conn->privateData;
virDomainObjPtr vm;
virDomainPtr dom = NULL;
vmwareDriverLock(driver);
vm = virDomainObjListFindByName(driver->domains, name);
vmwareDriverUnlock(driver);
if (!vm) {
virReportError(VIR_ERR_NO_DOMAIN,
_("no domain with matching name '%s'"), name);
goto cleanup;
}
dom = virGetDomain(conn, vm->def->name, vm->def->uuid, vm->def->id);
cleanup:
virDomainObjEndAPI(&vm);
return dom;
}
static int
vmwareDomainIsActive(virDomainPtr dom)
{
struct vmware_driver *driver = dom->conn->privateData;
virDomainObjPtr obj;
int ret = -1;
if (!(obj = vmwareDomObjFromDomain(driver, dom->uuid)))
return -1;
ret = virDomainObjIsActive(obj);
virDomainObjEndAPI(&obj);
return ret;
}
static int
vmwareDomainIsPersistent(virDomainPtr dom)
{
struct vmware_driver *driver = dom->conn->privateData;
virDomainObjPtr obj;
int ret = -1;
if (!(obj = vmwareDomObjFromDomain(driver, dom->uuid)))
return -1;
ret = obj->persistent;
virDomainObjEndAPI(&obj);
return ret;
}
static char *
vmwareDomainGetXMLDesc(virDomainPtr dom, unsigned int flags)
{
struct vmware_driver *driver = dom->conn->privateData;
virDomainObjPtr vm;
char *ret = NULL;
virCheckFlags(VIR_DOMAIN_XML_COMMON_FLAGS, NULL);
if (!(vm = vmwareDomObjFromDomain(driver, dom->uuid)))
return NULL;
ret = virDomainDefFormat(vm->def, driver->xmlopt,
virDomainDefFormatConvertXMLFlags(flags));
virDomainObjEndAPI(&vm);
return ret;
}
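/*
 * Convert a native VMware VMX config (VMX_CONFIG_FORMAT_ARGV) into
 * libvirt domain XML using the shared VMX parser.
 */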
static char *
vmwareConnectDomainXMLFromNative(virConnectPtr conn, const char *nativeFormat,
const char *nativeConfig,
unsigned int flags)
{
struct vmware_driver *driver = conn->privateData;
virVMXContext ctx;
virDomainDefPtr def = NULL;
char *xml = NULL;
virCheckFlags(0, NULL);
if (STRNEQ(nativeFormat, VMX_CONFIG_FORMAT_ARGV)) {
virReportError(VIR_ERR_INVALID_ARG,
_("Unsupported config format '%s'"), nativeFormat);
return NULL;
}
ctx.parseFileName = vmwareCopyVMXFileName;
ctx.formatFileName = NULL;
ctx.autodetectSCSIControllerModel = NULL;
ctx.datacenterPath = NULL;
def = virVMXParseConfig(&ctx, driver->xmlopt, driver->caps, nativeConfig);
if (def != NULL)
xml = virDomainDefFormat(def, driver->xmlopt,
VIR_DOMAIN_DEF_FORMAT_INACTIVE);
virDomainDefFree(def);
return xml;
}
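/*
 * Helpers that refresh the state of every domain in the list so the
 * list/count APIs below report up-to-date results.
 */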
static int vmwareDomainObjListUpdateDomain(virDomainObjPtr dom, void *data)
{
struct vmware_driver *driver = data;
virObjectLock(dom);
ignore_value(vmwareUpdateVMStatus(driver, dom));
virObjectUnlock(dom);
return 0;
}
static void
vmwareDomainObjListUpdateAll(virDomainObjListPtr doms, struct vmware_driver *driver)
{
virDomainObjListForEach(doms, false, vmwareDomainObjListUpdateDomain, driver);
}
static int
vmwareConnectNumOfDefinedDomains(virConnectPtr conn)
{
struct vmware_driver *driver = conn->privateData;
int n;
vmwareDriverLock(driver);
vmwareDomainObjListUpdateAll(driver->domains, driver);
n = virDomainObjListNumOfDomains(driver->domains, false, NULL, NULL);
vmwareDriverUnlock(driver);
return n;
}
static int
vmwareConnectNumOfDomains(virConnectPtr conn)
{
struct vmware_driver *driver = conn->privateData;
int n;
vmwareDriverLock(driver);
vmwareDomainObjListUpdateAll(driver->domains, driver);
n = virDomainObjListNumOfDomains(driver->domains, true, NULL, NULL);
vmwareDriverUnlock(driver);
return n;
}
static int
vmwareConnectListDomains(virConnectPtr conn, int *ids, int nids)
{
struct vmware_driver *driver = conn->privateData;
int n;
vmwareDriverLock(driver);
vmwareDomainObjListUpdateAll(driver->domains, driver);
n = virDomainObjListGetActiveIDs(driver->domains, ids, nids, NULL, NULL);
vmwareDriverUnlock(driver);
return n;
}
static int
vmwareConnectListDefinedDomains(virConnectPtr conn,
char **const names, int nnames)
{
struct vmware_driver *driver = conn->privateData;
int n;
vmwareDriverLock(driver);
vmwareDomainObjListUpdateAll(driver->domains, driver);
n = virDomainObjListGetInactiveNames(driver->domains, names, nnames,
NULL, NULL);
vmwareDriverUnlock(driver);
return n;
}
static int
vmwareDomainGetInfo(virDomainPtr dom, virDomainInfoPtr info)
{
struct vmware_driver *driver = dom->conn->privateData;
virDomainObjPtr vm;
int ret = -1;
if (!(vm = vmwareDomObjFromDomain(driver, dom->uuid)))
return -1;
if (vmwareUpdateVMStatus(driver, vm) < 0)
goto cleanup;
info->state = virDomainObjGetState(vm, NULL);
info->cpuTime = 0;
info->maxMem = virDomainDefGetMemoryTotal(vm->def);
info->memory = vm->def->mem.cur_balloon;
info->nrVirtCpu = virDomainDefGetVcpus(vm->def);
ret = 0;
cleanup:
virDomainObjEndAPI(&vm);
return ret;
}
static int
vmwareDomainGetState(virDomainPtr dom,
int *state,
int *reason,
unsigned int flags)
{
struct vmware_driver *driver = dom->conn->privateData;
virDomainObjPtr vm;
int ret = -1;
virCheckFlags(0, -1);
if (!(vm = vmwareDomObjFromDomain(driver, dom->uuid)))
return -1;
if (vmwareUpdateVMStatus(driver, vm) < 0)
goto cleanup;
*state = virDomainObjGetState(vm, reason);
ret = 0;
cleanup:
virDomainObjEndAPI(&vm);
return ret;
}
static int
vmwareConnectIsAlive(virConnectPtr conn G_GNUC_UNUSED)
{
return 1;
}
static int
vmwareConnectListAllDomains(virConnectPtr conn,
virDomainPtr **domains,
unsigned int flags)
{
struct vmware_driver *driver = conn->privateData;
int ret = -1;
virCheckFlags(VIR_CONNECT_LIST_DOMAINS_FILTERS_ALL, -1);
vmwareDriverLock(driver);
vmwareDomainObjListUpdateAll(driver->domains, driver);
ret = virDomainObjListExport(driver->domains, conn, domains,
NULL, flags);
vmwareDriverUnlock(driver);
return ret;
}
static int
vmwareDomainHasManagedSaveImage(virDomainPtr dom, unsigned int flags)
{
struct vmware_driver *driver = dom->conn->privateData;
virDomainObjPtr obj;
int ret = -1;
virCheckFlags(0, -1);
if (!(obj = vmwareDomObjFromDomain(driver, dom->uuid)))
return -1;
ret = 0;
virDomainObjEndAPI(&obj);
return ret;
}
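/*
 * Driver method table; the comment after each entry records the libvirt
 * release in which the method first appeared.
 */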
static virHypervisorDriver vmwareHypervisorDriver = {
.name = "VMWARE",
.connectOpen = vmwareConnectOpen, /* 0.8.7 */
.connectClose = vmwareConnectClose, /* 0.8.7 */
.connectGetType = vmwareConnectGetType, /* 0.8.7 */
.connectGetVersion = vmwareConnectGetVersion, /* 0.8.7 */
.connectListDomains = vmwareConnectListDomains, /* 0.8.7 */
.connectNumOfDomains = vmwareConnectNumOfDomains, /* 0.8.7 */
.connectListAllDomains = vmwareConnectListAllDomains, /* 0.9.13 */
.domainCreateXML = vmwareDomainCreateXML, /* 0.8.7 */
.domainLookupByID = vmwareDomainLookupByID, /* 0.8.7 */
.domainLookupByUUID = vmwareDomainLookupByUUID, /* 0.8.7 */
.domainLookupByName = vmwareDomainLookupByName, /* 0.8.7 */
.domainSuspend = vmwareDomainSuspend, /* 0.8.7 */
.domainResume = vmwareDomainResume, /* 0.8.7 */
.domainShutdown = vmwareDomainShutdown, /* 0.8.7 */
.domainShutdownFlags = vmwareDomainShutdownFlags, /* 0.9.10 */
.domainReboot = vmwareDomainReboot, /* 0.8.7 */
.domainDestroy = vmwareDomainDestroy, /* 0.8.7 */
.domainDestroyFlags = vmwareDomainDestroyFlags, /* 0.9.4 */
.domainGetOSType = vmwareDomainGetOSType, /* 0.8.7 */
.domainGetInfo = vmwareDomainGetInfo, /* 0.8.7 */
.domainGetState = vmwareDomainGetState, /* 0.9.2 */
.domainGetXMLDesc = vmwareDomainGetXMLDesc, /* 0.8.7 */
.connectDomainXMLFromNative = vmwareConnectDomainXMLFromNative, /* 0.9.11 */
.connectListDefinedDomains = vmwareConnectListDefinedDomains, /* 0.8.7 */
.connectNumOfDefinedDomains = vmwareConnectNumOfDefinedDomains, /* 0.8.7 */
.domainCreate = vmwareDomainCreate, /* 0.8.7 */
.domainCreateWithFlags = vmwareDomainCreateWithFlags, /* 0.8.7 */
.domainDefineXML = vmwareDomainDefineXML, /* 0.8.7 */
.domainDefineXMLFlags = vmwareDomainDefineXMLFlags, /* 1.2.12 */
.domainUndefine = vmwareDomainUndefine, /* 0.8.7 */
.domainUndefineFlags = vmwareDomainUndefineFlags, /* 0.9.4 */
.domainIsActive = vmwareDomainIsActive, /* 0.8.7 */
.domainIsPersistent = vmwareDomainIsPersistent, /* 0.8.7 */
.connectIsAlive = vmwareConnectIsAlive, /* 0.9.8 */
.domainHasManagedSaveImage = vmwareDomainHasManagedSaveImage, /* 1.2.13 */
};
static virConnectDriver vmwareConnectDriver = {
.localOnly = true,
.uriSchemes = (const char *[]){ "vmwareplayer", "vmwarews", "vmwarefusion", NULL },
.hypervisorDriver = &vmwareHypervisorDriver,
};
int
vmwareRegister(void)
{
return virRegisterConnectDriver(&vmwareConnectDriver,
false);
}