Using the Lock Manager APIs
===========================

This file describes how to use the lock manager APIs. All the guest
lifecycle sequences here have higher level wrappers provided by the
'domain_lock.h' API, which simplify their usage.

At libvirtd startup:

  plugin = virLockManagerPluginLoad("sync-manager");

At libvirtd shutdown:

  virLockManagerPluginUnload(plugin)

At guest startup:

  manager = virLockManagerNew(plugin,
                              VIR_LOCK_MANAGER_OBJECT_DOMAIN,
                              0);
  virLockManagerSetParameter(manager, "id", id);
  virLockManagerSetParameter(manager, "uuid", uuid);
  virLockManagerSetParameter(manager, "name", name);

  foreach disk
    virLockManagerRegisterResource(manager,
                                   VIR_LOCK_MANAGER_RESOURCE_TYPE_DISK,
                                   disk.path,
                                   ..flags...);

  if (!virLockManagerAcquireObject(manager))
    abort..

  run QEMU

At guest shutdown:

  ...send QEMU 'quit' monitor command, and/or kill(qemupid)...

  if (!virLockManagerShutdown(manager))
    kill(supervisorpid); /* XXX or leave it running ??? */

  virLockManagerFree(manager);

At libvirtd restart with running guests:

  foreach still running guest
    manager = virLockManagerNew(driver,
                                VIR_LOCK_MANAGER_START_DOMAIN,
                                VIR_LOCK_MANAGER_NEW_ATTACH);
    virLockManagerSetParameter(manager, "id", id);
    virLockManagerSetParameter(manager, "uuid", uuid);
    virLockManagerSetParameter(manager, "name", name);

    if (!virLockManagerGetChild(manager, &qemupid))
      kill(supervisorpid); /* XXX or leave it running ??? */

With disk hotplug:

  if (virLockManagerAcquireResource(manager,
                                    VIR_LOCK_MANAGER_RESOURCE_TYPE_DISK,
                                    disk.path,
                                    ..flags..))
    ...abort hotplug attempt...

  ...hotplug the device...

With disk unhotplug:

  ...hotunplug the device...

  if (virLockManagerReleaseResource(manager,
                                    VIR_LOCK_MANAGER_RESOURCE_TYPE_DISK,
                                    disk.path,
                                    ..flags..))
    ...log warning...

During migration:

  1. On source host

     if (!virLockManagerPrepareMigrate(manager, hosturi))
       ..don't start migration..

  2. On dest host

     manager = virLockManagerNew(driver,
                                 VIR_LOCK_MANAGER_START_DOMAIN,
                                 VIR_LOCK_MANAGER_NEW_MIGRATE);
     virLockManagerSetParameter(manager, "id", id);
     virLockManagerSetParameter(manager, "uuid", uuid);
     virLockManagerSetParameter(manager, "name", name);

     foreach disk
       virLockManagerRegisterResource(manager,
                                      VIR_LOCK_MANAGER_RESOURCE_TYPE_DISK,
                                      disk.path,
                                      ..flags...);

     char **supervisorargv;
     int supervisorargc;

     supervisor = virLockManagerGetSupervisorPath(manager);
     virLockManagerGetSupervisorArgs(&supervisorargv, &supervisorargc);

     cmd = qemuBuildCommandLine(supervisor, supervisorargv,
                                supervisorargc);

     supervisorpid = virCommandExec(cmd);

     if (!virLockManagerGetChild(manager, &qemupid))
       kill(supervisorpid); /* XXX or leave it running ??? */

  3. Initiate migration in QEMU on source and wait for completion

  4a. On failure

    4a1 On target

      virLockManagerCompleteMigrateIn(manager,
                                      VIR_LOCK_MANAGER_MIGRATE_CANCEL);
      virLockManagerShutdown(manager);
      virLockManagerFree(manager);

    4a2 On source

      virLockManagerCompleteMigrateOut(manager,
                                       VIR_LOCK_MANAGER_MIGRATE_CANCEL);

  4b. On success

    4b1 On target

      virLockManagerCompleteMigrateIn(manager, 0);

    4b2 On source

      virLockManagerCompleteMigrateOut(manager, 0);
      virLockManagerShutdown(manager);
      virLockManagerFree(manager);

Notes:

 - If a lock manager impl does just VM level leases, it can
   ignore all the resource paths at startup.

 - If a lock manager impl does not support migration, it can
   return an error from all migrate calls.

 - If a lock manager impl does not support hotplug, it can
   return an error from all resource acquire/release calls.
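
As a worked illustration, the sketch below puts the "guest startup"
sequence from above into a single C function. It deliberately follows
the simplified call signatures used by the pseudocode in this file
rather than the exact prototypes in lock_manager.h, and the
qemuDomainStartWithLeases() wrapper together with the myDomainDef
structure are hypothetical placeholders, not real libvirt code.

  #include <stdbool.h>
  #include <stddef.h>

  #include "lock_manager.h"   /* virLockManager* declarations (in-tree header) */

  /* Hypothetical, minimal view of a domain definition; real code
   * would use virDomainDefPtr from the libvirt source tree. */
  typedef struct {
      const char *id;
      const char *uuid;
      const char *name;
      size_t ndisks;
      const char **diskpaths;
  } myDomainDef;

  static bool
  qemuDomainStartWithLeases(virLockManagerPluginPtr plugin,
                            const myDomainDef *def)
  {
      virLockManagerPtr manager;
      size_t i;

      /* One lock manager object per domain */
      manager = virLockManagerNew(plugin,
                                  VIR_LOCK_MANAGER_OBJECT_DOMAIN,
                                  0);
      if (!manager)
          return false;

      /* Identify the domain the leases belong to */
      virLockManagerSetParameter(manager, "id", def->id);
      virLockManagerSetParameter(manager, "uuid", def->uuid);
      virLockManagerSetParameter(manager, "name", def->name);

      /* Register every disk before acquiring the leases */
      for (i = 0; i < def->ndisks; i++)
          virLockManagerRegisterResource(manager,
                                         VIR_LOCK_MANAGER_RESOURCE_TYPE_DISK,
                                         def->diskpaths[i],
                                         0);

      if (!virLockManagerAcquireObject(manager)) {
          /* Do not start QEMU if the leases could not be acquired */
          virLockManagerFree(manager);
          return false;
      }

      /* ...start QEMU here; keep 'manager' around so the guest
       * shutdown sequence can call virLockManagerShutdown() and
       * virLockManagerFree()... */
      return true;
  }

In-tree hypervisor drivers would normally not open-code this sequence;
the higher level 'domain_lock.h' wrappers mentioned at the top of this
file bundle it up for them.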