libvirt/src/storage/storage_backend_scsi.c


/*
* storage_backend_scsi.c: storage backend for SCSI handling
*
* Copyright (C) 2007-2008, 2013-2014 Red Hat, Inc.
* Copyright (C) 2007-2008 Daniel P. Berrange
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see
* <http://www.gnu.org/licenses/>.
*
* Author: Daniel P. Berrange <berrange redhat com>
*/
#include <config.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include "virerror.h"
#include "storage_backend_scsi.h"
#include "viralloc.h"
#include "virlog.h"
#include "virfile.h"
#include "vircommand.h"
#include "virstring.h"
#include "storage_util.h"
#include "node_device_conf.h"
#define VIR_FROM_THIS VIR_FROM_STORAGE
VIR_LOG_INIT("storage.storage_backend_scsi");
#define LINUX_SYSFS_SCSI_HOST_PREFIX "/sys/class/scsi_host"
#define LINUX_SYSFS_SCSI_HOST_POSTFIX "device"
#define LINUX_SYSFS_SCSI_HOST_SCAN_STRING "- - -"
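/**
* Opaque data handed to the FC refresh thread: the fc_host name to
* rescan and the UUID of the pool whose volumes should be refreshed.
*/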
typedef struct _virStoragePoolFCRefreshInfo virStoragePoolFCRefreshInfo;
typedef virStoragePoolFCRefreshInfo *virStoragePoolFCRefreshInfoPtr;
struct _virStoragePoolFCRefreshInfo {
char *fchost_name;
unsigned char pool_uuid[VIR_UUID_BUFLEN];
};
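/**
* Trigger a rescan of SCSI host @host by writing "- - -" to
* /sys/class/scsi_host/host<N>/scan.
*
* @host SCSI host number
*
* Returns 0 on success, -1 on failure.
*/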
static int
virStorageBackendSCSITriggerRescan(uint32_t host)
{
int fd = -1;
int retval = 0;
char *path;
VIR_DEBUG("Triggering rescan of host %d", host);
if (virAsprintf(&path, "%s/host%u/scan",
LINUX_SYSFS_SCSI_HOST_PREFIX, host) < 0) {
retval = -1;
goto out;
}
VIR_DEBUG("Scan trigger path is '%s'", path);
fd = open(path, O_WRONLY);
if (fd < 0) {
virReportSystemError(errno,
_("Could not open '%s' to trigger host scan"),
path);
retval = -1;
goto free_path;
}
if (safewrite(fd,
LINUX_SYSFS_SCSI_HOST_SCAN_STRING,
sizeof(LINUX_SYSFS_SCSI_HOST_SCAN_STRING)) < 0) {
VIR_FORCE_CLOSE(fd);
virReportSystemError(errno,
_("Write to '%s' to trigger host scan failed"),
path);
retval = -1;
}
VIR_FORCE_CLOSE(fd);
free_path:
VIR_FREE(path);
out:
VIR_DEBUG("Rescan of host %d complete", host);
return retval;
}
/**
* Frees opaque data
*
* @opaque Data to be freed
*/
static void
virStoragePoolFCRefreshDataFree(void *opaque)
{
virStoragePoolFCRefreshInfoPtr cbdata = opaque;
VIR_FREE(cbdata->fchost_name);
VIR_FREE(cbdata);
}
/**
* Thread to handle the pool refresh after a VPORT_CREATE is done. In this
* case 'udevEventHandleCallback' runs asynchronously in the node device
* driver when udev notices a device change (such as adding a new device). It takes
* some amount of time (usually a few seconds) for udev to go through the
* process of setting up the new device. Unfortunately, there is nothing
* that says "when" it's done. The immediate virStorageBackendSCSIRefreshPool
* done after virStorageBackendSCSIStartPool (and createVport) occurs too
* quickly to find any devices.
*
* So this thread is designed to wait a few seconds (5), then make the query
* to find the LUs for the pool. If none yet exist, we'll try once more
* to find the LUs before giving up.
*
* Attempting to find devices prior to allowing udev to settle down may result
* in finding devices that then get deleted.
*
* @opaque Pool refresh info containing the fc_host name and the pool's UUID
*/
static void
virStoragePoolFCRefreshThread(void *opaque)
{
virStoragePoolFCRefreshInfoPtr cbdata = opaque;
const char *fchost_name = cbdata->fchost_name;
const unsigned char *pool_uuid = cbdata->pool_uuid;
virStoragePoolObjPtr pool = NULL;
unsigned int host;
int found = 0;
int tries = 2;
do {
sleep(5); /* Give it time */
/* Let's see if the pool still exists - */
if (!(pool = virStoragePoolObjFindPoolByUUID(pool_uuid)))
break;
/* The pool is returned locked; if it is active we can get the host
* number, trigger a rescan, and look for the LUs
*/
VIR_DEBUG("Attempt FC Refresh for pool='%s' name='%s' tries='%d'",
pool->def->name, fchost_name, tries);
pool->def->allocation = pool->def->capacity = pool->def->available = 0;
if (virStoragePoolObjIsActive(pool) &&
virSCSIHostGetNumber(fchost_name, &host) == 0 &&
virStorageBackendSCSITriggerRescan(host) == 0) {
virStoragePoolObjClearVols(pool);
found = virStorageBackendSCSIFindLUs(pool, host);
}
virStoragePoolObjUnlock(pool);
} while (!found && --tries);
if (pool && !found)
VIR_DEBUG("FC Refresh Thread failed to find LU's");
virStoragePoolFCRefreshDataFree(cbdata);
}
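/**
* Resolve the SCSI host name ("hostN") for the pool's source adapter:
* either looked up via the parent PCI address, copied from the configured
* scsi_host name, or taken from the vHBA matching the fc_host wwnn/wwpn.
*
* Returns the name (caller frees) on success, NULL on failure.
*/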
static char *
getAdapterName(virStorageAdapterPtr adapter)
{
char *name = NULL;
char *parentaddr = NULL;
if (adapter->type == VIR_STORAGE_ADAPTER_TYPE_SCSI_HOST) {
virStorageAdapterSCSIHostPtr scsi_host = &adapter->data.scsi_host;
if (scsi_host->has_parent) {
virPCIDeviceAddressPtr addr = &scsi_host->parentaddr;
unsigned int unique_id = scsi_host->unique_id;
if (!(name = virSCSIHostGetNameByParentaddr(addr->domain,
addr->bus,
addr->slot,
addr->function,
unique_id)))
goto cleanup;
} else {
ignore_value(VIR_STRDUP(name, scsi_host->name));
}
} else if (adapter->type == VIR_STORAGE_ADAPTER_TYPE_FC_HOST) {
virStorageAdapterFCHostPtr fchost = &adapter->data.fchost;
if (!(name = virVHBAGetHostByWWN(NULL, fchost->wwnn, fchost->wwpn))) {
virReportError(VIR_ERR_XML_ERROR,
_("Failed to find SCSI host with wwnn='%s', "
"wwpn='%s'"), fchost->wwnn, fchost->wwpn);
}
}
cleanup:
VIR_FREE(parentaddr);
return name;
}
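/**
* Create the vHBA vport for an fc_host pool, mark it as managed (saving
* the persistent configuration if needed), and spawn a thread that retries
* the LU scan once udev has had time to settle.
*
* Returns 0 on success, -1 on failure.
*/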
static int
createVport(virConnectPtr conn,
virStoragePoolDefPtr def,
const char *configFile,
virStorageAdapterFCHostPtr fchost)
{
char *name = NULL;
virStoragePoolFCRefreshInfoPtr cbdata = NULL;
virThread thread;
int ret = -1;
VIR_DEBUG("conn=%p, configFile='%s' parent='%s', wwnn='%s' wwpn='%s'",
conn, NULLSTR(configFile), NULLSTR(fchost->parent),
fchost->wwnn, fchost->wwpn);
/* Since we're creating the vHBA, we also need to manage removing it.
* Because this setting needs to "live" through a libvirtd restart, save
* it in the persistent configuration. So if not already defined as YES,
* force the issue.
*/
if (fchost->managed != VIR_TRISTATE_BOOL_YES) {
fchost->managed = VIR_TRISTATE_BOOL_YES;
if (configFile) {
if (virStoragePoolSaveConfig(configFile, def) < 0)
goto cleanup;
}
}
if (!(name = virNodeDeviceCreateVport(conn, fchost)))
goto cleanup;
/* Creating our own vport didn't leave enough time to find any LUs, so
* spawn a thread that searches for the LUs with retry logic. If the
* thread can't be created it's no big deal, since the pool can still be
* refreshed later.
*/
if (VIR_ALLOC(cbdata) == 0) {
memcpy(cbdata->pool_uuid, def->uuid, VIR_UUID_BUFLEN);
VIR_STEAL_PTR(cbdata->fchost_name, name);
if (virThreadCreate(&thread, false, virStoragePoolFCRefreshThread,
cbdata) < 0) {
/* Oh well - at least someone can still refresh afterwards */
VIR_DEBUG("Failed to create FC Pool Refresh Thread");
virStoragePoolFCRefreshDataFree(cbdata);
}
}
ret = 0;
cleanup:
VIR_FREE(name);
return ret;
}
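/**
* Check whether the pool's SCSI host is present by testing for
* /sys/class/scsi_host/host<N>; sets *isActive accordingly. For an
* fc_host pool whose vHBA does not exist yet, reports inactive without
* raising an error.
*
* Returns 0 on success, -1 on failure.
*/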
static int
virStorageBackendSCSICheckPool(virStoragePoolObjPtr pool,
bool *isActive)
{
char *path = NULL;
char *name = NULL;
unsigned int host;
int ret = -1;
*isActive = false;
if (!(name = getAdapterName(&pool->def->source.adapter))) {
/* It's normal for a pool with an "fc_host" type source
* adapter to fail to get the adapter name, since the vHBA
* the adapter is based on might not be created yet.
*/
if (pool->def->source.adapter.type ==
VIR_STORAGE_ADAPTER_TYPE_FC_HOST) {
virResetLastError();
return 0;
} else {
return -1;
}
}
if (virSCSIHostGetNumber(name, &host) < 0)
goto cleanup;
if (virAsprintf(&path, "%s/host%d",
LINUX_SYSFS_SCSI_HOST_PREFIX, host) < 0)
goto cleanup;
*isActive = virFileExists(path);
ret = 0;
cleanup:
VIR_FREE(path);
VIR_FREE(name);
return ret;
}
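/**
* Refresh the pool: reset the size counters, trigger a rescan of the
* SCSI host, and repopulate the volume list from the discovered LUs.
*
* Returns 0 on success, -1 on failure.
*/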
static int
virStorageBackendSCSIRefreshPool(virConnectPtr conn ATTRIBUTE_UNUSED,
virStoragePoolObjPtr pool)
{
char *name = NULL;
unsigned int host;
int ret = -1;
pool->def->allocation = pool->def->capacity = pool->def->available = 0;
if (!(name = getAdapterName(&pool->def->source.adapter)))
return -1;
if (virSCSIHostGetNumber(name, &host) < 0)
goto out;
VIR_DEBUG("Scanning host%u", host);
if (virStorageBackendSCSITriggerRescan(host) < 0)
goto out;
if (virStorageBackendSCSIFindLUs(pool, host) < 0)
goto out;
ret = 0;
out:
VIR_FREE(name);
return ret;
}
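/* For fc_host pools, starting the pool means creating the vHBA vport;
* other adapter types need no start-time action.
*/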
static int
virStorageBackendSCSIStartPool(virConnectPtr conn,
virStoragePoolObjPtr pool)
{
if (pool->def->source.adapter.type == VIR_STORAGE_ADAPTER_TYPE_FC_HOST)
return createVport(conn, pool->def, pool->configFile,
&pool->def->source.adapter.data.fchost);
return 0;
}
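/* For fc_host pools, stopping the pool tears down the vHBA vport via
* virNodeDeviceDeleteVport; other adapter types need no stop-time action.
*/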
static int
virStorageBackendSCSIStopPool(virConnectPtr conn,
virStoragePoolObjPtr pool)
{
if (pool->def->source.adapter.type == VIR_STORAGE_ADAPTER_TYPE_FC_HOST)
return virNodeDeviceDeleteVport(conn,
&pool->def->source.adapter.data.fchost);
return 0;
}
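/* SCSI pool backend callback table, registered below. */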
virStorageBackend virStorageBackendSCSI = {
.type = VIR_STORAGE_POOL_SCSI,
.checkPool = virStorageBackendSCSICheckPool,
.refreshPool = virStorageBackendSCSIRefreshPool,
.startPool = virStorageBackendSCSIStartPool,
.stopPool = virStorageBackendSCSIStopPool,
.uploadVol = virStorageBackendVolUploadLocal,
.downloadVol = virStorageBackendVolDownloadLocal,
.wipeVol = virStorageBackendVolWipeLocal,
};
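/* Register the SCSI backend with the storage driver. */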
int
virStorageBackendSCSIRegister(void)
{
return virStorageBackendRegister(&virStorageBackendSCSI);
}