2009-04-01 16:03:22 +00:00
|
|
|
/*
|
|
|
|
* storage_backend_scsi.c: storage backend for SCSI handling
|
|
|
|
*
|
2014-02-12 10:36:06 +00:00
|
|
|
* Copyright (C) 2007-2008, 2013-2014 Red Hat, Inc.
|
2009-04-01 16:03:22 +00:00
|
|
|
* Copyright (C) 2007-2008 Daniel P. Berrange
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
|
|
|
* version 2.1 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
2012-09-20 22:30:55 +00:00
|
|
|
* License along with this library. If not, see
|
2012-07-21 10:06:23 +00:00
|
|
|
* <http://www.gnu.org/licenses/>.
|
2009-04-01 16:03:22 +00:00
|
|
|
*
|
|
|
|
* Author: Daniel P. Berrange <berrange redhat com>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <config.h>
|
|
|
|
|
|
|
|
#include <unistd.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <fcntl.h>
|
|
|
|
|
2012-12-13 18:21:53 +00:00
|
|
|
#include "virerror.h"
|
2009-04-01 16:03:22 +00:00
|
|
|
#include "storage_backend_scsi.h"
|
2012-12-12 18:06:53 +00:00
|
|
|
#include "viralloc.h"
|
2012-12-12 17:59:27 +00:00
|
|
|
#include "virlog.h"
|
2011-07-19 18:32:58 +00:00
|
|
|
#include "virfile.h"
|
2012-12-12 16:27:01 +00:00
|
|
|
#include "vircommand.h"
|
2013-04-03 10:36:23 +00:00
|
|
|
#include "virstring.h"
|
2017-01-11 17:04:15 +00:00
|
|
|
#include "storage_util.h"
|
2017-02-18 13:26:21 +00:00
|
|
|
#include "node_device_conf.h"
|
2009-04-01 16:03:22 +00:00
|
|
|
|
|
|
|
#define VIR_FROM_THIS VIR_FROM_STORAGE
|
|
|
|
|
2014-02-28 12:16:17 +00:00
|
|
|
VIR_LOG_INIT("storage.storage_backend_scsi");
|
|
|
|
|
2017-01-13 16:52:41 +00:00
|
|
|
#define LINUX_SYSFS_SCSI_HOST_PREFIX "/sys/class/scsi_host"
|
|
|
|
#define LINUX_SYSFS_SCSI_HOST_POSTFIX "device"
|
|
|
|
#define LINUX_SYSFS_SCSI_HOST_SCAN_STRING "- - -"
|
|
|
|
|
storage: Add thread to refresh for createVport
https://bugzilla.redhat.com/show_bug.cgi?id=1152382
When libvirt creates the vport (VPORT_CREATE) for the vHBA, there isn't
enough "time" between the creation and the running of the following
backend->refreshPool after a backend->startPool in order to find the LU's.
Population of LU's happens asynchronously when udevEventHandleCallback
discovers the "new" vHBA port. Creation of the infrastructure by udev
is an iterative process creating and discovering actual storage devices and
adjusting the environment.
Because of the time it takes to discover and set things up, the backend
refreshPool call done after the startPool call will generally fail to
find any devices. This leaves the newly started pool appear empty when
querying via 'vol-list' after startup. The "workaround" has always been
to run pool-refresh after startup (or any time thereafter) in order to
find the LU's. Depending on how quickly run after startup, this too may
not find any LUs in the pool. Eventually though given enough time and
retries it will find something if LU's exist for the vHBA.
This patch adds a thread to be executed after the VPORT_CREATE which will
attempt to find the LU's without requiring the external run of refresh-pool.
It does this by waiting for 5 seconds and searching for the LU's. If any
are found, then the thread completes; otherwise, it will retry once more
in another 5 seconds. If none are found in that second pass, the thread
gives up.
Things learned while investigating this... No need to try and fill the
pool too quickly or too many times. Over the course of creation, the udev
code may 'add', 'change', and 'delete' the same device. So if the refresh
code runs and finds something, it may display it only to have a subsequent
refresh appear to "lose" the device. The udev processing doesn't seem to
have a way to indicate that it's all done with the creation processing of a
newly found vHBA. Only the Lone Ranger has silver bullets to fix everything.
2014-11-18 19:51:01 +00:00
|
|
|
/* Opaque payload handed to virStoragePoolFCRefreshThread after a
 * VPORT_CREATE: identifies which fc_host to rescan and which pool
 * (by UUID, so the thread can re-look it up safely) to refresh. */
typedef struct _virStoragePoolFCRefreshInfo virStoragePoolFCRefreshInfo;
typedef virStoragePoolFCRefreshInfo *virStoragePoolFCRefreshInfoPtr;
struct _virStoragePoolFCRefreshInfo {
    char *fchost_name;                        /* owned; e.g. "host5", freed by
                                               * virStoragePoolFCRefreshDataFree */
    unsigned char pool_uuid[VIR_UUID_BUFLEN]; /* raw UUID of the pool to refresh;
                                               * copied, not a pointer, so the
                                               * thread never holds a stale
                                               * pool object reference */
};
|
|
|
|
|
2015-04-17 01:22:35 +00:00
|
|
|
|
2009-04-07 12:50:17 +00:00
|
|
|
static int
|
2010-02-04 20:02:58 +00:00
|
|
|
virStorageBackendSCSITriggerRescan(uint32_t host)
|
2009-04-07 12:50:17 +00:00
|
|
|
{
|
|
|
|
int fd = -1;
|
|
|
|
int retval = 0;
|
|
|
|
char *path;
|
|
|
|
|
2010-05-20 06:57:06 +00:00
|
|
|
VIR_DEBUG("Triggering rescan of host %d", host);
|
2009-04-07 12:50:17 +00:00
|
|
|
|
2014-06-09 16:41:04 +00:00
|
|
|
if (virAsprintf(&path, "%s/host%u/scan",
|
|
|
|
LINUX_SYSFS_SCSI_HOST_PREFIX, host) < 0) {
|
2009-04-07 12:50:17 +00:00
|
|
|
retval = -1;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2010-05-20 06:57:06 +00:00
|
|
|
VIR_DEBUG("Scan trigger path is '%s'", path);
|
2009-04-07 12:50:17 +00:00
|
|
|
|
|
|
|
fd = open(path, O_WRONLY);
|
|
|
|
|
|
|
|
if (fd < 0) {
|
2010-02-04 20:02:58 +00:00
|
|
|
virReportSystemError(errno,
|
2009-04-07 12:50:17 +00:00
|
|
|
_("Could not open '%s' to trigger host scan"),
|
|
|
|
path);
|
|
|
|
retval = -1;
|
|
|
|
goto free_path;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (safewrite(fd,
|
|
|
|
LINUX_SYSFS_SCSI_HOST_SCAN_STRING,
|
|
|
|
sizeof(LINUX_SYSFS_SCSI_HOST_SCAN_STRING)) < 0) {
|
2010-11-09 20:48:48 +00:00
|
|
|
VIR_FORCE_CLOSE(fd);
|
2010-02-04 20:02:58 +00:00
|
|
|
virReportSystemError(errno,
|
2009-04-07 12:50:17 +00:00
|
|
|
_("Write to '%s' to trigger host scan failed"),
|
|
|
|
path);
|
|
|
|
retval = -1;
|
|
|
|
}
|
|
|
|
|
2010-11-09 20:48:48 +00:00
|
|
|
VIR_FORCE_CLOSE(fd);
|
2014-03-25 06:52:40 +00:00
|
|
|
free_path:
|
2009-04-07 12:50:17 +00:00
|
|
|
VIR_FREE(path);
|
2014-03-25 06:52:40 +00:00
|
|
|
out:
|
2010-05-20 06:57:06 +00:00
|
|
|
VIR_DEBUG("Rescan of host %d complete", host);
|
2009-04-07 12:50:17 +00:00
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
storage: Add thread to refresh for createVport
https://bugzilla.redhat.com/show_bug.cgi?id=1152382
When libvirt create's the vport (VPORT_CREATE) for the vHBA, there isn't
enough "time" between the creation and the running of the following
backend->refreshPool after a backend->startPool in order to find the LU's.
Population of LU's happens asynchronously when udevEventHandleCallback
discovers the "new" vHBA port. Creation of the infrastructure by udev
is an iterative process creating and discovering actual storage devices and
adjusting the environment.
Because of the time it takes to discover and set things up, the backend
refreshPool call done after the startPool call will generally fail to
find any devices. This leaves the newly started pool appear empty when
querying via 'vol-list' after startup. The "workaround" has always been
to run pool-refresh after startup (or any time thereafter) in order to
find the LU's. Depending on how quickly run after startup, this too may
not find any LUs in the pool. Eventually though given enough time and
retries it will find something if LU's exist for the vHBA.
This patch adds a thread to be executed after the VPORT_CREATE which will
attempt to find the LU's without requiring the external run of refresh-pool.
It does this by waiting for 5 seconds and searching for the LU's. If any
are found, then the thread completes; otherwise, it will retry once more
in another 5 seconds. If none are found in that second pass, the thread
gives up.
Things learned while investigating this... No need to try and fill the
pool too quickly or too many times. Over the course of creation, the udev
code may 'add', 'change', and 'delete' the same device. So if the refresh
code runs and finds something, it may display it only to have a subsequent
refresh appear to "lose" the device. The udev processing doesn't seem to
have a way to indicate that it's all done with the creation processing of a
newly found vHBA. Only the Lone Ranger has silver bullets to fix everything.
2014-11-18 19:51:01 +00:00
|
|
|
/**
|
|
|
|
* Frees opaque data
|
|
|
|
*
|
|
|
|
* @opaque Data to be freed
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
virStoragePoolFCRefreshDataFree(void *opaque)
|
|
|
|
{
|
|
|
|
virStoragePoolFCRefreshInfoPtr cbdata = opaque;
|
|
|
|
|
2015-11-02 19:46:32 +00:00
|
|
|
VIR_FREE(cbdata->fchost_name);
|
storage: Add thread to refresh for createVport
https://bugzilla.redhat.com/show_bug.cgi?id=1152382
When libvirt create's the vport (VPORT_CREATE) for the vHBA, there isn't
enough "time" between the creation and the running of the following
backend->refreshPool after a backend->startPool in order to find the LU's.
Population of LU's happens asynchronously when udevEventHandleCallback
discovers the "new" vHBA port. Creation of the infrastructure by udev
is an iterative process creating and discovering actual storage devices and
adjusting the environment.
Because of the time it takes to discover and set things up, the backend
refreshPool call done after the startPool call will generally fail to
find any devices. This leaves the newly started pool appear empty when
querying via 'vol-list' after startup. The "workaround" has always been
to run pool-refresh after startup (or any time thereafter) in order to
find the LU's. Depending on how quickly run after startup, this too may
not find any LUs in the pool. Eventually though given enough time and
retries it will find something if LU's exist for the vHBA.
This patch adds a thread to be executed after the VPORT_CREATE which will
attempt to find the LU's without requiring the external run of refresh-pool.
It does this by waiting for 5 seconds and searching for the LU's. If any
are found, then the thread completes; otherwise, it will retry once more
in another 5 seconds. If none are found in that second pass, the thread
gives up.
Things learned while investigating this... No need to try and fill the
pool too quickly or too many times. Over the course of creation, the udev
code may 'add', 'change', and 'delete' the same device. So if the refresh
code runs and finds something, it may display it only to have a subsequent
refresh appear to "lose" the device. The udev processing doesn't seem to
have a way to indicate that it's all done with the creation processing of a
newly found vHBA. Only the Lone Ranger has silver bullets to fix everything.
2014-11-18 19:51:01 +00:00
|
|
|
VIR_FREE(cbdata);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Thread to handle the pool refresh after a VPORT_CREATE is done. In this
|
|
|
|
* case the 'udevEventHandleCallback' will be executed asynchronously as a
|
|
|
|
* result of the node device driver callback routine to handle when udev
|
|
|
|
* notices some sort of device change (such as adding a new device). It takes
|
|
|
|
* some amount of time (usually a few seconds) for udev to go through the
|
|
|
|
* process of setting up the new device. Unfortunately, there is nothing
|
|
|
|
* that says "when" it's done. The immediate virStorageBackendSCSIRefreshPool
|
|
|
|
* done after virStorageBackendSCSIStartPool (and createVport) occurs too
|
|
|
|
* quickly to find any devices.
|
|
|
|
*
|
|
|
|
* So this thread is designed to wait a few seconds (5), then make the query
|
|
|
|
* to find the LUs for the pool. If none yet exist, we'll try once more
|
|
|
|
* to find the LUs before giving up.
|
|
|
|
*
|
|
|
|
* Attempting to find devices prior to allowing udev to settle down may result
|
|
|
|
* in finding devices that then get deleted.
|
|
|
|
*
|
|
|
|
* @opaque Pool's Refresh Info containing name and pool object pointer
|
|
|
|
*/
|
|
|
|
static void
virStoragePoolFCRefreshThread(void *opaque)
{
    virStoragePoolFCRefreshInfoPtr cbdata = opaque;
    /* Borrowed views into cbdata; cbdata stays alive until the
     * DataFree call at the bottom. */
    const char *fchost_name = cbdata->fchost_name;
    const unsigned char *pool_uuid = cbdata->pool_uuid;
    virStoragePoolObjPtr pool = NULL;
    unsigned int host;
    int found = 0;   /* count of LUs found by FindLUs; 0 => keep trying */
    int tries = 2;   /* at most two 5-second waits before giving up */

    do {
        sleep(5); /* Give it time */

        /* Let's see if the pool still exists -  */
        /* Re-look the pool up by UUID each pass: it may have been
         * undefined while we slept.  On failure 'pool' is NULL, which
         * also disarms the debug message after the loop. */
        if (!(pool = virStoragePoolObjFindPoolByUUID(pool_uuid)))
            break;

        /* Return with pool lock, if active, we can get the host number,
         * successfully, rescan, and find LUN's, then we are happy
         */
        VIR_DEBUG("Attempt FC Refresh for pool='%s' name='%s' tries='%d'",
                  pool->def->name, fchost_name, tries);

        /* Reset the accounting before re-populating volumes. */
        pool->def->allocation = pool->def->capacity = pool->def->available = 0;

        /* Only rescan while the pool is active; clear the stale volume
         * list only after the rescan trigger succeeded, so a failed
         * trigger leaves the previous contents intact. */
        if (virStoragePoolObjIsActive(pool) &&
            virSCSIHostGetNumber(fchost_name, &host) == 0 &&
            virStorageBackendSCSITriggerRescan(host) == 0) {
            virStoragePoolObjClearVols(pool);
            found = virStorageBackendSCSIFindLUs(pool, host);
        }
        /* FindPoolByUUID returned the pool locked; drop the lock before
         * sleeping again (or exiting). */
        virStoragePoolObjUnlock(pool);
    } while (!found && --tries);

    if (pool && !found)
        VIR_DEBUG("FC Refresh Thread failed to find LU's");

    /* Thread owns cbdata; free it on every exit path. */
    virStoragePoolFCRefreshDataFree(cbdata);
}
|
|
|
|
|
2013-03-25 16:43:40 +00:00
|
|
|
static char *
|
2017-02-28 12:38:12 +00:00
|
|
|
getAdapterName(virStorageAdapterPtr adapter)
|
2013-03-25 16:43:40 +00:00
|
|
|
{
|
|
|
|
char *name = NULL;
|
2014-06-09 16:19:16 +00:00
|
|
|
char *parentaddr = NULL;
|
2013-03-25 16:43:40 +00:00
|
|
|
|
2017-02-28 12:38:12 +00:00
|
|
|
if (adapter->type == VIR_STORAGE_ADAPTER_TYPE_SCSI_HOST) {
|
2017-03-10 16:28:35 +00:00
|
|
|
virStorageAdapterSCSIHostPtr scsi_host = &adapter->data.scsi_host;
|
2014-06-09 16:19:16 +00:00
|
|
|
|
2017-03-10 16:28:35 +00:00
|
|
|
if (scsi_host->has_parent) {
|
|
|
|
virPCIDeviceAddressPtr addr = &scsi_host->parentaddr;
|
|
|
|
unsigned int unique_id = scsi_host->unique_id;
|
|
|
|
|
|
|
|
if (!(name = virSCSIHostGetNameByParentaddr(addr->domain,
|
|
|
|
addr->bus,
|
|
|
|
addr->slot,
|
|
|
|
addr->function,
|
2014-10-06 20:18:23 +00:00
|
|
|
unique_id)))
|
2014-06-09 16:19:16 +00:00
|
|
|
goto cleanup;
|
|
|
|
} else {
|
2017-03-10 16:28:35 +00:00
|
|
|
ignore_value(VIR_STRDUP(name, scsi_host->name));
|
2014-06-09 16:19:16 +00:00
|
|
|
}
|
2017-02-28 12:38:12 +00:00
|
|
|
} else if (adapter->type == VIR_STORAGE_ADAPTER_TYPE_FC_HOST) {
|
2017-03-10 16:28:35 +00:00
|
|
|
virStorageAdapterFCHostPtr fchost = &adapter->data.fchost;
|
|
|
|
|
|
|
|
if (!(name = virVHBAGetHostByWWN(NULL, fchost->wwnn, fchost->wwpn))) {
|
2014-03-03 19:57:50 +00:00
|
|
|
virReportError(VIR_ERR_XML_ERROR,
|
|
|
|
_("Failed to find SCSI host with wwnn='%s', "
|
2017-03-10 16:28:35 +00:00
|
|
|
"wwpn='%s'"), fchost->wwnn, fchost->wwpn);
|
2014-03-03 19:57:50 +00:00
|
|
|
}
|
2013-03-25 16:43:40 +00:00
|
|
|
}
|
|
|
|
|
2014-06-09 16:19:16 +00:00
|
|
|
cleanup:
|
|
|
|
VIR_FREE(parentaddr);
|
2013-03-25 16:43:40 +00:00
|
|
|
return name;
|
|
|
|
}
|
|
|
|
|
2014-11-06 17:22:58 +00:00
|
|
|
|
2013-03-25 16:43:41 +00:00
|
|
|
/**
 * createVport:
 * @conn: connection used to create the vport node device
 * @def: storage pool definition (supplies the pool UUID; updated with the
 *       managed flag)
 * @configFile: path to the pool's persistent config, or NULL for a
 *              transient pool
 * @fchost: the pool's fc_host adapter data (wwnn/wwpn/parent/managed)
 *
 * Create the vHBA vport backing an fc_host pool, then spawn a background
 * thread that retries finding the pool's LUNs (udev populates them
 * asynchronously, so an immediate refresh would come up empty).
 *
 * Returns 0 on success, -1 on failure. Thread-creation failure is NOT an
 * error: the pool can still be refreshed manually later.
 */
static int
createVport(virConnectPtr conn,
            virStoragePoolDefPtr def,
            const char *configFile,
            virStorageAdapterFCHostPtr fchost)
{
    char *name = NULL;
    virStoragePoolFCRefreshInfoPtr cbdata = NULL;
    virThread thread;
    int ret = -1;

    VIR_DEBUG("conn=%p, configFile='%s' parent='%s', wwnn='%s' wwpn='%s'",
              conn, NULLSTR(configFile), NULLSTR(fchost->parent),
              fchost->wwnn, fchost->wwpn);

    /* Since we're creating the vHBA, then we need to manage removing it
     * as well. Since we need this setting to "live" through a libvirtd
     * restart, we need to save the persistent configuration. So if not
     * already defined as YES, then force the issue.
     */
    if (fchost->managed != VIR_TRISTATE_BOOL_YES) {
        /* NB: the flag must be set on @def before the config is written,
         * so a daemon restart sees managed='yes' and deletes the vport
         * when the pool is destroyed. */
        fchost->managed = VIR_TRISTATE_BOOL_YES;
        if (configFile) {
            if (virStoragePoolSaveConfig(configFile, def) < 0)
                goto cleanup;
        }
    }

    if (!(name = virNodeDeviceCreateVport(conn, fchost)))
        goto cleanup;

    /* Creating our own VPORT didn't leave enough time to find any LUN's,
     * so, let's create a thread whose job it is to call the FindLU's with
     * retry logic set to true. If the thread isn't created, then no big
     * deal since it's still possible to refresh the pool later.
     */
    if (VIR_ALLOC(cbdata) == 0) {
        memcpy(cbdata->pool_uuid, def->uuid, VIR_UUID_BUFLEN);
        /* Ownership of @name transfers to @cbdata (and thus to the
         * refresh thread); the VIR_FREE below then frees NULL. */
        VIR_STEAL_PTR(cbdata->fchost_name, name);

        if (virThreadCreate(&thread, false, virStoragePoolFCRefreshThread,
                            cbdata) < 0) {
            /* Oh well - at least someone can still refresh afterwards */
            VIR_DEBUG("Failed to create FC Pool Refresh Thread");
            virStoragePoolFCRefreshDataFree(cbdata);
        }
    }

    ret = 0;

 cleanup:
    VIR_FREE(name);
    return ret;
}
|
|
|
|
|
|
|
|
|
2010-11-11 20:09:20 +00:00
|
|
|
static int
|
2015-03-09 14:34:35 +00:00
|
|
|
virStorageBackendSCSICheckPool(virStoragePoolObjPtr pool,
|
2010-11-11 20:09:20 +00:00
|
|
|
bool *isActive)
|
|
|
|
{
|
2013-03-25 16:43:40 +00:00
|
|
|
char *path = NULL;
|
|
|
|
char *name = NULL;
|
2013-03-25 16:43:37 +00:00
|
|
|
unsigned int host;
|
2013-03-25 16:43:40 +00:00
|
|
|
int ret = -1;
|
2010-11-11 20:09:20 +00:00
|
|
|
|
|
|
|
*isActive = false;
|
2013-03-25 16:43:37 +00:00
|
|
|
|
2017-03-10 16:28:35 +00:00
|
|
|
if (!(name = getAdapterName(&pool->def->source.adapter))) {
|
2014-01-06 10:19:34 +00:00
|
|
|
/* It's normal for the pool with "fc_host" type source
|
|
|
|
* adapter fails to get the adapter name, since the vHBA
|
|
|
|
* the adapter based on might be not created yet.
|
|
|
|
*/
|
|
|
|
if (pool->def->source.adapter.type ==
|
2017-02-28 12:38:12 +00:00
|
|
|
VIR_STORAGE_ADAPTER_TYPE_FC_HOST) {
|
2014-01-06 10:19:34 +00:00
|
|
|
virResetLastError();
|
|
|
|
return 0;
|
|
|
|
} else {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
2013-03-25 16:43:37 +00:00
|
|
|
|
2017-01-23 19:48:12 +00:00
|
|
|
if (virSCSIHostGetNumber(name, &host) < 0)
|
2013-03-25 16:43:40 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2014-06-09 16:41:04 +00:00
|
|
|
if (virAsprintf(&path, "%s/host%d",
|
|
|
|
LINUX_SYSFS_SCSI_HOST_PREFIX, host) < 0)
|
2013-03-25 16:43:40 +00:00
|
|
|
goto cleanup;
|
2010-11-11 20:09:20 +00:00
|
|
|
|
2013-09-13 13:32:43 +00:00
|
|
|
*isActive = virFileExists(path);
|
2010-11-11 20:09:20 +00:00
|
|
|
|
2013-03-25 16:43:40 +00:00
|
|
|
ret = 0;
|
2014-03-25 06:52:40 +00:00
|
|
|
cleanup:
|
2010-11-11 20:09:20 +00:00
|
|
|
VIR_FREE(path);
|
2013-03-25 16:43:40 +00:00
|
|
|
VIR_FREE(name);
|
|
|
|
return ret;
|
2010-11-11 20:09:20 +00:00
|
|
|
}
|
2009-04-07 12:50:17 +00:00
|
|
|
|
2009-04-01 16:03:22 +00:00
|
|
|
static int
|
2010-02-10 11:42:56 +00:00
|
|
|
virStorageBackendSCSIRefreshPool(virConnectPtr conn ATTRIBUTE_UNUSED,
|
2009-04-01 16:03:22 +00:00
|
|
|
virStoragePoolObjPtr pool)
|
|
|
|
{
|
2013-03-25 16:43:40 +00:00
|
|
|
char *name = NULL;
|
2013-03-25 16:43:37 +00:00
|
|
|
unsigned int host;
|
2013-03-25 16:43:40 +00:00
|
|
|
int ret = -1;
|
2009-04-01 16:03:22 +00:00
|
|
|
|
|
|
|
pool->def->allocation = pool->def->capacity = pool->def->available = 0;
|
|
|
|
|
2017-03-10 16:28:35 +00:00
|
|
|
if (!(name = getAdapterName(&pool->def->source.adapter)))
|
2013-03-25 16:43:40 +00:00
|
|
|
return -1;
|
|
|
|
|
2017-01-23 19:48:12 +00:00
|
|
|
if (virSCSIHostGetNumber(name, &host) < 0)
|
2009-04-01 16:03:22 +00:00
|
|
|
goto out;
|
|
|
|
|
2010-05-20 06:57:06 +00:00
|
|
|
VIR_DEBUG("Scanning host%u", host);
|
2009-04-01 16:03:22 +00:00
|
|
|
|
2013-03-25 16:43:37 +00:00
|
|
|
if (virStorageBackendSCSITriggerRescan(host) < 0)
|
2009-04-07 12:50:17 +00:00
|
|
|
goto out;
|
|
|
|
|
2015-06-23 18:40:31 +00:00
|
|
|
if (virStorageBackendSCSIFindLUs(pool, host) < 0)
|
|
|
|
goto out;
|
2009-04-01 16:03:22 +00:00
|
|
|
|
2013-03-25 16:43:37 +00:00
|
|
|
ret = 0;
|
2014-03-25 06:52:40 +00:00
|
|
|
out:
|
2013-03-25 16:43:40 +00:00
|
|
|
VIR_FREE(name);
|
2013-03-25 16:43:37 +00:00
|
|
|
return ret;
|
2009-04-01 16:03:22 +00:00
|
|
|
}
|
|
|
|
|
2017-02-28 12:38:12 +00:00
|
|
|
|
2013-03-25 16:43:41 +00:00
|
|
|
static int
|
2014-11-06 17:22:58 +00:00
|
|
|
virStorageBackendSCSIStartPool(virConnectPtr conn,
|
2013-03-25 16:43:41 +00:00
|
|
|
virStoragePoolObjPtr pool)
|
|
|
|
{
|
2017-02-28 12:38:12 +00:00
|
|
|
if (pool->def->source.adapter.type == VIR_STORAGE_ADAPTER_TYPE_FC_HOST)
|
2017-03-10 16:43:11 +00:00
|
|
|
return createVport(conn, pool->def, pool->configFile,
|
|
|
|
&pool->def->source.adapter.data.fchost);
|
|
|
|
|
|
|
|
return 0;
|
2013-03-25 16:43:41 +00:00
|
|
|
}
|
|
|
|
|
2017-02-28 12:38:12 +00:00
|
|
|
|
2013-03-25 16:43:41 +00:00
|
|
|
static int
|
storage: Introduce 'managed' for the fchost parent
https://bugzilla.redhat.com/show_bug.cgi?id=1160926
Introduce a 'managed' attribute to allow libvirt to decide whether to
delete a vHBA vport created via external means such as nodedev-create.
The code currently decides whether to delete the vHBA based solely on
whether the parent was provided at creation time. However, that may not
be the desired action, so rather than delete and force someone to create
another vHBA via an additional nodedev-create allow the configuration of
the storage pool to decide the desired action.
During createVport when libvirt does the VPORT_CREATE, set the managed
value to YES if not already set to indicate to the deleteVport code that
it should delete the vHBA when the pool is destroyed.
If libvirtd is restarted all the memory only state was lost, so for a
persistent storage pool, use the virStoragePoolSaveConfig in order to
write out the managed value.
Because we're now saving the current configuration, we need to be sure
to not save the parent in the output XML if it was undefined at start.
Saving the name would cause future starts to always use the same parent
which is not the expected result when not providing a parent. By not
providing a parent, libvirt is expected to find the best available
vHBA port for each subsequent (re)start.
At deleteVport, use the new managed value to decide whether to execute
the VPORT_DELETE. Since we no longer save the parent in memory or in
XML when provided, if it was not provided, then we have to look it up.
2014-11-10 16:19:51 +00:00
|
|
|
virStorageBackendSCSIStopPool(virConnectPtr conn,
|
2013-03-25 16:43:41 +00:00
|
|
|
virStoragePoolObjPtr pool)
|
|
|
|
{
|
2017-02-28 12:38:12 +00:00
|
|
|
if (pool->def->source.adapter.type == VIR_STORAGE_ADAPTER_TYPE_FC_HOST)
|
2017-03-15 14:56:26 +00:00
|
|
|
return virNodeDeviceDeleteVport(conn,
|
|
|
|
&pool->def->source.adapter.data.fchost);
|
2017-03-10 16:43:11 +00:00
|
|
|
|
|
|
|
return 0;
|
2013-03-25 16:43:41 +00:00
|
|
|
}
|
2009-04-01 16:03:22 +00:00
|
|
|
|
|
|
|
/* Backend driver table for SCSI pools: pool lifecycle callbacks plus
 * generic local-file volume upload/download/wipe helpers. Callbacks not
 * listed (e.g. volume creation) are unsupported for this pool type. */
virStorageBackend virStorageBackendSCSI = {
    .type = VIR_STORAGE_POOL_SCSI,

    .checkPool = virStorageBackendSCSICheckPool,
    .refreshPool = virStorageBackendSCSIRefreshPool,
    .startPool = virStorageBackendSCSIStartPool,
    .stopPool = virStorageBackendSCSIStopPool,
    .uploadVol = virStorageBackendVolUploadLocal,
    .downloadVol = virStorageBackendVolDownloadLocal,
    .wipeVol = virStorageBackendVolWipeLocal,
};
|
2017-01-13 15:50:11 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
 * virStorageBackendSCSIRegister:
 *
 * Register the SCSI pool backend with the storage driver's backend table.
 *
 * Returns the result of virStorageBackendRegister().
 */
int
virStorageBackendSCSIRegister(void)
{
    return virStorageBackendRegister(&virStorageBackendSCSI);
}
|