src: improve thread naming with human-targeted names

Historically, threads have been given a name based on the C function they run,
and this name was only used inside libvirt. With OS-level thread naming the
name is now visible to debuggers, but it also has to fit within 15 characters
on Linux, so function names are too long in some cases.

Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Author: Daniel P. Berrangé
Date:   2020-02-14 11:20:10 +00:00
Commit: 5bff668dfb (parent: c85256b31b)
18 changed files with 101 additions and 57 deletions
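For illustration only, a minimal standalone sketch (not libvirt code) of the Linux constraint the commit message refers to: pthread_setname_np() rejects names longer than 15 characters plus the terminating NUL, so a function-derived name such as "libxlDomainShutdownThread" cannot be applied verbatim and has to be shortened or replaced with a compact label.

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Linux limits thread names to 16 bytes including the trailing NUL. */
    char name[16];

    /* A function-derived name like "libxlDomainShutdownThread" is too long;
     * truncate it (or, as this commit does, pick a short human-friendly label). */
    snprintf(name, sizeof(name), "%.15s", "libxlDomainShutdownThread");

    int rc = pthread_setname_np(pthread_self(), name);
    if (rc != 0)
        fprintf(stderr, "pthread_setname_np: %s\n", strerror(rc));

    return 0;
}

The diffs below take the second approach: callers pass short labels such as "qemu-event" or "mig-%s" to virThreadCreateFull() and virThreadPoolNewFull() instead of relying on long C function names.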


@@ -664,6 +664,7 @@ libxlDomainEventHandler(void *data, VIR_LIBXL_EVENT_CONST libxl_event *event)
virThread thread;
g_autoptr(libxlDriverConfig) cfg = NULL;
int ret = -1;
+ g_autofree char *name = NULL;
if (event->type != LIBXL_EVENT_TYPE_DOMAIN_SHUTDOWN &&
event->type != LIBXL_EVENT_TYPE_DOMAIN_DEATH) {
@@ -687,12 +688,13 @@ libxlDomainEventHandler(void *data, VIR_LIBXL_EVENT_CONST libxl_event *event)
shutdown_info->driver = driver;
shutdown_info->event = (libxl_event *)event;
+ name = g_strdup_printf("ev-%d", event->domid);
if (event->type == LIBXL_EVENT_TYPE_DOMAIN_SHUTDOWN)
- ret = virThreadCreate(&thread, false, libxlDomainShutdownThread,
- shutdown_info);
+ ret = virThreadCreateFull(&thread, false, libxlDomainShutdownThread,
+ name, false, shutdown_info);
else if (event->type == LIBXL_EVENT_TYPE_DOMAIN_DEATH)
- ret = virThreadCreate(&thread, false, libxlDomainDeathThread,
- shutdown_info);
+ ret = virThreadCreateFull(&thread, false, libxlDomainDeathThread,
+ name, false, shutdown_info);
if (ret < 0) {
/*


@@ -294,6 +294,7 @@ libxlMigrateDstReceive(virNetSocketPtr sock,
virNetSocketPtr client_sock;
int recvfd = -1;
size_t i;
+ g_autofree char *name = NULL;
/* Accept migration connection */
if (virNetSocketAccept(sock, &client_sock) < 0 || !client_sock) {
@@ -314,8 +315,13 @@ libxlMigrateDstReceive(virNetSocketPtr sock,
VIR_FREE(priv->migrationDstReceiveThr);
if (VIR_ALLOC(priv->migrationDstReceiveThr) < 0)
goto fail;
- if (virThreadCreate(priv->migrationDstReceiveThr, true,
- libxlDoMigrateDstReceive, args) < 0) {
+ name = g_strdup_printf("mig-%s", args->vm->def->name);
+ if (virThreadCreateFull(priv->migrationDstReceiveThr, true,
+ libxlDoMigrateDstReceive,
+ name,
+ false,
+ args) < 0) {
virReportError(VIR_ERR_OPERATION_FAILED, "%s",
_("Failed to create thread for receiving migration data"));
goto fail;
@@ -554,6 +560,7 @@ libxlDomainMigrationDstPrepareTunnel3(virConnectPtr dconn,
char *xmlout = NULL;
int dataFD[2] = { -1, -1 };
int ret = -1;
+ g_autofree char *name = NULL;
if (libxlDomainMigrationPrepareAny(dconn, def, cookiein, cookieinlen,
&mig, &xmlout, &taint_hook) < 0)
@@ -611,7 +618,10 @@ libxlDomainMigrationDstPrepareTunnel3(virConnectPtr dconn,
VIR_FREE(priv->migrationDstReceiveThr);
if (VIR_ALLOC(priv->migrationDstReceiveThr) < 0)
goto error;
- if (virThreadCreate(priv->migrationDstReceiveThr, true, libxlDoMigrateDstReceive, args) < 0) {
+ name = g_strdup_printf("mig-%s", args->vm->def->name);
+ if (virThreadCreateFull(priv->migrationDstReceiveThr, true,
+ libxlDoMigrateDstReceive,
+ name, false, args) < 0) {
virReportError(VIR_ERR_OPERATION_FAILED, "%s",
_("Failed to create thread for receiving migration data"));
goto endjob;
@@ -910,6 +920,7 @@ libxlMigrationSrcStartTunnel(libxlDriverPrivatePtr driver,
struct libxlTunnelControl *tc = NULL;
libxlTunnelMigrationThread *arg = NULL;
int ret = -1;
+ g_autofree char *name = NULL;
if (VIR_ALLOC(tc) < 0)
goto out;
@@ -925,8 +936,10 @@ libxlMigrationSrcStartTunnel(libxlDriverPrivatePtr driver,
arg->srcFD = tc->dataFD[0];
/* Write to dest stream */
arg->st = st;
- if (virThreadCreate(&tc->thread, true,
- libxlTunnel3MigrationSrcFunc, arg) < 0) {
+ name = g_strdup_printf("mig-%s", vm->def->name);
+ if (virThreadCreateFull(&tc->thread, true,
+ libxlTunnel3MigrationSrcFunc,
+ name, false, arg) < 0) {
virReportError(errno, "%s",
_("Unable to create tunnel migration thread"));
goto out;


@@ -335,8 +335,8 @@ int lxcSetupFuse(virLXCFusePtr *f, virDomainDefPtr def)
int lxcStartFuse(virLXCFusePtr fuse)
{
- if (virThreadCreate(&fuse->thread, false, lxcFuseRun,
- (void *)fuse) < 0) {
+ if (virThreadCreateFull(&fuse->thread, false, lxcFuseRun,
+ "lxc-fuse", false, (void *)fuse) < 0) {
lxcFuseDestroy(fuse);
return -1;
}


@@ -1863,7 +1863,8 @@ nodeStateInitialize(bool privileged,
udev_monitor_set_receive_buffer_size(priv->udev_monitor,
128 * 1024 * 1024);
- if (virThreadCreate(&priv->th, true, udevEventHandleThread, NULL) < 0) {
+ if (virThreadCreateFull(&priv->th, true, udevEventHandleThread,
+ "udev-event", false, NULL) < 0) {
virReportSystemError(errno, "%s",
_("failed to create udev handler thread"));
goto unlock;
@@ -1889,8 +1890,8 @@ nodeStateInitialize(bool privileged,
if (udevSetupSystemDev() != 0)
goto cleanup;
- if (virThreadCreate(&enumThread, false, nodeStateInitializeEnumerate,
- udev) < 0) {
+ if (virThreadCreateFull(&enumThread, false, nodeStateInitializeEnumerate,
+ "nodedev-init", false, udev) < 0) {
virReportSystemError(errno, "%s",
_("failed to create udev enumerate thread"));
goto cleanup;


@@ -1366,9 +1366,10 @@ virNWFilterDHCPSnoopThread(void *req0)
}
tmp = virNetDevGetIndex(req->binding->portdevname, &ifindex);
threadkey = g_strdup(req->threadkey);
- worker = virThreadPoolNew(1, 1, 0,
- virNWFilterDHCPDecodeWorker,
- req);
+ worker = virThreadPoolNewFull(1, 1, 0,
+ virNWFilterDHCPDecodeWorker,
+ "dhcp-decode",
+ req);
}
/* let creator know how well we initialized */
@@ -1638,8 +1639,8 @@ virNWFilterDHCPSnoopReq(virNWFilterTechDriverPtr techdriver,
/* prevent thread from holding req */
virNWFilterSnoopReqLock(req);
- if (virThreadCreate(&thread, false, virNWFilterDHCPSnoopThread,
- req) != 0) {
+ if (virThreadCreateFull(&thread, false, virNWFilterDHCPSnoopThread,
+ "dhcp-snoop", false, req) != 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("virNWFilterDHCPSnoopReq virThreadCreate "
"failed on interface '%s'"), binding->portdevname);


@@ -734,10 +734,12 @@ virNWFilterLearnIPAddress(virNWFilterTechDriverPtr techdriver,
if (rc < 0)
goto err_free_req;
- if (virThreadCreate(&thread,
- false,
- learnIPAddressThread,
- req) != 0)
+ if (virThreadCreateFull(&thread,
+ false,
+ learnIPAddressThread,
+ "ip-learn",
+ false,
+ req) != 0)
goto err_dereg_req;
return 0;


@@ -999,7 +999,8 @@ qemuStateInitialize(bool privileged,
/* must be initialized before trying to reconnect to all the
* running domains since there might occur some QEMU monitor
* events that will be dispatched to the worker pool */
- qemu_driver->workerPool = virThreadPoolNew(0, 1, 0, qemuProcessEventHandler, qemu_driver);
+ qemu_driver->workerPool = virThreadPoolNewFull(0, 1, 0, qemuProcessEventHandler,
+ "qemu-event", qemu_driver);
if (!qemu_driver->workerPool)
goto error;


@@ -3309,9 +3309,11 @@ qemuMigrationSrcStartTunnel(virStreamPtr st,
io->wakeupRecvFD = wakeupFD[0];
io->wakeupSendFD = wakeupFD[1];
- if (virThreadCreate(&io->thread, true,
- qemuMigrationSrcIOFunc,
- io) < 0) {
+ if (virThreadCreateFull(&io->thread, true,
+ qemuMigrationSrcIOFunc,
+ "qemu-mig-tunnel",
+ false,
+ io) < 0) {
virReportSystemError(errno, "%s",
_("Unable to create migration thread"));
goto error;


@@ -516,13 +516,16 @@ qemuProcessShutdownOrReboot(virQEMUDriverPtr driver,
qemuDomainObjPrivatePtr priv = vm->privateData;
if (priv->fakeReboot) {
+ g_autofree char *name = g_strdup_printf("reboot-%s", vm->def->name);
qemuDomainSetFakeReboot(driver, vm, false);
virObjectRef(vm);
virThread th;
- if (virThreadCreate(&th,
- false,
- qemuProcessFakeReboot,
- vm) < 0) {
+ if (virThreadCreateFull(&th,
+ false,
+ qemuProcessFakeReboot,
+ name,
+ false,
+ vm) < 0) {
VIR_ERROR(_("Failed to create reboot thread, killing domain"));
ignore_value(qemuProcessKill(vm, VIR_QEMU_PROCESS_KILL_NOWAIT));
priv->pausedShutdown = false;
@@ -8223,6 +8226,7 @@ qemuProcessReconnectHelper(virDomainObjPtr obj,
virThread thread;
struct qemuProcessReconnectData *src = opaque;
struct qemuProcessReconnectData *data;
+ g_autofree char *name = NULL;
/* If the VM was inactive, we don't need to reconnect */
if (!obj->pid)
@@ -8242,7 +8246,10 @@ qemuProcessReconnectHelper(virDomainObjPtr obj,
virObjectLock(obj);
virObjectRef(obj);
- if (virThreadCreate(&thread, false, qemuProcessReconnect, data) < 0) {
+ name = g_strdup_printf("init-%s", obj->def->name);
+ if (virThreadCreateFull(&thread, false, qemuProcessReconnect,
+ name, false, data) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Could not create thread. QEMU initialization "
"might be incomplete"));


@@ -713,7 +713,8 @@ static void daemonReloadHandler(virNetDaemonPtr dmn G_GNUC_UNUSED,
return;
}
- if (virThreadCreate(&thr, false, daemonReloadHandlerThread, NULL) < 0) {
+ if (virThreadCreateFull(&thr, false, daemonReloadHandlerThread,
+ "daemon-reload", false, NULL) < 0) {
/*
* Not much we can do on error here except log it.
*/
@@ -770,7 +771,8 @@ static void daemonStop(virNetDaemonPtr dmn)
{
virThread thr;
virObjectRef(dmn);
- if (virThreadCreate(&thr, false, daemonStopWorker, dmn) < 0)
+ if (virThreadCreateFull(&thr, false, daemonStopWorker,
+ "daemon-stop", false, dmn) < 0)
virObjectUnref(dmn);
}
@@ -876,7 +878,8 @@ static int daemonStateInit(virNetDaemonPtr dmn)
{
virThread thr;
virObjectRef(dmn);
- if (virThreadCreate(&thr, false, daemonRunStateInit, dmn) < 0) {
+ if (virThreadCreateFull(&thr, false, daemonRunStateInit,
+ "daemon-init", false, dmn) < 0) {
virObjectUnref(dmn);
return -1;
}


@@ -367,10 +367,11 @@ virNetServerPtr virNetServerNew(const char *name,
if (!(srv = virObjectLockableNew(virNetServerClass)))
return NULL;
- if (!(srv->workers = virThreadPoolNew(min_workers, max_workers,
- priority_workers,
- virNetServerHandleJob,
- srv)))
+ if (!(srv->workers = virThreadPoolNewFull(min_workers, max_workers,
+ priority_workers,
+ virNetServerHandleJob,
+ "rpc-worker",
+ srv)))
goto error;
srv->name = g_strdup(name);


@@ -334,8 +334,8 @@ createVport(virStoragePoolDefPtr def,
memcpy(cbdata->pool_uuid, def->uuid, VIR_UUID_BUFLEN);
cbdata->fchost_name = g_steal_pointer(&name);
- if (virThreadCreate(&thread, false, virStoragePoolFCRefreshThread,
- cbdata) < 0) {
+ if (virThreadCreateFull(&thread, false, virStoragePoolFCRefreshThread,
+ "scsi-refresh", false, cbdata) < 0) {
/* Oh well - at least someone can still refresh afterwards */
VIR_DEBUG("Failed to create FC Pool Refresh Thread");
virStoragePoolFCRefreshDataFree(cbdata);


@@ -2367,8 +2367,8 @@ virStorageVolFDStreamCloseCb(virStreamPtr st G_GNUC_UNUSED,
{
virThread thread;
- if (virThreadCreate(&thread, false, virStorageVolPoolRefreshThread,
- opaque) < 0) {
+ if (virThreadCreateFull(&thread, false, virStorageVolPoolRefreshThread,
+ "vol-refresh", false, opaque) < 0) {
/* Not much else can be done */
VIR_ERROR(_("Failed to create thread to handle pool refresh"));
goto error;


@@ -2620,8 +2620,9 @@ virCommandRunAsync(virCommandPtr cmd, pid_t *pid)
/* clear any error so we can catch if the helper thread reports one */
cmd->has_error = 0;
if (VIR_ALLOC(cmd->asyncioThread) < 0 ||
- virThreadCreate(cmd->asyncioThread, true,
- virCommandDoAsyncIOHelper, cmd) < 0) {
+ virThreadCreateFull(cmd->asyncioThread, true,
+ virCommandDoAsyncIOHelper,
+ "cmd-async-io", false, cmd) < 0) {
virReportSystemError(errno, "%s",
_("Unable to create thread "
"to process command's IO"));


@@ -1134,10 +1134,12 @@ static int virFDStreamOpenInternal(virStreamPtr st,
goto error;
}
- if (virThreadCreate(fdst->thread,
- true,
- virFDStreamThread,
- threadData) < 0)
+ if (virThreadCreateFull(fdst->thread,
+ true,
+ virFDStreamThread,
+ "fd-stream",
+ false,
+ threadData) < 0)
goto error;
}


@@ -220,9 +220,11 @@ int virNodeSuspend(unsigned int target,
if (virNodeSuspendSetNodeWakeup(duration) < 0)
goto cleanup;
- if (virThreadCreate(&thread, false,
- virNodeSuspendHelper,
- (void *)cmdString) < 0) {
+ if (virThreadCreateFull(&thread, false,
+ virNodeSuspendHelper,
+ "node-suspend",
+ false,
+ (void *)cmdString) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Failed to create thread to suspend the host"));
goto cleanup;


@@ -54,7 +54,7 @@ struct _virThreadPool {
bool quit;
virThreadPoolJobFunc jobFunc;
- const char *jobFuncName;
+ const char *jobName;
void *jobOpaque;
virThreadPoolJobList jobList;
size_t jobQueueDepth;
@@ -187,6 +187,7 @@ virThreadPoolExpand(virThreadPoolPtr pool, size_t gain, bool priority)
return -1;
for (i = 0; i < gain; i++) {
+ g_autofree char *name = NULL;
if (VIR_ALLOC(data) < 0)
goto error;
@@ -194,10 +195,15 @@ virThreadPoolExpand(virThreadPoolPtr pool, size_t gain, bool priority)
data->cond = priority ? &pool->prioCond : &pool->cond;
data->priority = priority;
+ if (priority)
+ name = g_strdup_printf("prio-%s", pool->jobName);
+ else
+ name = g_strdup(pool->jobName);
if (virThreadCreateFull(&(*workers)[i],
false,
virThreadPoolWorker,
- pool->jobFuncName,
+ name,
true,
data) < 0) {
VIR_FREE(data);
@@ -218,7 +224,7 @@ virThreadPoolNewFull(size_t minWorkers,
size_t maxWorkers,
size_t prioWorkers,
virThreadPoolJobFunc func,
- const char *funcName,
+ const char *name,
void *opaque)
{
virThreadPoolPtr pool;
@@ -232,7 +238,7 @@ virThreadPoolNewFull(size_t minWorkers,
pool->jobList.tail = pool->jobList.head = NULL;
pool->jobFunc = func;
- pool->jobFuncName = funcName;
+ pool->jobName = name;
pool->jobOpaque = opaque;
if (virMutexInit(&pool->mutex) < 0)


@@ -35,7 +35,7 @@ virThreadPoolPtr virThreadPoolNewFull(size_t minWorkers,
size_t maxWorkers,
size_t prioWorkers,
virThreadPoolJobFunc func,
- const char *funcName,
+ const char *name,
void *opaque) ATTRIBUTE_NONNULL(4);
size_t virThreadPoolGetMinWorkers(virThreadPoolPtr pool);