2011-10-05 17:31:54 +00:00
|
|
|
/*
|
2014-03-07 13:38:51 +00:00
|
|
|
* qemu_agent.c: interaction with QEMU guest agent
|
2011-10-05 17:31:54 +00:00
|
|
|
*
|
2014-03-18 08:15:21 +00:00
|
|
|
* Copyright (C) 2006-2014 Red Hat, Inc.
|
2011-10-05 17:31:54 +00:00
|
|
|
* Copyright (C) 2006 Daniel P. Berrange
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
|
|
|
* version 2.1 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
2012-09-20 22:30:55 +00:00
|
|
|
* License along with this library. If not, see
|
2012-07-21 10:06:23 +00:00
|
|
|
* <http://www.gnu.org/licenses/>.
|
2011-10-05 17:31:54 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <config.h>
|
|
|
|
|
|
|
|
#include <poll.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
#include <fcntl.h>
|
|
|
|
#include <sys/time.h>
|
2020-02-12 14:54:19 +00:00
|
|
|
#include <gio/gio.h>
|
2011-10-05 17:31:54 +00:00
|
|
|
|
|
|
|
#include "qemu_agent.h"
|
Add API to change qemu agent response timeout
Some layered products such as oVirt have requested a way to avoid being
blocked by guest agent commands when querying a loaded vm. For example,
many guest agent commands are polled periodically to monitor changes,
and rather than blocking the calling process, they'd prefer to simply
time out when an agent query is taking too long.
This patch adds a way for the user to specify a custom agent timeout
that is applied to all agent commands.
One special case to note here is the 'guest-sync' command. 'guest-sync'
is issued internally prior to calling any other command. (For example,
when libvirt wants to call 'guest-get-fsinfo', we first call
'guest-sync' and then call 'guest-get-fsinfo').
Previously, the 'guest-sync' command used a 5-second timeout
(VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT), whereas the actual command that
followed always blocked indefinitely
(VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK). As part of this patch, if a
custom timeout is specified that is shorter than
5 seconds, this new timeout is also used for 'guest-sync'. If there is
no custom timeout or if the custom timeout is longer than 5 seconds, we
will continue to use the 5-second timeout.
Signed-off-by: Jonathon Jongsma <jjongsma@redhat.com>
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
2019-11-13 22:06:09 +00:00
|
|
|
#include "qemu_domain.h"
|
2012-12-12 18:06:53 +00:00
|
|
|
#include "viralloc.h"
|
2012-12-12 17:59:27 +00:00
|
|
|
#include "virlog.h"
|
2012-12-13 18:21:53 +00:00
|
|
|
#include "virerror.h"
|
2012-12-12 17:53:50 +00:00
|
|
|
#include "virjson.h"
|
2011-10-05 17:31:54 +00:00
|
|
|
#include "virfile.h"
|
2012-02-01 14:44:53 +00:00
|
|
|
#include "virtime.h"
|
2012-07-11 13:35:47 +00:00
|
|
|
#include "virobject.h"
|
2013-04-03 10:36:23 +00:00
|
|
|
#include "virstring.h"
|
2019-04-01 10:14:26 +00:00
|
|
|
#include "virenum.h"
|
2020-02-16 21:59:28 +00:00
|
|
|
#include "virutil.h"
|
2011-10-05 17:31:54 +00:00
|
|
|
|
|
|
|
#define VIR_FROM_THIS VIR_FROM_QEMU
|
|
|
|
|
2014-02-28 12:16:17 +00:00
|
|
|
VIR_LOG_INIT("qemu.qemu_agent");
|
|
|
|
|
2011-10-05 17:31:54 +00:00
|
|
|
#define LINE_ENDING "\n"
|
|
|
|
|
2018-03-01 14:55:26 +00:00
|
|
|
/* We read from QEMU until seeing a \r\n pair to indicate a
|
|
|
|
* completed reply or event. To avoid memory denial-of-service
|
|
|
|
* though, we must have a size limit on amount of data we
|
|
|
|
* buffer. 10 MB is large enough that it ought to cope with
|
|
|
|
* normal QEMU replies, and small enough that we're not
|
|
|
|
* consuming unreasonable mem.
|
|
|
|
*/
|
|
|
|
#define QEMU_AGENT_MAX_RESPONSE (10 * 1024 * 1024)
|
|
|
|
|
2011-10-05 17:31:54 +00:00
|
|
|
typedef struct _qemuAgentMessage qemuAgentMessage;
/* One in-flight request/reply exchange with the guest agent. */
struct _qemuAgentMessage {
    /* Outgoing JSON command text and transmit progress. */
    char *txBuffer;
    int txOffset;
    int txLength;

    /* Used by the JSON agent to hold reply / error */
    char *rxBuffer;
    int rxLength;
    void *rxObject;

    /* True if rxBuffer / rxObject are ready, or a
     * fatal error occurred on the agent channel
     */
    bool finished;
    /* true for sync command */
    bool sync;
    /* id of the issued sync command */
    unsigned long long id;
    /* true for the very first sync after connecting; garbage received
     * while parsing the reply is tolerated in that case */
    bool first;
};
|
|
|
|
|
|
|
|
|
|
|
|
/* State for a single guest-agent channel; instances are lockable
 * virObjects and must be locked before most fields are touched. */
struct _qemuAgent {
    virObjectLockable parent;

    /* Signalled when a reply arrives or the channel errors/EOFs. */
    virCond notify;

    /* Connected UNIX socket fd to the agent, or -1. */
    int fd;

    /* GLib main loop pieces driving the socket I/O callback. */
    GMainContext *context;
    GSocket *socket;
    GSource *watch;

    /* Set while connected to the agent socket; cleared on DISCONNECT so
     * waiters can be woken instead of blocking forever. */
    bool running;
    /* If true, only a single guest-sync is issued per connection. */
    bool singleSync;
    /* True while that single sync is being performed. */
    bool inSync;

    virDomainObj *vm;

    qemuAgentCallbacks *cb;

    /* If there's a command being processed this will be
     * non-NULL */
    qemuAgentMessage *msg;

    /* Buffer incoming data ready for agent
     * code to process & find message boundaries */
    size_t bufferOffset;
    size_t bufferLength;
    char *buffer;

    /* If anything went wrong, this will be fed back
     * the next agent msg */
    virError lastError;

    /* Some guest agent commands don't return anything
     * but fire up an event on qemu agent instead.
     * Take that as indication of successful completion */
    qemuAgentEvent await_event;
    /* Per-domain response timeout applied to agent commands. */
    int timeout;
};
|
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
static virClass *qemuAgentClass;
|
2012-07-11 13:35:47 +00:00
|
|
|
static void qemuAgentDispose(void *obj);
|
|
|
|
|
|
|
|
static int qemuAgentOnceInit(void)
|
|
|
|
{
|
2018-04-17 15:42:33 +00:00
|
|
|
if (!VIR_CLASS_NEW(qemuAgent, virClassForObjectLockable()))
|
2012-07-11 13:35:47 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-01-20 17:23:29 +00:00
|
|
|
VIR_ONCE_GLOBAL_INIT(qemuAgent);
|
2012-07-11 13:35:47 +00:00
|
|
|
|
|
|
|
|
2011-10-05 17:31:54 +00:00
|
|
|
|
2012-07-11 13:35:47 +00:00
|
|
|
/* virObject dispose callback: release everything the agent object
 * owns.  Runs once, when the last reference is dropped. */
static void qemuAgentDispose(void *obj)
{
    qemuAgent *agent = obj;

    VIR_DEBUG("agent=%p", agent);

    /* Drop the domain reference taken in qemuAgentOpen() */
    if (agent->vm)
        virObjectUnref(agent->vm);
    virCondDestroy(&agent->notify);
    g_free(agent->buffer);
    g_main_context_unref(agent->context);
    /* Free any message stored in lastError */
    virResetError(&agent->lastError);
}
|
|
|
|
|
|
|
|
static int
|
2020-02-12 17:31:20 +00:00
|
|
|
qemuAgentOpenUnix(const char *socketpath)
|
2011-10-05 17:31:54 +00:00
|
|
|
{
|
|
|
|
struct sockaddr_un addr;
|
2020-02-12 17:31:20 +00:00
|
|
|
int agentfd;
|
2011-10-05 17:31:54 +00:00
|
|
|
|
2020-02-12 17:31:20 +00:00
|
|
|
if ((agentfd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) {
|
2011-10-05 17:31:54 +00:00
|
|
|
virReportSystemError(errno,
|
|
|
|
"%s", _("failed to create socket"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-02-12 17:31:20 +00:00
|
|
|
if (virSetCloseExec(agentfd) < 0) {
|
2012-08-27 11:49:21 +00:00
|
|
|
virReportSystemError(errno, "%s",
|
2020-02-12 17:31:20 +00:00
|
|
|
_("Unable to set agent "
|
2012-08-27 11:49:21 +00:00
|
|
|
"close-on-exec flag"));
|
2011-10-05 17:31:54 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
memset(&addr, 0, sizeof(addr));
|
|
|
|
addr.sun_family = AF_UNIX;
|
2020-02-12 17:31:20 +00:00
|
|
|
if (virStrcpyStatic(addr.sun_path, socketpath) < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
2020-02-12 17:31:20 +00:00
|
|
|
_("Socket path %s too big for destination"), socketpath);
|
2011-10-05 17:31:54 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2021-11-23 16:34:36 +00:00
|
|
|
if (connect(agentfd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
|
2011-10-05 17:31:54 +00:00
|
|
|
virReportSystemError(errno, "%s",
|
2020-02-12 17:31:20 +00:00
|
|
|
_("failed to connect to agent socket"));
|
2011-10-05 17:31:54 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2020-02-12 17:31:20 +00:00
|
|
|
return agentfd;
|
2011-10-05 17:31:54 +00:00
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
error:
|
2020-02-12 17:31:20 +00:00
|
|
|
VIR_FORCE_CLOSE(agentfd);
|
2011-10-05 17:31:54 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentIOProcessEvent(qemuAgent *agent,
|
|
|
|
virJSONValue *obj)
|
2011-10-05 17:31:54 +00:00
|
|
|
{
|
|
|
|
const char *type;
|
2020-02-12 17:31:20 +00:00
|
|
|
VIR_DEBUG("agent=%p obj=%p", agent, obj);
|
2011-10-05 17:31:54 +00:00
|
|
|
|
|
|
|
type = virJSONValueObjectGetString(obj, "event");
|
|
|
|
if (!type) {
|
|
|
|
VIR_WARN("missing event type in message");
|
|
|
|
errno = EINVAL;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Parse one complete line received from the agent and route it:
 * greeting ("QMP"), event, or reply/error for the pending @msg.
 *
 * Returns 0 when the line was consumed (including lines deliberately
 * ignored), -1 on parse failure or unrecognised payload. */
static int
qemuAgentIOProcessLine(qemuAgent *agent,
                       const char *line,
                       qemuAgentMessage *msg)
{
    g_autoptr(virJSONValue) obj = NULL;

    VIR_DEBUG("Line [%s]", line);

    if (!(obj = virJSONValueFromString(line))) {
        /* receiving garbage on first sync is regular situation */
        if (msg && msg->sync && msg->first) {
            VIR_DEBUG("Received garbage on sync");
            msg->finished = true;
            return 0;
        }

        return -1;
    }

    if (virJSONValueGetType(obj) != VIR_JSON_TYPE_OBJECT) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Parsed JSON reply '%s' isn't an object"), line);
        return -1;
    }

    if (virJSONValueObjectHasKey(obj, "QMP") == 1) {
        /* Protocol greeting: nothing to do */
        return 0;
    } else if (virJSONValueObjectHasKey(obj, "event") == 1) {
        return qemuAgentIOProcessEvent(agent, obj);
    } else if (virJSONValueObjectHasKey(obj, "error") == 1 ||
               virJSONValueObjectHasKey(obj, "return") == 1) {
        if (msg) {
            if (msg->sync) {
                unsigned long long id;

                /* guest-sync replies carry the numeric token we sent;
                 * anything else is a stale reply from a previous
                 * command and must be skipped */
                if (virJSONValueObjectGetNumberUlong(obj, "return", &id) < 0) {
                    VIR_DEBUG("Ignoring delayed reply on sync");
                    return 0;
                }

                VIR_DEBUG("Guest returned ID: %llu", id);

                if (msg->id != id) {
                    VIR_DEBUG("Guest agent returned ID: %llu instead of %llu",
                              id, msg->id);
                    return 0;
                }
            }
            /* Hand the parsed object to the waiting command thread */
            msg->rxObject = g_steal_pointer(&obj);
            msg->finished = true;
        } else {
            /* we are out of sync */
            VIR_DEBUG("Ignoring delayed reply");
        }

        return 0;
    }

    virReportError(VIR_ERR_INTERNAL_ERROR,
                   _("Unknown JSON reply '%s'"), line);
    return -1;
}
|
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
static int qemuAgentIOProcessData(qemuAgent *agent,
|
2011-10-05 17:31:54 +00:00
|
|
|
char *data,
|
|
|
|
size_t len,
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentMessage *msg)
|
2011-10-05 17:31:54 +00:00
|
|
|
{
|
|
|
|
int used = 0;
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also santizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i = 0;
|
2011-10-05 17:31:54 +00:00
|
|
|
|
|
|
|
while (used < len) {
|
|
|
|
char *nl = strstr(data + used, LINE_ENDING);
|
|
|
|
|
|
|
|
if (nl) {
|
|
|
|
int got = nl - (data + used);
|
|
|
|
for (i = 0; i < strlen(LINE_ENDING); i++)
|
|
|
|
data[used + got + i] = '\0';
|
2020-02-12 17:31:20 +00:00
|
|
|
if (qemuAgentIOProcessLine(agent, data + used, msg) < 0)
|
2011-10-05 17:31:54 +00:00
|
|
|
return -1;
|
|
|
|
used += got + strlen(LINE_ENDING);
|
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_DEBUG("Total used %d bytes out of %zd available in buffer", used, len);
|
|
|
|
return used;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* This method processes data that has been received
|
2020-02-12 17:31:20 +00:00
|
|
|
* from the agent. Looking for async events and
|
2011-10-05 17:31:54 +00:00
|
|
|
* replies/errors.
|
|
|
|
*/
|
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentIOProcess(qemuAgent *agent)
|
2011-10-05 17:31:54 +00:00
|
|
|
{
|
|
|
|
int len;
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentMessage *msg = NULL;
|
2011-10-05 17:31:54 +00:00
|
|
|
|
|
|
|
/* See if there's a message ready for reply; that is,
|
|
|
|
* one that has completed writing all its data.
|
|
|
|
*/
|
2020-02-12 17:31:20 +00:00
|
|
|
if (agent->msg && agent->msg->txOffset == agent->msg->txLength)
|
|
|
|
msg = agent->msg;
|
2011-10-05 17:31:54 +00:00
|
|
|
|
2020-02-12 17:31:20 +00:00
|
|
|
len = qemuAgentIOProcessData(agent,
|
|
|
|
agent->buffer, agent->bufferOffset,
|
2011-10-05 17:31:54 +00:00
|
|
|
msg);
|
|
|
|
|
|
|
|
if (len < 0)
|
|
|
|
return -1;
|
|
|
|
|
2020-02-12 17:31:20 +00:00
|
|
|
if (len < agent->bufferOffset) {
|
|
|
|
memmove(agent->buffer, agent->buffer + len, agent->bufferOffset - len);
|
|
|
|
agent->bufferOffset -= len;
|
2011-10-05 17:31:54 +00:00
|
|
|
} else {
|
2020-02-12 17:31:20 +00:00
|
|
|
VIR_FREE(agent->buffer);
|
|
|
|
agent->bufferOffset = agent->bufferLength = 0;
|
2011-10-05 17:31:54 +00:00
|
|
|
}
|
|
|
|
if (msg && msg->finished)
|
2020-02-12 17:31:20 +00:00
|
|
|
virCondBroadcast(&agent->notify);
|
2011-10-05 17:31:54 +00:00
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
2020-02-12 17:31:20 +00:00
|
|
|
* Called when the agent is able to write data
|
|
|
|
* Call this function while holding the agent lock.
|
2011-10-05 17:31:54 +00:00
|
|
|
*/
|
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentIOWrite(qemuAgent *agent)
|
2011-10-05 17:31:54 +00:00
|
|
|
{
|
|
|
|
int done;
|
|
|
|
|
|
|
|
/* If no active message, or fully transmitted, then no-op */
|
2020-02-12 17:31:20 +00:00
|
|
|
if (!agent->msg || agent->msg->txOffset == agent->msg->txLength)
|
2011-10-05 17:31:54 +00:00
|
|
|
return 0;
|
|
|
|
|
2020-02-12 17:31:20 +00:00
|
|
|
done = safewrite(agent->fd,
|
|
|
|
agent->msg->txBuffer + agent->msg->txOffset,
|
|
|
|
agent->msg->txLength - agent->msg->txOffset);
|
2011-10-05 17:31:54 +00:00
|
|
|
|
|
|
|
if (done < 0) {
|
|
|
|
if (errno == EAGAIN)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
virReportSystemError(errno, "%s",
|
2020-02-12 17:31:20 +00:00
|
|
|
_("Unable to write to agent"));
|
2011-10-05 17:31:54 +00:00
|
|
|
return -1;
|
|
|
|
}
|
2020-02-12 17:31:20 +00:00
|
|
|
agent->msg->txOffset += done;
|
2011-10-05 17:31:54 +00:00
|
|
|
return done;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2020-02-12 17:31:20 +00:00
|
|
|
* Called when the agent has incoming data to read
|
|
|
|
* Call this function while holding the agent lock.
|
2011-10-05 17:31:54 +00:00
|
|
|
*
|
|
|
|
* Returns -1 on error, or number of bytes read
|
|
|
|
*/
|
|
|
|
/*
 * Called when the agent has incoming data to read.
 * Call this function while holding the agent lock.
 *
 * Returns -1 on error, or number of bytes read.
 */
static int
qemuAgentIORead(qemuAgent *agent)
{
    size_t avail = agent->bufferLength - agent->bufferOffset;
    int ret = 0;

    /* Grow the buffer in 1 KiB steps, but cap total size so a
     * misbehaving agent cannot exhaust memory */
    if (avail < 1024) {
        if (agent->bufferLength >= QEMU_AGENT_MAX_RESPONSE) {
            virReportSystemError(ERANGE,
                                 _("No complete agent response found in %d bytes"),
                                 QEMU_AGENT_MAX_RESPONSE);
            return -1;
        }
        VIR_REALLOC_N(agent->buffer, agent->bufferLength + 1024);
        agent->bufferLength += 1024;
        avail += 1024;
    }

    /* Read as much as we can get into our buffer,
       until we block on EAGAIN, or hit EOF */
    while (avail > 1) {  /* one byte is reserved for the NUL below */
        int got;
        got = read(agent->fd,
                   agent->buffer + agent->bufferOffset,
                   avail - 1);
        if (got < 0) {
            if (errno == EAGAIN)
                break;
            virReportSystemError(errno, "%s",
                                 _("Unable to read from agent"));
            ret = -1;
            break;
        }
        if (got == 0)
            break;  /* EOF: caller treats 0 total as hangup */

        ret += got;
        avail -= got;
        agent->bufferOffset += got;
        /* Keep the buffer NUL-terminated so it can be parsed as a string */
        agent->buffer[agent->bufferOffset] = '\0';
    }

    return ret;
}
|
|
|
|
|
|
|
|
|
2020-02-12 14:54:19 +00:00
|
|
|
static gboolean
|
|
|
|
qemuAgentIO(GSocket *socket,
|
|
|
|
GIOCondition cond,
|
|
|
|
gpointer opaque);
|
2011-10-05 17:31:54 +00:00
|
|
|
|
2020-02-12 14:54:19 +00:00
|
|
|
|
|
|
|
static void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentRegister(qemuAgent *agent)
|
2020-02-12 14:54:19 +00:00
|
|
|
{
|
|
|
|
GIOCondition cond = 0;
|
2018-10-12 02:19:28 +00:00
|
|
|
|
2020-02-12 17:31:20 +00:00
|
|
|
if (agent->lastError.code == VIR_ERR_OK) {
|
2020-02-12 14:54:19 +00:00
|
|
|
cond |= G_IO_IN;
|
2011-10-05 17:31:54 +00:00
|
|
|
|
2020-02-12 17:31:20 +00:00
|
|
|
if (agent->msg && agent->msg->txOffset < agent->msg->txLength)
|
2020-02-12 14:54:19 +00:00
|
|
|
cond |= G_IO_OUT;
|
2011-10-05 17:31:54 +00:00
|
|
|
}
|
|
|
|
|
2020-02-12 14:54:19 +00:00
|
|
|
agent->watch = g_socket_create_source(agent->socket,
|
|
|
|
cond,
|
|
|
|
NULL);
|
|
|
|
|
|
|
|
virObjectRef(agent);
|
|
|
|
g_source_set_callback(agent->watch,
|
|
|
|
(GSourceFunc)qemuAgentIO,
|
|
|
|
agent,
|
|
|
|
(GDestroyNotify)virObjectUnref);
|
|
|
|
|
|
|
|
g_source_attach(agent->watch,
|
|
|
|
agent->context);
|
2011-10-05 17:31:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Detach and release the socket watch, if any.
 *
 * NOTE: the order here is deliberate — g_source_destroy() must run
 * before the unref; reversing them marks an already-unreferenced
 * source as destroyed, a use-after-free with older glib (see the
 * partial revert of the g_clear_pointer() conversion). */
static void
qemuAgentUnregister(qemuAgent *agent)
{
    if (agent->watch) {
        g_source_destroy(agent->watch);
        vir_g_source_unref(agent->watch, agent->context);
        agent->watch = NULL;
    }
}
|
|
|
|
|
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
/* Recreate the socket watch so its condition mask reflects current
 * state (pending outgoing data, recorded error).  No watch is created
 * once the socket is gone. */
static void qemuAgentUpdateWatch(qemuAgent *agent)
{
    qemuAgentUnregister(agent);

    if (!agent->socket)
        return;

    qemuAgentRegister(agent);
}
|
|
|
|
|
|
|
|
|
|
|
|
/* GSource callback driving all agent socket I/O.
 *
 * Writes pending command bytes, reads and parses replies, and on
 * error/EOF records the failure, wakes any waiting command thread,
 * and invokes the eof/error notify callbacks with the agent lock
 * dropped.  Always returns G_SOURCE_REMOVE: the watch is recreated
 * by qemuAgentUpdateWatch() with a fresh condition mask. */
static gboolean
qemuAgentIO(GSocket *socket G_GNUC_UNUSED,
            GIOCondition cond,
            gpointer opaque)
{
    qemuAgent *agent = opaque;
    bool error = false;
    bool eof = false;

    /* Extra ref so the agent survives until our unlock/unref below */
    virObjectRef(agent);
    /* lock access to the agent and protect fd */
    virObjectLock(agent);

    /* Watch raced with close: nothing to do */
    if (agent->fd == -1 || !agent->watch) {
        virObjectUnlock(agent);
        virObjectUnref(agent);
        return G_SOURCE_REMOVE;
    }

    if (agent->lastError.code != VIR_ERR_OK) {
        /* A previous fatal error is still pending: just propagate it */
        if (cond & (G_IO_HUP | G_IO_ERR))
            eof = true;
        error = true;
    } else {
        if (cond & G_IO_OUT) {
            if (qemuAgentIOWrite(agent) < 0)
                error = true;
        }

        if (!error &&
            cond & G_IO_IN) {
            int got = qemuAgentIORead(agent);
            if (got < 0) {
                error = true;
            } else if (got == 0) {
                eof = true;
            } else {
                /* Ignore hangup/error cond if we read some data, to
                 * give time for that data to be consumed */
                cond = 0;

                if (qemuAgentIOProcess(agent) < 0)
                    error = true;
            }
        }

        if (!error &&
            cond & G_IO_HUP) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("End of file from agent socket"));
            eof = true;
        }

        if (!error && !eof &&
            cond & G_IO_ERR) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("Invalid file descriptor while waiting for agent"));
            eof = true;
        }
    }

    if (error || eof) {
        if (agent->lastError.code != VIR_ERR_OK) {
            /* Already have an error, so clear any new error */
            virResetLastError();
        } else {
            if (virGetLastErrorCode() == VIR_ERR_OK)
                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                               _("Error while processing agent IO"));
            /* Stash the error so subsequent commands fail fast */
            virCopyLastError(&agent->lastError);
            virResetLastError();
        }

        VIR_DEBUG("Error on agent %s", NULLSTR(agent->lastError.message));
        /* If IO process resulted in an error & we have a message,
         * then wakeup that waiter */
        if (agent->msg && !agent->msg->finished) {
            agent->msg->finished = true;
            virCondSignal(&agent->notify);
        }
    }

    qemuAgentUpdateWatch(agent);

    /* We have to unlock to avoid deadlock against command thread,
     * but is this safe ? I think it is, because the callback
     * will try to acquire the virDomainObj *mutex next */
    if (eof) {
        /* Capture callback and vm before releasing the lock */
        void (*eofNotify)(qemuAgent *, virDomainObj *)
            = agent->cb->eofNotify;
        virDomainObj *vm = agent->vm;

        /* Make sure anyone waiting wakes up now */
        virCondSignal(&agent->notify);
        virObjectUnlock(agent);
        virObjectUnref(agent);
        VIR_DEBUG("Triggering EOF callback");
        (eofNotify)(agent, vm);
    } else if (error) {
        void (*errorNotify)(qemuAgent *, virDomainObj *)
            = agent->cb->errorNotify;
        virDomainObj *vm = agent->vm;

        /* Make sure anyone waiting wakes up now */
        virCondSignal(&agent->notify);
        virObjectUnlock(agent);
        virObjectUnref(agent);
        VIR_DEBUG("Triggering error callback");
        (errorNotify)(agent, vm);
    } else {
        virObjectUnlock(agent);
        virObjectUnref(agent);
    }

    return G_SOURCE_REMOVE;
}
|
|
|
|
|
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
/**
 * qemuAgentOpen:
 * @vm: domain object; must be locked by the caller (the lock is briefly
 *      dropped around the blocking socket connect below)
 * @config: character device source for the agent channel; only
 *          VIR_DOMAIN_CHR_TYPE_UNIX is accepted
 * @context: GLib main context the agent's socket watch is registered on
 * @cb: lifecycle callbacks; @cb->eofNotify is mandatory
 * @singleSync: if true, 'guest-sync' is issued only once per connection
 *              (consumed later via agent->inSync)
 *
 * Connect to the guest agent channel of @vm and return a new, running
 * agent object. The agent holds a reference on @vm for its lifetime.
 *
 * Returns the new agent object on success, NULL on failure (an error is
 * reported).
 */
qemuAgent *
qemuAgentOpen(virDomainObj *vm,
              const virDomainChrSourceDef *config,
              GMainContext *context,
              qemuAgentCallbacks *cb,
              bool singleSync)
{
    qemuAgent *agent;
    g_autoptr(GError) gerr = NULL;

    if (!cb || !cb->eofNotify) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("EOF notify callback must be supplied"));
        return NULL;
    }

    if (qemuAgentInitialize() < 0)
        return NULL;

    if (!(agent = virObjectLockableNew(qemuAgentClass)))
        return NULL;

    /* Per-domain agent command timeout, configured by the user. */
    agent->timeout = QEMU_DOMAIN_PRIVATE(vm)->agentTimeout;
    agent->fd = -1;
    if (virCondInit(&agent->notify) < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot initialize agent condition"));
        /* agent->vm not yet set, so a plain unref is sufficient here. */
        virObjectUnref(agent);
        return NULL;
    }
    agent->vm = virObjectRef(vm);
    agent->cb = cb;
    agent->singleSync = singleSync;

    if (config->type != VIR_DOMAIN_CHR_TYPE_UNIX) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("unable to handle agent type: %s"),
                       virDomainChrTypeToString(config->type));
        goto cleanup;
    }

    /* NOTE(review): the vm lock is dropped around the connect —
     * presumably because connecting to the UNIX socket may block and
     * other threads need the vm meanwhile; confirm lock-order rules. */
    virObjectUnlock(vm);
    agent->fd = qemuAgentOpenUnix(config->data.nix.path);
    virObjectLock(vm);

    if (agent->fd == -1)
        goto cleanup;

    agent->context = g_main_context_ref(context);

    /* Wrap the fd in a GSocket; on success the GSocket owns the fd
     * (it is cleared together with the socket in qemuAgentClose). */
    agent->socket = g_socket_new_from_fd(agent->fd, &gerr);
    if (!agent->socket) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Unable to create socket object: %s"),
                       gerr->message);
        goto cleanup;
    }

    qemuAgentRegister(agent);

    agent->running = true;
    VIR_DEBUG("New agent %p fd=%d", agent, agent->fd);

    return agent;

 cleanup:
    /* qemuAgentClose() drops the reference taken above and tears down
     * any partially-initialized state. */
    qemuAgentClose(agent);
    return NULL;
}
|
|
|
|
|
|
|
|
|
processSerialChangedEvent: Close agent monitor early
https://bugzilla.redhat.com/show_bug.cgi?id=890648
So, imagine you've issued an API that involves guest agent. For
instance, you want to query guest's IP addresses. So the API acquires
QUERY_JOB, locks the guest agent and issues the agent command.
However, for some reason, guest agent replies to initial ping
correctly, but then crashes tragically while executing real command
(in this case guest-network-get-interfaces). Since initial ping went
well, libvirt thinks guest agent is accessible and awaits reply to the
real command. But it will never come. What will is a monitor event.
Our handler (processSerialChangedEvent) will try to acquire
MODIFY_JOB, which will fail obviously because the other thread that's
executing the API already holds a job. So the event handler exits
early, and the QUERY_JOB is never released nor ended.
The way how to solve this is to put flag somewhere in the monitor
internals. The flag is called @running and agent commands are issued
iff the flag is set. The flag itself is set when we connect to the
agent socket. And unset whenever we see DISCONNECT event from the
agent. Moreover, we must wake up all the threads waiting for the
agent. This is done by signalizing the condition they're waiting on.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
2015-05-07 09:19:38 +00:00
|
|
|
static void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentNotifyCloseLocked(qemuAgent *agent)
|
processSerialChangedEvent: Close agent monitor early
https://bugzilla.redhat.com/show_bug.cgi?id=890648
So, imagine you've issued an API that involves guest agent. For
instance, you want to query guest's IP addresses. So the API acquires
QUERY_JOB, locks the guest agent and issues the agent command.
However, for some reason, guest agent replies to initial ping
correctly, but then crashes tragically while executing real command
(in this case guest-network-get-interfaces). Since initial ping went
well, libvirt thinks guest agent is accessible and awaits reply to the
real command. But it will never come. What will is a monitor event.
Our handler (processSerialChangedEvent) will try to acquire
MODIFY_JOB, which will fail obviously because the other thread that's
executing the API already holds a job. So the event handler exits
early, and the QUERY_JOB is never released nor ended.
The way how to solve this is to put flag somewhere in the monitor
internals. The flag is called @running and agent commands are issued
iff the flag is set. The flag itself is set when we connect to the
agent socket. And unset whenever we see DISCONNECT event from the
agent. Moreover, we must wake up all the threads waiting for the
agent. This is done by signalizing the condition they're waiting on.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
2015-05-07 09:19:38 +00:00
|
|
|
{
|
2020-02-12 17:31:20 +00:00
|
|
|
if (agent) {
|
|
|
|
agent->running = false;
|
processSerialChangedEvent: Close agent monitor early
https://bugzilla.redhat.com/show_bug.cgi?id=890648
So, imagine you've issued an API that involves guest agent. For
instance, you want to query guest's IP addresses. So the API acquires
QUERY_JOB, locks the guest agent and issues the agent command.
However, for some reason, guest agent replies to initial ping
correctly, but then crashes tragically while executing real command
(in this case guest-network-get-interfaces). Since initial ping went
well, libvirt thinks guest agent is accessible and awaits reply to the
real command. But it will never come. What will is a monitor event.
Our handler (processSerialChangedEvent) will try to acquire
MODIFY_JOB, which will fail obviously because the other thread that's
executing the API already holds a job. So the event handler exits
early, and the QUERY_JOB is never released nor ended.
The way how to solve this is to put flag somewhere in the monitor
internals. The flag is called @running and agent commands are issued
iff the flag is set. The flag itself is set when we connect to the
agent socket. And unset whenever we see DISCONNECT event from the
agent. Moreover, we must wake up all the threads waiting for the
agent. This is done by signalizing the condition they're waiting on.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
2015-05-07 09:19:38 +00:00
|
|
|
|
|
|
|
/* If there is somebody waiting for a message
|
|
|
|
* wake him up. No message will arrive anyway. */
|
2020-02-12 17:31:20 +00:00
|
|
|
if (agent->msg && !agent->msg->finished) {
|
2020-05-05 06:05:18 +00:00
|
|
|
agent->msg->finished = true;
|
2020-02-12 17:31:20 +00:00
|
|
|
virCondSignal(&agent->notify);
|
processSerialChangedEvent: Close agent monitor early
https://bugzilla.redhat.com/show_bug.cgi?id=890648
So, imagine you've issued an API that involves guest agent. For
instance, you want to query guest's IP addresses. So the API acquires
QUERY_JOB, locks the guest agent and issues the agent command.
However, for some reason, guest agent replies to initial ping
correctly, but then crashes tragically while executing real command
(in this case guest-network-get-interfaces). Since initial ping went
well, libvirt thinks guest agent is accessible and awaits reply to the
real command. But it will never come. What will is a monitor event.
Our handler (processSerialChangedEvent) will try to acquire
MODIFY_JOB, which will fail obviously because the other thread that's
executing the API already holds a job. So the event handler exits
early, and the QUERY_JOB is never released nor ended.
The way how to solve this is to put flag somewhere in the monitor
internals. The flag is called @running and agent commands are issued
iff the flag is set. The flag itself is set when we connect to the
agent socket. And unset whenever we see DISCONNECT event from the
agent. Moreover, we must wake up all the threads waiting for the
agent. This is done by signalizing the condition they're waiting on.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
2015-05-07 09:19:38 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentNotifyClose(qemuAgent *agent)
|
processSerialChangedEvent: Close agent monitor early
https://bugzilla.redhat.com/show_bug.cgi?id=890648
So, imagine you've issued an API that involves guest agent. For
instance, you want to query guest's IP addresses. So the API acquires
QUERY_JOB, locks the guest agent and issues the agent command.
However, for some reason, guest agent replies to initial ping
correctly, but then crashes tragically while executing real command
(in this case guest-network-get-interfaces). Since initial ping went
well, libvirt thinks guest agent is accessible and awaits reply to the
real command. But it will never come. What will is a monitor event.
Our handler (processSerialChangedEvent) will try to acquire
MODIFY_JOB, which will fail obviously because the other thread that's
executing the API already holds a job. So the event handler exits
early, and the QUERY_JOB is never released nor ended.
The way how to solve this is to put flag somewhere in the monitor
internals. The flag is called @running and agent commands are issued
iff the flag is set. The flag itself is set when we connect to the
agent socket. And unset whenever we see DISCONNECT event from the
agent. Moreover, we must wake up all the threads waiting for the
agent. This is done by signalizing the condition they're waiting on.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
2015-05-07 09:19:38 +00:00
|
|
|
{
|
2020-02-12 17:31:20 +00:00
|
|
|
if (!agent)
|
processSerialChangedEvent: Close agent monitor early
https://bugzilla.redhat.com/show_bug.cgi?id=890648
So, imagine you've issued an API that involves guest agent. For
instance, you want to query guest's IP addresses. So the API acquires
QUERY_JOB, locks the guest agent and issues the agent command.
However, for some reason, guest agent replies to initial ping
correctly, but then crashes tragically while executing real command
(in this case guest-network-get-interfaces). Since initial ping went
well, libvirt thinks guest agent is accessible and awaits reply to the
real command. But it will never come. What will is a monitor event.
Our handler (processSerialChangedEvent) will try to acquire
MODIFY_JOB, which will fail obviously because the other thread that's
executing the API already holds a job. So the event handler exits
early, and the QUERY_JOB is never released nor ended.
The way how to solve this is to put flag somewhere in the monitor
internals. The flag is called @running and agent commands are issued
iff the flag is set. The flag itself is set when we connect to the
agent socket. And unset whenever we see DISCONNECT event from the
agent. Moreover, we must wake up all the threads waiting for the
agent. This is done by signalizing the condition they're waiting on.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
2015-05-07 09:19:38 +00:00
|
|
|
return;
|
|
|
|
|
2020-02-12 17:31:20 +00:00
|
|
|
VIR_DEBUG("agent=%p", agent);
|
processSerialChangedEvent: Close agent monitor early
https://bugzilla.redhat.com/show_bug.cgi?id=890648
So, imagine you've issued an API that involves guest agent. For
instance, you want to query guest's IP addresses. So the API acquires
QUERY_JOB, locks the guest agent and issues the agent command.
However, for some reason, guest agent replies to initial ping
correctly, but then crashes tragically while executing real command
(in this case guest-network-get-interfaces). Since initial ping went
well, libvirt thinks guest agent is accessible and awaits reply to the
real command. But it will never come. What will is a monitor event.
Our handler (processSerialChangedEvent) will try to acquire
MODIFY_JOB, which will fail obviously because the other thread that's
executing the API already holds a job. So the event handler exits
early, and the QUERY_JOB is never released nor ended.
The way how to solve this is to put flag somewhere in the monitor
internals. The flag is called @running and agent commands are issued
iff the flag is set. The flag itself is set when we connect to the
agent socket. And unset whenever we see DISCONNECT event from the
agent. Moreover, we must wake up all the threads waiting for the
agent. This is done by signalizing the condition they're waiting on.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
2015-05-07 09:19:38 +00:00
|
|
|
|
2022-03-24 12:13:14 +00:00
|
|
|
VIR_WITH_OBJECT_LOCK_GUARD(agent) {
|
|
|
|
qemuAgentNotifyCloseLocked(agent);
|
|
|
|
}
|
processSerialChangedEvent: Close agent monitor early
https://bugzilla.redhat.com/show_bug.cgi?id=890648
So, imagine you've issued an API that involves guest agent. For
instance, you want to query guest's IP addresses. So the API acquires
QUERY_JOB, locks the guest agent and issues the agent command.
However, for some reason, guest agent replies to initial ping
correctly, but then crashes tragically while executing real command
(in this case guest-network-get-interfaces). Since initial ping went
well, libvirt thinks guest agent is accessible and awaits reply to the
real command. But it will never come. What will is a monitor event.
Our handler (processSerialChangedEvent) will try to acquire
MODIFY_JOB, which will fail obviously because the other thread that's
executing the API already holds a job. So the event handler exits
early, and the QUERY_JOB is never released nor ended.
The way how to solve this is to put flag somewhere in the monitor
internals. The flag is called @running and agent commands are issued
iff the flag is set. The flag itself is set when we connect to the
agent socket. And unset whenever we see DISCONNECT event from the
agent. Moreover, we must wake up all the threads waiting for the
agent. This is done by signalizing the condition they're waiting on.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
2015-05-07 09:19:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
/* Tear down @agent: unregister and release the socket, wake any thread
 * waiting for a reply, and drop the reference taken in qemuAgentOpen().
 * Safe to call with a NULL @agent or a partially-initialized one (the
 * cleanup path of qemuAgentOpen relies on this). */
void qemuAgentClose(qemuAgent *agent)
{
    if (!agent)
        return;

    VIR_DEBUG("agent=%p", agent);

    VIR_WITH_OBJECT_LOCK_GUARD(agent) {
        if (agent->socket) {
            /* Remove the GSource watch before releasing the socket so
             * no callback fires on a dead object. */
            qemuAgentUnregister(agent);
            g_clear_pointer(&agent->socket, g_object_unref);
            /* fd is owned by the GSocket; just forget it here. */
            agent->fd = -1;
        }

        /* Wake up anyone still blocked in qemuAgentSend(). */
        qemuAgentNotifyCloseLocked(agent);
    }

    virObjectUnref(agent);
}
|
|
|
|
|
2012-08-30 12:19:02 +00:00
|
|
|
/* Default number of seconds to wait for an agent reply when the caller
 * passes VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT (see qemuAgentSend). */
#define QEMU_AGENT_WAIT_TIME 5
|
2011-10-05 17:31:54 +00:00
|
|
|
|
2012-02-01 14:44:53 +00:00
|
|
|
/**
 * qemuAgentSend:
 * @agent: agent object; must be locked by the caller — the condition
 *         waits below drop and re-acquire agent->parent.lock
 * @msg: Message
 * @seconds: number of seconds to wait for the result, it can be either
 *           -2, -1, 0 or positive.
 *
 * Send @msg to agent @agent. If @seconds is equal to
 * VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK(-2), this function will block forever
 * waiting for the result. The value of
 * VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT(-1) means use default timeout value
 * and VIR_DOMAIN_QEMU_AGENT_COMMAND_NOWAIT(0) makes this function return
 * immediately without waiting. Any positive value means the number of seconds
 * to wait for the result.
 *
 * Returns: 0 on success,
 *          -2 on timeout,
 *          -1 otherwise
 */
static int qemuAgentSend(qemuAgent *agent,
                         qemuAgentMessage *msg,
                         int seconds)
{
    int ret = -1;
    /* Absolute deadline in milliseconds; 0 means "wait forever". */
    unsigned long long then = 0;

    /* Check whether qemu quit unexpectedly */
    if (agent->lastError.code != VIR_ERR_OK) {
        VIR_DEBUG("Attempt to send command while error is set %s",
                  NULLSTR(agent->lastError.message));
        virSetError(&agent->lastError);
        return -1;
    }

    /* Anything above BLOCK(-2) carries a (possibly default) timeout. */
    if (seconds > VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK) {
        unsigned long long now;
        if (virTimeMillisNow(&now) < 0)
            return -1;
        if (seconds == VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT)
            seconds = QEMU_AGENT_WAIT_TIME;
        then = now + seconds * 1000ull;
    }

    /* Hand the message to the I/O watch; the watch transmits it and
     * flips msg->finished when the reply (or EOF/error) arrives. */
    agent->msg = msg;
    qemuAgentUpdateWatch(agent);

    while (!agent->msg->finished) {
        if ((then && virCondWaitUntil(&agent->notify, &agent->parent.lock, then) < 0) ||
            (!then && virCondWait(&agent->notify, &agent->parent.lock) < 0)) {
            if (errno == ETIMEDOUT) {
                virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s",
                               _("Guest agent not available for now"));
                ret = -2;
            } else {
                virReportSystemError(errno, "%s",
                                     _("Unable to wait on agent socket "
                                       "condition"));
            }
            /* A failed wait invalidates any previous guest-sync. */
            agent->inSync = false;
            goto cleanup;
        }
    }

    /* The I/O watch may have recorded an error while we were waiting. */
    if (agent->lastError.code != VIR_ERR_OK) {
        VIR_DEBUG("Send command resulted in error %s",
                  NULLSTR(agent->lastError.message));
        virSetError(&agent->lastError);
        goto cleanup;
    }

    ret = 0;

 cleanup:
    /* Detach the message and let the watch stop polling for writes. */
    agent->msg = NULL;
    qemuAgentUpdateWatch(agent);

    return ret;
}
|
|
|
|
|
|
|
|
|
2021-12-03 10:38:27 +00:00
|
|
|
/**
|
|
|
|
* qemuAgentGuestSyncSend:
|
|
|
|
* @agent: agent object
|
|
|
|
* @timeout: timeout for the command
|
|
|
|
* @first: true when this is the first invocation to drain possible leftovers
|
|
|
|
* from the pipe
|
|
|
|
*
|
|
|
|
* Sends a sync request to the guest agent.
|
|
|
|
* Returns: -1 on error
|
|
|
|
* 0 on successful send, but when no reply was received
|
|
|
|
* 1 when a reply was received
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
qemuAgentGuestSyncSend(qemuAgent *agent,
|
|
|
|
int timeout,
|
|
|
|
bool first)
|
|
|
|
{
|
|
|
|
g_autofree char *txMsg = NULL;
|
|
|
|
g_autoptr(virJSONValue) rxObj = NULL;
|
|
|
|
unsigned long long id;
|
|
|
|
qemuAgentMessage sync_msg;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
memset(&sync_msg, 0, sizeof(sync_msg));
|
|
|
|
|
|
|
|
if (virTimeMillisNow(&id) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
txMsg = g_strdup_printf("{\"execute\":\"guest-sync\", "
|
|
|
|
"\"arguments\":{\"id\":%llu}}\n", id);
|
|
|
|
|
|
|
|
sync_msg.txBuffer = txMsg;
|
|
|
|
sync_msg.txLength = strlen(txMsg);
|
|
|
|
sync_msg.sync = true;
|
|
|
|
sync_msg.id = id;
|
|
|
|
sync_msg.first = first;
|
|
|
|
|
|
|
|
VIR_DEBUG("Sending guest-sync command with ID: %llu", id);
|
|
|
|
|
|
|
|
rc = qemuAgentSend(agent, &sync_msg, timeout);
|
|
|
|
rxObj = g_steal_pointer(&sync_msg.rxObject);
|
|
|
|
|
|
|
|
VIR_DEBUG("qemuAgentSend returned: %d", rc);
|
|
|
|
|
|
|
|
if (rc < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (rxObj)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-02-01 14:44:53 +00:00
|
|
|
/**
|
|
|
|
* qemuAgentGuestSync:
|
2020-02-12 17:31:20 +00:00
|
|
|
* @agent: agent object
|
2012-02-01 14:44:53 +00:00
|
|
|
*
|
|
|
|
* Send guest-sync with unique ID
|
|
|
|
* and wait for reply. If we get one, check if
|
|
|
|
* received ID is equal to given.
|
|
|
|
*
|
|
|
|
* Returns: 0 on success,
|
|
|
|
* -1 otherwise
|
|
|
|
*/
|
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentGuestSync(qemuAgent *agent)
|
2012-02-01 14:44:53 +00:00
|
|
|
{
|
Add API to change qemu agent response timeout
Some layered products such as oVirt have requested a way to avoid being
blocked by guest agent commands when querying a loaded vm. For example,
many guest agent commands are polled periodically to monitor changes,
and rather than blocking the calling process, they'd prefer to simply
time out when an agent query is taking too long.
This patch adds a way for the user to specify a custom agent timeout
that is applied to all agent commands.
One special case to note here is the 'guest-sync' command. 'guest-sync'
is issued internally prior to calling any other command. (For example,
when libvirt wants to call 'guest-get-fsinfo', we first call
'guest-sync' and then call 'guest-get-fsinfo').
Previously, the 'guest-sync' command used a 5-second timeout
(VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT), whereas the actual command that
followed always blocked indefinitely
(VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK). As part of this patch, if a
custom timeout is specified that is shorter than
5 seconds, this new timeout is also used for 'guest-sync'. If there is
no custom timeout or if the custom timeout is longer than 5 seconds, we
will continue to use the 5-second timeout.
Signed-off-by: Jonathon Jongsma <jjongsma@redhat.com>
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
2019-11-13 22:06:09 +00:00
|
|
|
int timeout = VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT;
|
2021-12-03 10:38:27 +00:00
|
|
|
int rc;
|
Add API to change qemu agent response timeout
Some layered products such as oVirt have requested a way to avoid being
blocked by guest agent commands when querying a loaded vm. For example,
many guest agent commands are polled periodically to monitor changes,
and rather than blocking the calling process, they'd prefer to simply
time out when an agent query is taking too long.
This patch adds a way for the user to specify a custom agent timeout
that is applied to all agent commands.
One special case to note here is the 'guest-sync' command. 'guest-sync'
is issued internally prior to calling any other command. (For example,
when libvirt wants to call 'guest-get-fsinfo', we first call
'guest-sync' and then call 'guest-get-fsinfo').
Previously, the 'guest-sync' command used a 5-second timeout
(VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT), whereas the actual command that
followed always blocked indefinitely
(VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK). As part of this patch, if a
custom timeout is specified that is shorter than
5 seconds, this new timeout is also used for 'guest-sync'. If there is
no custom timeout or if the custom timeout is longer than 5 seconds, we
will continue to use the 5-second timeout.
Signed-off-by: Jonathon Jongsma <jjongsma@redhat.com>
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
2019-11-13 22:06:09 +00:00
|
|
|
|
2020-03-05 14:47:01 +00:00
|
|
|
if (agent->singleSync && agent->inSync)
|
|
|
|
return 0;
|
|
|
|
|
Add API to change qemu agent response timeout
Some layered products such as oVirt have requested a way to avoid being
blocked by guest agent commands when querying a loaded vm. For example,
many guest agent commands are polled periodically to monitor changes,
and rather than blocking the calling process, they'd prefer to simply
time out when an agent query is taking too long.
This patch adds a way for the user to specify a custom agent timeout
that is applied to all agent commands.
One special case to note here is the 'guest-sync' command. 'guest-sync'
is issued internally prior to calling any other command. (For example,
when libvirt wants to call 'guest-get-fsinfo', we first call
'guest-sync' and then call 'guest-get-fsinfo').
Previously, the 'guest-sync' command used a 5-second timeout
(VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT), whereas the actual command that
followed always blocked indefinitely
(VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK). As part of this patch, if a
custom timeout is specified that is shorter than
5 seconds, this new timeout is also used for 'guest-sync'. If there is
no custom timeout or if the custom timeout is longer than 5 seconds, we
will continue to use the 5-second timeout.
Signed-off-by: Jonathon Jongsma <jjongsma@redhat.com>
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
2019-11-13 22:06:09 +00:00
|
|
|
/* if user specified a custom agent timeout that is lower than the
|
|
|
|
* default timeout, use the shorter timeout instead */
|
2020-03-20 22:28:10 +00:00
|
|
|
if ((agent->timeout >= 0) && (agent->timeout < QEMU_AGENT_WAIT_TIME))
|
2020-02-12 17:31:20 +00:00
|
|
|
timeout = agent->timeout;
|
2012-02-01 14:44:53 +00:00
|
|
|
|
2021-12-03 10:38:27 +00:00
|
|
|
if ((rc = qemuAgentGuestSyncSend(agent, timeout, true)) < 0)
|
2012-02-01 14:44:53 +00:00
|
|
|
return -1;
|
|
|
|
|
2021-12-03 10:38:27 +00:00
|
|
|
/* successfully sync'd */
|
|
|
|
if (rc == 1)
|
|
|
|
return 0;
|
2012-02-01 14:44:53 +00:00
|
|
|
|
2021-12-03 10:38:27 +00:00
|
|
|
/* send another sync */
|
|
|
|
if ((rc = qemuAgentGuestSyncSend(agent, timeout, false)) < 0)
|
|
|
|
return -1;
|
2012-02-01 14:44:53 +00:00
|
|
|
|
2021-12-03 10:38:27 +00:00
|
|
|
/* successfully sync'd */
|
|
|
|
if (rc == 1)
|
|
|
|
return 0;
|
2020-03-05 14:47:01 +00:00
|
|
|
|
2021-12-03 10:38:27 +00:00
|
|
|
if (agent->running)
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Missing agent reply object"));
|
|
|
|
else
|
|
|
|
virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s",
|
|
|
|
_("Guest agent disappeared while executing command"));
|
2012-02-01 14:44:53 +00:00
|
|
|
|
2021-12-03 10:38:27 +00:00
|
|
|
return -1;
|
2012-02-01 14:44:53 +00:00
|
|
|
}
|
|
|
|
|
2012-04-12 14:35:24 +00:00
|
|
|
/* Translate a QMP error class name into a human readable message.
 * An unrecognized non-NULL class is returned verbatim; NULL maps to a
 * generic fallback string. */
static const char *
qemuAgentStringifyErrorClass(const char *klass)
{
    static const struct {
        const char *name;
        const char *message;
    } classes[] = {
        { "BufferOverrun", "Buffer overrun" },
        { "CommandDisabled", "The command has been disabled for this instance" },
        { "CommandNotFound", "The command has not been found" },
        { "FdNotFound", "File descriptor not found" },
        { "InvalidParameter", "Invalid parameter" },
        { "InvalidParameterType", "Invalid parameter type" },
        { "InvalidParameterValue", "Invalid parameter value" },
        { "OpenFileFailed", "Cannot open file" },
        { "QgaCommandFailed", "Guest agent command failed" },
        { "QMPBadInputObjectMember", "Bad QMP input object member" },
        { "QMPExtraInputObjectMember", "Unexpected extra object member" },
        { "UndefinedError", "An undefined error has occurred" },
        { "Unsupported", "this feature or command is not currently supported" },
    };
    size_t i;

    if (!klass)
        return "unknown QEMU command error";

    for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
        if (strcmp(klass, classes[i].name) == 0)
            return classes[i].message;
    }

    /* Unknown but non-NULL class: pass it through verbatim. */
    return klass;
}
|
2011-10-05 17:31:54 +00:00
|
|
|
|
|
|
|
/* Ignoring OOM in this method, since we're already reporting
|
|
|
|
* a more important error
|
|
|
|
*
|
|
|
|
* XXX see qerror.h for different klasses & fill out useful params
|
|
|
|
*/
|
|
|
|
static const char *
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentStringifyError(virJSONValue *error)
|
2011-10-05 17:31:54 +00:00
|
|
|
{
|
|
|
|
const char *klass = virJSONValueObjectGetString(error, "class");
|
2014-10-28 02:41:27 +00:00
|
|
|
const char *detail = virJSONValueObjectGetString(error, "desc");
|
2011-10-05 17:31:54 +00:00
|
|
|
|
|
|
|
/* The QMP 'desc' field is usually sufficient for our generic
|
2014-10-28 02:41:27 +00:00
|
|
|
* error reporting needs. However, if not present, translate
|
|
|
|
* the class into something readable.
|
2011-10-05 17:31:54 +00:00
|
|
|
*/
|
|
|
|
if (!detail)
|
2012-04-12 14:35:24 +00:00
|
|
|
detail = qemuAgentStringifyErrorClass(klass);
|
2011-10-05 17:31:54 +00:00
|
|
|
|
|
|
|
return detail;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const char *
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentCommandName(virJSONValue *cmd)
|
2011-10-05 17:31:54 +00:00
|
|
|
{
|
|
|
|
const char *name = virJSONValueObjectGetString(cmd, "execute");
|
|
|
|
if (name)
|
|
|
|
return name;
|
2021-11-23 17:20:10 +00:00
|
|
|
return "<unknown>";
|
2011-10-05 17:31:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Validate an agent @reply to @cmd.
 *
 * Returns 0 when the reply carries a 'return' member, -1 on any error
 * (reported), and -2 — without reporting — when @report_unsupported is
 * false and the error class is CommandNotFound/CommandDisabled, so
 * callers can treat an unsupported command as a soft failure. */
static int
qemuAgentCheckError(virJSONValue *cmd,
                    virJSONValue *reply,
                    bool report_unsupported)
{
    if (virJSONValueObjectHasKey(reply, "error")) {
        /* NB: the 'error' key may exist while the fetched value is
         * still NULL — both cases are handled below. */
        virJSONValue *error = virJSONValueObjectGet(reply, "error");
        g_autofree char *cmdstr = virJSONValueToString(cmd, false);
        g_autofree char *replystr = virJSONValueToString(reply, false);

        /* Log the full JSON formatted command & error */
        VIR_DEBUG("unable to execute QEMU agent command %s: %s",
                  NULLSTR(cmdstr), NULLSTR(replystr));

        /* Only send the user the command name + friendly error */
        if (!error) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("unable to execute QEMU agent command '%s'"),
                           qemuAgentCommandName(cmd));
            return -1;
        }

        if (!report_unsupported) {
            const char *klass = virJSONValueObjectGetString(error, "class");

            /* Caller asked to swallow "command not available" errors. */
            if (STREQ_NULLABLE(klass, "CommandNotFound") ||
                STREQ_NULLABLE(klass, "CommandDisabled"))
                return -2;
        }

        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("unable to execute QEMU agent command '%s': %s"),
                       qemuAgentCommandName(cmd),
                       qemuAgentStringifyError(error));

        return -1;
    }
    if (!virJSONValueObjectHasKey(reply, "return")) {
        /* Malformed reply: neither 'error' nor 'return' present. */
        g_autofree char *cmdstr = virJSONValueToString(cmd, false);
        g_autofree char *replystr = virJSONValueToString(reply, false);

        VIR_DEBUG("Neither 'return' nor 'error' is set in the JSON reply %s: %s",
                  NULLSTR(cmdstr), NULLSTR(replystr));
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("unable to execute QEMU agent command '%s'"),
                       qemuAgentCommandName(cmd));
        return -1;
    }
    return 0;
}
|
|
|
|
|
2014-04-02 06:57:59 +00:00
|
|
|
/**
 * qemuAgentCommandFull:
 * @agent: agent object
 * @cmd: JSON command to execute
 * @reply: filled with the agent's reply object on success (caller frees)
 * @seconds: timeout policy passed to qemuAgentSend()
 * @report_unsupported: passed through to qemuAgentCheckError()
 *
 * Serialize @cmd, synchronize with the agent, send the command and wait
 * for the reply.  A command that is expected to produce an event instead
 * of a reply (agent->await_event set by the caller before entry) is not
 * treated as an error when no reply object arrives.
 *
 * Returns 0 on success, -1 on error, -2 for unsupported commands when
 * @report_unsupported is false.
 */
static int
qemuAgentCommandFull(qemuAgent *agent,
                     virJSONValue *cmd,
                     virJSONValue **reply,
                     int seconds,
                     bool report_unsupported)
{
    int ret = -1;
    qemuAgentMessage msg;
    g_autofree char *cmdstr = NULL;
    /* snapshot the caller-requested event before qemuAgentGuestSync()
     * and qemuAgentSend() can run; it is reset unconditionally in cleanup */
    int await_event = agent->await_event;

    *reply = NULL;
    memset(&msg, 0, sizeof(msg));

    if (!agent->running) {
        virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s",
                       _("Guest agent disappeared while executing command"));
        goto cleanup;
    }

    /* ping the agent first so we don't block on a dead/ignoring agent */
    if (qemuAgentGuestSync(agent) < 0)
        goto cleanup;

    if (!(cmdstr = virJSONValueToString(cmd, false)))
        goto cleanup;
    msg.txBuffer = g_strdup_printf("%s" LINE_ENDING, cmdstr);
    msg.txLength = strlen(msg.txBuffer);

    VIR_DEBUG("Send command '%s' for write, seconds = %d", cmdstr, seconds);

    ret = qemuAgentSend(agent, &msg, seconds);

    VIR_DEBUG("Receive command reply ret=%d rxObject=%p",
              ret, msg.rxObject);

    if (ret < 0)
        goto cleanup;

    /* If we haven't obtained any reply but we wait for an
     * event, then don't report this as error */
    if (!msg.rxObject) {
        if (!await_event) {
            if (agent->running) {
                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                               _("Missing agent reply object"));
            } else {
                /* agent went away mid-command (e.g. guest crash) */
                virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s",
                               _("Guest agent disappeared while executing command"));
            }
            ret = -1;
        }
        goto cleanup;
    }

    *reply = msg.rxObject;
    ret = qemuAgentCheckError(cmd, *reply, report_unsupported);

 cleanup:
    VIR_FREE(msg.txBuffer);
    /* always clear the event expectation so stale state can't leak into
     * the next command */
    agent->await_event = QEMU_AGENT_EVENT_NONE;

    return ret;
}
|
|
|
|
|
2020-03-13 08:49:35 +00:00
|
|
|
/* Convenience wrapper around qemuAgentCommandFull() which always reports
 * unsupported agent commands as libvirt errors (so it only returns 0 or -1). */
static int
qemuAgentCommand(qemuAgent *agent,
                 virJSONValue *cmd,
                 virJSONValue **reply,
                 int seconds)
{
    return qemuAgentCommandFull(agent, cmd, reply, seconds, true);
}
|
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
/**
 * qemuAgentMakeCommand:
 * @cmdname: name of the guest agent command to execute
 * @...: NULL-terminated virJSONValueObjectAddVArgs() key/value list
 *       describing the command's arguments
 *
 * Build the JSON object { "execute": @cmdname, "arguments": {...} } for
 * the guest agent.  The 'arguments' member is omitted when no arguments
 * are given ("A:" modifier).
 *
 * Returns the new JSON value or NULL on failure (error reported).
 */
static virJSONValue *G_GNUC_NULL_TERMINATED
qemuAgentMakeCommand(const char *cmdname,
                     ...)
{
    g_autoptr(virJSONValue) obj = NULL;
    g_autoptr(virJSONValue) jargs = NULL;
    va_list args;

    va_start(args, cmdname);

    if (virJSONValueObjectAddVArgs(&jargs, args) < 0) {
        /* va_end must run on every exit path */
        va_end(args);
        return NULL;
    }

    va_end(args);

    if (virJSONValueObjectAdd(&obj,
                              "s:execute", cmdname,
                              "A:arguments", &jargs,
                              NULL) < 0)
        return NULL;

    return g_steal_pointer(&obj);
}
|
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
static virJSONValue *
|
2014-05-02 00:06:19 +00:00
|
|
|
qemuAgentMakeStringsArray(const char **strings, unsigned int len)
|
|
|
|
{
|
|
|
|
size_t i;
|
2021-02-12 10:32:46 +00:00
|
|
|
g_autoptr(virJSONValue) ret = virJSONValueNewArray();
|
2014-05-02 00:06:19 +00:00
|
|
|
|
|
|
|
for (i = 0; i < len; i++) {
|
2021-12-20 13:41:03 +00:00
|
|
|
if (virJSONValueArrayAppendString(ret, strings[i]) < 0)
|
2021-02-12 10:32:46 +00:00
|
|
|
return NULL;
|
2014-05-02 00:06:19 +00:00
|
|
|
}
|
|
|
|
|
2021-02-12 10:32:46 +00:00
|
|
|
return g_steal_pointer(&ret);
|
2014-05-02 00:06:19 +00:00
|
|
|
}
|
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
/**
 * qemuAgentNotifyEvent:
 * @agent: agent object
 * @event: the event that was observed
 *
 * Called when an agent-related event arrives.  If a thread is currently
 * waiting for exactly this event (agent->await_event), mark its pending
 * message as finished and signal the notify condition to wake it up.
 * Runs with the agent object locked for the whole body.
 */
void qemuAgentNotifyEvent(qemuAgent *agent,
                          qemuAgentEvent event)
{
    VIR_LOCK_GUARD lock = virObjectLockGuard(agent);

    VIR_DEBUG("agent=%p event=%d await_event=%d", agent, event, agent->await_event);
    if (agent->await_event == event) {
        agent->await_event = QEMU_AGENT_EVENT_NONE;
        /* somebody waiting for this event, wake him up. */
        if (agent->msg && !agent->msg->finished) {
            agent->msg->finished = true;
            virCondSignal(&agent->notify);
        }
    }
}
|
|
|
|
|
2011-10-05 17:31:54 +00:00
|
|
|
VIR_ENUM_DECL(qemuAgentShutdownMode);

/* Map qemuAgentShutdownMode values to the 'mode' strings accepted by the
 * agent's 'guest-shutdown' command. */
VIR_ENUM_IMPL(qemuAgentShutdownMode,
              QEMU_AGENT_SHUTDOWN_LAST,
              "powerdown", "reboot", "halt",
);
|
2011-10-05 17:31:54 +00:00
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
int qemuAgentShutdown(qemuAgent *agent,
|
2011-10-05 17:31:54 +00:00
|
|
|
qemuAgentShutdownMode mode)
|
|
|
|
{
|
2021-12-01 09:38:47 +00:00
|
|
|
g_autoptr(virJSONValue) cmd = NULL;
|
|
|
|
g_autoptr(virJSONValue) reply = NULL;
|
2011-10-05 17:31:54 +00:00
|
|
|
|
|
|
|
cmd = qemuAgentMakeCommand("guest-shutdown",
|
|
|
|
"s:mode", qemuAgentShutdownModeTypeToString(mode),
|
|
|
|
NULL);
|
|
|
|
if (!cmd)
|
|
|
|
return -1;
|
|
|
|
|
2012-09-04 10:01:43 +00:00
|
|
|
if (mode == QEMU_AGENT_SHUTDOWN_REBOOT)
|
2020-02-12 17:31:20 +00:00
|
|
|
agent->await_event = QEMU_AGENT_EVENT_RESET;
|
2012-09-04 10:01:43 +00:00
|
|
|
else
|
2020-02-12 17:31:20 +00:00
|
|
|
agent->await_event = QEMU_AGENT_EVENT_SHUTDOWN;
|
2011-10-05 17:31:54 +00:00
|
|
|
|
2021-12-01 09:44:50 +00:00
|
|
|
return qemuAgentCommand(agent, cmd, &reply,
|
|
|
|
VIR_DOMAIN_QEMU_AGENT_COMMAND_SHUTDOWN);
|
2011-10-05 17:31:54 +00:00
|
|
|
}
|
2012-01-24 20:13:40 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* qemuAgentFSFreeze:
|
2020-02-12 17:31:20 +00:00
|
|
|
* @agent: agent object
|
2014-05-02 00:06:19 +00:00
|
|
|
* @mountpoints: Array of mountpoint paths to be frozen, or NULL for all
|
|
|
|
* @nmountpoints: Number of mountpoints to be frozen, or 0 for all
|
2012-01-24 20:13:40 +00:00
|
|
|
*
|
|
|
|
* Issue guest-fsfreeze-freeze command to guest agent,
|
2014-05-02 00:06:19 +00:00
|
|
|
* which freezes file systems mounted on specified mountpoints
|
|
|
|
* (or all file systems when @mountpoints is NULL), and returns
|
2012-01-24 20:13:40 +00:00
|
|
|
* number of frozen file systems on success.
|
|
|
|
*
|
|
|
|
* Returns: number of file system frozen on success,
|
|
|
|
* -1 on error.
|
|
|
|
*/
|
2021-03-11 07:16:13 +00:00
|
|
|
int qemuAgentFSFreeze(qemuAgent *agent, const char **mountpoints,
|
2014-05-02 00:06:19 +00:00
|
|
|
unsigned int nmountpoints)
|
2012-01-24 20:13:40 +00:00
|
|
|
{
|
2021-12-01 09:44:50 +00:00
|
|
|
int nfrozen = 0;
|
2021-12-01 09:38:47 +00:00
|
|
|
g_autoptr(virJSONValue) cmd = NULL;
|
|
|
|
g_autoptr(virJSONValue) reply = NULL;
|
2012-01-24 20:13:40 +00:00
|
|
|
|
2014-05-02 00:06:19 +00:00
|
|
|
if (mountpoints && nmountpoints) {
|
2021-12-01 09:38:47 +00:00
|
|
|
g_autoptr(virJSONValue) arg = qemuAgentMakeStringsArray(mountpoints, nmountpoints);
|
2014-05-02 00:06:19 +00:00
|
|
|
if (!arg)
|
|
|
|
return -1;
|
|
|
|
|
2014-08-08 20:03:41 +00:00
|
|
|
cmd = qemuAgentMakeCommand("guest-fsfreeze-freeze-list",
|
2018-03-30 09:12:57 +00:00
|
|
|
"a:mountpoints", &arg, NULL);
|
2014-05-02 00:06:19 +00:00
|
|
|
} else {
|
|
|
|
cmd = qemuAgentMakeCommand("guest-fsfreeze-freeze", NULL);
|
|
|
|
}
|
2012-01-24 20:13:40 +00:00
|
|
|
|
|
|
|
if (!cmd)
|
2021-12-01 09:44:50 +00:00
|
|
|
return -1;
|
2012-01-24 20:13:40 +00:00
|
|
|
|
2020-03-05 14:47:02 +00:00
|
|
|
if (qemuAgentCommand(agent, cmd, &reply, agent->timeout) < 0)
|
2021-12-01 09:44:50 +00:00
|
|
|
return -1;
|
2012-01-24 20:13:40 +00:00
|
|
|
|
2021-12-01 09:44:50 +00:00
|
|
|
if (virJSONValueObjectGetNumberInt(reply, "return", &nfrozen) < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("malformed return value"));
|
2021-12-01 09:44:50 +00:00
|
|
|
return -1;
|
2012-01-24 20:13:40 +00:00
|
|
|
}
|
|
|
|
|
2021-12-01 09:44:50 +00:00
|
|
|
return nfrozen;
|
2012-01-24 20:13:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* qemuAgentFSThaw:
|
2020-02-12 17:31:20 +00:00
|
|
|
* @agent: agent object
|
2012-01-24 20:13:40 +00:00
|
|
|
*
|
|
|
|
* Issue guest-fsfreeze-thaw command to guest agent,
|
|
|
|
* which unfreezes all mounted file systems and returns
|
|
|
|
* number of thawed file systems on success.
|
|
|
|
*
|
|
|
|
* Returns: number of file system thawed on success,
|
|
|
|
* -1 on error.
|
|
|
|
*/
|
2021-03-11 07:16:13 +00:00
|
|
|
int qemuAgentFSThaw(qemuAgent *agent)
|
2012-01-24 20:13:40 +00:00
|
|
|
{
|
2021-12-01 09:44:50 +00:00
|
|
|
int nthawed = 0;
|
2021-12-01 09:38:47 +00:00
|
|
|
g_autoptr(virJSONValue) cmd = qemuAgentMakeCommand("guest-fsfreeze-thaw", NULL);
|
|
|
|
g_autoptr(virJSONValue) reply = NULL;
|
2012-01-24 20:13:40 +00:00
|
|
|
|
|
|
|
if (!cmd)
|
|
|
|
return -1;
|
|
|
|
|
2020-03-05 14:47:02 +00:00
|
|
|
if (qemuAgentCommand(agent, cmd, &reply, agent->timeout) < 0)
|
2021-12-01 09:44:50 +00:00
|
|
|
return -1;
|
2012-01-24 20:13:40 +00:00
|
|
|
|
2021-12-01 09:44:50 +00:00
|
|
|
if (virJSONValueObjectGetNumberInt(reply, "return", &nthawed) < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("malformed return value"));
|
2021-12-01 09:44:50 +00:00
|
|
|
return -1;
|
2012-01-24 20:13:40 +00:00
|
|
|
}
|
|
|
|
|
2021-12-01 09:44:50 +00:00
|
|
|
return nthawed;
|
2012-01-24 20:13:40 +00:00
|
|
|
}
|
2012-02-13 11:27:25 +00:00
|
|
|
|
|
|
|
VIR_ENUM_DECL(qemuAgentSuspendMode);

/* Map virNodeSuspendTarget values to the corresponding guest agent
 * suspend command names. */
VIR_ENUM_IMPL(qemuAgentSuspendMode,
              VIR_NODE_SUSPEND_TARGET_LAST,
              "guest-suspend-ram",
              "guest-suspend-disk",
              "guest-suspend-hybrid",
);
|
2012-02-13 11:27:25 +00:00
|
|
|
|
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentSuspend(qemuAgent *agent,
|
2012-02-13 11:27:25 +00:00
|
|
|
unsigned int target)
|
|
|
|
{
|
2021-12-01 09:38:47 +00:00
|
|
|
g_autoptr(virJSONValue) cmd = NULL;
|
|
|
|
g_autoptr(virJSONValue) reply = NULL;
|
2012-02-13 11:27:25 +00:00
|
|
|
|
|
|
|
cmd = qemuAgentMakeCommand(qemuAgentSuspendModeTypeToString(target),
|
|
|
|
NULL);
|
|
|
|
if (!cmd)
|
|
|
|
return -1;
|
|
|
|
|
2020-02-12 17:31:20 +00:00
|
|
|
agent->await_event = QEMU_AGENT_EVENT_SUSPEND;
|
2012-02-13 11:27:25 +00:00
|
|
|
|
2021-12-01 09:44:50 +00:00
|
|
|
return qemuAgentCommand(agent, cmd, &reply, agent->timeout);
|
2012-02-13 11:27:25 +00:00
|
|
|
}
|
2012-08-23 03:29:22 +00:00
|
|
|
|
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentArbitraryCommand(qemuAgent *agent,
|
2012-08-23 03:29:22 +00:00
|
|
|
const char *cmd_str,
|
|
|
|
char **result,
|
|
|
|
int timeout)
|
|
|
|
{
|
2021-12-01 09:44:50 +00:00
|
|
|
int rc;
|
2021-12-01 09:38:47 +00:00
|
|
|
g_autoptr(virJSONValue) cmd = NULL;
|
|
|
|
g_autoptr(virJSONValue) reply = NULL;
|
2012-08-23 03:29:22 +00:00
|
|
|
|
|
|
|
*result = NULL;
|
2013-06-03 13:58:31 +00:00
|
|
|
if (timeout < VIR_DOMAIN_QEMU_AGENT_COMMAND_MIN) {
|
|
|
|
virReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
_("guest agent timeout '%d' is "
|
|
|
|
"less than the minimum '%d'"),
|
|
|
|
timeout, VIR_DOMAIN_QEMU_AGENT_COMMAND_MIN);
|
2021-12-01 09:44:50 +00:00
|
|
|
return -1;
|
2013-06-03 13:58:31 +00:00
|
|
|
}
|
2012-08-23 03:29:22 +00:00
|
|
|
|
2013-06-03 13:58:31 +00:00
|
|
|
if (!(cmd = virJSONValueFromString(cmd_str)))
|
2021-12-01 09:44:50 +00:00
|
|
|
return -1;
|
2013-06-03 13:58:31 +00:00
|
|
|
|
2021-12-01 09:44:50 +00:00
|
|
|
if ((rc = qemuAgentCommand(agent, cmd, &reply, timeout)) < 0)
|
|
|
|
return rc;
|
2012-08-23 03:29:22 +00:00
|
|
|
|
2013-06-03 13:58:31 +00:00
|
|
|
if (!(*result = virJSONValueToString(reply, false)))
|
2021-12-01 09:44:50 +00:00
|
|
|
return -1;
|
2013-06-03 13:58:31 +00:00
|
|
|
|
2021-12-01 09:44:50 +00:00
|
|
|
return rc;
|
2012-08-23 03:29:22 +00:00
|
|
|
}
|
2012-11-20 16:10:29 +00:00
|
|
|
|
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentFSTrim(qemuAgent *agent,
|
2012-11-20 16:10:29 +00:00
|
|
|
unsigned long long minimum)
|
|
|
|
{
|
2021-12-01 09:38:47 +00:00
|
|
|
g_autoptr(virJSONValue) cmd = NULL;
|
|
|
|
g_autoptr(virJSONValue) reply = NULL;
|
2012-11-20 16:10:29 +00:00
|
|
|
|
|
|
|
cmd = qemuAgentMakeCommand("guest-fstrim",
|
|
|
|
"U:minimum", minimum,
|
|
|
|
NULL);
|
|
|
|
if (!cmd)
|
2021-12-01 09:44:50 +00:00
|
|
|
return -1;
|
2012-11-20 16:10:29 +00:00
|
|
|
|
2021-12-01 09:44:50 +00:00
|
|
|
return qemuAgentCommand(agent, cmd, &reply, agent->timeout);
|
2012-11-20 16:10:29 +00:00
|
|
|
}
|
2013-04-12 10:14:02 +00:00
|
|
|
|
|
|
|
/**
 * qemuAgentGetVCPUs:
 * @agent: agent object
 * @info: filled with a newly allocated array of vCPU records (caller frees)
 *
 * Query the guest's vCPU state via 'guest-get-vcpus'.  Every array entry
 * must carry 'logical-id' and 'online'; 'can-offline' is also required
 * here — its absence is treated as a malformed reply.
 *
 * Returns the number of entries in @info on success, -1 on error.
 * NOTE(review): on mid-loop failure @info stays allocated — presumably
 * callers free it unconditionally; confirm against callers.
 */
int
qemuAgentGetVCPUs(qemuAgent *agent,
                  qemuAgentCPUInfo **info)
{
    size_t i;
    g_autoptr(virJSONValue) cmd = NULL;
    g_autoptr(virJSONValue) reply = NULL;
    virJSONValue *data = NULL;
    size_t ndata;

    if (!(cmd = qemuAgentMakeCommand("guest-get-vcpus", NULL)))
        return -1;

    if (qemuAgentCommand(agent, cmd, &reply, agent->timeout) < 0)
        return -1;

    if (!(data = virJSONValueObjectGetArray(reply, "return"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest-get-vcpus reply was missing return data"));
        return -1;
    }

    ndata = virJSONValueArraySize(data);

    *info = g_new0(qemuAgentCPUInfo, ndata);

    for (i = 0; i < ndata; i++) {
        virJSONValue *entry = virJSONValueArrayGet(data, i);
        qemuAgentCPUInfo *in = *info + i;

        if (!entry) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("array element missing in guest-get-vcpus return "
                             "value"));
            return -1;
        }

        if (virJSONValueObjectGetNumberUint(entry, "logical-id", &in->id) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'logical-id' missing in reply of guest-get-vcpus"));
            return -1;
        }

        if (virJSONValueObjectGetBoolean(entry, "online", &in->online) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'online' missing in reply of guest-get-vcpus"));
            return -1;
        }

        if (virJSONValueObjectGetBoolean(entry, "can-offline",
                                         &in->offlinable) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'can-offline' missing in reply of guest-get-vcpus"));
            return -1;
        }
    }

    return ndata;
}
|
|
|
|
|
2016-06-20 12:15:50 +00:00
|
|
|
|
|
|
|
/* returns the value provided by the guest agent or -1 on internal error */
/*
 * qemuAgentSetVCPUsCommand:
 * @agent: agent object
 * @info: array of vCPU records; only entries with 'modified' set are sent
 * @ninfo: number of entries in @info
 * @nmodified: filled with the number of entries actually submitted
 *
 * Issue one 'guest-set-vcpus' call for all modified entries of @info.
 * Returns 0 without contacting the agent when nothing is modified.
 */
static int
qemuAgentSetVCPUsCommand(qemuAgent *agent,
                         qemuAgentCPUInfo *info,
                         size_t ninfo,
                         int *nmodified)
{
    g_autoptr(virJSONValue) cmd = NULL;
    g_autoptr(virJSONValue) reply = NULL;
    g_autoptr(virJSONValue) cpus = virJSONValueNewArray();
    size_t i;
    int ret;

    *nmodified = 0;

    for (i = 0; i < ninfo; i++) {
        qemuAgentCPUInfo *in = &info[i];
        g_autoptr(virJSONValue) cpu = virJSONValueNewObject();

        /* don't set state for cpus that were not touched */
        if (!in->modified)
            continue;

        (*nmodified)++;

        if (virJSONValueObjectAppendNumberInt(cpu, "logical-id", in->id) < 0)
            return -1;

        if (virJSONValueObjectAppendBoolean(cpu, "online", in->online) < 0)
            return -1;

        if (virJSONValueArrayAppend(cpus, &cpu) < 0)
            return -1;
    }

    if (*nmodified == 0)
        return 0;

    if (!(cmd = qemuAgentMakeCommand("guest-set-vcpus",
                                     "a:vcpus", &cpus,
                                     NULL)))
        return -1;

    if (qemuAgentCommand(agent, cmd, &reply, agent->timeout) < 0)
        return -1;

    /* All negative values are invalid. Return of 0 is bogus since we wouldn't
     * call the guest agent so that 0 cpus would be set successfully. Reporting
     * more successfully set vcpus that we've asked for is invalid. */
    if (virJSONValueObjectGetNumberInt(reply, "return", &ret) < 0 ||
        ret <= 0 || ret > *nmodified) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest agent returned malformed or invalid return value"));
        return -1;
    }

    return ret;
}
|
2013-07-30 10:04:21 +00:00
|
|
|
|
|
|
|
|
2016-06-20 12:15:50 +00:00
|
|
|
/**
 * Set the VCPU state using guest agent.
 *
 * Attempts to set the guest agent state for all cpus or until a proper error is
 * reported by the guest agent. This may require multiple calls.
 *
 * Returns -1 on error, 0 on success.
 */
int
qemuAgentSetVCPUs(qemuAgent *agent,
                  qemuAgentCPUInfo *info,
                  size_t ninfo)
{
    int rv;
    int nmodified;
    size_t i;

    do {
        /* rv is the count of vCPUs the agent reports it actually set */
        if ((rv = qemuAgentSetVCPUsCommand(agent, info, ninfo, &nmodified)) < 0)
            return -1;

        /* all vcpus were set successfully */
        if (rv == nmodified)
            return 0;

        /* un-mark vcpus that were already set */
        /* the agent applies entries in order, so the first rv modified
         * entries are done; clear them and retry with the remainder */
        for (i = 0; i < ninfo && rv > 0; i++) {
            if (!info[i].modified)
                continue;

            info[i].modified = false;
            rv--;
        }
    } while (1);

    /* not reached — loop exits only via the returns above */
    return 0;
}
|
|
|
|
|
|
|
|
|
2013-07-30 10:04:21 +00:00
|
|
|
/* modify the cpu info structure to set the correct amount of cpus */
/*
 * qemuAgentUpdateCPUInfo:
 * @nvcpus: requested number of online vCPUs
 * @cpuinfo: vCPU state array from qemuAgentGetVCPUs(); entries selected
 *           for a state change get 'online' updated and 'modified' set
 * @ncpuinfo: number of entries in @cpuinfo
 *
 * Plan which vCPUs to online/offline so that exactly @nvcpus are online.
 * Returns 0 on success, -1 on error (reported).
 */
int
qemuAgentUpdateCPUInfo(unsigned int nvcpus,
                       qemuAgentCPUInfo *cpuinfo,
                       int ncpuinfo)
{
    size_t i;
    int nonline = 0;
    int nofflinable = 0;
    ssize_t cpu0 = -1;

    /* count the active and offlinable cpus */
    for (i = 0; i < ncpuinfo; i++) {
        if (cpuinfo[i].id == 0)
            cpu0 = i;

        if (cpuinfo[i].online)
            nonline++;

        if (cpuinfo[i].offlinable && cpuinfo[i].online)
            nofflinable++;

        /* This shouldn't happen, but we can't trust the guest agent */
        if (!cpuinfo[i].online && !cpuinfo[i].offlinable) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("Invalid data provided by guest agent"));
            return -1;
        }
    }

    /* CPU0 was made offlinable in linux a while ago, but certain parts (suspend
     * to ram) of the kernel still don't cope well with that. Make sure that if
     * all remaining vCPUs are offlinable, vCPU0 will not be selected to be
     * offlined automatically */
    if (nofflinable == nonline && cpu0 >= 0 && cpuinfo[cpu0].online) {
        cpuinfo[cpu0].offlinable = false;
        nofflinable--;
    }

    /* the guest agent reported less cpus than requested */
    if (nvcpus > ncpuinfo) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest agent reports less cpu than requested"));
        return -1;
    }

    /* not enough offlinable CPUs to support the request */
    if (nvcpus < nonline - nofflinable) {
        virReportError(VIR_ERR_INVALID_ARG, "%s",
                       _("Cannot offline enough CPUs"));
        return -1;
    }

    /* walk the array once, unplugging or plugging until the online count
     * matches the request */
    for (i = 0; i < ncpuinfo; i++) {
        if (nvcpus < nonline) {
            /* unplug */
            if (cpuinfo[i].offlinable && cpuinfo[i].online) {
                cpuinfo[i].online = false;
                cpuinfo[i].modified = true;
                nonline--;
            }
        } else if (nvcpus > nonline) {
            /* plug */
            if (!cpuinfo[i].online) {
                cpuinfo[i].online = true;
                cpuinfo[i].modified = true;
                nonline++;
            }
        } else {
            /* done */
            break;
        }
    }

    return 0;
}
|
2014-04-02 17:05:42 +00:00
|
|
|
|
|
|
|
|
2020-03-16 07:26:34 +00:00
|
|
|
/**
|
|
|
|
* qemuAgentGetHostname:
|
|
|
|
*
|
|
|
|
* Gets the guest hostname using the guest agent.
|
|
|
|
*
|
|
|
|
* Returns 0 on success and fills @hostname. On error -1 is returned with an
|
|
|
|
* error reported and if '@report_unsupported' is false -2 is returned if the
|
|
|
|
* guest agent does not support the command without reporting an error
|
|
|
|
*/
|
2018-09-05 04:20:53 +00:00
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentGetHostname(qemuAgent *agent,
|
2020-03-16 07:26:34 +00:00
|
|
|
char **hostname,
|
|
|
|
bool report_unsupported)
|
2018-09-05 04:20:53 +00:00
|
|
|
{
|
2020-03-13 09:55:22 +00:00
|
|
|
g_autoptr(virJSONValue) cmd = qemuAgentMakeCommand("guest-get-host-name", NULL);
|
|
|
|
g_autoptr(virJSONValue) reply = NULL;
|
2021-03-11 07:16:13 +00:00
|
|
|
virJSONValue *data = NULL;
|
2018-09-05 04:20:53 +00:00
|
|
|
const char *result = NULL;
|
2020-03-16 07:26:34 +00:00
|
|
|
int rc;
|
2018-09-05 04:20:53 +00:00
|
|
|
|
|
|
|
if (!cmd)
|
2020-03-13 09:55:22 +00:00
|
|
|
return -1;
|
2018-09-05 04:20:53 +00:00
|
|
|
|
2020-03-16 07:26:34 +00:00
|
|
|
if ((rc = qemuAgentCommandFull(agent, cmd, &reply, agent->timeout,
|
|
|
|
report_unsupported)) < 0)
|
|
|
|
return rc;
|
2018-09-05 04:20:53 +00:00
|
|
|
|
|
|
|
if (!(data = virJSONValueObjectGet(reply, "return"))) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("malformed return value"));
|
2020-03-13 09:55:22 +00:00
|
|
|
return -1;
|
2018-09-05 04:20:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!(result = virJSONValueObjectGetString(data, "host-name"))) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("'host-name' missing in guest-get-host-name reply"));
|
2020-03-13 09:55:22 +00:00
|
|
|
return -1;
|
2018-09-05 04:20:53 +00:00
|
|
|
}
|
|
|
|
|
2019-10-20 11:49:46 +00:00
|
|
|
*hostname = g_strdup(result);
|
2018-09-05 04:20:53 +00:00
|
|
|
|
2020-03-13 09:55:22 +00:00
|
|
|
return 0;
|
2018-09-05 04:20:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-04-02 17:05:42 +00:00
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentGetTime(qemuAgent *agent,
|
2014-04-02 17:05:42 +00:00
|
|
|
long long *seconds,
|
|
|
|
unsigned int *nseconds)
|
|
|
|
{
|
|
|
|
unsigned long long json_time;
|
2021-12-01 09:38:47 +00:00
|
|
|
g_autoptr(virJSONValue) cmd = NULL;
|
|
|
|
g_autoptr(virJSONValue) reply = NULL;
|
2014-04-02 17:05:42 +00:00
|
|
|
|
|
|
|
cmd = qemuAgentMakeCommand("guest-get-time",
|
|
|
|
NULL);
|
|
|
|
if (!cmd)
|
2021-12-01 09:44:50 +00:00
|
|
|
return -1;
|
2014-04-02 17:05:42 +00:00
|
|
|
|
2020-03-05 14:47:02 +00:00
|
|
|
if (qemuAgentCommand(agent, cmd, &reply, agent->timeout) < 0)
|
2021-12-01 09:44:50 +00:00
|
|
|
return -1;
|
2014-04-02 17:05:42 +00:00
|
|
|
|
|
|
|
if (virJSONValueObjectGetNumberUlong(reply, "return", &json_time) < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("malformed return value"));
|
2021-12-01 09:44:50 +00:00
|
|
|
return -1;
|
2014-04-02 17:05:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* guest agent returns time in nanoseconds,
|
|
|
|
* we need it in seconds here */
|
|
|
|
*seconds = json_time / 1000000000LL;
|
|
|
|
*nseconds = json_time % 1000000000LL;
|
2021-12-01 09:44:50 +00:00
|
|
|
return 0;
|
2014-04-02 17:05:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * qemuAgentSetTime:
 * @setTime: time to set
 * @sync: let guest agent to read domain's RTC (@setTime is ignored)
 *
 * Issue 'guest-set-time'.  With @rtcSync the agent re-reads the RTC and
 * @seconds/@nseconds are ignored; otherwise the explicit time (converted
 * to nanoseconds) is sent.
 *
 * Returns 0 on success, -1 on error.
 */
int
qemuAgentSetTime(qemuAgent *agent,
                 long long seconds,
                 unsigned int nseconds,
                 bool rtcSync)
{
    g_autoptr(virJSONValue) cmd = NULL;
    g_autoptr(virJSONValue) reply = NULL;

    if (rtcSync) {
        cmd = qemuAgentMakeCommand("guest-set-time", NULL);
    } else {
        /* guest agent expect time with nanosecond granularity.
         * Impressing. */
        long long json_time;

        /* Check if we overflow. For some reason qemu doesn't handle unsigned
         * long long on the agent well as it silently truncates numbers to
         * signed long long. Therefore we must check overflow against LLONG_MAX
         * not ULLONG_MAX. */
        if (seconds > LLONG_MAX / 1000000000LL) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("Time '%lld' is too big for guest agent"),
                           seconds);
            return -1;
        }

        json_time = seconds * 1000000000LL;
        json_time += nseconds;
        cmd = qemuAgentMakeCommand("guest-set-time",
                                   "I:time", json_time,
                                   NULL);
    }

    if (!cmd)
        return -1;

    if (qemuAgentCommand(agent, cmd, &reply, agent->timeout) < 0)
        return -1;

    return 0;
}
|
2014-11-22 01:27:38 +00:00
|
|
|
|
2020-11-20 18:09:41 +00:00
|
|
|
void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentDiskAddressFree(qemuAgentDiskAddress *info)
|
2019-08-23 16:31:20 +00:00
|
|
|
{
|
|
|
|
if (!info)
|
|
|
|
return;
|
|
|
|
|
2020-01-10 23:32:16 +00:00
|
|
|
g_free(info->serial);
|
|
|
|
g_free(info->bus_type);
|
|
|
|
g_free(info->devnode);
|
2020-12-10 11:37:26 +00:00
|
|
|
g_free(info->ccw_addr);
|
2020-01-10 23:32:16 +00:00
|
|
|
g_free(info);
|
2019-08-23 16:31:20 +00:00
|
|
|
}
|
|
|
|
|
2020-11-20 18:09:45 +00:00
|
|
|
|
|
|
|
void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentDiskInfoFree(qemuAgentDiskInfo *info)
|
2020-11-20 18:09:45 +00:00
|
|
|
{
|
|
|
|
if (!info)
|
|
|
|
return;
|
|
|
|
|
|
|
|
g_free(info->name);
|
|
|
|
g_strfreev(info->dependencies);
|
|
|
|
qemuAgentDiskAddressFree(info->address);
|
|
|
|
g_free(info->alias);
|
|
|
|
g_free(info);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-01-10 23:32:13 +00:00
|
|
|
void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentFSInfoFree(qemuAgentFSInfo *info)
|
2019-08-23 16:31:20 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
if (!info)
|
|
|
|
return;
|
|
|
|
|
2020-01-10 23:32:16 +00:00
|
|
|
g_free(info->mountpoint);
|
|
|
|
g_free(info->name);
|
|
|
|
g_free(info->fstype);
|
2019-08-23 16:31:20 +00:00
|
|
|
|
|
|
|
for (i = 0; i < info->ndisks; i++)
|
2020-11-20 18:09:40 +00:00
|
|
|
qemuAgentDiskAddressFree(info->disks[i]);
|
2020-01-10 23:32:16 +00:00
|
|
|
g_free(info->disks);
|
2019-08-23 16:31:20 +00:00
|
|
|
|
2020-01-10 23:32:16 +00:00
|
|
|
g_free(info);
|
2019-08-23 16:31:20 +00:00
|
|
|
}
|
|
|
|
|
2020-11-20 18:09:42 +00:00
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
/**
 * qemuAgentGetDiskAddress:
 * @json: JSON object describing one disk, as reported by the guest agent
 *
 * Parse the bus/target/unit triple, the mandatory PCI controller address
 * and the optional CCW address of a single disk entry.
 *
 * Returns a newly allocated qemuAgentDiskAddress on success, or NULL on
 * failure (libvirt error is reported).
 */
static qemuAgentDiskAddress *
qemuAgentGetDiskAddress(virJSONValue *json)
{
    virJSONValue *pci;
    virJSONValue *ccw;
    g_autoptr(qemuAgentDiskAddress) addr = NULL;

    /* String fields are optional: g_strdup(NULL) yields NULL. */
    addr = g_new0(qemuAgentDiskAddress, 1);
    addr->bus_type = g_strdup(virJSONValueObjectGetString(json, "bus-type"));
    addr->serial = g_strdup(virJSONValueObjectGetString(json, "serial"));
    addr->devnode = g_strdup(virJSONValueObjectGetString(json, "dev"));

/* Fetch a mandatory unsigned integer member; on a missing/invalid field
 * report an error and return NULL from the enclosing function (the
 * partially built @addr is released by g_autoptr). */
#define GET_DISK_ADDR(jsonObject, var, name) \
    do { \
        if (virJSONValueObjectGetNumberUint(jsonObject, name, var) < 0) { \
            virReportError(VIR_ERR_INTERNAL_ERROR, \
                           _("'%s' missing"), name); \
            return NULL; \
        } \
    } while (0)

    GET_DISK_ADDR(json, &addr->bus, "bus");
    GET_DISK_ADDR(json, &addr->target, "target");
    GET_DISK_ADDR(json, &addr->unit, "unit");

    if (!(pci = virJSONValueObjectGet(json, "pci-controller"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("'pci-controller' missing"));
        return NULL;
    }

    GET_DISK_ADDR(pci, &addr->pci_controller.domain, "domain");
    GET_DISK_ADDR(pci, &addr->pci_controller.bus, "bus");
    GET_DISK_ADDR(pci, &addr->pci_controller.slot, "slot");
    GET_DISK_ADDR(pci, &addr->pci_controller.function, "function");

    /* CCW address is only present for s390 disks. */
    if ((ccw = virJSONValueObjectGet(json, "ccw-address"))) {
        g_autofree virCCWDeviceAddress *ccw_addr = NULL;

        ccw_addr = g_new0(virCCWDeviceAddress, 1);

        GET_DISK_ADDR(ccw, &ccw_addr->cssid, "cssid");
        if (ccw_addr->cssid == 0) /* Guest CSSID 0 is 0xfe on host */
            ccw_addr->cssid = 0xfe;
        GET_DISK_ADDR(ccw, &ccw_addr->ssid, "ssid");
        GET_DISK_ADDR(ccw, &ccw_addr->devno, "devno");

        addr->ccw_addr = g_steal_pointer(&ccw_addr);
    }
#undef GET_DISK_ADDR

    return g_steal_pointer(&addr);
}
|
|
|
|
|
|
|
|
|
2019-08-23 16:31:20 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentGetFSInfoFillDisks(virJSONValue *jsondisks,
|
|
|
|
qemuAgentFSInfo *fsinfo)
|
2019-08-23 16:31:20 +00:00
|
|
|
{
|
|
|
|
size_t ndisks;
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
if (!virJSONValueIsArray(jsondisks)) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Malformed guest-get-fsinfo 'disk' data array"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
ndisks = virJSONValueArraySize(jsondisks);
|
|
|
|
|
2020-01-10 23:32:15 +00:00
|
|
|
if (ndisks)
|
2021-03-11 07:16:13 +00:00
|
|
|
fsinfo->disks = g_new0(qemuAgentDiskAddress *, ndisks);
|
2019-08-23 16:31:20 +00:00
|
|
|
fsinfo->ndisks = ndisks;
|
|
|
|
|
|
|
|
for (i = 0; i < fsinfo->ndisks; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virJSONValue *jsondisk = virJSONValueArrayGet(jsondisks, i);
|
2019-08-23 16:31:20 +00:00
|
|
|
|
|
|
|
if (!jsondisk) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("array element '%zd' of '%zd' missing in "
|
|
|
|
"guest-get-fsinfo 'disk' data"),
|
|
|
|
i, fsinfo->ndisks);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-11-20 18:09:42 +00:00
|
|
|
if (!(fsinfo->disks[i] = qemuAgentGetDiskAddress(jsondisk)))
|
2019-08-23 16:31:20 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-03-12 15:37:11 +00:00
|
|
|
/* Returns: number of entries in '@info' on success
 *          -2 when agent command is not supported by the agent and
 *             'report_unsupported' is false (libvirt error is not reported)
 *          -1 otherwise (libvirt error is reported)
 */
int
qemuAgentGetFSInfo(qemuAgent *agent,
                   qemuAgentFSInfo ***info,
                   bool report_unsupported)
{
    size_t i;
    int ret = -1;
    g_autoptr(virJSONValue) cmd = NULL;
    g_autoptr(virJSONValue) reply = NULL;
    virJSONValue *data;
    size_t ndata = 0;
    qemuAgentFSInfo **info_ret = NULL;
    int rc;

    cmd = qemuAgentMakeCommand("guest-get-fsinfo", NULL);
    if (!cmd)
        return ret;

    if ((rc = qemuAgentCommandFull(agent, cmd, &reply, agent->timeout,
                                   report_unsupported)) < 0)
        return rc;

    if (!(data = virJSONValueObjectGet(reply, "return"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest-get-fsinfo reply was missing return data"));
        goto cleanup;
    }

    if (!virJSONValueIsArray(data)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Malformed guest-get-fsinfo data array"));
        goto cleanup;
    }

    ndata = virJSONValueArraySize(data);
    if (ndata == 0) {
        ret = 0;
        *info = NULL;
        goto cleanup;
    }
    info_ret = g_new0(qemuAgentFSInfo *, ndata);

    for (i = 0; i < ndata; i++) {
        /* Reverse the order to arrange in mount order */
        virJSONValue *entry = virJSONValueArrayGet(data, ndata - 1 - i);
        virJSONValue *disk;
        unsigned long long bytes_val;
        const char *result = NULL;

        if (!entry) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("array element '%zd' of '%zd' missing in "
                             "guest-get-fsinfo return data"),
                           i, ndata);
            goto cleanup;
        }

        info_ret[i] = g_new0(qemuAgentFSInfo, 1);

        /* 'mountpoint', 'name' and 'type' are mandatory per entry. */
        if (!(result = virJSONValueObjectGetString(entry, "mountpoint"))) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'mountpoint' missing in reply of "
                             "guest-get-fsinfo"));
            goto cleanup;
        }

        info_ret[i]->mountpoint = g_strdup(result);

        if (!(result = virJSONValueObjectGetString(entry, "name"))) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'name' missing in reply of guest-get-fsinfo"));
            goto cleanup;
        }

        info_ret[i]->name = g_strdup(result);

        if (!(result = virJSONValueObjectGetString(entry, "type"))) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'type' missing in reply of guest-get-fsinfo"));
            goto cleanup;
        }

        info_ret[i]->fstype = g_strdup(result);


        /* 'used-bytes' and 'total-bytes' were added in qemu-ga 3.0;
         * -1 signals "not reported by this agent". */
        if (virJSONValueObjectHasKey(entry, "used-bytes")) {
            if (virJSONValueObjectGetNumberUlong(entry, "used-bytes",
                                                 &bytes_val) < 0) {
                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                               _("Error getting 'used-bytes' in reply of guest-get-fsinfo"));
                goto cleanup;
            }
            info_ret[i]->used_bytes = bytes_val;
        } else {
            info_ret[i]->used_bytes = -1;
        }

        if (virJSONValueObjectHasKey(entry, "total-bytes")) {
            if (virJSONValueObjectGetNumberUlong(entry, "total-bytes",
                                                 &bytes_val) < 0) {
                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                               _("Error getting 'total-bytes' in reply of guest-get-fsinfo"));
                goto cleanup;
            }
            info_ret[i]->total_bytes = bytes_val;
        } else {
            info_ret[i]->total_bytes = -1;
        }

        if (!(disk = virJSONValueObjectGet(entry, "disk"))) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'disk' missing in reply of guest-get-fsinfo"));
            goto cleanup;
        }

        if (qemuAgentGetFSInfoFillDisks(disk, info_ret[i]) < 0)
            goto cleanup;
    }

    /* Ownership moves to the caller; info_ret becomes NULL so the
     * cleanup block below is a no-op on the success path. */
    *info = g_steal_pointer(&info_ret);
    ret = ndata;

 cleanup:
    if (info_ret) {
        for (i = 0; i < ndata; i++)
            qemuAgentFSInfoFree(info_ret[i]);
        g_free(info_ret);
    }
    return ret;
}
|
|
|
|
|
2020-10-05 18:35:08 +00:00
|
|
|
|
|
|
|
static int
|
|
|
|
qemuAgentGetInterfaceOneAddress(virDomainIPAddressPtr ip_addr,
|
2021-03-11 07:16:13 +00:00
|
|
|
virJSONValue *ip_addr_obj,
|
2020-10-05 18:35:08 +00:00
|
|
|
const char *name)
|
|
|
|
{
|
|
|
|
const char *type, *addr;
|
|
|
|
|
|
|
|
type = virJSONValueObjectGetString(ip_addr_obj, "ip-address-type");
|
|
|
|
if (!type) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("qemu agent didn't provide 'ip-address-type'"
|
|
|
|
" field for interface '%s'"), name);
|
|
|
|
return -1;
|
2020-10-07 12:04:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (STRNEQ(type, "ipv4") && STRNEQ(type, "ipv6")) {
|
2020-10-05 18:35:08 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("unknown ip address type '%s'"),
|
|
|
|
type);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
addr = virJSONValueObjectGetString(ip_addr_obj, "ip-address");
|
|
|
|
if (!addr) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("qemu agent didn't provide 'ip-address'"
|
|
|
|
" field for interface '%s'"), name);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (virJSONValueObjectGetNumberUint(ip_addr_obj, "prefix",
|
|
|
|
&ip_addr->prefix) < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("malformed 'prefix' field"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-10-07 12:04:04 +00:00
|
|
|
if (STREQ(type, "ipv4"))
|
|
|
|
ip_addr->type = VIR_IP_ADDR_TYPE_IPV4;
|
|
|
|
else
|
|
|
|
ip_addr->type = VIR_IP_ADDR_TYPE_IPV6;
|
|
|
|
|
|
|
|
ip_addr->addr = g_strdup(addr);
|
2020-10-05 18:35:08 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-10-05 19:48:12 +00:00
|
|
|
/**
 * qemuAgentGetInterfaceAddresses:
 * @ifaces_ret: the array to put/update the interface in
 * @ifaces_count: the number of interfaces in that array
 * @ifaces_store: hash table into @ifaces_ret by interface name
 * @iface_obj: one item from the JSON array of interfaces
 *
 * This function processes @iface_obj (which represents
 * information about a single interface) and adds the information
 * into the ifaces_ret array.
 *
 * If we're processing an interface alias, the suffix is stripped
 * and information is appended to the entry found via the @ifaces_store
 * hash table.
 *
 * Otherwise, the next free position in @ifaces_ret is used,
 * its address added to @ifaces_store, and @ifaces_count incremented.
 *
 * Returns 0 on success, -1 on error (libvirt error is reported).
 */
static int
qemuAgentGetInterfaceAddresses(virDomainInterfacePtr **ifaces_ret,
                               size_t *ifaces_count,
                               GHashTable *ifaces_store,
                               virJSONValue *iface_obj)
{
    virJSONValue *ip_addr_arr = NULL;
    const char *hwaddr, *name = NULL;
    virDomainInterfacePtr iface = NULL;
    g_autofree char *ifname = NULL;
    size_t addrs_count = 0;
    size_t j;

    /* interface name is required to be presented */
    name = virJSONValueObjectGetString(iface_obj, "name");
    if (!name) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("qemu agent didn't provide 'name' field"));
        return -1;
    }

    /* Handle interface alias (<ifname>:<alias>) — truncating at the
     * first ':' leaves only the base interface name in @ifname. */
    ifname = g_strdelimit(g_strdup(name), ":", '\0');

    iface = virHashLookup(ifaces_store, ifname);

    /* If the hash table doesn't contain this iface, add it */
    if (!iface) {
        VIR_EXPAND_N(*ifaces_ret, *ifaces_count, 1);

        /* The new entry is owned by *ifaces_ret; the hash table only
         * borrows the pointer (its free func is NULL). */
        iface = g_new0(virDomainInterface, 1);
        (*ifaces_ret)[*ifaces_count - 1] = iface;

        if (virHashAddEntry(ifaces_store, ifname, iface) < 0)
            return -1;

        iface->naddrs = 0;
        iface->name = g_strdup(ifname);

        /* hardware-address is optional; g_strdup(NULL) yields NULL. */
        hwaddr = virJSONValueObjectGetString(iface_obj, "hardware-address");
        iface->hwaddr = g_strdup(hwaddr);
    }

    /* as well as IP address which - moreover -
     * can be presented multiple times */
    ip_addr_arr = virJSONValueObjectGet(iface_obj, "ip-addresses");
    if (!ip_addr_arr)
        return 0;

    if (!virJSONValueIsArray(ip_addr_arr)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Malformed ip-addresses array"));
        return -1;
    }

    /* If current iface already exists, continue with the count */
    addrs_count = iface->naddrs;

    VIR_EXPAND_N(iface->addrs, addrs_count, virJSONValueArraySize(ip_addr_arr));

    for (j = 0; j < virJSONValueArraySize(ip_addr_arr); j++) {
        virJSONValue *ip_addr_obj = virJSONValueArrayGet(ip_addr_arr, j);
        /* naddrs is bumped before parsing so that a partially parsed
         * address is still released by virDomainInterfaceFree(). */
        virDomainIPAddressPtr ip_addr = iface->addrs + iface->naddrs;
        iface->naddrs++;

        if (qemuAgentGetInterfaceOneAddress(ip_addr, ip_addr_obj, name) < 0)
            return -1;
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
2020-10-07 12:33:08 +00:00
|
|
|
static int
|
|
|
|
qemuAgentGetAllInterfaceAddresses(virDomainInterfacePtr **ifaces_ret,
|
2021-03-11 07:16:13 +00:00
|
|
|
virJSONValue *ret_array)
|
2020-10-07 12:33:08 +00:00
|
|
|
{
|
2020-10-22 17:04:18 +00:00
|
|
|
g_autoptr(GHashTable) ifaces_store = NULL;
|
2020-10-07 12:33:08 +00:00
|
|
|
size_t ifaces_count = 0;
|
|
|
|
size_t i;
|
|
|
|
|
2020-10-15 12:54:39 +00:00
|
|
|
*ifaces_ret = NULL;
|
2020-10-07 12:33:08 +00:00
|
|
|
/* Hash table to handle the interface alias */
|
|
|
|
ifaces_store = virHashNew(NULL);
|
|
|
|
|
|
|
|
for (i = 0; i < virJSONValueArraySize(ret_array); i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virJSONValue *iface_obj = virJSONValueArrayGet(ret_array, i);
|
2020-10-07 12:33:08 +00:00
|
|
|
|
|
|
|
if (qemuAgentGetInterfaceAddresses(ifaces_ret, &ifaces_count,
|
|
|
|
ifaces_store, iface_obj) < 0)
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ifaces_count;
|
|
|
|
|
|
|
|
error:
|
2020-10-15 12:54:39 +00:00
|
|
|
if (*ifaces_ret) {
|
2020-10-07 12:33:08 +00:00
|
|
|
for (i = 0; i < ifaces_count; i++)
|
2020-10-15 12:54:39 +00:00
|
|
|
virDomainInterfaceFree((*ifaces_ret)[i]);
|
2020-10-07 12:33:08 +00:00
|
|
|
}
|
|
|
|
VIR_FREE(*ifaces_ret);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-01-25 18:38:48 +00:00
|
|
|
/*
|
|
|
|
* qemuAgentGetInterfaces:
|
2020-02-12 17:31:20 +00:00
|
|
|
* @agent: agent object
|
2015-01-25 18:38:48 +00:00
|
|
|
* @ifaces: pointer to an array of pointers pointing to interface objects
|
|
|
|
*
|
|
|
|
* Issue guest-network-get-interfaces to guest agent, which returns a
|
|
|
|
* list of interfaces of a running domain along with their IP and MAC
|
|
|
|
* addresses.
|
|
|
|
*
|
|
|
|
* Returns: number of interfaces on success, -1 on error.
|
|
|
|
*/
|
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentGetInterfaces(qemuAgent *agent,
|
2021-10-15 10:07:49 +00:00
|
|
|
virDomainInterfacePtr **ifaces,
|
|
|
|
bool report_unsupported)
|
2015-01-25 18:38:48 +00:00
|
|
|
{
|
2020-10-05 20:04:34 +00:00
|
|
|
g_autoptr(virJSONValue) cmd = NULL;
|
|
|
|
g_autoptr(virJSONValue) reply = NULL;
|
2021-03-11 07:16:13 +00:00
|
|
|
virJSONValue *ret_array = NULL;
|
2021-10-15 10:07:49 +00:00
|
|
|
int rc;
|
2015-01-25 18:38:48 +00:00
|
|
|
|
|
|
|
if (!(cmd = qemuAgentMakeCommand("guest-network-get-interfaces", NULL)))
|
2020-10-05 20:05:55 +00:00
|
|
|
return -1;
|
2015-01-25 18:38:48 +00:00
|
|
|
|
2021-10-15 10:07:49 +00:00
|
|
|
if ((rc = qemuAgentCommandFull(agent, cmd, &reply, agent->timeout,
|
|
|
|
report_unsupported)) < 0)
|
|
|
|
return rc;
|
2015-01-25 18:38:48 +00:00
|
|
|
|
2020-10-05 20:09:08 +00:00
|
|
|
if (!(ret_array = virJSONValueObjectGetArray(reply, "return"))) {
|
2015-01-25 18:38:48 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("qemu agent didn't return an array of interfaces"));
|
2020-10-05 20:05:55 +00:00
|
|
|
return -1;
|
2015-01-25 18:38:48 +00:00
|
|
|
}
|
|
|
|
|
2020-10-07 12:33:08 +00:00
|
|
|
return qemuAgentGetAllInterfaceAddresses(ifaces, ret_array);
|
2015-01-25 18:38:48 +00:00
|
|
|
}
|
2015-05-18 10:42:07 +00:00
|
|
|
|
|
|
|
|
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentSetUserPassword(qemuAgent *agent,
|
2015-05-18 10:42:07 +00:00
|
|
|
const char *user,
|
|
|
|
const char *password,
|
|
|
|
bool crypted)
|
|
|
|
{
|
2020-03-23 11:13:59 +00:00
|
|
|
g_autoptr(virJSONValue) cmd = NULL;
|
|
|
|
g_autoptr(virJSONValue) reply = NULL;
|
|
|
|
g_autofree char *password64 = NULL;
|
2015-05-18 10:42:07 +00:00
|
|
|
|
2019-09-16 12:29:20 +00:00
|
|
|
password64 = g_base64_encode((unsigned char *)password,
|
|
|
|
strlen(password));
|
2015-05-18 10:42:07 +00:00
|
|
|
|
|
|
|
if (!(cmd = qemuAgentMakeCommand("guest-set-user-password",
|
|
|
|
"b:crypted", crypted,
|
|
|
|
"s:username", user,
|
|
|
|
"s:password", password64,
|
|
|
|
NULL)))
|
2020-03-23 11:13:59 +00:00
|
|
|
return -1;
|
2015-05-18 10:42:07 +00:00
|
|
|
|
2020-03-05 14:47:02 +00:00
|
|
|
if (qemuAgentCommand(agent, cmd, &reply, agent->timeout) < 0)
|
2020-03-23 11:13:59 +00:00
|
|
|
return -1;
|
2015-05-18 10:42:07 +00:00
|
|
|
|
2020-03-23 11:13:59 +00:00
|
|
|
return 0;
|
2015-05-18 10:42:07 +00:00
|
|
|
}
|
2019-08-23 16:31:17 +00:00
|
|
|
|
2019-08-27 20:35:54 +00:00
|
|
|
/* Returns: 0 on success
 *          -2 when agent command is not supported by the agent and
 *             'report_unsupported' is false (libvirt error is not reported)
 *          -1 otherwise (libvirt error is reported)
 *
 * Appends 'user.count' plus per-user 'user.N.name', optional
 * 'user.N.domain' and 'user.N.login-time' entries to the typed
 * parameter list.
 */
int
qemuAgentGetUsers(qemuAgent *agent,
                  virTypedParameterPtr *params,
                  int *nparams,
                  int *maxparams,
                  bool report_unsupported)
{
    g_autoptr(virJSONValue) cmd = NULL;
    g_autoptr(virJSONValue) reply = NULL;
    virJSONValue *data = NULL;
    size_t ndata;
    size_t i;
    int rc;

    if (!(cmd = qemuAgentMakeCommand("guest-get-users", NULL)))
        return -1;

    if ((rc = qemuAgentCommandFull(agent, cmd, &reply, agent->timeout,
                                   report_unsupported)) < 0)
        return rc;

    if (!(data = virJSONValueObjectGetArray(reply, "return"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest-get-users reply was missing return data"));
        return -1;
    }

    ndata = virJSONValueArraySize(data);

    if (virTypedParamsAddUInt(params, nparams, maxparams,
                              "user.count", ndata) < 0)
        return -1;

    for (i = 0; i < ndata; i++) {
        virJSONValue *entry = virJSONValueArrayGet(data, i);
        char param_name[VIR_TYPED_PARAM_FIELD_LENGTH];
        const char *strvalue;
        double logintime;

        if (!entry) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("array element missing in guest-get-users return "
                             "value"));
            return -1;
        }

        if (!(strvalue = virJSONValueObjectGetString(entry, "user"))) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'user' missing in reply of guest-get-users"));
            return -1;
        }

        g_snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH, "user.%zu.name", i);
        if (virTypedParamsAddString(params, nparams, maxparams,
                                    param_name, strvalue) < 0)
            return -1;

        /* 'domain' is only present for windows guests */
        if ((strvalue = virJSONValueObjectGetString(entry, "domain"))) {
            g_snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH,
                       "user.%zu.domain", i);
            if (virTypedParamsAddString(params, nparams, maxparams,
                                        param_name, strvalue) < 0)
                return -1;
        }

        if (virJSONValueObjectGetNumberDouble(entry, "login-time", &logintime) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'login-time' missing in reply of guest-get-users"));
            return -1;
        }
        /* agent reports login-time as fractional seconds; the typed
         * parameter is stored in milliseconds */
        g_snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH,
                   "user.%zu.login-time", i);
        if (virTypedParamsAddULLong(params, nparams, maxparams,
                                    param_name, logintime * 1000) < 0)
            return -1;
    }

    return 0;
}
|
2019-08-23 16:31:18 +00:00
|
|
|
|
2019-08-27 20:35:54 +00:00
|
|
|
/* Returns: 0 on success
 *          -2 when agent command is not supported by the agent and
 *             'report_unsupported' is false (libvirt error is not reported)
 *          -1 otherwise (libvirt error is reported)
 *
 * Appends the 'os.*' typed parameters reported by 'guest-get-osinfo'
 * to the parameter list; every field is optional and silently skipped
 * when absent from the reply.
 */
int
qemuAgentGetOSInfo(qemuAgent *agent,
                   virTypedParameterPtr *params,
                   int *nparams,
                   int *maxparams,
                   bool report_unsupported)
{
    g_autoptr(virJSONValue) cmd = NULL;
    g_autoptr(virJSONValue) reply = NULL;
    virJSONValue *data = NULL;
    int rc;

    if (!(cmd = qemuAgentMakeCommand("guest-get-osinfo", NULL)))
        return -1;

    if ((rc = qemuAgentCommandFull(agent, cmd, &reply, agent->timeout,
                                   report_unsupported)) < 0)
        return rc;

    if (!(data = virJSONValueObjectGetObject(reply, "return"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest-get-osinfo reply was missing return data"));
        return -1;
    }

/* Copy one optional string member of the reply into a typed parameter;
 * a missing member is not an error, a failed parameter add is. */
#define OSINFO_ADD_PARAM(agent_string_, param_string_) \
    do { \
        const char *result; \
        if ((result = virJSONValueObjectGetString(data, agent_string_))) { \
            if (virTypedParamsAddString(params, nparams, maxparams, \
                                        param_string_, result) < 0) { \
                return -1; \
            } \
        } \
    } while (0)
    OSINFO_ADD_PARAM("id", "os.id");
    OSINFO_ADD_PARAM("name", "os.name");
    OSINFO_ADD_PARAM("pretty-name", "os.pretty-name");
    OSINFO_ADD_PARAM("version", "os.version");
    OSINFO_ADD_PARAM("version-id", "os.version-id");
    OSINFO_ADD_PARAM("machine", "os.machine");
    OSINFO_ADD_PARAM("variant", "os.variant");
    OSINFO_ADD_PARAM("variant-id", "os.variant-id");
    OSINFO_ADD_PARAM("kernel-release", "os.kernel-release");
    OSINFO_ADD_PARAM("kernel-version", "os.kernel-version");

    return 0;
}
|
2019-08-23 16:31:19 +00:00
|
|
|
|
2019-08-27 20:35:54 +00:00
|
|
|
/* Returns: 0 on success
|
2020-03-16 07:37:13 +00:00
|
|
|
* -2 when agent command is not supported by the agent and
|
|
|
|
* 'report_unsupported' is false (libvirt error is not reported)
|
|
|
|
* -1 otherwise (libvirt error is reported)
|
2019-08-27 20:35:54 +00:00
|
|
|
*/
|
2019-08-23 16:31:19 +00:00
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentGetTimezone(qemuAgent *agent,
|
2019-08-23 16:31:19 +00:00
|
|
|
virTypedParameterPtr *params,
|
|
|
|
int *nparams,
|
2020-03-16 07:37:13 +00:00
|
|
|
int *maxparams,
|
|
|
|
bool report_unsupported)
|
2019-08-23 16:31:19 +00:00
|
|
|
{
|
2019-10-15 12:47:50 +00:00
|
|
|
g_autoptr(virJSONValue) cmd = NULL;
|
|
|
|
g_autoptr(virJSONValue) reply = NULL;
|
2021-03-11 07:16:13 +00:00
|
|
|
virJSONValue *data = NULL;
|
2019-08-23 16:31:19 +00:00
|
|
|
const char *name;
|
|
|
|
int offset;
|
2020-03-16 07:37:13 +00:00
|
|
|
int rc;
|
2019-08-23 16:31:19 +00:00
|
|
|
|
|
|
|
if (!(cmd = qemuAgentMakeCommand("guest-get-timezone", NULL)))
|
|
|
|
return -1;
|
|
|
|
|
2020-03-16 07:37:13 +00:00
|
|
|
if ((rc = qemuAgentCommandFull(agent, cmd, &reply, agent->timeout,
|
|
|
|
report_unsupported)) < 0)
|
|
|
|
return rc;
|
2019-08-23 16:31:19 +00:00
|
|
|
|
|
|
|
if (!(data = virJSONValueObjectGetObject(reply, "return"))) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("guest-get-timezone reply was missing return data"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((name = virJSONValueObjectGetString(data, "zone")) &&
|
|
|
|
virTypedParamsAddString(params, nparams, maxparams,
|
|
|
|
"timezone.name", name) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if ((virJSONValueObjectGetNumberInt(data, "offset", &offset)) < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("'offset' missing in reply of guest-get-timezone"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (virTypedParamsAddInt(params, nparams, maxparams,
|
|
|
|
"timezone.offset", offset) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
Add API to change qemu agent response timeout
Some layered products such as oVirt have requested a way to avoid being
blocked by guest agent commands when querying a loaded vm. For example,
many guest agent commands are polled periodically to monitor changes,
and rather than blocking the calling process, they'd prefer to simply
time out when an agent query is taking too long.
This patch adds a way for the user to specify a custom agent timeout
that is applied to all agent commands.
One special case to note here is the 'guest-sync' command. 'guest-sync'
is issued internally prior to calling any other command. (For example,
when libvirt wants to call 'guest-get-fsinfo', we first call
'guest-sync' and then call 'guest-get-fsinfo').
Previously, the 'guest-sync' command used a 5-second timeout
(VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT), whereas the actual command that
followed always blocked indefinitely
(VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK). As part of this patch, if a
custom timeout is specified that is shorter than
5 seconds, this new timeout is also used for 'guest-sync'. If there is
no custom timeout or if the custom timeout is longer than 5 seconds, we
will continue to use the 5-second timeout.
Signed-off-by: Jonathon Jongsma <jjongsma@redhat.com>
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
2019-11-13 22:06:09 +00:00
|
|
|
|
|
|
|
/* qemuAgentSetResponseTimeout:
 * @agent: agent object
 * @timeout: number of seconds to wait for agent response
 *
 * Set the timeout applied to subsequent agent commands issued via
 * this @agent.
 *
 * The agent object must be locked prior to calling this function.
 */
void
qemuAgentSetResponseTimeout(qemuAgent *agent,
                            int timeout)
{
    agent->timeout = timeout;
}
|
2020-11-07 09:12:53 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuAgentSSHGetAuthorizedKeys:
|
|
|
|
* @agent: agent object
|
|
|
|
* @user: user to get authorized keys for
|
|
|
|
* @keys: Array of authorized keys
|
|
|
|
*
|
|
|
|
* Fetch the public keys from @user's $HOME/.ssh/authorized_keys.
|
|
|
|
*
|
|
|
|
* Returns: number of keys returned on success,
|
|
|
|
* -1 otherwise (error is reported)
|
|
|
|
*/
|
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentSSHGetAuthorizedKeys(qemuAgent *agent,
|
2020-11-07 09:12:53 +00:00
|
|
|
const char *user,
|
|
|
|
char ***keys)
|
|
|
|
{
|
|
|
|
g_autoptr(virJSONValue) cmd = NULL;
|
|
|
|
g_autoptr(virJSONValue) reply = NULL;
|
2021-03-11 07:16:13 +00:00
|
|
|
virJSONValue *data = NULL;
|
2020-11-07 09:12:53 +00:00
|
|
|
|
|
|
|
if (!(cmd = qemuAgentMakeCommand("guest-ssh-get-authorized-keys",
|
|
|
|
"s:username", user,
|
|
|
|
NULL)))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (qemuAgentCommand(agent, cmd, &reply, agent->timeout) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2020-12-01 10:47:30 +00:00
|
|
|
if (!(data = virJSONValueObjectGetObject(reply, "return"))) {
|
2020-11-07 09:12:53 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("qemu agent didn't return an array of keys"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-12-01 10:47:30 +00:00
|
|
|
if (!(*keys = virJSONValueObjectGetStringArray(data, "keys")))
|
|
|
|
return -1;
|
2020-11-07 09:12:53 +00:00
|
|
|
|
2020-12-01 10:47:30 +00:00
|
|
|
return g_strv_length(*keys);
|
2020-11-07 09:12:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuAgentSSHAddAuthorizedKeys:
|
|
|
|
* @agent: agent object
|
|
|
|
* @user: user to add authorized keys for
|
|
|
|
* @keys: Array of authorized keys
|
|
|
|
* @nkeys: number of items in @keys array
|
|
|
|
* @reset: whether to truncate authorized keys file before writing
|
|
|
|
*
|
|
|
|
* Append SSH @keys into the @user's authorized keys file. If
|
|
|
|
* @reset is true then the file is truncated before write and
|
|
|
|
* thus contains only newly added @keys.
|
|
|
|
*
|
|
|
|
* Returns: 0 on success,
|
|
|
|
* -1 otherwise (error is reported)
|
|
|
|
*/
|
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentSSHAddAuthorizedKeys(qemuAgent *agent,
|
2020-11-07 09:12:53 +00:00
|
|
|
const char *user,
|
|
|
|
const char **keys,
|
|
|
|
size_t nkeys,
|
|
|
|
bool reset)
|
|
|
|
{
|
|
|
|
g_autoptr(virJSONValue) cmd = NULL;
|
|
|
|
g_autoptr(virJSONValue) reply = NULL;
|
|
|
|
g_autoptr(virJSONValue) jkeys = NULL;
|
|
|
|
|
|
|
|
jkeys = qemuAgentMakeStringsArray(keys, nkeys);
|
|
|
|
if (jkeys == NULL)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (!(cmd = qemuAgentMakeCommand("guest-ssh-add-authorized-keys",
|
|
|
|
"s:username", user,
|
|
|
|
"a:keys", &jkeys,
|
|
|
|
"b:reset", reset,
|
|
|
|
NULL)))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return qemuAgentCommand(agent, cmd, &reply, agent->timeout);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuAgentSSHRemoveAuthorizedKeys:
|
|
|
|
* @agent: agent object
|
|
|
|
* @user: user to remove authorized keys for
|
|
|
|
* @keys: Array of authorized keys
|
|
|
|
* @nkeys: number of items in @keys array
|
|
|
|
*
|
|
|
|
* Remove SSH @keys from the @user's authorized keys file. It's
|
|
|
|
* not considered an error when trying to remove a non-existent
|
|
|
|
* key.
|
|
|
|
*
|
|
|
|
* Returns: 0 on success,
|
|
|
|
* -1 otherwise (error is reported)
|
|
|
|
*/
|
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuAgentSSHRemoveAuthorizedKeys(qemuAgent *agent,
|
2020-11-07 09:12:53 +00:00
|
|
|
const char *user,
|
|
|
|
const char **keys,
|
|
|
|
size_t nkeys)
|
|
|
|
{
|
|
|
|
g_autoptr(virJSONValue) cmd = NULL;
|
|
|
|
g_autoptr(virJSONValue) reply = NULL;
|
|
|
|
g_autoptr(virJSONValue) jkeys = NULL;
|
|
|
|
|
|
|
|
jkeys = qemuAgentMakeStringsArray(keys, nkeys);
|
|
|
|
if (jkeys == NULL)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (!(cmd = qemuAgentMakeCommand("guest-ssh-remove-authorized-keys",
|
|
|
|
"s:username", user,
|
|
|
|
"a:keys", &jkeys,
|
|
|
|
NULL)))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return qemuAgentCommand(agent, cmd, &reply, agent->timeout);
|
|
|
|
}
|
2020-11-20 18:09:45 +00:00
|
|
|
|
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
int qemuAgentGetDisks(qemuAgent *agent,
|
|
|
|
qemuAgentDiskInfo ***disks,
|
2020-11-20 18:09:45 +00:00
|
|
|
bool report_unsupported)
|
|
|
|
{
|
|
|
|
g_autoptr(virJSONValue) cmd = NULL;
|
|
|
|
g_autoptr(virJSONValue) reply = NULL;
|
2021-03-11 07:16:13 +00:00
|
|
|
virJSONValue *data = NULL;
|
2020-11-20 18:09:45 +00:00
|
|
|
size_t ndata;
|
|
|
|
size_t i;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (!(cmd = qemuAgentMakeCommand("guest-get-disks", NULL)))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if ((rc = qemuAgentCommandFull(agent, cmd, &reply, agent->timeout,
|
|
|
|
report_unsupported)) < 0)
|
|
|
|
return rc;
|
|
|
|
|
|
|
|
if (!(data = virJSONValueObjectGetArray(reply, "return"))) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("qemu agent didn't return an array of disks"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
ndata = virJSONValueArraySize(data);
|
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
*disks = g_new0(qemuAgentDiskInfo *, ndata);
|
2020-11-20 18:09:45 +00:00
|
|
|
|
|
|
|
for (i = 0; i < ndata; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virJSONValue *addr;
|
|
|
|
virJSONValue *entry = virJSONValueArrayGet(data, i);
|
|
|
|
qemuAgentDiskInfo *disk;
|
2020-11-20 18:09:45 +00:00
|
|
|
|
|
|
|
if (!entry) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("array element missing in guest-get-disks return "
|
|
|
|
"value"));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
disk = g_new0(qemuAgentDiskInfo, 1);
|
|
|
|
(*disks)[i] = disk;
|
|
|
|
|
|
|
|
disk->name = g_strdup(virJSONValueObjectGetString(entry, "name"));
|
|
|
|
if (!disk->name) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("'name' missing in reply of guest-get-disks"));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (virJSONValueObjectGetBoolean(entry, "partition", &disk->partition) < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("'partition' missing in reply of guest-get-disks"));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
disk->dependencies = virJSONValueObjectGetStringArray(entry, "dependencies");
|
|
|
|
disk->alias = g_strdup(virJSONValueObjectGetString(entry, "alias"));
|
|
|
|
addr = virJSONValueObjectGetObject(entry, "address");
|
|
|
|
if (addr) {
|
|
|
|
disk->address = qemuAgentGetDiskAddress(addr);
|
|
|
|
if (!disk->address)
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return ndata;
|
|
|
|
|
|
|
|
error:
|
|
|
|
for (i = 0; i < ndata; i++) {
|
|
|
|
qemuAgentDiskInfoFree((*disks)[i]);
|
|
|
|
}
|
|
|
|
g_free(*disks);
|
|
|
|
return -1;
|
|
|
|
}
|