/*
 * qemu_agent.c: interaction with QEMU guest agent
 *
 * Copyright (C) 2006-2014 Red Hat, Inc.
 * Copyright (C) 2006 Daniel P. Berrange
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library. If not, see
 * <http://www.gnu.org/licenses/>.
 */

#include <config.h>

#include <poll.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/time.h>
#include <gio/gio.h>

#include "qemu_agent.h"
#include "qemu_domain.h"
#include "viralloc.h"
#include "virlog.h"
#include "virerror.h"
#include "virjson.h"
#include "virfile.h"
#include "virprocess.h"
#include "virtime.h"
#include "virobject.h"
#include "virstring.h"
#include "virenum.h"
#include "virsocket.h"
#include "virutil.h"

#define VIR_FROM_THIS VIR_FROM_QEMU

VIR_LOG_INIT("qemu.qemu_agent");

#define LINE_ENDING "\n"

#define DEBUG_IO 0
#define DEBUG_RAW_IO 0

/* We read from QEMU until seeing a \r\n pair to indicate a
 * completed reply or event. To avoid memory denial-of-service
 * though, we must have a size limit on the amount of data we
 * buffer. 10 MB is large enough that it ought to cope with
 * normal QEMU replies, and small enough that we're not
 * consuming unreasonable memory.
 */
#define QEMU_AGENT_MAX_RESPONSE (10 * 1024 * 1024)

/* When you are the first to uncomment this,
 * don't forget to uncomment the corresponding
 * part in qemuAgentIOProcessEvent as well.
 *
static struct {
    const char *type;
    void (*handler)(qemuAgentPtr agent, virJSONValuePtr data);
} eventHandlers[] = {
};
*/

typedef struct _qemuAgentMessage qemuAgentMessage;
typedef qemuAgentMessage *qemuAgentMessagePtr;

struct _qemuAgentMessage {
    char *txBuffer;
    int txOffset;
    int txLength;

    /* Used by the JSON agent to hold reply / error */
    char *rxBuffer;
    int rxLength;
    void *rxObject;

    /* True if rxBuffer / rxObject are ready, or a
     * fatal error occurred on the agent channel
     */
    bool finished;
    /* true for a sync command */
    bool sync;
    /* id of the issued sync command */
    unsigned long long id;
    /* true if this is the first sync command issued on the channel */
    bool first;
};


struct _qemuAgent {
    virObjectLockable parent;

    virCond notify;

    int fd;

    GMainContext *context;
    GSocket *socket;
    GSource *watch;
    /* Set when we connect to the agent socket; cleared when the
     * guest agent disconnects. Commands are issued only while set. */
    bool running;
    /* When true, guest-sync is issued only once per connection and
     * @inSync records whether it has already succeeded. */
    bool singleSync;
    bool inSync;

    virDomainObjPtr vm;

    qemuAgentCallbacksPtr cb;

    /* If there's a command being processed this will be
     * non-NULL */
    qemuAgentMessagePtr msg;

    /* Buffer incoming data ready for agent
     * code to process & find message boundaries */
    size_t bufferOffset;
    size_t bufferLength;
    char *buffer;

    /* If anything went wrong, this will be fed back
     * to the next agent msg */
    virError lastError;

    /* Some guest agent commands don't return anything
     * but fire up an event on the qemu agent instead.
     * Take that as an indication of successful completion */
    qemuAgentEvent await_event;
    /* Agent response timeout in seconds; negative values select
     * the built-in defaults */
    int timeout;
};


static virClassPtr qemuAgentClass;
static void qemuAgentDispose(void *obj);

static int qemuAgentOnceInit(void)
{
    if (!VIR_CLASS_NEW(qemuAgent, virClassForObjectLockable()))
        return -1;

    return 0;
}

VIR_ONCE_GLOBAL_INIT(qemuAgent);


#if DEBUG_RAW_IO
static char *
qemuAgentEscapeNonPrintable(const char *text)
{
    size_t i;
    g_auto(virBuffer) buf = VIR_BUFFER_INITIALIZER;
    for (i = 0; text[i] != '\0'; i++) {
        if (text[i] == '\\')
            virBufferAddLit(&buf, "\\\\");
        else if (g_ascii_isprint(text[i]) || text[i] == '\n' ||
                 (text[i] == '\r' && text[i+1] == '\n'))
            virBufferAddChar(&buf, text[i]);
        else
            virBufferAsprintf(&buf, "\\x%02x", text[i]);
    }
    return virBufferContentAndReset(&buf);
}
#endif


static void qemuAgentDispose(void *obj)
{
    qemuAgentPtr agent = obj;
    VIR_DEBUG("agent=%p", agent);
    if (agent->cb && agent->cb->destroy)
        (agent->cb->destroy)(agent, agent->vm);
    virCondDestroy(&agent->notify);
    VIR_FREE(agent->buffer);
    g_main_context_unref(agent->context);
    virResetError(&agent->lastError);
}
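

/* Connect to the guest agent's UNIX stream socket at @socketpath.
 * Returns the connected, close-on-exec fd on success, or -1 with
 * an error reported. */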
static int
qemuAgentOpenUnix(const char *socketpath)
{
    struct sockaddr_un addr;
    int agentfd;
    int ret = -1;

    if ((agentfd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) {
        virReportSystemError(errno,
                             "%s", _("failed to create socket"));
        return -1;
    }

    if (virSetCloseExec(agentfd) < 0) {
        virReportSystemError(errno, "%s",
                             _("Unable to set agent "
                               "close-on-exec flag"));
        goto error;
    }

    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    if (virStrcpyStatic(addr.sun_path, socketpath) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Socket path %s too big for destination"), socketpath);
        goto error;
    }

    ret = connect(agentfd, (struct sockaddr *)&addr, sizeof(addr));
    if (ret < 0) {
        virReportSystemError(errno, "%s",
                             _("failed to connect to agent socket"));
        goto error;
    }

    return agentfd;

 error:
    VIR_FORCE_CLOSE(agentfd);
    return -1;
}
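

/* Dispatch an asynchronous event emitted by the guest agent. No
 * event handlers are currently registered (the handler table above
 * is commented out), so recognized events are simply logged and
 * ignored. */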
static int
qemuAgentIOProcessEvent(qemuAgentPtr agent,
                        virJSONValuePtr obj)
{
    const char *type;
    VIR_DEBUG("agent=%p obj=%p", agent, obj);

    type = virJSONValueObjectGetString(obj, "event");
    if (!type) {
        VIR_WARN("missing event type in message");
        errno = EINVAL;
        return -1;
    }

    /*
    for (i = 0; i < G_N_ELEMENTS(eventHandlers); i++) {
        if (STREQ(eventHandlers[i].type, type)) {
            virJSONValuePtr data = virJSONValueObjectGet(obj, "data");
            VIR_DEBUG("handle %s handler=%p data=%p", type,
                      eventHandlers[i].handler, data);
            (eventHandlers[i].handler)(agent, data);
            break;
        }
    }
    */
    return 0;
}
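

/* Parse one line received from the agent and classify it: the "QMP"
 * greeting is ignored, "event" objects are handed to
 * qemuAgentIOProcessEvent, and "return"/"error" objects complete the
 * pending message @msg (for sync commands only when the returned ID
 * matches the one we sent). */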
static int
qemuAgentIOProcessLine(qemuAgentPtr agent,
                       const char *line,
                       qemuAgentMessagePtr msg)
{
    virJSONValuePtr obj = NULL;
    int ret = -1;

    VIR_DEBUG("Line [%s]", line);

    if (!(obj = virJSONValueFromString(line))) {
        /* receiving garbage on the first sync is a regular situation */
        if (msg && msg->sync && msg->first) {
            VIR_DEBUG("Received garbage on sync");
            msg->finished = true;
            return 0;
        }

        goto cleanup;
    }

    if (virJSONValueGetType(obj) != VIR_JSON_TYPE_OBJECT) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Parsed JSON reply '%s' isn't an object"), line);
        goto cleanup;
    }

    if (virJSONValueObjectHasKey(obj, "QMP") == 1) {
        ret = 0;
    } else if (virJSONValueObjectHasKey(obj, "event") == 1) {
        ret = qemuAgentIOProcessEvent(agent, obj);
    } else if (virJSONValueObjectHasKey(obj, "error") == 1 ||
               virJSONValueObjectHasKey(obj, "return") == 1) {
        if (msg) {
            if (msg->sync) {
                unsigned long long id;

                if (virJSONValueObjectGetNumberUlong(obj, "return", &id) < 0) {
                    VIR_DEBUG("Ignoring delayed reply on sync");
                    ret = 0;
                    goto cleanup;
                }

                VIR_DEBUG("Guest returned ID: %llu", id);

                if (msg->id != id) {
                    VIR_DEBUG("Guest agent returned ID: %llu instead of %llu",
                              id, msg->id);
                    ret = 0;
                    goto cleanup;
                }
            }
            msg->rxObject = obj;
            msg->finished = true;
            obj = NULL;
        } else {
            /* we are out of sync */
            VIR_DEBUG("Ignoring delayed reply");
        }
        ret = 0;
    } else {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Unknown JSON reply '%s'"), line);
    }

 cleanup:
    virJSONValueFree(obj);
    return ret;
}
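

/* Split @data into LINE_ENDING-terminated lines and feed each one to
 * qemuAgentIOProcessLine. Returns the number of bytes consumed (any
 * trailing partial line stays in the buffer), or -1 on error. */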
static int qemuAgentIOProcessData(qemuAgentPtr agent,
                                  char *data,
                                  size_t len,
                                  qemuAgentMessagePtr msg)
{
    int used = 0;
    size_t i = 0;
#if DEBUG_IO
# if DEBUG_RAW_IO
    g_autofree char *str1 = qemuAgentEscapeNonPrintable(data);
    VIR_ERROR(_("[%s]"), str1);
# else
    VIR_DEBUG("Data %zu bytes [%s]", len, data);
# endif
#endif

    while (used < len) {
        char *nl = strstr(data + used, LINE_ENDING);

        if (nl) {
            int got = nl - (data + used);
            for (i = 0; i < strlen(LINE_ENDING); i++)
                data[used + got + i] = '\0';
            if (qemuAgentIOProcessLine(agent, data + used, msg) < 0)
                return -1;
            used += got + strlen(LINE_ENDING);
        } else {
            break;
        }
    }

    VIR_DEBUG("Total used %d bytes out of %zd available in buffer", used, len);
    return used;
}


/* This method processes data that has been received
 * from the agent, looking for async events and
 * replies/errors.
 */
static int
qemuAgentIOProcess(qemuAgentPtr agent)
{
    int len;
    qemuAgentMessagePtr msg = NULL;

    /* See if there's a message ready for reply; that is,
     * one that has completed writing all its data.
     */
    if (agent->msg && agent->msg->txOffset == agent->msg->txLength)
        msg = agent->msg;

#if DEBUG_IO
# if DEBUG_RAW_IO
    g_autofree char *str1 = qemuAgentEscapeNonPrintable(msg ? msg->txBuffer : "");
    g_autofree char *str2 = qemuAgentEscapeNonPrintable(agent->buffer);
    VIR_ERROR(_("Process %zu %p %p [[[%s]]][[[%s]]]"),
              agent->bufferOffset, agent->msg, msg, str1, str2);
# else
    VIR_DEBUG("Process %zu", agent->bufferOffset);
# endif
#endif

    len = qemuAgentIOProcessData(agent,
                                 agent->buffer, agent->bufferOffset,
                                 msg);

    if (len < 0)
        return -1;

    if (len < agent->bufferOffset) {
        memmove(agent->buffer, agent->buffer + len, agent->bufferOffset - len);
        agent->bufferOffset -= len;
    } else {
        VIR_FREE(agent->buffer);
        agent->bufferOffset = agent->bufferLength = 0;
    }
#if DEBUG_IO
    VIR_DEBUG("Process done %zu used %d", agent->bufferOffset, len);
#endif
    if (msg && msg->finished)
        virCondBroadcast(&agent->notify);
    return len;
}


/*
 * Called when the agent is able to write data.
 * Call this function while holding the agent lock.
 */
static int
qemuAgentIOWrite(qemuAgentPtr agent)
{
    int done;

    /* If no active message, or fully transmitted, then no-op */
    if (!agent->msg || agent->msg->txOffset == agent->msg->txLength)
        return 0;

    done = safewrite(agent->fd,
                     agent->msg->txBuffer + agent->msg->txOffset,
                     agent->msg->txLength - agent->msg->txOffset);

    if (done < 0) {
        if (errno == EAGAIN)
            return 0;

        virReportSystemError(errno, "%s",
                             _("Unable to write to agent"));
        return -1;
    }
    agent->msg->txOffset += done;
    return done;
}


/*
 * Called when the agent has incoming data to read.
 * Call this function while holding the agent lock.
 *
 * Returns -1 on error, or number of bytes read
 */
static int
qemuAgentIORead(qemuAgentPtr agent)
{
    size_t avail = agent->bufferLength - agent->bufferOffset;
    int ret = 0;

    if (avail < 1024) {
        if (agent->bufferLength >= QEMU_AGENT_MAX_RESPONSE) {
            virReportSystemError(ERANGE,
                                 _("No complete agent response found in %d bytes"),
                                 QEMU_AGENT_MAX_RESPONSE);
            return -1;
        }
        if (VIR_REALLOC_N(agent->buffer,
                          agent->bufferLength + 1024) < 0)
            return -1;
        agent->bufferLength += 1024;
        avail += 1024;
    }

    /* Read as much as we can get into our buffer,
       until we block on EAGAIN, or hit EOF */
    while (avail > 1) {
        int got;
        got = read(agent->fd,
                   agent->buffer + agent->bufferOffset,
                   avail - 1);
        if (got < 0) {
            if (errno == EAGAIN)
                break;
            virReportSystemError(errno, "%s",
                                 _("Unable to read from agent"));
            ret = -1;
            break;
        }
        if (got == 0)
            break;

        ret += got;
        avail -= got;
        agent->bufferOffset += got;
        agent->buffer[agent->bufferOffset] = '\0';
    }

#if DEBUG_IO
    VIR_DEBUG("Now read %zu bytes of data", agent->bufferOffset);
#endif

    return ret;
}


static gboolean
qemuAgentIO(GSocket *socket,
            GIOCondition cond,
            gpointer opaque);
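

/* Attach an I/O watch for the agent socket to the agent's
 * GMainContext: poll for input while the agent is error-free, and
 * for output only while a command is partially transmitted. The
 * watch holds a reference on @agent for its lifetime. */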
static void
qemuAgentRegister(qemuAgentPtr agent)
{
    GIOCondition cond = 0;

    if (agent->lastError.code == VIR_ERR_OK) {
        cond |= G_IO_IN;

        if (agent->msg && agent->msg->txOffset < agent->msg->txLength)
            cond |= G_IO_OUT;
    }

    agent->watch = g_socket_create_source(agent->socket,
                                          cond,
                                          NULL);

    virObjectRef(agent);
    g_source_set_callback(agent->watch,
                          (GSourceFunc)qemuAgentIO,
                          agent,
                          (GDestroyNotify)virObjectUnref);

    g_source_attach(agent->watch,
                    agent->context);
}


static void
qemuAgentUnregister(qemuAgentPtr agent)
{
    if (agent->watch) {
        g_source_destroy(agent->watch);
        g_source_unref(agent->watch);
        agent->watch = NULL;
    }
}


static void qemuAgentUpdateWatch(qemuAgentPtr agent)
{
    qemuAgentUnregister(agent);
    if (agent->socket)
        qemuAgentRegister(agent);
}
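

/* Main I/O callback, invoked from the agent's GMainContext whenever
 * the socket becomes readable or writable. Each invocation handles
 * pending writes and reads, re-arms the watch via
 * qemuAgentUpdateWatch() and returns G_SOURCE_REMOVE, so the old
 * source is always replaced by a fresh one. On EOF or error the
 * corresponding notification callback is triggered with the agent
 * unlocked. */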
static gboolean
qemuAgentIO(GSocket *socket G_GNUC_UNUSED,
            GIOCondition cond,
            gpointer opaque)
{
    qemuAgentPtr agent = opaque;
    bool error = false;
    bool eof = false;

    virObjectRef(agent);
    /* lock access to the agent and protect fd */
    virObjectLock(agent);
#if DEBUG_IO
    VIR_DEBUG("Agent %p I/O on socket %p cond %d", agent, agent->socket, cond);
#endif

    if (agent->fd == -1 || !agent->watch) {
        virObjectUnlock(agent);
        virObjectUnref(agent);
        return G_SOURCE_REMOVE;
    }

    if (agent->lastError.code != VIR_ERR_OK) {
        if (cond & (G_IO_HUP | G_IO_ERR))
            eof = true;
        error = true;
    } else {
        if (cond & G_IO_OUT) {
            if (qemuAgentIOWrite(agent) < 0)
                error = true;
        }

        if (!error &&
            cond & G_IO_IN) {
            int got = qemuAgentIORead(agent);
            if (got < 0) {
                error = true;
            } else if (got == 0) {
                eof = true;
            } else {
                /* Ignore hangup/error cond if we read some data, to
                 * give time for that data to be consumed */
                cond = 0;

                if (qemuAgentIOProcess(agent) < 0)
                    error = true;
            }
        }

        if (!error &&
            cond & G_IO_HUP) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("End of file from agent socket"));
            eof = true;
        }

        if (!error && !eof &&
            cond & G_IO_ERR) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("Invalid file descriptor while waiting for agent"));
            eof = true;
        }
    }

    if (error || eof) {
        if (agent->lastError.code != VIR_ERR_OK) {
            /* Already have an error, so clear any new error */
            virResetLastError();
        } else {
            if (virGetLastErrorCode() == VIR_ERR_OK)
                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                               _("Error while processing agent IO"));
            virCopyLastError(&agent->lastError);
            virResetLastError();
        }

        VIR_DEBUG("Error on agent %s", NULLSTR(agent->lastError.message));
        /* If IO process resulted in an error & we have a message,
         * then wakeup that waiter */
        if (agent->msg && !agent->msg->finished) {
            agent->msg->finished = true;
            virCondSignal(&agent->notify);
        }
    }

    qemuAgentUpdateWatch(agent);

    /* We have to unlock to avoid deadlock against command thread,
     * but is this safe ? I think it is, because the callback
     * will try to acquire the virDomainObjPtr mutex next */
    if (eof) {
        void (*eofNotify)(qemuAgentPtr, virDomainObjPtr)
            = agent->cb->eofNotify;
        virDomainObjPtr vm = agent->vm;

        /* Make sure anyone waiting wakes up now */
        virCondSignal(&agent->notify);
        virObjectUnlock(agent);
        virObjectUnref(agent);
        VIR_DEBUG("Triggering EOF callback");
        (eofNotify)(agent, vm);
    } else if (error) {
        void (*errorNotify)(qemuAgentPtr, virDomainObjPtr)
            = agent->cb->errorNotify;
        virDomainObjPtr vm = agent->vm;

        /* Make sure anyone waiting wakes up now */
        virCondSignal(&agent->notify);
        virObjectUnlock(agent);
        virObjectUnref(agent);
        VIR_DEBUG("Triggering error callback");
        (errorNotify)(agent, vm);
    } else {
        virObjectUnlock(agent);
        virObjectUnref(agent);
    }

    return G_SOURCE_REMOVE;
}
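

/*
 * qemuAgentOpen:
 * @vm: domain object
 * @config: character device definition for the agent channel
 * @context: GMainContext to register the agent's I/O watch in
 * @cb: notification callbacks; eofNotify is mandatory
 * @singleSync: if true, issue guest-sync only once per connection
 *
 * Connect to the guest agent's UNIX socket and set up the I/O watch.
 * Returns the new agent object, or NULL with an error reported.
 */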
qemuAgentPtr
qemuAgentOpen(virDomainObjPtr vm,
              const virDomainChrSourceDef *config,
              GMainContext *context,
              qemuAgentCallbacksPtr cb,
              bool singleSync)
{
    qemuAgentPtr agent;
    g_autoptr(GError) gerr = NULL;

    if (!cb || !cb->eofNotify) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("EOF notify callback must be supplied"));
        return NULL;
    }

    if (qemuAgentInitialize() < 0)
        return NULL;

    if (!(agent = virObjectLockableNew(qemuAgentClass)))
        return NULL;

    agent->timeout = QEMU_DOMAIN_PRIVATE(vm)->agentTimeout;
    agent->fd = -1;
    if (virCondInit(&agent->notify) < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot initialize agent condition"));
        virObjectUnref(agent);
        return NULL;
    }
    agent->vm = vm;
    agent->cb = cb;
    agent->singleSync = singleSync;

    if (config->type != VIR_DOMAIN_CHR_TYPE_UNIX) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("unable to handle agent type: %s"),
                       virDomainChrTypeToString(config->type));
        goto cleanup;
    }

    agent->fd = qemuAgentOpenUnix(config->data.nix.path);
    if (agent->fd == -1)
        goto cleanup;

    agent->context = g_main_context_ref(context);

    agent->socket = g_socket_new_from_fd(agent->fd, &gerr);
    if (!agent->socket) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Unable to create socket object: %s"),
                       gerr->message);
        goto cleanup;
    }

    qemuAgentRegister(agent);

    agent->running = true;
    VIR_DEBUG("New agent %p fd=%d", agent, agent->fd);

    return agent;

 cleanup:
    /* We don't want the 'destroy' callback invoked during
     * cleanup from construction failure, because that can
     * give a double-unref on virDomainObjPtr in the caller,
     * so kill the callbacks now.
     */
    agent->cb = NULL;
    qemuAgentClose(agent);
    return NULL;
}


static void
qemuAgentNotifyCloseLocked(qemuAgentPtr agent)
{
    if (agent) {
        agent->running = false;

        /* If there is somebody waiting for a message
         * wake them up. No message will arrive anyway. */
        if (agent->msg && !agent->msg->finished) {
            agent->msg->finished = true;
            virCondSignal(&agent->notify);
        }
    }
}
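

/* Mark the agent as no longer running and wake up anybody still
 * waiting for a reply; used when the guest agent disconnects from
 * its channel, since no reply will arrive after that. */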
void
qemuAgentNotifyClose(qemuAgentPtr agent)
{
    if (!agent)
        return;

    VIR_DEBUG("agent=%p", agent);

    virObjectLock(agent);
    qemuAgentNotifyCloseLocked(agent);
    virObjectUnlock(agent);
}


void qemuAgentClose(qemuAgentPtr agent)
{
    if (!agent)
        return;

    VIR_DEBUG("agent=%p", agent);

    virObjectLock(agent);

    if (agent->socket) {
        qemuAgentUnregister(agent);
        g_object_unref(agent->socket);
        agent->socket = NULL;
        agent->fd = -1;
    }

    qemuAgentNotifyCloseLocked(agent);
    virObjectUnlock(agent);

    virObjectUnref(agent);
}


#define QEMU_AGENT_WAIT_TIME 5

/**
 * qemuAgentSend:
 * @agent: agent object
 * @msg: Message
 * @seconds: number of seconds to wait for the result; it can be
 *           -2, -1, 0 or positive.
 *
 * Send @msg to agent @agent. If @seconds is equal to
 * VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK(-2), this function will block forever
 * waiting for the result. The value of
 * VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT(-1) means use the default timeout
 * and VIR_DOMAIN_QEMU_AGENT_COMMAND_NOWAIT(0) makes this function return
 * immediately without waiting. Any positive value means the number of seconds
 * to wait for the result.
 *
 * Returns: 0 on success,
 *          -2 on timeout,
 *          -1 otherwise
 */
static int qemuAgentSend(qemuAgentPtr agent,
                         qemuAgentMessagePtr msg,
                         int seconds)
{
    int ret = -1;
    unsigned long long then = 0;

    /* Check whether qemu quit unexpectedly */
    if (agent->lastError.code != VIR_ERR_OK) {
        VIR_DEBUG("Attempt to send command while error is set %s",
                  NULLSTR(agent->lastError.message));
        virSetError(&agent->lastError);
        return -1;
    }

    if (seconds > VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK) {
        unsigned long long now;
        if (virTimeMillisNow(&now) < 0)
            return -1;
        if (seconds == VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT)
            seconds = QEMU_AGENT_WAIT_TIME;
        then = now + seconds * 1000ull;
    }

    agent->msg = msg;
    qemuAgentUpdateWatch(agent);

    while (!agent->msg->finished) {
        if ((then && virCondWaitUntil(&agent->notify, &agent->parent.lock, then) < 0) ||
            (!then && virCondWait(&agent->notify, &agent->parent.lock) < 0)) {
            if (errno == ETIMEDOUT) {
                virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s",
                               _("Guest agent not available for now"));
                ret = -2;
            } else {
                virReportSystemError(errno, "%s",
                                     _("Unable to wait on agent socket "
                                       "condition"));
            }
            agent->inSync = false;
            goto cleanup;
        }
    }

    if (agent->lastError.code != VIR_ERR_OK) {
        VIR_DEBUG("Send command resulted in error %s",
                  NULLSTR(agent->lastError.message));
        virSetError(&agent->lastError);
        goto cleanup;
    }

    ret = 0;

 cleanup:
    agent->msg = NULL;
    qemuAgentUpdateWatch(agent);

    return ret;
}


/**
 * qemuAgentGuestSync:
 * @agent: agent object
 *
 * Send guest-sync with a unique ID
 * and wait for the reply. If we get one, check whether the
 * received ID is equal to the one we sent.
 *
 * Returns: 0 on success,
 *          -1 otherwise
 */
static int
qemuAgentGuestSync(qemuAgentPtr agent)
{
    int ret = -1;
    int send_ret;
    unsigned long long id;
    qemuAgentMessage sync_msg;
    int timeout = VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT;

    if (agent->singleSync && agent->inSync)
        return 0;

    /* if the user specified a custom agent timeout that is lower than the
     * default timeout, use the shorter timeout instead */
    if ((agent->timeout >= 0) && (agent->timeout < QEMU_AGENT_WAIT_TIME))
        timeout = agent->timeout;

    memset(&sync_msg, 0, sizeof(sync_msg));
    /* set only on first sync */
    sync_msg.first = true;

 retry:
    if (virTimeMillisNow(&id) < 0)
        return -1;
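
    /* Illustrative wire exchange (the ID is the current time in ms):
     *   => {"execute":"guest-sync", "arguments":{"id":1518450840000}}
     *   <= {"return": 1518450840000}
     * A reply whose "return" ID does not match is treated as a stale
     * response to an earlier command and ignored. */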
|
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
sync_msg.txBuffer = g_strdup_printf("{\"execute\":\"guest-sync\", "
|
|
|
|
"\"arguments\":{\"id\":%llu}}\n", id);
|
2012-02-01 14:44:53 +00:00
|
|
|
|
|
|
|
sync_msg.txLength = strlen(sync_msg.txBuffer);
|
2016-09-16 10:35:33 +00:00
|
|
|
sync_msg.sync = true;
|
2016-09-16 10:35:34 +00:00
|
|
|
sync_msg.id = id;
|
2012-02-01 14:44:53 +00:00
|
|
|
|
|
|
|
VIR_DEBUG("Sending guest-sync command with ID: %llu", id);
|
|
|
|
|
2020-02-12 17:31:20 +00:00
|
|
|
send_ret = qemuAgentSend(agent, &sync_msg, timeout);
|
2012-02-01 14:44:53 +00:00
|
|
|
|
|
|
|
VIR_DEBUG("qemuAgentSend returned: %d", send_ret);
|
|
|
|
|
2013-07-25 08:59:21 +00:00
|
|
|
if (send_ret < 0)
|
2012-02-01 14:44:53 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (!sync_msg.rxObject) {
|
2016-09-16 10:35:35 +00:00
|
|
|
if (sync_msg.first) {
|
|
|
|
VIR_FREE(sync_msg.txBuffer);
|
|
|
|
memset(&sync_msg, 0, sizeof(sync_msg));
|
|
|
|
goto retry;
|
|
|
|
} else {
|
2020-02-12 17:31:20 +00:00
|
|
|
if (agent->running)
|
2016-09-16 10:35:36 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
2020-02-12 17:31:20 +00:00
|
|
|
_("Missing agent reply object"));
|
2016-09-16 10:35:36 +00:00
|
|
|
else
|
|
|
|
virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s",
|
|
|
|
_("Guest agent disappeared while executing command"));
|
2016-09-16 10:35:35 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
2012-02-01 14:44:53 +00:00
|
|
|
}
|
|
|
|
|
2020-03-05 14:47:01 +00:00
|
|
|
if (agent->singleSync)
|
|
|
|
agent->inSync = true;
|
|
|
|
|
2012-02-01 14:44:53 +00:00
|
|
|
ret = 0;
|
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
cleanup:
|
2012-02-01 14:44:53 +00:00
|
|
|
virJSONValueFree(sync_msg.rxObject);
|
|
|
|
VIR_FREE(sync_msg.txBuffer);
|
|
|
|
return ret;
|
|
|
|
}
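
/* Illustrative sketch of the sync handshake performed above; the reply
 * shape follows the QEMU guest agent protocol, where 'guest-sync'
 * echoes the caller-chosen id back:
 *
 *   -> {"execute":"guest-sync", "arguments":{"id":1584374400000}}
 *   <- {"return": 1584374400000}
 *
 * A millisecond timestamp is used as the id so that a stale reply left
 * in the channel by an earlier, timed-out command can be told apart
 * from the reply to this sync. */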

static const char *
qemuAgentStringifyErrorClass(const char *klass)
{
    if (STREQ_NULLABLE(klass, "BufferOverrun"))
        return "Buffer overrun";
    else if (STREQ_NULLABLE(klass, "CommandDisabled"))
        return "The command has been disabled for this instance";
    else if (STREQ_NULLABLE(klass, "CommandNotFound"))
        return "The command has not been found";
    else if (STREQ_NULLABLE(klass, "FdNotFound"))
        return "File descriptor not found";
    else if (STREQ_NULLABLE(klass, "InvalidParameter"))
        return "Invalid parameter";
    else if (STREQ_NULLABLE(klass, "InvalidParameterType"))
        return "Invalid parameter type";
    else if (STREQ_NULLABLE(klass, "InvalidParameterValue"))
        return "Invalid parameter value";
    else if (STREQ_NULLABLE(klass, "OpenFileFailed"))
        return "Cannot open file";
    else if (STREQ_NULLABLE(klass, "QgaCommandFailed"))
        return "Guest agent command failed";
    else if (STREQ_NULLABLE(klass, "QMPBadInputObjectMember"))
        return "Bad QMP input object member";
    else if (STREQ_NULLABLE(klass, "QMPExtraInputObjectMember"))
        return "Unexpected extra object member";
    else if (STREQ_NULLABLE(klass, "UndefinedError"))
        return "An undefined error has occurred";
    else if (STREQ_NULLABLE(klass, "Unsupported"))
        return "this feature or command is not currently supported";
    else if (klass)
        return klass;
    else
        return "unknown QEMU command error";
}

/* Ignoring OOM in this method, since we're already reporting
 * a more important error
 *
 * XXX see qerror.h for different klasses & fill out useful params
 */
static const char *
qemuAgentStringifyError(virJSONValuePtr error)
{
    const char *klass = virJSONValueObjectGetString(error, "class");
    const char *detail = virJSONValueObjectGetString(error, "desc");

    /* The QMP 'desc' field is usually sufficient for our generic
     * error reporting needs. However, if not present, translate
     * the class into something readable.
     */
    if (!detail)
        detail = qemuAgentStringifyErrorClass(klass);

    return detail;
}


static const char *
qemuAgentCommandName(virJSONValuePtr cmd)
{
    const char *name = virJSONValueObjectGetString(cmd, "execute");
    if (name)
        return name;
    else
        return "<unknown>";
}

static int
qemuAgentCheckError(virJSONValuePtr cmd,
                    virJSONValuePtr reply,
                    bool report_unsupported)
{
    if (virJSONValueObjectHasKey(reply, "error")) {
        virJSONValuePtr error = virJSONValueObjectGet(reply, "error");
        g_autofree char *cmdstr = virJSONValueToString(cmd, false);
        g_autofree char *replystr = virJSONValueToString(reply, false);

        /* Log the full JSON formatted command & error */
        VIR_DEBUG("unable to execute QEMU agent command %s: %s",
                  NULLSTR(cmdstr), NULLSTR(replystr));

        /* Only send the user the command name + friendly error */
        if (!error) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("unable to execute QEMU agent command '%s'"),
                           qemuAgentCommandName(cmd));
            return -1;
        }

        if (!report_unsupported) {
            const char *klass = virJSONValueObjectGetString(error, "class");

            if (STREQ_NULLABLE(klass, "CommandNotFound") ||
                STREQ_NULLABLE(klass, "CommandDisabled"))
                return -2;
        }

        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("unable to execute QEMU agent command '%s': %s"),
                       qemuAgentCommandName(cmd),
                       qemuAgentStringifyError(error));

        return -1;
    } else if (!virJSONValueObjectHasKey(reply, "return")) {
        g_autofree char *cmdstr = virJSONValueToString(cmd, false);
        g_autofree char *replystr = virJSONValueToString(reply, false);

        VIR_DEBUG("Neither 'return' nor 'error' is set in the JSON reply %s: %s",
                  NULLSTR(cmdstr), NULLSTR(replystr));
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("unable to execute QEMU agent command '%s'"),
                       qemuAgentCommandName(cmd));
        return -1;
    }
    return 0;
}
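
/* Example of the reply classification above (a sketch, not an exhaustive
 * protocol description): an agent reply such as
 *   {"error":{"class":"CommandNotFound","desc":"Command foo not found"}}
 * yields -2 when @report_unsupported is false, letting callers treat a
 * missing agent command as "unsupported" rather than a hard failure,
 * while any other error class is reported and yields -1. */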

static int
qemuAgentCommandFull(qemuAgentPtr agent,
                     virJSONValuePtr cmd,
                     virJSONValuePtr *reply,
                     int seconds,
                     bool report_unsupported)
{
    int ret = -1;
    qemuAgentMessage msg;
    g_autofree char *cmdstr = NULL;
    int await_event = agent->await_event;

    *reply = NULL;
    memset(&msg, 0, sizeof(msg));

    if (!agent->running) {
        virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s",
                       _("Guest agent disappeared while executing command"));
        goto cleanup;
    }

    if (qemuAgentGuestSync(agent) < 0)
        goto cleanup;

    if (!(cmdstr = virJSONValueToString(cmd, false)))
        goto cleanup;
    msg.txBuffer = g_strdup_printf("%s" LINE_ENDING, cmdstr);
    msg.txLength = strlen(msg.txBuffer);

    VIR_DEBUG("Send command '%s' for write, seconds = %d", cmdstr, seconds);

    ret = qemuAgentSend(agent, &msg, seconds);

    VIR_DEBUG("Receive command reply ret=%d rxObject=%p",
              ret, msg.rxObject);

    if (ret < 0)
        goto cleanup;

    /* If we haven't obtained any reply but we wait for an
     * event, then don't report this as error */
    if (!msg.rxObject) {
        if (!await_event) {
            if (agent->running) {
                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                               _("Missing agent reply object"));
            } else {
                virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s",
                               _("Guest agent disappeared while executing command"));
            }
            ret = -1;
        }
        goto cleanup;
    }

    *reply = msg.rxObject;
    ret = qemuAgentCheckError(cmd, *reply, report_unsupported);

 cleanup:
    VIR_FREE(msg.txBuffer);
    agent->await_event = QEMU_AGENT_EVENT_NONE;

    return ret;
}

static int
qemuAgentCommand(qemuAgentPtr agent,
                 virJSONValuePtr cmd,
                 virJSONValuePtr *reply,
                 int seconds)
{
    return qemuAgentCommandFull(agent, cmd, reply, seconds, true);
}

static virJSONValuePtr G_GNUC_NULL_TERMINATED
qemuAgentMakeCommand(const char *cmdname,
                     ...)
{
    virJSONValuePtr obj = virJSONValueNewObject();
    virJSONValuePtr jargs = NULL;
    va_list args;

    va_start(args, cmdname);

    if (virJSONValueObjectAppendString(obj, "execute", cmdname) < 0)
        goto error;

    if (virJSONValueObjectCreateVArgs(&jargs, args) < 0)
        goto error;

    if (jargs &&
        virJSONValueObjectAppend(obj, "arguments", jargs) < 0)
        goto error;

    va_end(args);

    return obj;

 error:
    virJSONValueFree(obj);
    virJSONValueFree(jargs);
    va_end(args);
    return NULL;
}
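
/* Illustrative mapping from the varargs mini-language to JSON (see
 * virJSONValueObjectCreateVArgs for the full set of type prefixes):
 *   qemuAgentMakeCommand("guest-shutdown",
 *                        "s:mode", "powerdown",
 *                        NULL);
 * builds
 *   {"execute":"guest-shutdown","arguments":{"mode":"powerdown"}}
 * where "s:" marks a string argument; with no arguments at all the
 * "arguments" member is omitted entirely. */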

static virJSONValuePtr
qemuAgentMakeStringsArray(const char **strings, unsigned int len)
{
    size_t i;
    virJSONValuePtr ret = virJSONValueNewArray(), str;

    for (i = 0; i < len; i++) {
        str = virJSONValueNewString(strings[i]);
        if (!str)
            goto error;

        if (virJSONValueArrayAppend(ret, str) < 0) {
            virJSONValueFree(str);
            goto error;
        }
    }
    return ret;

 error:
    virJSONValueFree(ret);
    return NULL;
}

void qemuAgentNotifyEvent(qemuAgentPtr agent,
                          qemuAgentEvent event)
{
    virObjectLock(agent);

    VIR_DEBUG("agent=%p event=%d await_event=%d", agent, event, agent->await_event);
    if (agent->await_event == event) {
        agent->await_event = QEMU_AGENT_EVENT_NONE;
        /* somebody is waiting for this event; wake them up */
        if (agent->msg && !agent->msg->finished) {
            agent->msg->finished = true;
            virCondSignal(&agent->notify);
        }
    }

    virObjectUnlock(agent);
}

VIR_ENUM_DECL(qemuAgentShutdownMode);

VIR_ENUM_IMPL(qemuAgentShutdownMode,
              QEMU_AGENT_SHUTDOWN_LAST,
              "powerdown", "reboot", "halt",
);

int qemuAgentShutdown(qemuAgentPtr agent,
                      qemuAgentShutdownMode mode)
{
    int ret = -1;
    virJSONValuePtr cmd;
    virJSONValuePtr reply = NULL;

    cmd = qemuAgentMakeCommand("guest-shutdown",
                               "s:mode", qemuAgentShutdownModeTypeToString(mode),
                               NULL);
    if (!cmd)
        return -1;

    if (mode == QEMU_AGENT_SHUTDOWN_REBOOT)
        agent->await_event = QEMU_AGENT_EVENT_RESET;
    else
        agent->await_event = QEMU_AGENT_EVENT_SHUTDOWN;
    ret = qemuAgentCommand(agent, cmd, &reply,
                           VIR_DOMAIN_QEMU_AGENT_COMMAND_SHUTDOWN);

    virJSONValueFree(cmd);
    virJSONValueFree(reply);
    return ret;
}
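
/* Typical call from the driver (a sketch; error handling elided):
 *   if (qemuAgentShutdown(agent, QEMU_AGENT_SHUTDOWN_POWERDOWN) < 0)
 *       return -1;
 * Because the guest may power off before the agent can transmit a
 * reply, success is signalled either by a reply or by the awaited
 * SHUTDOWN/RESET lifecycle event delivered via qemuAgentNotifyEvent. */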

/*
 * qemuAgentFSFreeze:
 * @agent: agent object
 * @mountpoints: Array of mountpoint paths to be frozen, or NULL for all
 * @nmountpoints: Number of mountpoints to be frozen, or 0 for all
 *
 * Issue guest-fsfreeze-freeze command to guest agent,
 * which freezes file systems mounted on specified mountpoints
 * (or all file systems when @mountpoints is NULL), and returns
 * number of frozen file systems on success.
 *
 * Returns: number of file systems frozen on success,
 *          -1 on error.
 */
int qemuAgentFSFreeze(qemuAgentPtr agent, const char **mountpoints,
                      unsigned int nmountpoints)
{
    int ret = -1;
    virJSONValuePtr cmd, arg = NULL;
    virJSONValuePtr reply = NULL;

    if (mountpoints && nmountpoints) {
        arg = qemuAgentMakeStringsArray(mountpoints, nmountpoints);
        if (!arg)
            return -1;

        cmd = qemuAgentMakeCommand("guest-fsfreeze-freeze-list",
                                   "a:mountpoints", &arg, NULL);
    } else {
        cmd = qemuAgentMakeCommand("guest-fsfreeze-freeze", NULL);
    }

    if (!cmd)
        goto cleanup;

    if (qemuAgentCommand(agent, cmd, &reply, agent->timeout) < 0)
        goto cleanup;

    if (virJSONValueObjectGetNumberInt(reply, "return", &ret) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("malformed return value"));
    }

 cleanup:
    virJSONValueFree(arg);
    virJSONValueFree(cmd);
    virJSONValueFree(reply);
    return ret;
}
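
/* Sketch of the usual freeze/thaw pairing around a snapshot (mountpoint
 * list and error handling here are illustrative only):
 *   const char *mounts[] = { "/", "/data" };
 *   int frozen = qemuAgentFSFreeze(agent, mounts, 2);
 *   if (frozen < 0)
 *       return -1;
 *   ... take the disk snapshot ...
 *   ignore_value(qemuAgentFSThaw(agent));
 * A thaw must always be attempted, even when the snapshot fails,
 * otherwise guest I/O stays blocked. */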

/*
 * qemuAgentFSThaw:
 * @agent: agent object
 *
 * Issue guest-fsfreeze-thaw command to guest agent,
 * which unfreezes all mounted file systems and returns
 * number of thawed file systems on success.
 *
 * Returns: number of file systems thawed on success,
 *          -1 on error.
 */
int qemuAgentFSThaw(qemuAgentPtr agent)
{
    int ret = -1;
    virJSONValuePtr cmd;
    virJSONValuePtr reply = NULL;

    cmd = qemuAgentMakeCommand("guest-fsfreeze-thaw", NULL);

    if (!cmd)
        return -1;

    if (qemuAgentCommand(agent, cmd, &reply, agent->timeout) < 0)
        goto cleanup;

    if (virJSONValueObjectGetNumberInt(reply, "return", &ret) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("malformed return value"));
    }

 cleanup:
    virJSONValueFree(cmd);
    virJSONValueFree(reply);
    return ret;
}

VIR_ENUM_DECL(qemuAgentSuspendMode);

VIR_ENUM_IMPL(qemuAgentSuspendMode,
              VIR_NODE_SUSPEND_TARGET_LAST,
              "guest-suspend-ram",
              "guest-suspend-disk",
              "guest-suspend-hybrid",
);

int
qemuAgentSuspend(qemuAgentPtr agent,
                 unsigned int target)
{
    int ret = -1;
    virJSONValuePtr cmd;
    virJSONValuePtr reply = NULL;

    cmd = qemuAgentMakeCommand(qemuAgentSuspendModeTypeToString(target),
                               NULL);
    if (!cmd)
        return -1;

    agent->await_event = QEMU_AGENT_EVENT_SUSPEND;
    ret = qemuAgentCommand(agent, cmd, &reply, agent->timeout);

    virJSONValueFree(cmd);
    virJSONValueFree(reply);
    return ret;
}

int
qemuAgentArbitraryCommand(qemuAgentPtr agent,
                          const char *cmd_str,
                          char **result,
                          int timeout)
{
    int ret = -1;
    virJSONValuePtr cmd = NULL;
    virJSONValuePtr reply = NULL;

    *result = NULL;
    if (timeout < VIR_DOMAIN_QEMU_AGENT_COMMAND_MIN) {
        virReportError(VIR_ERR_INVALID_ARG,
                       _("guest agent timeout '%d' is "
                         "less than the minimum '%d'"),
                       timeout, VIR_DOMAIN_QEMU_AGENT_COMMAND_MIN);
        goto cleanup;
    }

    if (!(cmd = virJSONValueFromString(cmd_str)))
        goto cleanup;

    if ((ret = qemuAgentCommand(agent, cmd, &reply, timeout)) < 0)
        goto cleanup;

    if (!(*result = virJSONValueToString(reply, false)))
        ret = -1;

 cleanup:
    virJSONValueFree(cmd);
    virJSONValueFree(reply);
    return ret;
}
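
/* Usage sketch (cmd_str is raw JSON supplied by the user through the
 * virDomainQemuAgentCommand API; the agent's reply comes back as a
 * JSON string the caller must free):
 *   char *result = NULL;
 *   if (qemuAgentArbitraryCommand(agent, "{\"execute\":\"guest-ping\"}",
 *                                 &result,
 *                                 VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK) == 0)
 *       VIR_DEBUG("agent replied: %s", result);
 *   VIR_FREE(result);
 * VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK waits indefinitely; anything below
 * VIR_DOMAIN_QEMU_AGENT_COMMAND_MIN is rejected above. */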

int
qemuAgentFSTrim(qemuAgentPtr agent,
                unsigned long long minimum)
{
    int ret = -1;
    virJSONValuePtr cmd;
    virJSONValuePtr reply = NULL;

    cmd = qemuAgentMakeCommand("guest-fstrim",
                               "U:minimum", minimum,
                               NULL);
    if (!cmd)
        return ret;

    ret = qemuAgentCommand(agent, cmd, &reply, agent->timeout);

    virJSONValueFree(cmd);
    virJSONValueFree(reply);
    return ret;
}

int
qemuAgentGetVCPUs(qemuAgentPtr agent,
                  qemuAgentCPUInfoPtr *info)
{
    int ret = -1;
    size_t i;
    virJSONValuePtr cmd;
    virJSONValuePtr reply = NULL;
    virJSONValuePtr data = NULL;
    size_t ndata;

    if (!(cmd = qemuAgentMakeCommand("guest-get-vcpus", NULL)))
        return -1;

    if (qemuAgentCommand(agent, cmd, &reply, agent->timeout) < 0)
        goto cleanup;

    if (!(data = virJSONValueObjectGetArray(reply, "return"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest-get-vcpus reply was missing return data"));
        goto cleanup;
    }

    ndata = virJSONValueArraySize(data);

    *info = g_new0(qemuAgentCPUInfo, ndata);

    for (i = 0; i < ndata; i++) {
        virJSONValuePtr entry = virJSONValueArrayGet(data, i);
        qemuAgentCPUInfoPtr in = *info + i;

        if (!entry) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("array element missing in guest-get-vcpus return "
                             "value"));
            goto cleanup;
        }

        if (virJSONValueObjectGetNumberUint(entry, "logical-id", &in->id) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'logical-id' missing in reply of guest-get-vcpus"));
            goto cleanup;
        }

        if (virJSONValueObjectGetBoolean(entry, "online", &in->online) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'online' missing in reply of guest-get-vcpus"));
            goto cleanup;
        }

        if (virJSONValueObjectGetBoolean(entry, "can-offline",
                                         &in->offlinable) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'can-offline' missing in reply of guest-get-vcpus"));
            goto cleanup;
        }
    }

    ret = ndata;

 cleanup:
    virJSONValueFree(cmd);
    virJSONValueFree(reply);
    return ret;
}

/* returns the value provided by the guest agent or -1 on internal error */
static int
qemuAgentSetVCPUsCommand(qemuAgentPtr agent,
                         qemuAgentCPUInfoPtr info,
                         size_t ninfo,
                         int *nmodified)
{
    int ret = -1;
    virJSONValuePtr cmd = NULL;
    virJSONValuePtr reply = NULL;
    virJSONValuePtr cpus = NULL;
    virJSONValuePtr cpu = NULL;
    size_t i;

    *nmodified = 0;

    /* create the key data array */
    cpus = virJSONValueNewArray();

    for (i = 0; i < ninfo; i++) {
        qemuAgentCPUInfoPtr in = &info[i];

        /* don't set state for cpus that were not touched */
        if (!in->modified)
            continue;

        (*nmodified)++;

        /* create single cpu object */
        cpu = virJSONValueNewObject();

        if (virJSONValueObjectAppendNumberInt(cpu, "logical-id", in->id) < 0)
            goto cleanup;

        if (virJSONValueObjectAppendBoolean(cpu, "online", in->online) < 0)
            goto cleanup;

        if (virJSONValueArrayAppend(cpus, cpu) < 0)
            goto cleanup;

        cpu = NULL;
    }

    if (*nmodified == 0) {
        ret = 0;
        goto cleanup;
    }

    if (!(cmd = qemuAgentMakeCommand("guest-set-vcpus",
                                     "a:vcpus", &cpus,
                                     NULL)))
        goto cleanup;

    if (qemuAgentCommand(agent, cmd, &reply, agent->timeout) < 0)
        goto cleanup;

    /* All negative values are invalid. Return of 0 is bogus since we wouldn't
     * call the guest agent so that 0 cpus would be set successfully. Reporting
     * more successfully set vcpus than we've asked for is invalid. */
    if (virJSONValueObjectGetNumberInt(reply, "return", &ret) < 0 ||
        ret <= 0 || ret > *nmodified) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest agent returned malformed or invalid return value"));
        ret = -1;
    }

 cleanup:
    virJSONValueFree(cmd);
    virJSONValueFree(reply);
    virJSONValueFree(cpu);
    virJSONValueFree(cpus);
    return ret;
}

/**
 * Set the VCPU state using guest agent.
 *
 * Attempts to set the guest agent state for all cpus or until a proper error is
 * reported by the guest agent. This may require multiple calls.
 *
 * Returns -1 on error, 0 on success.
 */
int
qemuAgentSetVCPUs(qemuAgentPtr agent,
                  qemuAgentCPUInfoPtr info,
                  size_t ninfo)
{
    int rv;
    int nmodified;
    size_t i;

    do {
        if ((rv = qemuAgentSetVCPUsCommand(agent, info, ninfo, &nmodified)) < 0)
            return -1;

        /* all vcpus were set successfully */
        if (rv == nmodified)
            return 0;

        /* un-mark vcpus that were already set */
        for (i = 0; i < ninfo && rv > 0; i++) {
            if (!info[i].modified)
                continue;

            info[i].modified = false;
            rv--;
        }
    } while (1);

    return 0;
}
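
/* Worked example of the retry bookkeeping above (hypothetical numbers):
 * with 4 modified vcpus the first qemuAgentSetVCPUsCommand() call may
 * return rv == 2, meaning the agent applied only the first two changes.
 * The loop then clears the 'modified' flag on those two entries and
 * calls again with the remaining two, repeating until rv == nmodified
 * or the agent reports a proper error. */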

/* modify the cpu info structure to set the correct amount of cpus */
int
qemuAgentUpdateCPUInfo(unsigned int nvcpus,
                       qemuAgentCPUInfoPtr cpuinfo,
                       int ncpuinfo)
{
    size_t i;
    int nonline = 0;
    int nofflinable = 0;
    ssize_t cpu0 = -1;

    /* count the active and offlinable cpus */
    for (i = 0; i < ncpuinfo; i++) {
        if (cpuinfo[i].id == 0)
            cpu0 = i;

        if (cpuinfo[i].online)
            nonline++;

        if (cpuinfo[i].offlinable && cpuinfo[i].online)
            nofflinable++;

        /* This shouldn't happen, but we can't trust the guest agent */
        if (!cpuinfo[i].online && !cpuinfo[i].offlinable) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("Invalid data provided by guest agent"));
            return -1;
        }
    }

    /* CPU0 was made offlinable in linux a while ago, but certain parts (suspend
     * to ram) of the kernel still don't cope well with that. Make sure that if
     * all remaining vCPUs are offlinable, vCPU0 will not be selected to be
     * offlined automatically */
    if (nofflinable == nonline && cpu0 >= 0 && cpuinfo[cpu0].online) {
        cpuinfo[cpu0].offlinable = false;
        nofflinable--;
    }

    /* the guest agent reported less cpus than requested */
    if (nvcpus > ncpuinfo) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest agent reports less cpu than requested"));
        return -1;
    }

    /* not enough offlinable CPUs to support the request */
    if (nvcpus < nonline - nofflinable) {
        virReportError(VIR_ERR_INVALID_ARG, "%s",
                       _("Cannot offline enough CPUs"));
        return -1;
    }

    for (i = 0; i < ncpuinfo; i++) {
        if (nvcpus < nonline) {
            /* unplug */
            if (cpuinfo[i].offlinable && cpuinfo[i].online) {
                cpuinfo[i].online = false;
                cpuinfo[i].modified = true;
                nonline--;
            }
        } else if (nvcpus > nonline) {
            /* plug */
            if (!cpuinfo[i].online) {
                cpuinfo[i].online = true;
                cpuinfo[i].modified = true;
                nonline++;
            }
        } else {
            /* done */
            break;
        }
    }

    return 0;
}
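
/* Worked example (hypothetical data): with 4 vCPUs all online and
 * offlinable and nvcpus == 2, vCPU0 is first excluded from the
 * offlinable set by the guard above, so the unplug loop skips it,
 * marks vCPU1 and vCPU2 modified/offline, and then stops once
 * nonline == nvcpus, leaving vCPU0 and vCPU3 online. */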

/**
 * qemuAgentGetHostname:
 *
 * Gets the guest hostname using the guest agent.
 *
 * Returns 0 on success and fills @hostname. On error -1 is returned with an
 * error reported. If @report_unsupported is false, -2 is returned when the
 * guest agent does not support the command, without reporting an error.
 */
int
qemuAgentGetHostname(qemuAgentPtr agent,
                     char **hostname,
                     bool report_unsupported)
{
    g_autoptr(virJSONValue) cmd = qemuAgentMakeCommand("guest-get-host-name", NULL);
    g_autoptr(virJSONValue) reply = NULL;
    virJSONValuePtr data = NULL;
    const char *result = NULL;
    int rc;

    if (!cmd)
        return -1;

    if ((rc = qemuAgentCommandFull(agent, cmd, &reply, agent->timeout,
                                   report_unsupported)) < 0)
        return rc;

    if (!(data = virJSONValueObjectGet(reply, "return"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("malformed return value"));
        return -1;
    }

    if (!(result = virJSONValueObjectGetString(data, "host-name"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("'host-name' missing in guest-get-host-name reply"));
        return -1;
    }

    *hostname = g_strdup(result);

    return 0;
}

int
qemuAgentGetTime(qemuAgentPtr agent,
                 long long *seconds,
                 unsigned int *nseconds)
{
    int ret = -1;
    unsigned long long json_time;
    virJSONValuePtr cmd;
    virJSONValuePtr reply = NULL;

    cmd = qemuAgentMakeCommand("guest-get-time",
                               NULL);
    if (!cmd)
        return ret;

    if (qemuAgentCommand(agent, cmd, &reply, agent->timeout) < 0)
        goto cleanup;

    if (virJSONValueObjectGetNumberUlong(reply, "return", &json_time) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("malformed return value"));
        goto cleanup;
    }

    /* guest agent returns time in nanoseconds,
     * we need it in seconds here */
    *seconds = json_time / 1000000000LL;
    *nseconds = json_time % 1000000000LL;
    ret = 0;

 cleanup:
    virJSONValueFree(cmd);
    virJSONValueFree(reply);
    return ret;
}

/**
 * qemuAgentSetTime:
 * @seconds, @nseconds: time to set
 * @rtcSync: let the guest agent read the domain's RTC instead
 *           (@seconds and @nseconds are ignored)
 */
int
qemuAgentSetTime(qemuAgentPtr agent,
                 long long seconds,
                 unsigned int nseconds,
                 bool rtcSync)
{
    int ret = -1;
    virJSONValuePtr cmd;
    virJSONValuePtr reply = NULL;

    if (rtcSync) {
        cmd = qemuAgentMakeCommand("guest-set-time", NULL);
    } else {
        /* The guest agent expects time with nanosecond granularity.
         * Impressive. */
        long long json_time;

        /* Check if we overflow. For some reason qemu doesn't handle unsigned
         * long long on the agent well as it silently truncates numbers to
         * signed long long. Therefore we must check overflow against LLONG_MAX
         * not ULLONG_MAX. */
        if (seconds > LLONG_MAX / 1000000000LL) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("Time '%lld' is too big for guest agent"),
                           seconds);
            return ret;
        }

        json_time = seconds * 1000000000LL;
        json_time += nseconds;
        cmd = qemuAgentMakeCommand("guest-set-time",
                                   "I:time", json_time,
                                   NULL);
    }

    if (!cmd)
        return ret;

    if (qemuAgentCommand(agent, cmd, &reply, agent->timeout) < 0)
        goto cleanup;

    ret = 0;
 cleanup:
    virJSONValueFree(cmd);
    virJSONValueFree(reply);
    return ret;
}
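
/* A note on the overflow guard above: LLONG_MAX / 1000000000 is roughly
 * 9.2e9 seconds, i.e. about the year 2262 counted from the epoch, so any
 * 'seconds' value beyond that cannot be represented in the signed 64-bit
 * nanosecond quantity the agent expects. */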

void
qemuAgentDiskAddressFree(qemuAgentDiskAddressPtr info)
{
    if (!info)
        return;

    g_free(info->serial);
    g_free(info->bus_type);
    g_free(info->devnode);
    g_free(info->ccw_addr);
    g_free(info);
}

void
qemuAgentDiskInfoFree(qemuAgentDiskInfoPtr info)
{
    if (!info)
        return;

    g_free(info->name);
    g_strfreev(info->dependencies);
    qemuAgentDiskAddressFree(info->address);
    g_free(info->alias);
    g_free(info);
}

void
qemuAgentFSInfoFree(qemuAgentFSInfoPtr info)
{
    size_t i;

    if (!info)
        return;

    g_free(info->mountpoint);
    g_free(info->name);
    g_free(info->fstype);

    for (i = 0; i < info->ndisks; i++)
        qemuAgentDiskAddressFree(info->disks[i]);
    g_free(info->disks);

    g_free(info);
}

static qemuAgentDiskAddressPtr
qemuAgentGetDiskAddress(virJSONValuePtr json)
{
    virJSONValuePtr pci;
    virJSONValuePtr ccw;
    g_autoptr(qemuAgentDiskAddress) addr = NULL;

    addr = g_new0(qemuAgentDiskAddress, 1);
    addr->bus_type = g_strdup(virJSONValueObjectGetString(json, "bus-type"));
    addr->serial = g_strdup(virJSONValueObjectGetString(json, "serial"));
    addr->devnode = g_strdup(virJSONValueObjectGetString(json, "dev"));

#define GET_DISK_ADDR(jsonObject, var, name) \
    do { \
        if (virJSONValueObjectGetNumberUint(jsonObject, name, var) < 0) { \
            virReportError(VIR_ERR_INTERNAL_ERROR, \
                           _("'%s' missing"), name); \
            return NULL; \
        } \
    } while (0)

    GET_DISK_ADDR(json, &addr->bus, "bus");
    GET_DISK_ADDR(json, &addr->target, "target");
    GET_DISK_ADDR(json, &addr->unit, "unit");

    if (!(pci = virJSONValueObjectGet(json, "pci-controller"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("'pci-controller' missing"));
        return NULL;
    }

    GET_DISK_ADDR(pci, &addr->pci_controller.domain, "domain");
    GET_DISK_ADDR(pci, &addr->pci_controller.bus, "bus");
    GET_DISK_ADDR(pci, &addr->pci_controller.slot, "slot");
    GET_DISK_ADDR(pci, &addr->pci_controller.function, "function");

    if ((ccw = virJSONValueObjectGet(json, "ccw-address"))) {
        g_autofree virDomainDeviceCCWAddressPtr ccw_addr = NULL;

        ccw_addr = g_new0(virDomainDeviceCCWAddress, 1);

        GET_DISK_ADDR(ccw, &ccw_addr->cssid, "cssid");
        if (ccw_addr->cssid == 0) /* Guest CSSID 0 is 0xfe on host */
            ccw_addr->cssid = 0xfe;
        GET_DISK_ADDR(ccw, &ccw_addr->ssid, "ssid");
        GET_DISK_ADDR(ccw, &ccw_addr->devno, "devno");

        addr->ccw_addr = g_steal_pointer(&ccw_addr);
    }
#undef GET_DISK_ADDR

    return g_steal_pointer(&addr);
}
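
/* Sketch of the JSON this parser consumes, one element of the 'disk'
 * array in a guest-get-fsinfo reply (field values are illustrative):
 *   {"bus-type":"virtio", "bus":0, "target":0, "unit":0,
 *    "dev":"/dev/vda2", "serial":"fake-serial",
 *    "pci-controller":{"domain":0,"bus":0,"slot":4,"function":0}}
 * 'ccw-address' only appears for s390 CCW devices; its absence is not
 * an error, unlike a missing 'pci-controller'. */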

static int
qemuAgentGetFSInfoFillDisks(virJSONValuePtr jsondisks,
                            qemuAgentFSInfoPtr fsinfo)
{
    size_t ndisks;
    size_t i;

    if (!virJSONValueIsArray(jsondisks)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Malformed guest-get-fsinfo 'disk' data array"));
        return -1;
    }

    ndisks = virJSONValueArraySize(jsondisks);

    if (ndisks)
        fsinfo->disks = g_new0(qemuAgentDiskAddressPtr, ndisks);
    fsinfo->ndisks = ndisks;

    for (i = 0; i < fsinfo->ndisks; i++) {
        virJSONValuePtr jsondisk = virJSONValueArrayGet(jsondisks, i);

        if (!jsondisk) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("array element '%zd' of '%zd' missing in "
                             "guest-get-fsinfo 'disk' data"),
                           i, fsinfo->ndisks);
            return -1;
        }

        if (!(fsinfo->disks[i] = qemuAgentGetDiskAddress(jsondisk)))
            return -1;
    }

    return 0;
}

/* Returns: number of entries in '@info' on success
 *          -2 when agent command is not supported by the agent and
 *             'report_unsupported' is false (libvirt error is not reported)
 *          -1 otherwise (libvirt error is reported)
 */
int
qemuAgentGetFSInfo(qemuAgentPtr agent,
                   qemuAgentFSInfoPtr **info,
                   bool report_unsupported)
{
    size_t i;
    int ret = -1;
    g_autoptr(virJSONValue) cmd = NULL;
    g_autoptr(virJSONValue) reply = NULL;
    virJSONValuePtr data;
    size_t ndata = 0;
    qemuAgentFSInfoPtr *info_ret = NULL;
    int rc;

    cmd = qemuAgentMakeCommand("guest-get-fsinfo", NULL);
    if (!cmd)
        return ret;

    if ((rc = qemuAgentCommandFull(agent, cmd, &reply, agent->timeout,
                                   report_unsupported)) < 0)
        return rc;

    if (!(data = virJSONValueObjectGet(reply, "return"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest-get-fsinfo reply was missing return data"));
        goto cleanup;
    }

    if (!virJSONValueIsArray(data)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Malformed guest-get-fsinfo data array"));
        goto cleanup;
    }

    ndata = virJSONValueArraySize(data);
    if (ndata == 0) {
        ret = 0;
        *info = NULL;
        goto cleanup;
    }
    info_ret = g_new0(qemuAgentFSInfoPtr, ndata);

    for (i = 0; i < ndata; i++) {
        /* Reverse the order to arrange in mount order */
        virJSONValuePtr entry = virJSONValueArrayGet(data, ndata - 1 - i);
        virJSONValuePtr disk;
        unsigned long long bytes_val;
        const char *result = NULL;

        if (!entry) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("array element '%zd' of '%zd' missing in "
                             "guest-get-fsinfo return data"),
                           i, ndata);
            goto cleanup;
        }

        info_ret[i] = g_new0(qemuAgentFSInfo, 1);

        if (!(result = virJSONValueObjectGetString(entry, "mountpoint"))) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'mountpoint' missing in reply of "
                             "guest-get-fsinfo"));
            goto cleanup;
        }

        info_ret[i]->mountpoint = g_strdup(result);

        if (!(result = virJSONValueObjectGetString(entry, "name"))) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'name' missing in reply of guest-get-fsinfo"));
            goto cleanup;
        }

        info_ret[i]->name = g_strdup(result);

        if (!(result = virJSONValueObjectGetString(entry, "type"))) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'type' missing in reply of guest-get-fsinfo"));
            goto cleanup;
        }

        info_ret[i]->fstype = g_strdup(result);

        /* 'used-bytes' and 'total-bytes' were added in qemu-ga 3.0 */
        if (virJSONValueObjectHasKey(entry, "used-bytes")) {
            if (virJSONValueObjectGetNumberUlong(entry, "used-bytes",
                                                 &bytes_val) < 0) {
                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                               _("Error getting 'used-bytes' in reply of guest-get-fsinfo"));
                goto cleanup;
            }
            info_ret[i]->used_bytes = bytes_val;
        } else {
            info_ret[i]->used_bytes = -1;
        }

        if (virJSONValueObjectHasKey(entry, "total-bytes")) {
            if (virJSONValueObjectGetNumberUlong(entry, "total-bytes",
                                                 &bytes_val) < 0) {
                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                               _("Error getting 'total-bytes' in reply of guest-get-fsinfo"));
                goto cleanup;
            }
            info_ret[i]->total_bytes = bytes_val;
        } else {
            info_ret[i]->total_bytes = -1;
        }

        if (!(disk = virJSONValueObjectGet(entry, "disk"))) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'disk' missing in reply of guest-get-fsinfo"));
            goto cleanup;
        }

        if (qemuAgentGetFSInfoFillDisks(disk, info_ret[i]) < 0)
            goto cleanup;
    }

    *info = g_steal_pointer(&info_ret);
    ret = ndata;

 cleanup:
    if (info_ret) {
        for (i = 0; i < ndata; i++)
            qemuAgentFSInfoFree(info_ret[i]);
        g_free(info_ret);
    }
    return ret;
}

static int
qemuAgentGetInterfaceOneAddress(virDomainIPAddressPtr ip_addr,
                                virJSONValuePtr ip_addr_obj,
                                const char *name)
{
    const char *type, *addr;

    type = virJSONValueObjectGetString(ip_addr_obj, "ip-address-type");
    if (!type) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("qemu agent didn't provide 'ip-address-type'"
                         " field for interface '%s'"), name);
        return -1;
    }

    if (STRNEQ(type, "ipv4") && STRNEQ(type, "ipv6")) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("unknown ip address type '%s'"),
                       type);
        return -1;
    }

    addr = virJSONValueObjectGetString(ip_addr_obj, "ip-address");
    if (!addr) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("qemu agent didn't provide 'ip-address'"
                         " field for interface '%s'"), name);
        return -1;
    }

    if (virJSONValueObjectGetNumberUint(ip_addr_obj, "prefix",
                                        &ip_addr->prefix) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("malformed 'prefix' field"));
        return -1;
    }

    if (STREQ(type, "ipv4"))
        ip_addr->type = VIR_IP_ADDR_TYPE_IPV4;
    else
        ip_addr->type = VIR_IP_ADDR_TYPE_IPV6;

    ip_addr->addr = g_strdup(addr);
    return 0;
}

/**
 * qemuAgentGetInterfaceAddresses:
 * @ifaces_ret: the array to put/update the interface in
 * @ifaces_count: the number of interfaces in that array
 * @ifaces_store: hash table into @ifaces_ret by interface name
 * @iface_obj: one item from the JSON array of interfaces
 *
 * This function processes @iface_obj (which represents
 * information about a single interface) and adds the information
 * into the ifaces_ret array.
 *
 * If we're processing an interface alias, the suffix is stripped
 * and information is appended to the entry found via the @ifaces_store
 * hash table.
 *
 * Otherwise, the next free position in @ifaces_ret is used,
 * its address added to @ifaces_store, and @ifaces_count incremented.
 */
static int
qemuAgentGetInterfaceAddresses(virDomainInterfacePtr **ifaces_ret,
                               size_t *ifaces_count,
                               GHashTable *ifaces_store,
                               virJSONValuePtr iface_obj)
{
    virJSONValuePtr ip_addr_arr = NULL;
    const char *hwaddr, *name = NULL;
    virDomainInterfacePtr iface = NULL;
    g_autofree char *ifname = NULL;
    size_t addrs_count = 0;
    size_t j;

    /* the interface name is required to be present */
    name = virJSONValueObjectGetString(iface_obj, "name");
    if (!name) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("qemu agent didn't provide 'name' field"));
        return -1;
    }

    /* Handle interface alias (<ifname>:<alias>) */
    ifname = g_strdelimit(g_strdup(name), ":", '\0');

    iface = virHashLookup(ifaces_store, ifname);

    /* If the hash table doesn't contain this iface, add it */
    if (!iface) {
        if (VIR_EXPAND_N(*ifaces_ret, *ifaces_count, 1) < 0)
            return -1;

        iface = g_new0(virDomainInterface, 1);
        (*ifaces_ret)[*ifaces_count - 1] = iface;

        if (virHashAddEntry(ifaces_store, ifname, iface) < 0)
            return -1;

        iface->naddrs = 0;
        iface->name = g_strdup(ifname);

        hwaddr = virJSONValueObjectGetString(iface_obj, "hardware-address");
        iface->hwaddr = g_strdup(hwaddr);
    }

    /* the IP addresses, which may moreover appear multiple times */
    ip_addr_arr = virJSONValueObjectGet(iface_obj, "ip-addresses");
    if (!ip_addr_arr)
        return 0;

    if (!virJSONValueIsArray(ip_addr_arr)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Malformed ip-addresses array"));
        return -1;
    }

    /* If current iface already exists, continue with the count */
    addrs_count = iface->naddrs;

    if (VIR_EXPAND_N(iface->addrs, addrs_count,
                     virJSONValueArraySize(ip_addr_arr)) < 0)
        return -1;

    for (j = 0; j < virJSONValueArraySize(ip_addr_arr); j++) {
        virJSONValuePtr ip_addr_obj = virJSONValueArrayGet(ip_addr_arr, j);
        virDomainIPAddressPtr ip_addr = iface->addrs + iface->naddrs;
        iface->naddrs++;

        if (qemuAgentGetInterfaceOneAddress(ip_addr, ip_addr_obj, name) < 0)
            return -1;
    }

    return 0;
}
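
/* Example of the alias merging described above (illustrative data): the
 * agent may report both "eth0" and "eth0:0" as separate entries; the
 * suffix after ':' is stripped, the hash lookup finds the existing
 * "eth0" record, and the alias's ip-addresses are appended to it
 * instead of creating a second interface entry. */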

static int
qemuAgentGetAllInterfaceAddresses(virDomainInterfacePtr **ifaces_ret,
                                  virJSONValuePtr ret_array)
{
    g_autoptr(GHashTable) ifaces_store = NULL;
    size_t ifaces_count = 0;
    size_t i;

    *ifaces_ret = NULL;
    /* Hash table to handle the interface alias */
    ifaces_store = virHashNew(NULL);

    for (i = 0; i < virJSONValueArraySize(ret_array); i++) {
        virJSONValuePtr iface_obj = virJSONValueArrayGet(ret_array, i);

        if (qemuAgentGetInterfaceAddresses(ifaces_ret, &ifaces_count,
                                           ifaces_store, iface_obj) < 0)
            goto error;
    }

    return ifaces_count;

 error:
    if (*ifaces_ret) {
        for (i = 0; i < ifaces_count; i++)
            virDomainInterfaceFree((*ifaces_ret)[i]);
    }
    VIR_FREE(*ifaces_ret);
    return -1;
}


/*
 * qemuAgentGetInterfaces:
 * @agent: agent object
 * @ifaces: pointer to an array of pointers pointing to interface objects
 *
 * Issue guest-network-get-interfaces to guest agent, which returns a
 * list of interfaces of a running domain along with their IP and MAC
 * addresses.
 *
 * Returns: number of interfaces on success, -1 on error.
 */
int
qemuAgentGetInterfaces(qemuAgentPtr agent,
                       virDomainInterfacePtr **ifaces)
{
    g_autoptr(virJSONValue) cmd = NULL;
    g_autoptr(virJSONValue) reply = NULL;
    virJSONValuePtr ret_array = NULL;

    if (!(cmd = qemuAgentMakeCommand("guest-network-get-interfaces", NULL)))
        return -1;

    if (qemuAgentCommand(agent, cmd, &reply, agent->timeout) < 0)
        return -1;

    if (!(ret_array = virJSONValueObjectGetArray(reply, "return"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("qemu agent didn't return an array of interfaces"));
        return -1;
    }

    return qemuAgentGetAllInterfaceAddresses(ifaces, ret_array);
}
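

/* Illustrative sketch, not part of the original file: one way a caller
 * could consume qemuAgentGetInterfaces(). The helper name is hypothetical;
 * it assumes the caller already holds the agent job/lock required for
 * issuing agent commands. The returned array and its elements are owned
 * by the caller.
 */
static void G_GNUC_UNUSED
qemuAgentExampleDumpInterfaces(qemuAgentPtr agent)
{
    virDomainInterfacePtr *ifaces = NULL;
    int nifaces;
    size_t i;

    if ((nifaces = qemuAgentGetInterfaces(agent, &ifaces)) < 0)
        return;

    for (i = 0; i < (size_t)nifaces; i++)
        VIR_DEBUG("iface %zu: name=%s naddrs=%u",
                  i, ifaces[i]->name, ifaces[i]->naddrs);

    for (i = 0; i < (size_t)nifaces; i++)
        virDomainInterfaceFree(ifaces[i]);
    VIR_FREE(ifaces);
}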


int
qemuAgentSetUserPassword(qemuAgentPtr agent,
                         const char *user,
                         const char *password,
                         bool crypted)
{
    g_autoptr(virJSONValue) cmd = NULL;
    g_autoptr(virJSONValue) reply = NULL;
    g_autofree char *password64 = NULL;

    password64 = g_base64_encode((unsigned char *)password,
                                 strlen(password));

    if (!(cmd = qemuAgentMakeCommand("guest-set-user-password",
                                     "b:crypted", crypted,
                                     "s:username", user,
                                     "s:password", password64,
                                     NULL)))
        return -1;

    if (qemuAgentCommand(agent, cmd, &reply, agent->timeout) < 0)
        return -1;

    return 0;
}
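

/* Illustrative sketch, not part of the original file: setting a plain-text
 * password for "root". With crypted=false the guest agent hashes the
 * password inside the guest; passing true means @password already holds a
 * crypt()-style hash. The password literal is a placeholder.
 */
static int G_GNUC_UNUSED
qemuAgentExampleResetRootPassword(qemuAgentPtr agent)
{
    return qemuAgentSetUserPassword(agent, "root", "s3cret", false);
}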


/* Returns: 0 on success
 *          -2 when agent command is not supported by the agent and
 *             'report_unsupported' is false (libvirt error is not reported)
 *          -1 otherwise (libvirt error is reported)
 */
int
qemuAgentGetUsers(qemuAgentPtr agent,
                  virTypedParameterPtr *params,
                  int *nparams,
                  int *maxparams,
                  bool report_unsupported)
{
    g_autoptr(virJSONValue) cmd = NULL;
    g_autoptr(virJSONValue) reply = NULL;
    virJSONValuePtr data = NULL;
    size_t ndata;
    size_t i;
    int rc;

    if (!(cmd = qemuAgentMakeCommand("guest-get-users", NULL)))
        return -1;

    if ((rc = qemuAgentCommandFull(agent, cmd, &reply, agent->timeout,
                                   report_unsupported)) < 0)
        return rc;

    if (!(data = virJSONValueObjectGetArray(reply, "return"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest-get-users reply was missing return data"));
        return -1;
    }

    ndata = virJSONValueArraySize(data);

    if (virTypedParamsAddUInt(params, nparams, maxparams,
                              "user.count", ndata) < 0)
        return -1;

    for (i = 0; i < ndata; i++) {
        virJSONValuePtr entry = virJSONValueArrayGet(data, i);
        char param_name[VIR_TYPED_PARAM_FIELD_LENGTH];
        const char *strvalue;
        double logintime;

        if (!entry) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("array element missing in guest-get-users return "
                             "value"));
            return -1;
        }

        if (!(strvalue = virJSONValueObjectGetString(entry, "user"))) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'user' missing in reply of guest-get-users"));
            return -1;
        }

        g_snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH, "user.%zu.name", i);
        if (virTypedParamsAddString(params, nparams, maxparams,
                                    param_name, strvalue) < 0)
            return -1;

        /* 'domain' is only present for windows guests */
        if ((strvalue = virJSONValueObjectGetString(entry, "domain"))) {
            g_snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH,
                       "user.%zu.domain", i);
            if (virTypedParamsAddString(params, nparams, maxparams,
                                        param_name, strvalue) < 0)
                return -1;
        }

        if (virJSONValueObjectGetNumberDouble(entry, "login-time", &logintime) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'login-time' missing in reply of guest-get-users"));
            return -1;
        }
        g_snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH,
                   "user.%zu.login-time", i);
        if (virTypedParamsAddULLong(params, nparams, maxparams,
                                    param_name, logintime * 1000) < 0)
            return -1;
    }

    return 0;
}
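

/* Illustrative sketch, not part of the original file: reading back the
 * typed parameters filled in by qemuAgentGetUsers(). Field names follow
 * the "user.count" / "user.<n>.name" scheme used above; the helper name
 * is hypothetical.
 */
static void G_GNUC_UNUSED
qemuAgentExampleLogUsers(qemuAgentPtr agent)
{
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int maxparams = 0;
    unsigned int count = 0;

    if (qemuAgentGetUsers(agent, &params, &nparams, &maxparams, true) < 0)
        return;

    /* virTypedParamsGetUInt() returns 1 when the field is present */
    if (virTypedParamsGetUInt(params, nparams, "user.count", &count) == 1)
        VIR_DEBUG("%u user(s) logged in", count);

    virTypedParamsFree(params, nparams);
}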


/* Returns: 0 on success
 *          -2 when agent command is not supported by the agent and
 *             'report_unsupported' is false (libvirt error is not reported)
 *          -1 otherwise (libvirt error is reported)
 */
int
qemuAgentGetOSInfo(qemuAgentPtr agent,
                   virTypedParameterPtr *params,
                   int *nparams,
                   int *maxparams,
                   bool report_unsupported)
{
    g_autoptr(virJSONValue) cmd = NULL;
    g_autoptr(virJSONValue) reply = NULL;
    virJSONValuePtr data = NULL;
    int rc;

    if (!(cmd = qemuAgentMakeCommand("guest-get-osinfo", NULL)))
        return -1;

    if ((rc = qemuAgentCommandFull(agent, cmd, &reply, agent->timeout,
                                   report_unsupported)) < 0)
        return rc;

    if (!(data = virJSONValueObjectGetObject(reply, "return"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest-get-osinfo reply was missing return data"));
        return -1;
    }

#define OSINFO_ADD_PARAM(agent_string_, param_string_) \
    do { \
        const char *result; \
        if ((result = virJSONValueObjectGetString(data, agent_string_))) { \
            if (virTypedParamsAddString(params, nparams, maxparams, \
                                        param_string_, result) < 0) { \
                return -1; \
            } \
        } \
    } while (0)
    OSINFO_ADD_PARAM("id", "os.id");
    OSINFO_ADD_PARAM("name", "os.name");
    OSINFO_ADD_PARAM("pretty-name", "os.pretty-name");
    OSINFO_ADD_PARAM("version", "os.version");
    OSINFO_ADD_PARAM("version-id", "os.version-id");
    OSINFO_ADD_PARAM("machine", "os.machine");
    OSINFO_ADD_PARAM("variant", "os.variant");
    OSINFO_ADD_PARAM("variant-id", "os.variant-id");
    OSINFO_ADD_PARAM("kernel-release", "os.kernel-release");
    OSINFO_ADD_PARAM("kernel-version", "os.kernel-version");

    return 0;
}
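
/* For reference (illustrative, not in the original source): a single
 * OSINFO_ADD_PARAM("id", "os.id") invocation above expands to
 *
 *     do {
 *         const char *result;
 *         if ((result = virJSONValueObjectGetString(data, "id"))) {
 *             if (virTypedParamsAddString(params, nparams, maxparams,
 *                                         "os.id", result) < 0) {
 *                 return -1;
 *             }
 *         }
 *     } while (0);
 *
 * i.e. each optional string field in the agent reply is copied verbatim
 * into a typed parameter, and absent fields are silently skipped.
 */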


/* Returns: 0 on success
 *          -2 when agent command is not supported by the agent and
 *             'report_unsupported' is false (libvirt error is not reported)
 *          -1 otherwise (libvirt error is reported)
 */
int
qemuAgentGetTimezone(qemuAgentPtr agent,
                     virTypedParameterPtr *params,
                     int *nparams,
                     int *maxparams,
                     bool report_unsupported)
{
    g_autoptr(virJSONValue) cmd = NULL;
    g_autoptr(virJSONValue) reply = NULL;
    virJSONValuePtr data = NULL;
    const char *name;
    int offset;
    int rc;

    if (!(cmd = qemuAgentMakeCommand("guest-get-timezone", NULL)))
        return -1;

    if ((rc = qemuAgentCommandFull(agent, cmd, &reply, agent->timeout,
                                   report_unsupported)) < 0)
        return rc;

    if (!(data = virJSONValueObjectGetObject(reply, "return"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest-get-timezone reply was missing return data"));
        return -1;
    }

    if ((name = virJSONValueObjectGetString(data, "zone")) &&
        virTypedParamsAddString(params, nparams, maxparams,
                                "timezone.name", name) < 0)
        return -1;

    if ((virJSONValueObjectGetNumberInt(data, "offset", &offset)) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("'offset' missing in reply of guest-get-timezone"));
        return -1;
    }

    if (virTypedParamsAddInt(params, nparams, maxparams,
                             "timezone.offset", offset) < 0)
        return -1;

    return 0;
}


/* qemuAgentSetResponseTimeout:
 * @agent: agent object
 * @timeout: number of seconds to wait for agent response
 *
 * The agent object must be locked prior to calling this function.
 */
void
qemuAgentSetResponseTimeout(qemuAgentPtr agent,
                            int timeout)
{
    agent->timeout = timeout;
}
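

/* Illustrative sketch, not part of the original file: a polling caller that
 * prefers a timeout error over blocking could cap agent responses at, say,
 * 10 seconds (an arbitrary example value) before issuing its queries. Per
 * the comment above, the agent object is assumed to be locked by the
 * caller.
 */
static void G_GNUC_UNUSED
qemuAgentExampleCapTimeout(qemuAgentPtr agent)
{
    qemuAgentSetResponseTimeout(agent, 10);
}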


/**
 * qemuAgentSSHGetAuthorizedKeys:
 * @agent: agent object
 * @user: user to get authorized keys for
 * @keys: Array of authorized keys
 *
 * Fetch the public keys from @user's $HOME/.ssh/authorized_keys.
 *
 * Returns: number of keys returned on success,
 *          -1 otherwise (error is reported)
 */
int
qemuAgentSSHGetAuthorizedKeys(qemuAgentPtr agent,
                              const char *user,
                              char ***keys)
{
    g_autoptr(virJSONValue) cmd = NULL;
    g_autoptr(virJSONValue) reply = NULL;
    virJSONValuePtr data = NULL;

    if (!(cmd = qemuAgentMakeCommand("guest-ssh-get-authorized-keys",
                                     "s:username", user,
                                     NULL)))
        return -1;

    if (qemuAgentCommand(agent, cmd, &reply, agent->timeout) < 0)
        return -1;

    if (!(data = virJSONValueObjectGetObject(reply, "return"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("qemu agent didn't return an array of keys"));
        return -1;
    }

    if (!(*keys = virJSONValueObjectGetStringArray(data, "keys")))
        return -1;

    return g_strv_length(*keys);
}
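

/* Illustrative sketch, not part of the original file: listing one user's
 * keys. The returned vector is a NULL-terminated string array owned by the
 * caller, so g_strfreev() releases it; the helper name is hypothetical.
 */
static void G_GNUC_UNUSED
qemuAgentExampleLogAuthorizedKeys(qemuAgentPtr agent,
                                  const char *user)
{
    char **keys = NULL;
    int nkeys;
    size_t i;

    if ((nkeys = qemuAgentSSHGetAuthorizedKeys(agent, user, &keys)) < 0)
        return;

    for (i = 0; i < (size_t)nkeys; i++)
        VIR_DEBUG("key %zu for %s: %s", i, user, keys[i]);

    g_strfreev(keys);
}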


/**
 * qemuAgentSSHAddAuthorizedKeys:
 * @agent: agent object
 * @user: user to add authorized keys for
 * @keys: Array of authorized keys
 * @nkeys: number of items in @keys array
 * @reset: whether to truncate authorized keys file before writing
 *
 * Append SSH @keys into the @user's authorized keys file. If
 * @reset is true then the file is truncated before write and
 * thus contains only newly added @keys.
 *
 * Returns: 0 on success,
 *          -1 otherwise (error is reported)
 */
int
qemuAgentSSHAddAuthorizedKeys(qemuAgentPtr agent,
                              const char *user,
                              const char **keys,
                              size_t nkeys,
                              bool reset)
{
    g_autoptr(virJSONValue) cmd = NULL;
    g_autoptr(virJSONValue) reply = NULL;
    g_autoptr(virJSONValue) jkeys = NULL;

    jkeys = qemuAgentMakeStringsArray(keys, nkeys);
    if (jkeys == NULL)
        return -1;

    if (!(cmd = qemuAgentMakeCommand("guest-ssh-add-authorized-keys",
                                     "s:username", user,
                                     "a:keys", &jkeys,
                                     "b:reset", reset,
                                     NULL)))
        return -1;

    return qemuAgentCommand(agent, cmd, &reply, agent->timeout);
}


/**
 * qemuAgentSSHRemoveAuthorizedKeys:
 * @agent: agent object
 * @user: user to remove authorized keys for
 * @keys: Array of authorized keys
 * @nkeys: number of items in @keys array
 *
 * Remove SSH @keys from the @user's authorized keys file. It's
 * not considered an error when trying to remove a non-existent
 * key.
 *
 * Returns: 0 on success,
 *          -1 otherwise (error is reported)
 */
int
qemuAgentSSHRemoveAuthorizedKeys(qemuAgentPtr agent,
                                 const char *user,
                                 const char **keys,
                                 size_t nkeys)
{
    g_autoptr(virJSONValue) cmd = NULL;
    g_autoptr(virJSONValue) reply = NULL;
    g_autoptr(virJSONValue) jkeys = NULL;

    jkeys = qemuAgentMakeStringsArray(keys, nkeys);
    if (jkeys == NULL)
        return -1;

    if (!(cmd = qemuAgentMakeCommand("guest-ssh-remove-authorized-keys",
                                     "s:username", user,
                                     "a:keys", &jkeys,
                                     NULL)))
        return -1;

    return qemuAgentCommand(agent, cmd, &reply, agent->timeout);
}
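

/* Illustrative sketch, not part of the original file: replacing a user's
 * authorized keys with a single new key (reset=true truncates the file
 * first), then removing it again. The key literal is a placeholder and the
 * helper name is hypothetical.
 */
static int G_GNUC_UNUSED
qemuAgentExampleRotateKey(qemuAgentPtr agent,
                          const char *user)
{
    const char *keys[] = { "ssh-ed25519 AAAA... example@host" };

    if (qemuAgentSSHAddAuthorizedKeys(agent, user, keys, 1, true) < 0)
        return -1;

    return qemuAgentSSHRemoveAuthorizedKeys(agent, user, keys, 1);
}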


int qemuAgentGetDisks(qemuAgentPtr agent,
                      qemuAgentDiskInfoPtr **disks,
                      bool report_unsupported)
{
    g_autoptr(virJSONValue) cmd = NULL;
    g_autoptr(virJSONValue) reply = NULL;
    virJSONValuePtr data = NULL;
    size_t ndata;
    size_t i;
    int rc;

    if (!(cmd = qemuAgentMakeCommand("guest-get-disks", NULL)))
        return -1;

    if ((rc = qemuAgentCommandFull(agent, cmd, &reply, agent->timeout,
                                   report_unsupported)) < 0)
        return rc;

    if (!(data = virJSONValueObjectGetArray(reply, "return"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("qemu agent didn't return an array of disks"));
        return -1;
    }

    ndata = virJSONValueArraySize(data);

    *disks = g_new0(qemuAgentDiskInfoPtr, ndata);

    for (i = 0; i < ndata; i++) {
        virJSONValuePtr addr;
        virJSONValuePtr entry = virJSONValueArrayGet(data, i);
        qemuAgentDiskInfoPtr disk;

        if (!entry) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("array element missing in guest-get-disks return "
                             "value"));
            goto error;
        }

        disk = g_new0(qemuAgentDiskInfo, 1);
        (*disks)[i] = disk;

        disk->name = g_strdup(virJSONValueObjectGetString(entry, "name"));
        if (!disk->name) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'name' missing in reply of guest-get-disks"));
            goto error;
        }

        if (virJSONValueObjectGetBoolean(entry, "partition", &disk->partition) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("'partition' missing in reply of guest-get-disks"));
            goto error;
        }

        disk->dependencies = virJSONValueObjectGetStringArray(entry, "dependencies");
        disk->alias = g_strdup(virJSONValueObjectGetString(entry, "alias"));
        addr = virJSONValueObjectGetObject(entry, "address");
        if (addr) {
            disk->address = qemuAgentGetDiskAddress(addr);
            if (!disk->address)
                goto error;
        }
    }

    return ndata;

 error:
    for (i = 0; i < ndata; i++) {
        qemuAgentDiskInfoFree((*disks)[i]);
    }
    g_free(*disks);
    return -1;
}
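

/* Illustrative sketch, not part of the original file: the ownership rules
 * implied by qemuAgentGetDisks() — the caller frees each element with
 * qemuAgentDiskInfoFree() and then the array itself. The helper name is
 * hypothetical.
 */
static void G_GNUC_UNUSED
qemuAgentExampleLogDisks(qemuAgentPtr agent)
{
    qemuAgentDiskInfoPtr *disks = NULL;
    int ndisks;
    size_t i;

    if ((ndisks = qemuAgentGetDisks(agent, &disks, false)) < 0)
        return;

    for (i = 0; i < (size_t)ndisks; i++)
        VIR_DEBUG("disk %zu: name=%s partition=%d",
                  i, disks[i]->name, disks[i]->partition);

    for (i = 0; i < (size_t)ndisks; i++)
        qemuAgentDiskInfoFree(disks[i]);
    g_free(disks);
}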