
vhost-user: add VHOST_USER_SET_LOG_BASE command

Set the logging shared memory space.

When the back-end has the VHOST_USER_PROTOCOL_F_LOG_SHMFD protocol feature,
the log memory fd is passed in the ancillary data of the
VHOST_USER_SET_LOG_BASE message, and the size and offset of the shared
memory area are provided in the message payload.

Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Laurent Vivier 2024-12-13 14:23:14 +01:00
parent ebdc7c6fbc
commit 26190d93c0
5 changed files with 168 additions and 3 deletions
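
For orientation, the payload consumed by the new handler carries the size and
offset of the shared log area. The msg->payload.log.mmap_size / .mmap_offset
accesses in the diff imply a layout roughly like the sketch below; the struct
name here is illustrative only, not taken from the tree. Each bit of the
mapped area tracks one VHOST_LOG_PAGE (4 KiB) of guest memory.

	/* Illustrative sketch only: field names follow the
	 * msg->payload.log.* accesses below.
	 */
	struct vhost_user_log_sketch {
		uint64_t mmap_size;	/* size of the shared log region */
		uint64_t mmap_offset;	/* offset of the log area within the fd */
	};

	/* One dirty bit per 4 KiB page: 1 GiB of guest memory needs
	 * 1 GiB / 4096 / 8 = 32 KiB of log bitmap.
	 */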

util.h

@@ -152,6 +152,9 @@ static inline void barrier(void) { __asm__ __volatile__("" ::: "memory"); }
 #define smp_wmb() smp_mb_release()
 #define smp_rmb() smp_mb_acquire()
 
+#define qatomic_or(ptr, n) \
+	((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
+
 #define NS_FN_STACK_SIZE (1024 * 1024) /* 1MiB */
 
 int do_clone(int (*fn)(void *), char *stack_area, size_t stack_size, int flags,
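
The qatomic_or() helper added here is what lets the logging path mark dirty
pages without a lock. A minimal usage sketch (variable names are illustrative,
mirroring how vu_log_page() uses it below):

	uint64_t page = 5;          /* page number to mark dirty */
	uint8_t log[16] = { 0 };    /* stand-in for the shared log bitmap */

	/* atomically set bit (page % 8) in byte (page / 8) */
	qatomic_or(&log[page / 8], 1 << (page % 8));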

vhost_user.c

@@ -510,6 +510,12 @@ static bool vu_set_mem_table_exec(struct vu_dev *vdev,
  */
 static void vu_close_log(struct vu_dev *vdev)
 {
+	if (vdev->log_table) {
+		if (munmap(vdev->log_table, vdev->log_size) != 0)
+			die_perror("close log munmap() error");
+		vdev->log_table = NULL;
+	}
+
 	if (vdev->log_call_fd != -1) {
 		close(vdev->log_call_fd);
 		vdev->log_call_fd = -1;
@@ -520,7 +526,6 @@ static void vu_close_log(struct vu_dev *vdev)
  * vu_log_kick() - Inform the front-end that the log has been modified
  * @vdev:	vhost-user device
  */
-/* cppcheck-suppress unusedFunction */
 void vu_log_kick(const struct vu_dev *vdev)
 {
 	if (vdev->log_call_fd != -1) {
@@ -532,6 +537,84 @@ void vu_log_kick(const struct vu_dev *vdev)
 	}
 }
 
+/**
+ * vu_log_page() -- Update logging table
+ * @log_table:	Base address of the logging table
+ * @page:	Page number that has been updated
+ */
+/* NOLINTNEXTLINE(readability-non-const-parameter) */
+static void vu_log_page(uint8_t *log_table, uint64_t page)
+{
+	qatomic_or(&log_table[page / 8], 1 << (page % 8));
+}
+
+/**
+ * vu_log_write() -- Log memory write
+ * @vdev:	vhost-user device
+ * @address:	Memory address
+ * @length:	Memory size
+ */
+void vu_log_write(const struct vu_dev *vdev, uint64_t address, uint64_t length)
+{
+	uint64_t page;
+
+	if (!vdev->log_table || !length ||
+	    !vu_has_feature(vdev, VHOST_F_LOG_ALL))
+		return;
+
+	page = address / VHOST_LOG_PAGE;
+	while (page * VHOST_LOG_PAGE < address + length) {
+		vu_log_page(vdev->log_table, page);
+		page++;
+	}
+
+	vu_log_kick(vdev);
+}
+
+/**
+ * vu_set_log_base_exec() - Set the memory log base
+ * @vdev:	vhost-user device
+ * @msg:	vhost-user message
+ *
+ * Return: True as a reply is requested
+ *
+ * #syscalls:vu mmap munmap
+ */
+static bool vu_set_log_base_exec(struct vu_dev *vdev,
+				 struct vhost_user_msg *msg)
+{
+	uint64_t log_mmap_size, log_mmap_offset;
+	void *base;
+	int fd;
+
+	if (msg->fd_num != 1 || msg->hdr.size != sizeof(msg->payload.log))
+		die("Invalid log_base message");
+
+	fd = msg->fds[0];
+	log_mmap_offset = msg->payload.log.mmap_offset;
+	log_mmap_size = msg->payload.log.mmap_size;
+
+	debug("Log mmap_offset: %"PRId64, log_mmap_offset);
+	debug("Log mmap_size:   %"PRId64, log_mmap_size);
+
+	base = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
+		    log_mmap_offset);
+	close(fd);
+
+	if (base == MAP_FAILED)
+		die("log mmap error");
+
+	if (vdev->log_table)
+		munmap(vdev->log_table, vdev->log_size);
+
+	vdev->log_table = base;
+	vdev->log_size = log_mmap_size;
+
+	msg->hdr.size = sizeof(msg->payload.u64);
+	msg->fd_num = 0;
+
+	return true;
+}
+
 /**
  * vu_set_log_fd_exec() -- Set the eventfd used to report logging update
  * @vdev:	vhost-user device
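
As a worked example of the page loop in vu_log_write() (addresses made up for
illustration): a 10-byte write at guest address 0x1ffe ends at 0x2008 and
therefore straddles a page boundary, so pages 1 and 2 are both marked:

	uint64_t address = 0x1ffe, length = 10;
	uint64_t page = address / VHOST_LOG_PAGE;	/* 0x1ffe / 4096 = 1 */

	while (page * VHOST_LOG_PAGE < address + length) {	/* end = 0x2008 */
		/* vu_log_page(log_table, page) marks page 1, then page 2 */
		page++;
	}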
@@ -915,6 +998,7 @@ void vu_init(struct ctx *c)
 			.notification = true,
 		};
 	}
+	c->vdev->log_table = NULL;
 	c->vdev->log_call_fd = -1;
 }
 
@@ -984,6 +1068,7 @@ static bool (*vu_handle[VHOST_USER_MAX])(struct vu_dev *vdev,
 	[VHOST_USER_GET_QUEUE_NUM]	= vu_get_queue_num_exec,
 	[VHOST_USER_SET_OWNER]		= vu_set_owner_exec,
 	[VHOST_USER_SET_MEM_TABLE]	= vu_set_mem_table_exec,
+	[VHOST_USER_SET_LOG_BASE]	= vu_set_log_base_exec,
 	[VHOST_USER_SET_LOG_FD]		= vu_set_log_fd_exec,
 	[VHOST_USER_SET_VRING_NUM]	= vu_set_vring_num_exec,
 	[VHOST_USER_SET_VRING_ADDR]	= vu_set_vring_addr_exec,

vhost_user.h

@@ -15,6 +15,7 @@
 #include "iov.h"
 
 #define VHOST_USER_F_PROTOCOL_FEATURES 30
+#define VHOST_LOG_PAGE 4096
 
 #define VHOST_MEMORY_BASELINE_NREGIONS 8
@@ -241,5 +242,7 @@ void vu_print_capabilities(void);
 void vu_init(struct ctx *c);
 void vu_cleanup(struct vu_dev *vdev);
 void vu_log_kick(const struct vu_dev *vdev);
+void vu_log_write(const struct vu_dev *vdev, uint64_t address,
+		  uint64_t length);
 void vu_control_handler(struct vu_dev *vdev, int fd, uint32_t events);
 #endif /* VHOST_USER_H */

virtio.c

@@ -81,6 +81,7 @@
 #include "util.h"
 #include "virtio.h"
+#include "vhost_user.h"
 
 #define VIRTQUEUE_MAX_SIZE 1024
@@ -592,7 +593,72 @@ static inline void vring_used_write(const struct vu_dev *vdev,
 	struct vring_used *used = vq->vring.used;
 
 	used->ring[i] = *uelem;
-	(void)vdev;
+	vu_log_write(vdev, vq->vring.log_guest_addr +
+		     offsetof(struct vring_used, ring[i]),
+		     sizeof(used->ring[i]));
+}
+
+/**
+ * vu_log_queue_fill() -- Log virtqueue memory update
+ * @vdev:	vhost-user device
+ * @vq:		Virtqueue
+ * @index:	Descriptor ring index
+ * @len:	Size of the element
+ */
+static void vu_log_queue_fill(const struct vu_dev *vdev, struct vu_virtq *vq,
+			      unsigned int index, unsigned int len)
+{
+	struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
+	struct vring_desc *desc = vq->vring.desc;
+	unsigned int max, min;
+	unsigned int num_bufs = 0;
+	uint64_t read_len;
+
+	if (!vdev->log_table || !len || !vu_has_feature(vdev, VHOST_F_LOG_ALL))
+		return;
+
+	max = vq->vring.num;
+
+	if (le16toh(desc[index].flags) & VRING_DESC_F_INDIRECT) {
+		unsigned int desc_len;
+		uint64_t desc_addr;
+
+		if (le32toh(desc[index].len) % sizeof(struct vring_desc))
+			die("Invalid size for indirect buffer table");
+
+		/* loop over the indirect descriptor table */
+		desc_addr = le64toh(desc[index].addr);
+		desc_len = le32toh(desc[index].len);
+		max = desc_len / sizeof(struct vring_desc);
+		read_len = desc_len;
+		desc = vu_gpa_to_va(vdev, &read_len, desc_addr);
+		if (desc && read_len != desc_len) {
+			/* Failed to use zero copy */
+			desc = NULL;
+			if (!virtqueue_read_indirect_desc(vdev, desc_buf,
+							  desc_addr,
+							  desc_len))
+				desc = desc_buf;
+		}
+
+		if (!desc)
+			die("Invalid indirect buffer table");
+
+		index = 0;
+	}
+
+	do {
+		if (++num_bufs > max)
+			die("Looped descriptor");
+
+		if (le16toh(desc[index].flags) & VRING_DESC_F_WRITE) {
+			min = MIN(le32toh(desc[index].len), len);
+			vu_log_write(vdev, le64toh(desc[index].addr), min);
+			len -= min;
+		}
+	} while (len > 0 &&
+		 (virtqueue_read_next_desc(desc, index, max, &index) ==
+		  VIRTQUEUE_READ_DESC_MORE));
 }
@@ -614,6 +680,8 @@ void vu_queue_fill_by_index(const struct vu_dev *vdev, struct vu_virtq *vq,
 	if (!vq->vring.avail)
 		return;
 
+	vu_log_queue_fill(vdev, vq, index, len);
+
 	idx = (idx + vq->used_idx) % vq->vring.num;
 
 	uelem.id = htole32(index);
@@ -646,7 +714,9 @@ static inline void vring_used_idx_set(const struct vu_dev *vdev,
 				       struct vu_virtq *vq, uint16_t val)
 {
 	vq->vring.used->idx = htole16(val);
-	(void)vdev;
+	vu_log_write(vdev, vq->vring.log_guest_addr +
+		     offsetof(struct vring_used, idx),
+		     sizeof(vq->vring.used->idx));
 
 	vq->used_idx = val;
 }
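
For orientation on the offsets logged above (standard split-virtqueue layout,
not specific to this patch): struct vring_used starts with two 16-bit fields
(flags, idx) followed by 8-byte ring entries, so the guest address reported
for used-ring slot i works out to:

	/* offsetof(struct vring_used, ring[i]) == 4 + i * 8 with the standard layout */
	uint64_t gpa = vq->vring.log_guest_addr + 4 + (uint64_t)i * 8;

	/* vu_log_write(vdev, gpa, 8) then sets the dirty bit(s) for the
	 * page(s) covering that slot
	 */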

virtio.h

@@ -104,6 +104,8 @@ struct vu_dev_region {
  * @features:		Vhost-user features
  * @protocol_features:	Vhost-user protocol features
  * @log_call_fd:	Eventfd to report logging update
+ * @log_size:		Size of the logging memory region
+ * @log_table:		Base of the logging memory region
  */
 struct vu_dev {
 	struct ctx *context;
@@ -113,6 +115,8 @@ struct vu_dev {
 	uint64_t features;
 	uint64_t protocol_features;
 	int log_call_fd;
+	uint64_t log_size;
+	uint8_t *log_table;
 };
 
 /**