[package]
authors = ["The Chromium OS Authors"]
edition = "2021"
name = "devices"
version = "0.1.0"

[dependencies]
acpi_tables = { git = "https://github.com/rust-vmm/acpi_tables", branch = "main" }
anyhow = "1.0.87"
# NOTE(review): `arch` is also declared under the aarch64 target table below;
# the unconditional entry makes that one redundant — confirm which is intended.
arch = { path = "../arch" }
bitflags = "2.6.0"
byteorder = "1.5.0"
event_monitor = { path = "../event_monitor" }
hypervisor = { path = "../hypervisor" }
libc = "0.2.158"
log = "0.4.22"
num_enum = "0.7.2"
pci = { path = "../pci" }
serde = { version = "1.0.208", features = ["derive"] }
thiserror = "1.0.62"
tpm = { path = "../tpm" }
vm-allocator = { path = "../vm-allocator" }
vm-device = { path = "../vm-device" }
vm-memory = { version = "0.15.0", features = [
    "backend-atomic",
    "backend-bitmap",
    "backend-mmap",
] }
vm-migration = { path = "../vm-migration" }
vmm-sys-util = "0.12.1"

[target.'cfg(target_arch = "aarch64")'.dependencies]
arch = { path = "../arch" }

[features]
default = []
# Guest physical-memory control device (enabled with --pvmemcontrol).
pvmemcontrol = []