cloud-hypervisor/devices/Cargo.toml

[package]
authors = ["The Chromium OS Authors"]
edition = "2021"
name = "devices"
version = "0.1.0"

[dependencies]
acpi_tables = { git = "https://github.com/rust-vmm/acpi_tables", branch = "main" }
anyhow = "1.0.87"
arch = { path = "../arch" }
bitflags = "2.6.0"
byteorder = "1.5.0"
event_monitor = { path = "../event_monitor" }
hypervisor = { path = "../hypervisor" }
libc = "0.2.158"
log = "0.4.22"
num_enum = "0.7.2"
pci = { path = "../pci" }
serde = { version = "1.0.208", features = ["derive"] }
thiserror = "1.0.62"
tpm = { path = "../tpm" }
vm-allocator = { path = "../vm-allocator" }
vm-device = { path = "../vm-device" }
vm-memory = { version = "0.15.0", features = [
"backend-atomic",
"backend-bitmap",
"backend-mmap",
] }
vm-migration = { path = "../vm-migration" }
vmm-sys-util = "0.12.1"

[target.'cfg(target_arch = "aarch64")'.dependencies]
arch = { path = "../arch" }

[features]
default = []
# devices: Add pvmemcontrol device (2023-10-25)
#
# Pvmemcontrol provides a way for the guest to control its physical memory
# properties, enabling optimizations and security features. For example, the
# guest can tell the host where parts of a hugepage may be unbacked, or that
# sensitive data may not be swapped out.
#
# Pvmemcontrol allows the guest to manipulate its gPTE entries in the SLAT,
# as well as other properties of the memory map that backs its memory on the
# host. This is achieved through the KVM_CAP_SYNC_MMU capability: when it is
# available, changes to the backing of a memory region on the host are
# automatically reflected into the guest, so an mmap() or madvise() that
# affects the region becomes visible immediately.
#
# The implementation has two components: the guest Linux driver and the
# Virtual Machine Monitor (VMM) device. A guest-allocated shared buffer is
# negotiated per-cpu through a few PCI MMIO registers, and the VMM device
# assigns a unique command to each per-cpu buffer. The guest writes its
# pvmemcontrol request into its per-cpu buffer, then writes the corresponding
# command into the command register, calling into the VMM device to perform
# the request. This synchronous per-cpu shared buffer approach avoids the
# kick and busy-waiting the guest would need with a virtio virtqueue
# transport.
#
# The Cloud Hypervisor component can be enabled with --pvmemcontrol.
#
# Co-developed-by: Stanko Novakovic <stanko@google.com>
# Co-developed-by: Pasha Tatashin <tatashin@google.com>
# Signed-off-by: Yuanchu Xie <yuanchu@google.com>
pvmemcontrol = []
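
The commit message above describes the device's control flow: the guest registers a per-cpu buffer through PCI MMIO registers, receives a unique command value for it, writes a request into the buffer, and then writes that command into the command register to have the VMM act on it synchronously. Below is a minimal, self-contained Rust sketch of that negotiation and dispatch. All names here (PvmemcontrolDevice, register_buffer, command_register_write, the Request fields) are illustrative assumptions, not Cloud Hypervisor's actual API, and guest memory is stood in for by a plain map where the real device would read through the vm-memory crate.

use std::collections::HashMap;

/// A pvmemcontrol request as the guest would write it into its per-cpu
/// buffer (illustrative layout, not the real wire format).
#[derive(Clone, Copy, Debug)]
struct Request {
    func_code: u64, // operation to apply, e.g. an madvise-style hint
    addr: u64,      // guest-physical start of the affected range
    length: u64,    // size of the range in bytes
}

/// Per-cpu channel state recorded at negotiation time.
struct Channel {
    buffer_gpa: u64, // guest-physical address of the per-cpu buffer
}

#[derive(Default)]
struct PvmemcontrolDevice {
    channels: HashMap<u32, Channel>, // command value -> per-cpu channel
    next_command: u32,
    // Stand-in for guest memory: buffer gpa -> the request stored there.
    guest_buffers: HashMap<u64, Request>,
}

impl PvmemcontrolDevice {
    /// Negotiation over the PCI MMIO registers: the guest hands over a
    /// per-cpu buffer address and gets back a unique command value.
    fn register_buffer(&mut self, buffer_gpa: u64) -> u32 {
        let command = self.next_command;
        self.next_command += 1;
        self.channels.insert(command, Channel { buffer_gpa });
        command
    }

    /// A write to the command register: find the buffer tied to this
    /// command, read the request out of it, and perform it synchronously,
    /// so the guest needs no virtqueue kick or busy-waiting.
    fn command_register_write(&mut self, command: u32) {
        let Some(chan) = self.channels.get(&command) else {
            eprintln!("unknown command {command}");
            return;
        };
        if let Some(req) = self.guest_buffers.get(&chan.buffer_gpa) {
            // The real device would translate req.addr to the host mapping
            // and call e.g. madvise(2); with KVM_CAP_SYNC_MMU the change in
            // the host backing becomes visible to the guest immediately.
            println!(
                "apply func {} to gpa {:#x}, len {:#x}",
                req.func_code, req.addr, req.length
            );
        }
    }
}

fn main() {
    let mut dev = PvmemcontrolDevice::default();
    // Guest side (simulated): allocate a per-cpu buffer and register it.
    let buffer_gpa = 0x4000_0000;
    let command = dev.register_buffer(buffer_gpa);
    // Guest writes a request into its buffer, then hits the command register.
    dev.guest_buffers.insert(
        buffer_gpa,
        Request { func_code: 1, addr: 0x8000_0000, length: 0x20_0000 },
    );
    dev.command_register_write(command);
}

Since pvmemcontrol is an off-by-default cargo feature here, the device code is only compiled in when the crate is built with the feature enabled (cargo build --features pvmemcontrol at this crate's level); per the commit message, the running VMM then exposes it via the --pvmemcontrol flag.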